diff --git "a/5066.jsonl" "b/5066.jsonl"
new file mode 100644
--- /dev/null
+++ "b/5066.jsonl"
@@ -0,0 +1,494 @@
+{"seq_id":"40734323628","text":"import pygame \nimport CreateMap.Build as bd\nimport CreateMap.IforItem as ifitem\nimport Interface.Constants as ct\nimport Interface.Button as bt\nfrom pygame.locals import *\n\n#Controlador da Construção\nclass CreateMapMenuBuild():\n    def __init__(self,father) -> None:\n        #Construtor\n        self.windows = pygame.display.get_surface()\n        self.createInterfaceMenu()\n        self.stateBuild = False\n        self.father = father\n        pass\n\n    def createInterfaceMenu(self):\n        #Criar Menus\n        self.buttonModelImage = pygame.image.load(ct.dicImageBut2)\n        self.buttonModelImage = pygame.transform.scale(self.buttonModelImage,(85,85))\n\n        self.backGroundMenu = pygame.image.load(ct.dicBackGroundImageMenu)\n        self.backGroundMenu = pygame.transform.scale(self.backGroundMenu,(ct.gameWidth + 10,100))\n        self.backgroundMenuButtonsRenderized = self.windows.blit(self.backGroundMenu,(-5,885))\n        \n        posButsX,posButsY = self.backgroundMenuButtonsRenderized.topleft\n\n        #Shope de Itens\n        for item in ct.itemBuild:\n            item.itemIcone = pygame.transform.scale(item.itemIcone,(50,50))\n            item.But = bt.Button(self.buttonModelImage,posButsX + 50 + (item.id * 90), posButsY + 50,item.name,self.windows,iconBut=item.itemIcone,panding=25)\n\n    def controllerButs(self):\n        #Verificar Item Selecionado\n        for item in ct.itemBuild:\n            item.But.update()\n            if item.But.onMouseEnter(pygame.mouse.get_pos()):\n                ifitem.iforItem(item)\n                if(pygame.mouse.get_pressed()[0]):\n                    if item.But.checkInput(pygame.mouse.get_pos()) == True:\n                        self.father.inBuild = True\n                        self.father.itemBuildController = bd.Build(item)\n                        self.father.itemBuildController.timeBuild = 0\n    \n    def update(self):\n        #Atualizar\n        self.backgroundMenuButtonsRenderized = self.windows.blit(self.backGroundMenu,(-5,885))\n        self.controllerButs()","repo_name":"MushroomAngelsGames/Ai-Neural-NetWorking","sub_path":"Scripts/build/exe.win-amd64-3.11/CreateMap/CreateMapMenuBuild.py","file_name":"CreateMapMenuBuild.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"13820071926","text":"import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow import keras\n\n#tf.keras.layers.SimpleRNN, layers.GRU, layers.LSTM\n\ndef RNN_model():\n\n    model = keras.Sequential()\n    model.add(layers.Embedding(input_dim = 1000, output_dim = 64))\n    model.add(layers.LSTM(128))\n    model.add(layers.Dense(10))\n    model.summary()\n\nif __name__ == \"__main__\":\n    RNN_model()","repo_name":"zhangenzhi/2020-winter","sub_path":"7_Keras_layers/keras_rnn.py","file_name":"keras_rnn.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
+{"seq_id":"5618418022","text":"import numpy as np\nimport pickle\nfrom matplotlib import pyplot as plt\nimport os\n\nnoise_mapping = (0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001)\nfor parvalue in range(8):\n    with open('./data/noise_' + str(parvalue) + '.pkl', 'rb') as f:\n        xout, tout, xout_without_noise, euler_param, sim_param = pickle.load(f)\n\n    x = xout[:, 0::10, :]\n    t = tout[:, 0::10]\n\n    fig = plt.figure()\n    ax = fig.gca()\n    ax.set_xticks(np.arange(0, 11, 1))\n    ax.set_yticks(np.arange(-1.0, 3.0, 0.5))\n\n    for i in range(10):\n        ic = ((np.round(euler_param.ic[i], 2)))\n        plt.plot(t[i, :], x[i, :, 0], label='ic:' + str(ic))\n\n    plt.legend(bbox_to_anchor = (1.05, 1), 
loc = 2, borderaxespad = 0.)\n plt.title('Observed data used for experiments in 1D, noise = ' + str(noise_mapping[parvalue]))\n plt.grid()\n plt.savefig('./data/plots/noise_' + str(parvalue) + '.pdf', format = 'pdf', bbox_inches='tight')\n","repo_name":"hbhat4000/pathsamp","sub_path":"1dcode/plot_data.py","file_name":"plot_data.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"70655789520","text":"from datetime import datetime\nimport pytz\nfrom dateutil import parser\nimport logging\n\nfrom methods import get_point_from_object\n\nclass TestModel:\n def __init__(self, scheduled_tasks, emergencies, resources, assigned_resources, COMPLIANCE_RATE, DELAY_TIME, RESOURCE_SPEED) -> None:\n self.scheduled_tasks = scheduled_tasks\n self.emergencies = emergencies\n self.resources = resources\n self.assigned_resources = assigned_resources\n self.COMPLIANCE_RATE = COMPLIANCE_RATE\n self.DELAY_TIME = DELAY_TIME\n self.RESOURCE_SPEED = RESOURCE_SPEED\n\n def getTasksResourceId(self, assignment_id):\n return self.assigned_resources[assignment_id] # return resource_id\n\n @staticmethod\n def getDistanceTwoPointsinKM(point1, point2):\n from math import sin, cos, acos, radians\n R = 6371.0 # approximate radius of earth in km\n distance = acos(sin(radians(point1.y))*sin(radians(point2.y))+cos(radians(point1.y))*cos(radians(point2.y))*cos(radians(point2.x)-radians(point1.x)))*R\n return distance\n\n @staticmethod\n def getDateTime(date):\n if not date:\n return None\n dt = parser.parse((date.split(\"+\")[0])+'Z')\n return dt\n\n def getResourcesTasks(self):\n ''' Returns a list of tasks for each resource'''\n resource_tasks ={}\n for task in self.scheduled_tasks:\n resource_id = self.getTasksResourceId(task[\"_id\"])\n current_resource_tasks = resource_tasks.get(resource_id) or []\n current_resource_tasks.append(task)\n resource_tasks[resource_id] = current_resource_tasks\n return resource_tasks\n\n def getResourceTaskBeforeEmg(self, resource, task_list, emergency): \n ''' Returns resources last task before emergency starts'''\n emergency_start_time = self.getDateTime(emergency['schedStartTime'])\n current_task = None\n current_task_time = datetime.min.replace(tzinfo=pytz.UTC)\n for task in task_list:\n task_start_time = self.getDateTime(task['schedStartTime'])\n if task_start_time <= emergency_start_time and task_start_time > current_task_time:\n current_task = task\n current_task_time = task_start_time\n return current_task or resource\n\n def getTasksBeforeEmg(self):\n # type: () -> dict[list]\n ''' Returns a list of tasks in same timeframe as emergency'''\n emergency_tasks ={}\n resource_tasks = self.getResourcesTasks()\n for emergency in self.emergencies:\n tasks_during_emg = []\n for resource in self.resources:\n task = self.getResourceTaskBeforeEmg(resource, resource_tasks[resource[\"resource_id\"]], emergency)\n tasks_during_emg.append(task)\n emergency_tasks[emergency[\"_id\"]] = tasks_during_emg\n return emergency_tasks\n\n def getNearestTask(self, emergency, task_list):\n closest_task = task_list[0]\n min_distance = self.getDistanceTwoPointsinKM(get_point_from_object(task_list[0]), get_point_from_object(emergency))\n for task in task_list:\n distance = self.getDistanceTwoPointsinKM(get_point_from_object(task), get_point_from_object(emergency))\n if distance < min_distance:\n min_distance = distance\n closest_task = task\n return closest_task, min_distance\n\n def run(self):\n result = []\n 
success_count = 0\n        total_time = 0\n        emergency_tasks = self.getTasksBeforeEmg()\n        for emergency in self.emergencies:\n            task, distance = self.getNearestTask(emergency, emergency_tasks[emergency[\"_id\"]])\n            if len(task) < 6: # task is actually the resources home\n                task_id = \"Resource Home\"\n                resource_id = task[\"resource_id\"]\n            else:\n                task_id = task[\"_id\"] \n                resource_id = self.getTasksResourceId(task[\"_id\"])\n            time_to_emg = (distance / self.RESOURCE_SPEED) * 60 # minutes\n            total_time += time_to_emg\n            successful_emg = time_to_emg <= self.DELAY_TIME\n            if successful_emg: success_count += 1\n\n            emergency_result = {\n                \"Emergency\": emergency[\"_id\"],\n                \"Nearest Task\": task_id,\n                \"Resource\": resource_id,\n                \"Distance\": distance,\n                \"Time to emergency\": time_to_emg,\n                \"Successful\": successful_emg,\n            }\n            logging.info(emergency_result)\n            result.append(emergency_result)\n\n        average_time = total_time / len(self.emergencies)\n        success_rate = success_count / len(self.emergencies)\n        return result, round(success_rate * 100), average_time\n\n\n","repo_name":"OfekDinisman/The-Gas-Leak-Problem","sub_path":"src/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"292228876","text":"from django.db import models\n\nfrom django.forms import ValidationError\n\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass DayOfTheWeek(models.Model):\n    day = models.CharField(\n        max_length=9,\n        primary_key=True,\n        db_index=True,\n        verbose_name=_(\"Day\"),\n    )\n\n    class Meta:\n        db_table = \"day_of_the_week\"\n        verbose_name = _(\"Day Of The Week\")\n        verbose_name_plural = _(\"Day of The Weeks\")\n\n    def __str__(self):\n        return f\"{self.day}\"\n\n    def save(self, *args, **kwargs):\n        # Reject new rows once all seven weekdays exist; allow updates to existing rows.\n        if self._state.adding and type(self).objects.count() >= 7:\n            raise ValidationError(\"Too many entries in the table \\\"Day Of The Week\\\"!\")\n        super().save(*args, **kwargs)\n\n\nclass Schedule(models.Model):\n    worker = models.ForeignKey(\n        \"users.Worker\",\n        on_delete=models.CASCADE,\n        verbose_name=_(\"Service\"),\n    )\n\n    day = models.ForeignKey(\n        \"DayOfTheWeek\",\n        on_delete=models.CASCADE,\n        related_name=\"m2o_day\",\n        verbose_name=_(\"Day\")\n    )\n\n    start_time = models.TimeField(\n        verbose_name=_(\"Start time\")\n    )\n\n    end_time = models.TimeField(\n        verbose_name=_(\"End time\")\n    )\n\n    class Meta:\n        db_table = \"worker_schedules\"\n        verbose_name = _(\"Worker Schedule\")\n        verbose_name_plural = _(\"Worker Schedules\")\n        constraints = [\n            models.UniqueConstraint(\n                fields=[\"worker\", \"start_time\", \"end_time\"],\n                name=\"unique_time_for_worker\",\n            )\n        ]\n\n    def __str__(self):\n        return f\"{self.worker} ({self.day}: {self.start_time}-{self.end_time})\"\n","repo_name":"sazzeck/Web-Project","sub_path":"app/main/models/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"36896594697","text":"# -*- coding: utf-8 -*-\n\"\"\"FilePage tests.\"\"\"\n#\n# (C) Pywikibot team, 2014\n#\n# Distributed under the terms of the MIT license.\n#\n__version__ = '$Id: f8a7c14f4fc2daf69ae74ca57f07de6b4bff4eec $'\n\nimport pywikibot\n\nfrom tests.aspects import unittest, TestCase\n\n\nclass TestShareFiles(TestCase):\n\n    \"\"\"Test methods fileIsShared, exists and fileUrl with shared files.\"\"\"\n\n    sites = {\n        'enwiki': {\n            'family': 'wikipedia',\n            'code': 'en',\n        },\n        
'itwiki': {\n 'family': 'wikipedia',\n 'code': 'it',\n },\n 'testwiki': {\n 'family': 'wikipedia',\n 'code': 'test',\n },\n 'commons': {\n 'family': 'commons',\n 'code': 'commons',\n },\n }\n\n cached = True\n\n def testSharedOnly(self):\n title = 'File:Sepp Maier 1.JPG'\n\n commons = self.get_site('commons')\n itwp = self.get_site('itwiki')\n itwp_file = pywikibot.FilePage(itwp, title)\n for using in itwp_file.usingPages():\n self.assertIsInstance(using, pywikibot.Page)\n\n commons_file = pywikibot.FilePage(commons, title)\n\n self.assertFalse(itwp_file.exists())\n self.assertTrue(commons_file.exists())\n\n self.assertTrue(itwp_file.fileIsShared())\n self.assertTrue(commons_file.fileIsShared())\n self.assertTrue(commons_file.fileUrl())\n\n self.assertIn('/wikipedia/commons/', itwp_file.fileUrl())\n self.assertRaises(pywikibot.NoPage, itwp_file.get)\n\n def testLocalOnly(self):\n title = 'File:April Fools Day Adminship discussion (2005).png'\n\n commons = self.get_site('commons')\n enwp = self.get_site('enwiki')\n enwp_file = pywikibot.FilePage(enwp, title)\n for using in enwp_file.usingPages():\n self.assertIsInstance(using, pywikibot.Page)\n\n commons_file = pywikibot.FilePage(commons, title)\n\n self.assertTrue(enwp_file.fileUrl())\n self.assertTrue(enwp_file.exists())\n self.assertFalse(commons_file.exists())\n\n self.assertFalse(enwp_file.fileIsShared())\n self.assertRaises(pywikibot.NoPage, commons_file.fileIsShared)\n\n self.assertRaises(pywikibot.NoPage, commons_file.fileUrl)\n self.assertRaises(pywikibot.NoPage, commons_file.get)\n\n def testOnBoth(self):\n title = 'File:Pulsante spam.png'\n\n commons = self.get_site('commons')\n itwp = self.get_site('itwiki')\n itwp_file = pywikibot.FilePage(itwp, title)\n for using in itwp_file.usingPages():\n self.assertIsInstance(using, pywikibot.Page)\n\n commons_file = pywikibot.FilePage(commons, title)\n\n self.assertTrue(itwp_file.fileUrl())\n self.assertTrue(itwp_file.exists())\n self.assertTrue(commons_file.exists())\n\n self.assertFalse(itwp_file.fileIsShared())\n self.assertTrue(commons_file.fileIsShared())\n\n def testNonFileLocal(self):\n \"\"\"Test file page, without local file, existing on the local wiki.\"\"\"\n title = 'File:Sepp Maier 1.JPG'\n\n commons = self.get_site('commons')\n testwp = self.get_site('testwiki')\n testwp_file = pywikibot.FilePage(testwp, title)\n\n self.assertTrue(testwp_file.fileUrl())\n self.assertTrue(testwp_file.exists())\n self.assertTrue(testwp_file.fileIsShared())\n\n commons_file = pywikibot.FilePage(commons, title)\n self.assertEqual(testwp_file.fileUrl(),\n commons_file.fileUrl())\n\n\nif __name__ == '__main__':\n try:\n unittest.main()\n except SystemExit:\n pass\n","repo_name":"speedydeletion/pywikibot","sub_path":"tests/file_tests.py","file_name":"file_tests.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"21627657050","text":"import itertools\r\n\r\nn = int(input())\r\np = [list(map(int, input().split())) for _ in range(n)]\r\n\r\nfor p1, p2, p3 in itertools.combinations(p, 3):\r\n x1, y1 = p1\r\n x2, y2 = p2\r\n x3, y3 = p3\r\n\r\n # ax + by + c = 0\r\n a = y2 - y1\r\n b = x1 - x2\r\n\r\n if a * x3 + b * y3 == a * x1 + b * y1:\r\n print('Yes')\r\n break\r\nelse:\r\n 
print('No')\r\n","repo_name":"mgmk2/atcoder-python","sub_path":"ABC/181/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"19381959046","text":"import psycopg2\n\nfrom myPackage import read_yaml as ryaml\nfrom myPackage import write_to_csv as wcsv\n\n# import read_yaml as ryaml\n\ncredential_path = '/Users/huangyiling/Github/credential/db.yaml' # Mac\n# credential_path = '/home/cavalown/credential/db.yaml' # linode\n\n\"\"\"\nsteps:\n1. make a connection\n2. use the connection to build a cursor\n3. read or update tables need cursor\n4. in the end, close the connection\n\"\"\"\n\n# connection\n\n\ndef postgres_connection(machine, db_class, database):\n credential = ryaml.read_yaml(credential_path)\n db_info = credential[machine][db_class]\n host = db_info['host']\n port = db_info['port']\n # db = db_info['db']\n user = db_info['user']\n password = db_info['pswd']\n connection = psycopg2.connect(database=database, user=user,\n password=password, host=host, port=port)\n print(f\"Connect to {database} successfully!\")\n return connection\n\n\n# cursor\ndef make_cursor(connection):\n cursor = connection.cursor()\n print(\"And get cursor.\")\n return cursor\n\n\n# create table db.schema.table and default schema : public\ndef createTable(connection, cursor, sql):\n # cursor = make_cursor(connection)\n cursor.execute(sql)\n connection.commit()\n # close_connection(connection)\n print(f\"Create table successfully!\")\n\n\n# Read tables\ndef readTable(query, cursor):\n # query = \"\"\"SELECT {} from {};\"\"\".format(item, tableName))\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows\n\n\n# Update tables\ndef updateTable(query, cursor, connection):\n # query = f\"\"\"Update book set {} where {}\"\"\"\n cursor.execute(query)\n connection.commit()\n count = cursor.rowcount\n # close_connection(connection)\n print(count, \"rows Updated successfully!\")\n\n\n# insert table\ndef insertTable(connection, cursor, query, exceptionfile):\n # \"INSERT INTO a_table (c1, c2, c3) VALUES(%s, %s, %s)\", (v1, v2, v3)\n try:\n cursor.execute(query)\n connection.commit() # <- We MUST commit to reflect the inserted data\n print(connection, \"Insert successfully!\")\n connection.commit()\n except psycopg2.IntegrityError as e:\n connection.rollback()\n except Exception as e:\n wcsv.writeToCsv(\n f'./dataStore/pos_insert_exception_{exceptionfile}', [e])\n print(e)\n connection.rollback()\n\n\n# check if table exist\ndef check_table_exist(cursor, tableName):\n result = cursor.execute(f\"\"\"SELECT EXISTS (\n SELECT FROM information_schema.tables\n WHERE table_schema = 'public'\n AND table_name = '{tableName}');\"\"\")\n print(type(result), result)\n return result\n\n\n# close connection\ndef close_connection(connection):\n connection.close()\n print(f'{connection} is closed!')\n\n\nif __name__ == '__main__':\n connection = postgres_connection('linode1', 'postgres', 'test')\n cursor = make_cursor(connection)\n # check_table_exist(cursor, \"persons\")\n # query = \"\"\"INSERT INTO persons (\"personid\", \"lastname\", \"firstname\", \"address\", \"city\") VALUES (124, 'chen', 'yishien', 'No. 
155, second Road', 'chiayi');\"\"\"\n # insertTable(connection, cursor, query)\n\n sql = \"\"\"create table if not exists table2 (\n PersonID int primary key,\n LastName varchar(255),\n FirstName varchar(255),\n Address varchar(255),\n City varchar(255));\n \"\"\"\n createTable(connection, cursor, sql)\n","repo_name":"cavalown/stock","sub_path":"myPackage/postgresServer.py","file_name":"postgresServer.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"21827225533","text":"from enum import IntEnum\n\nclass ShelfType(IntEnum):\n Weapon = 0\n Drone = 1\n Augment = 2\n Crew = 3\n System = 4\n \nclass ItemAvailability(IntEnum):\n Unavailable = -1\n Purchased = 0\n Available = 1\n\nclass Store:\n shelf = []\n fuelCount = 0\n missileCount = 0\n dronePartCount = 0\n \nclass StoreShelf:\n type = ShelfType.Weapon\n item = []\n \nclass StoreItem:\n availability = ItemAvailability.Unavailable\n itemID = \"\"\n extraData = 0\n","repo_name":"Tsubashi/iOS-FTL-Save-Game-Editor","sub_path":"store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"38287835968","text":"from .base_model import BaseModel\nfrom .networks import define_generator\nfrom utils import data_utils\n\n\nclass VESPCNModel(BaseModel):\n def __init__(self, opt):\n super(VESPCNModel, self).__init__(opt)\n\n if self.verbose:\n self.logger.info('{} Model Info {}'.format('=' * 20, '=' * 20))\n self.logger.info('Model: {}'.format(opt['model']['name']))\n\n # set network\n self.set_network()\n\n def set_network(self):\n # define net G\n self.net_G = define_generator(self.opt).to(self.device)\n if self.verbose:\n self.logger.info('Generator: {}\\n'.format(\n self.opt['model']['generator']['name']) + self.net_G.__str__())\n\n # load network\n load_path_G = self.opt['model']['generator'].get('load_path')\n if load_path_G is not None:\n self.load_network(self.net_G, load_path_G)\n if self.verbose:\n self.logger.info('Load generator from: {}'.format(load_path_G))\n\n\n # def eval(self, inputs, labels=None, **kwargs):\n # metrics = {}\n # frames = [x.squeeze(1) for x in inputs[0].split(1, dim=1)]\n # _frames = [pad_if_divide(x, 4, 'reflect') for x in frames]\n # a = (_frames[0].size(2) - frames[0].size(2)) * self.scale\n # b = (_frames[0].size(3) - frames[0].size(3)) * self.scale\n # slice_h = slice(None) if a == 0 else slice(a // 2, -a // 2)\n # slice_w = slice(None) if b == 0 else slice(b // 2, -b // 2)\n # sr, warps, flows = self.vespcn(*_frames)\n # sr = sr[..., slice_h, slice_w].cpu().detach()\n # if labels is not None:\n # targets = torch.split(labels[0], 1, dim=1)\n # targets = [t.squeeze(1) for t in targets]\n # hr = targets[self.depth // 2]\n # metrics['psnr'] = psnr(sr, hr)\n # writer = get_writer(self.name)\n # if writer is not None:\n # step = kwargs['epoch']\n # writer.image('clean', sr.clamp(0, 1), step=step)\n # writer.image('warp/0', warps[0].clamp(0, 1), step=step)\n # writer.image('warp/1', warps[-1].clamp(0, 1), step=step)\n # return [sr.numpy()], metrics\n\n def infer(self, lr_data):\n\n lr_data = data_utils.canonicalize(lr_data) # to torch.FloatTensor\n lr_data = lr_data.permute(0, 3, 1, 2) # tchw\n\n # dual direct temporal padding\n lr_data, n_pad_front = self.pad_sequence(lr_data)\n\n # infer\n hr_seq = self.net_G.infer_sequence(lr_data, self.device)\n\n return 
hr_seq\n","repo_name":"Thmen/EGVSR","sub_path":"codes/models/vespcn_model.py","file_name":"vespcn_model.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":862,"dataset":"github-code","pt":"29"}
+{"seq_id":"39344303781","text":"data = open('input2.txt','r')\n\nfile = open(\"output2.txt\", \"a\")\n\n\nlength = int(data.readline())\n\narr_str = data.readline().split()\n\narr = [int(i) for i in arr_str]\n\ndef bubbleSort(arr): \n    for i in range(len(arr)-1):\n        flag = False\n        for j in range(len(arr)-i-1):\n            if arr[j] > arr[j+1]:\n                arr[j], arr[j+1] = arr[j+1], arr[j]\n                flag = True\n        if flag == False:\n            break\nbubbleSort(arr)\n\nfile.write(' '.join(str(n) for n in arr))\n\nfile.close()\n\n\n# We use a flag to achieve θ(n) in the best-case scenario. The flag is reset at the start of each pass;\n# if any pair of values gets swapped in the inner loop, the flag becomes True, which indicates the array\n# was not yet sorted. Otherwise the array is already sorted, the flag stays False, and the outer loop\n# terminates early.\n\n\n","repo_name":"alam265/cse221_assignment","sub_path":"LAB 1/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"3220705447","text":"#!/usr/bin/env python3\n\nfrom __init__ import *\n\nmemory = {}\n\nfor line in get_input_stream():\n    line = line.strip()\n    if line.startswith(\"mask\"):\n        mask_form = line.split()[2]\n        antimask_0 = 0\n        mask_1 = 0\n        for char in mask_form:\n            antimask_0 *= 2\n            mask_1 *= 2\n            if char in \"X1\":\n                antimask_0 += 1\n            if char == \"1\":\n                mask_1 += 1\n    elif line.startswith(\"mem\"):\n        address = int(line.split(\"[\")[1].split(\"]\")[0])\n        value = int(line.split()[2])\n        value &= antimask_0\n        value |= mask_1\n        memory[address] = value\nprint(sum(memory.values()))\n","repo_name":"makrzor/advent-of-code","sub_path":"2020/14.1.py","file_name":"14.1.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"35154529253","text":"import time\r\nfrom requests import get\r\nimport os\r\nimport utils\r\nimport threading\r\n\r\n\r\nclass Downloader:\r\n\r\n    def __init__(self, url):\r\n        self.url = url\r\n        self.cancel : bool = False\r\n        self.complete : bool = False\r\n        self.pause : bool = False\r\n        self.taken_part : int = 0\r\n\r\n        try:\r\n            self.content = get(url=self.url, stream=True)\r\n            self.size = int(self.content.headers.get('content-length'))\r\n        except Exception as e:\r\n            print(\"Error in getting the data stream.\")\r\n            print(f\"Error message: \\n{str(e)}\")\r\n\r\n    def update_progress_bar(self):\r\n        last_value = self.taken_part\r\n        while not self.complete:\r\n            time.sleep(1)\r\n\r\n            speed = (self.taken_part - last_value) / 1024\r\n            suffix = \"KBps\"\r\n            \r\n            if speed > 1000:\r\n                speed /= 1024\r\n                suffix = \"MBps\"\r\n\r\n            utils.print_progress_bar(self.taken_part, self.size, suffix=f\"{round(speed, 2)} {suffix}\")\r\n            \r\n            last_value = self.taken_part\r\n    \r\n    def start_download(self, res_path: str):\r\n        if os.path.exists(res_path):\r\n            raise FileExistsError(f\"{res_path} already exists.\")\r\n\r\n        dirname = os.path.dirname(res_path)\r\n        if not os.path.isdir(dirname):\r\n            raise FileNotFoundError(f\"{dirname} directory is not valid.\")\r\n        \r\n        start = time.time()\r\n\r\n        self.taken_part = 0\r\n\r\n        try:\r\n            chunk_size = 128\r\n\r\n            with open(f'{res_path}', 'wb') as f:\r\n                \r\n                clock_thread = 
threading.Thread(target=self.update_progress_bar)\r\n clock_thread.start()\r\n for i, item in enumerate(self.content.iter_content(chunk_size=chunk_size)):\r\n while self.pause:\r\n pass\r\n\r\n if self.cancel:\r\n return\r\n\r\n f.write(item)\r\n\r\n self.taken_part += len(item)\r\n\r\n self.complete = True\r\n clock_thread.join()\r\n \r\n except Exception as err:\r\n raise err\r\n\r\n print(f'\\ntime spent downloading : {round(time.time() - start, 2)} seconds')\r\n\r\n def cancel_download(self):\r\n self.cancel = True\r\n\r\n def pause_download(self):\r\n self.pause = True\r\n\r\n def play_download(self):\r\n self.pause = False\r\n","repo_name":"Mohsen-Rahimi8001/youtube_downloader","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"70035250639","text":"import logging\nimport sqlite3\nimport sys\nimport time\nfrom copy import deepcopy\nfrom pathlib import Path\n\n\nimport numpy as np\nif sys.platform == 'win32':\n import numpy.core._dtype_ctypes #don't remove this line, pyinstaller need this\nfrom PySide2 import QtWidgets\nfrom PySide2 import QtCore\nfrom PySide2.QtCore import QPoint\nfrom PySide2.QtCore import QSortFilterProxyModel\nfrom PySide2.QtCore import QItemSelectionModel\nfrom PySide2.QtCore import QRect\nfrom PySide2.QtCore import QSize\nfrom PySide2.QtCore import Qt\nfrom PySide2.QtGui import QColor, QIntValidator, QBrush, QFont\nfrom PySide2.QtGui import QStandardItemModel\nfrom PySide2.QtGui import QStandardItem\nfrom PySide2.QtGui import QPixmap\nfrom PySide2.QtGui import QRegion\nfrom PySide2.QtGui import QKeySequence\nfrom PySide2.QtGui import QPainter\nfrom PySide2.QtGui import QPen\nfrom PySide2.QtWidgets import QTableView, QLineEdit\nfrom PySide2.QtWidgets import QToolButton\nfrom PySide2.QtWidgets import QWidget\nfrom PySide2.QtWidgets import QApplication\nfrom PySide2.QtWidgets import QDesktopWidget\nfrom PySide2.QtWidgets import QFileDialog\nfrom PySide2.QtWidgets import QHBoxLayout\nfrom PySide2.QtWidgets import QLabel\nfrom PySide2.QtWidgets import QMessageBox\nfrom PySide2.QtWidgets import QPushButton\nfrom PySide2.QtWidgets import QShortcut\nfrom PySide2.QtWidgets import QVBoxLayout\n\n\ndef order_points(point_list):\n point_list = sorted(point_list, key=lambda x: x[0])\n a1 = sorted([*point_list[:2]], key=lambda x: x[1])\n a2 = sorted([*point_list[2:]], key=lambda x: x[1])\n return np.array([a1[0], a2[0], a2[1], a1[1]], np.int)\n\nclass DBLabelText:\n def __init__(self, lable_data_path):\n self.conn = sqlite3.connect(lable_data_path)\n self.cursor = self.conn.cursor()\n self.cursor.execute(r'''\n CREATE TABLE IF NOT EXISTS label_text (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n img_name TEXT NOT NULL, --图片文件名\n x1 INTEGER NOT NULL, --左上角x坐标\n y1 INTEGER NOT NULL, --左上角y坐标\n x2 INTEGER NOT NULL, --右上角x坐标\n y2 INTEGER NOT NULL, --右上角y坐标\n x3 INTEGER NOT NULL, --右下角x坐标\n y3 INTEGER NOT NULL, --右下角y坐标\n x4 INTEGER NOT NULL, --左下角x坐标\n y4 INTEGER NOT NULL, --左下角y坐标\n img_text TEXT NOT NULL, -- 文本内容\n tsp INTEGER NOT NULL --最后一次修改的时间戳\n );\n ''')\n\n self.cursor.execute(f'''\n CREATE INDEX IF NOT EXISTS `idx_label_text_img_name` ON `label_text` (`img_name` ASC);\n ''')\n\n def get_all_text(self, img_name):\n result_list = self.cursor.execute(r'''\n SELECT id,x1,y1,x2,y2,x3,y3,x4,y4,img_text\n FROM label_text\n WHERE img_name = ?\n ORDER BY id\n ''', (img_name,)).fetchall()\n result = []\n if result_list:\n 
for id,x1,y1,x2,y2,x3,y3,x4,y4,img_text in result_list:\n result.append([id, np.array([(x1,y1), (x2,y2), (x3,y3), (x4,y4)], dtype=np.int).reshape((4,2)), img_text])\n return result\n\n def add_text(self, img_name, point_list, img_text):\n self.cursor.execute(r'''\n INSERT INTO label_text (img_name,x1,y1,x2,y2,x3,y3,x4,y4,img_text,tsp) \n VALUES (?,?,?,?,?,?,?,?,?,?,?)\n ''', (img_name, *point_list.flatten().tolist(), img_text, int(time.time())))\n self.conn.commit()\n return self.cursor.lastrowid\n\n def del_text(self, img_name, id):\n self.cursor.execute(r'''\n DELETE FROM label_text WHERE img_name=? AND id=?;\n ''', (img_name, id))\n self.conn.commit()\n\n def update_text(self, img_name, id, img_text):\n self.cursor.execute(r'''\n UPDATE label_text SET img_text=? WHERE img_name=? AND id=?\n ''', (img_text, img_name, id))\n self.conn.commit()\n\n def update_points(self, img_name, id, point_list):\n self.cursor.execute(r'''\n UPDATE label_text SET x1=?, y1=?, x2=?, y2=?, x3=?, y3=?, x4=?, y4=?\n WHERE img_name=? AND id=?\n ''', (*point_list.flatten().tolist(), img_name, id))\n self.conn.commit()\n\n def __del__(self):\n self.conn.close()\n\nclass DragButton(QToolButton):\n def __init__(self, parent=None):\n super(DragButton, self).__init__(parent)\n self.setStyleSheet('''\n background-color: red;\n ''')\n\n self.setFixedSize(10, 10)\n self.border_range = self.parent().size()\n\n def mousePressEvent(self, event):\n if event.button() == QtCore.Qt.LeftButton:\n self.setStyleSheet('''\n background-color: yellow;\n ''')\n self.__mousePressPos = event.globalPos()\n self.__mouseMovePos = event.globalPos()\n\n def mouseMoveEvent(self, event):\n if event.buttons() == QtCore.Qt.LeftButton:\n currPos = self.mapToGlobal(self.pos())\n globalPos = event.globalPos()\n diff = globalPos - self.__mouseMovePos\n newPos = self.mapFromGlobal(currPos + diff)\n\n center_point = [newPos.x() + self.width() / 2,\n newPos.y() + self.height() / 2]\n\n if center_point[0] > self.border_range[0][1]:\n center_point[0] = self.border_range[0][1]\n\n if center_point[0] < self.border_range[0][0]:\n center_point[0] = self.border_range[0][0]\n\n if center_point[1] > self.border_range[1][1]:\n center_point[1] = self.border_range[1][1]\n\n if center_point[1] < self.border_range[1][0]:\n center_point[1] = self.border_range[1][0]\n\n self.move(QPoint(\n center_point[0] - self.width() / 2,\n center_point[1] - self.height() / 2\n ))\n self.__mouseMovePos = globalPos\n self.parent().update_points()\n\n def mouseReleaseEvent(self, event):\n if self.__mousePressPos is not None:\n moved = event.globalPos() - self.__mousePressPos\n if moved.manhattanLength() > 3:\n self.setStyleSheet('''\n background-color: red;\n ''')\n event.ignore()\n\n\n def resizeEvent(self, event):\n self.setMask(QRegion(self.rect(), QRegion.Ellipse))\n QtWidgets.QToolButton.resizeEvent(self, event)\n\nclass ImageLabel(QLabel):\n def __init__(self, parent):\n super(ImageLabel, self).__init__(parent)\n\n self.img_extra_border_size = (5, 5)\n self.scaled_img = None\n self.scaled_ratio = None\n self.scaled_img_rect = None\n self.img_all_text = None\n self.img_all_text_dict = {}\n self.img_activate_idx = None\n\n self.mouse_mark_flag = False\n self.mouse_start_pos = None\n self.mouse_end_pos = None\n\n self.btn_point1 = DragButton(self)\n self.btn_point2 = DragButton(self)\n self.btn_point3 = DragButton(self)\n self.btn_point4 = DragButton(self)\n\n self.lineedit_input = QLineEdit(self)\n self.lineedit_input.setFont(QFont('宋体',22))\n 
self.lineedit_input.setFixedWidth(500)\n self.lineedit_input.textChanged.connect(self.on_text_change)\n self.lineedit_input.returnPressed.connect(self.parent().on_nonactivate)\n\n self.btn_point1.setVisible(False)\n self.btn_point2.setVisible(False)\n self.btn_point3.setVisible(False)\n self.btn_point4.setVisible(False)\n self.lineedit_input.setVisible(False)\n\n def mousePressEvent(self, event):\n if self.scaled_img is None:\n return\n\n if event.button() == QtCore.Qt.LeftButton:\n self.mouse_mark_flag = True\n self.mouse_start_pos = event.pos()\n self.mouse_end_pos = event.pos()\n self.update()\n\n def mouseMoveEvent(self, event):\n if self.scaled_img is None or not self.mouse_mark_flag:\n return\n\n self.mouse_end_pos = event.pos()\n self.update()\n\n def mouseReleaseEvent(self, event):\n if self.scaled_img is None or not self.mouse_mark_flag:\n return\n\n if event.button() == QtCore.Qt.LeftButton:\n self.mouse_end_pos = event.pos()\n\n if abs(self.mouse_start_pos.x() - self.mouse_end_pos.x()) < 5 or \\\n abs(self.mouse_start_pos.y() - self.mouse_end_pos.y()) < 5:\n self.mouse_mark_flag = False\n self.mouse_start_pos = None\n self.mouse_end_pos = None\n return\n\n p1 = [self.mouse_start_pos.x(), self.mouse_start_pos.y()]\n p2 = [self.mouse_end_pos.x(), self.mouse_end_pos.y()]\n p3 = [p1[0], p2[1]]\n p4 = [p2[0], p1[1]]\n point_list = np.array([p1,p2,p3,p4])\n point_list = order_points(point_list)\n\n point_list[:, 0] -= self.img_extra_border_size[1]\n point_list[:, 1] -= self.img_extra_border_size[0]\n point_list = point_list.astype(np.float)\n point_list /= self.scaled_ratio\n point_list = point_list.astype(np.int)\n point_list += 1\n\n self.parent().add_text(point_list)\n\n def on_text_change(self):\n if self.scaled_img is None:\n return\n\n if self.img_activate_idx is None:\n return\n\n new_text = self.lineedit_input.text()\n if self.img_all_text_dict[self.img_activate_idx] != new_text:\n self.parent().on_imglabel_text_change(self.img_activate_idx, new_text)\n\n def show_activate_img(self, img, all_text, activate_idx):\n self.scaled_img = None\n self.scaled_ratio = None\n self.scaled_img_rect = None\n self.img_all_text = []\n self.img_all_text_dict = {}\n self.img_activate_idx = None\n self.mouse_mark_flag = False\n self.mouse_start_pos = None\n self.mouse_end_pos = None\n\n self.btn_point1.setVisible(False)\n self.btn_point2.setVisible(False)\n self.btn_point3.setVisible(False)\n self.btn_point4.setVisible(False)\n self.lineedit_input.setVisible(False)\n self.lineedit_input.clear()\n\n if img:\n # scaled_size = QSize(\n # self.size().width()-self.img_extra_border_size[1]*2,\n # self.size().height()-self.img_extra_border_size[0]*2\n # )\n scaled_size = QSize(\n img.width(),\n img.height()\n )\n\n self.scaled_img = img.scaled(scaled_size, Qt.KeepAspectRatio)\n\n self.scaled_ratio = self.scaled_img.width() / img.width()\n\n self.scaled_img_rect = QRect(\n self.img_extra_border_size[0],\n self.img_extra_border_size[1],\n self.scaled_img.width(),\n self.scaled_img.height()\n )\n\n self.mouse_border_range = (\n (self.scaled_img_rect.x(), self.scaled_img_rect.x()+self.scaled_img_rect.width()),\n (self.scaled_img_rect.y(), self.scaled_img_rect.y()+self.scaled_img_rect.height()),\n )\n\n self.img_activate_idx = activate_idx\n for idx, point_list, img_text in all_text:\n point_list = point_list.astype(np.float)\n point_list *= self.scaled_ratio\n point_list = point_list.astype(np.int)\n\n point_list -= self.btn_point1.width() // 2\n point_list[:, 0] += self.img_extra_border_size[1]\n 
point_list[:, 1] += self.img_extra_border_size[0]\n for p in point_list:\n if p[0] < 10:\n p[0] = 10\n elif p[0] > self.size().width()-10:\n p[0] = self.size().width()-10\n\n if p[1] < 10:\n p[1] = 10\n elif p[1] > self.size().height()-10:\n p[1] = self.size().height()-10\n\n self.img_all_text.append([idx, point_list, img_text])\n self.img_all_text_dict[idx] = img_text\n\n if activate_idx == idx:\n self.btn_point1.move(QPoint(point_list[0, 0], point_list[0, 1]))\n self.btn_point2.move(QPoint(point_list[1, 0], point_list[1, 1]))\n self.btn_point3.move(QPoint(point_list[2, 0], point_list[2, 1]))\n self.btn_point4.move(QPoint(point_list[3, 0], point_list[3, 1]))\n\n self.btn_point1.setVisible(True)\n self.btn_point2.setVisible(True)\n self.btn_point3.setVisible(True)\n self.btn_point4.setVisible(True)\n\n self.btn_point1.border_range = (\n (self.scaled_img_rect.x(), self.scaled_img_rect.x()+self.scaled_img_rect.width()),\n (self.scaled_img_rect.y(), self.scaled_img_rect.y()+self.scaled_img_rect.height()),\n )\n self.btn_point2.border_range = self.btn_point1.border_range\n self.btn_point3.border_range = self.btn_point1.border_range\n self.btn_point4.border_range = self.btn_point1.border_range\n\n self.lineedit_input.setVisible(True)\n self.lineedit_input.setFocus()\n self.lineedit_input.move(self.btn_point4.pos().x(), self.btn_point4.pos().y()+10)\n self.lineedit_input.setText(img_text)\n\n self.repaint()\n\n def update_points(self):\n if self.img_activate_idx is None or self.scaled_img is None:\n return\n\n pos1 = self.btn_point1.pos()\n pos2 = self.btn_point2.pos()\n pos3 = self.btn_point3.pos()\n pos4 = self.btn_point4.pos()\n point_list = np.array([(pos1.x(), pos1.y()),\n (pos2.x(), pos2.y()),\n (pos3.x(), pos3.y()),\n (pos4.x(), pos4.y())])\n point_list = order_points(point_list)\n\n self.lineedit_input.move(point_list[3][0], point_list[3][1]+10)\n\n for idx, (id, _, _) in enumerate(self.img_all_text):\n if id != self.img_activate_idx:\n continue\n self.img_all_text[idx][1] = point_list\n break\n\n point_list = deepcopy(point_list)\n point_list[:, 0] -= self.img_extra_border_size[1]\n point_list[:, 1] -= self.img_extra_border_size[0]\n point_list += self.btn_point1.width() // 2\n point_list = point_list.astype(np.float)\n point_list /= self.scaled_ratio\n point_list = point_list.astype(np.int)\n point_list += 1\n\n self.parent().update_points(self.img_activate_idx, point_list)\n self.repaint()\n\n def paintEvent(self, event):\n painter = QPainter()\n painter.begin(self)\n painter.setPen(Qt.NoPen)\n painter.fillRect(self.rect(), QColor(190, 190, 190, 255))\n\n if self.scaled_img:\n painter.drawPixmap(self.scaled_img_rect, self.scaled_img)\n\n for idx, point_list, img_text in self.img_all_text:\n point_list = point_list + self.btn_point1.width() // 2\n if idx == self.img_activate_idx:\n painter.setPen(QPen(Qt.red, 1))\n else:\n painter.setPen(QPen(Qt.green, 1))\n painter.drawLine(\n point_list[0, 0],\n point_list[0, 1],\n point_list[1, 0],\n point_list[1, 1]\n )\n painter.drawLine(\n point_list[1, 0],\n point_list[1, 1],\n point_list[2, 0],\n point_list[2, 1]\n )\n painter.drawLine(\n point_list[2, 0],\n point_list[2, 1],\n point_list[3, 0],\n point_list[3, 1]\n )\n painter.drawLine(\n point_list[3, 0],\n point_list[3, 1],\n point_list[0, 0],\n point_list[0, 1]\n )\n\n if self.mouse_mark_flag:\n painter.setPen(QPen(Qt.red, 1))\n painter.drawRect(QtCore.QRect(self.mouse_start_pos, self.mouse_end_pos))\n\n painter.end()\n\n\nclass TextTableView(QTableView):\n def __init__(self, parent):\n 
super(TextTableView, self).__init__(parent)\n\n self.model = QStandardItemModel(self)\n self.proxy = QSortFilterProxyModel()\n self.proxy.setSourceModel(self.model)\n self.setModel(self.proxy)\n\n self.selectionModel().selectionChanged.connect(self.on_select_change)\n self.model.dataChanged.connect(self.on_text_change)\n\n self.all_text_dict = {}\n\n self.show_activate_img_flag = False\n\n def show_activate_img(self, all_text, activate_idx):\n self.show_activate_img_flag = True\n try:\n self.all_text_dict = {}\n self.model.clear()\n self.model.setHorizontalHeaderLabels(['文本', '编号'])\n\n self.setAutoScroll(True)\n self.setColumnWidth(0, 300)\n self.setColumnWidth(1, 0)\n self.setSelectionMode(QTableView.SingleSelection)\n for col_id, (idx, point_list, img_text) in enumerate(all_text):\n self.all_text_dict[idx] = img_text\n\n it1 = QStandardItem(img_text)\n it1.setEditable(True)\n self.model.setItem(col_id, 0, it1)\n\n it2 = QStandardItem(str(idx))\n it2.setEditable(False)\n self.model.setItem(col_id, 1, it2)\n\n if activate_idx == idx:\n self.selectionModel().select(\n self.model.index(col_id, 0),\n QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows\n )\n finally:\n self.show_activate_img_flag = False\n\n def on_select_change(self):\n if self.show_activate_img_flag:\n return\n\n select_row_indexs = self.selectionModel().selectedIndexes()\n if not select_row_indexs:\n return\n\n row_index = select_row_indexs[0]\n row = row_index.row()\n activate_idx = row_index.sibling(row, 1).data()\n self.parent().on_activate_idx_change(int(activate_idx))\n\n def remove_selected_row(self):\n select_row_indexs = self.selectionModel().selectedIndexes()\n if not select_row_indexs:\n return None\n\n row_index = select_row_indexs[0]\n row = row_index.row()\n img_idx = row_index.sibling(row, 1).data()\n self.model.removeRow(row)\n return int(img_idx)\n\n def on_text_change(self, idx1, idx2):\n row = idx1.row()\n new_text = idx1.sibling(row, 0).data()\n activate_idx = idx1.sibling(row, 1).data()\n if activate_idx is not None:\n activate_idx = int(activate_idx)\n if self.all_text_dict[activate_idx] != new_text:\n self.parent().on_tableview_text_change(activate_idx, new_text)\n\nclass MainWindow(QWidget):\n def __init__(self, parent=None):\n QtWidgets.QWidget.__init__(self, parent)\n\n # 界面配置\n self.setWindowTitle('文字识别标注工具')\n self.setFixedSize(1200, 800)\n self.move_to_center()\n\n #\n self.label_img = ImageLabel(self)\n self.label_img.setAlignment(Qt.AlignCenter)\n self.label_img.setText('没有选择任何图片')\n self.label_img.setFixedWidth(900)\n self.label_img.setFixedHeight(750)\n\n #\n self.label_status_running1 = QLabel(self)\n self.label_status_running1.setAlignment(Qt.AlignLeft)\n self.label_status_running1.setText('请选择需要标注的目录')\n\n self.label_status_page_number_validator = QIntValidator()\n self.label_status_page_number = QLineEdit(self)\n self.label_status_page_number.setMaximumWidth(50)\n self.label_status_page_number.setValidator(\n self.label_status_page_number_validator)\n self.label_status_page_number.hide()\n self.label_status_page_number.returnPressed.connect(self.on_page_jump)\n\n self.label_status_running2 = QLabel(self)\n self.label_status_running2.setAlignment(Qt.AlignLeft)\n self.label_status_running2.setText('张')\n self.label_status_running2.hide()\n\n\n self.btn_select_dir = QPushButton(self)\n self.btn_select_dir.setText('选择目录...')\n self.btn_select_dir.clicked.connect(self.on_select_diectory)\n\n self.btn_prev_img = QPushButton(self)\n self.btn_prev_img.setText('上一张')\n 
self.btn_prev_img.clicked.connect(self.on_prev_img)\n self.connect(\n QShortcut(QKeySequence(QtCore.Qt.Key_Left), self),\n QtCore.SIGNAL('activated()'),\n self.btn_prev_img.click\n )\n\n self.btn_next_img = QPushButton(self)\n self.btn_next_img.setText('下一张')\n self.btn_next_img.clicked.connect(self.on_next_img)\n self.connect(\n QShortcut(QKeySequence(Qt.Key_Right), self),\n QtCore.SIGNAL('activated()'),\n self.btn_next_img.click\n )\n\n self.btn_del_text = QPushButton(self)\n self.btn_del_text.setText('删除')\n self.btn_del_text.clicked.connect(self.on_del_text)\n\n self.btn_nonactivate = QPushButton(self)\n self.btn_nonactivate.setText('不选')\n self.btn_nonactivate.clicked.connect(self.on_nonactivate)\n self.connect(\n QShortcut(QKeySequence(Qt.Key_Escape), self),\n QtCore.SIGNAL('activated()'),\n self.btn_nonactivate.click\n )\n\n self.tableview_text = TextTableView(self)\n\n # 布局\n layout_root = QHBoxLayout()\n layout_col1 = QVBoxLayout()\n layout_col2 = QVBoxLayout()\n layout_root.addLayout(layout_col1)\n layout_root.addLayout(layout_col2)\n\n layout_col1.addWidget(self.label_img)\n layout_col1_row2 = QHBoxLayout()\n layout_col1_row2.setSpacing(1)\n layout_col1_row2.setAlignment(Qt.AlignLeft)\n layout_col1_row2.addWidget(self.label_status_running1, 0, Qt.AlignRight)\n layout_col1_row2.addWidget(self.label_status_page_number, 0, Qt.AlignRight)\n layout_col1_row2.addWidget(self.label_status_running2, 0, Qt.AlignRight)\n\n layout_col1.addLayout(layout_col1_row2)\n\n layout_col2_row1 = QHBoxLayout()\n layout_col2_row1.addWidget(self.btn_select_dir)\n\n layout_col2_row2 = QHBoxLayout()\n layout_col2_row2.addWidget(self.btn_prev_img)\n layout_col2_row2.addWidget(self.btn_next_img)\n\n layout_col2_row3 = QHBoxLayout()\n layout_col2_row3.addWidget(self.btn_del_text)\n layout_col2_row3.addWidget(self.btn_nonactivate)\n\n layout_col2.addLayout(layout_col2_row1)\n layout_col2.addLayout(layout_col2_row2)\n layout_col2.addLayout(layout_col2_row3)\n layout_col2.addWidget(self.tableview_text)\n\n self.setLayout(layout_root)\n\n self.directory = None\n self.all_img_file = []\n self.all_img_file_index = 0\n self.db_label = None\n\n self.update_btn_status()\n\n def move_to_center(self):\n screen = QDesktopWidget().screenGeometry()\n size = self.geometry()\n self.move((screen.width() - size.width()) / 2,(screen.height() - size.height()) / 2)\n\n def update_btn_status(self):\n try:\n self.label_status_page_number.setEnabled(False)\n self.btn_prev_img.setEnabled(False)\n self.btn_next_img.setEnabled(False)\n self.btn_del_text.setEnabled(False)\n self.btn_nonactivate.setEnabled(False)\n\n if not self.all_img_file:\n self.label_status_running1.setText('请选择需要标注的目录')\n self.label_status_page_number.hide()\n self.label_status_running2.hide()\n else:\n img_name = self.all_img_file[self.all_img_file_index]\n\n self.label_status_page_number.show()\n self.label_status_running2.show()\n self.label_status_page_number_validator.setRange(1, len(self.all_img_file))\n self.label_status_page_number.setText(f'{self.all_img_file_index+1}')\n self.label_status_running1.setText( f'当前图片: {img_name} ({self.all_img_file_index + 1}/{len(self.all_img_file)}) 跳转到')\n self.label_status_running2.setText(f'张')\n self.label_status_page_number.setEnabled(True)\n\n if self.all_img_file_index == 0:\n self.btn_prev_img.setEnabled(False)\n else:\n self.btn_prev_img.setEnabled(True)\n\n if self.all_img_file_index == len(self.all_img_file) - 1:\n self.btn_next_img.setEnabled(False)\n else:\n self.btn_next_img.setEnabled(True)\n\n 
self.btn_del_text.setEnabled(True)\n self.btn_nonactivate.setEnabled(True)\n except:\n logging.exception('update_btn_status exception')\n\n def on_select_diectory(self):\n try:\n self.all_img_file = []\n self.all_img_file_index = 0\n self.db_label = None\n self.label_img.show_activate_img(None, [], None)\n\n self.directory = QFileDialog.getExistingDirectory(self, '选择目录')\n self.setWindowTitle(f'文字识别标注工具: {self.directory}')\n\n self.get_all_img_file()\n if len(self.all_img_file) <= 0:\n QMessageBox.information(\n self,\n '<提示>',\n f'{self.directory}\\n目录下没有找到图片文件',\n QMessageBox.Ok\n )\n return\n\n self.read_label_file()\n self.show_img()\n finally:\n self.update_btn_status()\n\n def get_all_img_file(self):\n self.all_img_file_index = 0\n self.all_img_file = sorted([str(x.name) for x in Path(self.directory).iterdir(\n ) if x.is_file() and x.suffix.upper() in ['.JPG', '.JPEG', '.BMP', '.PNG']])\n\n def read_label_file(self):\n label_file = Path(self.directory).joinpath('label.sqllite3')\n self.db_label = DBLabelText(str(label_file))\n\n def on_next_img(self):\n try:\n self.all_img_file_index += 1\n self.show_img()\n finally:\n self.update_btn_status()\n\n def on_prev_img(self):\n try:\n self.all_img_file_index -= 1\n self.show_img()\n finally:\n self.update_btn_status()\n\n def on_page_jump(self):\n try:\n page_num = int(self.label_status_page_number.text())\n if page_num >= 1 and page_num <= len(self.all_img_file):\n self.all_img_file_index = page_num - 1\n self.show_img()\n self.setFocus()\n finally:\n self.update_btn_status()\n\n def add_text(self, point_list):\n try:\n if not self.all_img_file:\n return\n\n img_name = self.all_img_file[self.all_img_file_index]\n img_text = ''\n activate_idx = self.db_label.add_text(img_name, point_list, img_text)\n self.show_img(activate_idx)\n finally:\n self.update_btn_status()\n\n def on_del_text(self):\n try:\n img_idx = self.tableview_text.remove_selected_row()\n if img_idx is not None:\n img_name = self.all_img_file[self.all_img_file_index]\n activate_idx = self.db_label.del_text(img_name, img_idx)\n self.show_img(activate_idx)\n finally:\n self.update_btn_status()\n\n def on_nonactivate(self):\n try:\n self.show_img(activate_idx=None)\n finally:\n self.update_btn_status()\n\n def show_img(self, activate_idx=None, img_update=True, table_update=True):\n img_name = self.all_img_file[self.all_img_file_index]\n\n all_text = self.db_label.get_all_text(img_name)\n\n if img_update:\n img_path = Path(self.directory).joinpath(img_name)\n img = QPixmap(str(img_path))\n\n self.label_img.show_activate_img(img, all_text, activate_idx)\n\n if table_update:\n self.tableview_text.show_activate_img(all_text, activate_idx)\n\n def update_points(self, activate_idx, point_list):\n if self.all_img_file:\n img_name = self.all_img_file[self.all_img_file_index]\n self.db_label.update_points(img_name, activate_idx, point_list)\n\n def on_activate_idx_change(self, activate_idx):\n self.show_img(activate_idx, table_update=False)\n\n def on_tableview_text_change(self, activate_idx, new_text):\n img_name = self.all_img_file[self.all_img_file_index]\n self.db_label.update_text(img_name, activate_idx, new_text)\n self.show_img(activate_idx, img_update=True, table_update=False)\n\n def on_imglabel_text_change(self, activate_idx, new_text):\n img_name = self.all_img_file[self.all_img_file_index]\n self.db_label.update_text(img_name, activate_idx, new_text)\n self.show_img(activate_idx, img_update=False, table_update=True)\n\nif __name__ == '__main__':\n app = 
QApplication(sys.argv)\n    widget = MainWindow()\n    widget.show()\n    sys.exit(app.exec_())\n","repo_name":"daltonxiong/TextLabelTool","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":29633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
+{"seq_id":"31969114737","text":"'''\nSolicite o preço de uma mercadoria e o percentual de desconto. Exiba o valor do desconto e o preço a pagar.\n'''\n\npreco = float(input('Digite o preço da mercadoria: '))\npercentual = float(input('Digite o percentual de desconto: '))\ndesconto =(preco * percentual) / 100\nnovoPreco = preco = preco - (preco * percentual)/100\n\nprint ('O desconto foi de %.2f reais' %desconto)\nprint ('O novo preço é R$ %.2f' %novoPreco)\n","repo_name":"aparecidapires/Python","sub_path":"Python para Zumbis/1 - Comecando com o basico/Comentarios e Resolucao de Exercicios/Lista de Exercicios 1/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"35131478568","text":"import setuptools\n\nwith open(\"requirements.txt\", \"r\") as fh:\n    install_requires = fh.read().splitlines()\n\nsetuptools.setup(\n    name=\"job_wrapper\",\n    version='0.0.1',\n    author='ERST',\n    packages=setuptools.find_packages(),\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.8.0',\n    install_requires=install_requires,\n    entry_points={\n        \"console_scripts\": [\n            \"job_wrapper = job_wrapper.main:main\",\n        ],\n    },\n)\n","repo_name":"TheRacetrack/plugin-python-job-type","sub_path":"python3-job-type/python_wrapper/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"36416774094","text":"# -*- coding: utf-8 -*-\n\n# Задача: вычислить 3 тикера с максимальной и 3 тикера с минимальной волатильностью в МНОГОПОТОЧНОМ стиле\n#\n# Бумаги с нулевой волатильностью вывести отдельно.\n# Результаты вывести на консоль в виде:\n# Максимальная волатильность:\n# ТИКЕР1 - ХХХ.ХХ %\n# ТИКЕР2 - ХХХ.ХХ %\n# ТИКЕР3 - ХХХ.ХХ %\n# Минимальная волатильность:\n# ТИКЕР4 - ХХХ.ХХ %\n# ТИКЕР5 - ХХХ.ХХ %\n# ТИКЕР6 - ХХХ.ХХ %\n# Нулевая волатильность:\n# ТИКЕР7, ТИКЕР8, ТИКЕР9, ТИКЕР10, ТИКЕР11, ТИКЕР12\n# Волатильности указывать в порядке убывания. 
Тикеры с нулевой волатильностью упорядочить по имени.\n\nimport os.path\nimport time\nimport threading\n\nclass Parser(threading.Thread):\n\n def __init__(self, source, stats, lock, encoding='utf8', *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.source = source\n self.stats = stats\n self.lock = lock\n self.encoding = encoding\n self.prices = []\n\n def run(self):\n\n with open(self.source, encoding=self.encoding) as source:\n\n headers_line_index = 1\n\n for line_index, line in enumerate(source):\n\n if line_index <= headers_line_index:\n continue\n\n try:\n ticker, date, price, qty = line.strip().split(',')\n except BaseException as exc:\n print(f'Ошибка чтения данных в строке <{line}>, описание ошибки: {exc.args}')\n continue\n\n self.prices.append(float(price))\n\n if not self.prices:\n return\n\n self.prices.sort()\n min = self.prices[0]\n max = self.prices[-1]\n avg = (max + min) / 2\n result = (max - min) / avg * 100 if avg else 0\n with self.lock:\n self.stats.append((ticker, result))\n\ndef time_track(func):\n def surrogate(*args, **kwargs):\n started_at = time.time()\n\n result = func(*args, **kwargs)\n\n ended_at = time.time()\n elapsed = round(ended_at - started_at, 4)\n print(f'Функция работала {elapsed} секунд(ы)')\n return result\n return surrogate\n\ndef filter_values(seq, value_wanted=0):\n key, value = seq\n return value == value_wanted\n\ndef get_limit_values(seq, limit, skip_zeros=False):\n\n if not skip_zeros:\n return seq[:limit]\n\n result = []\n\n for stats in seq:\n\n ticker, stat = stats\n\n if stat == 0:\n continue\n\n if len(result) >= limit:\n break\n\n result.append(stats)\n\n return result\n\ndef print_stats(header, stats):\n print(header)\n for ticker, stat in stats:\n print(f'\\t{ticker} - {stat:6.2f} %')\n\n@time_track\ndef main():\n\n path = os.path.join(os.path.dirname(__file__), 'trades')\n path = os.path.normpath(path)\n stats = []\n files = []\n limit = 3\n\n for dirpath, dirnames, filenames in os.walk(path):\n for file in filenames:\n file_full_path = os.path.join(dirpath, file)\n files.append(file_full_path)\n\n lock = threading.RLock()\n parsers = [Parser(file, stats, lock) for file in files]\n\n for parser in parsers:\n parser.start()\n\n for parser in parsers:\n parser.join()\n\n if not stats:\n print('Нет данных для анализа.')\n return\n\n stats = sorted(stats, key = lambda x: x[1])\n mins = get_limit_values(stats, limit, True)\n mins.sort(key=lambda x: x[1], reverse=True)\n\n stats.reverse()\n maxs = get_limit_values(stats, limit)\n\n stats = sorted(stats, key=lambda x: x[0])\n zeros = filter(filter_values, stats)\n\n print_stats('Максимальная волатильность', maxs)\n print_stats('Минимальная волатильность', mins)\n print('Нулевая волатильность')\n for ticker, stat in zeros:\n print(f'\\t{ticker}', sep=' ,')\n\nif __name__ == '__main__':\n main()","repo_name":"karpov-dmitry-py/threads-and-processes","sub_path":"02_volatility_with_threads.py","file_name":"02_volatility_with_threads.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72005186637","text":"from random import *\nimport matplotlib.pyplot as plt\nimport numpy\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef conti_sampling(min, max, sample_size):\n res = []\n accept = 0\n while accept 0:\r\n sim_min = min([abs(model.similarity(idiom, token)) for token in tokens])\r\n sim_max = max([abs(model.similarity(idiom, token)) for token in tokens])\r\n sim_avg = 
np.mean([abs(model.similarity(idiom, token)) for token in tokens])\r\n sim_idi = abs(1.0 - cosine(model[idiom], np.average(np.array(token_embeddings), axis=0)))\r\n group2lit[group] = (sim_min, sim_max, sim_avg, sim_idi, len(tokens))\r\n # end if\r\n\r\n csv_writer.writerow(line + [sim_min, sim_max, sim_avg, sim_idi, len(tokens)])\r\n # end for\r\n\r\n # end with\r\n\r\n# end def\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n model = load_embeddings('wordvectors_both.kv')\r\n with open('stopwords-short.dat', 'r') as fin: stopwords = fin.read().split()\r\n filename = 'idioms-definitions-final-counts.csv' # csv with counts\r\n compute_compositionality(filename, model, stopwords)\r\n\r\n\r\n# end if\r\n","repo_name":"ellarabi/gender-idiomatic-language","sub_path":"code/literality_assessment.py","file_name":"literality_assessment.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"74958812878","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.urls import include, path, re_path\nfrom django.views import defaults as default_views\nfrom django.views.decorators.cache import cache_page\nfrom django.views.generic import TemplateView\nfrom drf_spectacular.views import (\n SpectacularAPIView,\n SpectacularRedocView,\n SpectacularSwaggerView,\n)\n\nfrom akarpov.about.views import about_view, list_faq\nfrom akarpov.tools.shortener.views import redirect_view\nfrom config.sitemaps import sitemaps\n\nurlpatterns = [\n path(\n \"home\",\n cache_page(600)(TemplateView.as_view(template_name=\"pages/home.html\")),\n name=\"home\",\n ),\n re_path(r\"^robots\\.txt\", include(\"robots.urls\")),\n path(\n \"sitemap.xml\",\n sitemap,\n {\"sitemaps\": sitemaps},\n name=\"django.contrib.sitemaps.views.sitemap\",\n ),\n path(\"health/\", include(\"health_check.urls\")),\n # Django Admin, use {% url 'admin:index' %}\n path(settings.ADMIN_URL, admin.site.urls),\n # User management\n path(\"users/\", include(\"akarpov.users.urls\", namespace=\"users\")),\n path(\"about\", cache_page(600)(about_view), name=\"about\"),\n path(\"faq/\", list_faq, name=\"faq\"),\n path(\"about/\", include(\"akarpov.about.urls\", namespace=\"about\")),\n path(\"files/\", include(\"akarpov.files.urls\", namespace=\"files\")),\n path(\"music/\", include(\"akarpov.music.urls\", namespace=\"music\")),\n path(\"forms/\", include(\"akarpov.test_platform.urls\", namespace=\"forms\")),\n path(\"tools/\", include(\"akarpov.tools.urls\", namespace=\"tools\")),\n path(\"gallery/\", include(\"akarpov.gallery.urls\", namespace=\"gallery\")),\n path(\"ckeditor/\", include(\"ckeditor_uploader.urls\")),\n path(\"accounts/\", include(\"allauth.urls\")),\n path(\"\", include(\"akarpov.blog.urls\", namespace=\"blog\")),\n path(\"\", redirect_view, name=\"short_url\"),\n # Your stuff: custom urls includes go here\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n# API URLS\nurlpatterns += [\n # API base url\n path(\"api/\", include(\"config.api_router\", namespace=\"api\")),\n # DRF auth token\n path(\"api/schema/\", SpectacularAPIView.as_view(), name=\"api-schema\"),\n path(\"api/schema/\", SpectacularAPIView.as_view(), name=\"api-redoc-schema\"),\n path(\n \"api/docs/\",\n SpectacularSwaggerView.as_view(url_name=\"api-schema\"),\n name=\"swagger\",\n ),\n path(\n \"api/redoc/\",\n 
SpectacularRedocView.as_view(url_name=\"api-redoc-schema\"),\n name=\"redoc\",\n ),\n]\n\nif settings.DEBUG:\n # This allows the error pages to be debugged during development, just visit\n # these url in browser to see how these error pages look like.\n urlpatterns += [\n path(\n \"400/\",\n default_views.bad_request,\n kwargs={\"exception\": Exception(\"Bad Request!\")},\n ),\n path(\n \"403/\",\n default_views.permission_denied,\n kwargs={\"exception\": Exception(\"Permission Denied\")},\n ),\n path(\n \"404/\",\n default_views.page_not_found,\n kwargs={\"exception\": Exception(\"Page not Found\")},\n ),\n path(\"500/\", default_views.server_error),\n ]\n if \"debug_toolbar\" in settings.INSTALLED_APPS:\n import debug_toolbar\n\n urlpatterns = [path(\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n","repo_name":"Alexander-D-Karpov/akarpov","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"29"} +{"seq_id":"6408793755","text":"from __future__ import annotations\n\n\ndef fractional_knapsack(\n value: list[int], weight: list[int], capacity: int\n) -> tuple[float, list[float]]:\n \"\"\"\n >>> value = [1, 3, 5, 7, 9]\n >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]\n >>> fractional_knapsack(value, weight, 5)\n (25, [1, 1, 1, 1, 1])\n >>> fractional_knapsack(value, weight, 15)\n (25, [1, 1, 1, 1, 1])\n >>> fractional_knapsack(value, weight, 25)\n (25, [1, 1, 1, 1, 1])\n >>> fractional_knapsack(value, weight, 26)\n (25, [1, 1, 1, 1, 1])\n >>> fractional_knapsack(value, weight, -1)\n (-90.0, [0, 0, 0, 0, -10.0])\n >>> fractional_knapsack([1, 3, 5, 7], weight, 30)\n (16, [1, 1, 1, 1])\n >>> fractional_knapsack(value, [0.9, 0.7, 0.5, 0.3, 0.1], 30)\n (25, [1, 1, 1, 1, 1])\n >>> fractional_knapsack([], [], 30)\n (0, [])\n \"\"\"\n index = list(range(len(value)))\n ratio = [v / w for v, w in zip(value, weight)]\n index.sort(key=lambda i: ratio[i], reverse=True)\n\n max_value: float = 0\n fractions: list[float] = [0] * len(value)\n for i in index:\n if weight[i] <= capacity:\n fractions[i] = 1\n max_value += value[i]\n capacity -= weight[i]\n else:\n fractions[i] = capacity / weight[i]\n max_value += value[i] * capacity / weight[i]\n break\n\n return max_value, fractions\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n","repo_name":"isaccanedo/Python-fractional-knapsack-2","sub_path":"fractional_knapsack_2.py","file_name":"fractional_knapsack_2.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"38469835892","text":"from lib import Coord\n\n\nclass Node(object):\n def __init__(self, position: Coord, value):\n self.position = position\n self.value = value\n self.neighbors = []\n\n def find_neighbor(self, node):\n for neighbor in self.neighbors:\n if neighbor.node == node:\n return neighbor\n return None\n\n def remove_neighbor(self, node):\n for i in range(len(self.neighbors)):\n if self.neighbors[i].node == node:\n del self.neighbors[i]\n break\n\n def __eq__(self, other):\n return self.position == other.position\n\n def __hash__(self):\n return hash(self.position)\n\n def __repr__(self):\n return f\"Node('{self.value}', {self.position}, {self.neighbors})\"\n\n\nclass Neighbor(object):\n def __init__(self, node: Node, distance: int):\n self.distance = distance\n self.node = node\n\n def __repr__(self):\n return 
f\"Neighbor('{self.node.value}', {self.distance})\"\n\n\nclass Path(object):\n def __init__(self, start: Node):\n self.path = [\n start,\n ]\n self.distance = 0\n\n def append(self, neighbor: Neighbor):\n self.path.append(neighbor.node)\n self.distance += neighbor.distance\n\n @property\n def end(self):\n return self.path[-1]\n\n def __copy__(self):\n newone = Path(self.path[0])\n newone.distance = self.distance\n newone.path += self.path[1:]\n return newone\n\n def __contains__(self, other):\n return other in self.path\n\n def __len__(self):\n return self.distance\n\n def __repr__(self):\n nodes = \"\\n\\t\".join(str(v) for v in self.path)\n return f\"Path({self.distance}:\\n\\t{nodes})\\n\"\n\n\ndef build_graph(maze, wall, space, trimdeadends):\n nodes = {}\n\n # build the graph\n for position, value in maze.iter():\n if value != wall:\n if position not in nodes:\n nodes[position] = Node(position, value)\n neighbors = maze.adjacent(position, exclude=[wall,])\n for neighbor_position, neighbor_value in neighbors:\n if neighbor_position not in nodes:\n nodes[neighbor_position] = Node(neighbor_position, neighbor_value)\n nodes[position].neighbors.append(Neighbor(nodes[neighbor_position], 1))\n\n def trim_deadend(deadend: Node):\n neighbor = deadend.neighbors[0].node\n neighbor.remove_neighbor(node)\n if deadend.position in nodes:\n del nodes[deadend.position]\n if len(neighbor.neighbors) == 1:\n trim_deadend(neighbor)\n\n # trim deadends\n if trimdeadends:\n nodevalues = list(nodes.values())\n for node in nodevalues:\n if node.value == space:\n if len(node.neighbors) == 1:\n trim_deadend(node)\n\n # remove straight through paths\n nodevalues = list(nodes.values())\n for node in nodevalues:\n if node.value == space:\n if len(node.neighbors) == 2:\n left = node.neighbors[0]\n right = node.neighbors[1]\n\n otherleft = left.node.find_neighbor(node)\n otherright = right.node.find_neighbor(node)\n\n otherleft.node = right.node\n otherleft.distance += right.distance\n\n otherright.node = left.node\n otherright.distance += left.distance\n\n del nodes[node.position]\n\n return nodes\n","repo_name":"wrenoud/aoc2019","sub_path":"lib/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"10070255043","text":"\"\"\"\n tela com as informações sobre o tempo\n\n\"\"\"\n\nimport PySimpleGUI as sg\n\nfrom weathercrawler import get_weather\n\n\n_map = {'time': 'dia',\n 'temperature_2m_max': 'Temperatura (máx)',\n 'temperature_2m_min': 'Temperatura (min)',\n 'sunrise': 'Nascer do sol',\n 'sunset': 'Por do sol',\n 'precipitation_sum': 'Precipitação',\n 'rain_sum': 'Chuva',\n 'precipitation_hours': 'Previsão de chuva',\n 'winddirection_10m_dominant' : 'Direção do vento predominante',\n }\n\n\nif __name__ == '__main__':\n # localização\n local = \"Farol da Barra, Salvador/BA\"\n lat = -13.01009515567156\n lon = -38.532752829369244\n weather_info = get_weather(lat=lat, lon=lon)\n\n infos = [[sg.Text(v, key=f\"lab-{k}-\"),\n sg.Text(weather_info[\"daily\"][k][0], key=k),\n sg.Text(weather_info[\"daily_units\"][k], key=f\"met-{k}\")] for k, v in _map.items()]\n\n layout = [\n [sg.VPush()],\n [sg.Text(f\"Local: {local} - Latitude: {lat:.2f} Longitude: {lon:.2f}\")],\n infos,\n [sg.VPush()],\n ]\n\n #Create the Window\n window = sg.Window('Cobertura do Tempo', layout)\n while True:\n event, values = window.read()\n if event == sg.WIN_CLOSED or event == '-close-': # if user closes window or 
clicks cancel\n            break\n    window.close()\n","repo_name":"h3dema/gui_with_python","sub_path":"F5/tempo0.py","file_name":"tempo0.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"22455679567","text":"# Given: an array of n real numbers representing a min-heap.\n# Given a real number x, check whether the k-th smallest element is greater than or equal to x.\n\n\ndef buildheap_min(A):\n    n = len(A)\n    for i in range((n - 2) // 2, -1, -1):\n        heapify_min(A, n, i)\n\n\ndef heapify_min(A, n, i):\n    l = 2 * i + 1\n    r = 2 * i + 2\n    m = i\n    if l < n and A[l] < A[m]:\n        m = l\n    if r < n and A[r] < A[m]:\n        m = r\n    if m != i:\n        A[i], A[m] = A[m], A[i]\n        heapify_min(A, n, m)\n\n\ndef search(A, k, x):\n    counter = 1\n    while counter != k:\n        # extract the minimum: move the last leaf to the root, shrink, restore the heap\n        A[0] = A[-1]\n        A.pop()\n        heapify_min(A, len(A), 0)\n        counter += 1\n    print(A)\n    if A[0] >= x:\n        return True\n    else:\n        return False\n\n\nif __name__ == '__main__':\n    A = [10, 9, 8, 4, 3, 2, 1, 6, 7, 5]\n    buildheap_min(A)\n    print(A)\n    print(search(A, 6, 2))\n","repo_name":"S0jer/algorithms-and-data-structures-course-2021","sub_path":"BIT-ALGO/C1_C2_C3/C2_Z_1.py","file_name":"C2_Z_1.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
{"seq_id":"70335629200","text":"import ctypes\r\nimport sys\r\n\r\nimport cv2\r\nimport numpy\r\n\r\n__all__ = ['resolution', 'cursor', 'grab']\r\n\r\n\r\nclass POINT(ctypes.Structure):\r\n    _fields_ = [('x', ctypes.c_long), ('y', ctypes.c_long)]\r\n\r\n\r\ndef resolution():\r\n    \"\"\"\r\n    Return the primary screen resolution as a (height, width) tuple.\r\n    \"\"\"\r\n    return ctypes.windll.user32.GetSystemMetrics(1), ctypes.windll.user32.GetSystemMetrics(0)\r\n\r\n\r\ndef cursor():\r\n    \"\"\"\r\n    Return the current cursor position as a (y, x) tuple.\r\n    \"\"\"\r\n    _cursor = POINT()\r\n    ctypes.windll.user32.GetCursorPos(ctypes.byref(_cursor))\r\n    return _cursor.y, _cursor.x\r\n\r\n\r\ndef grab(region=None) -> numpy.ndarray:\r\n    \"\"\"\r\n    Capture the screen (or the given (left, top, right, bottom) region)\r\n    and return it as a BGR numpy array.\r\n    \"\"\"\r\n    if sys.platform == 'win32':\r\n        import win32api\r\n        import win32con\r\n        import win32gui\r\n        import win32ui\r\n\r\n        window_handle = win32gui.GetDesktopWindow()\r\n\r\n        if region:\r\n            left, top, right, bot = region\r\n            width = right - left + 1\r\n            height = bot - top + 1\r\n        else:\r\n            # width, height = resolution()\r\n            # left = 0\r\n            # top = 0\r\n\r\n            width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)\r\n            height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)\r\n            left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)\r\n            top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)\r\n\r\n        window_handle_dc = win32gui.GetWindowDC(window_handle)\r\n        source_dc = win32ui.CreateDCFromHandle(window_handle_dc)\r\n        memory_dc = source_dc.CreateCompatibleDC()\r\n        bit_map = win32ui.CreateBitmap()\r\n        bit_map.CreateCompatibleBitmap(source_dc, width, height)\r\n        memory_dc.SelectObject(bit_map)\r\n        memory_dc.BitBlt((0, 0), (width, height), source_dc, (left, top), win32con.SRCCOPY)\r\n\r\n        signed_ints_array = bit_map.GetBitmapBits(True)\r\n        image = numpy.frombuffer(signed_ints_array, dtype=numpy.uint8)\r\n        image.shape = (height, width, 4)\r\n\r\n        source_dc.DeleteDC()\r\n        memory_dc.DeleteDC()\r\n        win32gui.ReleaseDC(window_handle, window_handle_dc)\r\n        win32gui.DeleteObject(bit_map.GetHandle())\r\n\r\n        return cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)\r\n","repo_name":"malkoch/joatmon","sub_path":"joatmon/system/hid/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"}
{"seq_id":"73593756879","text":"#Check labels and raw images\nimport pandas as pd\nfrom tqdm import tqdm, trange\nimport cv2\nimport os\nfrom skimage import io\nimport shutil\n\n\nwork_diretory = \"/4t/yangchihyuan/TransmittedImages/ShuffleNet/\" \ncsv_file = os.path.join(work_diretory,\"csv_files/0503_lab.csv\")\nimage_label_check_directory = os.path.join(work_diretory, \"image_label_check\")\npositive_directory = os.path.join( image_label_check_directory, \"positive\")\nnegative_directory = os.path.join( image_label_check_directory, \"negative\")\n\npositive_files = [f for f in os.listdir(positive_directory) if os.path.isfile(os.path.join(positive_directory, f))]\nnegative_files = [f for f in os.listdir(negative_directory) if os.path.isfile(os.path.join(negative_directory, f))]\n\ndf = pd.read_csv(csv_file, dtype={\"filename\": str, 'index': int, 'x': int, 'y': int, 'width': int, 'height': int})\nnumber_of_rows = df.shape[0]\nfor i in trange(number_of_rows):\n    filename = df.loc[i,'filename'] + '_' + str(df.loc[i,'index']) + '.jpg'\n    if filename in positive_files:\n        df.loc[i,'label'] = \"1\"\n    elif filename in negative_files:\n        df.loc[i,'label'] = \"0\"\n\ndf.to_csv(csv_file, index=False)\n","repo_name":"yangchihyuan/RobotVideoSummary_ServerSide","sub_path":"RetroAssignLabelsFromDirectories.py","file_name":"RetroAssignLabelsFromDirectories.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
{"seq_id":"14903310721","text":"import pygame\r\nfrom VariablesGlobales import menusActivos, menus, buttons, buttonsActivos\r\n#from JuegoTest1 import *\r\n'''\r\nclass Menus:\r\n    def __init__(self, menu_width, menu_height, n_buttons, func_list, text_list):\r\n        self.menu_width = menu_width\r\n        self.menu_height = menu_height\r\n        self.button_width = int(menu_width/n_buttons)\r\n        self.button_height = int(menu_height/n_buttons)\r\n        self.n_buttons = n_buttons\r\n        self.func_list = func_list\r\n        self.text = text_list\r\n\r\n    def draw_menu(self, pos_xy, screen, mouse_xy, click):\r\n        pos_x = pos_xy[0]\r\n        for i in range(self.n_buttons):\r\n            pos_y = pos_xy[1] + self.button_height*i\r\n            button = pygame.Rect((pos_x, pos_y, self.button_width, self.button_height))\r\n            pygame.draw.rect(screen, (100, 200, 0), button)\r\n            pygame.draw.rect(screen, (0, 0, 0), button, 2)\r\n            font = pygame.font.SysFont(None, 20)\r\n            draw_text_centered(self.text[i], font, (0,0,0), screen, pos_x + self.button_width/2, pos_y + self.button_height/2)\r\n            if button.collidepoint(mouse_xy) and click:\r\n                eval(self.func_list[i])\r\n                return False\r\n        return True\r\ndef prueba_click(text):\r\n    print(text)\r\n'''\r\ndef draw_text_centered(text, font, color, surface, x, y):\r\n    textobj = font.render(text, 1, color)\r\n    x -= textobj.get_width()/2\r\n    y -= 
textobj.get_height()/2\r\n    textrect = textobj.get_rect()\r\n    textrect.topleft = (x, y)\r\n    surface.blit(textobj, textrect)\r\n\r\n\r\n\r\n\r\nclass Menus:\r\n\r\n    def menuClicked(xy):\r\n        for menuActivo in menusActivos.values():\r\n            if menuActivo.collidepoint(xy):\r\n                print(menuActivo)\r\n                return True\r\n        return False\r\n\r\n    def test(x, xy):\r\n        return x.collidepoint(xy)\r\n\r\n    def menuClicked_alternative(xy):\r\n        return any(map(lambda x: x.collidepoint(xy), menusActivos.values()))\r\n\r\n    def menuAdd(x,y,width,height,id):\r\n        global menusActivos\r\n        menu = pygame.Rect((x, y, width, height))\r\n        menusActivos[id] = menu\r\n        print(menusActivos)\r\n    \r\n    def menuErase(id):\r\n        del menusActivos[id]\r\n\r\n    def menuDraw():\r\n        for key in menusActivos.keys():\r\n            #myIter = iter(menusActivos)  # not sure how to use it yet, but it is significantly more efficient and would suit us well\r\n            #key = next(myIter)\r\n            x = menus[key]['x']\r\n            y = menus[key]['y']\r\n            width = menus[key]['width']\r\n            height = menus[key]['height']\r\n            screen = menus[key]['screen']\r\n            #pantallita.blit(key, (0,0))\r\n\r\n            pygame.draw.rect(screen, (0, 0, 0), (x,y,width, height))\r\n\r\nclass Menu:\r\n\r\n    def __init__(self, x, y, width, height, screen, buttons, id):\r\n        global menus\r\n        self.x = x\r\n        self.y = y\r\n        self.width = width\r\n        self.height = height\r\n        self.screen = screen\r\n        self.buttons = buttons\r\n        self.id = id\r\n        menus[id] = {'x' : self.x, 'y' : self.y, 'width' : self.width, 'height' : self.height, 'screen' : self.screen, 'buttons' : self.buttons}\r\n        #self.surface = pygame.Surface((x,y))\r\n\r\n\r\n    def activateMenu(self): \r\n        Menus.menuAdd(self.x, self.y, self.width, self.height, self.id) \r\n        for i in self.buttons:\r\n            key = self.buttons[i]\r\n            x = buttons[key]['x']\r\n            y = buttons[key]['y']\r\n            width = buttons[key]['width']\r\n            height = buttons[key]['height']\r\n            Buttons.buttonAdd(x,y,width,height,key)\r\n\r\n\r\nclass Buttons:\r\n\r\n    def buttonClicked(xy):\r\n        for i in range(0,len(buttonsActivos)):\r\n            key = list(buttonsActivos.keys())[i]\r\n            if buttonsActivos[key].collidepoint(xy):\r\n                return key\r\n        return -1\r\n\r\n    def buttonAdd(x,y,width,height,id):\r\n        global buttonsActivos\r\n        button = pygame.Rect((x, y, width, height))\r\n        buttonsActivos[id] = button\r\n    \r\n    def buttonErase(id):\r\n        del buttonsActivos[id]\r\n\r\n    def buttonDraw():\r\n        for i in range(0,len(buttonsActivos)):\r\n            #myIter = iter(buttonsActivos)  # not sure how to use it yet, but it is significantly more efficient and would suit us well\r\n            #key = next(myIter)\r\n            key = list(buttonsActivos.keys())[i]\r\n            x = buttons[key]['x']\r\n            y = buttons[key]['y']\r\n            width = buttons[key]['width']\r\n            height = buttons[key]['height']\r\n            screen = buttons[key]['screen']\r\n            pygame.draw.rect(screen, (255, 0, 0), (x,y,width, height))\r\n\r\nclass Button:\r\n\r\n    def __init__(self, x, y, width, height, screen, function, id):\r\n        global buttons\r\n        self.x = x\r\n        self.y = y\r\n        self.width = width\r\n        self.height = height\r\n        self.screen = screen\r\n        self.function = function\r\n        self.id = id\r\n        buttons[id] = {'x' : self.x, 'y' : self.y, 'width' : self.width, 'height' : self.height, 'screen' : self.screen, 'function' : self.function}\r\n\r\n\r\n    def activateButton(self): \r\n        Buttons.buttonAdd(self.x, self.y, self.width, self.height, self.id)\r\n","repo_name":"migangal/AgeOfPhysics","sub_path":"Menus.py","file_name":"Menus.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"13567849459","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('regions/', views.RegionList.as_view(), name='region_list'),\n    path('regions/<int:pk>', views.RegionDetail.as_view(), name='region_detail'),\n    path('countries/', views.CountryList.as_view(), name='country_list'),\n    path('countries/<int:pk>', views.CountryDetail.as_view(), name='country_detail'),\n    path('locations/', views.LocationList.as_view(), name='location_list'),\n    path('locations/<int:pk>', views.LocationDetail.as_view(), name='location_detail'),\n    path('departments/', views.DepartmentList.as_view(), name='departments_list'),\n    path('departments/<int:pk>', views.DepartmentDetail.as_view(), name='departments_detail'),\n    path('jobs/', views.JobList.as_view(), name='jobs_list'),\n    path('jobs/<int:pk>', views.JobDetail.as_view(), name='jobs_detail'),\n    path('employees/', views.EmployeeList.as_view(), name='employees_list'),\n    path('employees/<int:pk>', views.EmployeeDetail.as_view(), name='employees_detail'),\n    path('job_histories/', views.JobHistoryList.as_view(), name='job_histories_list'),\n    path('job_histories/<int:pk>', views.JobHistoryDetail.as_view(), name='job_histories_detail'),\n]","repo_name":"leejoonli/python-postgres-db-1","sub_path":"python_postgres_exercise_1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"29143509058","text":"from flask import Flask, request, jsonify\nimport json\nfrom pymongo import MongoClient\nfrom bson import ObjectId\n\napp = Flask(__name__)\n\nclient = MongoClient('localhost', 27017)\nrestaurant_db = client['restaurant-db']\n\nclass JSONEncoder(json.JSONEncoder):\n    def default(self, o):\n        if isinstance(o, ObjectId):\n            return str(o)\n        return json.JSONEncoder.default(self, o)\n\n\n# {\n# \t\"name\": \"tastygrill\",\n# \t\"address\": \"easton ave\"\n# }\n\n\n@app.route(\"/restaurants\", methods=[\"GET\", \"POST\"])\ndef restaurants():\n    if request.method == \"GET\":\n        restaurant_coll = restaurant_db['restaurants']\n        return JSONEncoder().encode(list(restaurant_coll.find()))\n    else:\n        body = request.get_json()\n        res = {\n            \"name\": body['name'],\n            \"address\": body['address'],\n            \"menu_items\": []\n        }\n        restaurant_coll = restaurant_db['restaurants']\n        restaurant_coll.insert_one(res)\n        return \"Restaurant inserted\"\n\n\n# {\n#   \"name\": \"madhu\",\n#   \"price\": 9 \n# }\n\n@app.route(\"/restaurants/<res_id>/items\", methods=[\"GET\", \"POST\"])\ndef menu_items(res_id):\n    restaurant_coll = restaurant_db['restaurants']\n    if request.method == \"GET\":\n        return JSONEncoder().encode(restaurant_coll.find_one({\"_id\":ObjectId(res_id)})[\"menu_items\"])\n    else:\n        body = request.get_json()\n        item = {\n            \"name\": body[\"name\"],\n            \"price\": body[\"price\"]\n        }\n\n        restaurant_coll.update_one(\n            {\"_id\": ObjectId(res_id)},\n            {\"$push\":\n                {\"menu_items\" : item}\n            }\n        )\n        return \"Item added to menu\"\n\n@app.route(\"/restaurants/<res_id>/reviews\", methods=[\"GET\",\"POST\"])\ndef reviews(res_id):\n    review_collection = restaurant_db[\"reviews\"]\n    if request.method == \"GET\":\n        output = review_collection.find({\"res_id\":ObjectId(res_id)})\n        return JSONEncoder().encode(list(output))\n    else:\n        body = request.get_json()\n        stars = 
int(body[\"stars\"])\n if(stars>5):\n return \"Too many stars\"\n \n\n review = {\n \"reviewer\" : body[\"reviewer\"],\n \"review_body\": body[\"review_body\"],\n \"stars\": int(body[\"stars\"]),\n \"res_id\": ObjectId(res_id)\n }\n review_collection.insert_one(review)\n return \"Successful\"","repo_name":"RutgersMobileApplicationDevelopment/Backend-Accelerator-2019","sub_path":"week6/restaurants.py","file_name":"restaurants.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"29"} +{"seq_id":"32984199520","text":"import numpy as np\nimport pickle\nfrom sklearn.base import BaseEstimator\nfrom sklearn.tree import DecisionTreeClassifier\nfrom os.path import isfile\n\n### START NEW CONTRIBUTION OF GROUP ORBITER ###\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import GridSearchCV\nimport time\n\nfrom prepro import transforme\n\nsoumission = True\nif not soumission :\n import matplotlib.pyplot as plt \n### END NEW CONTRIBUTION OF GROUP ORBITER ###\n\ndef requires_grad(p):\n return p.requires_grad\n\n\nclass baselineModel(BaseEstimator):\n def __init__(self, max_depth=5):\n \"\"\"\n Using DecisionTreeClassifier from sklearn as Baseline Model\n Has one parameter which is the max depth of the tree (base value of 5)\n \"\"\"\n super(baselineModel, self).__init__()\n self.classifier = SVC(C=10, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma=0.001, kernel='linear', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False)\n\n #DecisionTreeClassifier(max_depth=max_depth)\n self.num_train_samples=0\n self.num_feat=1\n self.num_labels=1\n self.is_trained=False\n ### START NEW CONTRIBUTION OF GROUP ORBITER ###\n #ajout d'attributs\n self.best_prepro=False\n self.value_best_prepro=0\n ### END NEW CONTRIBUTION OF GROUP ORBITER ###\n\n def fit(self, X, y):\n ### START NEW CONTRIBUTION OF GROUP ORBITER ###\n if self.best_prepro:\n X = transforme(X, self.value_best_prepro)\n ### END NEW CONTRIBUTION OF GROUP ORBITER ###\n \n self.num_train_samples = X.shape[0]\n if X.ndim>1: self.num_feat = X.shape[1]\n print(\"FIT: dim(X)= [{:d}, {:d}]\".format(self.num_train_samples, self.num_feat))\n num_train_samples = y.shape[0]\n if y.ndim>1: self.num_labels = y.shape[1]\n print(\"FIT: dim(y)= [{:d}, {:d}]\".format(num_train_samples, self.num_labels))\n if (self.num_train_samples != num_train_samples):\n print(\"ARRGH: number of samples in X and y do not match!\")\n self.is_trained=True\n self.classifier.fit(X, y)\n\n def predict(self, X):\n ### START NEW CONTRIBUTION OF GROUP ORBITER ###\n if self.best_prepro:\n X = transforme(X, self.value_best_prepro)\n ### END NEW CONTRIBUTION OF GROUP ORBITER ###\n \n num_test_samples = X.shape[0]\n if X.ndim>1:\n num_feat = X.shape[1]\n print(\"PREDICT: dim(X)= [{:d}, {:d}]\".format(num_test_samples, num_feat))\n if (self.num_feat != num_feat):\n print(\"ARRGH: number of features in X does not match training data!\")\n print(\"PREDICT: dim(y)= [{:d}, {:d}]\".format(num_test_samples, self.num_labels))\n y = np.zeros([num_test_samples, self.num_labels])\n return self.classifier.predict(X)\n\n def save(self, path=\"./\"):\n #pickle.dump(self, open(path + '_model.pickle', \"wb\"))\n pass\n\n def load(self, path=\"./\"):\n #modelfile = path + '_model.pickle'\n #if isfile(modelfile):\n # with open(modelfile, 'rb') as f:\n # self = pickle.load(f)\n # 
print(\"Model reloaded from: \" + modelfile)\n return self\n\n\n### START NEW CONTRIBUTION OF GROUP ORBITER ### \ndef tests_auto(X_train, Y_train):\n\n Y_train = Y_train.ravel()\n #definition des paramètres à tester pour le classifier SVC:\n tuned_parameters = {'C':[1, 10, 100]} \n\n print(\"1° cas : recherche des meilleurs paramètres avec les images de base (SVC):\")\n grid = GridSearchCV(SVC(gamma = 'auto', kernel = 'linear'), tuned_parameters, cv=5, scoring='accuracy')\n\n debut = time.time()\n grid.fit(X_train, Y_train)\n fin = time.time() - debut\n \n max_cv_non_prepro_svc = grid.best_score_\n \n print(\"Correspondances graphique :\\n\")\n means = [result for result in grid.cv_results_['params']]\n for i in range(0, len(means)):\n print(\"\\t{} --> {}\".format(i, means[i]))\n \n #Affichage du meilleur score avec les meilleurs paramètres (temps)\n print(\"Les meilleurs paramètres sont : {}, qui donnent un score de : {} (en {} secondes)\".format(grid.best_params_, round(grid.best_score_, 3), round(fin)))\n if not soumission : \n #Graphe scores de cv avec le prepro de base et détermination meilleurs paramètres pour l'apprentissage\n stds_svc = grid.cv_results_['std_test_score']\n grid_mean_scores = [result for result in grid.cv_results_['mean_test_score']]\n plt.figure(figsize=(10, 10))\n ax = plt.subplot(121)\n plt.plot(range(0, len(grid_mean_scores)), grid_mean_scores)\n plt.xlabel('N° de test des paramètres')\n plt.ylabel('Cross-Validation Accuracy')\n plt.errorbar(range(0, len(grid_mean_scores)), grid_mean_scores, yerr=stds_svc, fmt='.k');\n\n plt.title('Cross Validation sans notre preprocessing (SVC)')\n plt.show\n \n #definition des paramètres à tester pour le classifier MLPClassifier:\n tuned_parameters_mlp = parameters = {'solver': ['lbfgs'], 'max_iter': [1000,1500,2000,2500,3000]}\n print(\"2° cas : recherche des meilleurs paramètres avec les images de base (MLPClassifier):\")\n grid_mlp = GridSearchCV(MLPClassifier(), tuned_parameters_mlp, cv=5, scoring='accuracy')\n\n debut = time.time()\n grid_mlp.fit(X_train, Y_train)\n fin = time.time() - debut\n \n max_cv_non_prepro_mlp = grid_mlp.best_score_\n \n #Affichage du meilleur score avec les meilleurs paramètres (temps)\n print(\"Les meilleurs paramètres sont : {}, qui donnent un score de : {} (en {} secondes)\".format(grid_mlp.best_params_, round(grid_mlp.best_score_, 3), round(fin)))\n \n print(\"Correspondances graphique :\\n\")\n means = [result for result in grid_mlp.cv_results_['params']]\n for i in range(0, len(means)):\n print(\"\\t{} --> {}\".format(i, means[i]))\n \n if not soumission : \n #Graphe scores de cv avec le prepro de base et détermination meilleurs paramètres pour l'apprentissage\n stds_mlp = grid_mlp.cv_results_['std_test_score']\n grid_mean_scores = [result for result in grid_mlp.cv_results_['mean_test_score']]\n plt.figure(figsize=(10, 10))\n ax = plt.subplot(122)\n plt.plot(range(0, len(grid_mean_scores)), grid_mean_scores)\n plt.xlabel('N° de test des paramètres')\n plt.ylabel('Cross-Validation Accuracy')\n plt.errorbar(range(0, len(grid_mean_scores)), grid_mean_scores, yerr=stds_mlp, fmt='.k');\n plt.title('Cross Validation sans notre preprocessing (MLPClassifier)')\n plt.show\n \n\n if max_cv_non_prepro_mlp > max_cv_non_prepro_svc:\n print(\"On sélectionne MLPClassifer !\")\n best_classifier = grid_mlp.best_estimator_\n parameters = tuned_parameters_mlp\n max_cv_non_prepro = max_cv_non_prepro_mlp\n else :\n print(\"On sélectionne SVC !\")\n best_classifier = grid.best_estimator_\n parameters = 
tuned_parameters\n max_cv_non_prepro = max_cv_non_prepro_svc\n \n\n print(\"\\n3° cas : recherche des améliorations possibles de notre preprocessing (PCA):\")\n result = []\n\n for i in range(128, 1024, 128):\n X = transforme(X_train, i)\n g = GridSearchCV(best_classifier, parameters, cv=5, scoring='accuracy')\n debut = time.time()\n g.fit(X, Y_train)\n fin = time.time() - debut\n res = [i, fin, g.best_score_, g.best_params_, g.best_estimator_]\n print(\"Pour n_components = {}, on obtient un score de {}.\".format(i, round(res[2], 3)))\n result.append(res)\n\n #Affichage des différentes valeurs pour connaitre la meilleur pour le prepro\n valeurs_prepro_GSCV = []\n for i in range(0, len(result)):\n valeurs_prepro_GSCV.append(result[i][2])\n \n if not soumission :\n plt.subplot(221)\n plt.plot(range(128, 1024, 128), valeurs_prepro_GSCV)\n plt.xlabel('Valeur de n_components')\n plt.ylabel('Cross-Validation Accuracy')\n plt.title('Cross Validation avec notre preprocessing')\n plt.show\n \n #déterminer la cv max dans result\n max_cv_prepro = result[0]\n for i in range(0, len(result)):\n if result[i][2] > max_cv_prepro[2]:\n max_cv_prepro = result[i]\n \n #Petite phrase qui affiche les meilleurs paramètres\n print(\"Le meilleur paramètre pour n_components : {}, qui donnent un score de : {}\".format(max_cv_prepro[0], round(max_cv_prepro[2],3)))\n \n #Modifications a apportées en conséquences\n retour = baselineModel()\n retour.classifier = best_classifier\n if max_cv_prepro[2] > max_cv_non_prepro:\n #On chooisit de faire notre prepro puis l'apprentissage avec les paramètres qui ont le mieux réussit\n print(\"\\nLe model choisit est celui avec notre preprocessing !\")\n #retour.classifier = max_cv_prepro[4]\n retour.best_prepro = True\n retour.value_best_prepro = max_cv_prepro[0]\n else:\n print(\"\\nLe model choisit est celui sans notre preprocessing !\")\n #retour.classifier = best_classifier\n \n return retour\n \n \n","repo_name":"grpOrbiter/prepro_orbiter","sub_path":"starting_kit/sample_code_submission/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"41654268577","text":"from debsources.filetype import get_highlightjs_language\n\n\nclass SourceCodeIterator(object):\n def __init__(self, filepath, hl=None, msg=None, encoding=\"utf8\", lang=None):\n \"\"\"\n creates a new SourceCodeIterator object\n\n Arguments:\n filename: the source code file\n\n Keyword arguments:\n hlbegin: first line whixh will be highlighted\n hlend: last line which will be highlighted\n encoding: the file character encoding\n classes_exts: a tuples list, containing classes to associate with\n file extensions, eg:\n [(\"cpp\", ['cpp','hpp']), (...), ...]\n \"\"\"\n self.filepath = filepath\n self.filename = self.filepath.name\n self.file = open(filepath, encoding=encoding, errors=\"ignore\")\n # we store the firstline (used to determine file language)\n try:\n self.firstline = next(self.file)\n except Exception: # empty file\n self.firstline = \"\"\n\n self.file.seek(0)\n # TODO: proper generator (but 'with' is not available in jinja2)\n\n self.encoding = encoding\n self.lang = lang\n self.current_line = 0\n self.number_of_lines = None\n self.msgs = msg\n self.hls = set()\n if hl is not None:\n hlranges = hl.split(\",\")\n for r in hlranges:\n if \":\" in r: # it's a range\n try:\n rbegin, rend = r.split(\":\")\n for i in range(int(rbegin), int(rend) + 1):\n self.hls.add(i)\n except 
(ValueError, TypeError):\n pass\n else: # it's a single line\n try:\n self.hls.add(int(r))\n except Exception:\n pass\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.current_line += 1\n if self.current_line in self.hls:\n class_ = True\n else:\n class_ = False\n try:\n line = next(self.file)\n except StopIteration:\n # end of file, we close it\n self.file.close()\n raise StopIteration\n return (line, class_)\n\n def get_number_of_lines(self):\n if self.number_of_lines is not None:\n return self.number_of_lines\n self.number_of_lines = 0\n with open(self.filepath, errors=\"ignore\") as sfile:\n for line in sfile:\n self.number_of_lines += 1\n return self.number_of_lines\n\n def get_file_language(self):\n \"\"\"\n Returns a class name, usable by highlight.hs, to help it to guess\n the source language.\n \"\"\"\n return get_highlightjs_language(self.filename, self.firstline, self.lang)\n\n def get_msgdict(self):\n \"\"\"\n returns a dict(position=, title=, message=) generated from\n the string message (position:title:message)\n \"\"\"\n if self.msgs is None:\n return dict()\n msg_list = []\n for msg in self.msgs:\n msgsplit = msg.split(\":\")\n msgdict = dict()\n try:\n msgdict[\"position\"] = int(msgsplit[0])\n except ValueError:\n msgdict[\"position\"] = 1\n try:\n msgdict[\"title\"] = msgsplit[1]\n except IndexError:\n msgdict[\"title\"] = \"\"\n try:\n msgdict[\"message\"] = \":\".join(msgsplit[2:])\n except IndexError:\n msgdict[\"message\"] = \"\"\n msg_list.append(msgdict)\n return msg_list\n","repo_name":"Debian/debsources","sub_path":"lib/debsources/app/sourcecode.py","file_name":"sourcecode.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"29"} +{"seq_id":"7724615318","text":"from nltk.corpus import PlaintextCorpusReader\r\nimport nltk\r\nnltk.download('stopwords')\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nsr = stopwords.words('english')\r\ncorpus_root = 'E:/Code/machine-learning/machine-learning-python/natural_language_processing/inaugurals'\r\nwordlist = PlaintextCorpusReader(corpus_root, \".*\")\r\nprint(wordlist.fileids())\r\n\r\ntrump_words = wordlist.words('trump2017.txt')\r\nobama_words = wordlist.words('obama2009.txt')\r\n# remove punctuation\r\ntrump_words = [word for word in trump_words if len(word) > 1]\r\nobama_words = [word for word in obama_words if len(word) > 1]\r\n# remove applause\r\nobama_words = [word for word in obama_words if word != 'Applause'] \r\nobama_words = [word for word in obama_words if word != '.)'] \r\nfreq_trump = nltk.FreqDist(w for w in trump_words if not w in sr)\r\nfreq_obama = nltk.FreqDist(w for w in obama_words if not w in sr)\r\n \r\n# for key,val in freq_obama:\r\n# print(str(key) + ':' + str(val))\r\n\r\n# freq_trump.plot(20, cumulative=False)\r\nfreq_obama.plot(20, cumulative=False)","repo_name":"bry-an/ml-python","sub_path":"natural_language_processing/inaugural_addresses.py","file_name":"inaugural_addresses.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"75001857038","text":"class Solution(object):\n def buddyStrings(self, A, B):\n if len(A) != len(B): return False\n if A == B and len(set(A)) <= len(A)-1 : return True\n \n \n pairs = []\n for a, b in zip(A, B):\n if a != b:\n pairs.append((a, b))\n if len(pairs) >= 3: return False\n return len(pairs) == 2 and pairs[0] == 
pairs[1][::-1]\n","repo_name":"tayciryahmed/data-structures-and-algorithms","sub_path":"buddy-strings.py","file_name":"buddy-strings.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"} +{"seq_id":"15949115638","text":"from dataclasses import dataclass\nfrom typing import Dict, List\n\nimport numpy as np\nfrom transformers import PreTrainedTokenizerBase\n\nfrom .data_preprocessing import (\n noise_span_to_unique_sentinel,\n nonnoise_span_to_unique_sentinel,\n)\n\n\ndef shift_tokens_right(\n input_ids: np.ndarray,\n pad_token_id: int,\n decoder_start_token_id: int,\n) -> np.ndarray:\n \"\"\"\n Shift input ids one token to the right.\n \"\"\"\n shifted_input_ids = np.zeros_like(input_ids)\n shifted_input_ids[..., 1:] = input_ids[..., :-1]\n shifted_input_ids[..., 0] = decoder_start_token_id\n\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids = np.where(\n shifted_input_ids == -100,\n pad_token_id,\n shifted_input_ids,\n )\n\n return shifted_input_ids\n\n\n@dataclass\nclass DataCollatorForT5MLM:\n \"\"\"\n Data collator used for T5 span-masked language modeling.\n It is made sure that after masking the inputs are of length `data_args.max_seq_length` and targets are also of fixed length.\n For more information on how T5 span-masked language modeling works, one can take a look\n at the `official paper `__\n or the `official code for preprocessing `__ .\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n noise_density (:obj:`float`):\n The probability with which to (randomly) mask tokens in the input.\n mean_noise_span_length (:obj:`float`):\n The average span length of the masked tokens.\n input_length (:obj:`int`):\n The expected input length after masking.\n target_length (:obj:`int`):\n The expected target length after masking.\n eos_token_id: (:obj:`int`):\n The eos token id of the model.\n pad_token_id: (:obj:`int`):\n The pad token id of the model.\n decoder_start_token_id: (:obj:`int):\n The decoder start token id of the model.\n sentinel_token_id: (:obj:`int):\n The first sentinel token id of the model.\n sentinel_tokens_reversed: (:obj:`bool):\n Whether sentinel tokens arranged in reverse order.\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n noise_density: float\n mean_noise_span_length: float\n input_length: int\n target_length: int\n eos_token_id: int\n pad_token_id: int\n decoder_start_token_id: int\n sentinel_token_id: int\n sentinel_tokens_reversed: bool = False\n\n def __call__(self, examples: List[Dict[str, np.ndarray]]) -> Dict[str, np.ndarray]:\n # add span corruption noise for each example\n examples = [self.denoise(example) for example in examples]\n\n # collate input examples\n batch = {\n column: np.array([examples[idx][column] for idx in range(len(examples))])\n for column in examples[0].keys()\n }\n\n if batch[\"input_ids\"].shape[-1] != self.input_length:\n raise ValueError(\n f\"`input_ids` are incorrectly preprocessed. `input_ids` length is {batch['input_ids'].shape[-1]}, but should be {self.input_length}.\"\n )\n\n if batch[\"labels\"].shape[-1] != self.target_length:\n raise ValueError(\n f\"`labels` are incorrectly preprocessed. 
`labels` length is {batch['labels'].shape[-1]}, but should be {self.target_length}.\"\n )\n\n return batch\n\n def denoise(self, features: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n tokens = features[\"input_ids\"]\n noise_mask = self.random_spans_noise_mask(np.size(tokens))\n\n input_ids = self.append_eos_token(\n noise_span_to_unique_sentinel(\n tokens,\n noise_mask,\n self.sentinel_token_id,\n self.sentinel_tokens_reversed,\n )\n )\n labels = self.append_eos_token(\n nonnoise_span_to_unique_sentinel(\n tokens,\n noise_mask,\n self.sentinel_token_id,\n self.sentinel_tokens_reversed,\n )\n )\n decoder_input_ids = shift_tokens_right(\n labels,\n self.pad_token_id,\n self.decoder_start_token_id,\n )\n\n return {\n \"input_ids\": input_ids,\n \"labels\": labels,\n \"decoder_input_ids\": decoder_input_ids,\n }\n\n def append_eos_token(self, tokens: np.ndarray) -> np.ndarray:\n return np.concatenate((tokens, [self.eos_token_id]), axis=0)\n\n def random_spans_noise_mask(self, length: int) -> np.ndarray:\n \"\"\"This function is copy of `random_spans_helper `__ .\n Noise mask consisting of random spans of noise tokens.\n The number of noise tokens and the number of noise spans and non-noise spans\n are determined deterministically as follows:\n num_noise_tokens = round(length * noise_density)\n num_nonnoise_spans = num_noise_spans = round(num_noise_tokens / mean_noise_span_length)\n Spans alternate between non-noise and noise, beginning with non-noise.\n Subject to the above restrictions, all masks are equally likely.\n Args:\n length: an int32 scalar (length of the incoming token sequence)\n noise_density: a float - approximate density of output mask\n mean_noise_span_length: a number\n Returns:\n a boolean tensor with shape [length]\n \"\"\"\n\n orig_length = length\n\n num_noise_tokens = int(np.round(length * self.noise_density))\n # avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.\n num_noise_tokens = min(max(num_noise_tokens, 1), length - 1)\n num_noise_spans = int(np.round(num_noise_tokens / self.mean_noise_span_length))\n\n # avoid degeneracy by ensuring positive number of noise spans\n num_noise_spans = max(num_noise_spans, 1)\n num_nonnoise_tokens = length - num_noise_tokens\n\n # pick the lengths of the noise spans and the non-noise spans\n def _random_segmentation(num_items: int, num_segments: int) -> np.ndarray:\n \"\"\"Partition a sequence of items randomly into non-empty segments.\n Args:\n num_items: an integer scalar > 0\n num_segments: an integer scalar in [1, num_items]\n Returns:\n a Tensor with shape [num_segments] containing positive integers that add\n up to num_items\n \"\"\"\n mask_indices = np.arange(num_items - 1) < (num_segments - 1)\n np.random.shuffle(mask_indices)\n first_in_segment = np.pad(mask_indices, [[1, 0]])\n segment_id = np.cumsum(first_in_segment)\n _, segment_length = np.unique(segment_id, return_counts=True)\n return segment_length\n\n noise_span_lengths = _random_segmentation(num_noise_tokens, num_noise_spans)\n nonnoise_span_lengths = _random_segmentation(\n num_nonnoise_tokens, num_noise_spans\n )\n\n interleaved_span_lengths = np.reshape(\n np.stack([nonnoise_span_lengths, noise_span_lengths], axis=1),\n [num_noise_spans * 2],\n )\n span_starts = np.cumsum(interleaved_span_lengths)[:-1]\n span_start_indicator = np.zeros((length,), dtype=np.int8)\n span_start_indicator[span_starts] = True\n span_num = np.cumsum(span_start_indicator)\n is_noise = np.equal(span_num % 2, 1)\n\n return 
is_noise[:orig_length]\n","repo_name":"formermagic/git-t5","sub_path":"git_t5/data/data_collator.py","file_name":"data_collator.py","file_ext":"py","file_size_in_byte":7878,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"}
{"seq_id":"5869252547","text":"import ImageFilters\nimport cv2 as cv\nimport streamlit as st\nimport altair as alt\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom PIL import Image\nimport numpy as np\n\n\nclass interface(ImageFilters.filters):\n    functions = [\n        \"Introduction\",\n        \"Blur\",\n        \"Gaussian Blur\",\n        \"Gray\",\n        \"HDR\",\n        \"Emboss\",\n        \"Invert\",\n        \"Histogram_equalize\",\n        \"Adjust_brightness\",\n        \"Color_filters\",\n        \"Sketch\",\n        \"Cartoon\",\n    ]\n\n    def __init__(self, mode=\"image\"):\n\n        super().__init__()\n\n        st.title(\"Image Filters using OpenCV\")\n\n        self.mode = mode\n        self.__default_image = \"./assets/default.jpg\"\n        self.__kid_image = \"./assets/kid.png\"\n\n        self.upload_handler()\n        self.selector()\n\n    def upload_handler(self):\n        if self.mode == \"image\":\n            uploaded = st.sidebar.file_uploader(\n                \"Upload an image\",\n                type=[\"png\", \"jpg\", \"jpeg\"],\n                help=\"Default image is chosen if not uploaded\",\n            )\n            if uploaded is not None:\n                self.img = cv.cvtColor(np.array(Image.open(uploaded)), cv.COLOR_RGB2BGR)\n\n            else:\n                self.img = cv.imread(self.__default_image)\n\n        elif self.mode == \"video\":\n            pass\n\n    def selector(self):\n        func = st.sidebar.selectbox(\"Select functions\", options=self.functions, index=0)\n\n        if func == \"Introduction\":\n            self.introduction()\n\n        elif func == \"Blur\":\n            st.subheader(\"Blur Filter\")\n            self.blur()\n\n        elif func == \"Gaussian Blur\":\n            st.subheader(\"Gaussian Filter\")\n            self.gblur()\n\n        elif func == \"Gray\":\n            st.subheader(\"Gray Filter\")\n            self.gray()\n\n        elif func == \"Emboss\":\n            st.subheader(\"Emboss Filter\")\n            self.emboss()\n\n        elif func == \"Invert\":\n            st.subheader(\"Invert Filter\")\n            self.invert()\n\n        elif func == \"Histogram_equalize\":\n            st.subheader(\"Histogram Equalize\")\n            self.hist_equalize()\n\n        elif func == \"Adjust_brightness\":\n            st.subheader(\"Adjust brightness/Gamma\")\n            self.adjust_brightness()\n\n        elif func == \"Color_filters\":\n            st.subheader(\"Color Filters\")\n            self.color_filters()\n\n        elif func == \"Sketch\":\n            st.subheader(\"Sketch Filter\")\n            self.sketch()\n\n        elif func == \"Cartoon\":\n            st.subheader(\"Cartoon Filter\")\n            self.cartoon()\n\n        elif func == \"HDR\":\n            st.subheader(\"HDR Filter\")\n            self.hdr()\n\n    def introduction(self):\n        st.markdown(\n            \"\"\"\n            This is a webapp which allows you to apply multiple filters to your images.\n\n            If no image is uploaded, a default image is used; you can upload your own too.\n\n            ---\n\n            # About me\n            I am currently an engineering student and an aspiring Data Scientist.\n            Feel free to suggest changes.\n\n            I am also open for **collaborations** and **internships**.\n\n            Here are my profiles\n\n            [LinkedIn](https://www.linkedin.com/in/aditya-ms)\n\n            [Kaggle](https://www.kaggle.com/imams2000)\n\n            [GitHub](https://github.com/im-AMS)\n\n            [Resume](https://drive.google.com/drive/folders/1MzX_eL5B40HFEXgG1c1e7b1ukBbRa1Ha?usp=sharing)\n\n\n            \"\"\",\n            unsafe_allow_html=True,\n        )\n\n    def show(self, file):\n        if self.mode == \"image\":\n            st.image(\n                image=file,\n                channels=\"BGR\",\n            )\n\n        elif self.mode == \"video\":\n            pass\n\n    def blur(self, img=None):\n\n        if img is None:\n            img = self.img\n\n        enable = st.sidebar.checkbox(\"Enable effect\", value=\"True\")\n        st.sidebar.title(\"Params for **Regular Blur**\")\n        a = 
st.sidebar.slider(\n \"kernel size\", min_value=1, max_value=50, step=1, value=20\n )\n\n if enable:\n self.show(file=super().blur(img=img, ksize=(a, a)))\n\n else:\n self.show(img)\n\n def gblur(self, img=None):\n if img is None:\n img = self.img\n\n st.sidebar.title(\"Params for **Gaussian Blur**\")\n enable = st.sidebar.checkbox(\"Enable effect\", value=\"True\")\n a = st.sidebar.slider(\n \"kernel size\", min_value=3, max_value=49, step=2, value=11\n )\n blur = st.sidebar.slider(\n \"Blur strength\", min_value=1.0, max_value=20.0, step=0.1, value=11.0\n )\n\n if enable:\n self.show(file=super().gblur(img=img, ksize=(a, a), blur=blur))\n else:\n self.show(file=img)\n\n def sketch(self, img=None):\n\n if img is None:\n img = self.img\n\n st.sidebar.title(\"Params for **Sketch**\")\n\n auto = st.sidebar.checkbox(\"Auto Mode\", value=\"True\")\n enable = st.sidebar.checkbox(\"Enable effect\", value=\"True\")\n invert = st.sidebar.checkbox(\"Invert\", value=\"True\")\n detail = st.sidebar.slider(\n \"detail\", min_value=0.005, max_value=1.0, step=0.005, value=0.13\n )\n\n if auto:\n file = super().sketch(\n img=img, detail=detail, invert=invert, three_channel=True, auto=True\n )\n\n else:\n thresh1, thresh2 = st.sidebar.slider(\n \"threshold value\", value=[42, 200], min_value=0, max_value=255, step=1\n )\n size = st.sidebar.slider(\"Size\", min_value=3, max_value=7, step=2, value=3)\n\n file = super().sketch(\n img=img,\n thresh1=thresh1,\n thresh2=thresh2,\n size=size,\n detail=detail,\n invert=invert,\n three_channel=True,\n auto=False,\n )\n\n if enable:\n self.show(file=file)\n\n else:\n self.show(file=img)\n\n def gray(self, img=None):\n if img is None:\n img = self.img\n\n st.sidebar.title(\"Params for **Gray**\")\n enable = st.sidebar.checkbox(\"Enable effect\", value=\"True\")\n st.sidebar.markdown(\"**None**\")\n\n if enable:\n self.show(file=super().gray(img=img, three_channel=True))\n else:\n self.show(file=img)\n\n def hdr(self, img=None):\n if img is None:\n img = self.img\n\n st.sidebar.title(\"Params for **HDR**\")\n enable = st.sidebar.checkbox(\"Enable effect\", value=\"True\")\n param1 = st.sidebar.slider(\n \"param1\", min_value=0.0, max_value=200.0, step=0.5, value=9.\n )\n param2 = st.sidebar.slider(\n \"param2\", min_value=0.0, max_value=1.0, step=0.05, value=0.1\n )\n\n if enable:\n self.show(file=super().hdr(img=img, sigma_s=param1, sigma_r=param2))\n\n else:\n self.show(file=self.img)\n\n def emboss(self, img=None):\n if img is None:\n img = self.img\n\n st.sidebar.title(\"Params for **Emboss**\")\n enable = st.sidebar.checkbox(\"Enable effect\", value=\"True\")\n a = st.sidebar.slider(\"kernel size\", min_value=3, max_value=29, step=2)\n\n if enable:\n self.show(\n file=super().emboss(\n img=img,\n ksize=a,\n )\n )\n\n else:\n self.show(file=img)\n\n def invert(self, img=None):\n\n if img is None:\n img = self.img\n\n st.sidebar.title(\"Params for **Invert**\")\n st.sidebar.markdown(\"**None**\")\n\n self.show(file=super().invert(img=img))\n\n def hist_equalize(self, img=None):\n\n if img is None:\n img = self.img\n\n st.sidebar.title(\"Params for **Histogram Equalize**\")\n\n enable = st.sidebar.checkbox(\"Enable effect\", value=\"True\")\n enable_hist = st.sidebar.checkbox(\"Show Histogram\", value=\"False\")\n a = st.sidebar.slider(\n \"kernel size\", min_value=3, max_value=99, step=2, value=50\n )\n clip = st.sidebar.slider(\n \"Clip limit\", min_value=0.0, max_value=200.0, step=0.1, value=3.0\n )\n\n backend = st.sidebar.selectbox(\n \"Select Plotting 
backend\", [\"Matplotlib\", \"Altair\"]\n )\n\n if enable:\n file = super().hist_equalize(img=img, ksize=(a, a), clip_limit=clip)\n\n else:\n file = self.img\n\n self.show(file=file)\n if enable_hist:\n self.plot_histogram(img=file, backend=backend)\n\n def adjust_brightness(self, img=None):\n\n if img is None:\n img = self.img\n\n st.sidebar.title(\"Params for **Adjust Brightness**\")\n enable = st.sidebar.checkbox(\"Enable effect\", value=\"True\")\n gamma = st.sidebar.slider(\n \"Gamma\", min_value=0.01, max_value=10.0, step=0.1, value=0.8\n )\n if enable:\n self.show(file=super().adjust_brightness(img=img, param=gamma))\n\n else:\n self.show(file=img)\n\n def color_filters(self, img=None):\n\n if img is None:\n img = self.img\n\n cmaps = {\n \"AUTMN\": self.CMAP_AUTMN,\n \"BONE\": self.CMAP_BONE,\n \"JET\": self.CMAP_JET,\n \"WINTER\": self.CMAP_WINTER,\n \"RAINBOW\": self.CMAP_RAINBOW,\n \"OCEAN\": self.CMAP_OCEAN,\n \"SUMMER\": self.CMAP_SUMMER,\n \"SPRING\": self.CMAP_SPRING,\n \"COOL\": self.CMAP_COOL,\n \"HSV\": self.CMAP_HSV,\n \"PINK\": self.CMAP_PINK,\n \"HOT\": self.CMAP_HOT,\n }\n\n st.sidebar.title(\"Params for **Color Filters**\")\n enable = st.sidebar.checkbox(\"Enable effect\", value=\"True\")\n color = st.sidebar.selectbox(\n \"Color Maps\",\n cmaps,\n )\n\n if enable:\n self.show(file=super().color_filters(img=img, color_map=cmaps[color]))\n\n else:\n self.show(file=img)\n\n def cartoon(self, img=None):\n if img is None:\n img = cv.imread(self.__kid_image)\n\n st.sidebar.title(\"Params for **Cartoon Filter**\")\n enable = st.sidebar.checkbox(\"Enable effect\", value=\"True\")\n enable_uploaded = st.sidebar.checkbox(\"Use uploaded Image\", value=\"false\")\n sigma_color = st.sidebar.slider(\n \"Sigma Color\", min_value=0.01, max_value=90.0, step=0.1, value=18.0\n )\n sigma_space = st.sidebar.slider(\n \"Sigma Space\", min_value=0.01, max_value=90.0, step=0.1, value=10.0\n )\n a = st.sidebar.slider(\"kernel size\", min_value=2, max_value=20, step=1, value=8)\n iterations = st.sidebar.slider(\n \"Number of Passes\", min_value=1, max_value=10, step=1, value=8\n )\n\n if enable:\n if enable_uploaded:\n self.show(\n file=super().cartoon(\n img=self.img,\n sigma_color=sigma_color,\n sigma_space=sigma_space,\n ksize=a,\n iterations=iterations,\n )\n )\n else:\n self.show(\n file=super().cartoon(\n img=img,\n sigma_color=sigma_color,\n sigma_space=sigma_space,\n ksize=a,\n iterations=iterations,\n )\n )\n\n else:\n self.show(file=img)\n\n def plot_histogram(self, img=None, backend=\"Matplotlib\"):\n \"\"\"backend = 'Altair' or 'MPL'\"\"\"\n\n if img is None:\n img = self.img\n\n b, g, r = cv.split(img)\n\n if backend == \"Altair\":\n source = pd.DataFrame(\n {\n \"R\": r.ravel(),\n \"G\": g.ravel(),\n \"B\": b.ravel(),\n }\n )\n\n # TODO: assign right color to channels\n c = (\n alt.Chart(source)\n .transform_fold([\"R\", \"G\", \"B\"], as_=[\"Channel\", \"Range\"])\n .mark_area(opacity=0.7)\n .encode(\n alt.X(\"Range:Q\", bin=alt.Bin(maxbins=256), axis=None),\n alt.Y(\n \"count()\", stack=None, axis=None, scale=alt.Scale(type=\"log\")\n ),\n alt.Color(\"Channel:N\"),\n )\n )\n\n st.altair_chart(c, use_container_width=True)\n\n elif backend == \"Matplotlib\":\n fig, ax = plt.subplots()\n ax.set_axis_off()\n ax.set_yscale(\"log\")\n st.markdown(\"**Y axis is semilog**\")\n ax.hist(r.ravel(), 256, [0, 256], \"r\", alpha=0.6)\n ax.hist(g.ravel(), 256, [0, 256], \"g\", alpha=0.6)\n ax.hist(b.ravel(), 256, [0, 256], \"b\", alpha=0.6)\n st.pyplot(fig)\n\n\nif __name__ == 
\"__main__\":\n ui = interface()\n","repo_name":"im-AMS/Filters_Project","sub_path":"Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":13012,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"28869804331","text":"import pytest\nimport util\nimport nodetool\nimport json\n\ndef test_snapshots_table(scylla_only, cql, test_keyspace):\n with util.new_test_table(cql, test_keyspace, 'pk int PRIMARY KEY, v int') as table:\n cql.execute(f\"INSERT INTO {table} (pk, v) VALUES (0, 0)\")\n nodetool.take_snapshot(cql, table, 'my_tag', False)\n res = list(cql.execute(f\"SELECT keyspace_name, table_name, snapshot_name, live, total FROM system.snapshots\"))\n assert len(res) == 1\n ks, tbl = table.split('.')\n assert res[0][0] == ks\n assert res[0][1] == tbl\n assert res[0][2] == 'my_tag'\n\ndef test_clients(scylla_only, cql):\n columns = ', '.join([\n 'address',\n 'port',\n 'client_type',\n 'connection_stage',\n 'driver_name',\n 'driver_version',\n 'hostname',\n 'protocol_version',\n 'shard_id',\n 'ssl_cipher_suite',\n 'ssl_enabled',\n 'ssl_protocol',\n 'username',\n ])\n cls = list(cql.execute(f\"SELECT {columns} FROM system.clients\"))\n for cl in cls:\n assert(cl[0] == '127.0.0.1')\n assert(cl[2] == 'cql')\n\n# We only want to check that the table exists with the listed columns, to assert\n# backwards compatibility.\ndef _check_exists(cql, table_name, columns):\n cols = \", \".join(columns)\n assert list(cql.execute(f\"SELECT {cols} FROM system.{table_name}\"))\n\ndef test_protocol_servers(scylla_only, cql):\n _check_exists(cql, \"protocol_servers\", (\"name\", \"listen_addresses\", \"protocol\", \"protocol_version\"))\n\ndef test_runtime_info(scylla_only, cql):\n _check_exists(cql, \"runtime_info\", (\"group\", \"item\", \"value\"))\n\ndef test_versions(scylla_only, cql):\n _check_exists(cql, \"versions\", (\"key\", \"build_id\", \"build_mode\", \"version\"))\n\n# Check reading the system.config table, which should list all configuration\n# parameters. As we noticed in issue #10047, each type of configuration\n# parameter can have a different function for printing it out, and some of\n# those may be wrong so we want to check as many as we can - including\n# specifically the experimental_features option which was wrong in #10047\n# and #11003.\ndef test_system_config_read(scylla_only, cql):\n # All rows should have the columns name, source, type and value:\n rows = list(cql.execute(\"SELECT name, source, type, value FROM system.config\"))\n values = dict()\n for row in rows:\n values[row.name] = row.value\n # Check that experimental_features exists and makes sense.\n # It needs to be a JSON-formatted strings, and the strings need to be\n # ASCII feature names - not binary garbage as it was in #10047,\n # and not numbers-formatted-as-string as in #11003.\n assert 'experimental_features' in values\n obj = json.loads(values['experimental_features'])\n assert isinstance(obj, list)\n assert isinstance(obj[0], str)\n assert obj[0] and obj[0].isascii() and obj[0].isprintable()\n assert not obj[0].isnumeric() # issue #11003\n # Check formatting of tri_mode_restriction like\n # restrict_replication_simplestrategy. 
These need to be one of\n # allowed string values 0, 1, true, false or warn - but in particular\n # non-empty and printable ASCII, not garbage.\n assert 'restrict_replication_simplestrategy' in values\n obj = json.loads(values['restrict_replication_simplestrategy'])\n assert isinstance(obj, str)\n assert obj and obj.isascii() and obj.isprintable()\n","repo_name":"scylladb/scylladb","sub_path":"test/cql-pytest/test_virtual_tables.py","file_name":"test_virtual_tables.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":11533,"dataset":"github-code","pt":"29"} +{"seq_id":"21553700954","text":"#author: @katetrinkaus\n##read data from simulation output files to plot and analyse\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom statistics import mean\nimport math\n\n\n#User enters file name and varied parameter/corresponding labels for legend, axes, and plot titles\n\n\n#****************************************\n##Enter file name ###\nwith open('output_vary_wire_lengths_0.8to1.2um_0.1um_steps_d1nm_bl5um_uniforml_Rjcn100k_Rtube1_15samples_REPEAT copy.txt','r') as f: \n data = f.readlines()\n \n\n#arrays to store data points #list entry \nnetwork_dens_list=[] #0\nAF=[] #1\ntransmittancelist=[] #2\nresistancelist=[] #3\nresistance_stdev_list=[] #4\njcn_dens_list=[] #5\n\nRjcn=[] #6\nrho0=[] #7\nwire_diameter=[] #8\nwire_length=[] #9\nbox_length=[] #10\nsamples=[] #11\n\nnstep=[] #12\nn_initial=[] #13\nn_final=[] #14\ntol_minres=[] #15\ndisl=[] #16\nlower_l=[] #17\nsigmal=[] #18\njcn_removal=[] #19\n\ncalctime=[] #20\n\n#nanotube resistance to be calculated for each loop\nnanotube_res=[]\n\ni=0\nj=-1\nd_prev=100\nfor line in data:\n \n words = line.split()\n \n if i>0:\n dens=float(words[0])\n #print(dens)\n if dens Type[EmissionMap]:\n \"\"\"\n Returns\n -------\n Type[EmissionMap]\n The emission map class of the experiment configuration.\n \"\"\"\n return self.experiment_config.emission_map\n\n @property\n def experiment_name(self) -> str:\n \"\"\"\n Returns\n -------\n str\n The folder where the results are stored.\n \"\"\"\n return self.result_folder[self.result_folder.rfind(os.sep) + 1 :]\n\n @property\n def experiment_label(self) -> str:\n \"\"\"\n Returns\n -------\n str\n The label for the experiment, which identifies the agent class, agent gin config, MDP class, and MDP gin\n config.\n \"\"\"\n return (\n f\"{self.mdp_scope}{config.EXPERIMENT_SEPARATOR_PRMS}{self.mdp_class.__name__}\"\n + f\"{config.EXPERIMENT_SEPARATOR_MDP_AGENT}\"\n + f\"{self.agent_scope}{config.EXPERIMENT_SEPARATOR_PRMS}{self.agent_class.__name__}\"\n )\n\n @property\n def does_log_file_exists(self) -> bool:\n \"\"\"\n Returns\n -------\n bool\n True if the csv log file where the results of the interaction were supposed to be stored exists.\n \"\"\"\n lf = (\n ensure_folder(self.result_folder)\n + \"logs\"\n + os.sep\n + self.experiment_label\n + f\"{os.sep}seed{self.seed}_logs.csv\"\n )\n return os.path.exists(lf)\n\n def __str__(self):\n return f\"{self.experiment_name} for seed:{self.seed}, \" + self.experiment_label\n\n def __repr__(self):\n return str(self)\n","repo_name":"MichelangeloConserva/Colosseum","sub_path":"colosseum/experiment/experiment_instance.py","file_name":"experiment_instance.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"4"} +{"seq_id":"28943889743","text":"#Daily 16S and 18S\nimport matplotlib.pyplot as plt\nimport csv\nfrom colorsys import 
hls_to_rgb\nimport numpy\nimport random\nimport statsmodels.stats.multitest as smm\n\nfn_16S, fn_18S = '16S_daily_percent_grouped.csv', '18S_daily_percent_grouped.csv'\nsim_16S, sim_18S = '16S_daily_simper.csv', '18S_daily_simper.csv'\nord_16S, ord_18S = '16S_3_order.csv', '18S_3_order.csv'\ntax_16S, tax_18S = '16S_taxonomy.csv', '18S_taxonomy.csv'\nmeta_16S, meta_18S = '16S_daily_meta.csv', '18S_daily_meta.csv'\n\ndef simper_mothur(fn, order, meta, tax, rng):\n with open(order, 'rU') as f:\n rows = []\n for r in csv.reader(f):\n rows.append(r)\n simp_order, cont = [], []\n for row in range(len(rows)):\n if row > 0 and row < 6:\n simp_order.append(rows[row][0])\n cont.append(float(rows[row][1])*100)\n with open(tax, 'rU') as f:\n rows = []\n for row in csv.reader(f):\n rows.append(row)\n tax = []\n for a in range(len(simp_order)):\n for b in range(len(rows)):\n if simp_order[a] == rows[b][0]:\n phylo = [rows[b][2], rows[b][3], rows[b][4], rows[b][5], rows[b][6]]\n if phylo[4][-12:] != 'unclassified':\n this_tax = r'$'+str(phylo[4])+'$'\n else:\n this_tax = phylo[4][:-13]\n tax.append(this_tax) \n print_otus = []\n for c in simp_order:\n totu = 'OTU'\n d = 0\n while d < len(c):\n if d > 2 and c[d] != '0':\n totu += c[d:]\n d = len(c)\n d += 1\n totu += '\\n'\n print_otus.append(totu)\n with open(fn, 'rU') as f:\n rows = []\n for row in csv.reader(f):\n rows.append(row)\n simp_rows = []\n for e in range(len(simp_order)):\n for f in range(len(rows)):\n if rows[f][0] == simp_order[e]:\n simp_rows.append(rows[f])\n krusk, krusk_p, treat_mean, treat_sd = [], [], [], []\n for g in simp_rows:\n krusk.append(float(g[-2]))\n krusk_p.append(float(g[-1]))\n this_mean, this_sd = [], []\n for h in range(rng):\n h += 1\n if h % 2 != 0:\n this_mean.append(float(g[h])*100)\n else:\n this_sd.append(float(g[h])*100)\n treat_mean.append(this_mean)\n treat_sd.append(this_sd)\n krusk_p = smm.fdrcorrection(krusk_p)[1]\n return krusk, krusk_p, treat_mean, treat_sd, cont, print_otus, tax\n\ndef simper_plot_mothur(krusk, krusk_p, treat_mean, treat_sd, cont, print_otus, tax, axis, x, xlim, ylim, colors, xtxt, ytxt, ylab):\n for a in range(len(axis)):\n ax = axis[a]\n if a > 0:\n plt.setp(ax.get_yticklabels(), visible=False)\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n ax.bar(x, treat_mean[a], yerr=treat_sd[a], color=colors, error_kw=dict(ecolor='gray', lw=1, capsize=3, capthick=1, alpha=0.5), edgecolor='k')\n if tax[a] == 'Eukaryota':\n tax[a] = r'$(Cafeteria)$'\n title = print_otus[a]+tax[a]+'\\n'\n title += 'SIMPER: %.0f'%cont[a]+'%'\n ax.set_title(title, fontsize=8)\n h, p = 'H=%.2f'%krusk[a], '${'+r'p = '+'}$'+'%.3f'%float(krusk_p[a])\n ax.text(xtxt, ytxt, h+', '+p, va='bottom', ha='left', color='#bd0303', fontsize=8)\n ax.tick_params(axis='y',which='both',left='on',right='off')\n ax.tick_params(axis='x',which='both',top='off',bottom='on')\n plt.setp(ax, xticks=[1, 2, 3, 4], xticklabels=[1, 2, 3, 4])\n ax.set_xlim([0.5, 4.5])\n ylab += '\\n Relative abundance (%)'\n axis[0].set_ylabel(ylab, fontsize=8)\n return\n \ndef get_distinct_colors(n):\n colors = []\n for i in numpy.arange(0., 360., 360. 
/ n):\n h = i / 360.\n l = (50 + numpy.random.rand() * 10) / 100.\n s = (90 + numpy.random.rand() * 10) / 100.\n colors.append(hls_to_rgb(h, l, s))\n random.shuffle(colors)\n return colors\n \ndef get_tax_mothur(otus, tax):\n with open(tax, 'rU') as f:\n rows = []\n for row in csv.reader(f):\n rows.append(row)\n tax = []\n for a in range(len(otus)):\n for b in range(len(rows)):\n if otus[a] == rows[b][0]:\n phylo = [rows[b][2], rows[b][3], rows[b][4], rows[b][5], rows[b][6]]\n if phylo[4][-12:] != 'unclassified' or phylo[4][-12:] == '':\n this_tax = r'$'+str(phylo[4])+'$'\n else:\n this_tax = phylo[4][:-13]\n for c in range(len(this_tax)):\n if this_tax[c] == '_':\n this_tax = this_tax[:c]+' '+this_tax[c+1:]\n tax.append(this_tax)\n for c in range(len(otus)):\n totu = 'OTU'\n d = 3\n while otus[c][d] == '0':\n d += 1\n totu += otus[c][d:]+' '\n otus[c] = totu\n for e in range(len(otus)):\n otus[e] += tax[e]\n return otus\n \ndef barplot_mothur(fn, tax, lim, ax, alpha):\n with open(fn, 'rU') as f:\n rows = []\n for row in csv.reader(f):\n rows.append(row)\n otus, samples = [], []\n for r in range(len(rows)):\n if r > 0:\n this_row = []\n for c in range(len(rows[r])):\n if c == 0:\n otus.append(rows[r][c])\n else:\n this_row.append(float(rows[r][c])*100)\n samples.append(this_row)\n other, new_samples, new_otus = [], [], []\n for a in range(len(samples[0])):\n other.append(0)\n for b in range(len(samples)):\n if max(samples[b]) > lim:\n new_samples.append(samples[b])\n new_otus.append(otus[b])\n else:\n for c in range(len(samples[b])):\n other[c] += samples[b][c]\n otus = get_tax_mothur(new_otus, tax)\n otus.append('Other')\n for c in range(len(otus)):\n if otus[c] == 'OTU2 Bacteria':\n otus[c] = 'OTU2 '+r'$(Spirochaeta)$'\n if otus[c] == 'OTU190 Bacteria':\n otus[c] = 'OTU190 '+r'$(Thermobrachium)$'\n if otus[c] == 'OTU1 Eukaryota':\n otus[c] = 'OTU1 '+r'$(Cafeteria)$'\n if otus[c] == 'OTU2 Eukaryota':\n otus[c] = 'OTU2 '+r'$(Cafeteria)$'\n new_samples.append(other)\n data = numpy.array(new_samples)\n bottom = numpy.cumsum(data, axis=0)\n x = [1, 2, 3, 4]\n colors = get_distinct_colors(len(new_samples))\n ax.bar(x, data[0], color=colors[0], label=otus[0], alpha=alpha, edgecolor='k')\n for j in range(1, data.shape[0]):\n ax.bar(x, data[j], color=colors[j], bottom=bottom[j-1], label=otus[j], alpha=alpha, edgecolor='k')\n ax.set_ylim([0, 100])\n ax.legend(bbox_to_anchor=(1.0, 1.01), fontsize=7)\n plt.setp(ax, xticks=[1, 2, 3, 4], xticklabels=[1, 2, 3, 4])\n ax.set_xlim([0.5, 4.5])\n return\n\n\nfig = plt.figure(figsize=(8.27, 12))\nh, w, ss1, ss2 = 5, 10, 3, 4\nl16S = plt.subplot2grid((h,12), (0,0), colspan=3, rowspan=3)\nl18S = plt.subplot2grid((h,12), (0,6), colspan=3, rowspan=3, sharey=l16S, sharex=l16S)\ns1_16S = plt.subplot2grid((h,w), (ss1,0), colspan=2)\ns2_16S, s3_16S, s4_16S, s5_16S = plt.subplot2grid((h,w), (ss1,2), sharey=s1_16S, colspan=2), plt.subplot2grid((h,w), (ss1,4), sharey=s1_16S, colspan=2), plt.subplot2grid((h,w), (ss1,6), sharey=s1_16S, colspan=2), plt.subplot2grid((h,w), (ss1,8), sharey=s1_16S, colspan=2)\ns1_18S = plt.subplot2grid((h,w), (ss2,0), colspan=2)\ns2_18S, s3_18S, s4_18S, s5_18S = plt.subplot2grid((h,w), (ss2,2), sharey=s1_18S, colspan=2), plt.subplot2grid((h,w), (ss2,4), sharey=s1_18S, colspan=2), plt.subplot2grid((h,w), (ss2,6), sharey=s1_18S, colspan=2), plt.subplot2grid((h,w), (ss2,8), sharey=s1_18S, colspan=2)\ns16S, s18S = [s1_16S, s2_16S, s3_16S, s4_16S, s5_16S], [s1_18S, s2_18S, s3_18S, s4_18S, s5_18S]\nremovey = [s2_16S, s3_16S, s4_16S, s5_16S, s2_18S, 
s3_18S, s4_18S, s5_18S]\nremovex = [s1_16S, s2_16S, s3_16S, s4_16S, s5_16S]\nfsl, fst, fsst, fspv = 7, 10, 8, 8\ncols_16S, cols_18S = ['#33FFFF', '#33CCFF', '#3399FF', '#3366FF'], ['#CCFF99', '#66FF66', '#009900', '#006400']\n\nfor a in removey:\n plt.setp(a.get_yticklabels(), visible=False)\n\nkrusk, krusk_p, treat_mean, treat_sd, cont, print_otus, tax = simper_mothur(sim_16S, ord_16S, meta_16S, tax_16S, 8)\nsimper_plot_mothur(krusk, krusk_p, treat_mean, treat_sd, cont, print_otus, tax, s16S, [1, 2, 3, 4], [0.75, 5], [0, 40], cols_16S, 0.6, 35, '')\nkrusk, krusk_p, treat_mean, treat_sd, cont, print_otus, tax = simper_mothur(sim_18S, ord_18S, meta_18S, tax_18S, 8)\nsimper_plot_mothur(krusk, krusk_p, treat_mean, treat_sd, cont, print_otus, tax, s18S, [1, 2, 3, 4], [0.75, 5], [0, 90], cols_18S, 0.6, 79, '')\n\nbarplot_mothur(fn_16S, tax_16S, 1, l16S, 0.8)\nbarplot_mothur(fn_18S, tax_18S, 1, l18S, 0.5)\n\n\nl16S.set_title('16S rRNA gene', fontsize=14)\nl18S.set_title('18S rRNA gene', fontsize=14)\nl16S.text(-0.98, 99, 'A', fontsize=16, weight='bold')\nl16S.text(-0.98, -16, 'B', fontsize=16, weight='bold')\n\n\nl16S.set_ylabel('Relative abundance (%)')\nl16S.set_xlabel('Days')\nl18S.set_xlabel('Days')\ns3_18S.set_xlabel('Days')\ns1_16S.text(-1.4, 20, '16S rRNA gene', fontsize=10, ha='center', va='center', rotation=90)\ns1_18S.text(-1.6, 50, '18S rRNA gene', fontsize=10, ha='center', va='center', rotation=90)\nplt.setp(l18S.get_yticklabels(), visible=False)\nfig.subplots_adjust(hspace=1, wspace=0.4)\nplt.savefig('16S and 18S daily.png', bbox_inches='tight', dpi=600)\n\n\n\n\n","repo_name":"R-Wright-1/ChitinActivity","sub_path":"CommunityAnalysis/mothur_DADA/daily_analysis_2.py","file_name":"daily_analysis_2.py","file_ext":"py","file_size_in_byte":9384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"74061180917","text":"from django.conf import settings\nfrom django.urls import include, path\n\nfrom rest_framework.routers import DefaultRouter, SimpleRouter\nfrom rest_framework_simplejwt.views import TokenRefreshView\n\n# from training_fyg.users.views.auth import (\n# UserAuthNonAtomicViewSet,\n# UserAuthViewSet,\n# )\n# from training_fyg.users.views.user_addresses import UserAddressesViewSet\n# from training_fyg.users.views.users import UserViewSet\n\nif settings.DEBUG:\n router = DefaultRouter()\nelse:\n router = SimpleRouter()\n\n# # Auth\n# router.register(\"auth\", UserAuthViewSet, basename=\"auth\")\n# router.register(\"auth\", UserAuthNonAtomicViewSet, basename=\"auth_not_atomic\")\n# router.register(\"users\", UserViewSet, basename=\"users\")\n# router.register(\"user-addresses\", UserAddressesViewSet, basename=\"user-addresses\")\n\napp_name = \"api\"\n\nurlpatterns = [\n path(\"\", include(\"training_fyg.categories.urls\")),\n path(\"\", include(\"training_fyg.courses.urls\")),\n path(\"\", include(\"training_fyg.users.urls\")),\n path(\n \"token/\",\n include(\n [ # noqa DJ05\n path(\"refresh/\", TokenRefreshView.as_view(), name=\"token_refresh\")\n ]\n ),\n ),\n]\n","repo_name":"TlaloCode/TechVolt","sub_path":"config/api_router.py","file_name":"api_router.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"26330573735","text":"\n\"\"\"\nThis is a python implementation of the bogo sort algorithm.\nDOCtest\n\npython3 -m doctest -v bogosort.py\n\nparam collection: some mutable ordered collection with heterogeneous comparable items\ninside.\nreturn: the same collection 
ordered by ascending.\nExamples:\nbogosort([0, 5, 3, 2, 2])\n[0, 2, 2, 3, 5]\n\"\"\"\n\nfrom __future__ import print_function\nimport random\n\ndef bogosort(collection):\n\n\n\n def isSorted(collection):\n if len(collection) < 2:\n return True\n for i in range(len(collection)-1):\n if collection[i] > collection[i+1]:\n return False\n return True\n\n while not isSorted(collection):\n random.shuffle(collection)\n return collection\n\nif __name__ == '__main__':\n try:\n raw_input\n except NameError:\n raw_input = input\n\n user_input = raw_input('Enter numbers separated by a comma: \\n').strip()\n unsorted = [int(item) for item in user_input.split(',')]\n print(bogosort(unsorted))\n","repo_name":"omhmichaels/Gucci-Lemonade","sub_path":"Algorithms.py","file_name":"Algorithms.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"42242281503","text":"G_BUSY=89\nH_BUSY=105\nH_REQ=104\n\nfrom time import sleep\n\nimport ctypes\nc = ctypes\nlib23lc1024 = c.CDLL(\"lib23lc1024.so\")\nl=lib23lc1024\nl._SPI_init()\nl._23lc1024_reset()\n\nimport gpio\ng=gpio\n\n\ng.pinMode(G_BUSY,\"in\")\ng.pinMode(H_BUSY,\"in\")\ng.pinMode(H_REQ,\"out\")\ng.digitalWrite(H_REQ,0)\n\ndef request():\n g.digitalWrite(H_REQ,1)\n while 1:\n if g.digitalRead(H_BUSY)==1 : break\n sleep(0.001)\n\n\ndef release():\n g.digitalWrite(H_REQ,0)\n\n\ndef write(addr,data,size):\n request()\n l._23lc1024_write(addr,data,size)\n release()\n\n\ndef read(addr,data,size):\n request()\n l._23lc1024_read(addr,data,size)\n release()\n\n\ndef read_str(addr,size):\n temp_str = c.create_string_buffer(size)\n temp_str_p = c.pointer(temp_str)\n read(addr, temp_str_p,size)\n return temp_str.value\n\n\ndef test_write_str(data):\n size_t = len(data)+1\n temp_int_size = c.c_uint(size_t)\n temp_size_t_p = c.pointer(temp_int_size)\n\n temp_str = c.create_string_buffer(size_t)\n temp_str.value = data\n temp_str_p = c.pointer(temp_str)\n\n request()\n l._23lc1024_write(0,temp_size_t_p ,4)\n l._23lc1024_write(4,temp_str_p,size_t)\n release()\n\n\ndef test_read_str():\n return\n\n\n\n","repo_name":"panjingwei1945/udoo_spi","sub_path":"test_23lc1024_python/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"5001192511","text":"\"\"\" Delivers and fetches data from the database\n By: Isak Sylvin, @sylvinite\"\"\"\n\n#!/usr/bin/env python\n\nimport hashlib\nimport sys\nimport warnings\n\nfrom collections import OrderedDict\nfrom datetime import datetime, timezone\nfrom sqlalchemy import *\nfrom sqlalchemy.orm import sessionmaker\nfrom dateutil.parser import parse\n\n# maintain the same connection per thread\nfrom sqlalchemy.pool import SingletonThreadPool\nfrom typing import Dict, List\n\nfrom microSALT import __version__\nfrom microSALT.store.orm_models import (\n app,\n Collections,\n Expacs,\n Projects,\n Reports,\n Resistances,\n Samples,\n Seq_types,\n Versions,\n)\nfrom microSALT.store.models import Profiles, Novel\n\n\nclass DB_Manipulator:\n def __init__(self, config, log):\n self.config = config\n self.logger = log\n self.engine = create_engine(\n app.config[\"SQLALCHEMY_DATABASE_URI\"], poolclass=SingletonThreadPool\n )\n Session = sessionmaker(bind=self.engine)\n self.session = Session()\n self.metadata = MetaData(self.engine)\n self.profiles = Profiles(self.metadata, self.config, self.logger).tables\n self.novel = 
Novel(self.metadata, self.config, self.logger).tables\n # Turns off pymysql deprecation warnings until they can update their code\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n self.create_tables()\n\n def create_tables(self):\n \"\"\"Creates all tables individually. A bit more control than usual\"\"\"\n if not self.engine.dialect.has_table(self.engine, \"projects\"):\n Projects.__table__.create(self.engine)\n self.logger.info(\"Created projects table\")\n if not self.engine.dialect.has_table(self.engine, \"samples\"):\n Samples.__table__.create(self.engine)\n self.logger.info(\"Created samples table\")\n if not self.engine.dialect.has_table(self.engine, \"versions\"):\n Versions.__table__.create(self.engine)\n self.logger.info(\"Created versions table\")\n if not self.engine.dialect.has_table(self.engine, \"seq_types\"):\n Seq_types.__table__.create(self.engine)\n self.logger.info(\"Created sequencing types table\")\n if not self.engine.dialect.has_table(self.engine, \"resistances\"):\n Resistances.__table__.create(self.engine)\n self.logger.info(\"Created resistance table\")\n if not self.engine.dialect.has_table(self.engine, \"reports\"):\n Reports.__table__.create(self.engine)\n self.logger.info(\"Created reports table\")\n if not self.engine.dialect.has_table(self.engine, \"collections\"):\n Collections.__table__.create(self.engine)\n self.logger.info(\"Created collections table\")\n if not self.engine.dialect.has_table(self.engine, \"expacs\"):\n Expacs.__table__.create(self.engine)\n self.logger.info(\"Created ExPEC table\")\n for k, v in self.profiles.items():\n if not self.engine.dialect.has_table(self.engine, \"profile_{}\".format(k)):\n self.profiles[k].create()\n self.init_profiletable(k, v)\n self.add_rec(\n {\"name\": \"profile_{}\".format(k), \"version\": \"0\"},\n \"Versions\",\n force=True,\n )\n self.logger.info(\"Profile table profile_{} initialized\".format(k))\n for k, v in self.novel.items():\n if not self.engine.dialect.has_table(self.engine, \"novel_{}\".format(k)):\n self.novel[k].create()\n self.add_rec(\n {\"name\": \"novel_{}\".format(k), \"version\": \"0\"},\n \"Versions\",\n force=True,\n )\n self.logger.info(\"Profile table novel_{} initialized\".format(k))\n\n def add_rec(self, data_dict: Dict[str, str], tablename: str, force=False):\n \"\"\"Adds a record to the specified table through a dict with columns as keys.\"\"\"\n pk_list = list()\n # Non-orm\n if not isinstance(tablename, str):\n # check for existence\n table = tablename\n pk_list = table.primary_key.columns.keys()\n args = list()\n for pk in pk_list:\n args.append(\"table.c.{}=={}\".format(pk, data_dict[pk]))\n args = \"or_(\" + \",\".join(args) + \")\"\n exist = self.session.query(table).filter(eval(args)).all()\n # Add record\n if len(exist) == 0:\n data = table.insert()\n #Loads any dates as datetime objects\n for k, v in data_dict.items():\n if isinstance(v, str):\n try:\n parse(v, fuzzy=False)\n data_dict[k] = datetime.strptime(v, '%Y-%m-%d %H:%M:%S')\n except ValueError as ve:\n if len(ve.args) > 0 and ve.args[0].startswith('unconverted data remains: '):\n data_dict[k] = datetime.strptime(v, '%Y-%m-%d %H:%M:%S.%f')\n else:\n pass\n data.execute(data_dict)\n self.logger.info(\"Added entry to table {}\".format(tablename.fullname))\n # ORM\n else:\n try:\n table = eval(tablename)\n # Check for existing entry\n pk_list = table.__table__.primary_key.columns.keys()\n except Exception as e:\n self.logger.error(\n \"Attempted to access table {} which has not been 
created\".format(\n tablename\n )\n )\n pk_values = list()\n for item in pk_list:\n pk_values.append(data_dict[item])\n existing = self.session.query(table).get(pk_values)\n # Add record\n if not existing or force:\n newobj = table()\n #Loads any dates as datetime objects\n for k, v in data_dict.items():\n if isinstance(v, str):\n try:\n parse(v, fuzzy=False)\n data_dict[k] = datetime.strptime(v, '%Y-%m-%d %H:%M:%S')\n except ValueError as ve:\n if len(ve.args) > 0 and ve.args[0].startswith('unconverted data remains: '):\n data_dict[k] = datetime.strptime(v, '%Y-%m-%d %H:%M:%S.%f')\n else:\n pass\n for k, v in data_dict.items():\n setattr(newobj, k, v)\n self.session.add(newobj)\n self.session.commit()\n else:\n self.logger.warning(\n \"Record [{}]=[{}] in table {} already exists\".format(\n \", \".join(pk_list), \", \".join(pk_values), tablename\n )\n )\n def upd_rec(\n self, req_dict: Dict[str, str], tablename: str, upd_dict: Dict[str, str]\n ):\n \"\"\"Updates a record to the specified table through a dict with columns as keys.\"\"\"\n table = eval(tablename)\n argy = list()\n for k, v in req_dict.items():\n if v != None:\n argy.append(\".filter(table.{}=='{}')\".format(k, v))\n filter = \"\".join(argy)\n megastring = \"self.session.query(table){}\".format(filter)\n if len(eval(megastring + \".all()\")) > 1:\n self.logger.error(\"More than 1 record found when orm updating. Exited.\")\n sys.exit()\n else:\n eval(megastring + \".update(upd_dict)\")\n self.session.commit()\n\n def purge_rec(self, name: str, type: str):\n \"\"\"Removes seq_data, resistances, sample(s) and possibly project\"\"\"\n entries = list()\n if type == \"Projects\":\n entries.append(\n self.session.query(Expacs)\n .filter(Expacs.CG_ID_sample.like(\"{}%\".format(name)))\n .all()\n )\n entries.append(\n self.session.query(Seq_types)\n .filter(Seq_types.CG_ID_sample.like(\"{}%\".format(name)))\n .all()\n )\n entries.append(\n self.session.query(Resistances)\n .filter(Resistances.CG_ID_sample.like(\"{}%\".format(name)))\n .all()\n )\n entries.append(\n self.session.query(Samples)\n .filter(Samples.CG_ID_sample.like(\"{}%\".format(name)))\n .all()\n )\n # entries.append(self.session.query(Projects).filter(Projects.CG_ID_project==name).all())\n elif type == \"Samples\":\n entries.append(\n self.session.query(Expacs).filter(Expacs.CG_ID_sample == name).all()\n )\n entries.append(\n self.session.query(Seq_types)\n .filter(Seq_types.CG_ID_sample == name)\n .all()\n )\n entries.append(\n self.session.query(Resistances)\n .filter(Resistances.CG_ID_sample == name)\n .all()\n )\n entries.append(\n self.session.query(Samples).filter(Samples.CG_ID_sample == name).all()\n )\n elif type == \"Collections\":\n entries.append(\n self.session.query(Collections)\n .filter(Collections.ID_collection == name)\n .all()\n )\n else:\n self.logger.error(\n \"Incorrect type {} specified for removal of {}. 
Check code\".format(\n type, name\n )\n )\n sys.exit()\n for entry in entries:\n for instance in entry:\n self.session.delete(instance)\n self.session.commit()\n self.logger.info(\"Removed information for {}\".format(name))\n\n def query_rec(self, tablename: str, filters: Dict[str, str]):\n \"\"\"Fetches records table, using a primary-key dict with columns as keys.\n Non-PK are ignored\"\"\"\n # Non-orm\n if not isinstance(tablename, str):\n # check for existence\n table = tablename\n pk_list = table.primary_key.columns.keys()\n args = list()\n for k, v in filters.items():\n args.append(\"table.c.{}=={}\".format(k, v))\n args = \"or_(\" + \",\".join(args) + \")\"\n exist = self.session.query(table).filter(eval(args)).all()\n return exist\n # ORM\n else:\n table = eval(tablename)\n args = list()\n for k, v in filters.items():\n if v != None:\n args.append(\"table.{}=='{}'\".format(k, v))\n filter = \" and \".join(args)\n entries = self.session.query(table).filter(eval(filter)).all()\n return entries\n\n def top_index(self, table_str: str, filters: Dict[str, str], column: str):\n \"\"\"Fetches the top index from column of table, by applying a dict with columns as keys.\"\"\"\n table = eval(table_str)\n args = list()\n for k, v in filters.items():\n if v != None:\n args.append(\"table.{}=='{}'\".format(k, v))\n filter = \" and \".join(args)\n entry = (\n self.session.query(table)\n .filter(eval(filter))\n .order_by(desc(eval(\"{}.{}\".format(table_str, column))))\n .limit(1)\n .all()\n )\n if entry == []:\n return int(-1)\n else:\n return eval(\"entry[0].{}\".format(column))\n\n def reload_profiletable(self, organism: str):\n \"\"\"Drop the named non-orm table, then load it with fresh data\"\"\"\n table = self.profiles[organism]\n self.profiles[organism].drop()\n self.profiles[organism].create()\n self.init_profiletable(organism, table)\n\n def init_profiletable(self, filename: str, table):\n \"\"\"Creates profile tables by looping, since a lot of infiles exist\"\"\"\n data = table.insert()\n linedict = dict.fromkeys(table.c.keys())\n with open(\"{}/{}\".format(self.config[\"folders\"][\"profiles\"], filename), \"r\") as fh:\n # Skips header\n head = fh.readline()\n head = head.rstrip().split(\"\\t\")\n for line in fh:\n line = line.rstrip().split(\"\\t\")\n index = 0\n while index < len(line):\n linedict[head[index]] = line[index]\n index = index + 1\n data.execute(linedict)\n\n def get_columns(self, tablename: str):\n \"\"\" Returns all records for a given ORM table\"\"\"\n table = eval(tablename)\n return dict.fromkeys(table.__table__.columns.keys())\n\n def exists(self, table, item: Dict[str, str]):\n \"\"\" Takes a k-v pair and checks for the entrys existence in the given table \"\"\"\n filterstring = \"\"\n for k, v in item.items():\n filterstring += \"{}.{}=='{}',\".format(table, k, v)\n filterstring = filterstring[:-1]\n table = eval(table)\n entry = self.session.query(table).filter(eval(filterstring)).scalar()\n if entry is None:\n return False\n else:\n return True\n\n def get_version(self, name: str):\n \"\"\" Gets the version from a given name. 
Should be generalized to return any value for any input\"\"\"\n version = self.session.query(Versions).filter(Versions.name == name).scalar()\n if version is None:\n return \"0\"\n else:\n return version.version\n\n def get_report(self, name: str):\n # Sort based on version\n prev_report = []\n prev_reports = (\n self.session.query(Reports)\n .filter(Reports.CG_ID_project == name)\n .order_by(desc(Reports.version))\n .all()\n )\n if len(prev_reports) > 0:\n prev_report = prev_reports[0]\n return prev_report\n\n def set_report(self, name: str):\n # Generate string\n totalstring = list()\n dt = datetime.now()\n default_method = \"Not in LIMS\"\n samples = (\n self.session.query(Samples)\n .filter(Samples.CG_ID_project == name)\n .order_by(desc(Samples.CG_ID_sample))\n .all()\n )\n for sample in samples:\n if sample.date_libprep:\n totalstring.append(\n str(\n datetime.timestamp(\n sample.date_libprep.replace(tzinfo=timezone.utc)\n )\n )\n )\n else:\n totalstring.append(\n str(datetime.timestamp(datetime.min.replace(tzinfo=timezone.utc)))\n )\n\n if sample.method_libprep:\n totalstring.append(sample.method_libprep)\n else:\n totalstring.append(default_method)\n\n if sample.date_sequencing:\n totalstring.append(\n str(\n datetime.timestamp(\n sample.date_sequencing.replace(tzinfo=timezone.utc)\n )\n )\n )\n else:\n totalstring.append(\n str(datetime.timestamp(datetime.min.replace(tzinfo=timezone.utc)))\n )\n\n if sample.method_sequencing:\n totalstring.append(sample.method_sequencing)\n else:\n totalstring.append(default_method)\n\n totalstring.append(__version__)\n totalstring = \"\".join(totalstring).encode()\n hashstring = hashlib.md5(totalstring).hexdigest()\n\n prev_report = self.get_report(name)\n # Compare\n if prev_report:\n if (\n \"steps_aggregate\" in dir(prev_report)\n and prev_report.steps_aggregate != hashstring\n ):\n self.add_rec(\n {\n \"CG_ID_project\": name,\n \"steps_aggregate\": hashstring,\n \"date\": dt,\n \"version\": prev_report.version + 1,\n },\n \"Reports\",\n )\n else:\n self.add_rec(\n {\n \"CG_ID_project\": name,\n \"steps_aggregate\": hashstring,\n \"date\": dt,\n \"version\": 1,\n },\n \"Reports\",\n )\n\n def sync_novel(self, overwrite=False, sample=\"\"):\n \"\"\"Looks at each novel table. 
See if any record has a profile match in the profile table.\n Updates these based on parameters\"\"\"\n prequery = self.session.query(Samples)\n\n for org, novel_table in self.novel.items():\n novel_list = self.session.query(novel_table).all()\n org_keys = novel_table.c.keys()\n profile_list = self.session.query(self.profiles[org]).all()\n # Filter\n for novel in novel_list:\n args = list()\n for key in org_keys:\n if key != \"ST\" and key != \"clonal_complex\" and key != \"species\":\n args.append(\n \"self.profiles[org].c.{}=={}\".format(\n key, eval(\"novel.{}\".format(key))\n )\n )\n args = \"and_(\" + \",\".join(args) + \")\"\n exist = self.session.query(self.profiles[org]).filter(eval(args)).all()\n\n if exist:\n exist = exist[0]\n if sample == \"\":\n onelap = prequery.filter(\n and_(\n Samples.ST == novel.ST,\n Samples.organism == org,\n Samples.ST <= -10,\n )\n ).all()\n else:\n onelap = prequery.filter(\n and_(\n Samples.ST == novel.ST,\n Samples.organism == org,\n Samples.ST <= -10,\n Samples.CG_ID_sample == sample,\n )\n ).all()\n for entry in onelap:\n # review\n if entry.pubmlst_ST == -1 and not overwrite:\n self.logger.info(\n \"Update: Sample {} of organism {}; Internal ST {} is now linked to {} '{}'\".format(\n entry.CG_ID_sample, org, novel.ST, exist.ST, exist\n )\n )\n self.upd_rec(\n {\"CG_ID_sample\": entry.CG_ID_sample},\n \"Samples\",\n {\"pubmlst_ST\": exist.ST},\n )\n # overwrite\n elif overwrite:\n self.logger.info(\n \"Replacement: Sample {} of organism {}; Internal ST {} is now {} '{}'\".format(\n entry.CG_ID_sample, org, novel.ST, exist.ST, exist\n )\n )\n self.upd_rec(\n {\"CG_ID_sample\": entry.CG_ID_sample},\n \"Samples\",\n {\"ST\": exist.ST, \"pubmlst_ST\": exist.ST},\n )\n\n def rm_novel(self, sample=\"\"):\n \"\"\"Flags a sample as pubMLST resolved by merit of ignoring it\"\"\"\n query = self.session.query(Samples).filter(Samples.CG_ID_sample == sample).all()\n if len(query) > 0:\n self.logger.info(\n \"Ignore: Sample {} from organism {} with ST {}; is now flagged as resolved.\".format(\n query[0].CG_ID_sample, query[0].organism, query[0].ST\n )\n )\n self.upd_rec(\n {\"CG_ID_sample\": query[0].CG_ID_sample}, \"Samples\", {\"pubmlst_ST\": 0}\n )\n else:\n self.logger.error(\n \"Sample {} not found in database. 
Verify name\".format(sample)\n )\n\n def list_unresolved(self):\n \"\"\"Lists all novel samples that current havent been flagged as resolved\"\"\"\n # ST currently not updated at all\n novelbkt = OrderedDict()\n prequery = (\n self.session.query(Samples)\n .filter(and_(Samples.ST <= -10, Samples.pubmlst_ST == -1))\n .all()\n )\n for entry in prequery:\n if not entry.organism in novelbkt:\n novelbkt[entry.organism] = dict()\n if not entry.ST in novelbkt[entry.organism]:\n novelbkt[entry.organism][entry.ST] = list()\n novelbkt[entry.organism][entry.ST].append(entry.CG_ID_sample)\n novelbkt = OrderedDict(sorted(novelbkt.items(), key=lambda t: t[0]))\n\n # ST updated on pubMLST but not marked as resolved:\n novelbkt2 = OrderedDict()\n postquery = (\n self.session.query(Samples)\n .filter(\n and_(\n Samples.ST <= -10, Samples.pubmlst_ST != -1, Samples.pubmlst_ST != 0\n )\n )\n .all()\n )\n for entry in postquery:\n if not entry.organism in novelbkt2:\n novelbkt2[entry.organism] = dict()\n if not entry.ST in novelbkt2[entry.organism]:\n novelbkt2[entry.organism][entry.ST] = list()\n novelbkt2[entry.organism][entry.ST].append(entry.CG_ID_sample)\n\n # Unresolved samples and their respective error flags:\n novelbkt3 = OrderedDict()\n naquery = (\n self.session.query(Samples)\n .filter(and_(Samples.ST < 0, Samples.ST > -10, Samples.pubmlst_ST != 0))\n .all()\n )\n for entry in naquery:\n if not entry.ST in novelbkt3:\n novelbkt3[entry.ST] = dict()\n if not entry.organism in novelbkt3[entry.ST]:\n novelbkt3[entry.ST][entry.organism] = list()\n novelbkt3[entry.ST][entry.organism].append(entry.CG_ID_sample)\n novelbkt3 = OrderedDict(\n sorted(novelbkt3.items(), key=lambda t: t[0], reverse=True)\n )\n\n codetrans = {\n -1: \"Invalid pubMLST reference\",\n -2: \"Possibly novel allele, novel ST\",\n -3: \"Can't establish 7 loci due to low quality\",\n -4: \"Miscellaneous issues\",\n }\n\n print(\"\\n####Unresolved samples and their respective error flags:####\\n\")\n for k, v in novelbkt3.items():\n print(\"\\n##Code {} - {}##\".format(k, codetrans[k]))\n for x, y in v.items():\n if x is not None:\n x = x.replace(\"_\", \" \").capitalize()\n print(\"{} ({} samples):\\n{}\".format(x, len(y), sorted(y),))\n if len(novelbkt3) == 0:\n print(\"None!\")\n\n print(\"\\n####ST updated on pubMLST but not marked as resolved:####\\n\")\n for k, v in novelbkt2.items():\n if k is not None:\n k = k.replace(\"_\", \" \").capitalize()\n print(\"Organism {} ({}):\".format(k, len(v)))\n for x, y in v.items():\n print(\"{}:{} ({} ST)\".format(x, sorted(y), len(y)))\n if len(novelbkt2) == 0:\n print(\"None!\")\n\n print(\"\\n####ST currently not updated at all:####\\n\")\n for k, v in novelbkt.items():\n if k is not None:\n k = k.replace(\"_\", \" \").capitalize()\n print(\"Organism {} ({}):\".format(k, len(v)))\n for x, y in v.items():\n print(\"{}:{} ({} novel ST)\".format(x, sorted(y), len(y)))\n if len(novelbkt) == 0:\n print(\"None!\")\n\n def setPredictor(self, cg_sid: str, pks=dict()):\n \"\"\" Helper function. 
Flags a set of seq_types as part of the final prediction.\n Uses optional pks[PK_NAME] = VALUE dictionary to distinguish in scenarios where an allele number has multiple hits\"\"\"\n sample = self.session.query(Seq_types).filter(Seq_types.CG_ID_sample == cg_sid)\n\n if pks == dict():\n sample.update({Seq_types.st_predictor: 1})\n else:\n # Resets all previous predictors\n sample.update({Seq_types.st_predictor: None})\n # Set subset\n for loci, columns in pks.items():\n arglist = list()\n for key, val in columns.items():\n arglist.append(\"Seq_types.{}=='{}'\".format(key, val))\n args = \"and_(\" + \", \".join(arglist) + \")\"\n sample.filter(eval(args)).update({Seq_types.st_predictor: 1})\n self.session.commit()\n\n def alleles2st(self, cg_sid: str):\n \"\"\" Takes a CG_ID_sample and predicts the correct ST \"\"\"\n threshold = True\n organism = (\n self.session.query(Samples.organism)\n .filter(Samples.CG_ID_sample == cg_sid)\n .scalar()\n )\n if organism is None:\n self.logger.warning(\n \"No organism set for {}. Most likely control sample. Setting ST to -1\".format(\n cg_sid\n )\n )\n return -1\n [alleles, allelediff] = self.get_unique_alleles(cg_sid, organism, threshold)\n if allelediff < 0:\n threshold = False\n [alleles, allelediff] = self.get_unique_alleles(cg_sid, organism, threshold)\n if allelediff < 0:\n self.logger.warning(\n \"Insufficient allele hits to establish ST for sample {}, even without thresholds. Setting ST to -3\".format(\n cg_sid, organism\n )\n )\n self.setPredictor(cg_sid)\n return -3\n\n # Tests all allele combinations found to see if any of them result in ST\n filter = list()\n for key, val in alleles.items():\n subfilter = list()\n for num in val:\n subfilter.append(\" self.profiles[organism].c.{}=={} \".format(key, num))\n subfilter = \",\".join(subfilter)\n if len(val) > 1:\n subfilter = \"or_({})\".format(subfilter)\n filter.append(subfilter)\n filter = \",\".join(filter)\n filter = \"and_({})\".format(filter)\n output = self.session.query(self.profiles[organism]).filter(eval(filter)).all()\n\n # Check for existence in profile database\n if len(output) > 1:\n STlist = list()\n for st in output:\n STlist.append(st.ST)\n best = self.bestST(cg_sid, STlist, \"profile\")\n if threshold:\n self.logger.warning(\n \"Multiple ST within threshold found for sample {}, list: {}. Established ST{} as best hit.\".format(\n cg_sid, STlist, best\n )\n )\n return best\n elif len(output) == 1:\n # Arbitary call\n return self.bestST(cg_sid, [output[0].ST], \"profile\")\n # Check for existence in novel database\n elif threshold:\n self.logger.info(\n \"Sample {} on {} has novel ST reliably established. Searching for prior novel definition...\".format(\n cg_sid, organism\n )\n )\n filter = list()\n for key, val in alleles.items():\n subfilter = list()\n for num in val:\n subfilter.append(\" self.novel[organism].c.{}=={} \".format(key, num))\n subfilter = \",\".join(subfilter)\n if len(val) > 1:\n subfilter = \"or_({})\".format(subfilter)\n filter.append(subfilter)\n filter = \",\".join(filter)\n filter = \"and_({})\".format(filter)\n output = self.session.query(self.novel[organism]).filter(eval(filter)).all()\n\n if len(output) > 1:\n STlist = list()\n for st in output:\n STlist.append(st.ST)\n best = self.bestST(cg_sid, STlist, \"novel\")\n if threshold:\n self.logger.warning(\n \"Multiple ST within novel threshold found for sample {}, list: {}. 
Established ST{} as best hit.\".format(\n cg_sid, STlist, best\n )\n )\n return best\n elif len(output) == 1:\n return self.bestST(cg_sid, [output[0].ST], \"novel\")\n else:\n # Create new novel ST\n # Set ST -10 per default, or one below the current min, whichever is smaller.\n st = -9\n query = self.session.query(self.novel[organism]).all()\n for entry in query:\n if entry.ST < st:\n st = entry.ST\n st = st - 1\n\n bestSet = self.bestAlleles(cg_sid)\n newEntry = dict()\n for allele, columns in bestSet.items():\n newEntry[allele] = columns[\"allele\"]\n newEntry[\"ST\"] = st\n self.add_rec(newEntry, self.novel[organism])\n return self.bestST(cg_sid, [st], \"novel\")\n else:\n self.logger.warning(\n \"Sample {} on {} has an allele set but hits are low-quality and\\\n do not resolve to an ST. Setting ST to -2\".format(\n cg_sid, organism\n )\n )\n bestSet = self.bestAlleles(cg_sid)\n self.setPredictor(cg_sid, bestSet)\n return -2\n\n def bestST(self, cg_sid: str, st_list: List, type=\"profile\"):\n \"\"\"Takes in a list of ST and a sample.\n Establishes which ST is most likely by criteria id*span -> eval -> contig coverage\n & flags involved alleles\"\"\"\n profiles = list()\n scores = dict()\n bestalleles = dict()\n organism = (\n self.session.query(Samples.organism)\n .filter(Samples.CG_ID_sample == cg_sid)\n .scalar()\n )\n for st in st_list:\n scores[st] = dict()\n bestalleles[st] = dict()\n scores[st][\"spanid\"] = 0\n scores[st][\"eval\"] = 0\n scores[st][\"cc\"] = 0\n scores[st][\"span\"] = 0\n if type == \"profile\":\n profiles.append(\n self.session.query(self.profiles[organism])\n .filter(text(\"ST={}\".format(st)))\n .first()\n )\n elif type == \"novel\":\n profiles.append(\n self.session.query(self.novel[organism])\n .filter(text(\"ST={}\".format(st)))\n .first()\n )\n\n # Get values for each allele set that resolves an ST\n for prof in profiles:\n alleleconditions = list()\n alleledict = dict()\n allconditions = [\"Seq_types.CG_ID_sample=='{}'\".format(cg_sid)]\n\n for index, allele in enumerate(prof):\n if (\n \"ST\" not in prof.keys()[index]\n and \"clonal_complex\" not in prof.keys()[index]\n and \"species\" not in prof.keys()[index]\n ):\n condition = \"Seq_types.loci=='{}' , Seq_types.allele=='{}'\".format(\n prof.keys()[index], allele\n )\n alleledict[prof.keys()[index]] = \"\"\n alleleconditions.append(\"and_({})\".format(condition))\n\n alleleconditions = \"or_({})\".format(\",\".join(alleleconditions))\n allconditions.append(alleleconditions)\n allconditions = \"and_({})\".format(\",\".join(allconditions))\n all_alleles = (\n self.session.query(Seq_types).filter(eval(allconditions)).all()\n )\n\n # Keep only best hit each loci\n for allele in all_alleles:\n if alleledict[allele.loci] == \"\":\n alleledict[allele.loci] = allele\n else:\n old_al = alleledict[allele.loci]\n\n if allele.span * allele.identity >= old_al.span * old_al.identity:\n if (\n allele.span * allele.identity\n > old_al.span * old_al.identity\n ):\n alleledict[allele.loci] = allele\n elif float(allele.evalue) <= float(old_al.evalue):\n if float(allele.evalue) < float(old_al.evalue):\n alleledict[allele.loci] = allele\n elif allele.contig_coverage > old_al.contig_coverage:\n alleledict[allele.loci] = allele\n\n # Create score dict for the ST\n for key, allele in alleledict.items():\n scores[prof.ST][\"spanid\"] += allele.span * allele.identity\n scores[prof.ST][\"eval\"] += float(allele.evalue)\n scores[prof.ST][\"cc\"] += allele.contig_coverage\n if not allele.loci in bestalleles[prof.ST].keys():\n 
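# First hit for this locus under this candidate ST: initialise its record\n 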
bestalleles[prof.ST][allele.loci] = dict()\n if not \"contig_name\" in bestalleles[prof.ST][allele.loci].keys():\n bestalleles[prof.ST][allele.loci][\"contig_name\"] = str(\n allele.contig_name\n )\n\n # Establish best ST\n topST = \"\"\n topID = 0\n topEval = 100\n topCC = 0\n for key, val in scores.items():\n if scores[key][\"spanid\"] > topID:\n topID = scores[key][\"spanid\"]\n topEval = scores[key][\"eval\"]\n topCC = scores[key][\"cc\"]\n topST = key\n elif scores[key][\"spanid\"] == topID and scores[key][\"eval\"] < topEval:\n topID = scores[key][\"spanid\"]\n topEval = scores[key][\"eval\"]\n topCC = scores[key][\"cc\"]\n topST = key\n elif (\n scores[key][\"spanid\"] == topID\n and scores[key][\"eval\"] == topEval\n and scores[key][\"cc\"] > topCC\n ):\n topID = scores[key][\"spanid\"]\n topEval = scores[key][\"eval\"]\n topCC = scores[key][\"cc\"]\n topST = key\n self.setPredictor(cg_sid, bestalleles[topST])\n return topST\n\n def bestAlleles(self, cg_sid: str):\n \"\"\" Establishes which allele set (for bad samples) is most likely by criteria span* id -> eval -> contig coverage\"\"\"\n hits = (\n self.session.query(\n Seq_types.contig_name,\n Seq_types.loci,\n Seq_types.span,\n Seq_types.identity,\n Seq_types.evalue,\n Seq_types.contig_coverage,\n Seq_types.allele,\n )\n .filter(Seq_types.CG_ID_sample == cg_sid)\n .all()\n )\n bestHits = dict()\n alleledict = dict()\n for allele in hits:\n if allele.loci not in bestHits.keys():\n bestHits[allele.loci] = dict()\n bestHits[allele.loci][\"contig_name\"] = allele.contig_name\n bestHits[allele.loci][\"allele\"] = allele.allele\n alleledict[allele.loci] = [\n allele.identity,\n allele.evalue,\n allele.contig_coverage,\n allele.span,\n ]\n else:\n if (\n (\n allele.identity * allele.span\n > alleledict[allele.loci][0] * alleledict[allele.loci][3]\n )\n or (\n allele.identity * allele.span\n == alleledict[allele.loci][0] * alleledict[allele.loci][3]\n and float(allele.evalue) < float(alleledict[allele.loci][1])\n )\n or (\n allele.identity * allele.span\n == alleledict[allele.loci][0] * alleledict[allele.loci][3]\n and float(allele.evalue) == float(alleledict[allele.loci][1])\n and allele.contig_coverage > alleledict[allele.loci][2]\n )\n ):\n bestHits[allele.loci][\"contig_name\"] = allele.contig_name\n alleledict[allele.loci] = [\n allele.identity,\n allele.evalue,\n allele.contig_coverage,\n allele.span,\n ]\n return bestHits\n\n def get_unique_alleles(self, cg_sid: str, organism: str, threshold=True):\n \"\"\" Returns a dict containing all unique alleles at every loci, and allele difference from expected\"\"\"\n tid = float(self.config[\"threshold\"][\"mlst_id\"])\n tspan = (self.config[\"threshold\"][\"mlst_span\"]) / 100.0\n if threshold:\n hits = (\n self.session.query(Seq_types.loci, Seq_types.allele)\n .filter(\n Seq_types.CG_ID_sample == cg_sid,\n Seq_types.identity >= tid,\n Seq_types.span >= tspan,\n )\n .all()\n )\n else:\n hits = (\n self.session.query(Seq_types.loci, Seq_types.allele)\n .filter(Seq_types.CG_ID_sample == cg_sid)\n .all()\n )\n\n # Establish number of unique hits\n uniqueDict = dict()\n for hit in hits:\n if hit.loci not in uniqueDict.keys():\n uniqueDict[hit.loci] = list()\n uniqueDict[hit.loci].append(hit.allele)\n elif hit.allele not in uniqueDict[hit.loci]:\n uniqueDict[hit.loci].append(hit.allele)\n non_allele_columns = 1\n if \"clonal_complex\" in self.profiles[organism].columns.keys():\n non_allele_columns += 1\n if \"species\" in self.profiles[organism].columns.keys():\n non_allele_columns += 1\n 
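# Negative when fewer unique loci were hit than the MLST scheme expects\n 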
allele_overabundance = len(uniqueDict.keys()) - (\n len(self.profiles[organism].columns.values()) - non_allele_columns\n )\n return [uniqueDict, allele_overabundance]\n","repo_name":"Clinical-Genomics/microSALT","sub_path":"microSALT/store/db_manipulator.py","file_name":"db_manipulator.py","file_ext":"py","file_size_in_byte":39003,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"3852522162","text":"from Tic_tac_package.Board import Board\nfrom Tic_tac_package.Player import Player\n\n\ndef main():\n board = Board()\n player1 = Player(\"Player 1\", \"X\")\n player2 = Player(\"Player 2\", \"O\")\n\n current_player = player1\n\n while not board.full_board():\n board.display_board()\n move = current_player.make_move()\n\n if board.make_move(move, current_player.symbol):\n if board.check_winner(current_player.symbol):\n board.display_board()\n print(f\"{current_player.name} wins!\")\n break\n else:\n current_player = player2 if current_player == player1 else player1\n else:\n print(\"Invalid move. That spot is already taken.\")\n\n if not board.check_winner(player1.symbol) and not board.check_winner(player2.symbol):\n board.display_board()\n print(\"It's a tie!\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Xalmon/PythonProject","sub_path":"Tic_tac_package/Tic_tac_app.py","file_name":"Tic_tac_app.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"19374959877","text":"from pydub import AudioSegment\nfrom os.path import join, basename\nimport glob\nfrom queue import Queue\nimport logging\nimport os\nfrom threading import Thread\nimport audiotools\nfrom audiotools.wav import InvalidWave\n\n\"\"\"\nFlac 2 Wav converter script\nusing audiotools\nFrom http://magento4newbies.blogspot.com/2014/11/converting-wav-files-to-flac-with.html\n\"\"\"\nclass F2W:\n\n logger = ''\n\n def __init__(self):\n global logger\n # create logger\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n\n # create a file handler\n handler = logging.FileHandler('converter.log')\n handler.setLevel(logging.INFO)\n\n # create a logging format\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n # add the handlers to the logger\n logger.addHandler(handler)\n\n def convert(self):\n global logger\n file_queue = Queue()\n num_converter_threads = 5\n\n # collect files to be converted\n data_path = r'E:\corpus_en\LibriSpeech_small'\n parts = ['dev-clean']\n\n for part in parts:\n # part/speaker/book/*.flac\n flac_paths = [p for p in glob.glob(join(data_path, part, '*/*/*.flac'))]\n for flac_path in flac_paths:\n file_queue.put(flac_path)\n\n # for root, dirs, files in os.walk(\"/Volumes/music\"):\n #\n # for file in files:\n # if file.endswith(\".wav\"):\n # file_wav = os.path.join(root, file)\n # file_flac = file_wav.replace(\".wav\", \".flac\")\n #\n # if (os.path.exists(file_flac)):\n # logger.debug(''.join([\"File \",file_flac, \" already exists.\"]))\n # else:\n # file_queue.put(file_wav)\n\n logger.info(\"Start converting: %s files\", str(file_queue.qsize()))\n\n # Set up some threads to convert files\n for i in range(num_converter_threads):\n worker = Thread(target=self.process, args=(file_queue,))\n worker.setDaemon(True)\n worker.start()\n\n file_queue.join()\n\n def process(self, q):\n \"\"\"This is the worker thread function.\n It processes files in the queue one 
after\n another. These daemon threads go into an\n infinite loop, and only exit when\n the main thread ends.\n \"\"\"\n while True:\n global logger\n compression_quality = '0' #min compression\n file_flac = q.get()\n file_wav = file_flac.replace(\".flac\", \".wav\")\n\n try:\n audiotools.open(file_flac).convert(file_wav,audiotools.WavAudio, compression_quality)\n logger.info(''.join([\"Converted \", file_flac, \" to: \", file_wav]))\n os.remove(file_flac)\n q.task_done()\n except InvalidWave:\n logger.error(''.join([\"Failed to open file \", file_flac, \" to: \", file_wav,\" failed.\"]), exc_info=True)\n except Exception as e:\n logger.error('ExFailed to open file', exc_info=True)","repo_name":"xingchensong/Speech-Transformer-tf2.0","sub_path":"datasets/librispeech/flac2wav.py","file_name":"flac2wav.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"4"} +{"seq_id":"34844934746","text":"# coding=utf-8\n\nimport os\nimport logging\nimport hashlib\n\nfrom charset_normalizer import detect\nfrom bs4 import UnicodeDammit\n\nfrom app.config import settings\n\n\ndef check_credentials(user, pw):\n username = settings.auth.username\n password = settings.auth.password\n return hashlib.md5(pw.encode('utf-8')).hexdigest() == password and user == username\n\n\ndef get_subtitle_destination_folder():\n fld_custom = str(settings.general.subfolder_custom).strip() if (settings.general.subfolder_custom and\n settings.general.subfolder != 'current') else None\n return fld_custom\n\n\ndef get_target_folder(file_path):\n subfolder = settings.general.subfolder\n fld_custom = str(settings.general.subfolder_custom).strip() \\\n if settings.general.subfolder_custom else None\n\n if subfolder != \"current\" and fld_custom:\n # specific subFolder requested, create it if it doesn't exist\n fld_base = os.path.split(file_path)[0]\n\n if subfolder == \"absolute\":\n # absolute folder\n fld = fld_custom\n elif subfolder == \"relative\":\n fld = os.path.join(fld_base, fld_custom)\n else:\n fld = None\n\n fld = force_unicode(fld)\n\n if not os.path.isdir(fld):\n try:\n os.makedirs(fld)\n except Exception:\n logging.error('BAZARR is unable to create directory to save subtitles: ' + fld)\n fld = None\n else:\n fld = None\n\n return fld\n\n\ndef force_unicode(s):\n \"\"\"\n Ensure a string is unicode, not encoded; used for enforcing file paths to be unicode upon saving a subtitle,\n to prevent encoding issues when saving a subtitle to a non-ascii path.\n :param s: string\n :return: unicode string\n \"\"\"\n if not isinstance(s, str):\n try:\n s = s.decode(\"utf-8\")\n except UnicodeDecodeError:\n t = detect(s)['encoding']\n try:\n s = s.decode(t)\n except UnicodeDecodeError:\n s = UnicodeDammit(s).unicode_markup\n return s\n","repo_name":"morpheus65535/bazarr","sub_path":"bazarr/utilities/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":2298,"dataset":"github-code","pt":"4"} +{"seq_id":"6361331743","text":"from five import grok\nfrom plone.directives import dexterity, form\nfrom plone.indexer import indexer\nfrom zope import schema\n\nfrom my315ok.wechat import MessageFactory as _\n\n# Interface class; used to define content-type schema.\nfrom my315ok.wechat.interfaces import IMessage, ITextMessage,IImageMessage,\\\nIVoiceMessage,ILinkMessage,IVideoMessage,ILocationMessage\n\n \n# Custom content-type class; objects created for this content type will\n# 
be instances of this class. Use this class to add content-type specific\n# methods and properties. Put methods that are mainly useful for rendering\n# in separate view classes.\n\nclass TextMessage(dexterity.Item):\n grok.implements(ITextMessage)\n grok.provides(ITextMessage)\n \n # Add your class methods and properties here\nclass ImageMessage(dexterity.Item):\n grok.implements(IImageMessage)\n grok.provides(IImageMessage)\n \nclass LinkMessage(dexterity.Item):\n grok.implements(ILinkMessage)\n grok.provides(ILinkMessage)\n \nclass VoiceMessage(dexterity.Item):\n grok.implements(IVoiceMessage)\n grok.provides(IVoiceMessage) \n \nclass VideoMessage(dexterity.Item):\n grok.implements(IVideoMessage)\n grok.provides(IVideoMessage) \n \nclass LocationMessage(dexterity.Item):\n grok.implements(ILocationMessage)\n grok.provides(ILocationMessage) \n\n\n@indexer(IMessage)\ndef FromUserName(context):\n \"\"\"Create a catalogue indexer, registered as an adapter, which can\n populate the ``content`` index with the answer .\n \"\"\"\n# context = aq_inner(context)\n return context.FromUserName\n","repo_name":"adam139/my315ok.wechat","sub_path":"my315ok/wechat/content/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"19202853678","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nDescription du pogramme: à faire.\n\"\"\"\n\nimport os\n\nfrom flask import Blueprint\nfrom flask import current_app as app\nfrom flask import request, render_template\n\nfrom .utils import *\nfrom .auth import auth\n\nroot_path = os.path.dirname(os.path.abspath(__file__))\nkerguelen = Blueprint('kerguelen', __name__, url_prefix='/kerguelen')\n\n@kerguelen.route('/', methods = ['GET'])\n@auth.login_required\ndef home():\n\n # calcul du remplissage de la carte SD\n sd_percent = os.statvfs(os.path.dirname(os.path.abspath(__file__)))\n sd_percent = int(100-float(sd_percent.f_bavail)/float(sd_percent.f_blocks)*100)\n if ( sd_percent < 80 ):\n sd_color = \"green\"\n else:\n sd_color = \"deep-orange\"\n\n return render_template(\n \"home.html\",\n home_menu = True,\n sd_percent = sd_percent,\n sd_color = sd_color,\n version = app.config['VERSION']\n )\n\n@kerguelen.route('/camera', methods = ['GET'])\n@auth.login_required\ndef camera():\n messages = list()\n\n photo_param = request.args.get('photo')\n if photo_param == 'new':\n if not take_a_picture(app):\n messages.append('Impossible de prendre la photo, problème avec la caméra.')\n\n purge_param = request.args.get('purge')\n images = get_images(os.path.join(root_path, 'static'))\n if purge_param == 'all':\n purge(list(images.values()))\n images = get_images(os.path.join(root_path, 'static'))\n elif purge_param == 'old':\n purge(list(images.values())[10:])\n images = get_images(os.path.join(root_path, 'static'))\n else:\n pass\n\n # get the last picture:\n if ( len(images) > 0 ):\n for image in images.values():\n last_image = image\n break\n else:\n last_image = None\n\n # affichage de la page sur la base du modele\n return render_template(\n 'camera.html',\n camera_menu = True,\n image = last_image,\n num_images = len(images),\n version = app.config['VERSION'],\n messages = messages\n )\n\n@kerguelen.route('/historique', methods = ['GET'])\n@auth.login_required\ndef historique():\n images = get_images(os.path.join(root_path, 'static'))\n for image in images.values():\n current_image=image\n break\n\n action = request.args.get('action')\n if ( action == 
'view' ):\n current_image = request.args.get('image')\n elif ( action == 'del' ):\n current_image = request.args.get('image')\n purge(list([current_image]))\n images = get_images(os.path.join(root_path, 'static'))\n for image in images.values():\n current_image=image\n break\n else:\n pass\n\n return render_template(\n 'historique.html',\n historique_menu = True,\n images = images,\n current_image = current_image,\n version = app.config['VERSION']\n )\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n#\n# END OF FILE\n#\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -","repo_name":"uko31/kerguelen-project","sub_path":"kerguelen_v1/kerguelen.py","file_name":"kerguelen.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"4661189965","text":"from copy import deepcopy\nfrom collections import deque\n\nclass Intcode:\n \n original = []\n\n @staticmethod\n def setup():\n f = open(\"data.txt\")\n Intcode.original = [int(i) for i in f.read().split(\",\")]\n f.close()\n\n diff = 10000 - len(Intcode.original)\n if diff > 0:\n Intcode.original.extend([0] * diff)\n\n def __init__(self):\n if not Intcode.original:\n Intcode.setup()\n\n self.d = deepcopy(Intcode.original)\n self.inp = None\n self.outp = None\n self.status = None\n self.it = self.run()\n next(self.it)\n\n def input_signal(self, a):\n self.inp = a\n next(self.it)\n\n def output_signal(self):\n outp_temp = self.outp\n next(self.it)\n return outp_temp\n\n def run(self):\n\n d = self.d\n i = 0\n r = 0\n\n while True:\n # Parse opcode\n o = []\n c = d[i]\n o.append(c % 100)\n c //= 100\n o.append(c % 10)\n c //= 10\n o.append(c % 10)\n o.append(c // 10)\n\n if o[0] == 99: # HCF\n self.status = \"halted\"\n while True:\n yield self.status\n raise Exception(\"intcode computer halted\")\n\n # Assign parameter values\n p = [0,0,0]\n\n if o[0] == 3:\n if o[1] == 0:\n p[0] = d[i+1]\n elif o[1] == 1:\n raise Exception(\"Parameter 1 invalid mode\")\n elif o[1] == 2:\n p[0] = d[i+1] + r\n else:\n if o[1] == 0:\n p[0] = d[d[i+1]]\n elif o[1] == 1:\n p[0] = d[i+1]\n elif o[1] == 2:\n p[0] = d[d[i+1] + r]\n\n if o[0] == 1 or o[0] == 2 or o[0] == 5 or o[0] == 6 or o[0] == 7 or o[0] == 8:\n if o[2] == 0:\n p[1] = d[d[i+2]]\n elif o[2] == 1:\n p[1] = d[i+2]\n elif o[2] == 2:\n p[1] = d[d[i+2] + r]\n\n if o[0] == 1 or o[0] == 2 or o[0] == 7 or o[0] == 8:\n if o[3] == 0:\n p[2] = d[i+3]\n elif o[3] == 1:\n raise Exception(\"Parameter 3 invalid mode\")\n elif o[3] == 2:\n p[2] = d[i+3] + r\n\n # Select action by opcode\n if o[0] == 1:\n d[p[2]] = p[0] + p[1] # Add\n i += 4\n\n elif o[0] == 2:\n d[p[2]] = p[0] * p[1] # Mult\n i += 4\n\n elif o[0] == 3:\n self.status = \"awaiting input\"\n yield self.status\n d[p[0]] = self.inp # Input\n i += 2\n\n elif o[0] == 4:\n self.outp = p[0] # Output\n self.status = \"awaiting output\"\n yield self.status\n i += 2\n\n elif o[0] == 5:\n if p[0] != 0: # Jump !=\n i = p[1]\n else:\n i += 3\n\n elif o[0] == 6:\n if p[0] == 0: # Jump ==\n i = p[1]\n else:\n i += 3\n\n elif o[0] == 7:\n if p[0] < p[1]: # Test <\n d[p[2]] = 1\n else:\n d[p[2]] = 0\n i += 4\n\n elif o[0] == 8:\n if p[0] == p[1]: # Test ==\n d[p[2]] = 1\n else:\n d[p[2]] = 0\n i += 4\n \n elif o[0] == 9: # Relative base\n r += p[0]\n i += 2\n\n else:\n raise Exception(\"Bad command i: %d - o[0]: %d\" % (i, o[0])) # Oops\n\nclass ASCII_Computer:\n\n def 
__init__(self, input_first = False):\n self.intcode_program = Intcode()\n self.print_output = True\n self.output_queue = deque()\n\n status = self.intcode_program.status\n if status == \"halted\" \\\n or status == \"awaiting output\" and input_first == \"input\" \\\n or status == \"awaiting input\" and input_first == \"output\":\n raise Exception(\"Invalid ASCII computer mode.\")\n \n self._run()\n\n def input_cmd(self, cmd):\n if not cmd or type(cmd) is not str:\n raise Exception(\"Invalid ASCII input.\")\n\n if cmd[-1] != \"\\n\":\n cmd += \"\\n\"\n\n self._run(cmd)\n\n def output_cmd(self):\n return \"\".join(self.output_queue)\n\n def _run(self, line = \"\"):\n if self.intcode_program.status == \"awaiting input\":\n for letter in line:\n self.intcode_program.input_signal(ord(letter))\n elif self.intcode_program.status == \"halted\":\n print(\"ASCII computer halted.\")\n return\n\n self.output_queue.clear()\n output = []\n while self.intcode_program.status == \"awaiting output\":\n val = self.intcode_program.output_signal()\n \n if val >= 256:\n print(val)\n break\n \n output.append(chr(val))\n if output[-1] == \"\\n\":\n self.output_queue.append(\"\".join(output[:-1]))\n if self.print_output:\n print(\"\".join(output[:-1]))\n output = []","repo_name":"fishlips13/AoC","sub_path":"AoC-19/intcode.py","file_name":"intcode.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"34796121941","text":"import RPi.GPIO as GPIO\nimport time\n\n#GPIO Mode (BOARD / BCM)\nGPIO.setmode(GPIO.BOARD)\n \n#set GPIO Pins\nGPIO_TRIGGERL = 36\nGPIO_ECHOL = 37\n \n#set GPIO direction (IN / OUT)\nGPIO.setup(GPIO_TRIGGERL, GPIO.OUT)\nGPIO.setup(GPIO_ECHOL, GPIO.IN)\n \ndef distance_left():\n # set Trigger to HIGH\n GPIO.output(GPIO_TRIGGERL, True)\n \n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(GPIO_TRIGGERL, False)\n \n StartTime = time.time()\n StopTime = time.time()\n \n # save StartTime\n while GPIO.input(GPIO_ECHOL) == 0:\n StartTime = time.time()\n \n # save time of arrival\n while GPIO.input(GPIO_ECHOL) == 1:\n StopTime = time.time()\n \n # time difference between start and arrival\n TimeElapsed = StopTime - StartTime\n # multiply with the sonic speed (34300 cm/s)\n # and divide by 2, because there and back\n distance = (TimeElapsed * 34300) / 2\n \n return distance\n","repo_name":"Project-Lidar/TINLAB_ES_Lidar_System","sub_path":"robot/ultrasoon/ultrasoon.py","file_name":"ultrasoon.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"6674673636","text":"from Team import Team\n\n##Gets team stats from CSV file\n## YEAR IS HARD CODED\ndef getTeamStats( teamname ):\n import csv\n ##YEAR/FILE is hard coded below\n with open('data/train/summary16_pt.csv') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in spamreader:\n if( row[0] == teamname ):\n return Team(row)\n\n return;\n\n\ndef compareTeams( team1, team2):\n val=0\n val=val+(team1.RankTempo - team2.RankTempo)\n val=val+(team1.RankAdjTempo - team2.RankAdjTempo)\n val=val+(team1.RankOE - team2.RankDE)\n val=val+(team1.RankAdjOE - team2.RankAdjDE)\n\n if(abs(val) < 200):\n if(team1.RankEM > team2.RankEM):\n return team1.TeamName\n else:\n return team2.TeamName\n return val;\n\nCal = getTeamStats('Syracuse')\nHawaii = getTeamStats('North 
Carolina')\n\nprint(Cal.TeamName)\nprint(Hawaii.TeamName)\n\nprint(compareTeams( Cal, Hawaii))\n","repo_name":"bpchiv/BasketBot","sub_path":"getTeamStats.py","file_name":"getTeamStats.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"73029513397","text":"# Let's check how much faster dynamic programming is compared to plain recursion.\n\ndef fib_rec(n):\n    global cnt1\n    if n == 1 or n == 2:\n        return 1\n    else:\n        cnt1 += 1\n        return fib_rec(n-1) + fib_rec(n-2)\n    \ndef fib_dp(n):\n    global cnt2\n    f = [0] * 41\n    f[1] = f[2] = 1\n    for i in range(3, n+1):\n        cnt2 += 1\n        f[i] = f[i-1] + f[i-2]\n    return f[n]\n\nN = int(input())\n\ncnt1 = 1\ncnt2 = 0\n\nfib_rec(N)\nfib_dp(N)\n\nprint(cnt1, cnt2)","repo_name":"baamkyu/TIL","sub_path":"Algorithm/Python/BOJ/티어별/브론즈1/24416_알고리즘 수업 - 피보나치 수 1.py","file_name":"24416_알고리즘 수업 - 피보나치 수 1.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"779969103","text":"from django.contrib.gis import forms\n\nfrom workplace.models import Zone\n\n\nclass ZoneForm(forms.ModelForm):\n    class Meta:\n        model = Zone\n        exclude = [\"nothing\"]\n\n    poly = forms.PolygonField(widget=forms.OSMWidget(attrs={\n        'map_width': 800,\n        'map_height': 500,\n        'default_lat': 55.751244,\n        'default_lon': 37.618423\n    }))\n","repo_name":"UnstoppableBuilder/server","sub_path":"workplace/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"30027771515","text":"import cv2\r\nimport pytesseract\r\nfrom imutils.object_detection import non_max_suppression\r\nimport numpy as np\r\npytesseract.pytesseract.tesseract_cmd=r\"C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe\"\r\ndef decode_predictions(scores, geometry):\r\n    (numRows, numCols)=scores.shape[2:4]\r\n    rects=[]\r\n    confidences=[]\r\n\r\n    for y in range(0,numRows):\r\n        scoresData = scores[0, 0, y]\r\n        xData0 = geometry[0, 0, y]\r\n        xData1 = geometry[0, 1, y]\r\n        xData2 = geometry[0, 2, y]\r\n        xData3 = geometry[0, 3, y]\r\n        anglesData = geometry[0, 4, y]\r\n\r\n        for x in range(0, numCols):\r\n            if scoresData[x] < min_confidence:\r\n                continue\r\n            # compute the offset factor as our resulting feature maps will\r\n            # be 4x smaller than the input image\r\n            (offsetX, offsetY) = (x * 4.0, y * 4.0)\r\n            # extract the rotation angle for the prediction and then\r\n            # compute the sin and cosine\r\n            angle = anglesData[x]\r\n            cos = np.cos(angle)\r\n            sin = np.sin(angle)\r\n            # use the geometry volume to derive the width and height of the bounding box\r\n            h = xData0[x] + xData2[x]\r\n            w = xData1[x] + xData3[x]\r\n\r\n            endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))\r\n            endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))\r\n            startX = int(endX - w)\r\n            startY = int(endY - h)\r\n            # add the bounding box coordinates and probability scores to our respective lists\r\n            rects.append((startX, startY, endX, endY))\r\n            confidences.append(scoresData[x])\r\n    return (rects,confidences)\r\n\r\nimage=\"believe3.png\"\r\neast=\"frozen_east_text_detection.pb\"\r\nmin_confidence=0.5\r\nwidth=320\r\nheight=320\r\npadding=0.3\r\nimage=cv2.imread(image)\r\norig=image.copy()\r\n(origH, origW)=image.shape[:2]  # shape[:2] is (rows, cols), i.e. (height, width)\r\n(newW,newH)=(width,height)\r\nrW=origW/float(newW)\r\nrH=origH/float(newH)\r\nimage=cv2.resize(image,(newW,newH))\r\n(H,W)=image.shape[:2]\r\nlayerNames=[\r\n    \"feature_fusion/Conv_7/Sigmoid\",\r\n    \"feature_fusion/concat_3\"]\r\nprint('loading EAST text detector...')\r\nnet=cv2.dnn.readNet(east)\r\nblob=cv2.dnn.blobFromImage(image,1.0,(W,H),(123.68,116.78,103.94),swapRB=None,crop=False)\r\nnet.setInput(blob)\r\n(scores, geometry)=net.forward(layerNames)\r\n(rects,confidences)=decode_predictions(scores,geometry)\r\nboxes=non_max_suppression(np.array(rects),probs=confidences)\r\nresults=[]\r\n\r\nfor (startX,startY,endX,endY) in boxes:\r\n    #scale the bounding box coordinates based on their ratios\r\n    startX = int(startX * rW)\r\n    startY = int(startY * rH)\r\n    endX = int(endX * rW)\r\n    endY = int(endY * rH)\r\n\r\n    # in order to obtain a better OCR of the text we can potentially\r\n    # apply a bit of padding surrounding the bounding box -- here we\r\n    # are computing the deltas in both the x and y directions\r\n    dx=int((endX-startX)*padding)\r\n    dy=int((endY-startY)*padding)\r\n\r\n    # apply padding to each side of the bounding box, respectively\r\n    startX=max(0,startX-dx)\r\n    startY = max(0, startY - dy)\r\n    endX = min(origW, endX + (dx*2))\r\n    endY = min(origH, endY + (dy*2))\r\n    #extract the actual padded ROI\r\n    roi=orig[startY:endY,startX:endX]\r\n    # in order to apply Tesseract v4 to OCR text we must supply\r\n    # (1) a language, (2) an OEM flag of 1, indicating that we\r\n    # wish to use the LSTM neural net model for OCR, and finally\r\n    # (3) a PSM value, in this case 7, which implies that we are\r\n    # treating the ROI as a single line of text\r\n    config=(\"-l eng --oem 1 --psm 7\")\r\n    text=pytesseract.image_to_string(roi,config=config)\r\n    results.append(((startX,startY,endX,endY),text))\r\nresults=sorted(results,key=lambda r:r[0][1])\r\nfor((startX,startY,endX,endY),text) in results:\r\n    print(\"OCR text\")\r\n    print(\"========\")\r\n    print(\"{}\\n\".format(text))\r\n\r\n    # strip out non-ASCII text so we can draw the text on the image\r\n    # using OpenCV, then draw the text and a bounding box surrounding the text region of the input image\r\n\r\n    text=\"\".join([c if ord(c)<128 else \"\" for c in text]).strip()\r\n    output=orig.copy()\r\n    cv2.rectangle(output,(startX,startY),(endX,endY),(0,0,255),2)\r\n    cv2.putText(output,text,(startX,startY-18),cv2.FONT_HERSHEY_COMPLEX,1.2,(0,0,255),2)\r\noutput=cv2.cvtColor(output, cv2.COLOR_BGR2RGB)\r\ncv2.imshow('Text Detection',output)\r\ncv2.waitKey(0)","repo_name":"Komalika-minkstas/IOT-COMPUTERVISION--TSF","sub_path":"OBJECT_DETECTOR/Text_detection.py","file_name":"Text_detection.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"73688557876","text":"import os\nimport csv\nimport sys\nimport pandas as pd\nfrom tqdm import tqdm\n\nr = pd.read_csv('ratings.csv')\ntr = pd.read_csv('to_read.csv')\nb = pd.read_csv('books.csv')\nt = pd.read_csv('tags.csv')\nbt = pd.read_csv('book_tags.csv')\n\n# Let us merge tag names into tag applications.\nbt = bt.merge( t, on = 'tag_id' )\n# Why don't we merge book titles for good measure.\nbt = bt.merge( b[[ 'goodreads_book_id', 'title']], on = 'goodreads_book_id' )\n# fix negative tag counts\nbt.loc[ bt['count'] < 0, 'count'] = 0\n\nprint(\"Collecting tags from book_tags.csv\")\nbook_tags = {}\nwith tqdm(total=len(bt)) as pbar:\n    for index, row in bt.iterrows():\n        if row['goodreads_book_id'] not in book_tags:\n            book_tags[row['goodreads_book_id']] = []\n        book_tags[row['goodreads_book_id']].append(row['tag_name'])\n        pbar.update(1)\n\nprint(\"Creating new CSV 
file\")\nwith open('new_.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n with tqdm(total=len(b)) as pbar:\n for index, row in b.iterrows():\n tags = book_tags[row['goodreads_book_id']]\n tag_string = '|'.join(tags)\n writer.writerow([row['goodreads_book_id'], row['title'], tag_string])\n pbar.update(1)","repo_name":"JBAhire/hypertrace-sample-app","sub_path":"csv_upload.py","file_name":"csv_upload.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"14775178393","text":"n=int(input())\navailable={}\n\nfor i in range(n):\n key,value=input().split()\n available[key]=int(value)\n\ndataList=[]\n\nm=int(input())\nfor i in range(m):\n data=input().split()\n dataList.append([-float(data[1]),data[0],data[2:]])\n\nans=[]\n\ndataList.sort()\nfor data in dataList:\n for subject in data[2]:\n if available[subject]>0:\n available[subject]-=1\n ans.append([data[1],subject])\n break\n\nans.sort()\nfor id,subject in ans:\n print(id,subject)","repo_name":"PongDev/2110101-COMP-PROG","sub_path":"Grader/09_MoreDC_37/09_MoreDC_37.py","file_name":"09_MoreDC_37.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"29792649371","text":"from django.shortcuts import render\nfrom .utils import get_categories\n\n\ndef index(request):\n return render(request, \"index.haml\", {\"privileges\": get_categories()})\n\n\ndef sheet(request):\n data = get_categories()\n new_data = []\n for i in data:\n p = i.copy()\n p[\"choice\"] = {x[\"value\"]: x.copy() for x in p[\"options\"]}[request.GET[p[\"name\"]]]\n p[\"others\"] = filter(lambda x: x[\"value\"] != request.GET[p[\"name\"]], i[\"options\"])\n new_data.append(p)\n return render(request, \"sheet.haml\", {\"categories\": new_data})\n","repo_name":"Psycojoker/irlcharactersheets","sub_path":"privilege/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"12104863655","text":"#This code is based on the standardized pseudocode included in the Game Boy algorithm section\n#input: a list of seeds/ a file of seeds\n#input is a bit different from the standardized code because of \n#the different randomization technique in the GameBoy\n#(the seed is fetched one at a time)\n\nimport sys, random\n\nglobal bottle, seedList, numRows, numCols, levelNum, colorPrefer, numVirus\n\ndef randomIndex(seed):\n #print(\"randomIndex\")\n global numRows, numCols\n\n swappedNibbleSeed = (seed//16)+(seed%16)*16\n b = swappedNibbleSeed % 16 # & 0x0F for level 25-30 (buggy)\n if levelNum == 30: a = seed & 0xFE\n elif levelNum == 29: a = seed & 0x0A\n elif levelNum == 28: a = seed & 0xC1\n elif levelNum == 27: a = seed & 0xD1\n elif levelNum >= 25: a = seed & 0xE5\n else:\n b = swappedNibbleSeed % 64 # & 0x3F for level 0-24\n if levelNum >= 22: #a = seed % 16 + (seed // 32)%2*32 #& 0x2F\n a = seed & 0x2F\n elif levelNum >= 19: # a = seed % 8 + (seed // 32)%2*32 #& 0x27\n a = seed & 0x27\n elif levelNum >= 17: #a = seed % 32 #& 0x1F\n a = seed & 0x1F\n elif levelNum >= 15: #a = seed % 8 + (seed // 16)%2*16 #& 0x17\n a = seed & 0x17\n else: #a = seed % 16 #& 0x0F\n a = seed & 0x0F\n pos = 0x7F - (a+b)\n if (pos > 127) or (pos < 0): #if index outside of the bottle\n #in the asm code, this is taken care of by checking if pos is FF (empty)\n return 
None\n return pos\n\n#isAvailable AND nextEmptyRightDown both check whether the cell is empty \n#redundant \ndef isAvailable(r,c,color):\n global bottle, numRows, numCols\n #empty?\n if bottle[r][c] != None:\n return False\n cset = set([0,1,2])\n #2-away rule? \n if r >= 2:\n cset.discard(bottle[r-2][c])\n if c >= 2:\n cset.discard(bottle[r][c-2])\n if r < numRows - 2:\n cset.discard(bottle[r+2][c])\n if c < numCols - 2:\n cset.discard(bottle[r][c+2])\n return color in cset\n\n#return the next cell \n#or None if reached the end of the bottle \ndef nextCellRightDown(r, c):\n global numRows, numCols\n c = (c + 1) % numCols\n if c == 0:\n r = (r + 1) % 16\n if r == 0: #end of bottle, not cyclic so stops here\n return (None, None)\n return (r,c)\n\n\n#if (r,c) can't have any color and IS empty then infinite loop here\n#remove this helper function and delegate checking if cell is empty to isAvailable?\n# def nextEmptyRightDown(r, c):\n# #print(\"nextEmptyRightDown\")\n# global bottle, numCols, numRows\n# while bottle[r][c] != None:\n# #if (r, c) == (0, numCols-1): #if end of bottle\n# if r == (numRows - 1) and c == (numCols-1):\n# return (None, None)\n# (r, c) = nextCellRightDown(r, c)\n# return (r, c)\n\n\ndef addVirusGB(colorPrefer,seed):\n global bottle, numCols, numRows\n # R = random({0,1,...,numRows-1})\n # C = random({1,2,...,numCols-1})\n #get random index into the bottle \n loc = randomIndex(seed)\n if loc == None: #location is outside of the bottle\n #try next seed \n return False\n #candidate cell is at (R,C)\n R = loc // numCols\n C = loc % numCols\n #get empty cell starting from candidate cell \n #(r,c) = nextEmptyRightDown(R,C) \n (r,c) = (R,C)\n while (r,c) != (None, None): #if not at end of bottle\n colorOffset = 0\n #try all 3 colors for (r,c)\n while colorOffset < 3:\n color = (colorPrefer+colorOffset) % 3\n if isAvailable(r, c, color):\n bottle[r][c] = color\n return color\n colorOffset += 1\n #(r,c) can't have any colors, increment to next cell \n #if nextEmptyRightDown here then will keep returning (r,c)\n (r,c) = nextCellRightDown(r,c)\n return None #no empty cell for this color\n\n#input: one seed at a time\ndef fillBottleGB(numRemain, seed):\n global bottle, colorPrefer\n # colorPrefer = 0\n # numRemain = numVirus\n if addVirusGB(colorPrefer,seed) != None:\n numRemain = numRemain - 1\n colorPrefer = (colorPrefer + 1) % 3\n return numRemain\n\ndef initPuzzleGB(): \n global levelNum, numRows, numCols, bottle, seedList, colorPrefer, numVirus\n assert levelNum >= 0 and levelNum <= 30\n numRows = 10\n if levelNum >= 15: numRows += 1\n if levelNum >= 17: numRows += 1\n if levelNum >= 19: numRows += 1\n if levelNum > 22: numRows += 1\n numCols = 8\n numVirus = min(levelNum,20)*4 + 4\n bottle = [[None for i in range(8)] for j in range(16)]\n\ndef genPuzzleGB():\n global levelNum, numRows, numCols, bottle, seedList, colorPrefer, numVirus\n initPuzzleGB()\n colorPrefer = 0\n numRemain = numVirus\n for seed in seedList: \n if numRemain == 0: #the puzzle is complete \n break\n numRemain = fillBottleGB(numRemain, seed)\n return bottle\n\ndef printBottle():\n global bottle, numCols\n res = \"\"\n for row in bottle:\n res+=\"#\"\n for value in row:\n if value == None: res+= \".\"\n if value == 0: res += \"S\"\n if value == 1: res += \"B\"\n if value == 2: res += \"W\"\n res+=\"#\"\n res+=\"\\n\"\n #bottom wall\n for _ in range(numCols+2):\n res+=\"#\"\n print(res)\n\nif __name__ == \"__main__\":\n levelNum = int(sys.argv[1])\n seedList = [71, 46, 196, 201, 189, 243, 210, 83, 68, 112, 
185, 8, 211, 151, 206, 147, 243, 133, 175, 104, \n        205, 96, 78, 150, 198, 6, 182, 95, 101, 111, 4, 21, 114, 104, 40, 21, 84, 92, 6, 42, 195, 76, 203, 156, 77, 1, 61, 97, 174, 16, 76, 181, 213, 139, 188, 166, 172, 39, 220, 63, 39, 52, 48, 14, 253, 68, 227, 135, 198, 57, 32, 90, 253, 148, 32, 89, 174, 73, 59, 64, 71, 254, 246, 60, 88, 141, 21, 74, 50, 106, 239, 205, 147, 137, 31, 132, 22, 20, 155, 112]\n    # a = [None]*100\n    # for _ in range(0,100):\n    #     a[_] = random.randint(0,255)\n    # print(a)\n    genPuzzleGB()\n    printBottle()\n","repo_name":"trangqngo/Dr-Mario-virus-generation","sub_path":"GameBoy-standardized.py","file_name":"GameBoy-standardized.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"35242040699","text":"from CustomRFLib.PerformanceFeedback import get_sales_performance_feedback\nfrom CustomRFLib.ExampleLibrary import ExampleLibrary as EL\n\n\ndef main():\n    print('this message is from main function')\n    sales_result = 100\n    sales_target = 200\n    Comment = get_sales_performance_feedback(sales_result,sales_target)\n    print('The comment from our boss is {}'.format(Comment))\n    ExampleLibInst = EL()\n    print( 'The date of today is {}\\n'.format(ExampleLibInst.get_current_data() ) )\n\n\nif __name__ == '__main__':\n    main()","repo_name":"samliu0631/RPA","sub_path":"RFCases/AddLibrary/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"8280579497","text":"from statistics import multimode\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\ndef odlegloscXPoCzasie(predkoscPoczatkowa: float, czas: float, kat: float) -> float:\n    return predkoscPoczatkowa*czas*math.cos(np.deg2rad(kat))\n\ndef czasLotu(predkoscPoczatkowa: float, kat: float) -> float:\n    return (2*predkoscPoczatkowa*math.sin(np.deg2rad(kat)))/9.81\n\ndef maxWysokosc(predkoscPoczatkowa: float, kat: float) -> float:\n    return (predkoscPoczatkowa**2 * math.sin(np.deg2rad(kat))**2)/(2*9.81)\n\nif __name__ == \"__main__\":\n    V0 = float(input(\"Enter the initial velocity: \"))\n    alpha = float(input(\"Enter the launch angle (in degrees): \"))\n\n    print(\"Flight time: \"+str(czasLotu(V0,alpha))+\"s\")\n    print(\"Projectile range: \"+str(odlegloscXPoCzasie(V0, czasLotu(V0, alpha),alpha))+\"m\")\n    print(\"Maximum height: \"+str(maxWysokosc(V0,alpha))+\"m\")\n\n    t = np.linspace(0,czasLotu(V0,alpha))\n\n    #Compute the velocity components as functions of time\n    Vx = np.linspace(V0*math.cos(np.deg2rad(alpha)),V0*math.cos(np.deg2rad(alpha)))\n    Vy = V0*math.sin(np.deg2rad(alpha)) - np.multiply(9.81,t)\n\n    #Create the velocity vs. time plot\n    fig, ax1 = plt.subplots()\n\n    color = 'tab:red'\n    ax1.set_xlabel('time [s]')\n    ax1.set_ylabel('Vx [m/s]', color=color)\n    ax1.plot(t,Vx, color=color)\n    ax1.tick_params(axis='y', labelcolor=color)\n\n    ax2 = ax1.twinx()\n\n    color = 'tab:blue'\n    ax2.set_ylabel('Vy [m/s]', color=color)\n    ax2.plot(t, Vy, color=color)\n    ax2.tick_params(axis='y', labelcolor=color)\n\n    fig.tight_layout()\n\n    #Compute the positions as functions of time\n    Sx = np.multiply(V0*math.cos(np.deg2rad(alpha)),t)\n    Sy = np.multiply(V0*math.sin(np.deg2rad(alpha)),t) + np.multiply(-9.81/2,np.power(t,2))\n\n    #Create the position vs. time plot\n    fig, ax1 = plt.subplots()\n\n    color = 'tab:red'\n    ax1.set_xlabel('time [s]')\n    ax1.set_ylabel('Sx [m]', color=color)\n    ax1.plot(t,Sx, color=color)\n    ax1.tick_params(axis='y', labelcolor=color)\n\n    ax2 = ax1.twinx()\n\n    color = 'tab:blue'\n    ax2.set_ylabel('Sy [m]', color=color)\n    ax2.plot(t, Sy, color=color)\n    ax2.tick_params(axis='y', labelcolor=color)\n\n    fig.tight_layout()\n\n\n    #Create the trajectory plot\n    fig, ax1 = plt.subplots()\n\n    color = 'tab:red'\n    ax1.set_xlabel('Sx [m]')\n    ax1.set_ylabel('Sy [m]', color=color)\n    ax1.plot(Sx,Sy, color=color)\n    ax1.tick_params(axis='y', labelcolor=color)\n\n    fig.tight_layout()\n    plt.show()","repo_name":"DorianSzlachcic/jsp2021","sub_path":"Lista9/zadanie3.py","file_name":"zadanie3.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"22310191040","text":"from abc import ABC, abstractmethod\nimport argparse\nfrom dataclasses import dataclass\n@dataclass\nclass Config(ABC):\n    test: bool = False\n    verbose: bool = False\n    very_verbose: bool = False\n    debug: bool = False\n\n    @abstractmethod\n    def parse(self, **kwargs) -> None:\n        ...\n\n@dataclass\nclass ArgConfig(Config):\n    def parse(self, args: argparse.Namespace) -> None:\n        if hasattr(args, 'test'):\n            self.test = args.test\n        if args.verbose:\n            self.verbose = True\n            if args.verbose >= 2:\n                self.very_verbose = True\n            if args.verbose >= 3:\n                self.debug = True\n","repo_name":"0xFA7E/data_clean","sub_path":"data_clean/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"43736853487","text":"import polars as pl\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pathlib\nfrom dataclasses import dataclass\nfrom functools import partial, singledispatch\n\npath: pathlib.Path = 'data/Police_Department_Incident_Reports__Historical_2003_to_May_2018.csv'\ndata1 = pl.scan_csv(path).collect()\n\npath: pathlib.Path = 'data/Police_Department_Incident_Reports__2018_to_Present.csv'\ndata2 = pl.scan_csv(path).collect()\n\ndef correct_time(dataframe):\n    return dataframe.with_columns(\n        pl.col('Time').str.strptime(\n            datatype = pl.Time,\n            fmt='%H:%M'\n        )\n    )\n\ndata1 = correct_time(data1)\ndata2 = correct_time(data2)\n\n# == summary statistics dataset 1 ==\n\n# number of entries in each category\ndata1.groupby(pl.col('Category')).agg(pl.count())\ndata1.groupby(pl.col('Category')).agg(pl.count()).to_numpy()\n\n# median for time of incident for each category\ndata1.groupby(pl.col('Category')).agg(pl.col('Time').median()).to_numpy()\n\n# == summary statistics dataset 2 ==\n\n# number of entries in each category\ndata2.groupby(pl.col('Category')).agg(pl.count()).to_numpy()\n\n# median for time of incident for each category\ndata2.groupby(pl.col('Category')).agg(pl.col('Time').median()).to_numpy()\n","repo_name":"Starostka/socviz","sub_path":"sandbox/archive/summary_statistics.py","file_name":"summary_statistics.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"73245770998","text":"\n# coding: utf-8\n\n# In[3]:\n\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\n\n# In[4]:\n\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# In[5]:\n\n\nX_train= mnist.train.images[:,:]\nX_test = mnist.test.images[:,:]\ny_train = mnist.train.labels\ny_test = mnist.test.labels\n\n\n# In[20]:\n\n\nprint(X_train.shape)\nprint(y_test.shape)\n\n\n# 
In[7]:\n\n\nimage = X_train[530].reshape([28,28])\nplt.imshow(image, cmap=plt.cm.binary)#=plt.get_cmap('viridis'))\nplt.show()\n\n\n# In[8]:\n\n\n# Parameters\nlearning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 100\ndisplay_step = 1\n\n\n# In[9]:\n\n\n# Network Parameters\nn_hidden_1 = 256 # 1st layer number of neurons\nn_hidden_2 = 256 # 2nd layer number of neurons\nn_input = 784 # MNIST data input (img shape: 28*28)\nn_classes = 10 # MNIST total classes (0-9 digits)\n\n\n# In[10]:\n\n\n# tf Graph input\nX = tf.placeholder(\"float\", [None, n_input])\nY = tf.placeholder(\"float\", [None, n_classes])\n\n\n# In[11]:\n\n\n# Store layers weight & bias\nweights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n}\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\n\n# In[12]:\n\n\n# Create model\ndef multilayer_perceptron(x):\n # Hidden fully connected layer with 256 neurons\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n layer_2 = tf.nn.relu(layer_2)\n \n # Output fully connected layer with a neuron for each class\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n return out_layer\n\n\n\n# In[13]:\n\n\n# Construct model\nlogits = multilayer_perceptron(X)\n\n# Define loss and optimizer\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=Y))\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss_op)\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\n\n# In[14]:\n\n\nsess = tf.Session()\n\n\n# In[15]:\n\n\n\nsess.run(init)\n\n # Training cycle\nfor epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = int(mnist.train.num_examples/batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop) and cost op (to get loss value)\n _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,\n Y: batch_y})\n # Compute average loss\n avg_cost += c / total_batch\n # Display logs per epoch step\n if epoch % display_step == 0:\n #print(\"Epoch:\", '%04d' % (epoch+1), \"cost={:.9f}\".format(avg_cost))\n print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\",'%.9f' %(avg_cost))\n\nprint(\"Optimization Finished!\")\n\n # Test model\npred = tf.nn.softmax(logits) # Apply softmax to logits\ncorrect_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))\n # Calculate accuracy\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\nprint(\"Accuracy:\", sess.run(accuracy, feed_dict={X: X_test, Y: y_test}))\n \n \n\n\n# In[30]:\n\n\ndef display_compare(num):\n x_test= mnist.test.images[num,:].reshape(1,784)\n y_test = mnist.test.labels[num,:]\n\n label = y_test.argmax()\n # THIS GETS OUR PREDICTION AS A INTEGER\n prediction = sess.run(pred, feed_dict={X: x_test}).argmax()\n plt.title('Prediction: %d Label: %d' % (prediction, label))\n plt.imshow(x_test.reshape([28,28]), cmap=plt.cm.binary)# plt.get_cmap('gray_r'))\n plt.show()\n\n\n# In[49]:\n\n\nimport random as ran\ndisplay_compare(ran.randint(0, 
10000))\n\n","repo_name":"ArnabPurohit/Machine-Learning-applications-in-HEP","sub_path":"Hands-on-sessions/MNIST_ANN.py","file_name":"MNIST_ANN.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"38594816263","text":"#\n# @lc app=leetcode id=25 lang=python3\n#\n# [25] Reverse Nodes in k-Group\n#\nfrom dis import pretty_flags\nfrom typing import List, Optional\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n def print_sequence_nodes(self):\n node = self\n while node:\n print(node.val, end=' ')\n node = node.next\n print()\n\n\n# @lc code=start\nclass Solution:\n # Solution 1: for 1 -> k, for each group reverse same as reverse all list\n # Solution 2 (better): count number elements of list, for each group reverse inline\n # Solution 3 (more better): count number elements of list, reverse inline for all elements\n\n def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n return self.reverseKGroup(head=head, k=k)\n\n # TODO: Make better solution works\n def reverseKGroup_better(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n dummy = ListNode(next=head)\n cnt, node = 0, head\n while node:\n cnt += 1\n node = node.next\n\n prev, prev_tail = dummy, dummy\n node = dummy.next\n while cnt >= k:\n for _ in range(0, k):\n cnt -= 1\n next = node\n node.next = prev\n prev = node\n node = next\n\n t = prev_tail.next\n prev_tail.next = prev\n prev_tail = t\n\n return dummy.next\n\n\n def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n head = ListNode(next=head)\n prev_group = head\n node = head.next\n\n while True:\n i = 0\n head_group = node\n while node and i < k:\n node = node.next\n i += 1\n if i != k:\n prev_group.next = head_group\n break\n\n local_head, local_tail = self.reverseLL(head=head_group, k=k)\n prev_group.next = local_head\n prev_group = local_tail\n\n return head.next\n\n\n def reverseLL(self, head: Optional[ListNode], k: int = 9999):\n node: ListNode = head\n prev = None\n i = 0\n while node and i < k:\n next = node.next\n node.next = prev\n prev = node\n node = next\n i += 1\n return prev, head\n \n \n# @lc code=end\nif __name__ == '__main__':\n head = ListNode()\n node = head\n for n in range(1, 9):\n node.next = ListNode(val=n)\n node = node.next\n \n # head, tail = Solution().reverseLL(head=head.next)\n # head.print_sequence_nodes()\n\n head = Solution().reverseKGroup(head=head.next, k=3)\n head.print_sequence_nodes()\n","repo_name":"anhtranbk/challengers","sub_path":"leetcode/25.reverse-nodes-in-k-group.py","file_name":"25.reverse-nodes-in-k-group.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"42322263275","text":"from aniversario.views import PessoaListView, Home, router, pizzas\nfrom django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^$',\n Home.as_view()\n , name='home'\n ),\n url(r'^pizzas/$',\n pizzas\n , name='pizzas'\n ),\n\n\n url(r'^ajax/',\n include(router.urls)\n , name='pessoa-list'\n ),\n\n # url(r'^blog/', include('blog.urls')),\n #url(r'^djangular/', 
include('djangular.urls')),\n url(r'^admin/', include(admin.site.urls)),\n)\n\n\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n\nurlpatterns += staticfiles_urlpatterns()\n","repo_name":"luzfcb/angularapp","sub_path":"angularapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"29543923840","text":"import argparse\nimport json\nimport logging\nimport subprocess\nimport sys\nfrom typing import Optional\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--verbose\", \"-v\", action=\"store_true\", default=False)\n parser.add_argument(\"mac\", type=str, nargs=\"*\",\n help=\"MAC address to include in the whitelist\")\n args = parser.parse_args()\n level = logging.INFO if args.verbose else logging.WARNING\n logging.basicConfig(level=level)\n macs = map(lambda s: s.lower(), args.mac)\n sys.exit(\n 0\n if get_router_mac_address() in macs\n else 1\n )\n\ndef get_router_mac_address() -> Optional[str]:\n route_process = subprocess.run(\n [\"ip\", \"--json\", \"route\", \"list\"], capture_output=True\n )\n routes = json.loads(route_process.stdout.decode())\n logging.info(f\"Routes: {routes}\")\n\n gateway_ips = [\n route[\"gateway\"] for route in routes if route[\"dst\"] == \"default\"\n ]\n logging.info(f\"Gateway IPs: {gateway_ips}\")\n if len(set(gateway_ips)) != 1:\n logging.info(\n f\"{len(set(gateway_ips))} different gateway IPs, giving up.\"\n )\n return None\n\n gateway_ip = gateway_ips[0]\n subprocess.run([\"ping\", \"-c\", \"1\", gateway_ip], capture_output=True)\n neighbour_process = subprocess.run(\n [\"ip\", \"--json\", \"neigh\"], capture_output=True\n )\n neighbours = json.loads(neighbour_process.stdout.decode())\n logging.info(f\"Neighbours: {neighbours}\")\n\n gateway_macs = [\n neighbour[\"lladdr\"]\n for neighbour in neighbours\n if neighbour[\"dst\"] == gateway_ip and \"lladdr\" in neighbour\n ]\n logging.info(f\"Gateway MACs: {gateway_macs}\")\n\n return (\n gateway_macs[0].lower()\n if len(set(gateway_macs)) == 1\n else None\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pont-us/checkroutermac","sub_path":"checkroutermac.py","file_name":"checkroutermac.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"11821329586","text":"# coding: utf-8\n\nfrom datetime import datetime\n\nfrom werkzeug.utils import cached_property\n\nfrom libs.db.store import db\nfrom libs.cache import mc, cache\nfrom core.models.base import EntityModel\n\n\nclass UniversePushRecord(EntityModel):\n\n table_name = 'pusher_universe_record'\n cache_key = 'pusher:universe_record:v1:{id_}'\n cache_key_by_bulletin = 'pusher:universe_record:v1:bulletin:{bulletin_id}'\n\n def __init__(self, id_, bulletin_id, is_pushed, jmsg_id, creation_time, push_time):\n self.id_ = str(id_)\n self.bulletin_id = str(bulletin_id)\n self.is_pushed = is_pushed\n self.jmsg_id = jmsg_id\n self.creation_time = creation_time\n self.push_time = push_time\n\n @cached_property\n def bulletin(self):\n from core.models.site.bulletin import Bulletin\n return Bulletin.get(self.bulletin_id)\n\n @classmethod\n def create(cls, bulletin):\n from core.models.site.bulletin import Bulletin\n assert isinstance(bulletin, Bulletin)\n\n sql = ('insert into {.table_name} (bulletin_id, is_pushed, creation_time) '\n 'values (%s, %s, %s)').format(cls)\n 
params = (bulletin.id_, False, datetime.now())\n id_ = db.execute(sql, params)\n db.commit()\n\n cls.clear_cache(id_)\n cls.clear_cache_by_bulletin(bulletin.id_)\n return cls.get(id_)\n\n @classmethod\n @cache(cache_key)\n def get(cls, id_):\n sql = ('select id, bulletin_id, is_pushed, jmsg_id, creation_time, '\n 'push_time from {.table_name} where id=%s').format(cls)\n params = (id_,)\n rs = db.execute(sql, params)\n return cls(*rs[0]) if rs else None\n\n @classmethod\n @cache(cache_key_by_bulletin)\n def get_id_by_bulletin_id(cls, bulletin_id):\n sql = 'select id from {.table_name} where bulletin_id=%s'.format(cls)\n params = (bulletin_id, )\n rs = db.execute(sql, params)\n if rs:\n return rs[0][0]\n\n @classmethod\n def get_by_bulletin_id(cls, bulletin_id):\n id_ = cls.get_id_by_bulletin_id(bulletin_id)\n return cls.get(id_)\n\n def mark_as_pushed(self, msg_id):\n \"\"\"标记推送已发出\"\"\"\n sql = ('update {.table_name} set is_pushed=%s, jmsg_id=%s, '\n 'push_time=%s where id=%s').format(self)\n params = (True, msg_id, datetime.now(), self.id_)\n db.execute(sql, params)\n db.commit()\n\n new_state = vars(self.get(self.id_))\n vars(self).update(new_state)\n\n self.clear_cache(self.id_)\n self.clear_cache_by_bulletin(self.bulletin_id)\n\n @classmethod\n def clear_cache(cls, id_):\n mc.delete(cls.cache_key.format(id_=id_))\n\n @classmethod\n def clear_cache_by_bulletin(cls, bulletin_id):\n mc.delete(cls.cache_key_by_bulletin.format(**locals()))\n","repo_name":"c1xfr2e/soledad","sub_path":"core/models/pusher/universe_record.py","file_name":"universe_record.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"26900277982","text":"import os\nfrom typing import List\n\nimport pandas as pd\n\n\nclass Reader:\n\n def __init__(self, dataset: str):\n self.data_path = './data'\n self.dataset = dataset\n self.text_file = 'seq.in'\n self.slot_file = 'seq.out'\n self.intent_file = 'label'\n\n @staticmethod\n def _read_file(filename, split=True):\n with open(filename, mode=\"r\", encoding=\"utf-8\") as f:\n return [line.strip().split() if split else line.strip() for line in f]\n\n def read_dataset(self, mode='train'):\n sentences = Reader._read_file(os.path.join(self.data_path, self.dataset, mode, self.text_file))\n slots = Reader._read_file(os.path.join(self.data_path, self.dataset, mode, self.slot_file))\n intents = Reader._read_file(os.path.join(self.data_path, self.dataset, mode, self.intent_file), split=False)\n assert len(sentences) == len(slots) == len(intents)\n\n return sentences, slots, intents\n\n def construct_intent_and_slot_vocabs(self, write_to_disk=True):\n _, slots, intents = self.read_dataset(mode='train')\n\n sorted_intent_labels = sorted(list(set(intents)))\n\n slot_labels = {slot for line in slots for slot in line}\n sorted_slot_labels = sorted(list(slot_labels),\n key=lambda slot_name: (slot_name[2:], slot_name[:2]))\n\n # Add \"unknown\" token in case of missing intents and slots in the training set\n # and \"padding\" slot to ignore slot predictions referring to the padding token with id = -100\n sorted_intent_labels = [\"UNK\"] + sorted_intent_labels\n sorted_slot_labels = [\"UNK\", \"PAD\"] + sorted_slot_labels\n\n if write_to_disk:\n with open(os.path.join(self.data_path, self.dataset, \"intent_labels.txt\"), mode=\"w\", encoding=\"utf-8\") as f:\n for intent in sorted_intent_labels:\n f.write(intent + '\\n')\n with open(os.path.join(self.data_path, self.dataset, 
\"slot_labels.txt\"), mode=\"w\", encoding=\"utf-8\") as f:\n for slot in sorted_slot_labels:\n f.write(slot + '\\n')\n else:\n return sorted_intent_labels, sorted_slot_labels\n\n def get_intent_labels(self):\n return Reader._read_file(os.path.join(self.data_path, self.dataset, \"intent_labels.txt\"), split=False)\n\n def get_slot_labels(self):\n return Reader._read_file(os.path.join(self.data_path, self.dataset, \"slot_labels.txt\"), split=False)\n\n def save_test_preds_to_csv(self, slot_preds: List[List[str]], intent_preds: List[str]):\n sentences, slots, intents = self.read_dataset(mode='test')\n df = pd.DataFrame(list(zip(sentences, slots, intents, slot_preds, intent_preds)),\n columns=['utterance', 'slots', 'intent', 'intent_pred', 'slots_preds'])\n\n df.to_csv(f\"{self.dataset}_test_preds.csv\")\n\n def construct_df_from_dataset(self, mode='train'):\n sentences, slots, intents = self.read_dataset(mode=mode)\n\n return pd.DataFrame(list(zip(sentences, slots, intents)),\n columns=['utterance', 'slots', 'intent'])\n","repo_name":"tassiasP/Intent_Recognition_and_Slot_Filling-using_BERT_and_Prompting","sub_path":"reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"29155931690","text":"import numpy as np\nfrom sklearn.cluster import MiniBatchKMeans\n\nfrom bias_assessment_module.BiasAssessor import BiasAssessor\nfrom bias_assessment_module.Utils import Utils\n\n\nclass EmbeddigsClusterer:\n \"\"\"\n implements the unsupervised approach to finding new target word sets from the embedding's own vocabulary\n using the k-means++ algorithm for clustering word embeddings.\n \"\"\"\n\n def __init__(self, cluster2word, model, config):\n self._model = model\n self._config = config\n self._cluster2word = cluster2word\n\n @staticmethod\n def create(model, config):\n \"\"\"\n instantiates the EmbeddingsClusterer object by applying the MiniBatchKMeans\n algorithm to the model vectors and assigning each word from the model vocabulary to a cluster.\n :param model: word embeddings model\n :param config: EmbeddingsClustererConfig object\n :return: new EmbeddingsClusterer instance\n \"\"\"\n # create an instance of MiniBatchKMeans\n mbk = MiniBatchKMeans(init=config.init, n_clusters=config.n_clusters, batch_size=config.batch_size,\n max_no_improvement=config.max_no_improvement, verbose=config.verbose)\n # cluster the word embeddings of the model\n mbk.fit(model.wv.vectors)\n cluster2word = {}\n for word, cluster in zip(model.wv.vocab, mbk.labels_):\n cluster_record = cluster2word.get(cluster, None)\n if cluster_record is None:\n cluster_record = []\n cluster2word[cluster] = cluster_record\n cluster_record.append(word)\n return EmbeddigsClusterer(cluster2word, model, config)\n\n def calculate_score(self, weat_config):\n \"\"\"\n calculates score for each word in a cluster.\n :param weat_config: WeatConfig object\n :return: map cluster => word scores grouped and mapped by cluster id\n \"\"\"\n score_for_word_in_cluster = {}\n # filter out attribute words from the list A that are not in model vocabulary.\n a_attrs, _ = Utils.filter_list(self._model.wv.vocab, weat_config.a)\n # filter out attribute words from the list B that are not in model vocabulary.\n b_attrs, _ = Utils.filter_list(self._model.wv.vocab, weat_config.b)\n for cluster in sorted(self._cluster2word.keys()):\n cluster_words = self._cluster2word[cluster]\n word_score = []\n for word in cluster_words:\n # calculate 
the cosine mean difference between the vector of the word in the cluster\n # and vectors of the attribute words in list A and vectors of the attribute words in list B.\n score = BiasAssessor.cosine_means_difference(self._model.wv, word, a_attrs, b_attrs)\n word_score.append((word, score))\n score_for_word_in_cluster.update({cluster: word_score})\n return score_for_word_in_cluster\n\n def get_target_words(self, score_for_word_in_cluster):\n \"\"\"\n creates target words lists X and Y.\n :param score_for_word_in_cluster: map cluster => word scores grouped and mapped by cluster id\n :return: list of tuples consisting of the target word list X and the target word list Y.\n \"\"\"\n target_words = []\n for cluster, word_score in score_for_word_in_cluster.items():\n y_target_words = []\n y_target_scores = []\n x_target_words = []\n x_target_scores = []\n # Based on the scores for each word in a cluster,\n # add the word either to the target word list X or to the target word list Y.\n for word, score in word_score:\n if score < 0:\n y_target_words.append(word)\n y_target_scores.append(score * -1) # convert score to a positive value\n else:\n x_target_words.append(word)\n x_target_scores.append(score)\n # select the words with the highest score to create the final X and Y target words sets\n x_target_words, y_target_words = self.choose_words(x_target_words, y_target_words, x_target_scores,\n y_target_scores, self._config.cluster_words_count)\n target_words.append((x_target_words, y_target_words))\n return target_words\n\n def prepare_config_for_weat(self, bias_category):\n \"\"\"\n is never used??\n :param bias_category: bias category name\n :return:\n \"\"\"\n score_for_word_in_cluster = self.calculate_score(bias_category)\n target_words_from_clusters = self.get_target_words(score_for_word_in_cluster)\n for x_target_words, y_target_words in target_words_from_clusters:\n weat_config_for_cluster = bias_category.copy({\n \"x\": x_target_words,\n \"y\": y_target_words\n })\n return weat_config_for_cluster\n\n @staticmethod\n def choose_words(x_target_words, y_target_words, x_target_scores, y_target_scores, nwords=-1):\n \"\"\"\n selects the best X and Y target words based on the word scores.\n :param x_target_words: list X of target words\n :param y_target_words: list Y of target words\n :param x_target_scores: list of scores for each word in the X target word list\n :param y_target_scores: list of scores for each word in the Y target word list\n :param nwords: maximum number of words in the single target word set\n :return: tuple of target word sets X and Y, each consisting of selected target words\n \"\"\"\n n = np.min([len(x_target_words), len(y_target_words)] + ([nwords] if nwords >= 0 else []))\n x_target_scores = np.array(x_target_scores)\n y_target_scores = np.array(y_target_scores)\n x_topn_indices = x_target_scores.argsort()[::-1][:n]\n y_topn_indices = y_target_scores.argsort()[::-1][:n]\n sel_x_words = [x_target_words[i] for i in x_topn_indices]\n sel_y_words = [y_target_words[i] for i in y_topn_indices]\n return sel_x_words, sel_y_words\n\n @property\n def model(self):\n return self._model\n","repo_name":"vlebedynska/word-embeddings-childrens-books","sub_path":"bias_assessment_module/EmbeddingsClusterer.py","file_name":"EmbeddingsClusterer.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"12464204615","text":"from odoo import models, fields\n\n\nclass MedicalSpecialty(models.Model):\n 
_name = 'medical.specialty'\n _description = 'Medical Specialties'\n\n code = fields.Char(\n string='ID',\n help='Speciality Code',\n size=256,\n )\n name = fields.Char(\n string='Specialty',\n help='Name of the specialty',\n size=256,\n required=True,\n translate=True,\n )\n\n _sql_constraints = [\n ('name_uniq', 'UNIQUE(name)', 'Name must be unique!'),\n ]\n","repo_name":"icrea-t/OdooV10-vertical_medical","sub_path":"medical_physician/models/medical_specialty.py","file_name":"medical_specialty.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"17317680159","text":"from dao.dao import DAO\nfrom entidade.jogo import Jogo\nfrom exception.nao_encontrado_error import NaoEncontradoErro\n\n\nclass JogoDAO(DAO):\n __instancia = None\n\n def __init__(self):\n super().__init__('jogo.pkl')\n\n def __new__(cls):\n if JogoDAO.__instancia is None:\n JogoDAO.__instancia = object.__new__(cls)\n return JogoDAO.__instancia\n \n def add(self, jogo: Jogo):\n if isinstance(jogo, Jogo) and isinstance(jogo.id, int):\n super().add(jogo.id, jogo)\n\n def get(self, id_jogo: int):\n try:\n if isinstance(id_jogo, int):\n return super().get(id_jogo)\n except KeyError:\n raise NaoEncontradoErro('jogo')\n\n def remove(self, jogo: Jogo):\n try:\n if isinstance(jogo, Jogo) and isinstance(jogo.id, int):\n super().remove(jogo.id)\n except KeyError:\n raise NaoEncontradoErro('jogo')\n","repo_name":"Davitorino/naval-warfare-game","sub_path":"dao/jogo_dao.py","file_name":"jogo_dao.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"31838173725","text":"nums = [9, 8, 7, 6, 5, 4, 3, 2, 1]\r\nprint(\"PRE SORT: {0}\".format(nums))\r\n\r\n\r\ndef bubble_sort(arr):\r\n iteration_count = 0\r\n for i in range(len(arr)):\r\n # iterate through unplaced elements\r\n for idx in range(len(arr) - i - 1):\r\n iteration_count += 1\r\n if arr[idx] > arr[idx + 1]:\r\n # replacement for swap function\r\n arr[idx], arr[idx + 1] = arr[idx + 1], arr[idx]\r\n \r\n print(\"POST-OPTIMIZED ITERATION COUNT: {0}\".format(iteration_count))\r\n\r\n\r\nbubble_sort(nums)\r\nprint(\"POST SORT: {0}\".format(nums))","repo_name":"anjiladhikari/allPythonPractice","sub_path":"sortingInPython/bubbleWithOpt.py","file_name":"bubbleWithOpt.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"39131549273","text":"import torch\nimport torch.nn as nn\nimport neko_sdk.encoders.ocr_networks.dan.dan_reslens_naive as rescco\n\n'''\nLens_Feature_Extractor\n'''\nclass neko_cco_Feature_Extractor(nn.Module):\n def __init__(self, strides, compress_layer, input_shape,hardness=2,oupch=512,expf=1):\n super(neko_cco_Feature_Extractor, self).__init__()\n self.model = rescco.res_naive_lens45(strides, compress_layer,hardness,oupch=oupch,inpch=input_shape[0],expf=expf)\n self.input_shape = input_shape\n\n def forward(self, input,debug=False):\n features,grid = self.model(input)\n if debug:\n return features,grid;\n return features\n\n def Iwantshapes(self):\n pseudo_input = torch.rand(1, self.input_shape[0], self.input_shape[1], self.input_shape[2])\n features,grio = self.model(pseudo_input)\n return [feat.size()[1:] for feat in 
features]\n","repo_name":"lancercat/VSDF","sub_path":"neko_2020nocr/dan/dan_modules_pami/neko_xtra_fe.py","file_name":"neko_xtra_fe.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"4"} +{"seq_id":"3864780490","text":"from random import randint\nfrom sys import argv\nscript, filename, n = argv\n\n\ndef main():\n    file = open(filename, 'w+')\n    for i in range(0, int(n)):\n        nums = randint(1, 10)\n        file.write(str(nums) + \" \")  # write the number followed by a space; \" \".join(str(nums)) would space out the digits of a single number instead\n\n    file.close()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"smilen4eto/week0-file-problems","sub_path":"generate_numbers.py","file_name":"generate_numbers.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"8253980711","text":"from django.conf.urls import url\n\nfrom . import views\n\n# from .views import CategoryViewSet, EntryViewSet, UserViewSet\n\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nurlpatterns = [\n    # url(r'^$', views.index, name='index'),\n    # url(r'^categorys/(?P\\d+)/$', views.categorys, name='categorys'),\n    # url(r'^category/(?P\\d+)/(?P\\d+)/$', views.category, name='category'),\n    # url(r'^new_category$', views.new_category, name='new_category'),\n    # url(r'^edit_category/(?P\\d+)/$', views.edit_category, name='edit_category'),\n    # url(r'^new_entry/(?P\\d+)/$', views.new_entry, name='new_entry'),\n    # url(r'^edit_entry/(?P\\d+)/$', views.edit_entry, name='edit_entry'),\n    # url(r'^delete_entry/(?P\\d+)/(?P\\d+)/(?P\\d+)/$', views.delete_entry, name='delete_entry'),\n    # url(r'^delete_category/(?P\\d+)/(?P\\d+)/$', views.delete_category, name='delete_category'),\n    # url(r'^test$', views.test, name='test'),\n    # url(r'^search$', views.search, name='search'),\n\n\n    url(r'^categorys/$', views.CategoryList.as_view()),\n    url(r'^categorys/(?P<pk>[0-9]+)/$', views.CategoryDetail.as_view()),\n\n    url(r'^entrys/$', views.EntryList.as_view()),\n    url(r'^entrys/(?P<pk>[0-9]+)/$', views.EntryDetail.as_view()),\n\n    url(r'^entry/$', views.EntryList.as_view()),\n    url(r'^entry/(?P<pk>[0-9]+)/$', views.EntryDetail.as_view()),\n\n    url(r'^users/$', views.UserList.as_view()),\n    url(r'^users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n\n\n# category_list = CategoryViewSet.as_view({\n#     'get': 'list',\n#     'post': 'create',\n#     })\n# category_detail = CategoryViewSet.as_view({\n#     'get': 'retrieve',\n#     'put': 'update',\n#     'patch': 'partial_update',\n#     'delete': 'destroy',\n#     })\n#\n#\n# entry_list = EntryViewSet.as_view({\n#     'get': 'list',\n#     'post': 'create',\n#     })\n# entry_detail = EntryViewSet.as_view({\n#     'get': 'retrieve',\n#     'put': 'update',\n#     'patch': 'partial_update',\n#     'delete': 'destroy',\n#     })\n#\n#\n# user_list = UserViewSet.as_view({\n#     'get': 'list',\n#     })\n# user_detail = UserViewSet.as_view({\n#     'get': 'retrieve',\n#     })\n#\n#\n# urlpatterns = format_suffix_patterns([\n#     url(r'^$', api_root),\n#     url(r'^categorys/$', category_list, name='category-list'),\n#     url(r'^categorys/(?P<pk>[0-9])/$', category_detail, name='category-detail'),\n#     url(r'^entrys/$', entry_list, name='entry-list'),\n#     url(r'^entrys/(?P<pk>[0-9])/$', entry_detail, name='entry-detail'),\n#     url(r'^users/$', user_list, name='entry-list'),\n#     url(r'^users/(?P<pk>[0-9])/$', user_detail, name='entry-detail'),\n# 
])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"PhelanWang/Project","sub_path":"address_books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"14623142132","text":"# @author Tasuku Miura\n# @brief PyTorch implementation of PilotNet (Assumes CUDA enabled)\n\nimport os\nimport pickle\n\nfrom skimage import io, transform\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom torch.utils.data import Dataset, DataLoader, ConcatDataset\nfrom torchvision import transforms, utils\n\n\n# used for logging to TensorBoard\nfrom tensorboard_logger import configure, log_value\n\nfrom data_loader import *\nfrom data_transforms import *\nfrom pilot_net import *\n\nimport math\n\n\ndef get_device_stats():\n print(\"Device count: {}\".format(torch.cuda.device_count()))\n # only can use from PyTorch v0.4\n # print(\"Max_memory_allocated: {}\".format(torch.cuda.max_memory_allocated()))\n # print(\"Max_memory_cached: {}\".format(torch.cuda.max_memory_cached()))\n # print(\"Memory_allocated: {}\".format(torch.cuda.memory_allocated()))\n\n\ndef train_one_epoch_sequence(epoch, model, loss_fn, optimizer, train_loader):\n model.train()\n print(\"Epoch {} starting.\".format(epoch))\n epoch_loss = 0\n for batch in train_loader:\n data, target = torch.squeeze(torch.stack(batch['image'])).cuda(), batch['steer'].cuda()\n data = Variable(data).type(torch.cuda.FloatTensor)\n target = Variable(target).type(torch.cuda.FloatTensor)\n\n predict = model(data)\n loss = loss_fn(predict, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n epoch_loss += loss.data[0]\n\n epoch_loss /= len(train_loader.dataset)\n print(\"Epoch {:.4f}: Train set: Average loss: {:.6f}\\t\".format(epoch, epoch_loss))\n log_value('train_loss', epoch_loss, epoch)\n\n\ndef validate_sequence(epoch, model, loss_fn, optimizer, valid_loader):\n model.eval()\n valid_loss = 0\n for batch in valid_loader:\n data, target = torch.stack(batch['image']).cuda(), batch['steer'].cuda()\n data = Variable(data, volatile=True).type(torch.cuda.FloatTensor)\n target = Variable(target).type(torch.cuda.FloatTensor)\n predict = model(data, train=False)\n valid_loss += loss_fn(predict, target).data[0] # sum up batch loss\n\n valid_loss /= len(valid_loader.dataset)\n print('Valid set: Average loss: {:.6f}\\n'.format(valid_loss))\n log_value('valid_loss', valid_loss, epoch)\n return valid_loss\n\n\ndef test_sequence(model, loss_fn, optimizer, test_loader):\n model.eval()\n images = []\n targets = []\n predicts = []\n test_loss = 0\n for batch in test_loader:\n data, target = torch.stack(batch['image']).cuda(), batch['steer'].cuda()\n data = Variable(data, volatile=True).type(torch.cuda.FloatTensor)\n target = Variable(target).type(torch.cuda.FloatTensor)\n output = model(data, train=False)\n test_loss += loss_fn(output, target).data[0] # sum up batch loss\n\n # Store image path as raw image too large.\n images.append(batch['image_path'])\n targets.append(target.data.cpu().numpy())\n predicts.append(output.data.cpu().numpy())\n\n test_loss /= len(test_loader.dataset)\n print('Test set: Average loss: {:.4f}\\n'.format(test_loss))\n\n data_dict = {\n \"image\": np.array(images),\n \"steer_target\": np.array(targets).astype('float'),\n \"steer_pred\": np.array(predicts).astype('float')\n }\n\n with 
open(\"pyt_predictions_lstm.pickle\", 'wb') as f:\n pickle.dump(data_dict, f)\n print(\"Predictions pickled...\")\n\n\ndef train_one_epoch(epoch, model, loss_fn, optimizer, train_loader):\n model.train()\n print(\"Epoch {} starting.\".format(epoch))\n epoch_loss = 0\n for batch in train_loader:\n data, target = batch['image'].cuda(), batch['steer'].cuda()\n data = Variable(data).type(torch.cuda.FloatTensor)\n target = Variable(target).type(torch.cuda.FloatTensor)\n\n predict = model(data)\n loss = loss_fn(predict, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n epoch_loss += loss.data[0]\n\n epoch_loss /= len(train_loader.dataset)\n print(\"Epoch {:.4f}: Train set: Average loss: {:.6f}\\t\".format(epoch, epoch_loss))\n log_value('train_loss', epoch_loss, epoch)\n\n\ndef validate(epoch, model, loss_fn, optimizer, valid_loader):\n model.eval()\n valid_loss = 0\n for batch in valid_loader:\n data, target = batch['image'].cuda(), batch['steer'].cuda()\n data = Variable(data, volatile=True).type(torch.cuda.FloatTensor)\n target = Variable(target).type(torch.cuda.FloatTensor)\n predict = model(data)\n valid_loss += loss_fn(predict, target).data[0] # sum up batch loss\n\n valid_loss /= len(valid_loader.dataset)\n print('Valid set: Average loss: {:.6f}\\n'.format(valid_loss))\n log_value('valid_loss', valid_loss, epoch)\n return valid_loss\n\n\ndef test(model, loss_fn, optimizer, test_loader):\n model.eval()\n images = []\n targets = []\n predicts = []\n test_loss = 0\n for batch in test_loader:\n data, target = batch['image'].cuda(), batch['steer'].cuda()\n data = Variable(data, volatile=True).type(torch.cuda.FloatTensor)\n target = Variable(target).type(torch.cuda.FloatTensor)\n output = model(data)\n test_loss += loss_fn(output, target).data[0] # sum up batch loss\n\n # Store image path as raw image too large.\n images.append(batch['image_path'])\n targets.append(target.data.cpu().numpy())\n predicts.append(output.data.cpu().numpy())\n\n test_loss /= len(test_loader.dataset)\n print('Test set: Average loss: {:.4f}\\n'.format(test_loss))\n\n data_dict = {\n \"image\": np.array(images),\n \"steer_target\": np.array(targets).astype('float'),\n \"steer_pred\": np.array(predicts).astype('float')\n }\n\n with open(\"pyt_predictions_lstm.pickle\", 'wb') as f:\n pickle.dump(data_dict, f)\n print(\"Predictions pickled...\")\n\n\ndef save_checkpoint(state, is_best, file_name='/output/checkpoint.pth.tar'):\n \"\"\"Save checkpoint if a new best is achieved\"\"\"\n if is_best:\n print (\"=> Saving a new best\")\n torch.save(state, file_name) # save checkpoint\n else:\n print (\"=> Validation Accuracy did not improve\")\n\n\ndef create_dir(dir_name):\n if not os.path.isdir(dir_name):\n os.makedirs(dir_name)\n\n\ndef main():\n # Set random seed to 0\n np.random.seed(0)\n torch.manual_seed(0)\n\n # Set bags and file paths.\n bags = ['bag1']#, 'bag2'] # , 'bag4', 'bag5', 'bag6']\n root_dir = r'/home/ubuntu/ws/deep_learning/projects/self_driving_car/1-pilot_net/data'\n ckpt_path = os.path.join(root_dir, 'output') # checkpoint.pth.tar')\n log_path = os.path.join(root_dir, 'log')\n\n create_dir(ckpt_path)\n create_dir(log_path)\n\n print(get_device_stats())\n # Configure tensorboard log dir\n configure(os.path.join(root_dir, 'log'))\n\n train_csv_file = r'train_interpolated.csv'\n valid_csv_file = r'valid_interpolated.csv'\n\n # Get transforms\n transforms = imagenet_transforms()\n train_transforms = transforms['train_transforms']\n pre_process = transforms['eval_transforms']\n\n # Set up 
data.\n time_step = 5\n train_data_aug = SequenceDriveDataset(train_csv_file, root_dir, bags, time_step, train_transforms)\n train_data_orig = SequenceDriveDataset(train_csv_file, root_dir, bags, time_step, pre_process)\n train_data = ConcatDataset([train_data_orig, train_data_aug])\n\n print(\"Train data size: {}\".format(len(train_data)))\n train_loader = DataLoader(train_data, batch_size=64, shuffle=True, num_workers=4)\n\n valid_data = SequenceDriveDataset(valid_csv_file, root_dir, bags, time_step, pre_process)\n print(\"Valid data size: {}\".format(len(valid_data)))\n valid_loader = DataLoader(valid_data, batch_size=1, shuffle=False, num_workers=1)\n print(\"Data loaded...\")\n\n # Initiate model.\n # model = PilotNetAlexNetTransfer().cuda()\n model = PilotNetCNNLSTM().cuda()\n\n resume = False # set to false for now.\n if resume:\n state_dict = torch.load(ckpt_path)\n model.load_state_dict(state_dict)\n\n # Set up optimizer and define loss function.\n optimizer = torch.optim.Adam(model.parameters())\n loss_fn = nn.MSELoss()\n print(\"Model setup...\")\n\n # Train\n for epoch in range(5):\n train_one_epoch_sequence(epoch, model, loss_fn, optimizer, train_loader)\n ave_valid_loss = validate_sequence(epoch, model, loss_fn, optimizer, valid_loader)\n\n is_best = True # Save checkpoint every epoch for now.\n\n save_checkpoint({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()\n }, is_best, os.path.join(ckpt_path, 'checkpoint.pth.tar'))\n\n # Test\n test_sequence(model, loss_fn, optimizer, valid_loader)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"surfertas/deep_learning","sub_path":"projects/self_driving_car/2-pilot_net/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8886,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"4"} +{"seq_id":"73148154677","text":"from contextlib import contextmanager\nfrom enum import Enum\n\nimport datetime\n\nimport math\nfrom flask import render_template, redirect, url_for, abort, flash, jsonify, make_response, request, current_app\nfrom flask_login import login_required, current_user\nfrom flask_sqlalchemy import get_debug_queries\nfrom sqlalchemy import desc\nfrom ..models.users import User\nfrom ..models.category import Category\nfrom ..models.flashcard_collections import FlashcardCollection\nfrom ..models.flashcard import Flashcard\nfrom . import main\nfrom .. 
import db\nfrom .forms import FlashcardCollectionForm, AddFlashcardForm, EditFlashcardForm\nfrom flask import g\n\n\n@main.after_app_request\ndef after_request(response):\n    for query in get_debug_queries():\n        if query.duration >= current_app.config['FLASHCARD_SLOW_DB_QUERY_TIME']:\n            current_app.logger.warning(\n                'Slow query: %s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n' %\n                (query.statement, query.parameters, query.duration, query.context))\n    return response\n\n\n@main.route('/')\ndef index():\n    if current_user.is_authenticated:\n        collections = current_user.collections.order_by(FlashcardCollection.timestamp.desc()).all()\n    else:\n        collections = []\n    return render_template('index.html', collections=collections)\n\n\n@main.route('/user/<username>')\n@login_required\ndef user(username):\n    user = User.query.filter_by(username=username).first()\n    if user is None:\n        abort(404)\n    collections = user.collections.order_by(FlashcardCollection.timestamp.desc()).all()\n    return render_template('user.html', user=user, collections=collections)\n\n\n@main.route('/add-collection', methods=['GET', 'POST'])\n@login_required\ndef add_collection():\n    form = FlashcardCollectionForm()\n    if form.validate_on_submit():\n        category = Category.query.filter_by(name=form.category.data).first()\n        if category is None:\n            category = Category(name=form.category.data)\n        collection = FlashcardCollection(name=form.name.data)\n        collection.categories.append(category)\n        collection.user = current_user\n        db.session.add(collection)\n        db.session.commit()\n        flash('Flashcard Collection added.')\n        return redirect(url_for('.index'))\n    return render_template('add_collection.html', form=form)\n\n\n@main.route('/get-category', methods=['GET', 'POST'])\n@login_required\ndef get_category():\n    return jsonify({\n        'category': [category.name for category in Category.query.order_by(Category.name).all()]\n    })\n\n\n@main.route('/flashcardcollection/<int:collId>')\n@login_required\ndef flashcardcollection(collId):\n    flashcardcollection = FlashcardCollection.query.get_or_404(collId)\n    return render_template('flashcardcollection.html', flashcardcollection=flashcardcollection)\n\n\n@main.route('/flashcardcollection/<int:collId>/delete')\n@login_required\ndef delete_flashcardcollection(collId):\n    flashcardcollection = FlashcardCollection.query.get_or_404(collId)\n    db.session.delete(flashcardcollection)\n    db.session.commit()\n    flash('Flashcardcollection {0} has been deleted'.format(flashcardcollection.name))\n    return redirect(request.referrer)\n\n\n@main.route('/flashcardcollection/<int:collId>/add-flashcard', methods=['GET', 'POST'])\n@login_required\ndef add_flashcard(collId):\n    form = AddFlashcardForm()\n    flashcardcollection = FlashcardCollection.query.get_or_404(collId)\n    if request.method == 'POST' and form.validate_on_submit():\n        card = Flashcard(question=form.question.data,\n                         answer=form.answer.data,\n                         hint1=form.hint1.data,\n                         hint2=form.hint2.data,\n                         hint3=form.hint3.data)\n        flashcardcollection.flashcards.append(card)\n        db.session.add(flashcardcollection)\n        db.session.commit()\n        flash('Flashcard added to the Collection {0}'.format(flashcardcollection.name))\n        if form.next.data:\n            return redirect(url_for('.add_flashcard', collId=flashcardcollection.id))\n        else:\n            return redirect(url_for('.flashcardcollection', collId=flashcardcollection.id))\n    return render_template('add_flashcard.html', form=form, name=flashcardcollection.name)\n\n\n@main.route('/flashcardcollection/<int:collId>/flashcard/<int:cardId>')\n@login_required\ndef flashcard(collId, cardId):\n    flashcardcollection = 
FlashcardCollection.query.get_or_404(collId)\n    flashcard = flashcardcollection.flashcards.filter_by(id=cardId).first()\n    if flashcard is None:\n        abort(404)\n    return render_template('flashcard.html', flashcardcollection=flashcardcollection, flashcard=flashcard)\n\n\n@main.route('/flashcardcollection/<int:collId>/flashcard/<int:cardId>/edit', methods=['GET', 'POST'])\n@login_required\ndef edit_flashcard(collId, cardId):\n    flashcardcollection = FlashcardCollection.query.get_or_404(collId)\n    flashcard = flashcardcollection.flashcards.filter_by(id=cardId).first()\n    form = EditFlashcardForm()\n    if flashcard is None:\n        abort(404)\n    if request.method == 'POST' and form.validate_on_submit():\n        flashcard.question = form.question.data\n        flashcard.answer = form.answer.data\n        flashcard.hint1 = form.hint1.data\n        flashcard.hint2 = form.hint2.data\n        flashcard.hint3 = form.hint3.data\n        db.session.add(flashcard)\n        db.session.commit()\n        flash('Flashcard was updated.')\n        return redirect(url_for('.flashcard', collId=collId, cardId=cardId))\n    form = EditFlashcardForm(obj=flashcard)  # pre-populate the form from the existing card\n    return render_template('edit_flashcard.html', form=form)\n\n\n@main.route('/flashcardcollection/<int:collId>/learn')\n@login_required\ndef learn(collId):\n    flashcardcollection = FlashcardCollection.query.get_or_404(collId)\n    mode = request.args.get('mode')\n    cardId = int(request.args.get('cardId', 0))\n    percent_done = 0 if 'percent_done' not in request.args else request.args.get('percent_done')\n\n    if cardId == 0:\n        Cards.init(collId)\n        percent_done, cardId = Cards.choose_next()\n    elif cardId < 0:\n        flash('No Cards to learn. Please reset the Cards or learn the Wrong ones if there are any.')\n        return redirect(url_for('.flashcardcollection', collId=collId))\n\n    flashcard = Flashcard.query.get_or_404(cardId)\n\n    return render_template('learn.html',\n                           flashcard=flashcard,\n                           collection=flashcardcollection,\n                           percent_done=percent_done,\n                           mode=mode)\n\n\n@main.route('/flashcardcollection/<int:collId>/reset-cards')\n@login_required\ndef reset_cards(collId):\n    coll = FlashcardCollection.query.get_or_404(collId)\n    for card in coll.flashcards.all():\n        card.interval = 0\n        card.repetitions = 0\n        card.easiness = 0\n        card.time = datetime.datetime.now()\n        card.next_time = None\n    db.session.add(coll)\n    db.session.commit()\n    return redirect(url_for('.flashcardcollection', collId=collId))\n\n\n@main.route('/flashcardcollection/<int:collId>/delete-flashcard/<int:cardId>')\n@login_required\ndef delete_card(collId, cardId):\n    flashcard = Flashcard.query.get_or_404(cardId)\n    db.session.delete(flashcard)\n    db.session.commit()\n    return redirect(url_for('.flashcardcollection', collId=collId))\n\n\n@main.route('/flashcardcollection/<int:collId>/learn/<int:cardId>/result')\n@login_required\ndef result(collId, cardId):\n    flashcard = db.session.query(Flashcard).filter_by(id=cardId).first()\n    performance = request.args.get('performance')\n    flashcard.repeat(Performance[performance].value, datetime.datetime.now())\n    db.session.add(flashcard)\n    db.session.commit()\n    percent_done, cardId = Cards.choose_next()\n    # db.session.expunge(flashcard)\n    # db.session.close()\n    return redirect(\n        url_for('.learn', collId=collId, cardId=cardId, percent_done=percent_done, mode=request.args.get('mode')))\n\n\nclass Performance(Enum):\n    again = 1\n    good = 3\n    easy = 5\n\n\nclass Cards:\n    flashcardcollection = None\n    new_card_ids = None\n    to_review_ids = None\n    current_place = 0\n    collection_count = 0\n\n    @staticmethod\n    def init(collId):\n        Cards.current_place = 0\n        Cards.flashcardcollection_id = collId\n        flashcardcollection = FlashcardCollection.query.get_or_404(collId)\n        
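# Note: Cards keeps the learning session in class-level attributes, so one\n        # process supports a single active session; a hypothetical per-user variant\n        # would key this state by user id instead (e.g. sessions[current_user.id]).\n        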
Cards.collection_count = len(flashcardcollection.flashcards.all())\n        Cards.new_card_ids = flashcardcollection.get_new_card_ids()\n        Cards.to_review_ids = flashcardcollection.get_reviewed_card_ids()\n\n    @staticmethod\n    def choose_next():\n        if len(Cards.to_review_ids) > 0:\n            percent_done = (Cards.current_place / Cards.collection_count) * 100\n            Cards.current_place += 1\n            p, c = math.ceil(percent_done), Cards.to_review_ids.pop()\n            return p, c\n        elif len(Cards.new_card_ids) > 0:\n            percent_done = (Cards.current_place / Cards.collection_count) * 100\n            Cards.current_place += 1\n            p, c = math.ceil(percent_done), Cards.new_card_ids.pop()\n            return p, c\n        else:\n            return 100, -1\n\n    @staticmethod\n    def reject_card(card):\n        # re-queue a rejected card at the front of the matching id list\n        if card.is_new:\n            Cards.new_card_ids.insert(0, card.id)\n        else:\n            Cards.to_review_ids.insert(0, card.id)\n","repo_name":"cliffmin/cards","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1314227248","text":"import csv\nimport glob\nimport hashlib\nimport multiprocessing\nimport os\nimport pprint\nimport re\n\n\ndef create_md(arg):\n    \"\"\"\n    Returns: key/value where key is the file path and value is the md5 hash\n    item: full path of the file from which the hash is built\n    remove_front: part of the path which is not part of the key, used later\n    \"\"\"\n    item, remove_front = arg\n    if os.path.isdir(item):\n        return []  # ignore dirs\n        # return item[len(remove_front) :], \"dir\"\n    else:\n        md5_hash = hashlib.md5()\n        with open(item, \"rb\") as fd:\n            md5_hash.update(fd.read())\n        return item[len(remove_front) :], md5_hash.hexdigest()\n\n\ndef create_dir_diff(\n    path,\n    remove_front=\"\",\n    exclude=(\n        r\".*sca$\",\n        r\".*vec$\",\n        r\".*container.*\\.out$\",\n        r\".*command\\.out$\",\n        r\".*\\.scenario\",\n    ),\n):\n    dir_content = glob.glob(path, recursive=True)\n    dir_content_filtered = []\n    ex_pattern = re.compile(\"|\".join(exclude))\n    for item in dir_content:\n        if ex_pattern.match(item):\n            continue\n        else:\n            dir_content_filtered.append((item, remove_front))\n\n    njobs = int(multiprocessing.cpu_count() * 0.60)\n    pool = multiprocessing.Pool(processes=njobs)\n    ret = pool.map(create_md, dir_content_filtered)\n    ret = {e[0]: e[1] for e in ret if len(e) == 2}\n\n    return ret\n\n\ndef diffdict_to_csv(path, diff_dict: dict, delimiter=\";\"):\n    with open(path, \"w\", encoding=\"utf-8\") as fd:\n        fd.write(f\"path{delimiter}md5_digest\\n\")\n        for p, d in diff_dict.items():\n            fd.write(f\"{p}{delimiter}{d}\\n\")\n\n\ndef csv_to_diffdict(path, delimiter=\";\"):\n    with open(path, \"r\") as fd:\n        _csv = csv.reader(fd, delimiter=delimiter)\n        next(_csv)  # skip header\n        ret = {}\n        for line in _csv:\n            if len(line) == 2:\n                ret[line[0]] = line[1]\n    return ret\n\n\ndef compare_diff(left: dict, right: dict):\n    ret = True\n    diff = []\n    diff.append([\"key\", \"expected Hash\", \" \", \"computed Hash\"])\n    for k, v in left.items():\n        if k not in right:\n            diff.append([k, v, \"??\", \"file not found!\"])\n            ret = False\n        else:\n            if v != right[k]:\n                diff.append([k, v, \"!=\", right[k]])\n                ret = False\n    not_expected_keys = set(right.keys()) - set(left.keys())\n    if len(not_expected_keys) > 0:\n        for k in not_expected_keys:\n            ret = False\n            diff.append([k, \"file not found!\", \"??\", right[k]])\n\n    err_list = []\n    if len(diff) > 1:  # more than just the header row\n        c = [0, 0, 0, 0]\n        for row in diff:\n            for idx, _ in enumerate(c):\n                c[idx] = max(c[idx], len(row[idx]))\n        for row in diff:\n            err_list.append(\n                
f\"{row[0].ljust(c[0])} {row[1].ljust(c[1])} {row[2].rjust(c[2])} {row[3].ljust(c[3])}\"\n )\n\n return ret, err_list\n\n\nif __name__ == \"__main__\":\n p = \"/home/vm-sts/repos/crownet/crownet/tests/fingerprint/hash.d/guiding_crowds/final_test_3.csv\"\n p2 = \"/home/vm-sts/repos/crownet/crownet/tests/fingerprint/hash.d/guiding_crowds/final_test_3.csv.UPDATED\"\n dict1 = csv_to_diffdict(p)\n dict2 = csv_to_diffdict(p2)\n pprint.pprint(compare_diff(dict1, dict2))\n","repo_name":"roVer-HM/crownetutils","sub_path":"crownetutils/utils/dirdiff.py","file_name":"dirdiff.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1314227248","text":"import socket\n\nserv_sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserv_sock.bind(('127.0.0.1', 3030))\n\nserv_sock.listen()\n\ndata_sock, clnt_addr=serv_sock.accept()\n\nif data_sock:\n while True:\n received=data_sock.recv(1024)\n if not received:\n break\n print('received from client : {}'.format(received))\n data_sock.sendall(received)\n\ndata_sock.close()\nserv_sock.close()\n\n","repo_name":"SeongYoonHuh/CS","sub_path":"network/socket_programming/echo/echo_server.py","file_name":"echo_server.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"4"} +{"seq_id":"6527328954","text":"# coding=utf-8\nimport numpy as np\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.neighbors import KernelDensity\n\n\n#  parameter grid_sizes represents all the different grids we want to compute\n# it is a list of length between points for each grid, default value is 100\ndef getKernelDensityEstimationForDifferentGrids(nodes, metric='euclidean', metric_params=None, bandwidth=0.002,\n optimizeBandwidth=False, bwmin=0.0001, bwmax=0.01, crossValidation=20,\n grid_sizes=None):\n lon = []\n lat = []\n for nlon, nlat in nodes:\n lon.append(nlon)\n lat.append(nlat)\n lon = np.array(lon)\n lat = np.array(lat)\n\n # bbox automatically calculated\n xmin, xmax = min(lon), max(lon)\n ymin, ymax = min(lat), max(lat)\n bbox = [xmin, xmax, ymin, ymax]\n\n # grid size every 100m, 250m, 500m\n # list of x and y for each grid sizes\n grids = grid_sizes if grid_sizes is not None else [100]\n xy = [np.mgrid[xmin:xmax:i, ymin:ymax:i] for i in grids]\n # list of grids\n positions = [np.vstack([x.ravel(), y.ravel()]) for x, y in xy]\n\n # build single D matrix for grid (positions) and data (values)\n values = np.vstack([lon, lat])\n\n if optimizeBandwidth:\n grid = GridSearchCV(\n KernelDensity(kernel='gaussian', metric=metric, metric_params=metric_params, algorithm='ball_tree'),\n {'bandwidth': np.linspace(bwmin, bwmax, 30)}, cv=crossValidation) # 20-fold cross-validation\n grid.fit(zip(*values))\n\n bandwidth = grid.best_params_['bandwidth']\n kernel = grid.best_estimator_\n else:\n kernel = KernelDensity(kernel='gaussian', metric=metric, metric_params=metric_params, algorithm='ball_tree',\n bandwidth=bandwidth)\n kernel.fit(zip(*values))\n\n return kernel, positions, xy, bbox, bandwidth\n\n\ndef getGrid(nodes, grid_sizes=None):\n lon = []\n lat = []\n for nlon, nlat in nodes:\n lon.append(nlon)\n lat.append(nlat)\n lon = np.array(lon)\n lat = np.array(lat)\n\n xmin, xmax = min(lon), max(lon)\n ymin, ymax = min(lat), max(lat)\n\n # grid size every 100m, 250m, 500m\n # list of x and y for each grid sizes\n grids = grid_sizes if grid_sizes is not None else [100]\n xy = [np.mgrid[xmin:xmax:i, 
ymin:ymax:i] for i in grids]\n # list of grids\n positions = [np.vstack([x.ravel(), y.ravel()]) for x, y in xy]\n return [zip(*pos) for pos in positions]\n","repo_name":"ComplexCity/policosm","sub_path":"policosm/functions/getKernelDensityEstimationForDifferentGrids.py","file_name":"getKernelDensityEstimationForDifferentGrids.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"73534404598","text":"import numpy as np\nfrom scipy.optimize import brentq\n\nimport analyze_data as ad\nimport myFunctions as myFun\n\n\ndef Rstar_following_pellis_2(nh, betaG, betaH, nu, gamma):\n # Go to function p in fiile myFun.py to change the way q(k) is computed (using Pellis method or using trasition matrix)\n summation = 0\n for k in range(0, nh):\n summation = summation + myFun.mu(nh, 1, nh - 1, k, betaH, nu, gamma)\n return summation * (betaG / gamma)\n\n\ndef betaG_given_r(nh, r, betaG, betaH, nu, gamma, initial_infected=1):\n QH, states_to_id, id_to_states = myFun.get_QH(nh, betaH, nu, gamma, initial_infected)\n\n number_of_states = len(QH[0])\n\n initial_state = states_to_id[(nh - 1, 1, 0)]\n matrix = QH - (r * np.identity(number_of_states))\n Q_HI = np.linalg.inv(matrix)\n result = 0\n\n for i in range(number_of_states):\n result = result + id_to_states[i][2] * (-Q_HI[initial_state][i])\n return 1 / result\n\n\ndef R_0_Household(nh, betaG, betaH, gamma, nu):\n epsilon = 0.0001\n return brentq(myFun.g_nh, 0 + epsilon, 20, args=(nh, betaG, betaH, gamma, nu))\n\n\ndef R0_from_r(algorithm, tot_simulations, nu, gamma):\n r = ad.logistic_regression(algorithm, tot_simulations)[0]\n R0 = 1 + (r * (r + nu + gamma) / (nu * gamma))\n return R0\n\n\ndef Rstar_from_r(nh, betaG, betaH, nu, gamma, a, b, initial_infected):\n r = compute_growth_rate_r(nh, betaG, betaH, nu, gamma, a, b, -1)\n QH, states_to_id, id_to_states = myFun.get_QH(nh, betaH, nu, gamma, initial_infected)\n initial_state = states_to_id[(nh - 1, 1, 0)]\n QH_1 = np.linalg.inv(QH)\n matrix = np.matmul(QH_1, np.matmul(-(np.identity(len(QH)) / r) + QH_1, QH_1))\n Rstar = 1\n for i in range(len(QH)):\n Rstar = Rstar - betaG * id_to_states[i][2] * matrix[initial_state][i]\n return Rstar\n\n\ndef compute_growth_rate_r(nh, betaG, betaH, nu, gamma, a, b, initial_infected=1):\n QH, states_to_id, id_to_states = myFun.get_QH(nh, betaH, nu, gamma, initial_infected)\n # find the solution of the laplace transform\n root = brentq(myFun.laplace_transform_infectious_profile, a, b,\n args=(nh, betaG, QH, states_to_id, id_to_states, -1))\n return root\n\n\ndef compute_Rstar_following_pellis_markov(nh, betaG, betaH, nu, gamma, initial_infected=1):\n # ------------------------------------------------------------------------------------------------------------------\n '''\n transition_matrix, states_to_id, id_to_states = myFun.get_continuous_transition_matrix(nh, betaH, nu, gamma)\n initial_state = states_to_id[(nh - 1, 1, 0)]\n number_of_states = len(transition_matrix[0])\n\n Rstar = 0\n for i in range(number_of_states):\n if id_to_states[i][2] != 0:\n func = lambda t: scipy.linalg.expm(transition_matrix * t)[initial_state, i]\n Rstar = Rstar + (scipy.integrate.quad(func, 0, np.inf)[0] * id_to_states[i][2])\n\n return betaG * Rstar\n '''\n # ------------------------------------------------------------------------------------------------------------------\n\n transition_matrix, states_to_id, id_to_states = myFun.get_QH(nh, betaH, nu, gamma, initial_infected)\n 
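# For a transient CTMC with sub-generator QH, (-QH**-1)[s0, s] is the expected\n    # total time spent in state s when starting from s0, so the loop below\n    # accumulates E[integral of I(t) dt] and R* = betaG * that integral (assuming\n    # myFun.get_QH returns the transient sub-generator, as the other helpers do).\n    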
initial_state = states_to_id[(nh - 1, 1, 0)]\n\n number_of_states = len(transition_matrix[0])\n\n Q_1 = np.linalg.inv(transition_matrix)\n\n Rstar = 0\n for i in range(number_of_states):\n Rstar = Rstar + ((- Q_1[initial_state][i]) * id_to_states[i][2])\n return betaG * Rstar\n\n\ndef growth_rate_r_SIR(nh, betaG, betaH, gamma):\n root = brentq(myFun.inverse_Qr_SIR, 0.01, 100, args=(nh, betaG, betaH, gamma))\n return root\n\n\ndef growth_rate_vanilla_model( beta, nu, gamma):\n R0 = beta / gamma\n r = -((nu + gamma) / 2) + np.sqrt(((nu + gamma) * (nu + gamma) / 4) + nu * gamma * (R0 - 1))\n return r\n\n\ndef growth_rate_r_SEIR_3(nh, betaG, betaH, nu, gamma):\n root = brentq(myFun.inverse_Qr_SEIR_3, 0.01, 100, args=(nh, betaG, betaH, nu, gamma))\n return root\n","repo_name":"LexILuthor/Python_scripts_for_epiemics","sub_path":"r_star.py","file_name":"r_star.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"25853381182","text":"#this is the first test of the prv32\n#author Adancurusul\n#\nimport re\nimport array\n\n#str00 = 'addi x 7,x0,0x0008;asdfja;;21,,,,'\n\n\nglobal_label_data_dict = {}\nbase = [str(x) for x in range(10)] + [ chr(x) for x in range(ord('A'),ord('A')+6)]\ndef dec2bin(string_num):\n num = int(string_num)\n mid = []\n while True:\n if num == 0: break\n num,rem = divmod(num, 2)\n mid.append(base[rem])\n\n return ''.join([str(x) for x in mid[::-1]])\n\ndef dec2hex(string_num):\n num = int(string_num)\n mid = []\n while True:\n if num == 0: break\n num,rem = divmod(num, 16)\n mid.append(base[rem])\n\n return ''.join([str(x) for x in mid[::-1]])\n\ndef hex2dec(string_num):\n return str(int(string_num.upper(), 16))\n\ndef hex2bin(string_num):\n return dec2bin(hex2dec(string_num.upper()))\n\ndef bin2hex(string_num):\n\n num = int(string_num,2)\n #num = int(string_num)\n mid = []\n while True:\n if num == 0: break\n num,rem = divmod(num, 16)\n mid.append(base[rem])\n\n return ''.join([str(x) for x in mid[::-1]])\n#def bin2hex(self.string_num):\n# return dec2hex(bin2dec(string_num))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass split_str:#can return as dict like{'opcode': 'addi', 'key': 'x7,x0,0x0008'}\n #正则表达式\n parttern_code = r'^\\s*(?P\\w+)\\b\\s+(?P.+)(;|#)?'#匹配汇编并拆分\n def __init__(self,str0):\n self.st = ''.join(str0.split())\n self.str0 = str0.split(';')[0]#分出代码区和注释区\n #print(self.str0)\n self.comma_count()\n self.code_split()\n self.dict_of_ass=self.code_split()\n self.change_to()\n\n def change_to(self):\n if(self.dict_of_ass):\n self.dict_of_ass['key'] =''.join(self.dict_of_ass['key'].split())\n self.opcode = self.dict_of_ass['opcode']\n self.key = self.dict_of_ass['key'].split(',')\n else:\n print('no')#去除空格\n def comma_count(self):#找到逗号的个数个数\n self.count = self.str0.count(',')\n print(self.count)\n def code_split(self):\n self.str0 = self.str0.lower()\n self.result = re.match(self.parttern_code,self.str0)\n if (self.result):\n self.opcode = self.result.group('opcode')\n self.str_key = self.result.group('key')\n self.dict ={'opcode':self.opcode,'key':self.str_key}#创建字典保存分割后的字符串\n\n return self.dict\n\n else :\n return None\n'''\n该类的使用方法\nstr = 'addi x 7,x0,0x0008;asdfja;;21,,,,'\nl = split_str(str)\nprint(l.dict_of_ass)#{'opcode': 'addi', 'key': 'x7,x0,0x0008'}\n'''\n\nclass change_into_bin(split_str):#直接将每行完成转化\n def __init__(self,str0):#继承split的属性\n super().__init__(str0)\n self.list = []\n self.list.append(self.opcode)\n self.list = self.list+self.key\n 
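# self.list now holds [opcode, operand1, operand2, ...]; for example\n        # 'addi x7,x0,0x0008' becomes ['addi', 'x7', 'x0', '0x0008'], which is\n        # the layout every encoder method below indexes into.\n        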
self.change_and_return()\n\n    def change16(self,wei,str1):  # convert a hex immediate string to a wei-bit binary string\n        q = int(str1,16)\n        return str(bin(q))[2:].zfill(wei)\n\n    def change(self,wei,str1):  # convert a register name like 'x7' or 'x15' to a wei-bit binary field\n        return str(bin(int(str1[1:])))[2:].zfill(wei)\n\n    def do_with_fucking_csr(self,str11):  # map a CSR name to its 12-bit address field\n        str1 = str11.lower()\n\n        csr_head = {\n\n            'mstatus':hex2bin('300').zfill(12),\n            'mie':hex2bin('304').zfill(12),\n            'mtvec':hex2bin('305').zfill(12),\n            'mscratch':hex2bin('340').zfill(12),\n            'mepc':hex2bin('341').zfill(12),\n            'mcause':hex2bin('342').zfill(12),\n            'mtval':hex2bin('343').zfill(12),\n            'mbadaddr':hex2bin('343').zfill(12),\n            'mip':hex2bin('344').zfill(12),\n\n\n        }\n        return csr_head[str1]\n    def do_with_fucking_label(self,str0):  # resolve a label to its 12-bit address\n        global global_label_data_dict\n        self.hex_location = dec2bin(global_label_data_dict[str0]).zfill(12)\n        '''\n        TODO: revisit this label handling\n        '''\n\n    def change_and_return(self):\n\n        head = {  # dispatch dict mapping each mnemonic to its encoder (simulates a switch)\n            'addi':self.ADDI,\n            'lui':self.LUI,\n            'auipc':self.AUIPC,\n            'jal':self.JAL,\n            'jalr':self.JALR,\n            'beq':self.BEQ,\n            'bne':self.BNE,\n            'blt':self.BLT,\n            'bge':self.BGE,\n            'bltu':self.BLTU,\n            'bgeu':self.BGEU,\n            'lb':self.LB,\n            'lh':self.LH,\n            'lw':self.LW,\n            'lbu':self.LBU,\n            'lhu':self.LHU,\n            'sb':self.SB,\n            'sh':self.SH,\n            'sw':self.SW,\n            'slti':self.SLTI,\n            'sltiu':self.SLTIU,\n            'xori':self.XORI,\n            'ori':self.ORI,\n            'andi':self.ANDI,\n            'slli':self.SLLI,\n            'srli':self.SRLI,\n            'srai':self.SRAI,\n            'add':self.ADD,\n            'sub':self.SUB,\n            'sll':self.SLL,\n            'slt':self.SLT,\n            'sltu':self.SLTU,\n            'xor':self.XOR,\n            'srl':self.SRL,\n            'sra':self.SRA,\n            'or':self.OR,\n            'and':self.AND,\n            #'fence':FENCE,\n            'fence.i':self.FENCE_I,\n            'ecall':self.ECALL,\n            'ebreak':self.EBREAK,\n            'csrrw':self.CSRRW,\n            'csrrs':self.CSRRS,\n            'csrrc':self.CSRRC,\n            'csrrwi':self.CSRRWI,\n            'csrrsi':self.CSRRSI,\n            'csrrci':self.CSRRCI,\n\n        }\n        self.after_change_str = head[self.list[0]]()\n\n        print(self.after_change_str)\n\n    def ADDI(self):\n\n        st = self.change16(12,self.list[3])+self.change(5,self.list[2])+'000'+self.change(5,self.list[1])+'0010011'\n        return st\n    def SLTI(self):\n        st = self.change16(12,self.list[3])+self.change(5,self.list[2])+'010'+self.change(5,self.list[1])+'0010011'\n        return st\n    def SLTIU(self):\n        st = self.change16(12,self.list[3])+self.change(5,self.list[2])+'011'+self.change(5,self.list[1])+'0010011'\n        return st\n    def ANDI(self):\n        st = self.change16(12,self.list[3])+self.change(5,self.list[2])+'111'+self.change(5,self.list[1])+'0010011'\n        return st\n    def ORI(self):\n        st = self.change16(12,self.list[3])+self.change(5,self.list[2])+'110'+self.change(5,self.list[1])+'0010011'\n        return st\n    def XORI(self):\n        st = self.change16(12,self.list[3])+self.change(5,self.list[2])+'100'+self.change(5,self.list[1])+'0010011'\n        return st\n    def SLLI(self):\n        st = '0000000'+self.change16(5,self.list[-1])+self.change(5,self.list[2])+'001'+self.change(5,self.list[1])+'0010011'\n        return st\n    def SRLI(self):\n        st = '0000000'+self.change16(5,self.list[-1])+self.change(5,self.list[2])+'101'+self.change(5,self.list[1])+'0010011'\n        return st\n    def SRAI(self):\n        st = 
'0100000'+self.change16(5,self.list[-1])+self.change(5,self.list[2])+'101'+self.change(5,self.list[1])+'0010011'\n        return st\n    def ADD(self):\n        st = '0000000'+self.change(5,self.list[-1])+self.change(5,self.list[2])+'000'+self.change(5,self.list[1])+'0110011'\n        return st\n    def SUB(self):\n        st = '0100000'+self.change(5,self.list[-1])+self.change(5,self.list[2])+'000'+self.change(5,self.list[1])+'0110011'\n        return st\n    def SLL(self):\n        st = '0000000'+self.change(5,self.list[-1])+self.change(5,self.list[2])+'001'+self.change(5,self.list[1])+'0110011'\n        return st\n    def SLT(self):\n        st = '0000000'+self.change(5,self.list[-1])+self.change(5,self.list[2])+'010'+self.change(5,self.list[1])+'0110011'\n        return st\n    def SLTU(self):\n        st = '0000000'+self.change(5,self.list[-1])+self.change(5,self.list[2])+'011'+self.change(5,self.list[1])+'0110011'\n        return st\n    def XOR(self):\n        st = '0000000'+self.change(5,self.list[-1])+self.change(5,self.list[2])+'100'+self.change(5,self.list[1])+'0110011'\n        return st\n    def SRL(self):\n        st = '0000000'+self.change(5,self.list[-1])+self.change(5,self.list[2])+'101'+self.change(5,self.list[1])+'0110011'\n        return st\n    def SRA(self):\n        st = '0100000'+self.change(5,self.list[-1])+self.change(5,self.list[2])+'101'+self.change(5,self.list[1])+'0110011'\n        return st\n    def OR(self):\n        st = '0000000'+self.change(5,self.list[-1])+self.change(5,self.list[2])+'110'+self.change(5,self.list[1])+'0110011'\n        return st\n    def AND(self):\n        st = '0000000'+self.change(5,self.list[-1])+self.change(5,self.list[2])+'111'+self.change(5,self.list[1])+'0110011'\n        return st\n    def LUI(self):\n        st = self.change16(20,self.list[-1])+self.change(5,self.list[1])+'0110111'\n        return st\n    def AUIPC(self):\n        st = self.change16(20,self.list[-1])+self.change(5,self.list[1])+'0010111'\n        return st\n    def JAL(self):\n        st = self.change16(20,self.list[-1])+self.change(5,self.list[1])+'1101111'\n        return st\n    def JALR(self):\n        st = self.change16(12,self.list[-1])+self.change(5,self.list[2])+'000'+self.change(5,self.list[1])+'1100111'\n        return st\n    def BEQ(self):\n        s = self.change16(12,self.list[-1])\n        st1 = s[0:7]\n        st2 = s[7:12]\n        st = st1 +self.change(5,self.list[2])+self.change(5,self.list[1])+'000'+st2+'1100011'\n        return st\n    def BNE(self):\n        s = self.change16(12,self.list[-1])\n        st1 = s[0:7]\n        st2 = s[7:12]\n        st = st1 +self.change(5,self.list[2])+self.change(5,self.list[1])+'001'+st2+'1100011'\n        return st\n    def BLT(self):\n        s = self.change16(12,self.list[-1])\n        st1 = s[0:7]\n        st2 = s[7:12]\n        st = st1 +self.change(5,self.list[2])+self.change(5,self.list[1])+'100'+st2+'1100011'\n        return st\n    def BGE(self):\n        s = self.change16(12,self.list[-1])\n        st1 = s[0:7]\n        st2 = s[7:12]\n        st = st1 +self.change(5,self.list[2])+self.change(5,self.list[1])+'101'+st2+'1100011'\n        return st\n    def BLTU(self):\n        s = self.change16(12,self.list[-1])\n        st1 = s[0:7]\n        st2 = s[7:12]\n        st = st1 +self.change(5,self.list[2])+self.change(5,self.list[1])+'110'+st2+'1100011'\n\n        return st\n    def BGEU(self):\n        s = self.change16(12,self.list[-1])\n        st1 = s[0:7]\n        st2 = s[7:12]\n        st = st1 +self.change(5,self.list[2])+self.change(5,self.list[1])+'111'+st2+'1100011'\n        return st\n    def LW(self):\n        l1 =self.list[2].partition(\"(\")\n        t = l1[-1][0:-1]\n        st = self.change16(12,l1[0])+self.change(5,t)+'010'+self.change(5,self.list[1])+'0000011'\n        return st\n    def LH(self):\n        l1 =self.list[2].partition(\"(\")\n        t = l1[-1][0:-1]\n        st = self.change16(12,l1[0])+self.change(5,t)+'001'+self.change(5,self.list[1])+'0000011'\n        return st\n\n\n    def LB(self):\n        l1 
=self.list[2].partition(\"(\")\n t = l1[-1][0:-1]\n st = self.change16(12,l1[0])+self.change(5,t)+'000'+self.change(5,self.list[1])+'0000011'\n return st\n def LHU(self):\n l1 =self.list[2].partition(\"(\")\n t = l1[-1][0:-1]\n st = self.change16(12,l1[0])+self.change(5,t)+'101'+self.change(5,self.list[1])+'0000011'\n return st\n def LBU(self):\n l1 =self.list[2].partition(\"(\")\n t = l1[-1][0:-1]\n str = self.change16(12,l1[0])+self.change(5,t)+'100'+self.change(5,self.list[1])+'0000011'\n return str\n def SW(self):\n l1 =self.list[2].partition(\"(\")\n t = l1[-1][0:-1]\n s = self.change16(12,l1[0])\n st =s[0:7] +self.change(5,self.list[1])+self.change(5,t)+'010'+s[7:12]+'0100011'\n return st\n def SH(self):\n l1 =self.list[2].partition(\"(\")\n t = l1[-1][0:-1]\n s = self.change16(12,l1[0])\n st =s[0:7] +self.change(5,self.list[1])+self.change(5,t)+'001'+s[7:12]+'0100011'\n return st\n def SB(self):\n l1 =self.list[2].partition(\"(\")\n t = l1[-1][0:-1]\n s = self.change16(12,l1[0])\n st =s[0:7] +self.change(5,self.list[1])+self.change(5,t)+'000'+s[7:12]+'0100011'\n return st\n def CSRRW(self):\n st = self.do_with_fucking_csr(self.list[-2])+self.change(5,self.list[-1])+'001'+self.change(5,self.list[1])+'1110011'\n return st\n def CSRRS(self):\n st = self.do_with_fucking_csr(self.list[-2])+self.change(5,self.list[-1])+'010'+self.change(5,self.list[1])+'1110011'\n return st\n def CSRRC(self):\n st = self.do_with_fucking_csr(self.list[-2])+self.change(5,self.list[-1])+'011'+self.change(5,self.list[1])+'1110011'\n return st\n def CSRRWI(self):\n st = self.do_with_fucking_csr(self.list[-2])+self.change16(5,self.list[-1])+'101'+self.change(5,self.list[1])+'1110011'\n return st\n def CSRRCI(self):\n st = self.do_with_fucking_csr(self.list[-2])+self.change(5,self.list[-1])+'111'+self.change(5,self.list[1])+'1110011'\n return st\n def CSRRSI(self):\n st = self.do_with_fucking_csr(self.list[-2])+self.change(5,self.list[-1])+'110'+self.change(5,self.list[1])+'1110011'\n return st\n def FENCE_I(self):\n st = '00000000000000000001000000001111'\n return st\n def ECALL(self):\n st = '00000000000000000000000001110011'\n return st\n def EBREAK(self):\n st = '00000000000100000000000001110011'\n return st\n\n'''\n类用法:\nl = change_into_bin(str00)\nprint(l.after_change_str)\n'''\n\n\n\nclass check_label_or_opcode():#check if it is label or opcode\n parttern_label = r'.section\\s+'\n parttern_section = r''\n parttern_data = r''\n parttern_text = r''\n\n def __init__(self,str0):\n self.str0 = str0\n\n def if_label(self):\n re.match(self.parttern_label,self.str0)\n #def if_section(self):\n # se = re.match(self.parttern_section,self.str0)\n #if (se):\n\n\n'''\n主要扫描方式:\n1遍扫描\n先给定义的量以及函数分配位置\n利用字典查找\n'''\n\nclass do_scan():#扫描类\n label_times =0#label出现次数\n defin_times = 0#定义出现次数\n label_lines = 0 #每个label下行数\n data_lines = 0#data的长度\n data_scan =0 #地址标志位\n line_of_file = 0\n label_scan = 0\n parttern_label = r'^(\\w+)[:]$'#检测是不是label\n parttern_section = r''#检测是不是函数\n data_dict = {}\n label_data_dict = {}\n data_list = []\n parttern_data = r'(\\w+\\s*) [:] (.+)'# 检测data\n parttern_text = r'\\w+'\n parttern_define=r''\n partterna = r'\\s{0,}[.section]+[.]+(.+)'\n flag = 0#检测每个代码段的边缘\n label_flag =0\n size_of_label = {}#每个label大小\n size_of_data = {}#每个data大小\n check = 0\n no_label = 1\n name_of_label = ''\n section_change =0\n stored_or_not = 0\n first_label = 0\n def __init__(self,filename):#得到文件名对其每行进行遍历\n self.filename = filename\n with open(self.filename,'r+') as self.file:#打开文件\n self.scan_first() #做第一次扫描\n\n 
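# One-pass layout (sketch): .data lines go through scan_data() into data_dict and\n    # size_of_data, .text lines go through label_store() into label_data_dict and\n    # size_of_label; change_into_hex later assigns addresses from these size tables.\n    # A hypothetical input that matches the regexes used below (note that partterna\n    # effectively requires the '.section.text' spelling, without a space):\n    #   .section.text\n    #   main:\n    #       addi x1,x0,0x0001\n    #   .section.data\n    #   word1 : 0x00FF\n    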
def scan_first(self):#第一次扫描函数(扫描主函数)\n for line in self.file:\n\n self.line_of_file +=1\n self.str0 = line.lstrip()\n print('str0:',self.str0)\n if self.str0 :#如果不是空的行\n self.if_label()\n if self.data_scan :#为data段\n self.scan_data()\n elif self.label_scan :#为label段\n self.label_store()\n return self.data_dict , self.label_data_dict\n\n def scan_data(self):#data段的储存\n self.dataname = re.match(self.parttern_data,self.str0)\n\n if (self.dataname):#找到data名字\n print('b')\n self.name_of_data = self.dataname.group(1)\n self.name_of_data= ''.join(self.name_of_data).split()[0]#去除空格\n #print(self.name_of_data)\n self.data_of_data =self.dataname.group(2)\n self.data_of_data= ''.join(self.data_of_data).split()[0]#草草草草草这nm又忘了取第一个卧槽\n print('data',self.data_of_data)\n self.count_a = self.data_of_data.count(',')\n print(\"count\",self.count_a)\n if self.count_a>0:#一次定义多个数\n self.datalist = self.data_of_data.split(',')\n #print('datalist',self.datalist)\n self.if_more_define = 1\n else :#一次定义单个数\n self.if_more_define = 0\n self.data_list = self.data_of_data\n\n self.data_count = self.count_a + 1\n self.h = self.size_of_data.setdefault(self.name_of_data,self.data_count)#将地址大小存入dict\n self.m = self.data_dict.setdefault(self.name_of_data,self.datalist)#将得到的data丢入存放data的字典\n #print('datalist',self.data_dict)\n #此处修改为将\n\n\n def label_store(self):#将label 移入字典\n print('lab_store')\n self.labelname = re.match(self.parttern_label,self.str0)\n self.label_check = re.match(self.partterna,self.str0)\n\n '''\n if self.label_check:\n pass\n\n if self.first_label:\n\n if not self.stored_or_not:\n self.n = self.label_data_dict.setdefault(self.name_of_label,self.data_list)\n self.o = self.size_of_label.setdefault(self.name_of_label,self.label_lines)\n print(self.label_data_dict)\n print('*'*20)\n else:\n self.first_label = 1\n\n else:\n '''\n if(self.labelname):\n print('getgetfucking'*5)\n\n\n if( self.labelname and self.check):#无label定义前的代码处理\n print('c')\n self.no_label =0\n self.n = self.label_data_dict.setdefault(self.name_of_label,self.data_list)\n self.o = self.size_of_label.setdefault(self.name_of_label,self.label_lines)\n self.label_lines = 0\n self.data_list = []\n self.name_of_label = self.labelname.group(1)\n elif ( self.labelname and self.check):#无label定义前无代码\n self.no_label = 0\n self.stored_or_not = 0\n\n elif (self.labelname and not self.label_flag and not self.no_label):#找到label\n self.stored_or_not = 0\n print('find label')\n self.label_flag = 1\n self.name_of_label = self.labelname.group(1)\n elif not self.labelname and self.label_flag and not self.no_label :\n self.stored_or_not = 0\n print('fuuuuuuck')\n self.label_lines += 1 #label行数加一\n self.data_list.append(self.str0)#将该行加入list\n elif (self.labelname and self.label_flag and not self.no_label ):#label切换点\n print('change_label')\n\n #self.label_lines = str(self.label_lines)#以字符串存入\n self.n = self.label_data_dict.setdefault(self.name_of_label,self.data_list)\n self.o = self.size_of_label.setdefault(self.name_of_label,self.label_lines)\n self.label_lines = 0\n self.stored_or_not = 1\n self.data_list = []#清空列表以储存下一次\n self.name_of_label = self.labelname.group(1)\n elif self.no_label:#如果是之前没有定义label\n self.stored_or_not = 0\n print('no_label')\n self.no_label = 0\n self.check = 1\n self.label_flag = 1\n self.name_of_label = 'first_of_labels'\n #self.data_list.append(self.str0)\n else:\n print('nothing')\n\n\n\n\n\n def if_label(self):\n\n self.my_label = 'label'\n #self.change = re.match(self.partterna,self,str0)\n #if self.change:\n # 
self.section_change = 1\n\n\n self.label_check = re.match(self.partterna,self.str0)\n if (self.label_check):\n self.data_or_text = self.label_check.group(1)\n if (self.data_or_text=='data' and not self.flag ):\n self.data_scan = 1\n self.label_scan = 0\n self.flag =1\n print('get_data')\n\n elif (self.data_or_text == 'text' and not self.flag):#guguguugugugugugguguguguguguugu\n self.flag = 1\n self.label_scan = 1\n self.data_scan = 0\n print('get_label')\n\n\n\n elif (self.data_or_text=='text' and self.flag):\n if not self.stored_or_not:\n self.h = self.size_of_data.setdefault(self.name_of_data,self.data_count)#将地址大小存入dict\n self.m = self.data_dict.setdefault(self.name_of_data,self.datalist)\n print('*'*20)\n self.data_scan = 0\n self.label_scan = 1\n self.flag = 1\n print('change')\n '''\n 下面有问题我c\n '''\n\n\n elif (self.data_or_text=='data' and self.flag):\n if not self.stored_or_not:\n self.n = self.label_data_dict.setdefault(self.name_of_label,self.data_list)\n self.o = self.size_of_label.setdefault(self.name_of_label,self.label_lines)\n print(self.label_data_dict)\n print('*//'*20)\n self.label_scan = 0\n self.data_scan =1\n self.flag = 1\n print('change')\n\n\n else :\n pass\n\n #self.give_location(self.label_check.group(0),self.my_label)\n\n #return self.label_check.group(0)\n def if_define(self):\n self.mydefine = 'define'\n self.define_check = re.match(self.parttern_define,self.str0)\n if (self.define_check):\n #self.give_location(self.define_check.group(0),\"define\")\n return self.define_check.group(0)\n\n\n\n\n\n'''\n该类阔以利用一遍扫描算法得到data和text的字典\n\n用法\ndata_dict,label_dict = do_scan(file)\n\n\n'''\n\n\n\nclass change_into_hex(do_scan):\n '''\n label_location_dict,data。。。。用于之后的翻译器查找\n locationdict直接存放地址和对应的东西\n\n\n\n '''\n\n location_dict = {}#用于存放每个地址以及对应的01010\n label_location_dict ={}#用于后面转化汇编代码时直接找label\n data_location_dict = {}\n start_position = 0x0000\n data_start_position = 0x0000\n #flag = 0\n line = 0\n def __init__(self,filename,save_location):#继承do_scan的属性\n super().__init__(filename)\n #print('type',type(self.data_start_position))\n print(self.data_start_position)\n self.save_location = save_location\n self.data = self.data_dict\n self.label = self.label_data_dict\n #self.size_of_label = {}#每个label大小\n #self.size_of_data = {}#每个data大小\n #\n self.give_location()\n self.do_fucking_change()\n\n\n\n #下面一段很可能有个问题\n\n\n #明天记得调\n\n\n\n\n\n def give_location(self):#给每个data以及函数分配地址\n\n print('sizelabel',self.size_of_label)\n if self.size_of_label:\n for name in self.size_of_label.keys():\n print('xxcxc'*10)\n self.o = self.label_location_dict.setdefault(name,self.start_position)#将label和起始地址存入dict\n self.increase = int(4*self.size_of_label[name])\n #self.increase = 0x0001\n\n self.start_position = self.increase +self.start_position #给开始地址加入相应地址\n print('start po: ',self.start_position)\n #self.start_position = str(self.start_position[2:]).zfill(4)#四位对其\n self.o = self.label_location_dict.setdefault(name,hex(self.start_position))#将label和起始地址存入dict\n if self.size_of_data:\n #print('okokok'*10)\n\n self.data_start_position = self.start_position +self.data_start_position #将label段末地址赋给data段初始地址\n '''\n 话撂这了,不回来改这一段算我输好吧,绝壁要改 \n '''\n for name in self.size_of_data.keys():\n self.o = self.data_location_dict.setdefault(name,self.data_start_position)\n #self.o = self.data_location_dict.setdefault(name,self.data_start_position)#将data存入dict\n self.data_increase = int(4*self.size_of_data[name])\n #print('data_increase',type(self.data_increase))\n 
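# Caveat: hex() a few lines down rebinds data_start_position to a str, so the\n            # '+=' below would raise TypeError on a second loop iteration; with a single\n            # .data symbol the body runs once and the issue never surfaces.\n            # int(self.data_start_position, 16) would convert it back first.\n            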
print('start_position',self.data_start_position)\n self.data_start_position += self.data_increase#给data地址加入地址\n self.data_start_position =hex(self.data_start_position)\n print(self.data_start_position)\n #self.o = self.data_location_dict.setdefault(name,self.data_start_position)\n #self.data_start_position = str(self.data_start_position[2:]).zfill(4)\n self.o = self.data_location_dict.setdefault(name,self.data_start_position)\n #self.o = self.data_location_dict.setdefault(name,self.data_start_position)\n print('fuuuuck',self.data_location_dict)\n '''\n 我杀她ma的\n hex他妈的出来时str???!!!!\n\n '''\n\n def check_check(self,DD):\n length=len(DD) #求长度\n\n #创建一个list,将传入的str的每两个数合在一起,再求和\n list1=[]\n if(length%2==1): #如果str长度为单数,则抛出错误\n print('数据长度有误')\n else:\n for i in range(0, length, 2): #range(开始,结束-1,每次加多少) 这里即0——length-1 每次循环i+2\n hex_digit=DD[i:i + 2] #将传入的str的每两个数合在一起\n list1.append('0x'+hex_digit) #再每个字符前+0x 但是它仍然是字符,但更便于下面通过int(list1[i], 16)转换成16进制\n print(list1)\n\n sum=0\n for i in range(int(length/2)): #求和\n sum=int(list1[i], 16)+sum #int(list1[i], 16)将16进制转换成10进制 int类型\n sum=sum%256\n sum=256-sum\n\n #print('校验码: '+hex(sum)) #将sum和结果转换成16进制 hex(sum)\n return dec2hex(sum)\n\n def do_fucking_change(self):#do the fucking change !!!!!!fuck!!!我他妈好困!!!!操\n global global_label_data_dict\n if self.label_data_dict:\n print('a')\n for name in self.label_data_dict.keys():#获取label\n print('fucktadfaasf',name,self.label_data_dict[name])\n for i in range(len(self.label_data_dict[name])):#得到label下的每行\n self.lines_in_label =self.label_data_dict[name][i]#依次获取每一行\n self.changed = change_into_bin(self.lines_in_label)#利用转化类进行转化\n\n self.chan = bin2hex(self.changed.after_change_str).zfill(8)\n print('changed'*10,self.chan)\n self.high = self.chan[0:2]\n self.high2 = self.chan[2:4]\n self.low2 = self.chan[4:6]\n self.low = self.chan[6:]#高低位互换\n self.word = self.low+self.low2+self.high2+self.high#互换后的16进制数\n self.location = self.label_location_dict[name] #得到函数初始地址\n global_label_data_dict = self.label_location_dict\n print('aaa'*15,self.label_location_dict)\n print('地址',self.location)\n self.location += i*4 #换算出当前地址值操操操\n #这个狗逼东西没有转化成16进制操操操\n self.location = dec2hex(str(self.location) )\n\n print('location',self.location)\n self.load = self.location_dict.setdefault(self.location,self.word)#存放进地址-数据字典\n if self.data_location_dict:\n\n for name in self.data_location_dict.keys():\n #for i in range(len(self.data_dict[name])):\n print('a')\n if str(type(self.data_dict[name])) == \"\":#如果一次定义多个数据#10.20做修改为找到其数据类型从而判断\n #print('b',self.data_dict)\n #self.list_of_data =self.data_dict[name].split(',')#依次获取每一行 10.20改做直接获取列表\n for i in range(0,len(self.data_dict[name])):\n self.str_of_data = self.data_dict[name][i][2:]#获取data的值\n\n #print('dddddd',self.str_of_data)\n self.high = self.str_of_data[0:2]\n self.low = self.str_of_data[2:]#高低位互换\n self.word = self.low+self.high\n self.location = self.data_location_dict[name]+i*2\n #print('操',self.location)\n #self.location += dec2hex(str(hex(i)*4))#记住加str“”\n self.load = self.location_dict.setdefault(self.location,self.word)\n else:\n self.str_of_data = self.data_dict[name][2:]\n self.low = self.str_of_data[2:]#高低位互换\n self.word = self.low+self.high\n self.location = self.data_location_dict[name]\n #print('操',self.location)\n #self.location += dec2hex(str(hex(i)*4))#记住加str“”\n self.load = self.location_dict.setdefault(self.location,self.word)\n\n '''\n 是否直接05出函数开始地址有待继续研究\n '''\n def write_hex(self):#写入hex\n with open(self.save_location,\"w+\") as sl:#打开待生成的hex\n 
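# Intel HEX records written below: ':' + byte count + 16-bit address +\n            # record type ('00' = data, '04' = extended linear address, '01' = EOF)\n            # + data bytes + a two's-complement checksum over all preceding bytes.\n            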
self.firstr_to_write =':020000040800F2\\n'\n sl.write(self.firstr_to_write)\n for name in self.location_dict.keys():\n #name = str(name).zfill(4)\n print(str(name).zfill(4),',,,',self.location_dict[name])\n if len(self.location_dict[name])==4:\n self.str_without_check_to_write = '02'+str(name).zfill(4)+'00'+self.location_dict[name]#生成无校验的str\n else :\n self.str_without_check_to_write = '04'+str(name).zfill(4)+'00'+self.location_dict[name]#生成无校验的str\n\n self.str_check = self.check_check(self.str_without_check_to_write)\n print('sdafdsf',self.str_check)\n self.str_to_write = ':'+self.str_without_check_to_write + self.str_check+'\\n'#得到有校验位的\n sl.write(self.str_to_write)\n print('ok')\n self.main_begin = ':0400000508000000ef'+'\\n'#本汇编器出来的代码默认0000开始\n sl.write(self.main_begin)\n self.last_to_write = ':00000001FF'+'\\n'#文件结束\n sl.write(self.last_to_write)\n\n\nfrom_file = 'asm.txt'\nfilea = 'hex.txt'\na = change_into_hex(from_file,filea)\nprint('地址.......',a.location_dict)\n\nprint('global_label_data_dict',global_label_data_dict)\nprint('end')\na.write_hex()\n\n\n'''\n利用单次扫描后得到的相应字典\n上面对每个label进行获取并转化\n并存入新的地址-数据dict\n\n\n'''\n\n\n\n\n'''\n\ndef change_file():\n with open('ass.txt','r+') as assfile:\n with open('bin.txt','w+') as binfile:\n for line in assfile:\n changed = change_into_bin(line)\n chan = changed.after_change_str+'\\n'\n binfile.write(chan)\n\n\nchange_file()\n'''\n\n\n\n\n\n\n\n\n\n#class_change = change_into_bin(str00)\n\n\n#print(class_change.after_change_str)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Adancurusul/For-fun","sub_path":"汇编器_new.py","file_name":"汇编器_new.py","file_ext":"py","file_size_in_byte":32283,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"32706677890","text":"import time\n\n\nclass CapsuleInfo:\n \"\"\"Miscellaneous Capsule helper methods\"\"\"\n\n def wait_for_tasks(\n self, search_query, search_rate=1, max_tries=10, poll_rate=None, poll_timeout=None\n ):\n \"\"\"Search for tasks by specified search query and poll them to ensure that\n task has finished.\n\n :param search_query: Search query that will be passed to API call.\n :param search_rate: Delay between searches.\n :param max_tries: How many times search should be executed.\n :param poll_rate: Delay between the end of one task check-up and\n the start of the next check-up. Parameter for ``sat.api.ForemanTask.poll()`` method.\n :param poll_timeout: Maximum number of seconds to wait until timing out.\n Parameter for ``sat.api.ForemanTask.poll()`` method.\n :return: List of ``sat.api.ForemanTasks`` entities.\n :raises: ``AssertionError``. 
If no tasks were found before the timeout.\n        \"\"\"\n        for _ in range(max_tries):\n            tasks = self.api.ForemanTask().search(query={'search': search_query})\n            if tasks:\n                for task in tasks:\n                    task.poll(poll_rate=poll_rate, timeout=poll_timeout)\n                break\n            else:\n                time.sleep(search_rate)\n        else:\n            raise AssertionError(f\"No task was found using query '{search_query}'\")\n        return tasks\n","repo_name":"peterdragun/robottelo","sub_path":"robottelo/host_helpers/capsule_mixins.py","file_name":"capsule_mixins.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"4"} +{"seq_id":"32978747329","text":"from os import system\n\n\nclass Contacto ():\n    def __init__(self, nombre, telefono, correo):\n        self.nombre = nombre\n        self.telefono = telefono\n        self.correo = correo\n\n\nclass Agenda ():\n    def __init__(self):\n        self._contactos = []\n\n    def agregar(self, nombre, telefono, correo):\n        contacto = Contacto(nombre, telefono, correo)\n        self._contactos.append(contacto)\n        print('')\n        self.imprimir_contacto(contacto)\n        self.regresar()\n\n    def buscar_contacto(self, nombre):\n        for contacto in self._contactos:\n            if contacto.nombre.lower() == nombre.lower():\n                return contacto\n        return False\n\n    def elimiar_contacto(self, nombre):\n        contacto = self.buscar_contacto(nombre)\n        if contacto:\n            self.imprimir_contacto(contacto)\n            del self._contactos[self._contactos.index(contacto)]\n            print(f' Contacto eliminado')\n            self.regresar()\n        else:\n            print('')\n            print(' Contacto no encontrado.')\n            self.regresar()\n\n    def lista_contactos(self):\n        for contacto in self._contactos:\n            self.imprimir_contacto(contacto)\n            print('')\n        self.regresar()\n\n    def imprimir_contacto(self, contacto):\n        print(' *---*---*---*---*---*---*---*')\n        print(f' {contacto.nombre.title()}')\n        print(f' {contacto.telefono}')\n        print(f' {contacto.correo}')\n        print(' *---*---*---*---*---*---*---*')\n\n    def buscar_imprimir(self, nombre):\n        _nombre = self.buscar_contacto(nombre)\n        if _nombre:\n            self.imprimir_contacto(_nombre)\n        else:\n            print('')\n            print(' Contacto no encontrado.')\n\n    def actualizar_contacto(self, nombre):\n        def guardar_cambios(contacto, index):\n            self._contactos[index] = contacto\n\n        self.buscar_imprimir(nombre)\n        contacto = self.buscar_contacto(nombre)\n        if contacto:\n            index_contacto = self._contactos.index(contacto)\n            print('')\n            print(' ¿Qué quieres cambiar?')\n            print('')\n            opcion = input(' [n]ombre, [t]elefono, [c]orreo : ')\n\n            if opcion == 'n':\n                print('')\n                n = input(' Ingresa el nuevo nombre: ')\n                contacto.nombre = n\n                guardar_cambios(contacto, index_contacto)\n                hacer_mas = self._cambiar_mas()\n                if hacer_mas:\n                    self.actualizar_contacto(contacto.nombre)\n                else:\n                    return\n            elif opcion == 't':\n                print('')\n                t = input(' Ingresa el nuevo telefono: ')\n                contacto.telefono = t\n                guardar_cambios(contacto, index_contacto)\n                hacer_mas = self._cambiar_mas()\n                if hacer_mas:\n                    self.actualizar_contacto(contacto.nombre)\n                else:\n                    return\n            elif opcion == 'c':\n                print('')\n                c = input(' Ingresa el nuevo correo: ')\n                contacto.correo = c\n                guardar_cambios(contacto, index_contacto)\n                hacer_mas = self._cambiar_mas()\n                if hacer_mas:\n                    self.actualizar_contacto(contacto.nombre)\n                else:\n                    return\n            else:\n                print('')\n                print(' * --- Ingresa una opcion valida. 
--- *')\n                print('')\n                system('clear')\n                self.actualizar_contacto(nombre)\n\n        else:\n            print('')\n            print(' Contacto no encontrado.')\n            self.regresar()\n\n    def _cambiar_mas(self):\n        print('')\n        print(' ¿Quieres cambiar algo más?')\n        print('')\n        opcion = input(' [S]i, [N]o : ')\n        print('')\n        continuar = False\n        if opcion.lower() == 's':\n            continuar = True\n        elif opcion.lower() == 'n':\n            continuar = False\n        else:\n            print(' Ingresa una opcion valida')\n            self._cambiar_mas()\n\n        return continuar\n\n    def regresar(self):\n        print('')\n        opcion = input(' [r]egresar - [s]alir : ')\n        if opcion.lower() == 'r':\n            system('clear')\n            return\n        elif opcion.lower() == 's':\n            system('clear')\n            exit()\n        else:\n            print('')\n            print(' Opción no valida')\n            self.regresar()\n\n\ndef run():\n    system('clear')\n    agenda = Agenda()\n\n    while True:\n        system('clear')\n        print('')\n        print(' B I E N V E N I D O A L A A G E N D A ! ! !')\n        print('')\n        opcion = str(input('''\n    ¿Qué deseas hacer?\n\n    [a]ñadir contacto\n    [ac]tualizar contacto\n    [b]uscar contacto\n    [e]liminar contacto\n    [l]istar contactos\n    [s]alir\n\n    Ingresa la opcion: '''))\n\n        if opcion.lower() == 'a':\n            system('clear')\n            print('')\n            print(' **A Ñ A D I R C O N T A C T O**')\n            print('')\n            nombre = input(' Ingresa el nombre: ')\n            telefono = input(' Ingresa el telefono: ')\n            correo = input(' Ingresa el correo: ')\n\n            agenda.agregar(nombre, telefono, correo)\n\n        elif opcion.lower() == 'ac':\n            system('clear')\n            print('')\n            print(' **A C T U A L I Z A R C O N T A C T O**')\n            print('')\n            nombre = input(' Ingresa el nombre del contacto a actualizar: ')\n            agenda.actualizar_contacto(nombre)\n\n        elif opcion.lower() == 'b':\n            system('clear')\n            print('')\n            print(' **B U S C A R C O N T A C T O**')\n            print('')\n            nombre = input(\n                ' Ingresa el nombre del contacto que quieres buscar: ')\n            print('')\n            agenda.buscar_imprimir(nombre)\n            print('')\n            agenda.regresar()\n\n        elif opcion.lower() == 'e':\n            system('clear')\n            print('')\n            print(' **E L I M I N A R C O N T A C T O**')\n            print('')\n            nombre = input(' Ingresa el nombre del contacto a eliminar: ')\n            agenda.elimiar_contacto(nombre)\n\n        elif opcion.lower() == 'l':\n            system('clear')\n            print('')\n            print(' **L I S T A D E C O N T A C T O**')\n            print('')\n            agenda.lista_contactos()\n\n        elif opcion.lower() == 's':\n            system('clear')\n            exit()\n        else:\n            system('clear')\n            print('')\n            print(' O P C I Ó N N O V A L I D A I N T E N T A D E N U E V O .')\n            print('')\n\n\nif __name__ == '__main__':\n    run()\n","repo_name":"dariogguillen/ejercicios_python","sub_path":"agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":6962,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24800899848","text":"from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import (CategoriesViewSet, CommentsViewSet, GenresViewSet,\n                    GetUserAPIView, GetWorkingTokenAPIView, ReviewsViewSet,\n                    TitlesViewSet, UsersViewSet)\n\napp_name = 'api'\n\nrouter_v1 = DefaultRouter()\nrouter_v1.register('categories', CategoriesViewSet, basename='category')\nrouter_v1.register('genres', GenresViewSet, basename='genre')\nrouter_v1.register('titles', TitlesViewSet, basename='title')\nrouter_v1.register('users', UsersViewSet)\nrouter_v1.register(\n    r'titles/(?P<title_id>\\d+)/reviews/(?P<review_id>\\d+)/comments',\n    CommentsViewSet, basename='comments')\nrouter_v1.register(\n    r'titles/(?P<title>\\d+)/reviews', ReviewsViewSet, basename='reviews')\n\nurlpatterns = [\n    path('v1/', 
include(router_v1.urls)),\n    path('v1/auth/signup/', GetUserAPIView.as_view()),\n    path('v1/auth/token/', GetWorkingTokenAPIView.as_view()),\n]\n","repo_name":"distemper17/yamdb_final","sub_path":"api_yamdb/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"27062624867","text":"#another script of notes for file stuff\n\nfilename = \"text.txt\" #if same directory\nfilename = \"C:\\\\files\\\\text.txt\" #if not in same dir\n\n\ndef appFile (filename): #add to end of file\n    app = open(filename, 'a')\n    a = input(\"append file: \")\n    app.write(a)\n    app.close()\n\ndef writeFile (filename): #write over file\n    w = input(\"write to file: \")\n    write = open(filename, 'w')\n    write.write(w)\n    write.close()\n\ndef openFile (filename): #open file and print lines\n    read = open(filename, 'r')\n    for x in read:\n        print(x)\n\ndef readChar (filename): #read specific # of characters\n    read = open(filename, 'r')\n    r = int(input(\"characters: \"))\n    print(read.read(r))\n    read.close()\n","repo_name":"elys33ve/python-notes","sub_path":"random-notes/files_functions.py","file_name":"files_functions.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"34421741257","text":"from time import sleep\nimport os, re, traceback, random, requests\n\nfrom selenium.webdriver.chrome.webdriver import WebDriver as Chrome\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\nfrom selenium.webdriver.common.keys import Keys\nfrom bs4 import BeautifulSoup\n\nfrom django import setup\nfrom django.utils import timezone\n\nif __name__ == \"__main__\":\n    os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"storage.settings\")\n    setup()\n\nfrom storage.models import TaobaoShop, TaobaoItem, TaobaoItemRecord\n\n\nheaders = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4620.400 QQBrowser/9.7.13014.400'}\n\n\ndef rest(rest=(10, 20)):\n    t = random.uniform(*rest)\n    print('resting: %ds'%t)\n    sleep(t)\n\n\nclass TaoBaoVisitor():\n    'Browse the Taobao site'\n    def __init__(self):\n        chrome_options = ChromeOptions()\n        chrome_options.set_headless()\n        self.driver = Chrome(chrome_options=chrome_options)\n        self.driver.implicitly_wait(5)\n        self.shops = None\n\n    def quit(self):\n        self.driver.quit()\n\n    def get_shops(self):\n        'Get the saved shops'\n        if self.shops:\n            return self.shops\n        else:\n            return TaobaoShop.objects.all()\n\n    def get_all_items(self):\n        'Get all items and prices for today'\n        if not self.shops:\n            self.shops = self.get_shops()\n        for shop in self.shops:\n            shop_url = shop.url\n            if 'taobao' in shop_url:\n                self.get_taobao_shop_items(shop)\n            elif 'tmall' in shop_url:\n                self.get_tmall_shop_items(shop)\n            rest([180, 420])\n\n    def get_taobao_shop_items(self, shop):\n        'Get the items and prices from a Taobao shop'\n        driver = self.driver\n        driver.get(shop.get_list_url())\n        shop_search_result = driver.find_elements_by_id('J_ShopSearchResult')[0]\n        if shop.keyword:\n            rest()\n            input_keyword = shop_search_result.find_elements_by_name('keyword')[0]\n            input_keyword.send_keys(shop.keyword)\n            input_keyword.send_keys(Keys.RETURN)\n        next_page = True\n        while next_page:\n            shop_search_result = driver.find_elements_by_id('J_ShopSearchResult')[0]\n            items = shop_search_result.find_elements_by_css_selector('.item')\n            self.collect_item_datas(items, shop)\n            pagination_a = 
shop_search_result.find_elements_by_css_selector('.pagination-mini>a')\n            next_page = False\n            for a in pagination_a:\n                if a.text=='下一页':\n                    if not a.get_attribute('class'):\n                        next_page =True\n                        driver.execute_script('arguments[0].click();', a)\n            rest()\n\n    def get_tmall_shop_items(self, shop):\n        'Get the items and prices from a Tmall shop'\n        driver = self.driver\n        driver.get(shop.get_list_url())\n        rest()\n        shop_search_result = driver.find_elements_by_id('J_ShopSearchResult')[0]\n        if shop.keyword:\n            input_keyword = shop_search_result.find_elements_by_name('keyword')[0]\n            input_keyword.send_keys(shop.keyword)\n            input_keyword.send_keys(Keys.RETURN)\n            rest()\n        next_page = True\n        while next_page:\n            shop_search_result = driver.find_elements_by_id('J_ShopSearchResult')[0]\n            j_titems = shop_search_result.find_elements_by_css_selector('.J_TItems>div')\n            items = []\n            for div in j_titems:\n                if div.get_attribute('class')=='pagination':\n                    break\n                else:\n                    items += div.find_elements_by_css_selector('.item')\n            self.collect_item_datas(items, shop)\n            pagination_a = shop_search_result.find_elements_by_css_selector('.ui-page-s>a')\n            next_page = False\n            for a in pagination_a:\n                if a.text=='>':\n                    next_page = True\n                    driver.execute_script('arguments[0].click();', a)\n            rest()\n\n    def collect_item_datas(self, items, shop):\n        'Collect item data and save it to the database'\n        new_items = []\n        new_records = []\n        for item in items:\n            item_id = item.get_attribute('data-id')\n            if not TaobaoItem.objects.filter(id=item_id).exists():\n                new_items.append(TaobaoItem(\n                    id=item_id,\n                    name=item.find_elements_by_css_selector('.detail>.item-name')[0].text,\n                    shop_id=shop.id\n                ))\n                print(new_items[-1])\n            today = timezone.now().date()\n            if not TaobaoItemRecord.objects.filter(item_id=item_id, date=today).exists():\n                new_records.append(TaobaoItemRecord(\n                    item_id=item_id,\n                    date=today,\n                    price=float(item.find_elements_by_css_selector('.detail .c-price')[0].text)\n                ))\n                print(new_records[-1])\n        TaobaoItem.objects.bulk_create(new_items)\n        TaobaoItemRecord.objects.bulk_create(new_records)\n\n\ndef add_shop(url, keyword=None):\n    'Add a shop to track'\n    url = url.split('com')[0] + 'com/'\n    r = requests.get(url)\n    match = re.search(r'shopId=([0-9]*)', r.text)\n    if match:\n        shop_id = int(match.group().replace('shopId=', ''))\n        shop_name = re.search(r'<title>([.\\n]*)',r.text).group().split('-')[1]\n        shop = TaobaoShop(id=shop_id, name=shop_name, url=url, keyword=keyword)\n        shop.save()\n        return shop\n    else:\n        print('Shop not found')\n\n\ndef get_url_and_cookies(shop):\n    chrome_options = ChromeOptions()\n    chrome_options.set_headless()\n    driver = Chrome(chrome_options=chrome_options)\n    driver.get(shop.get_list_url())\n\n    shop_asyn_search = driver.find_elements_by_id('J_ShopAsynSearchURL')[0]\n    asyn_search_url = shop.url + shop_asyn_search.get_attribute('value') \\\n        + '&callback=jsonp' + str(random.randint(160,200))\n    asyn_search_urls = [asyn_search_url]\n\n    page_info = driver.find_elements_by_css_selector('.pagination-mini .page-info')[0].text\n    for i in range(2, int(page_info.split('/')[1])+1):\n        asyn_search_urls.append(asyn_search_url+'&pageNo='+str(i))\n\n    full_cookies = driver.get_cookies()\n    driver.quit()\n    rest()\n    return asyn_search_urls, {c['name']:c['value'] for c in full_cookies}\n\n\ndef asyn_search(shop):\n    asyn_search_urls, cookies = get_url_and_cookies(shop)\n    headers['referer'] = shop.get_list_url()\n    for url in asyn_search_urls:\n        print('crawling: %s' % url)\n        r = requests.get(url, headers=headers, cookies=cookies)\n        soup = BeautifulSoup(r.text.replace('\\\\',''))\n        items = soup.find_all(class_='item')\n        for 
item in items:\n item_id = item['data-id']\n name = item.find(class_='item-name').text.replace(' ','')\n price = float(item.find(class_='c-price').text.replace(' ',''))\n print(item_id, name, price)\n rest()\n\n\nif __name__ == \"__main__\":\n try:\n v = TaoBaoVisitor()\n v.get_all_items()\n except Exception as e:\n print(traceback.format_exc())\n v.quit()\n","repo_name":"FossenWang/WebCrawler","sub_path":"taobao.py","file_name":"taobao.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"16037850788","text":"def construct_json(structure, newick_string, tree=None, placement=False):\n amount_leafs = 0\n\n if tree is None:\n tree = {\"children\": {}}\n internal_str = newick_string[structure['begin'] + 1:structure['end']]\n internal_list = internal_str.split(',')\n for leaf_unf in internal_list:\n tree_element = {}\n if '(' in leaf_unf or ')' in leaf_unf:\n # not a leaf\n continue\n element_name, element_length = leaf_unf.split(':')\n if placement:\n element_length, node_index = element_length.split('{')\n # Remove last curly bracket from index\n node_index = node_index[:-1]\n tree_element[\"index\"] = node_index\n\n tree_element[\"name\"] = element_name\n tree_element[\"length\"] = element_length\n tree_element[\"children\"] = {}\n tree_element[\"leafCount\"] = 1\n tree[\"children\"][element_name] = tree_element\n #the leaf is a child of this node\n amount_leafs += 1\n for child in structure[\"children\"]:\n tree_element = {}\n element_specification = newick_string[child[\"end\"]+1:]\n element_name_length_str = element_specification.split(':')\n element_name = element_name_length_str[0]\n #in the middle it is comma seperated only at the end point comma\n end_length = element_name_length_str[1].find(',')\n if end_length == -1: end_length = element_name_length_str[1].find(')')\n if end_length == -1: end_length = element_name_length_str[1].find(';')\n element_length = element_name_length_str[1][:end_length]\n\n if placement:\n element_length, node_index = element_length.split('{')\n # Remove last curly bracket from index\n node_index = node_index[:-1]\n tree_element[\"index\"] = node_index\n\n #for inner nodes we don't insert names\n tree_element[\"length\"] = element_length\n tree_element[\"children\"] = {}\n\n tree_element, new_amount_leafs = construct_json(child, newick_string, tree_element, placement=placement)\n amount_leafs += new_amount_leafs\n tree_element[\"leafCount\"] = new_amount_leafs\n\n tree[\"children\"][element_name] = tree_element\n return tree, amount_leafs\n\n#helperfunc to convert neweck to json (as this is more usefull for handling in jquery)\ndef convert_newick_json(newick_file, placement=False):\n content = open(newick_file, \"r\")\n if content:\n newick_string = content.readline()\n\n\n current_node = None\n #we keep where the last bracket was opened to find the inner text of 'leaves'\n last_open = -1\n #we first combine all positions of an opening parentheses with the position of the closing one\n for iter in range(0, len(newick_string)):\n if newick_string[iter] == '(':\n last_open = iter\n node = {\n 'parent': current_node,\n 'begin': iter,\n 'end': None,\n 'children': []\n }\n if current_node is not None:\n current_node['children'].append(node)\n current_node=node\n\n if newick_string[iter] == ')':\n current_node['end'] = iter\n if current_node['parent'] is not None:\n current_node = current_node['parent']\n\n if current_node is None:\n return None\n\n tree_json, 
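# A stack-based sketch of the same bracket matching that convert_newick_json
# implements above with parent pointers; the helper name is illustrative only.
def match_parens(newick):
    pairs, stack = {}, []
    for i, ch in enumerate(newick):
        if ch == '(':
            stack.append(i)
        elif ch == ')':
            pairs[stack.pop()] = i  # '(' index -> index of its matching ')'
    return pairs
# e.g. match_parens('((A:1,B:2):0.5,C:3);') == {1: 9, 0: 18}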
amount_leafs = construct_json(current_node, newick_string, placement=placement)\n #-2 because 1 for the top node and one because the root also adds one\n tree_json['leafCount'] = amount_leafs\n return tree_json\n","repo_name":"ThomasVanOnsem/SARS-CoV-2-Phylogeny","sub_path":"src/newick.py","file_name":"newick.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"73645607157","text":"class Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\ndef printList(head):\n\n curr = head\n llstr = ' '\n while curr:\n llstr += str(curr.data)+'--->' if curr.next else str(curr.data)\n curr = curr.next\n print(llstr)\n\ndef reorderList(head):\n slow = head\n fast = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n print(slow.data)\n second = slow.next\n prev = slow.next = None\n while second:\n next = second.next\n second.next = prev\n prev = second\n second = next\n\n #merge two halfs\n first,second = head,prev\n while second:\n tmp1,tmp2 = first.next,second.next\n first.next = second\n second.next = tmp1\n first,second = tmp1,tmp2\n\nhead = Node(10)\nhead.next = Node(20)\nhead.next.next = Node(30)\nhead.next.next.next = Node(40)\nhead.next.next.next.next = Node(50)\nprintList(head)\nreorderList(head)\nprintList(head)\n\n","repo_name":"arunpoy/python_code","sub_path":"Linkedlist/reorderlist.py","file_name":"reorderlist.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"8129511718","text":"#! /usr/bin/env python3\n\nfrom argparse import ArgumentParser\n\nfrom gainer import Gainer\nfrom lock import Lock, LockError\nfrom utils import error, info\n\n# SUPPORTED_MEDIA_TYPES = (\"flac\", \"vorbis\", \"mp3\")\nSUPPORTED_MEDIA_TYPES = (\"flac\", \"mp3\")\n\n\ndef main():\n lock = Lock()\n try:\n lock.acquire()\n except LockError as exc:\n error(\"Couldn't create lock file: %s\" % exc)\n lock = None\n else:\n try:\n gainer = Gainer(_get_options())\n gainer.process()\n except KeyboardInterrupt:\n error(\"Interrupted by user.\")\n finally:\n if lock:\n lock.release()\n\n\ndef _check_options(options):\n if all((options.add_replay_gain, options.remove_replay_gain)):\n raise RuntimeError(\n \"Conflict in arguments: \"\n \"--add-replay-gain and --remove-replay-gain are mutually exclusive\"\n )\n\n if not any((options.add_replay_gain, options.remove_replay_gain)):\n info(\"No action selected, --add-replay-gain used as default\")\n options.add_replay_gain = True\n\n if options.all:\n for opt in SUPPORTED_MEDIA_TYPES:\n setattr(options, opt, True)\n\n # if not any((options.flac, options.vorbis, options.mp3)):\n if not any((options.flac, options.mp3)):\n raise RuntimeError(\n \"No media type selected, will do nothing. 
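# Hedged aside: argparse can enforce the add/remove conflict that
# _check_options() tests by hand via a mutually exclusive group; a sketch
# that mirrors, rather than replaces, the parser built in this file.
from argparse import ArgumentParser

def _build_conflict_free_parser():
    parser = ArgumentParser(description="MPD ReplayGain tool")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--add-replay-gain", action="store_true")
    group.add_argument("--remove-replay-gain", action="store_true")
    return parser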
\"\n            \"Enable at least one of the following: \"\n            \"--flac, --mp3\"\n        )\n\n\ndef _get_options():\n\n    def add_switch(parser, opt):\n        \"\"\"\n        Adds a switch-like argument to the argument parser.\n\n        When this option is given from cmd, it is 'store_const'ed with True value\n        \"\"\"\n        parser.add_argument(\n            opt, action=\"store_const\", const=True, default=False\n        )\n\n    parser = ArgumentParser(\n        description=\"MPD ReplayGain tool\"\n    )\n    parser.add_argument(\"--directory\", \"-d\", default=None)\n    add_switch(parser, \"--add-replay-gain\")\n    add_switch(parser, \"--remove-replay-gain\")\n    add_switch(parser, \"--debug\")\n    add_switch(parser, \"--force\")\n    add_switch(parser, \"--all\")\n    for media_type in SUPPORTED_MEDIA_TYPES:\n        add_switch(parser, \"--%s\" % media_type)\n\n    options = parser.parse_args()\n    _check_options(options)\n    return options\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"ondrejkajinek/replay_gainer","sub_path":"mpd_gainer.py","file_name":"mpd_gainer.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"4092050372","text":"# 开发时间:2023-05-22 10:08\n# 开发人员:林坚洪\n# encoding \"UTF-8\"\nimport pandas as pd\n\ndata = pd.read_csv('sales.csv', encoding='utf-8')\nprint(data.head(5))\nprint(data.describe(include='all'))\nprint(data.shape)\n\nimport matplotlib.pyplot as plt\n\nplt.hist(data['mj'].dropna(), bins=60)  # bins 制定直方图中条形的个数\n# plt.show()\n\nplt.boxplot(data['mj'].dropna())\n# plt.show()\n\nX = data[['qy', 'fx', 'mj', 'jg']]\ny = data['lb']\nfrom sklearn.model_selection import train_test_split\n\ntrain_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=1)\nprint(train_X.shape, train_y.shape)\nprint(test_X.shape, test_y.shape)\n\nmj_train_na = pd.isnull(train_X['mj'])\nmj_test_na = pd.isnull(test_X['mj'])\nprint(mj_train_na)\nprint(mj_test_na)\n\ntrain_X = train_X.values\ntest_X = test_X.values\ntrain_X_copy = train_X.copy()\n\nfrom sklearn.impute import SimpleImputer\n\nimp = SimpleImputer(strategy='mean')\n\nimp.fit(train_X[:, [2]])\ntrain_X[:, [2]] = imp.transform(train_X[:, [2]])\nprint(train_X[:])\ntest_X[:, [2]] = imp.transform(test_X[:, [2]])\n\nfrom sklearn.preprocessing import LabelEncoder\n\nprint(train_X[:5, 0])\nprint(train_X[:5, 1])\n\nle_qy = LabelEncoder()\nle_qy.fit(train_X[:, 0])\ntrain_X[:, 0] = le_qy.transform(train_X[:, 0])\nle_fx = LabelEncoder()\nle_fx.fit(train_X[:, 1])\ntrain_X[:, 1] = le_fx.transform(train_X[:, 1])\nprint(train_X[:5])\n\nqy = train_X[:, 0]\nfx = train_X[:, 1]\nmj = train_X[:, 2]\njg = train_X[:, 3]\nlb = train_y\nfig, axes = plt.subplots(1, 4, figsize=[12, 3])\naxes[0].scatter(qy, lb)\naxes[1].scatter(fx, lb)\naxes[2].scatter(mj, lb)\naxes[3].scatter(jg, lb)\n# plt.show()\n","repo_name":"Lindeyi5/Python_pro","sub_path":"charpter_5/jiqixuexi.py","file_name":"jiqixuexi.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"30432137692","text":"from aiogram import Dispatcher\nfrom aiogram.types import ForceReply, InlineKeyboardMarkup, ReplyKeyboardMarkup, KeyboardButton\n\nfrom . 
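# Sketch of the fit-on-train / transform-on-test rule the preprocessing above
# follows; the toy data and column layout here are made up for illustration.
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder

train = np.array([['a', 1.0], ['b', np.nan], ['a', 3.0]], dtype=object)
test = np.array([['b', np.nan]], dtype=object)

imp = SimpleImputer(strategy='mean')
train[:, [1]] = imp.fit_transform(train[:, [1]])
test[:, [1]] = imp.transform(test[:, [1]])  # reuse training statistics

le = LabelEncoder()
train[:, 0] = le.fit_transform(train[:, 0])
test[:, 0] = le.transform(test[:, 0])  # same learned label mapping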
import BaseMarkupFactory\n\n\nclass StartMarkupFactory(BaseMarkupFactory):\n def __init__(self, support_chat_link: str):\n self.support_chat_link = support_chat_link\n\n def create(self) -> ForceReply | InlineKeyboardMarkup | ReplyKeyboardMarkup:\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n btn_callback = KeyboardButton(text='Заказать звонок')\n #btn_chat = KeyboardButton(text='Чат со специалистом')\n btn_presentation = KeyboardButton(text='Получить презентацию')\n #btn_come_to_see = KeyboardButton(text='Виртуальный тур')\n\n markup.row(btn_callback, btn_presentation)\n #markup.row(btn_chat, btn_come_to_see)\n return markup\n","repo_name":"Visceros/Klenovye_allei_bot","sub_path":"markup/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"10669705773","text":"## @package NumberCombinations\n# \n# Publicly available NumberCombinations access method\n\nfrom genetic import Genetic\n\n## Driver method for the genetic algorithm\n#\n#\ndef NumberCombinations():\n gen = Genetic(\"nums.txt\", 20, 0.3)\n o = gen.compute_member_output(\"00100110001110101001\")\n\nNumberCombinations()","repo_name":"DipinjitHanspal/NumberCombinations","sub_path":"NumberCombinations.py","file_name":"NumberCombinations.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"24533188276","text":"import os\nimport random\nfrom random import randrange\nimport argparse\nimport logging\n\nfrom findpeaks import findpeaks\n\nlogging.basicConfig(format='[%(asctime)s] %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-e\", \"--event_sents_pairs_file\", help = \"Event-sentences pairs file path\")\nparser.add_argument(\"-p\", \"--predicted_qa_file\", help = \"Predicted QA file path\")\nparser.add_argument(\"-o\", \"--output_dir\", help = \"Output dir\")\nargs = parser.parse_args()\n\nevent_sents_pairs_file = args.event_sents_pairs_file\npredicted_qa_file = args.predicted_qa_file\noutput_dir = args.output_dir\n\n\n\"\"\"\nRead input files and generate the duration distribution for each event.\n\"\"\"\n\nevent_sents_pairs = []\nwith open(event_sents_pairs_file, 'r') as reader:\n for line in reader:\n line_splits = line.split('\\t')\n sent = line_splits[0]\n event = line_splits[1].replace('\\n', '')\n event_sents_pairs.append((sent, event))\n\npredicted_qa = []\nwith open(predicted_qa_file, 'r') as reader:\n for line in reader:\n predicted_qa.append(line.replace('\\n', ''))\n\ndur_dist = {}\ndur_dist_by_context = {}\n\nk = 8 # Number of answers for each question, i.e., number of duration units (seconds to decades)\nfor i, (sent, event) in enumerate(event_sents_pairs):\n if event not in dur_dist.keys():\n dur_dist[event] = [0] * k\n\n event_sent_pair = event+'\\t'+sent\n if event_sent_pair not in dur_dist_by_context.keys():\n dur_dist_by_context[event_sent_pair] = [0] * k\n\n for j in range(k):\n if predicted_qa[i * k + j] == 'yes':\n dur_dist[event][j] += 1\n dur_dist_by_context[event_sent_pair][j] = 1\n\n\n\"\"\"\nFind the episodic and habitual durations from the peaks and their neighbouring units from the duration distributions.\n\"\"\"\n\nlogging.info(\"Finding the episodic and habitual durations from the duration distributions....\")\n\nneighbour_thr = 0.75\n\ndur_units_to_index = {'seconds': 0, 'minutes': 
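# Equivalent, more compact construction of the unit/index maps defined here;
# purely illustrative.
_units = ['seconds', 'minutes', 'hours', 'days', 'weeks', 'months', 'years', 'decades']
_unit_to_index = {u: i for i, u in enumerate(_units)}
_index_to_unit = dict(enumerate(_units))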
1, 'hours': 2, 'days': 3, 'weeks': 4, 'months': 5, 'years': 6, 'decades': 7}\ndur_units = list(dur_units_to_index.keys())\n\ntypical_duration_all = []\ntypical_duration_episodic = []\n\nfor i, event in enumerate(dur_dist.keys()):\n durations = dur_dist[event]\n\n # k predictions are removed in order to reduce noises\n k = 3\n durations = [unit_count - k if unit_count >= k else 0 for unit_count in durations]\n durations = [0] + durations + [0]\n\n multiPeak = False\n\n fp = findpeaks(method='topology', lookahead=1)\n results = fp.fit(durations)\n\n episodic_dur, episodic_dur_value, habitual_dur, habitual_dur_value = -1, -1, -1, -1\n\n for w, peak in enumerate(results['df']['peak']):\n if w < 4:\n if peak and results['df']['y'][w] > episodic_dur_value:\n episodic_dur = w\n episodic_dur_value = results['df']['y'][w]\n else:\n if peak and results['df']['y'][w] > habitual_dur_value:\n habitual_dur = w\n habitual_dur_value = results['df']['y'][w]\n\n peaks = []\n for z, peak in enumerate(results['df']['peak']):\n if peak and not results['df']['valley'][z]:\n peaks.append(z)\n\n peaks = [x-1 for x in peaks] \n\n peak_count = len(peaks)\n if peak_count > 0:\n last_peak_idx = peaks[-1]\n\n if peak_count > 1:\n episodic_peak_idx = peaks[0]\n habitual_peak_idx = peaks[-1]\n\n episodic_units_list = [0] * 8\n episodic_units_list[episodic_peak_idx] = 1\n\n episodic_peak_unit = dur_units[episodic_peak_idx]\n habitual_peak_unit = dur_units[habitual_peak_idx]\n\n episodic_peak_left_neighbour, episodic_peak_right_neigbour = '', ''\n habitual_peak_left_neighbour, habitual_peak_right_neigbour = '', ''\n\n # Find the neighbouring units after checking if the peak is not on the left-most or the right-most of the distribution\n if episodic_peak_idx != 0 and durations[episodic_peak_idx - 1] >= neighbour_thr * durations[episodic_peak_idx]:\n episodic_peak_left_neighbour = dur_units[episodic_peak_idx - 1]\n episodic_units_list[episodic_peak_idx - 1] = 1\n\n if episodic_peak_idx != 7 and durations[episodic_peak_idx + 1] >= neighbour_thr * durations[episodic_peak_idx]:\n episodic_peak_right_neigbour = dur_units[episodic_peak_idx + 1]\n episodic_units_list[episodic_peak_idx +1 ] = 1\n\n if habitual_peak_idx != 0 and durations[habitual_peak_idx - 1] >= neighbour_thr * durations[habitual_peak_idx]:\n habitual_peak_left_neighbour = dur_units[habitual_peak_idx - 1]\n\n if habitual_peak_idx != 7 and durations[habitual_peak_idx + 1] >= neighbour_thr * durations[habitual_peak_idx]:\n habitual_peak_right_neigbour = dur_units[habitual_peak_idx + 1]\n\n typical_duration_all.append((event, durations, episodic_peak_left_neighbour, episodic_peak_unit, episodic_peak_right_neigbour,\n habitual_peak_left_neighbour, habitual_peak_unit, habitual_peak_right_neigbour))\n typical_duration_episodic.append((event, durations, episodic_units_list))\n\n else:\n episodic_units_list = [0] * 8\n episodic_units_list[last_peak_idx] = 1\n\n peak_unit = dur_units[last_peak_idx]\n\n left_neighbour = ''\n right_neigbour = ''\n\n if last_peak_idx != 0:\n if durations[last_peak_idx - 1] >= neighbour_thr * durations[last_peak_idx]:\n left_neighbour = dur_units[last_peak_idx - 1]\n episodic_units_list[last_peak_idx - 1] = 1\n\n if last_peak_idx != 7:\n if durations[last_peak_idx + 1] >= neighbour_thr * durations[last_peak_idx]:\n right_neigbour = dur_units[last_peak_idx + 1]\n episodic_units_list[last_peak_idx + 1] = 1\n\n typical_duration_all.append((event, durations, left_neighbour, peak_unit, right_neigbour))\n typical_duration_episodic.append((event, 
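# Pure-numpy stand-in for the findpeaks call used above: indices strictly
# greater than both neighbours, with no plateau or topology handling — a
# sketch, not the library's algorithm.
import numpy as np

def simple_peaks(y):
    y = np.asarray(y)
    return [i for i in range(1, len(y) - 1) if y[i] > y[i - 1] and y[i] > y[i + 1]]
# e.g. simple_peaks([0, 3, 1, 0, 5, 2, 0]) == [1, 4]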
durations, episodic_units_list))\n\n\n\"\"\"\nGenerate the answers for the pseudo QA data\n\"\"\"\n\nlogging.info(\"Generating the answers for the pseudo QA data...\")\n\ndur_upper_bounds = [(60, 'seconds'), (60, 'minutes'), (24, 'hours'), (7, 'days'), (52, 'weeks'), (12, 'months'), (10, 'years'), (10, 'decades'), (10, 'centuries')]\n\ndur_variations = [['a few seconds', 'several seconds'],\n ['a few minutes', 'several minutes'],\n ['a few hours', 'several hours', 'for hours'],\n ['a few days', 'several days', 'for days'],\n ['a few weeks', 'several weeks', 'for weeks'],\n ['a few months', 'several months', 'for months'],\n ['a few years', 'several years', 'for years'],\n ['a few decades', 'several decades', 'for decades']]\n\n\ndef generate_answers(num, range_min, range_max, dur_unit, ans_type):\n \"\"\"\n Return a list of answers with size n for pseudo QA data.\n TODO: simplify and refactor this function, reduce loops.\n \"\"\"\n answers = []\n number_selected= []\n\n if ans_type == 'pos': # Generate positive answers.\n number_1_selected = False \n while len(answers) < num:\n \n # Randomly select which kind of answers will be generated, i.e, random numbers or random phrases from dur_variations.\n if randrange(4) in range(3):\n if int(range_max) - int(range_min) > 1:\n number = 1\n # Randomly select even number or multiples of 5 and check if such number hasn't been selected before.\n while (number % 2 != 0 and number % 5 != 0) and number not in number_selected:\n number = randrange(int(range_min), int(range_max))\n number_selected.append(number)\n else:\n number = randrange(int(range_min), int(range_max))\n\n if number == 1:\n if not number_1_selected:\n answers.append(str(number) + ' ' + dur_upper_bounds[dur_unit][1][:-1])\n number_1_selected = True\n else:\n phrase_selected = random.choice(dur_variations[dur_unit])\n while phrase_selected not in answers:\n answers.append(phrase_selected)\n else:\n answers.append(str(number)+' '+dur_upper_bounds[dur_unit][1])\n else:\n phrase_selected = random.choice(dur_variations[dur_unit])\n while phrase_selected not in answers:\n answers.append(phrase_selected)\n\n else: # Generate negative answers.\n for l in range(num):\n if randrange(5) in range(2):\n answers.append(random.choice(dur_variations[dur_unit]))\n else:\n number = 1\n while number % 2 != 0 and number not in number_selected:\n number = randrange(1, int(dur_upper_bounds[dur_unit][0]))\n number_selected.append(number)\n answers.append(str(number) + ' ' + dur_upper_bounds[dur_unit][1])\n\n return answers\n\n# Number of positive answers and negative answers to be generated.\npos_num = 3\nneg_num = 4\n\n# Number of contexts for each event.\nm = 1\n\npseudo_qa = {}\n\nfor item in typical_duration_episodic:\n event = item[0]\n episodic_peak_units = item[2]\n question = 'How long does it take to ' + event + '?'\n contexts = []\n\n for key in dur_dist_by_context:\n if key.startswith(event+'\\t') and episodic_peak_units == dur_dist_by_context[key]:\n contexts.append(key.split('\\t')[1])\n\n if len(contexts) >= m:\n sampled_contexts = random.sample(contexts, m)\n\n for j, context in enumerate(sampled_contexts):\n pos_features = []\n neg_features = []\n pos_index = [i for i, x in enumerate(episodic_peak_units) if x == 1]\n pos_index_w_neighbours = []\n\n for index in pos_index:\n pos_index_w_neighbours.append(index-1)\n pos_index_w_neighbours.append(index)\n pos_index_w_neighbours.append(index+1)\n pos_index_w_neighbours = set(pos_index_w_neighbours)\n neg_index = []\n\n for i in range(8):\n if i 
not in pos_index_w_neighbours:\n neg_index.append(i)\n\n for unit_idx in pos_index:\n pos_features.extend(generate_answers(pos_num, 1, dur_upper_bounds[unit_idx][0], unit_idx, 'pos'))\n\n for unit_idx in neg_index:\n neg_features.extend(generate_answers(neg_num, 1, dur_upper_bounds[unit_idx][0], unit_idx, 'neg'))\n\n pos_features = list(set(pos_features))\n neg_features = list(set(neg_features))\n\n if len(pos_features) > pos_num:\n pos_features = random.sample(pos_features, pos_num)\n\n if len(neg_features) > neg_num:\n neg_features = random.sample(neg_features, 4)\n\n for ans in pos_features:\n feat = context + '\\t' + question + '\\t' + ans + '\\tyes\\tEvent Duration'\n\n if event not in pseudo_qa:\n pseudo_qa[event] = []\n pseudo_qa[event].append(feat)\n\n for ans in neg_features:\n feat = context + '\\t' + question + '\\t' + ans + '\\tno\\tEvent Duration'\n\n if event not in pseudo_qa:\n pseudo_qa[event] = []\n pseudo_qa[event].append(feat)\n\npseudo_qa_events = list(pseudo_qa.keys())\nrandom.shuffle(pseudo_qa_events)\n\nlogging.info(\"Generated pseudo QA data with {} events\".format(len(pseudo_qa_events)))\n\n\"\"\"\nSplit the pseudo QA data and output it into files\n\"\"\"\n\nn = 200 \npseudo_qa_events_splits = []\nfor i in range(0, len(pseudo_qa_events), n):\n pseudo_qa_events_splits.append(pseudo_qa_events[i:i+n])\n\n# Use the first 400 events for hold-out validation\npseudo_qa_dev = []\nfor i, split in enumerate(pseudo_qa_events_splits[:2]):\n for event in split:\n pseudo_qa_dev.extend(pseudo_qa[event])\n\nwith open(os.path.join(args.output_dir, 'pseudo_qa_dev.tsv'), 'w') as writer:\n for line in pseudo_qa_dev:\n writer.write(line+'\\n')\n\n# Generate pseudo QA data with 200 events increments for student model training\npseudo_qa_data = []\nfor i, split in enumerate(pseudo_qa_events_splits[2:]):\n for event in split:\n pseudo_qa_data.extend(pseudo_qa[event])\n\n with open(os.path.join(args.output_dir, 'pseudo_qa_{}.tsv'.format((i+1)*n)), 'w') as writer:\n for line in pseudo_qa_data:\n writer.write(line+'\\n')\n\n","repo_name":"felixgiov/AcTED","sub_path":"data_generation/generate_pseudo_qa_data.py","file_name":"generate_pseudo_qa_data.py","file_ext":"py","file_size_in_byte":12857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"28952858207","text":"\"\"\"struct del\n\nRevision ID: c9afe6a7c9eb\nRevises: b23be10a3e42\nCreate Date: 2021-03-21 09:28:06.928096\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'c9afe6a7c9eb'\ndown_revision = 'b23be10a3e42'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('struct_file', 'type')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
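# Hedged aside: the generic shape of a reversible single-column Alembic
# migration like the one below; the table and column names are placeholders.
import sqlalchemy as sa
from alembic import op

def _example_upgrade():
    op.drop_column('example_table', 'example_column')

def _example_downgrade():
    op.add_column('example_table', sa.Column('example_column', sa.String(), nullable=True))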
###\n    op.add_column('struct_file', sa.Column('type', postgresql.ENUM('SURF_TOP', 'SURF_BOT', 'GRID', 'GRID_FES', name='struct'), autoincrement=False, nullable=True))\n    # ### end Alembic commands ###\n","repo_name":"kpfu-ses/uvo-reserves-demo","sub_path":"migrations/versions/c9afe6a7c9eb_struct_del.py","file_name":"c9afe6a7c9eb_struct_del.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32352727750","text":"import numpy as np\nimport re\nimport random\n\n\ndef text_parse(input_string):\n    \"\"\"\n    text preprocess: parse all the emails to a list of words\n    \"\"\"\n    list_of_tokens = re.split(r'\W+', input_string)\n    return [tok.lower() for tok in list_of_tokens if len(tok) > 2]\n\n\ndef creat_vocabulary(doc_list):\n    \"\"\"\n    return a vocabulary of all the unique words\n    \"\"\"\n    vocab_set = set([])\n    for doc in doc_list:\n        vocab_set = vocab_set | set(doc)\n    return list(vocab_set)\n\n\ndef set_of_word2vec(vocab_list, input_set):\n    \"\"\"\n    construct a list that shows how many words from an input document appear in the vocabulary\n    if a word appears, set the relevant position to 1, others 0\n    \"\"\"\n    # the length should be the same as vocabulary, not the document\n    returned_vec = [0] * len(vocab_list)\n    for word in input_set:\n        if word in vocab_list:\n            returned_vec[vocab_list.index(word)] = 1\n\n    return returned_vec\n\n\ndef train_nb(training_matrix, training_labels):\n    \"\"\"\n    training a Naive Bayes classifier\n    :returns p_word_spam, p_word_ham: probabilities of each word appearing in spam/ham\n             spam_percentage: the percentage of spam in total training samples\n    \"\"\"\n    num_samples = len(training_matrix)\n    # each vector's length from training matrix is the same as vocabulary size\n    vocab_size = len(training_matrix[0])\n    spam_percentage = sum(training_labels) / float(num_samples)\n    # initialize counts with np.ones instead of np.zeros (Laplace smoothing),\n    # so that no p(word|class) ever becomes exactly zero\n    # compute p(word|ham) and p(word|spam)\n    words_in_spam = np.ones(vocab_size)\n    words_in_ham = np.ones(vocab_size)  # number of words appears in ham\n    denom_spam = 2\n    denom_ham = 2\n\n    for i in range(num_samples):\n        if training_labels[i] == 1:\n            words_in_spam += training_matrix[i]\n            denom_spam += sum(training_matrix[i])\n        else:\n            words_in_ham += training_matrix[i]\n            denom_ham += sum(training_matrix[i])\n\n    # notice: words_in_spam is a vector, thus p_spam, p_ham is a vector which\n    # stores all the probabilities of each word appearing in spam/ham\n    p_word_spam = np.log(words_in_spam / denom_spam)\n    p_word_ham = np.log(words_in_ham / denom_ham)\n\n    return p_word_spam, p_word_ham, spam_percentage\n\n\ndef classify_nb(word_vec, p_word_spam, p_word_ham, spam_percentage):\n    \"\"\"\n    :param word_vec: a word vector for classifying e.g. 
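# The decision rule of classify_nb below as a single dot product; argument
# names are hypothetical.
import numpy as np

def log_posterior(word_vec, log_word_probs, log_prior):
    # log p(class) + sum of log-likelihoods of the words that are present
    return log_prior + float(np.dot(word_vec, log_word_probs))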
[0, 1, 1, 0, 1, 0, 0, 1]\n :param p_word_spam: a vector stores all the probabilities of each word appearing in spam\n :param p_word_ham: a vector stores all the probabilities of each word appearing in ham\n :param spam_percentage: (num_spam / num_total_documents)\n :return: 1 spam\n 0 ham\n \"\"\"\n p_spam = np.log(spam_percentage) + sum(word_vec * p_word_spam)\n p_ham = np.log(1.0 - spam_percentage) + sum(word_vec * p_word_ham)\n if p_spam > p_ham:\n return 1\n else:\n return 0\n\n\ndef spam():\n doc_list = [] # emails\n label_list = [] # labels of emails\n for i in range(1, 26):\n word_list = text_parse(open('email/spam/%d.txt' % i, 'r').read())\n doc_list.append(word_list)\n label_list.append(1) # 1 means spam\n\n word_list = text_parse(open('email/ham/%d.txt' % i, 'r').read())\n doc_list.append(word_list)\n label_list.append(0) # 0 means ham\n\n # vocabulary is vocab_list\n vocab_list = creat_vocabulary(doc_list)\n training_set = list(range(50)) # stores all 50 indices\n test_set = [] # to store 10 test indices\n for i in range(10):\n rand_index = int(random.uniform(0, len(training_set)))\n test_set.append(training_set[rand_index])\n # remove the index from training set, thus the actual size of training set will be 40\n del (training_set[rand_index])\n\n training_matrix = []\n training_labels = []\n for doc_index in training_set:\n # construct training data\n training_matrix.append(set_of_word2vec(vocab_list, doc_list[doc_index]))\n training_labels.append(label_list[doc_index])\n\n p_word_spam, p_word_ham, spam_percentage = train_nb(np.array(training_matrix), np.array(training_labels))\n\n error_count = 0\n for doc_index in test_set:\n # convert current document to word vector\n test_word_vec = set_of_word2vec(vocab_list, doc_list[doc_index])\n # do classification on the document\n if classify_nb(np.array(test_word_vec), p_word_spam, p_word_ham, spam_percentage) != label_list[doc_index]:\n error_count += 1\n print('wrongly classified samples: ' + str(error_count) + ' samples')\n\n\nif __name__ == '__main__':\n spam()\n","repo_name":"zwzwtao/Machine-Learning","sub_path":"12-Bayes/bayes_spam_detection.py","file_name":"bayes_spam_detection.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"41086601010","text":"def merge(array, low, mid, high):\n \"\"\"\n 1. Here 'array' is the array whose sorted left half and sorted right half \n elements are to be merged resulting in sorted array as a whole.\n \"\"\"\n \n i = low\n j = mid + 1\n copy = []\n \n while i <= mid and j <= high:\n if array[i] <= array[j]:\n copy.append(array[i])\n i += 1\n else:\n copy.append(array[j])\n j += 1\n \n if i > mid:\n while j <= high:\n copy.append(array[j])\n j += 1\n else:\n while i <= mid:\n copy.append(array[i])\n i += 1\n \n y = 0\n for x in range(low, high + 1):\n array[x] = copy[y]\n y += 1\n \ndef merge_sort(array, low, high):\n \"\"\"\n 1. Here 'array' is the array to be sorted.\n 2. 'low' and 'high' are the index of first and last element in array respectively.\n 3. 
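# Library counterpart of the manual merge step implemented above: heapq.merge
# lazily combines inputs that are already sorted.
from heapq import merge as heap_merge

assert list(heap_merge([1, 3, 5], [2, 4, 6])) == [1, 2, 3, 4, 5, 6]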
The function sorts the array passed.\n \"\"\"\n \n if low < high:\n mid = (low + high) // 2\n merge_sort(array, low, mid)\n merge_sort(array, mid + 1, high)\n merge(array, low, mid, high)\n \n\"\"\" \nEXAMPLE TO TRY:\n\narr = [1, 3, 5, 2, 4, 6]\nmerge_sort(arr, 0, len(arr) - 1)\nprint(arr)\n\"\"\"\n\n \n \n","repo_name":"neha39/Algorithms-in-Python","sub_path":"MergeSort Algorithm.py","file_name":"MergeSort Algorithm.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"73766550198","text":"\"\"\"Underlying processing module of the user interface.\"\"\"\nimport settings\nimport read_properties_file\nimport cv2\nimport resolution\nimport mask\nimport thresholds\nimport skycover\nimport createregions\nimport numpy as np\nimport ratio\nimport overlay\nimport gzip\nimport matplotlib.pyplot as plt\nimport tarfile\nimport createregions\n\n\ndef save_original_image(data, fn):\n \"\"\"Save image, converting it from BGR to RGB.\n\n Args:\n data: image in array format to be saved (BGR)\n fn: filename\n \"\"\"\n sizes = np.shape(data)\n height = float(sizes[0])\n width = float(sizes[1])\n\n fig = plt.figure()\n fig.set_size_inches(width / height, 1, forward=False)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n ax.imshow(cv2.cvtColor(data, cv2.COLOR_BGR2RGB))\n plt.savefig(fn, dpi=height)\n plt.close()\n\n\ndef save_processed_image(data, fn):\n \"\"\"Save image as RGB.\n\n Args:\n data: image in array format to be saved (RGB)\n fn: filename\n \"\"\"\n sizes = np.shape(data)\n height = float(sizes[0])\n width = float(sizes[1])\n\n fig = plt.figure()\n fig.set_size_inches(width / height, 1, forward=False)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n ax.imshow(data)\n plt.savefig(fn, dpi=height)\n plt.close()\n\n\ndef save_original_image_and_histogram(img, hist_data, fn):\n \"\"\"Save image, converting it from BGR to RGB.\n\n Args:\n data: image in array format to be saved (BGR)\n\n fn: filename\n \"\"\"\n hist_data.flatten()\n hist_data = hist_data[hist_data > 0]\n hist_data = np.divide(hist_data - 1, hist_data + 1)\n print(max(hist_data), min(hist_data))\n\n x_min = -0.5\n x_max = 0.5\n nbins = 50\n step = (abs(x_min) + abs(x_max)) / (nbins + 1)\n\n bin_edges = np.arange(x_min, x_max, step)\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,3))\n ax1.axis('off')\n ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n ax2.set_xlim((x_min,x_max))\n ax2.hist(hist_data, bins=bin_edges, density=True, color='gray', edgecolor='black')\n ax2.set_xlabel('Normalized R/B')\n ax2.set_ylabel('Frequency')\n plt.tight_layout()\n plt.savefig(fn, bbox_inches=\"tight\")\n plt.close()\n\n\ndef read_from_tar(filename_no_ext):\n \"\"\"Temporarily open source tar file, extracting any information out of it and saving to tmp folder.\n\n Args:\n filename_no_ext: filename without any extension\n\n Returns:\n tuple: imgages and additional information required for processing\n \"\"\"\n # filename variables\n filename_jpg = filename_no_ext + '.jpg'\n filename_png = filename_no_ext + '.png'\n properties_file = filename_no_ext + '.properties.gz'\n\n settings.year = filename_no_ext[0:4]\n settings.month = filename_no_ext[4:6]\n settings.day = filename_no_ext[6:8]\n settings.hour = filename_no_ext[8:10]\n settings.minute = filename_no_ext[10:12]\n\n path = settings.tsi_database + settings.year + '/' + settings.month + '/DBASE/' + settings.year + settings.month + 
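# Minimal sketch of the selective extraction read_from_tar() performs here;
# the archive path and member names are illustrative.
import tarfile

def extract_members(tar_path, member_names, dest='tmp'):
    with tarfile.open(tar_path) as tar:
        for name in member_names:
            tar.extract(name, dest)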
\\\n settings.day + '_tsi-cabauw_realtime.tar'\n\n tar = tarfile.open(path)\n\n tar.extract(filename_jpg, 'tmp')\n tar.extract(filename_png, 'tmp')\n tar.extract(properties_file, 'tmp')\n\n tar.close()\n\n jpg_loc = 'tmp/' + filename_jpg\n png_loc = 'tmp/' + filename_png\n properties_loc = 'tmp/' + properties_file\n\n # unzip the gzip file, open the file as rt=read text\n with gzip.open(properties_loc, 'rt') as f:\n lines = []\n # read the file and store line per line\n for line in f:\n lines.append(line)\n\n # get the altitude and azimuth from the defs\n altitude = read_properties_file.get_altitude(lines)\n azimuth = read_properties_file.get_azimuth(lines)\n\n img = cv2.imread(jpg_loc)\n img_tsi_processed = cv2.imread(png_loc)\n\n return img, img_tsi_processed, lines, filename_jpg, filename_png, azimuth, altitude\n\n\ndef single(filename):\n \"\"\"Process a single image\n\n Args:\n filename: filename without an extension\n\n Returns:\n tuple: information about the processed images\n \"\"\"\n img, img_tsi_processed, properties_file, filename_jpg, filename_png, azimuth, altitude = read_from_tar(filename)\n\n if altitude >= settings.minimum_altitude:\n # get the fractional sky cover from 'old' TSI software\n cover_thin_tsi, cover_opaque_tsi, cover_total_tsi = read_properties_file.get_fractional_sky_cover_tsi(properties_file)\n\n # get the resolution of the image\n resolution.get_resolution(img)\n\n # create and apply the mask\n mask_array = mask.create(img, azimuth)\n masked_img = mask.apply(img, mask_array)\n\n masked_regions, outlines, labels, stencil, image_with_outlines = createregions.create(img, azimuth, altitude, mask_array)\n\n # calculate red/blue ratio per pixel\n red_blue_ratio = ratio.red_blue_v2(masked_img)\n\n # calculate fixed fractional skycover\n fixed_sunny_threshold, fixed_thin_threshold = thresholds.fixed()\n cover_thin_fixed, cover_opaque_fixed, cover_total_fixed = skycover.fixed(red_blue_ratio,\n fixed_sunny_threshold,\n fixed_thin_threshold)\n\n # calculate hybrid sky cover\n ratio_br_norm_1d_nz, blue_red_ratio_norm, st_dev, hybrid_threshold_mce, hybrid_threshold_otsu, \\\n hybrid_threshold_kmeans = thresholds.hybrid(masked_img)\n cover_total_hybrid_mce = skycover.hybrid(ratio_br_norm_1d_nz, hybrid_threshold_mce)\n cover_total_hybrid_otsu = skycover.hybrid(ratio_br_norm_1d_nz, hybrid_threshold_otsu)\n cover_total_hybrid_kmeans = skycover.hybrid(ratio_br_norm_1d_nz, hybrid_threshold_kmeans)\n\n # create the segments for solar correction\n regions, outlines, labels, stencil, image_with_outlines = createregions.create(img, azimuth,\n altitude,\n mask_array)\n\n # overlay outlines on image(s)\n image_with_outlines_fixed = overlay.fixed(red_blue_ratio, outlines, stencil,\n fixed_sunny_threshold,\n fixed_thin_threshold)\n image_with_outlines_hybrid = overlay.hybrid(masked_img, outlines, stencil, hybrid_threshold_mce)\n\n save_processed_image(image_with_outlines_hybrid, settings.tmp + filename + '_hybrid.png')\n save_processed_image(image_with_outlines_fixed, settings.tmp + filename + '_fixed.png')\n save_original_image(img_tsi_processed, settings.tmp + filename + '_fixed_old.png')\n save_original_image(img, settings.tmp + filename + '_original.png')\n # save_original_image_and_histogram(masked_img, red_blue_ratio, '/usr/people/mos/Documents/Report/images/orig_hist.png')\n\n azimuth = round(azimuth, 3)\n altitude = round(altitude , 3)\n cover_total_fixed= round(cover_total_fixed, 3)\n cover_total_hybrid = round(cover_total_hybrid_mce, 3)\n cover_total_tsi = 
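# The fractional sky-cover values above reduce to "share of valid pixels whose
# ratio exceeds a threshold"; a numpy sketch with hypothetical names.
import numpy as np

def fractional_cover(ratio_image, threshold):
    valid = ratio_image[np.isfinite(ratio_image)]
    return float(np.mean(valid > threshold)) if valid.size else 0.0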
round(cover_total_tsi, 3)\n\n return azimuth, altitude, cover_total_fixed, cover_total_hybrid, cover_total_tsi\n","repo_name":"KNMI-DataLab/PyTSI","sub_path":"src/image_interface.py","file_name":"image_interface.py","file_ext":"py","file_size_in_byte":7413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"40936676459","text":"import numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom random import shuffle\nfrom collections import defaultdict\n\n\ndef input_img_label_pairs(img_label_pairs, img_preprocessing_fn, batch_size=64, num_threads=12, num_epochs=None):\n def input_fn():\n encoded_img_label_pairs = [(f.encode(), label) for f, label in img_label_pairs]\n dataset = tf.data.Dataset.from_generator(lambda: tqdm(encoded_img_label_pairs),\n (tf.string, tf.int64), (tf.TensorShape([]), tf.TensorShape([])))\n dataset = dataset.repeat(num_epochs)\n\n # Preprocess\n def load_image(img_path1, label):\n to_be_batched = dict()\n to_be_batched['images'], to_be_batched['image_sizes'] = img_preprocessing_fn(tf.read_file(img_path1))\n to_be_batched['labels'] = label\n return to_be_batched\n\n dataset = dataset.map(load_image, num_threads)\n dataset = dataset.prefetch(batch_size*3)\n batched_dataset = dataset.batch(batch_size)\n\n iterator = batched_dataset.make_one_shot_iterator()\n next_batch = iterator.get_next()\n\n return next_batch, next_batch['labels']\n\n return input_fn\n\n\ndef input_set_classification_train(img_uid_label_pairs, class_uids, uid_to_id_dict, batch_size=12, num_epochs=None):\n def input_fn():\n encoded_img_label_pairs = [(uid_to_id_dict[uid], label) for uid, label in img_uid_label_pairs]\n encoded_class_uids = [{uid_to_id_dict[uid] for uid in uids} for uids in class_uids]\n shuffle(encoded_img_label_pairs)\n n_classes = len(encoded_class_uids)\n n_images = len(uid_to_id_dict)\n\n id_to_class = {_id: class_id for class_id, ids in enumerate(encoded_class_uids)\n for _id in ids}\n\n default_class_ids = np.array([id_to_class[_id] for _id in range(len(uid_to_id_dict))])\n\n def _gen():\n for i in tqdm(range(0, len(encoded_img_label_pairs), batch_size)):\n img_label_batch = encoded_img_label_pairs[i:i+batch_size]\n input_ids, labels = zip(*img_label_batch)\n\n class_to_be_used = set(np.random.choice(n_classes, 10, replace=False))\n class_to_be_used.update([id_to_class[_id] for _id in input_ids])\n class_to_be_used = list(class_to_be_used)\n\n label_dict = {c_id: i for i, c_id in enumerate(class_to_be_used)}\n labels = [label_dict[l] for l in labels]\n\n class_ids = np.empty(n_images, np.int32)\n class_ids[:] = -1\n for c_id in class_to_be_used:\n class_ids[list(encoded_class_uids[c_id])] = label_dict[c_id]\n for _id in input_ids:\n class_ids[_id] = -1\n\n #TODO sanity checks?\n r = {\n 'input_ids': input_ids,\n 'labels': labels,\n 'class_ids': class_ids\n }\n #print(r)\n yield r\n\n dataset_output_type = {\n 'input_ids': tf.int32,\n 'labels': tf.int64,\n 'class_ids': tf.int32\n }\n dataset_output_shapes = {\n 'input_ids': tf.TensorShape([None]),\n 'labels': tf.TensorShape([None]),\n 'class_ids': tf.TensorShape([None]),\n }\n\n dataset = tf.data.Dataset.from_generator(_gen, dataset_output_type, dataset_output_shapes)\n print(dataset)\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.prefetch(3)\n\n iterator = dataset.make_one_shot_iterator()\n next_batch = iterator.get_next()\n\n return next_batch, next_batch['labels']\n\n return input_fn\n\n\ndef input_set_classification_inference(img_vectors, 
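# The common shape of the input_fn builders in this module — slice, repeat,
# batch, prefetch — in the same TF 1.x style as the surrounding code; the
# helper name and field names are illustrative.
import tensorflow as tf

def minimal_input_fn(features, labels, batch_size=32, num_epochs=None):
    ds = tf.data.Dataset.from_tensor_slices({'inputs': features, 'labels': labels})
    ds = ds.repeat(num_epochs).batch(batch_size).prefetch(1)
    batch = ds.make_one_shot_iterator().get_next()
    return batch, batch['labels']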
labels=None, batch_size=12, num_epochs=None):\n def input_fn():\n dataset = tf.data.Dataset.from_tensor_slices({'inputs': img_vectors,\n 'labels': np.array(labels, dtype=np.int64)})\n dataset = dataset.repeat(num_epochs)\n if batch_size > 1:\n dataset = dataset.batch(batch_size)\n else:\n dataset = dataset.map(lambda d: {k: v[None] for k, v in d.items()})\n dataset = dataset.prefetch(3)\n\n iterator = dataset.make_one_shot_iterator()\n next_batch = iterator.get_next()\n print(next_batch)\n\n return next_batch, next_batch.get('labels')\n\n return input_fn\n\n\ndef input_pairs_from_csv(csv_filename, img_preprocessing_fn, batch_size=8, num_threads=8, num_epochs=None):\n def input_fn():\n filename_queue = tf.train.string_input_producer([csv_filename], num_epochs=num_epochs)\n\n # Skip lines that have already been processed\n reader = tf.TextLineReader(name='CSV_Reader')\n key, value = reader.read(filename_queue, name='file_reading_op')\n\n # value = tf.Print(value, [value])\n\n default_line = [['None'], ['None'], [0.0]]\n img_path1, img_path2, label = tf.decode_csv(value, record_defaults=default_line, field_delim=',',\n name='csv_reading_op')\n\n # Preprocess\n to_be_batched = dict()\n to_be_batched['images_1'], to_be_batched['image_sizes_1'] = img_preprocessing_fn(tf.read_file(img_path1))\n to_be_batched['images_2'], to_be_batched['image_sizes_2'] = img_preprocessing_fn(tf.read_file(img_path2))\n to_be_batched['labels'] = label[None]\n\n # Batch\n capacity = batch_size * num_threads * 3\n batch = tf.train.batch(to_be_batched,\n batch_size=batch_size,\n num_threads=num_threads,\n capacity=capacity,\n # min_after_dequeue=batch_size * 3,\n allow_smaller_final_batch=True)\n\n return batch, batch['labels']\n\n return input_fn\n\n\ndef input_triplets_from_csv(csv_filename, img_preprocessing_fn, batch_size=8, num_threads=8, num_epochs=None):\n def input_fn():\n filename_queue = tf.train.string_input_producer([csv_filename], num_epochs=num_epochs)\n\n # Skip lines that have already been processed\n reader = tf.TextLineReader(name='CSV_Reader')\n key, value = reader.read(filename_queue, name='file_reading_op')\n\n default_line = [['None'], ['None'], ['None']]\n img_path1, img_path2, img_path3 = tf.decode_csv(value, record_defaults=default_line, field_delim=',',\n name='csv_reading_op')\n\n # Preprocess\n to_be_batched = dict()\n to_be_batched['images_1'], to_be_batched['image_sizes_1'] = img_preprocessing_fn(tf.read_file(img_path1))\n to_be_batched['images_2'], to_be_batched['image_sizes_2'] = img_preprocessing_fn(tf.read_file(img_path2))\n to_be_batched['images_3'], to_be_batched['image_sizes_3'] = img_preprocessing_fn(tf.read_file(img_path3))\n\n # Batch\n capacity = batch_size * num_threads * 3\n batch = tf.train.batch(to_be_batched,\n batch_size=batch_size,\n num_threads=num_threads,\n capacity=capacity,\n # min_after_dequeue=batch_size * 3,\n allow_smaller_final_batch=True)\n\n return batch, None\n\n return input_fn\n\n\ndef input_uid_filename_from_csv(csv_filename, img_preprocessing_fn, batch_size=16, num_threads=8, num_epochs=None):\n def input_fn():\n csv_filenames = [csv_filename] if csv_filename is not None else []\n filename_queue = tf.train.string_input_producer(csv_filenames, num_epochs=num_epochs)\n\n # Skip lines that have already been processed\n reader = tf.TextLineReader(name='CSV_Reader')\n key, value = reader.read(filename_queue, name='file_reading_op')\n\n default_line = [['None'], ['None']]\n img_uid, img_path = tf.decode_csv(value, record_defaults=default_line, 
field_delim=',',\n name='csv_reading_op')\n\n # Preprocess\n to_be_batched = dict()\n to_be_batched['images_1'], to_be_batched['image_sizes_1'] = img_preprocessing_fn(tf.read_file(img_path))\n to_be_batched['uids'] = img_uid\n\n # Batch\n capacity = batch_size * num_threads * 3\n batch = tf.train.batch(to_be_batched,\n batch_size=batch_size,\n num_threads=num_threads,\n capacity=capacity,\n allow_smaller_final_batch=True)\n\n return batch, None\n\n return input_fn\n\n\ndef decode_and_resize(max_size, increment, data_augmentation_fn=None):\n def fn(raw_input):\n with tf.variable_scope('Preprocess'):\n decoded_image = tf.cast(tf.image.decode_jpeg(raw_input, channels=3,\n try_recover_truncated=True), tf.float32)\n if data_augmentation_fn:\n decoded_image = data_augmentation_fn(decoded_image)\n original_shape = tf.cast(tf.shape(decoded_image)[:2], tf.float32)\n ratio = tf.reduce_min(max_size / original_shape)\n new_shape = original_shape * ratio\n rounded_shape = tf.cast(tf.round(new_shape / increment) * increment, tf.int32)\n rounded_shape = tf.maximum(rounded_shape, increment) # In order to avoid having some shape set to 0 if ratio < 0.1\n resized_image = tf.image.resize_images(decoded_image, rounded_shape)\n paddings = tf.minimum(rounded_shape - 1, max_size - rounded_shape)\n # Do as much reflecting padding as possible to avoid screwing the batch_norm statistics\n padded_image = tf.pad(resized_image, [[0, paddings[0]], [0, paddings[1]], [0, 0]],\n mode='REFLECT')\n padded_image = tf.pad(padded_image, [[0, max_size - rounded_shape[0] - paddings[0]],\n [0, max_size - rounded_shape[1] - paddings[1]], [0, 0]],\n mode='CONSTANT')\n padded_image.set_shape([max_size, max_size, 3])\n return padded_image, rounded_shape\n\n return fn\n\n\ndef data_augmentation_fn(lr_flip=True, rotation=True, zoom=True, color=True):\n def fn(img_tensor):\n with tf.variable_scope('DataAugmentation'):\n original_input = img_tensor\n if lr_flip:\n img_tensor = tf.image.random_flip_left_right(img_tensor)\n if zoom:\n img_tensor = random_zoom(img_tensor, max_zoom=0.1)\n if rotation:\n img_tensor = random_rotation(img_tensor)\n # img_tensor = tf.image.random_brightness(img_tensor, max_delta=10) # Tends to saturate the image\n if color:\n img_tensor = tf.image.random_contrast(img_tensor, lower=0.8, upper=1.)\n img_tensor = tf.image.random_hue(img_tensor, max_delta=0.15)\n img_tensor = tf.image.random_saturation(img_tensor, lower=0.8, upper=1.2)\n\n return tf.cond(tf.equal(tf.reduce_min(tf.shape(img_tensor[:2])), 0),\n lambda: original_input,\n lambda: img_tensor)\n\n return fn\n\n\ndef random_rotation(img, max_rotation=0.1, crop=True):\n with tf.name_scope('RandomRotation'):\n rotation = tf.random_uniform([], -max_rotation, max_rotation)\n rotated_image = tf.contrib.image.rotate(img, rotation)\n if crop:\n rotation = tf.abs(rotation)\n original_shape = tf.shape(rotated_image)[:2]\n h, w = original_shape[0], original_shape[1]\n # see https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders for formulae\n old_l, old_s = tf.cond(h > w, lambda: [h, w], lambda: [w, h])\n old_l, old_s = tf.cast(old_l, tf.float32), tf.cast(old_s, tf.float32)\n new_l = (old_l * tf.cos(rotation) - old_s * tf.sin(rotation)) / tf.cos(2 * rotation)\n new_s = (old_s - tf.sin(rotation) * new_l) / tf.cos(rotation)\n new_h, new_w = tf.cond(h > w, lambda: [new_l, new_s], lambda: [new_s, new_l])\n new_h, new_w = tf.cast(new_h, tf.int32), tf.cast(new_w, tf.int32)\n bb_begin = tf.cast(tf.ceil((h - new_h) / 2), tf.int32), 
tf.cast(tf.ceil((w - new_w) / 2), tf.int32)\n rotated_image_crop = rotated_image[bb_begin[0]:h - bb_begin[0], bb_begin[1]:w - bb_begin[1], :]\n\n # If image was cropped to 0 size then forget rotation\n\n #TODO probablement pas parfait encore\n rotated_image = tf.cond(tf.equal(tf.reduce_min(tf.shape(rotated_image_crop[:2])), 0),\n lambda: img,\n lambda: rotated_image_crop)\n return rotated_image\n\n\ndef random_zoom(input_image, max_zoom=0.15):\n with tf.name_scope('RandomZoom'):\n zoom = tf.random_uniform([], 0, max_zoom)\n input_size = tf.shape(input_image)[:2]\n new_size = tf.cast(tf.cast(input_size, tf.float32) * (1. - zoom), tf.int32)\n crop = tf.random_crop(input_image, tf.concat([new_size, [3]], 0))\n return tf.image.resize_images(crop, input_size)\n","repo_name":"SeguinBe/Replica-Search","sub_path":"replica_learn/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":13409,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"15093978270","text":"import pandas as pd\n\n# finding the minimum coverage and summing the number of sites affected\ndef summarize_gene_data(lc_filtered):\n\tif 'Gene' in lc_filtered.columns:\n\n\t\tif 'Minimum.Depth' in lc_filtered.columns:\n\t\t\tif (lc_filtered['Minimum.Depth'] % 1 == 0).all():\n\t\t\t\tmin_lc = lc_filtered.groupby(\"Gene\", as_index=False)[\"Minimum.Depth\"].min()\n\t\t\telse:\n\t\t\t\traise TypeError(f\"Minimum.Depth column contains non-integer whole values\") # assuming that coverage cannot be a decimal value as well\n\t\telse:\n\t\t\traise RuntimeError(f\"'Minimum.Depth' column not found, please check the .csv file\")\n\n\t\tif 'Number.of.Sites' in lc_filtered.columns:\n\t\t\tif (lc_filtered['Number.of.Sites'] % 1 == 0).all():\n\t\t\t\tsum_pos = lc_filtered.groupby(\"Gene\", as_index=False)[\"Number.of.Sites\"].sum()\n\t\t\telse:\n\t\t\t\traise TypeError(f\"Number.of.Sites column contains non-integer whole values\") # assuming that number of sites cannot be a decimal value as well\n\t\telse:\n\t\t\traise RuntimeError(f\"'Number.of.Sites' column not found, please check the .csv file\")\n\n\t\tgene_summary = min_lc.merge(sum_pos, on = \"Gene\")\n\t\tgene_summary = gene_summary.rename(columns = {\"Minimum.Depth\":\"Minimum.Coverage\", \"Number.of.Sites\":\"Total.Low.Coverage\"})\n\telse:\n\t\traise RuntimeError(f\"'Gene' column not found, please check the .csv file\")\n\t\t\n\treturn gene_summary","repo_name":"parmejohn/GSC_CCGI_CodingChallenge","sub_path":"modules/summarize_gene_data.py","file_name":"summarize_gene_data.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70750481398","text":"from transformers import *\nfrom .helper import id_generator\n\n# XOR\nfrom itertools import cycle\nfrom zlib import compress\n\n\n# CONSTANT TRANSFORMER\n# Method could take a type as a parameter\n# Cast to that type at end (using str(value), int(value), float(value))\n# See injection.py for relevant injected functions\nclass Constants(NodeTransformer):\n def __init__(self):\n ast.NodeTransformer.__init__(self)\n self.injected_method = False\n \n def visit_Module(self, node):\n \n # Inject decryption method into module if not done\n if not self.injected_method:\n self.decrypter_func_name = id_generator(random.randint(10, 20)) \n self.inject_method(node, 'decrypt_constant', self.decrypter_func_name)\n self.injected_method = True\n \n self.generic_visit(node)\n return node\n \n def 
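# Compact pandas alternative to the min/sum/merge sequence above, using named
# aggregation; the snake_case output names are illustrative renamings of the
# columns built there.
import pandas as pd

def summarize(df):
    return df.groupby('Gene', as_index=False).agg(
        minimum_coverage=('Minimum.Depth', 'min'),
        total_low_coverage=('Number.of.Sites', 'sum'))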
visit_Str(self, node):\n \n value = node.s\n\n if value == '':\n return node\n\n key = id_generator(random.randint(16, 32))\n value = self.encrypt_constant(value, key)\n \n dec_call = Call(func=Name(id=self.decrypter_func_name, ctx=Load()), args=[\n Str(s=value),\n Str(s=key),\n Name(id='str', ctx=Load())\n ], keywords=[])\n \n return dec_call\n \n def visit_Num(self, node):\n\n value = node.n\n\n key = id_generator(random.randint(16, 32))\n\n typ = 'int'\n if isinstance(value, float):\n typ = 'float'\n \n value = self.encrypt_constant(value, key)\n\n dec_call = Call(func=Name(id=self.decrypter_func_name, ctx=Load()), args=[\n Str(s=value),\n Str(s=key),\n Name(id=typ, ctx=Load())\n ], keywords=[])\n\n return dec_call\n \n def encrypt_constant(self, constant, key):\n return ''.join(chr(ord(c)^ord(k)) for c,k in zip(str(constant), cycle(key)))\n #return compress(''.join(chr(ord(c)^ord(k)) for c,k in zip(str(constant), cycle(key))).encode(\"utf-8\"), 9).hex() \n\n def inject_method(self, target_module_node, inject_func_name, new_name):\n inj = Injector(target_module_node, inject_func_name, new_name)\n inj_root = ast.parse(open('payload.py', 'rb').read())\n inj_root = inj.visit(inj_root)\n","repo_name":"Elliesaur/PyFlow","sub_path":"transformers/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"26492075794","text":"import os\nimport csv\n\nimport matplotlib.pyplot as plt\n\nfl = list(os.walk(\"./temp/data/repeat runs\"))\n\nfiles_of_concern = []\n\nfor data in fl[1:]:\n path, sub_folders, file_names = data\n\n with open(f\"{path}/SUMMARY.csv\", 'r') as summary_file:\n\n lines = summary_file.readlines()\n\n theta_list = [float(((line.split(\",\"))[0]).strip()) for line in lines[1:]]\n c4_list = [float(((line.split(\",\"))[10]).strip()) for line in lines[1:]]\n\n plt.plot(theta_list, c4_list)\n plt.show()\n\n decreasing = True\n for i in range(len(c4_list)):\n try:\n if c4_list[i+1] > c4_list[i]:\n decreasing = False\n break\n except:\n break\n\n if not decreasing:\n files_of_concern.append(path)","repo_name":"Lynn-Quantum-Optics/Summer-Spring-2022-3","sub_path":"Summer2022/AutomatedLabFramework/scratch_1.py","file_name":"scratch_1.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32314085904","text":"# -*- coding: utf-8 -*-\n\n\n################################################################################\n#\n# WeatherLog: io.py\n# This module reads and writes dataset, metadata, and configuration files.\n#\n################################################################################\n\n\n# Import os for creating directories.\nimport os\n# Import glob for getting a list of the datasets.\nimport glob\n# Import json for saving the configuration file.\nimport json\n# Import datetime for getting the current time.\nimport datetime\n# Import pickle for loading and saving the data.\nimport pickle\n\n\ndef write_dataset(main_dir=\"\", name=\"\", filename=\"\", data=\"\"):\n \"\"\"Writes the data to the dataset file.\"\"\"\n\n # Get the filename.\n filename = filename if filename != \"\" else \"%s/datasets/%s/weather\" % (main_dir, name)\n\n if data == \"\":\n data = []\n\n try:\n data_file = open(filename, \"wb\")\n pickle.dump(data, data_file)\n data_file.close()\n return True\n\n except IOError as e:\n print(\"write_dataset(): Error saving dataset file 
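# Round-trip property of the XOR-with-cycled-key scheme in encrypt_constant
# above: applying the same transform twice restores the input.
from itertools import cycle

def xor_cipher(text, key):
    return ''.join(chr(ord(c) ^ ord(k)) for c, k in zip(text, cycle(key)))

assert xor_cipher(xor_cipher('secret', 'k3y'), 'k3y') == 'secret'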
(IOError):\\n%s\" % e)\n return False\n\n except (TypeError, ValueError) as e:\n print(\"write_dataset(): Error saving dataset file (TypeError or ValueError):\\n%s\" % e)\n return False\n\n\ndef read_dataset(main_dir=\"\", name=\"\", filename=\"\"):\n \"\"\"Reads the data from the dataset file.\"\"\"\n\n # Get the filename.\n filename = filename if filename != \"\" else \"%s/datasets/%s/weather\" % (main_dir, name)\n\n try:\n data_file = open(filename, \"rb\")\n data = pickle.load(data_file)\n data_file.close()\n\n except IOError as e:\n print(\"read_dataset(): Error importing data (IOError):\\n%s\" % e)\n data = []\n\n except (TypeError, ValueError) as e:\n print(\"read_dataset(): Error importing data (TypeError or ValueError):\\n%s\" % e)\n data = []\n\n return data\n\n\ndef write_blank_dataset(main_dir, name):\n \"\"\"Writes a blank dataset file.\"\"\"\n\n try:\n os.makedirs(\"%s/datasets/%s\" % (main_dir, name))\n new_prof_file = open(\"%s/datasets/%s/weather\" % (main_dir, name), \"wb\")\n pickle.dump([], new_prof_file)\n new_prof_file.close()\n \n except IOError as e:\n print(\"write_blank_dataset(): Error saving dataset file (IOError):\\n%s\" % e)\n\n except (TypeError, ValueError) as e:\n print(\"write_blank_dataset(): Error saving dataset file (TypeError or ValueError):\\n%s\" % e)\n\n\ndef write_standard_file(filename, data):\n \"\"\"Writes a basic file.\"\"\"\n\n try:\n data_file = open(filename, \"w\")\n data_file.write(data)\n data_file.close()\n\n except IOError as e:\n print(\"write_standard_file(): Error saving data file (IOError):\\n%s\" % e)\n\n\ndef write_json_file(filename, data, indent=False, indent_amount=4):\n \"\"\"Writes a JSON file.\"\"\"\n \n try:\n data_file = open(filename, \"w\")\n if indent:\n json.dump(data, data_file, indent=indent_amount)\n else:\n json.dump(data, data_file)\n data_file.close()\n\n except IOError as e:\n print(\"write_json_file(): Error saving data file (IOError):\\n%s\" % e)\n \n except (TypeError, ValueError) as e:\n print(\"write_json_file(): Error saving data file (TypeError or ValueError):\\n%s\" % e)\n\n\ndef get_dataset_list(main_dir, last_dataset, exclude_current=True):\n \"\"\"Gets the list of datasets.\"\"\"\n\n # Remember the correct directory and switch to where the datasets are stored.\n current_dir = os.getcwd()\n os.chdir(\"%s/datasets\" % main_dir)\n\n # Get the list of datasets and sort the list.\n datasets = glob.glob(\"*\")\n if exclude_current:\n datasets = list(set(datasets) - {last_dataset})\n datasets.sort()\n\n # Get the creation and last modified dates.\n for i in range(0, len(datasets)):\n\n # Get the dates.\n creation, modified = get_metadata(main_dir, datasets[i])\n datasets[i] = [datasets[i], creation, modified]\n\n # Switch back to the previous directory.\n os.chdir(current_dir)\n\n return datasets\n\n\ndef get_metadata(main_dir, last_dataset):\n \"\"\"Gets the current metadata.\"\"\"\n\n try:\n meta_file = open(\"%s/datasets/%s/metadata.json\" % (main_dir, last_dataset), \"r\")\n meta_data = json.load(meta_file)\n meta_file.close()\n creation = meta_data[\"creation\"]\n modified = meta_data[\"modified\"]\n\n except IOError as e:\n print(\"get_metadata(): Error reading metadata file (IOError):\\n%s\" % e)\n creation = \"Error\"\n modified = \"Error\"\n \n except (TypeError, ValueError) as e:\n print(\"get_metadata(): Error reading metadata file (TypeError or ValueError):\\n%s\" % e)\n creation = \"Error\"\n modified = \"Error\"\n\n return creation, modified\n\n\ndef write_metadata(main_dir, last_dataset, 
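# Hedged sketch: an atomic variant of the save helpers in this module — write
# to a temporary file, then os.replace(), so a crash mid-write never leaves a
# truncated file behind. The helper name is hypothetical.
import json, os, tempfile

def atomic_write_json(path, data):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    try:
        with os.fdopen(fd, 'w') as f:
            json.dump(data, f)
        os.replace(tmp, path)
    except Exception:
        os.unlink(tmp)
        raise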
creation=\"\", modified=\"\", now=False):\n \"\"\"Writes the metadata file.\"\"\"\n \n if now:\n now = datetime.datetime.now()\n creation = \"%d/%d/%d\" % (now.day, now.month, now.year)\n modified = creation\n\n try:\n meta_file = open(\"%s/datasets/%s/metadata.json\" % (main_dir, last_dataset), \"w\")\n json.dump({\"creation\": creation, \"modified\": modified}, meta_file)\n meta_file.close()\n\n except IOError as e:\n print(\"write_metadata(): Error saving metadata file (IOError):\\n%s\" % e)\n \n except (TypeError, ValueError) as e:\n print(\"write_metadata(): Error saving metadata file (TypeError or ValueError):\\n%s\" % e)\n\n\ndef write_config(conf_dir, config):\n \"\"\"Saves the configuration.\"\"\"\n\n try:\n config_file = open(\"%s/config.json\" % conf_dir, \"w\")\n json.dump(config, config_file)\n config_file.close()\n\n except IOError as e:\n print(\"write_config(): Error saving configuration file (IOError):\\n%s\" % e)\n\n except (TypeError, ValueError) as e:\n print(\"write_config(): Error saving configuration file (TypeError or ValueError):\\n%s\" % e)\n\n\ndef write_restore_data(conf_dir, last_dataset, window_height, window_width):\n \"\"\"Saves the last dataset and window size.\"\"\"\n\n try:\n rest_file = open(\"%s/application_restore.json\" % conf_dir, \"w\")\n json.dump({\n \"last_dataset\": last_dataset,\n \"window_height\": window_height,\n \"window_width\": window_width\n }, rest_file)\n rest_file.close()\n\n except IOError as e:\n print(\"write_restore_data(): Error saving application restore file (IOError):\\n%s\" % e)\n \n except (TypeError, ValueError) as e:\n print(\"write_restore_data(): Error saving application restore file (TypeError or ValueError):\\n%s\" % e)\n","repo_name":"achesak/weatherlog","sub_path":"resources/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"} +{"seq_id":"28336888560","text":"class Stack(object):\n def __init__(self, capacity):\n self.array = [0 for i in range(capacity)]\n self.capacity = capacity\n self.size = 0\n\n def push(self, x):\n if self.size < self.capacity:\n self.size += 1\n self.array[self.size - 1] = x\n else:\n raise ValueError(\"StackOverflow\")\n\n def pop(self):\n if self.size > 0:\n self.size -= 1\n return self.array[self.size]\n else:\n raise ValueError(\"StackUnderflow\")\n\n def peek(self):\n if self.size > 0:\n return self.array[self.size - 1]\n else:\n raise ValueError(\"StackUnderflow\")\n\ncapacity = 10\nS = Stack(10)\nA = list(range(10))\nfor i in A:\n S.push(i)\nB = []\nfor i in range(len(A)):\n B.append(S.pop())\n\na = list(reversed(A))\nb = B\ntry:\n assert a == b\nexcept AssertionError:\n print(a)\n print(b)\n","repo_name":"lyqscmy/leetcode","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"43541723664","text":"import sys\nsys.stdin=open('input.txt','r')\n\n'''\n#1. Given N (e.g. 10), loop over range(1, N+1)\n#2. Convert each number to a string\n#3. Count how many of the digits 3, 6 and 9 it contains (cnt)\n#4. Print '-' cnt times instead of the number\n'''\n\n\nN=int(input())\nfor i in range(1,N+1):\n cnt=0\n a=str(i)\n for j in a:\n if j in '369':\n cnt+=1\n if cnt>0:\n print('-'*cnt,end=\" \")\n else:\n print(a,end=\" \")\n","repo_name":"kimchaelin13/Algorithm","sub_path":"swea/SWEA_1926.간단한 369게임.py","file_name":"SWEA_1926.간단한 
369게임.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"3286946970","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: KMLee\n\"\"\"\nimport os\nfrom configparser import ConfigParser\n\nfilename = os.path.abspath(__file__)\ndirname = os.path.dirname(filename)\n\n#%%\n# Basic Device Characteristics\n# Please don't change these values, since they are fixed by each model's hardware limits\nSynthNV = {\n 'num_power' : 1, 'num_freq' : 1, 'num_phase' : 1,\n 'min_power' : [-13.49], 'max_power' : [18.55],\n 'min_freq' : [34e6], 'max_freq' : [4.5e9],\n 'min_phase' : [0], 'max_phase' : [360],\n 'type' : 'SynthNV', 'chan' : ['CH 1']}\n\n# CH1 : CH A / CH2 : CH B\nSynthHD = {\n 'num_power' : 2, 'num_freq' : 2, 'num_phase' : 2,\n 'min_power' : [-50,-50], 'max_power' : [20,20],\n 'min_freq' : [53e6, 53e6], 'max_freq' : [15e9, 15e9],\n 'min_phase' : [0,0], 'max_phase' : [360,360],\n 'type' : 'SynthHD', 'chan' : ['CH A', 'CH B']}\n\nAPSYN420 = {\n 'num_power' : 1, 'num_freq' : 1, 'num_phase' : 1,\n 'min_power' : [23], 'max_power' : [23],\n 'min_freq' : [10e6], 'max_freq' : [20e9],\n 'min_phase' : [0], 'max_phase' : [360],\n 'type' : 'APSYN420', 'chan' : ['CH 1']}\n\n# CH1 : BNC / CH2 : NTYPE\nSG384 = {\n 'num_power' : 2, 'num_freq' : 1, 'num_phase' : 1,\n 'min_power' : [-47, -47], 'max_power' : [13, 13], # Power range for SG384 is limited based on BNC output. NTYPE can make -110 to 16.5 dBm signal\n 'min_freq' : [950e3], 'max_freq' : [62.5e6],\n 'min_phase' : [0], 'max_phase' : [360],\n 'type' : 'SG384', 'chan' : ['BNC', 'NTYPE']} # Frequency range for SG384 is limited based on BNC output. NTYPE can generate up to 4GHz signal\n\n\n#%%\n# Making Device list from rf config file\nrf_models = {'synthnv':SynthNV, \n 'synthhd':SynthHD, \n 'sg384':SG384, \n 'apsyn420':APSYN420}\n\ncp = ConfigParser()\ncp.read(dirname + \"/RFconfig.ini\")\nDevice_list = {}\nfor rf in cp.sections():\n if rf != 'RF':\n dev = cp.get(rf, 'model')\n Device_list[rf] = rf_models[dev.lower()]\n\n# Device_list = {'935SB':SynthHD, \n# 'EOM_7_4G':APSYN420, \n# '2_1G':SynthNV, \n# 'EA_TRAP':SG384, \n# 'EC_TRAP':SG384, \n# 'EA_MW':APSYN420, \n# 'EC_MW':APSYN420}\n\n'''\nIf you want to change the available frequency/power range for a device,\nchange the value in Device_list.\nThe shape should stay consistent.\n\ne.g.)\nDevice_list['935SB']['max_power'] = [15, 15]\n\nAPSYN420's power range cannot be changed.\n'''\nDevice_list['935SB']['max_power'] = [10, 10]\nDevice_list['935SB']['chan'] = ['EC', 'EA']\n# BNC / NTYPE\nDevice_list['EA_TRAP']['max_power'] = [-24, -10]\nDevice_list['EC_TRAP']['max_power'] = [-10, -10]\n# Device_list['2_1G']['max_power'] = [0]","repo_name":"jhjeong32snu/QtDeviceServer_for_IonTrap_v2","sub_path":"Server/devices/RF/RFsettings.py","file_name":"RFsettings.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"32946061053","text":"from app.extensions.db import db\nfrom app.models.hashtag_model import Hashtag\nfrom flask import Blueprint, jsonify, request\n\nhashtag_bp = Blueprint('hashtag_bp', __name__)\n\n@hashtag_bp.route('/api/hashtag/getAll/', methods=[\"GET\"])\ndef getAllHashtags():\n hashtags = Hashtag.query.all()\n hashtag_list = [{'hashtagName': hashtag.hashtagName} for hashtag in hashtags]\n return jsonify(hashtag_list), 200\n\n@hashtag_bp.route('/api/hashtag/create/', methods=[\"POST\"])\ndef createNewHashtag():\n hashtagName = 
request.json.get(\"hashtagName\")\n\n existHashtag = Hashtag.query.filter_by(hashtagName=hashtagName).all()\n if existHashtag:\n return jsonify({\"message\": f\"This hashtag '{hashtagName}' already exists.\"}), 409\n \n new_hashtag = Hashtag(hashtagName=hashtagName)\n try:\n db.session.add(new_hashtag)\n db.session.commit()\n\n response_data = {\n \"message\": \"Created new hashtag successfully\",\n }\n return jsonify(response_data), 201\n except Exception as e:\n return jsonify({'error': str(e)}), 500\n \n \n","repo_name":"nghilonganh00/pycook","sub_path":"app/routes/hashtag_routes.py","file_name":"hashtag_routes.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"15733001220","text":"import copy\nfrom flask import Blueprint, render_template\nfrom flask_login import login_required, current_user\nfrom sqlalchemy import desc\nfrom comments.forms import CommentForm\nfrom forum.forms import ForumForm\nfrom models import Forum, Comments, db\n\nforum_blueprint = Blueprint('forum', __name__, template_folder='templates')\n\n\n# function to display the main forum page\n@forum_blueprint.route('/forum')\n@login_required\ndef forum():\n posts = Forum.query.order_by(desc('post_id')).all()\n return render_template('forum.html', forum=posts, user=current_user)\n\n\n# function to create a new forum\n@forum_blueprint.route('/create', methods=('GET', 'POST'))\ndef create():\n # run the form to obtain user input\n form = ForumForm()\n\n if form.validate_on_submit():\n new_post = Forum(user_id=current_user.id, title=form.title.data, body=form.body.data)\n # new_post = Forum(user_id=1, title=form.title.data, body=form.body.data)\n\n # commit changes to database\n db.session.add(new_post)\n db.session.commit()\n\n # display forum home page\n return forum()\n # if the form is not validated then render the create_forum template again\n return render_template('create_forum.html', form=form)\n\n\n# function to update already created forums\n@forum_blueprint.route('/<int:post_id>/update', methods=('GET', 'POST'))\ndef update(post_id):\n post = Forum.query.filter_by(post_id=post_id).first()\n if not post:\n return render_template('error_codes/500.html')\n\n form = ForumForm()\n\n # if submitted\n if form.validate_on_submit():\n Forum.query.filter_by(post_id=post_id).update({\"title\": form.title.data})\n Forum.query.filter_by(post_id=post_id).update({\"body\": form.body.data})\n\n db.session.commit()\n\n return forum()\n\n # creates a copy of post object which is independent of database.\n post_copy = copy.deepcopy(post)\n\n # set update form with title and body of copied post object\n form.title.data = post_copy.title\n form.body.data = post_copy.body\n\n return render_template('update_forum.html', form=form)\n\n\n# function to delete forum posts\n@forum_blueprint.route('/<int:post_id>/delete')\ndef delete(post_id):\n Forum.query.filter_by(post_id=post_id).delete()\n Comments.query.filter_by(post_id=post_id).delete()\n db.session.commit()\n\n return forum()\n\n\n# function to add a comment to a forum post\n@forum_blueprint.route('/<int:post_id>/comment', methods=('GET', 'POST'))\ndef comment(post_id):\n post = Forum.query.filter_by(post_id=post_id).first()\n form = CommentForm()\n\n # if submitted\n if form.validate_on_submit():\n new_comment = Comments(body=form.body.data, user_id=current_user.id, post_id=post_id)\n\n # add new comment\n db.session.add(new_comment)\n db.session.commit()\n\n return forum()\n return render_template('create_comment.html', 
form=form)\n\n\n# function to display all comments on a given forum post\n@forum_blueprint.route('/<int:post_id>/view_comments')\ndef view_comments(post_id):\n comments = Comments.query.filter_by(post_id=post_id).order_by(desc('post_id')).all()\n return render_template('view_comments.html', comment=comments, post_id=post_id)\n\n\n# function to delete any comments on a post\n@forum_blueprint.route('/<int:post_id>/<int:comment_id>/delete_comment')\ndef delete_comment(comment_id, post_id):\n Comments.query.filter_by(comment_id=comment_id).delete()\n db.session.commit()\n\n return view_comments(post_id)\n\n\n\n","repo_name":"JunZhao20/climate-action-app--group-project","sub_path":"forum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"70067324916","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nUnit tests for AWS KMS Decryption Renderer.\n\"\"\"\n# pylint: disable=protected-access\n\n# Import Python Libs\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n# Import Salt libs\nimport salt.exceptions\nimport salt.renderers.aws_kms as aws_kms\n\n# Import Salt Testing libs\nfrom tests.support.mixins import LoaderModuleMockMixin\nfrom tests.support.mock import MagicMock, patch\nfrom tests.support.unit import TestCase, skipIf\n\ntry:\n import botocore.exceptions\n import botocore.session\n import botocore.stub\n\n NO_BOTOCORE = False\nexcept ImportError:\n NO_BOTOCORE = True\n\ntry:\n import cryptography.fernet as fernet\n\n NO_FERNET = False\nexcept ImportError:\n NO_FERNET = True\n\n\nPLAINTEXT_SECRET = \"Use more salt.\"\nENCRYPTED_DATA_KEY = \"encrypted-data-key\"\nPLAINTEXT_DATA_KEY = b\"plaintext-data-key\"\nBASE64_DATA_KEY = b\"cGxhaW50ZXh0LWRhdGEta2V5\"\nAWS_PROFILE = \"test-profile\"\nREGION_NAME = \"us-test-1\"\n\n\n@skipIf(NO_BOTOCORE, \"Unable to import botocore libraries\")\nclass AWSKMSTestCase(TestCase, LoaderModuleMockMixin):\n\n \"\"\"\n unit test AWS KMS renderer\n \"\"\"\n\n def setup_loader_modules(self):\n return {aws_kms: {}}\n\n def test__cfg_data_key(self):\n \"\"\"\n _cfg_data_key returns the aws_kms:data_key from configuration.\n \"\"\"\n config = {\"aws_kms\": {\"data_key\": ENCRYPTED_DATA_KEY}}\n with patch.dict(\n aws_kms.__salt__, {\"config.get\": config.get}\n ): # pylint: disable=no-member\n self.assertEqual(\n aws_kms._cfg_data_key(),\n ENCRYPTED_DATA_KEY,\n \"_cfg_data_key did not return the data key configured in __salt__.\",\n )\n with patch.dict(aws_kms.__opts__, config): # pylint: disable=no-member\n self.assertEqual(\n aws_kms._cfg_data_key(),\n ENCRYPTED_DATA_KEY,\n \"_cfg_data_key did not return the data key configured in __opts__.\",\n )\n\n def test__cfg_data_key_no_key(self):\n \"\"\"\n When no aws_kms:data_key is configured,\n calling _cfg_data_key should raise a SaltConfigurationError\n \"\"\"\n self.assertRaises(salt.exceptions.SaltConfigurationError, aws_kms._cfg_data_key)\n\n def test__session_profile(self): # pylint: disable=no-self-use\n \"\"\"\n _session instantiates boto3.Session with the configured profile_name\n \"\"\"\n with patch.object(aws_kms, \"_cfg\", lambda k: AWS_PROFILE):\n with patch(\"boto3.Session\") as session:\n aws_kms._session()\n session.assert_called_with(profile_name=AWS_PROFILE)\n\n def test__session_noprofile(self):\n \"\"\"\n _session raises a SaltConfigurationError\n when boto3 raises botocore.exceptions.ProfileNotFound.\n \"\"\"\n with patch(\"boto3.Session\") as session:\n session.side_effect = 
botocore.exceptions.ProfileNotFound(\n profile=AWS_PROFILE\n )\n self.assertRaises(salt.exceptions.SaltConfigurationError, aws_kms._session)\n\n def test__session_noregion(self):\n \"\"\"\n _session raises a SaltConfigurationError\n when boto3 raises botocore.exceptions.NoRegionError\n \"\"\"\n with patch(\"boto3.Session\") as session:\n session.side_effect = botocore.exceptions.NoRegionError\n self.assertRaises(salt.exceptions.SaltConfigurationError, aws_kms._session)\n\n def test__kms(self): # pylint: disable=no-self-use\n \"\"\"\n _kms calls boto3.Session.client with 'kms' as its only argument.\n \"\"\"\n with patch(\"boto3.Session.client\") as client:\n aws_kms._kms()\n client.assert_called_with(\"kms\")\n\n def test__kms_noregion(self):\n \"\"\"\n _kms raises a SaltConfigurationError\n when boto3 raises a NoRegionError.\n \"\"\"\n with patch(\"boto3.Session\") as session:\n session.side_effect = botocore.exceptions.NoRegionError\n self.assertRaises(salt.exceptions.SaltConfigurationError, aws_kms._kms)\n\n def test__api_decrypt(self): # pylint: disable=no-self-use\n \"\"\"\n _api_decrypt_response calls kms.decrypt with the\n configured data key as the CiphertextBlob kwarg.\n \"\"\"\n kms_client = MagicMock()\n with patch.object(aws_kms, \"_kms\") as kms_getter:\n kms_getter.return_value = kms_client\n with patch.object(aws_kms, \"_cfg_data_key\", lambda: ENCRYPTED_DATA_KEY):\n aws_kms._api_decrypt()\n kms_client.decrypt.assert_called_with(\n CiphertextBlob=ENCRYPTED_DATA_KEY\n ) # pylint: disable=no-member\n\n def test__api_decrypt_badkey(self):\n \"\"\"\n _api_decrypt_response raises SaltConfigurationError\n when kms.decrypt raises a botocore.exceptions.ClientError\n with an error_code of 'InvalidCiphertextException'.\n \"\"\"\n kms_client = MagicMock()\n kms_client.decrypt.side_effect = botocore.exceptions.ClientError( # pylint: disable=no-member\n error_response={\"Error\": {\"Code\": \"InvalidCiphertextException\"}},\n operation_name=\"Decrypt\",\n )\n with patch.object(aws_kms, \"_kms\") as kms_getter:\n kms_getter.return_value = kms_client\n with patch.object(aws_kms, \"_cfg_data_key\", lambda: ENCRYPTED_DATA_KEY):\n self.assertRaises(\n salt.exceptions.SaltConfigurationError, aws_kms._api_decrypt\n )\n\n def test__plaintext_data_key(self):\n \"\"\"\n _plaintext_data_key returns the 'Plaintext' value from the response.\n It caches the response and only calls _api_decrypt exactly once.\n \"\"\"\n with patch.object(\n aws_kms,\n \"_api_decrypt\",\n return_value={\"KeyId\": \"key-id\", \"Plaintext\": PLAINTEXT_DATA_KEY},\n ) as api_decrypt:\n self.assertEqual(aws_kms._plaintext_data_key(), PLAINTEXT_DATA_KEY)\n aws_kms._plaintext_data_key()\n api_decrypt.assert_called_once()\n\n def test__base64_plaintext_data_key(self):\n \"\"\"\n _base64_plaintext_data_key returns the urlsafe base64 encoded plain text data key.\n \"\"\"\n with patch.object(\n aws_kms, \"_plaintext_data_key\", return_value=PLAINTEXT_DATA_KEY\n ):\n self.assertEqual(aws_kms._base64_plaintext_data_key(), BASE64_DATA_KEY)\n\n @skipIf(NO_FERNET, \"Failed to import cryptography.fernet\")\n def test__decrypt_ciphertext(self):\n \"\"\"\n test _decrypt_ciphertext\n \"\"\"\n test_key = fernet.Fernet.generate_key()\n crypted = fernet.Fernet(test_key).encrypt(PLAINTEXT_SECRET.encode())\n with patch.object(aws_kms, \"_base64_plaintext_data_key\", return_value=test_key):\n self.assertEqual(aws_kms._decrypt_ciphertext(crypted), PLAINTEXT_SECRET)\n\n @skipIf(NO_FERNET, \"Failed to import cryptography.fernet\")\n def 
test__decrypt_object(self):\n \"\"\"\n Test _decrypt_object\n \"\"\"\n test_key = fernet.Fernet.generate_key()\n crypted = fernet.Fernet(test_key).encrypt(PLAINTEXT_SECRET.encode())\n secret_map = {\"secret\": PLAINTEXT_SECRET}\n crypted_map = {\"secret\": crypted}\n\n secret_list = [PLAINTEXT_SECRET]\n crypted_list = [crypted]\n\n with patch.object(aws_kms, \"_base64_plaintext_data_key\", return_value=test_key):\n self.assertEqual(\n aws_kms._decrypt_object(PLAINTEXT_SECRET), PLAINTEXT_SECRET\n )\n self.assertEqual(aws_kms._decrypt_object(crypted), PLAINTEXT_SECRET)\n self.assertEqual(aws_kms._decrypt_object(crypted_map), secret_map)\n self.assertEqual(aws_kms._decrypt_object(crypted_list), secret_list)\n self.assertEqual(aws_kms._decrypt_object(None), None)\n\n @skipIf(NO_FERNET, \"Failed to import cryptography.fernet\")\n def test_render(self):\n \"\"\"\n Test that we can decrypt some data.\n \"\"\"\n test_key = fernet.Fernet.generate_key()\n crypted = fernet.Fernet(test_key).encrypt(PLAINTEXT_SECRET.encode())\n with patch.object(aws_kms, \"_base64_plaintext_data_key\", return_value=test_key):\n self.assertEqual(aws_kms.render(crypted), PLAINTEXT_SECRET)\n","repo_name":"Kamatera/salt","sub_path":"tests/unit/renderers/test_aws_kms.py","file_name":"test_aws_kms.py","file_ext":"py","file_size_in_byte":8354,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"71444876277","text":"import requests\nimport json\nfrom time import time\n\ndef open_file(filepath):\n with open(filepath, 'r', encoding='utf-8') as infile:\n return infile.read()\n\nheaders = {\n \"Authorization\": \"Bearer %s\" % open_file('token.txt'),\n \"Content-Type\": \"application/json\"\n}\n\nbody = {\n \"text\": \"Once upon a time\",\n \"top_p\": 1,\n \"top_k\": 40,\n \"temperature\": 0.8,\n \"repetition_penalty\": 1,\n \"length\": 64\n }\n\nstart = time()\nres = requests.post(\n \"https://shared-api.forefront.link/organization/FV6AbZNxxBmB/gpt-j-6b-vanilla/completions/2JrDQ5BhJAm6\",\n json=body,\n headers=headers\n)\n\ndata = res.json()\n\ncompletion = data['result'][0]['completion']\n\nprint(completion)\n\nend = time()\n\nprint('total time:', end - start)","repo_name":"daveshap/Forefront_Demo","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"28974434848","text":"#!/usr/bin/env python\n\nimport collections\nimport glob\nimport re\nimport xmltodict\n\n\nbz_resolution_map = collections.defaultdict(list)\n\n\ndef bugzilla_to_github(id, bz):\n # If you did Step 1 without valid Bugzilla credentials, '@exporter' will be missing.\n assert sorted(bz.keys()) == ['bugzilla']\n assert sorted(bz['bugzilla'].keys()) == ['@exporter', '@maintainer', '@urlbase', '@version', 'bug']\n bz = bz['bugzilla']['bug']\n assert bz['bug_id'] == str(id)\n assert bz['bug_id'] == str(int(bz['bug_id']))\n if bz['bug_status'] == 'CONFIRMED':\n assert bz['everconfirmed'] == '1'\n bz_resolution_map['CONFIRMED'] += [bz['bug_id']]\n\n\ndef extract_id(fname):\n m = re.match(r'xml/([0-9]+).xml', fname)\n assert m, 'Unexpected filename %s in xml/ subdirectory' % fname\n return int(m.group(1))\n\n\nif __name__ == '__main__':\n all_xml_filenames = glob.glob('xml/*.xml')\n all_bugzilla_ids = sorted([extract_id(fname) for fname in all_xml_filenames])\n for id in all_bugzilla_ids:\n print('Parsing %d.xml' % id)\n with open('xml/%d.xml' % id) as f:\n xml = f.read()\n xml 
= xml.replace('\\0', '') # e.g. bug 26078\n bz = xmltodict.parse(xml)\n bugzilla_to_github(id, bz)\n print('\\n\\nbz_resolution_map = {')\n for key in bz_resolution_map.keys():\n print(' \"%s\": [' % key, end='')\n for i, bz_id in enumerate(bz_resolution_map[key]):\n if i % 20 == 0:\n print('\\n ', end='')\n print(' %s,' % bz_id, end='')\n print('\\n ],')\n print('}')\n","repo_name":"Quuxplusone/BugzillaToGithub","sub_path":"labelmaker/xml-to-bz-map.py","file_name":"xml-to-bz-map.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"646620354","text":"from tkinter import *\nfrom PIL import Image, ImageTk\nfrom image_handling.map_image_handler import save_map_as_image, get_map_as_image\nfrom procedural_map_generator.map_generator import MapGenerator\n\nclass ImageViewer:\n def __init__(self):\n self.setup()\n\n def setup(self):\n self.root = Tk()\n self.root.title(\"Image Viewer\")\n self.root.resizable(False, False)\n\n # Create a background default image\n self.size = (800, 800)\n background_color = \"#264653\"\n new_image = Image.new(\"RGB\", self.size, background_color)\n self.default_image = ImageTk.PhotoImage(new_image)\n\n self.label = Label(image=self.default_image, anchor='center')\n self.label.pack(fill=BOTH, expand=True)\n\n # Entry field for tile number\n self.tile_number_entry = Entry(self.root)\n self.tile_number_entry.pack(side=LEFT, padx=10, pady=10)\n self.tile_number_entry.insert(0, \"Enter number of tiles\")\n\n self.create_button = Button(text=\"Create\", command=self.create_image)\n self.create_button.pack(side=LEFT, padx=10, pady=10)\n\n self.reset_button = Button(text=\"Reset\", command=self.reset_image)\n self.reset_button.pack(side=LEFT, padx=10, pady=10)\n\n self.save_button = Button(text=\"Save\", command=self.save_image)\n self.save_button.pack(side=LEFT, padx=10, pady=10)\n\n # Event handler to remove the hint text when user clicks on the entry field\n self.tile_number_entry.bind(\"<Button-1>\", self.on_entry_click)\n\n def on_entry_click(self, event):\n \"\"\"remove the hint text when user clicks on the entry field\"\"\"\n if self.tile_number_entry.get() == \"Enter number of tiles\":\n self.tile_number_entry.delete(0, END)\n\n def create_image(self):\n try: # the field may be empty or non-numeric\n tile_number = int(self.tile_number_entry.get()) # Get tile number from entry field\n except ValueError:\n return\n \n self.Map = MapGenerator(tile_number)\n self.Map.run_wave_function_collapse()\n\n self.image = get_map_as_image(self.Map)\n self.image_original_size = self.image.size\n\n self.image = self.image.resize(self.size)\n self.new_image = ImageTk.PhotoImage(self.image)\n self.label.configure(image=self.new_image)\n \n def reset_image(self):\n self.label.configure(image=self.default_image)\n self.image = None\n try:\n self.Map.reset()\n except AttributeError: # no map has been created yet\n return\n\n def save_image(self):\n try: # if it wasn't created\n if self.image is not None:\n save_map_as_image(self.Map, self.image.resize(self.image_original_size))\n except AttributeError:\n return\n \n def run(self):\n self.root.mainloop()\n\nif __name__ == '__main__':\n viewer = ImageViewer()\n viewer.run()\n","repo_name":"DumitruVartic/PyWaveCollapse","sub_path":"src/run_tkinter_interface.py","file_name":"run_tkinter_interface.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"827665426","text":"#!/usr/bin/env python3\n\"\"\"A simple script to move two files into 
ceph_storage/\n Alta3 Research | rzfeeser@alta3.com\"\"\"\n\nimport shutil\nimport os\n\ndef main():\n \"\"\"Script moves two files into the ceph_storage directory, renaming one\"\"\"\n # file will always run from this dir no matter where it is called from\n os.chdir('/home/student/mycode/')\n \n # moves a file to a new destination; if given just a folder, it keeps its filename\n # returns a string of the absolute path of the new location\n shutil.move('raynor.obj', 'ceph_storage/')\n\n # query user for new filename\n xname = input('What is the new name for kerrigan.obj? ')\n \n # move file to new folder using the new filename from user\n # returns a string of the absolute path of the new location\n shutil.move('ceph_storage/kerrigan.obj', 'ceph_storage/' + xname)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"micgreene/mycode","sub_path":"moving-files/moveplease01.py","file_name":"moveplease01.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"13252261635","text":"#%% Loading necessary data for next steps\n\nimport os \nimport numpy as np\n\n# change dir \nos.chdir(r'C:\\Users\\jaapv\\Desktop\\master\\VoytekLab')\n\n# data\ndatastruct = np.load('datastruct_fpb.npy', allow_pickle=True)\nelec_locs = np.load('elec_locs.npy', allow_pickle=True)\n\nsubjects = ['al','ca','cc','de','fp','gc','gf','gw',\n 'h0','hh','jc','jm','jp','mv','rh','rr',\n 'ug','wc','wm','zt']\n\n# resampled rho and p-values\nresamp_rho_varphase = np.load('resamp_rho_varphase.npy', allow_pickle=True) \nresamp_p_varphase = np.load('resamp_p_varphase.npy', allow_pickle=True) \n\n# resampled statistics \npac_true_zvals = np.load('pac_true_zvals.npy')\npac_true_pvals = np.load('pac_true_pvals.npy')\npac_true_presence = np.load('pac_true_presence.npy')\npac_idx = np.load('pac_idx.npy')\n\n# psd peaks\npsd_peaks = np.load('psd_peaks.npy', allow_pickle=True)\n\n#%% Play around with bycycle\nos.chdir(r'C:\\Users\\jaapv\\Desktop\\master\\VoytekLab\\Code\\distinguish_pac')\n\nfrom bycycle.filt import lowpass_filter\nfrom bycycle.features import compute_features\nimport matplotlib.pyplot as plt\nimport module_pac_plots as pac_plt\nimport module_pac_functions as pacf\nfrom scipy.signal import hilbert\n\n#%% Loop over all channels with PAC using the CF and BW for phase data \n### And extract and save the cycle-by-cycle features\n\n# create empty output\nrdsym = []\nptsym = []\nbursts = []\n\n#burst_kwargs = {'amplitude_fraction_threshold': 0,\n# 'amplitude_consistency_threshold': .2,\n# 'period_consistency_threshold': .45,\n# 'monotonicity_threshold': .7,\n# 'N_cycles_min': 3}\n#\n#burst_kwargs = {'amplitude_fraction_threshold': 0,\n# 'amplitude_consistency_threshold': .25,\n# 'period_consistency_threshold': .45,\n# 'monotonicity_threshold': .6,\n# 'N_cycles_min': 3}\n\nburst_kwargs = {'amplitude_fraction_threshold': 0.25,\n 'amplitude_consistency_threshold': .4,\n 'period_consistency_threshold': .45,\n 'monotonicity_threshold': .6,\n 'N_cycles_min': 3}\n\n# for every channel with pac\nfor ii in range(len(pac_idx[0])):\n \n # get subj & ch\n subj = pac_idx[0][ii]\n ch = pac_idx[1][ii]\n \n if (psd_peaks[subj][ch][0] < 15) & (psd_peaks[subj][ch][1] >.2) & (psd_peaks[subj][ch][1] < 1.5):\n \n # get phase providing band\n lower_phase = psd_peaks[subj][ch][0] - (psd_peaks[subj][ch][2] / 2)\n upper_phase = psd_peaks[subj][ch][0] + (psd_peaks[subj][ch][2] / 2)\n \n fs = 1000\n f_range = [lower_phase, upper_phase]\n 
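# the 55 Hz lowpass below smooths the trace for cycle fitting; it sits well above the phase band, so the phase rhythm itself is preserved\n 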
f_lowpass = 55\n N_seconds = len(datastruct[subj][ch]) / fs - 2\n \n signal = lowpass_filter(datastruct[subj][ch], fs, f_lowpass, N_seconds=N_seconds, remove_edge_artifacts=False)\n \n df = compute_features(signal, fs, f_range, burst_detection_kwargs=burst_kwargs)\n \n is_burst = df['is_burst'].tolist()\n time_rdsym = df['time_rdsym'].to_numpy()\n time_ptsym = df['time_ptsym'].to_numpy()\n \n bursts.append(is_burst)\n rdsym.append(time_rdsym)\n ptsym.append(time_ptsym)\n\n \n#%% Save\n \nnp.save('rdsym', rdsym)\nnp.save('ptsym', ptsym)\nnp.save('bursts', bursts)\n\n\n\n#%% Or load\n\nmean_time_rdsym = np.load('mean_time_rdsym.npy') \nmedian_time_rdsym = np.load('median_time_rdsym.npy') \nmean_time_ptsym = np.load('mean_time_ptsym.npy') \nmedian_time_ptsym = np.load('median_time_ptsym.npy') \n \n#%%\nplt.scatter(median_time_ptsym, median_time_rdsym)\n\nplt.scatter(mean_time_ptsym, mean_time_rdsym)\nplt.ylim([.49,.525])\nplt.xlabel('Peak-Trough Sym')\nplt.ylabel('Rise-Decay Sym')\n\n\n#%% Get cycle specific data per channel\n\nii = 10\n\n\n # get subj & ch\nsubj = pac_idx[0][ii]\nch = pac_idx[1][ii]\n\n# get phase providing band\nlower_phase = psd_peaks[subj][ch][0] - (psd_peaks[subj][ch][2] / 2)\nupper_phase = psd_peaks[subj][ch][0] + (psd_peaks[subj][ch][2] / 2)\n\nfs = 1000\nf_range = [lower_phase, upper_phase]\nphase_providing_band = f_range\nf_lowpass = 55\nN_seconds = 2 # TODO: should this be len(signal) / fs - 2, as above?\n\n\nsignal = lowpass_filter(datastruct[subj][ch], fs, f_lowpass, remove_edge_artifacts=False)\n\ndf = compute_features(signal, fs, f_range)\n\n\nplt.hist(df.time_rdsym, bins=20)\nplt.title('Rise-Decay Sym')\nplt.show()\n\n\nplt.hist(df.time_ptsym, bins=20)\nplt.title('Peak-Trough Sym')\nplt.show()\n\nplt.scatter(df.time_ptsym, df.time_rdsym)\nplt.xlabel('Peak-Trough Sym')\nplt.ylabel('Rise-Decay Sym')\nplt.title('Two symmetry measures per cycle for a channel')\n\nplt_time = [0, 5] \n\n#calculating phase of theta\nphase_data = pacf.butter_bandpass_filter(datastruct[subj][ch], phase_providing_band[0], phase_providing_band[1], round(float(fs)));\nphase_data_hilbert = hilbert(phase_data);\n\n# filter raw data \nraw_filt = lowpass_filter(datastruct[subj][ch], fs, f_lowpass, N_seconds=N_seconds, remove_edge_artifacts=False)\n\n##calculating amplitude envelope of high gamma\n#amp_data = pacf.butter_bandpass_filter(datastruct[subj][ch], amplitude_providing_band[0], amplitude_providing_band[1], round(float(fs)));\n#amp_data_hilbert = hilbert(amp_data);\n\nplt.figure(figsize = (20,8));\nplt.plot((raw_filt[plt_time[0]*fs:plt_time[1]*fs]),label= 'Raw Signal')\nplt.plot((phase_data_hilbert[plt_time[0]*fs:plt_time[1]*fs]),label= 'Phase [{0:.2f} - {1:.2f} Hz]'.format(phase_providing_band[0], phase_providing_band[1]))\n#plt.plot((amp_data_hilbert[plt_time[0]*fs:plt_time[1]*fs]),label= 'High Gamma [80-125 Hz]')\nplt.scatter([df.loc[0:30].sample_peak], [df.loc[0:30].volt_peak])\n\nplt.xlabel('subj: {0:.0f}, ch {1:.0f}, Two Seconds of Theta Phase, High Gamma Amplitude, and Raw Signal '.format(subj,ch))\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n \n#%% Overview of all CFs\n\ncf = []\nbw = []\n\nfor subj in range(len(psd_peaks)):\n for ch in range(len(psd_peaks[subj])):\n if len(psd_peaks[subj][ch]) > 0:\n cf.append(psd_peaks[subj][ch][0])\n bw.append(psd_peaks[subj][ch][1])\n \nplt.hist(cf, bins=60)\nplt.title('distribution of CFs')\nplt.show()\nplt.hist(bw, bins=20)\nplt.title('distribution of BWs')\n \nplt.figure(figsize=(10,10))\nplt.scatter(bw,cf)\n \n#%% Overview of CFs of PAC 
channels\n\ncf = [] \nbw = []\n\nfor ii in range(len(pac_idx[0])):\n # get subj & ch\n subj = pac_idx[0][ii]\n ch = pac_idx[1][ii]\n if len(psd_peaks[subj][ch]) > 0:\n cf.append(psd_peaks[subj][ch][0])\n bw.append(psd_peaks[subj][ch][2])\n \nplt.hist(cf, bins=60)\nplt.show()\nplt.hist(bw, bins=20)\n\nplt.figure(figsize=(10,10))\nplt.scatter(bw,cf)\n\n\n# clean data\n# burst detection \n# create preprocessing pipeline up till the point where the features are extracted. \n\n### Later step: look per cycle at the direction (pi) of the phase. These change with\n### the symmetry of the waveform. Consistent shapes of waveforms might result in PAC. \n### So the consistency of the symmetry within a channel should be a feature\n\n### FEATURES\n### - BW\n### - CF\n### - AM/power\n### - PAC value/ Rho\n### - RD Symmetry\n### - PT Symmetry\n### - RD Symmetry STD\n### - PT Symmetry STD\n\n### - periodic and aperiodic measures? \n\n#%% \nsubj = 0 \nch = 0\n\nsignal = datastruct[subj][ch]\n\nfrom bycycle.burst import plot_burst_detect_params\n\n#burst_kwargs = {'amplitude_fraction_threshold': 0,\n# 'amplitude_consistency_threshold': .2,\n# 'period_consistency_threshold': .45,\n# 'monotonicity_threshold': .7,\n# 'N_cycles_min': 3}\n\nburst_kwargs = {'amplitude_fraction_threshold': 0,\n 'amplitude_consistency_threshold': .2,\n 'period_consistency_threshold': .45,\n 'monotonicity_threshold': .7,\n 'N_cycles_min': 3}\n\nlower_phase = psd_peaks[subj][ch][0] - (psd_peaks[subj][ch][2] / 2)\nupper_phase = psd_peaks[subj][ch][0] + (psd_peaks[subj][ch][2] / 2)\n\nFs = 1000\n\nf_range = [lower_phase, upper_phase]\ndf = compute_features(signal, Fs, f_range, burst_detection_kwargs=burst_kwargs)\n\nplot_burst_detect_params(signal, Fs, df, burst_kwargs, tlims=None, figsize=(16, 3))\n\n#%%\nburst_kwargs = {'amplitude_fraction_threshold': 0,\n 'amplitude_consistency_threshold': .2,\n 'period_consistency_threshold': .45,\n 'monotonicity_threshold': .7,\n 'N_cycles_min': 3}\n\nf_range = [lower_phase, upper_phase]\nlow = int(round(f_range[0]))\nup = int(round(f_range[1]))\nf_range2 = [low, up]\n\n\ndf = compute_features(signal, fs, f_range2, burst_detection_kwargs=burst_kwargs)\n\nplot_burst_detect_params(signal, Fs, df, burst_kwargs, tlims=None, figsize=(16, 3))\n\n#%% Change datastruct data to float64\n\nfor subj in range(len(datastruct)):\n for ch in range(len(datastruct[subj])):\n datastruct[subj][ch] = datastruct[subj][ch].astype(np.float64)\n\n#%% Plot to find right detection\nii = 192\nburst_kwargs = {'amplitude_fraction_threshold': 0.25,\n 'amplitude_consistency_threshold': .4,\n 'period_consistency_threshold': .45,\n 'monotonicity_threshold': .6,\n 'N_cycles_min': 3}\n\n# get subj & ch\nsubj = clean_db['subj'][ii]\nch = clean_db['ch'][ii]\n\n# get phase providing band\nlower_phase = psd_peaks[subj][ch][0] - (psd_peaks[subj][ch][2] / 2)\nupper_phase = psd_peaks[subj][ch][0] + (psd_peaks[subj][ch][2] / 2)\n\nfs = 1000\nf_range = [lower_phase, upper_phase]\nf_lowpass = 55\nN_seconds = len(datastruct[subj][ch]) / fs - 2\n\n#signal = lowpass_filter(datastruct[subj][ch], fs, f_lowpass, N_seconds=N_seconds, remove_edge_artifacts=False)\nsignal = datastruct[subj][ch]\nsignal = signal[10000:20000]\n\n\ndf = compute_features(signal, fs, f_range, burst_detection_kwargs=burst_kwargs)\n\nplot_burst_detect_params(signal, fs, df, burst_kwargs, tlims=None, figsize=(16, 3))\n\nplt.scatter(df['time_ptsym'][(df['is_burst'] == True)],df['time_rdsym'][(df['is_burst'] == True)])\nplt.scatter(df['time_ptsym'][(df['is_burst'] == 
False)],df['time_rdsym'][(df['is_burst'] == False)])\nplt.xlabel('PT')\nplt.ylabel('RD')\nplt.show()\n\n\nplt.hist(df['time_ptsym'][(df['is_burst'] == True)],alpha=.5)\nplt.hist(df['time_ptsym'][(df['is_burst'] == False)],alpha=.5)\nplt.show()\n\nplt.hist(df['time_rdsym'][(df['is_burst'] == True)],alpha=.5)\nplt.hist(df['time_rdsym'][(df['is_burst'] == False)],alpha=.5)\nplt.show()\n","repo_name":"JaapVanDerAar/project_pac","sub_path":"distinguish_pac/bycycle_script.py","file_name":"bycycle_script.py","file_ext":"py","file_size_in_byte":10312,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"7363071533","text":"\"\"\"Collection of general room information getters.\"\"\"\nfrom fastapi import APIRouter, Depends, Security, status\n\nfrom whist_server.api.util import create_http_error\nfrom whist_server.database.room import RoomInDb, RoomInfo\nfrom whist_server.database.user import UserInDb\nfrom whist_server.services.authentication import get_current_user\nfrom whist_server.services.error import RoomNotFoundError\nfrom whist_server.services.room_db_service import RoomDatabaseService\n\nrouter = APIRouter(prefix='/room')\n\n\n@router.get('/info/ids', status_code=200, response_model=dict[str, list[str]])\ndef all_rooms(room_service=Depends(RoomDatabaseService),\n _: UserInDb = Security(get_current_user)) -> dict[str, list[str]]:\n \"\"\"\n Returns all room ids.\n :param room_service: Dependency injection of the room service\n :param _: not required for logic, but authentication\n :return: a list of all room ids as strings.\n \"\"\"\n rooms = room_service.all()\n return {'rooms': [str(room.id) for room in rooms]}\n\n\n@router.get('/info/{room_id}', response_model=RoomInfo)\ndef room_info(room_id: str, room_service=Depends(RoomDatabaseService)) -> RoomInfo:\n \"\"\"\n Returns the public information of one room.\n :param room_id: the unique id of the room\n :param room_service: Dependency injection of the room service\n :return: the room's public info\n \"\"\"\n try:\n room = room_service.get(room_id)\n except RoomNotFoundError as not_found:\n message = f'Room not found with id: {room_id}'\n raise create_http_error(message, status.HTTP_400_BAD_REQUEST) from not_found\n return room.get_info()\n\n\n@router.get('/info/id/{room_name}', status_code=200, response_model=dict[str, str])\ndef room_id_from_name(room_name: str, room_service=Depends(RoomDatabaseService),\n user: UserInDb = Security(get_current_user)) -> dict[str, str]:\n \"\"\"\n Returns the room id for a given room name. 
Basically it transforms human-readable data to\n computer data.\n :param room_name: the human-readable room name\n :param room_service: Dependency injection of the room service\n :param user: not required for logic, but authentication\n :return: dictionary containing the field 'id' with the room id as value. If there is no room\n with that name in the DB it will return RoomNotFoundError.\n \"\"\"\n try:\n room: RoomInDb = room_service.get_by_name(room_name)\n if not room.has_joined(user):\n message = 'User has no access.'\n raise create_http_error(message, status.HTTP_403_FORBIDDEN)\n except RoomNotFoundError as not_found:\n message = f'Room not found with name: {room_name}'\n raise create_http_error(message, status.HTTP_400_BAD_REQUEST) from not_found\n return {'id': str(room.id)}\n","repo_name":"Whist-Team/Whist-Server","sub_path":"whist_server/api/room/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"21609938458","text":"\"\"\"\n Seminar 10. Task 65.\nWrite an EDA for the penguins dataset. Requirements:\n● Use 2-3 scatter plots\n● Add an extra dimension to the scatter plots using the hue, size, style arguments\n● Use PairGrid with a plot type of your choice\n● Draw a Heatmap\n● Use 2-3 histograms\n\nTo load the penguins dataset, use this snippet: \n penguins = sns.load_dataset(\"penguins\")\n penguins.head()\n\"\"\"\n# Imported libraries\nfrom seaborn import load_dataset, scatterplot, PairGrid, heatmap\nfrom matplotlib.pyplot import show, xlabel, ylabel\n\n# Load the data\npenguins = load_dataset(\"penguins\")\nprint(penguins.head())\n\n# Use 2-3 scatter plots\ndef f_1():\n scatterplot(data=penguins, x=\"flipper_length_mm\", y=\"body_mass_g\")\n show()\n\n# Add an extra dimension to the scatter plots using the hue, size, style arguments\ndef f_2():\n scatterplot(data=penguins, x=\"flipper_length_mm\", y=\"body_mass_g\", hue='sex', size='island', style='island')\n show()\n\n# Use PairGrid with a plot type of your choice\ndef f_3():\n x_vars = [\"body_mass_g\", \"bill_length_mm\", \"bill_depth_mm\", \"flipper_length_mm\"]\n y_vars = ['sex']\n pg = PairGrid(penguins, x_vars=x_vars, y_vars=y_vars, hue='species')\n pg.map(scatterplot)\n show()\n\n# Draw a Heatmap\ndef f_4():\n data = penguins.pivot_table(index='species', columns='island', values='body_mass_g')\n heatmap(data)\n xlabel('Island', size=14)\n ylabel('Penguin species', size=14)\n show()\n\n# Use 2-3 histograms\ndef f_5():\n penguins['bill_depth_mm'].hist(bins=15)\n show()\n\n\nf_1()\nf_2()\nf_3()\nf_4()\nf_5()\n","repo_name":"draguanna/introduction_to_python","sub_path":"Seminar_10/Seminar_10_task_65.py","file_name":"Seminar_10_task_65.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"7359076884","text":"#!/usr/bin/env python3\n\nfrom urllib.request import urlopen\nfrom urllib.parse import urljoin\nimport bs4\n\n\nclass WebClient(object):\n\t# print results\n\n\tdef __init__(self):\n\t\tpass\n\n\t# get web page\n\tdef get_web_page(self):\n\t\twebpage = urlopen(\"http://bid.udl.cat/ca/\")\n\t\thtml = webpage.read()\n\t\treturn html\n\n\tdef parse_web_page(self, html):\n\t\ttitles = []\n\t\tlinks = []\n\t\tdates = []\n\t\tsoup = bs4.BeautifulSoup(html, features=\"lxml\")\n\t\tnews = soup.find_all(\"li\", 
\"box\") # The whole list item element\n\t\tfor new in news:\n\t\t\ttitle_tag = new.find(\"a\") # The whole tag of the link to the news item\n\t\t\ttitles.append(title_tag['title']) # Just the title text\n\n\t\t\tlink = urljoin(\"http://bid.udl.cat/ca/\", title_tag['href'])\n\t\t\tlinks.append(link) # The link text\n\n\t\t\ttime_tag = new.find(\"time\")\n\t\t\tdates.append(time_tag.text.strip())\n\t\treturn titles, links, dates\n\n\tdef get_information(self):\n\t\thtml = self.get_web_page()\n\t\t# read information from web page\n\t\tinfo = self.parse_web_page(html)\n\t\treturn info\n\n\nif __name__ == \"__main__\":\n\tclient = WebClient()\n\tinformation = client.get_information()\n\tfor i in range(len(information[0])):\n\t\tprint(information[0][i])\n\t\tprint(information[1][i])\n\t\tprint(information[2][i])\n\t\tprint()\n","repo_name":"Galahad3x/WebProjectClassMaterial","sub_path":"webclient.py","file_name":"webclient.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"17269924586","text":"# FacebookConnectMiddleware.py\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\n\nimport md5\nimport urllib\nimport time\nfrom django.utils import simplejson\nfrom datetime import datetime\nimport logging \nfrom connect.pyfacebook import Facebook\n\n# These values could be placed in Django's project settings\nAPI_KEY = settings.FACEBOOK_API_KEY\nAPI_SECRET = settings.FACEBOOK_API_SECRET\n\nPROBLEM_ERROR = 'There was a problem. Try again later.'\nACCOUNT_DISABLED_ERROR = 'Your account is not active.'\nACCOUNT_PROBLEM_ERROR = 'There is a problem with your account.'\n\nclass FacebookConnectMiddleware(object):\n\t\n\tdelete_fb_cookies = False\n\tfacebook_user_is_authenticated = False\n\t\n\tdef process_request(self, request):\n\t\ttry:\n\t\t\t # Set the facebook message to empty. 
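It is reset to None on every request. 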
This message can be used to display info from the middleware on a Web page.\n\t\t\trequest.facebook_message = None\n\t\n\t\t\t# Don't bother trying FB Connect login if the user is already logged in\n\t\t\tif not request.user.is_authenticated():\n\t\t\t\t# FB Connect will set a cookie with a key == FB App API Key if the user has been authenticated\n\t\t\t\tif API_KEY in request.COOKIES:\n\t\t\t\t\tfb = Facebook(API_KEY, API_SECRET)\n\n\t\t\t\t\tif(fb.validate_cookie_signature(request.COOKIES)):\n\t\t\t\t\n\t\t\t\t\t\t# If session hasn't expired\n\t\t\t\t\t\tif(datetime.fromtimestamp(float(request.COOKIES[API_KEY+'_expires'])) > datetime.now()):\n\t\t\t\n\t\t\t\t\t\t\t# Try to get Django account corresponding to friend\n\t\t\t\t\t\t\t# Authenticate then login (or display disabled error message)\n\t\t\t\t\t\t\tuser = authenticate(facebook_id=request.COOKIES[API_KEY + '_user'])\n\t\t\t\t\t\t\tlogging.info(user)\n\t\t\t\t\t\t\tif user is not None:\n\t\t\t\t\t\t\t\tif user.is_active:\n\t\t\t\t\t\t\t\t\tlogin(request, user)\n\t\t\t\t\t\t\t\t\tself.facebook_user_is_authenticated = True\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\trequest.facebook_message = ACCOUNT_DISABLED_ERROR\n\t\t\t\t\t\t\t\t\tself.delete_fb_cookies = True\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tdjango_user = User.get_by_key_name(\"userfb%s\" % request.COOKIES[API_KEY + '_user'])\n\t\t\t\t\t\t\t\tif not django_user:\n\t\t\t\t\t\t\t\t\t# There is no Django account for this Facebook user.\n\t\t\t\t\t\t\t\t\t# Create one, then log the user in.\n\t\t\t\t\t\t\t\t\tfb.session_key = request.COOKIES[API_KEY + '_session_key']\n\t\t\t\t\t\t\t\t\tuser_info_response = fb.users.getInfo([request.COOKIES[API_KEY + '_user']], ['first_name', 'last_name'])\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t# Create user\n\t\t\t\t\t\t\t\t\tuser = User(key_name=\"userfb%s\" % request.COOKIES[API_KEY + '_user'], username = \"%s %s\" % (user_info_response[0]['first_name'], user_info_response[0]['last_name']), \n\t\t\t\t\t\t\t\t\t\t\t\temail= '%s@connect.facebook.com' % request.COOKIES[API_KEY + '_user'])\n\t\t\t\t\t\t\t\t\tuser.set_password(md5.new(request.COOKIES[API_KEY + '_user'] + settings.SECRET_KEY).hexdigest())\n\t\t\t\t\t\t\t\t\tuser.is_active = True\n\t\t\t\t\t\t\t\t\tuser.facebook_id = int(request.COOKIES[API_KEY + '_user'])\n\t\t\t\t\t\t\t\t\tuser.put()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t# Authenticate and log in (or display disabled error message)\n\t\t\t\t\t\t\t\t\tuser = authenticate(username='%s@connect.facebook.com' % request.COOKIES[API_KEY + '_user'], \n\t\t\t\t\t\t\t\t\t\t\tpassword=md5.new(request.COOKIES[API_KEY + '_user'] + settings.SECRET_KEY).hexdigest())\n\t\t\t\t\t\t\t\t\tlogging.info(\"ROUND2\")\n\t\t\t\t\t\t\t\t\tif user is not None:\n\t\t\t\t\t\t\t\t\t\tif user.is_active:\n\t\t\t\t\t\t\t\t\t\t\tlogin(request, user)\n\t\t\t\t\t\t\t\t\t\t\tself.facebook_user_is_authenticated = True\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\trequest.facebook_message = ACCOUNT_DISABLED_ERROR\n\t\t\t\t\t\t\t\t\t\t\tself.delete_fb_cookies = True\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\trequest.facebook_message = ACCOUNT_PROBLEM_ERROR\n\t\t\t\t\t\t\t\t\t\tself.delete_fb_cookies = True\n\t\t\t\t\t\t\t\telse:\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\trequest.facebook_message = ACCOUNT_PROBLEM_ERROR\n\t\t\t\t\t\t\t\t\tself.delete_fb_cookies = True\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t# Cookie session expired\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlogout(request)\n\t\t\t\t\t\t\tself.delete_fb_cookies = True\n\t\t\t\t\t\t\n\t\t\t\t # Cookie values don't match 
hash\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogout(request)\n\t\t\t\t\t\tself.delete_fb_cookies = True\n\t\t\t\t\t\n\t\t\t# Logged in\n\t\t\telse:\n\t\t\t\t# If FB Connect user\n\t\t\t\tif API_KEY in request.COOKIES:\n\t\t\t\t\t# IP hash cookie set\n\t\t\t\t\tif 'fb_ip' in request.COOKIES:\n\t\t\t\t\t\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\treal_ip = request.META['HTTP_X_FORWARDED_FOR']\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\treal_ip = request.META['REMOTE_ADDR']\n\t\t\t\t\t\n\t\t\t\t\t\t# If IP hash cookie is NOT correct\n\t\t\t\t\t\tif request.COOKIES['fb_ip'] != md5.new(real_ip + API_SECRET + settings.SECRET_KEY).hexdigest():\n\t\t\t\t\t\t\t logout(request)\n\t\t\t\t\t\t\t self.delete_fb_cookies = True\n\t\t\t\t\t# FB Connect user without hash cookie set\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogout(request)\n\t\t\t\t\t\tself.delete_fb_cookies = True\n\t\t\t\t\n\t\t# Something else happened. Make sure user doesn't have site access until problem is fixed.\n\t\texcept:\n\t\t\trequest.facebook_message = PROBLEM_ERROR\n\t\t\tlogout(request)\n\t\t\tself.delete_fb_cookies = True\n\t\n\tdef process_response(self, request, response):\n\t\t# Delete FB Connect cookies\n\t\t# FB Connect JavaScript may add them back, but this will ensure they're deleted if they should be\n\t\tif self.delete_fb_cookies is True:\n\t\t\tresponse.delete_cookie(API_KEY + '_user')\n\t\t\tresponse.delete_cookie(API_KEY + '_session_key')\n\t\t\tresponse.delete_cookie(API_KEY + '_expires')\n\t\t\tresponse.delete_cookie(API_KEY + '_ss')\n\t\t\tresponse.delete_cookie(API_KEY)\n\t\t\tresponse.delete_cookie('fbsetting_' + API_KEY)\n\t\t\n\t\tself.delete_fb_cookies = False\n\t\t\n\t\tif self.facebook_user_is_authenticated is True:\n\t\t\ttry:\n\t\t\t\treal_ip = request.META['HTTP_X_FORWARDED_FOR']\n\t\t\texcept KeyError:\n\t\t\t\treal_ip = request.META['REMOTE_ADDR']\n\t\t\t\tresponse.set_cookie('fb_ip', md5.new(real_ip + API_SECRET + settings.SECRET_KEY).hexdigest())\n\t\t\t\n\t\t# process_response() must always return a HttpResponse\n\t\treturn response\n\t\t\t\t\n","repo_name":"tallstreet/tallstreet","sub_path":"connect/FacebookConnectMiddleware.py","file_name":"FacebookConnectMiddleware.py","file_ext":"py","file_size_in_byte":5597,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"4"} +{"seq_id":"42799095362","text":"import unittest\n\nimport requests_mock\n\nfrom exchanges import exchange_factory\n\n\nclass ShrimpyTest(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n self.client = exchange_factory(\"shrimpy\")(\"key\", \"c2VjcmV0\")\n super(ShrimpyTest, self).__init__(*args, **kwargs)\n\n def test_auth_v1(self):\n with requests_mock.mock() as m:\n m.post(\n requests_mock.ANY,\n text='{\"balances\": [{\"asset\": \"BTC\", \"free\": \"0.10730199\", \"locked\": \"0.00000000\"}]}',\n )\n # https://dev-api.shrimpy.io/v1/users//accounts//balance\n result = self.client.brequest(\n 1, endpoint=\"users/555/accounts/666/balance\", authenticate=True, method=\"POST\"\n )\n self.assertEqual(result, {\"balances\": [{\"asset\": \"BTC\", \"free\": \"0.10730199\", \"locked\": \"0.00000000\"}]})\n","repo_name":"heartrithm/exchanges","sub_path":"exchanges/apis/tests/test_shrimpy.py","file_name":"test_shrimpy.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"33022153018","text":"from selenium import webdriver\nimport time\nimport os\nimport sys\nfrom bs4 import BeautifulSoup as bs\nimport 
subprocess\n\n\ndef download_link(link):\n print(f\"yt-dlp '{link}'\")\n subprocess.run([\"yt-dlp\", link, \"-o\", \"%(playlist_index)s) %(title)s.%(ext)s\"])\n\n\ndef scroll_to_end_of_page(br):\n scroll = 0\n end = False\n while not end:\n firstScroll = br.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\"\n )\n time.sleep(3)\n if firstScroll == scroll:\n end = True\n else:\n scroll = firstScroll\n\n\ndef linksToPlayLists(url) -> dict[str, str]:\n br = webdriver.Firefox()\n br.get(url)\n scroll_to_end_of_page(br)\n content = br.page_source.encode(\"utf-8\").strip()\n soup = bs(content, \"lxml\")\n allPlaylists = soup.find(\"div\", class_=\"userMain__content\").find_all(\n \"li\", class_=\"soundList__item\"\n )\n\n playlistLinks = {}\n for playlist in allPlaylists:\n atag = playlist.find(\"a\", class_=\"soundTitle__title\")\n playlistLinks[atag.text.strip()] = \"https://soundcloud.com\" + atag[\"href\"]\n br.close()\n return playlistLinks\n\n\ndef download_playlist_yt_dlp(playlists):\n for playlist in playlists:\n os.mkdir(playlist)\n os.chdir(playlist)\n url = playlists[playlist]\n download_link(url)\n os.chdir(\"..\")\n\n\ndef main(link):\n lst = link.split('?')[0].split(\"/\")\n if lst[-1] == \"\":\n lst.pop()\n\n if lst[-1] == \"sets\":\n playlistDict = linksToPlayLists(link)\n download_playlist_yt_dlp(playlistDict)\n else:\n subprocess.run([\"yt-dlp\", link])\n\n\nplaylistDict = {\n \"Sri Guru Angad Dev Ji Katha\": \"https://soundcloud.com/gianishersinghjiambala/sets/sri-guru-angad-dev-ji-katha\",\n \"Sri Guru Granth Sahib Ji Katha\": \"https://soundcloud.com/gianishersinghjiambala/sets/sri-guru-granth-sahib-ji-katha\",\n}\n\nargv = sys.argv\nwhereTodl = argv[1]\nlink = argv[2]\nif not os.path.isdir(whereTodl):\n os.mkdir(whereTodl)\nos.chdir(whereTodl)\n\n# print(whereTodl, link)\nmain(link)\n","repo_name":"Giansingh4710/dotfiles","sub_path":"scripts/download_files/py_code/soundcloud.py","file_name":"soundcloud.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"72528431478","text":"from transformers import AutoTokenizer,GPTJForCausalLM,AutoModelForCausalLM\nimport torch\nimport tensorflow as tf\ngpus = tf.config.list_physical_devices('GPU')\nif gpus:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu,True)\n\nmodelname =\"EleutherAI/gpt-j-6B\"\nmodelname =\"togethercomputer/GPT-JT-6B-v1\"\n# load fp 16 model\n#model = GPTJForCausalLM.from_pretrained(\"EleutherAI/gpt-j-6B\", revision=\"float16\", torch_dtype=torch.float16)\n#model = AutoModelForCausalLM.from_pretrained(modelname)\n# save model with torch.save\n#breakpoint()\n#breakpoint()\n#torch.save(model, \"gptjt.pt\")\nif modelname ==\"togethercomputer/GPT-JT-6B-v1\":\n\n model = torch.load(\"gptjt.pt\")\nelse:\n model =torch.load(\"gptj.pt\")\ntokenizer = AutoTokenizer.from_pretrained(modelname)\n\n\n#model = GPTJForCausalLM.from_pretrained(\n# \"EleutherAI/gpt-j-6B\",\n## revision=\"float16\",\n# torch_dtype=torch.float16,\n# low_cpu_mem_usage=True\n#)\n\nmodel.cuda()\nmodel.eval()\n\ndef load_data(filename):\n dataset = tf.data.TFRecordDataset(filename)\n def _parse_function(example_proto):\n features = { \n \"inputs_pretokenized\": tf.io.FixedLenFeature([], tf.string),\n \"targets_pretokenized\": tf.io.FixedLenFeature([], tf.string)}\n parsed_features = tf.io.parse_single_example(example_proto, features)\n return 
parsed_features[\"targets_pretokenized\"], parsed_features[\"inputs_pretokenized\"]\n dataset = dataset.map(_parse_function)\n answers = []\n questions = []\n for i in dataset:\n answers.append(i[0].numpy())\n questions.append(i[1].numpy())\n\n\n return answers, questions\n\nanswers, questions = load_data(\"/data/P3/data/super_glue_boolq_exercise/train.tfrecord-00000-of-00001\")\nexamples_to_print = 50\ncontext_examples=3\ndevice ='cuda'\ngenlen = 50\ntext = b\" Answer True or False: \"\n#text= b\" \"\nend_of_example = b\" <|endoftext|> \"\nend_of_example = b\" \"\nfor i in range(0,examples_to_print*context_examples,context_examples):\n try:\n prompt = b\"\"\n for j in range(context_examples):\n if j==context_examples-1:\n prompt += (questions[i+j]) + text\n else:\n prompt += (questions[i+j]) + text + (answers[i+j]) + end_of_example\n \n prompt = prompt.decode()\n input_ids = torch.tensor(tokenizer.encode(prompt)).unsqueeze(0).to(device=device)\n\n max_length = input_ids.shape[1] + genlen\n\n output_ids = model.generate(input_ids=input_ids, max_length=max_length, \n do_sample=True,eos_token_id=tokenizer.eos_token_id)\n \n answer_to_question=tokenizer.decode(output_ids[0][input_ids.shape[1]:])\n print(\"Q: \", str(prompt))\n print(\"Ans: \", str(answer_to_question))\n print(\"Correct Ans: \", str(answers[i+context_examples-1]))\n except:\n print(\"\\nCould not parse\\n\")\n continue","repo_name":"Elliotepsteino/ICL-and-finetuning","sub_path":"GPTJ_inference.py","file_name":"GPTJ_inference.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25081084785","text":"'''\nAuthor: Milo\nDate: Feb/March 2020\nProject: Hillclimber optimization implementation for a packing problem.\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n#bag capacity\ncapacity = 20\ncurrent_best = 0\n\n#item containing a benefit value and volume value\nclass Item:\n def __init__(self, id, benefit, volume):\n self.id = id\n self.benefit = benefit\n self.volume = volume\n\n#make list of items\nitems = []\nitems.append(Item('a', 5, 3))\nitems.append(Item('b', 6, 2))\nitems.append(Item('c', 1, 4))\nitems.append(Item('d', 9, 5))\nitems.append(Item('e', 2, 8))\nitems.append(Item('f', 8, 9))\nitems.append(Item('g', 4, 10))\nitems.append(Item('h', 3, 1))\nitems.append(Item('i', 7, 6))\nitems.append(Item('j', 10, 7))\n\n#A hillclimber!\nclass Hillclimber:\n def __init__(self, genotype, capacity=20):\n self.genotype = genotype # in the case of the packing problem the genotype is the list of possible items\n self.phenotype = np.random.randint(2, size=10)\n self.current_fitness = 0 #initially no fitness\n self.capacity = capacity\n self.fitness_log = []\n self.volume_log = []\n\n #gets the current fitness\n def fitness(self, pheno):\n sum_volume = 0\n sum_benefit = 0\n for phen, geno in zip(pheno, self.genotype):\n if (phen == 1): #if gene is on\n sum_volume += geno.volume\n sum_benefit += geno.benefit\n\n if sum_volume > self.capacity: #if it's greater than capacity, return 0. 
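This is effectively a 'death penalty' for constraint violations: 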
overcapacity solutions are not very fit\n return 0\n else:\n return sum_benefit #otherwise return the total of all benefits\n\n def get_volume(self, pheno):\n sum_volume = 0\n for phen, geno in zip(pheno, self.genotype):\n if (phen == 1): #if gene is on\n sum_volume += geno.volume\n return sum_volume\n\n def mutate(self):\n #make a copy of the current phenotype\n copy = np.copy(self.phenotype)\n #mutation_rate = 0.7\n num = random.randint(1,4) # number of genes to flip\n for _ in range(0, num):\n r = random.randint(0,9) #choose a random gene to flip each time\n copy[r] = int(not copy[r])\n #if copy[r] == 1:\n # copy[r] = 0\n #else:\n # copy[r] = 1\n\n return copy\n\n def evolve(self):\n for _ in range (0,300):\n new_mutation = self.mutate()\n #print(new_mutation)\n if (self.fitness(new_mutation) > self.fitness(self.phenotype)):\n self.phenotype = new_mutation\n #print(\"hello\")\n #print(self.fitness(new_mutation))\n #print(self.fitness(self.phenotype))\n self.fitness_log.append(self.fitness(self.phenotype))\n self.volume_log.append(self.get_volume(self.phenotype))\n return self.phenotype, self.get_volume(self.phenotype)\n \n\n\ndef make_population_of_climbers(number):\n climbers = []\n for each in range(0, number):\n climbers.append(Hillclimber(items))\n for each in climbers:\n each.evolve()\n return climbers\n\n\n#climber = Hillclimber(items)\n#climber.evolve()\n#print(climber.fitness_log[99:])\n\npopulation = make_population_of_climbers(20)\n\n#plot graph of fitness over generations\n\n\nplt.style.use('seaborn')\n#plt.plot(climber.fitness_log, label=\"fitness\")\n#plt.plot(climber.volume_log, label=\"volume\")\n#plt.legend()\nfor each in population:\n plt.plot(each.fitness_log)\n print(each.fitness_log[299:])\n\n\nplt.xlabel(\"Generations\")\nplt.ylabel(\"Fitness\")\n\n#print(\"fitness at end:\", climber.current_fitness)","repo_name":"smilo7/hillclimber","sub_path":"hillclimber.py","file_name":"hillclimber.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25383279470","text":"import board\nimport digitalio\nfrom PIL import Image, ImageDraw, ImageFont\nimport adafruit_ssd1306\n\nclass Display:\n\tdef __init__(self):\n\t\tprint(\"init\")\n\t\toled_reset = digitalio.DigitalInOut(board.D4)\n\t\tWIDTH = 128\n\t\tHEIGHT = 32 # Change to 64 if needed\n\t\tBORDER = 5\n\n\t\ti2c = board.I2C()\n\t\tself.__oled = adafruit_ssd1306.SSD1306_I2C(WIDTH, HEIGHT, i2c, addr=0x3C, reset=oled_reset)\n\n\t\t# Clear display.\n\t\tself.__oled.fill(0)\n\t\tself.__oled.show()\n\n\n\n\tdef displayTimer(self, millis):\n\t\ttext = str(round(millis/1000, 0)) + \" s\"\n\t\tprint(text)\n\t\tfont = ImageFont.load_default()\n\n\t\t(font_width, font_height) = font.getsize(text)\n\t\timage = Image.new(\"1\", (self.__oled.width, self.__oled.height))\n\t\tdraw = ImageDraw.Draw(image)\n\t\tdraw.text(\n\t\t\t\t(self.__oled.width // 2 - font_width // 2, self.__oled.height // 2 - font_height // 2),\n\t\t\t\ttext,\n\t\t\t\tfont=font,\n\t\t\t\tfill=255,\n\t\t)\n\t\tself.__oled.image(image)\n\t\tself.__oled.show()","repo_name":"lazytesting/pi-shot-timer","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"43576782277","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport tempfile\nimport subprocess\nimport chardet\nfrom gtags import Gtags\n\nclass 
GtagsCommand(object):\n\n _global_cmd = None\n @property\n def global_cmd(self): return self._global_cmd\n #@global_cmd.setter\n #def global_cmd(self, global_cmd): self._global_cmd = global_cmd\n\n _gtags_cmd = None\n @property\n def gtags_cmd(self): return self._gtags_cmd\n #@gtags_cmd.setter\n #def gtags_cmd(self, gtags_cmd): self._gtags_cmd = gtags_cmd\n\n _gtags_conf = None\n @property\n def gtags_conf(self): return self._gtags_conf\n @gtags_conf.setter\n def gtags_conf(self, gtags_conf): \n if os.path.exists(gtags_conf):\n self._gtags_conf = gtags_conf\n\n _ignore_case = None\n @property\n def ignore_case(self): return self._ignore_case\n @ignore_case.setter\n def ignore_case(self, ignore_case): self._ignore_case = ignore_case\n\n _file_name = None\n @property\n def file_name(self): return self._file_name\n #@file_name.setter\n #def file_name(self, file_name): self._file_name = file_name\n\n _gtags_root = None\n @property\n def gtags_root(self):\n if not self._gtags_root:\n dir_path = self.__get_gtags_rootdir(self.__slash_all_path(self._file_name))\n if dir_path is None:\n #sys.stderr.write('Cannot find gtags root directory for [%s].' % (self._file_name))\n self._gtags_root = None\n else:\n self._gtags_root = self.__slash_all_path(dir_path)\n return self._gtags_root\n #@gtags_root.setter\n #def gtags_root(self, gtags_root): self._gtags_root = gtags_root\n\n _target_enc = None\n @property\n def target_enc(self): return self._target_enc\n #@target_enc.setter\n #def target_enc(self, target_enc): self._target_enc = target_enc\n\n _method = None\n #@property\n #def method(self): return self._method\n #@method.setter\n #def method(self, method): self._method = method\n\n _target_object = None\n #@property\n #def target_object(self): return self._target_object\n #@target_object.setter\n #def target_object(self, target_object): self._target_object = target_object\n\n _shell_flag = None\n #@property\n #def shell_flag(self): return self._shell_flag\n #@shell_flag.setter\n #def shell_flag(self, shell_flag): self._shell_flag = shell_flag\n\n def __init__(self, global_path=None, ignore_case=False):\n if os.name == 'nt':\n self._shell_flag = True\n global_name = 'global.exe'\n gtags_name = 'gtags.exe'\n else:\n self._shell_flag = False\n global_name = 'global'\n gtags_name = 'gtags'\n\n if global_path:\n global_command_path = os.path.join(global_path, global_name)\n global_cmd = global_command_path\n gtags_command_path = os.path.join(global_path, gtags_name)\n gtags_cmd = gtags_command_path\n else:\n global_cmd = global_name\n gtags_cmd = gtags_name\n\n if not os.path.exists(global_cmd):\n # Find global command in PATH.\n global_in_path = self.__which(global_name)\n if global_in_path:\n self._global_cmd = global_in_path[0]\n else:\n return\n else:\n self._global_cmd = global_cmd\n\n if not os.path.exists(gtags_cmd):\n # Find gtags command in PATH.\n gtags_in_path = self.__which(gtags_name)\n if gtags_in_path:\n self._gtags_cmd = gtags_in_path[0]\n else:\n return\n else:\n self._gtags_cmd = gtags_cmd\n\n self._ignore_case = ignore_case\n\n def __which(self, name, flags=os.X_OK):\n result = []\n exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))\n path = os.environ.get('PATH', None)\n if path is None:\n return []\n for p in os.environ.get('PATH', '').split(os.pathsep):\n p = os.path.join(p, name)\n if os.access(p, flags):\n result.append(p)\n for e in exts:\n pext = p + e\n if os.access(pext, flags):\n result.append(pext)\n return result\n\n def __slash_all_path(self, path):\n path 
= path.replace(os.path.sep, u'/')\n return path\n\n def __print_error_if_pipe_has_it(self, pipe):\n # Check if pipe has at least 1 line of error(s) ,\n # then print it.\n line = pipe.stderr.readline()\n while line: \n enc = chardet.detect(line)['encoding']\n line = line.decode(enc)\n self.print_message(line.strip(), 'warn', enc)\n line = pipe.stderr.readline()\n \n def __invoke_command(self, cmd_line):\n gtags_list = []\n for gtags in self.__run_global(cmd_line):\n gtags_list.append(gtags)\n\n self.display_result(self._gtags_root, gtags_list, self._target_enc)\n \n def __run_global(self, cmd_line):\n # Making temporary file for the purpose of avoiding a deadlock\n with tempfile.TemporaryFile() as f:\n with tempfile.TemporaryFile() as f_e:\n proc = subprocess.Popen(cmd_line,\n stdout=f,\n stderr=f_e,\n shell=self._shell_flag)\n\n ret_code = proc.wait()\n\n f_e.seek(0)\n line = f_e.readline()\n while line:\n enc = chardet.detect(line)['encoding']\n line = line.decode(enc)\n self.print_message(line.strip(), 'warn', enc)\n line = f_e.readline()\n\n f.seek(0)\n line = f.readline()\n while line:\n enc = chardet.detect(line)['encoding']\n line = line.decode(enc)\n gtags = Gtags(line.strip())\n yield gtags\n line = f.readline()\n\n def __run_gtags(self, cmd_line):\n # Making temporary file for the purpose of avoiding a deadlock\n with tempfile.TemporaryFile() as f:\n with tempfile.TemporaryFile() as f_e:\n proc = subprocess.Popen(cmd_line,\n stdout=f,\n stderr=f_e,\n shell=self._shell_flag)\n \n ret_code = proc.wait()\n\n f_e.seek(0)\n line = f_e.readline()\n while line:\n enc = chardet.detect(line)['encoding']\n line = line.decode(enc)\n self.print_message(line.strip(), 'info', enc) \n line = f_e.readline()\n \n f.seek(0)\n line = f.readline()\n while line:\n enc = chardet.detect(line)['encoding']\n line = line.decode(enc)\n self.print_message(line.strip(), 'info', enc)\n line = f.readline()\n\n def __get_gtags_rootdir(self, file_path):\n cmd_line = [self._global_cmd, '-p']\n os.chdir(os.path.dirname(file_path))\n gtags_rootdir = None\n\n # Making temporary file for the purpose of avoiding a deadlock\n with tempfile.TemporaryFile() as f:\n with tempfile.TemporaryFile() as f_e:\n proc = subprocess.Popen(cmd_line,\n stdout=f,\n stderr=f_e,\n shell=self._shell_flag)\n\n ret_code = proc.wait()\n\n f_e.seek(0)\n line = f_e.readline()\n while line:\n enc = chardet.detect(line)['encoding']\n line = line.decode(enc)\n self.print_message(line.strip(), 'warn', enc)\n line = f_e.readline()\n\n f.seek(0)\n line = f.readline()\n if line:\n enc = chardet.detect(line)['encoding']\n line = line.decode(enc)\n gtags_rootdir = line.strip()\n\n return gtags_rootdir\n\n def parse_args(self, args):\n if len(args) == 0:\n msg = u'Gtags: No argument is specified.'\n self.print_message(msg, 'warn')\n return False\n elif len(args) == 1:\n msg = u'Gtags: Not enough arguments are provided.'\n self.print_message(msg, 'warn')\n return False\n\n file_name = args[0]\n enc = chardet.detect(file_name)['encoding']\n file_name = file_name.decode(enc)\n file_name = os.path.abspath(file_name)\n\n if not os.path.exists(file_name):\n msg = u'Gtags: file [%s] does not exist.' 
% (file_name)\n            self.print_message(msg, 'warn')\n            self._file_name = None\n            self._target_enc = None\n            return False\n        else:\n            self._file_name = file_name\n            self._target_enc = enc\n\n        if len(args) == 2:\n            if args[1] == '-f':\n                # gtags -f \n                #self.gtags_get_list_of_object(file_name)\n                self._method = 'gtags_get_list_of_object'\n            elif args[1] == '--gtags-remake':\n                #self.remake_tags()\n                self._method = 'remake_tags'\n            elif args[1] == '--gtags-update' or args[1] == '-u':\n                self._method = 'update_tags'\n            else:\n                # gtags -t \n                #self.gtags_get_object_definition(args[1])\n                self._target_object = args[1]\n                self._method = 'gtags_get_object_definition'\n        elif len(args) == 3:\n            if args[1] == '-r':\n                # gtags -r \n                #self.gtags_get_object_reference(args[2])\n                self._target_object = args[2]\n                self._method = 'gtags_get_object_reference'\n            elif args[1] == '-s':\n                # gtags -s \n                #self.gtags_get_symbol_reference(args[2])\n                self._target_object = args[2]\n                self._method = 'gtags_get_symbol_reference'\n            elif args[1] == '-g':\n                # gtags -g \n                #self.gtags_grep(args[2])\n                self._target_object = args[2]\n                self._method = 'gtags_grep'\n            elif args[1] == '--gtags-update' or args[1] == '-u':\n                self._method = 'update_tags'\n            else:\n                return False\n        else:\n            return False\n\n        return True\n\n    def do_it(self):\n        try:\n            method = getattr(self, self._method)\n        except AttributeError:\n            msg = u'Gtags: method [%s] does not exist.' % self._method\n            self.print_message(msg, 'warn')\n            return False\n\n        os.chdir(self.gtags_root)\n        method()\n        return True\n\n    def gtags_get_list_of_object(self):\n        file_relpath = os.path.relpath(self.__slash_all_path(self._file_name), start=self._gtags_root)\n        file_relpath = self.__slash_all_path(file_relpath)\n        file_relpath = file_relpath.encode(self._target_enc)\n        if self._ignore_case:\n            cmd_line = [self._global_cmd, '-x', '-a', '-f', '-i', file_relpath]\n        else:\n            cmd_line = [self._global_cmd, '-x', '-a', '-f', file_relpath]\n        self.__invoke_command(cmd_line)\n\n    def gtags_get_object_definition(self):\n        if self._ignore_case:\n            cmd_line = [self._global_cmd, '-x', '-a', '-i', self._target_object] \n        else:\n            cmd_line = [self._global_cmd, '-x', '-a', self._target_object] \n        self.__invoke_command(cmd_line)\n    \n    def gtags_get_object_reference(self):\n        if self._ignore_case:\n            cmd_line = [self._global_cmd, '-x', '-a', '-r', '-i', self._target_object]\n        else:\n            cmd_line = [self._global_cmd, '-x', '-a', '-r', self._target_object]\n        self.__invoke_command(cmd_line)\n\n    def gtags_get_symbol_reference(self):\n        if self._ignore_case:\n            cmd_line = [self._global_cmd, '-x', '-a', '-s', '-i', self._target_object] \n        else:\n            cmd_line = [self._global_cmd, '-x', '-a', '-s', self._target_object] \n        self.__invoke_command(cmd_line)\n    \n    def gtags_grep(self):\n        if self._ignore_case:\n            cmd_line = [self._global_cmd, '-x', '-a', '-g', '-i', self._target_object] \n        else:\n            cmd_line = [self._global_cmd, '-x', '-a', '-g', self._target_object] \n        self.__invoke_command(cmd_line)\n\n    def remake_tags(self):\n        if self._gtags_conf == None:\n            cmd_line = [self._gtags_cmd, '-v']\n        else:\n            cmd_line = [self._gtags_cmd, '-v', '--gtagsconf', self._gtags_conf]\n        self.__run_gtags(cmd_line)\n\n    def update_tags(self):\n        if self._gtags_conf == None:\n            cmd_line = [self._gtags_cmd, '-i', '-v']\n        else:\n            cmd_line = [self._gtags_cmd, '-i', '-v', '--gtagsconf', self._gtags_conf]\n            
self.__run_gtags(cmd_line)\n","repo_name":"5t111111/alt-gtags.vim","sub_path":"altgtags_lib/altgtags/gtags_command.py","file_name":"gtags_command.py","file_ext":"py","file_size_in_byte":13469,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"}
+{"seq_id":"6775562155","text":"import collections\nfrom typing import List\n\n\nclass Solution1:\n    def killProcess(self, pid: List[int], ppid: List[int], kill: int) -> List[int]:\n        # BFS\n        n = len(pid)\n        hashmap = collections.defaultdict(list) # {parent:child}\n        for i in range(n):\n            hashmap[ppid[i]].append(pid[i])\n\n        res = []\n        queue = collections.deque()\n        queue.append(kill)\n\n        while queue:\n            cur = queue.popleft()\n            res.append(cur)\n\n            if cur in hashmap:\n                for child in hashmap[cur]:\n                    queue.append(child)\n        return res\n\n\nclass Solution: # DFS\n    def killProcess(self, pid, ppid, kill):\n        graph = collections.defaultdict(list)\n        for p, pp in zip(pid, ppid):\n            graph[pp].append(p)\n        res = []\n        self.dfs(graph, kill, res)\n        return res\n\n    def dfs(self, graph, node, res):\n\n        res.append(node)\n        for nei in graph[node]:\n            self.dfs(graph, nei, res)\n\n\n","repo_name":"rligithub/Leetcode","sub_path":"Tree/582. Kill Process.py","file_name":"582. Kill Process.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"}
+{"seq_id":"1252994844","text":"\"\"\"Layer\"\"\"\n\nfrom tensorflow import keras\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.layers import LeakyReLU\n\n\nclass TypeSpecificEncoder(keras.layers.Layer):\n    \"\"\"Layer docstring\"\"\"\n\n    def __init__(self, num_heads):\n        super().__init__()\n        self._num_heads = num_heads\n        self.leaky_relu = LeakyReLU()\n        self._event_embedding_shape = None\n        self._word_embedding_size = None\n        self.w = None\n        self.b = None\n\n    def build(self, input_shape):\n        \"\"\"Gets executed, the first time the layer gets called\"\"\"\n        self._word_embedding_size = input_shape[4]\n        self._event_embedding_shape = self._word_embedding_size * self._num_heads\n        # pylint: disable=invalid-name\n        self.w = self.add_weight(\n            name=\"w\",\n            shape=(self._num_heads, self._word_embedding_size),\n            initializer=\"random_normal\",\n            trainable=True,\n        )\n\n        # pylint: disable=invalid-name\n        self.b = self.add_weight(\n            name=\"b\",\n            shape=(self._num_heads, self._word_embedding_size),\n            initializer=\"random_normal\",\n            trainable=True,\n        )\n\n    @tf.function\n    def call(self, inputs):\n        \"\"\"The layer's forward pass\"\"\"\n        return self._recursive_map(inputs)\n\n    def _recursive_map(self, inputs):\n        if len(inputs.shape) <= 2:\n            return self._attention_map(inputs)\n        output_shape = self._get_recursive_output_shape(inputs)\n        return tf.map_fn(\n            self._recursive_map,\n            inputs,\n            fn_output_signature=tf.TensorSpec(shape=output_shape),\n        )\n\n    # @tf.function\n    def _attention_map(self, event):\n        if tf.math.count_nonzero(event) == 0:\n            return tf.zeros(shape=self._event_embedding_shape)\n        type_embedding = event[0]\n        words_embedding = event[1:]\n\n        # the following lambda executes the computations that have to be done per head.\n        # In order to iterate over weights and biases of every head simultaneously, we have to carry\n        # the bias values through the lambda and the return value\n        weighted_sum_vectors, _ = tf.map_fn(\n            lambda attention_parameters: (\n                self.calculate_normalized_attention(\n                    attention_parameters, words_embedding, type_embedding\n                ),\n                attention_parameters[1],\n            ),\n            (self.w, self.b),\n            parallel_iterations=10,\n        )\n\n        # concatenate all weighted sum vectors of 
each head for a final event embedding\n event_embedding = tf.reshape(\n weighted_sum_vectors, [self._num_heads * self._word_embedding_size]\n )\n\n return event_embedding\n\n def calculate_normalized_attention(\n self, attention_parameters, word_embeddings, event_type_embedding\n ):\n \"\"\"Attention implementation for one head\"\"\"\n\n attention_weights = attention_parameters[0]\n attention_bias = attention_parameters[1]\n\n # ====\n # paper equation 1 of attention mechanism\n # ====\n leaky_relu_in = attention_weights * word_embeddings + attention_bias\n hidden_representation = self.leaky_relu(leaky_relu_in)\n\n # ===\n # paper equation 2 of attention mechanism\n # ===\n # In order for the simultaneous matmul with all words of the event to work\n # the shape of the type embedding has to be adjusted\n event_type_embedding = tf.broadcast_to(\n event_type_embedding, hidden_representation.shape\n )\n\n # Adjusted shape matmul yields shape (49, 49) containing 49 rows of the same values.\n # We need only one row of this\n matmul = tf.matmul(event_type_embedding, tf.transpose(hidden_representation))[0]\n\n attention_values = tf.math.exp(matmul)\n attention_values_sum = tf.math.reduce_sum(attention_values)\n normalized_attention_values = attention_values / attention_values_sum\n\n # ===\n # paper equation 3 of attention mechanism\n # ===\n # this twists my brain on a regular basis:\n # every word has now one normalized attention value, which has to be multiplied\n # with every value in the 300 long word embedding.\n # This has to be done with every word.\n # This happens with the following two lines.\n # The output shape of weighted_word_embedding is still (49 * 300)\n normalized_attention_values = tf.broadcast_to(\n tf.expand_dims(normalized_attention_values, axis=1), word_embeddings.shape\n )\n weighted_word_embedding = word_embeddings * normalized_attention_values\n\n weighted_sum_vector = tf.reduce_sum(weighted_word_embedding, axis=0)\n\n return weighted_sum_vector\n\n def _get_recursive_output_shape(self, inputs):\n \"\"\"Removes the last two dimensions of the input and replaces them with a single dimension\n representing the event_embedding shape. 
Also, the first dimension has to be removed\n for whatever reason\"\"\"\n\n return tuple(inputs.shape[1:-2] + self._event_embedding_shape)\n","repo_name":"m0e33/REST-bot","sub_path":"model/layers/type_specific_encoder.py","file_name":"type_specific_encoder.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"71896715637","text":"# https://machinelearningmastery.com/text-generation-lstm-recurrent-neural-networks-python-keras/\nimport numpy\nimport tensorflow as tf\n\n# load ascii text and covert to lowercase\nfilename = \"DiscussionCorrect.txt\"\nraw_text = open(filename, 'r', encoding='utf-8').read()\nraw_text = raw_text.lower()\n\n# create mapping of unique chars to integers\nchars = sorted(list(set(raw_text)))\nchar_to_int = dict((c, i) for i, c in enumerate(chars))\nn_chars = len(raw_text)\nn_vocab = len(chars)\nprint(\"Total Characters: \", n_chars)\nprint(\"Total Vocab: \", n_vocab)\n\n# prepare the dataset of input to output pairs encoded as integers\nseq_length = 50\ndataX = []\ndataY = []\nfor i in range(0, n_chars - seq_length, 1):\n seq_in = raw_text[i:i + seq_length]\n seq_out = raw_text[i + seq_length]\n dataX.append([char_to_int[char] for char in seq_in])\n dataY.append(char_to_int[seq_out])\nn_patterns = len(dataX)\nprint(\"Total Patterns: \", n_patterns)\n\n# reshape X to be [samples, time steps, features]\nX = numpy.reshape(dataX, (n_patterns, seq_length, 1))\n# normalize\nX = X / float(n_vocab)\n# one hot encode the output variable\ny = tf.keras.utils.to_categorical(dataY)\n\n\nfilename = 'Models/weights-improvement-32-1.2830.h5'\nmodel = tf.keras.models.load_model(filename)\n# define the LSTM model\n'''\nmodel = tf.keras.models.Sequential()\nmodel.add(tf.keras.layers.LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))\nmodel.add(tf.keras.layers.Dropout(0.2))\nmodel.add(tf.keras.layers.Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))\nmodel.add(tf.keras.layers.Dense(512, activation='softmax'))\nmodel.add(tf.keras.layers.LSTM(256))\nmodel.add(tf.keras.layers.Dropout(0.2))\nmodel.add(tf.keras.layers.Dense(y.shape[1], activation='softmax'))\n'''\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n\n# define the checkpoint\nfilepath=\"Models2/weights-improvement-{epoch:02d}-{loss:.4f}.h5\"\ncheckpoint = tf.keras.callbacks.ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]\n\nmodel.fit(X, y, epochs=500, batch_size=124, callbacks=callbacks_list)\n","repo_name":"tttienthinh/LSTM","sub_path":"Discussion/FIT.py","file_name":"FIT.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"25483780403","text":"# -*- coding: utf-8 -*-\nclass SimpleRecursiveSolution:\n '''\n Follows a three pointer approach, matching s3 characters with either s1\n or s2 and recursively solving when s1 and s2 characters are the same.\n Time Complexity: O(2^(|s1|+|s2|)) and hence leads to a TLE.\n '''\n def solver(self, s1: str, s2: str, s3: str,\n p1: int, p2: int, p3: int) -> bool:\n # Base Condition -> All pointers have reached the end\n if p1 == len(s1) and p2 == len(s2) and p3 == len(s3):\n return True\n\n # s3 char is same as s1 and s2, branch to check both str possibilities\n if p1 < len(s1) and p2 < len(s2) and s3[p3] == s1[p1] == s2[p2]:\n return self.solver(s1, s2, s3, p1+1, p2, 
p3+1) or \\\n self.solver(s1, s2, s3, p1, p2+1, p3+1)\n # s3 char is same as s1 char only\n elif p1 < len(s1) and s1[p1] == s3[p3]:\n return self.solver(s1, s2, s3, p1+1, p2, p3+1)\n # s3 char is same as s2 char only\n elif p2 < len(s2) and s2[p2] == s3[p3]:\n return self.solver(s1, s2, s3, p1, p2+1, p3+1)\n # No Match\n else:\n return False\n\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n # Base Cases\n if len(s1) + len(s2) != len(s3):\n return False\n if len(s1) == 0:\n return s2 == s3\n if len(s2) == 0:\n return s1 == s3\n\n return self.solver(s1, s2, s3, 0, 0, 0)\n\n\nclass Solution:\n '''\n Similar approach to recursive solution but introduces caching ie.\n memoization to prevent unecessary recomputations.\n The map's key is of the format pointer1-pointer2-pointer3.\n '''\n def __init__(self):\n self.cache = dict()\n\n def solver(self, s1: str, s2: str, s3: str,\n p1: int, p2: int, p3: int) -> bool:\n # Base Condition -> All pointers have reached the end\n if p1 == len(s1) and p2 == len(s2) and p3 == len(s3):\n return True\n\n # Check Cache\n key = str(p1) + '-' + str(p2) + '-' + str(p3)\n if key in self.cache:\n return self.cache.get(key)\n\n # Cases where end of either string has been reached\n if p1 == len(s1):\n result = s2[p2:] == s3[p3:]\n self.cache[key] = result\n return result\n if p2 == len(s2):\n result = s1[p1:] == s3[p3:]\n self.cache[key] = result\n return result\n\n res1, res2 = False, False\n if s1[p1] == s3[p3]:\n res1 = self.solver(s1, s2, s3, p1+1, p2, p3+1)\n if s2[p2] == s3[p3]:\n res2 = self.solver(s1, s2, s3, p1, p2+1, p3+1)\n result = res1 or res2\n self.cache[key] = result\n return result\n\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n # Base Cases\n if len(s1) + len(s2) != len(s3):\n return False\n if len(s1) == 0:\n return s2 == s3\n if len(s2) == 0:\n return s1 == s3\n\n return self.solver(s1, s2, s3, 0, 0, 0)\n\n\nif __name__ == '__main__':\n obj = Solution()\n s1 = \"aabcc\"\n s2 = \"dbbca\"\n s3 = \"aadbbcbcac\"\n assert obj.isInterleave(s1, s2, s3)\n\n obj = Solution()\n s1 = \"bbbbbabbbbabaababaaaabbababbaaabbabbaaabaaaaababbbababbbbbabbbb\" + \\\n \"ababbabaabababbbaabababababbbaaababaa\"\n s2 = \"babaaaabbababbbabbbbaabaabbaabbbbaabaaabaababaaaabaaabbaaabaaaa\" + \\\n \"baabaabbbbbbbbbbbabaaabbababbabbabaab\"\n s3 = \"babbbabbbaaabbababbbbababaabbabaabaaabbbbabbbaaabbbaaaaabbbbaab\" + \\\n \"baaabababbaaaaaabababbababaababbababbbababbbbaaaabaabbabbaaaaabb\" + \\\n \"abbaaaabbbaabaaabaababaababbaaabbbbbabbbbaabbabaabbbbabaaabbabab\" + \\\n \"babbabbab\"\n assert not obj.isInterleave(s1, s2, s3)\n","repo_name":"siddydutta/Daily-LeetCoding-Challenges","sub_path":"2021-06-June-LeetCoding-Challenge/Interleaving String.py","file_name":"Interleaving String.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"7780672976","text":"import csv\nfrom collections import OrderedDict\n\n\n\"\"\"\nglobal variables\n\"\"\"\nabbreviation = OrderedDict()\nkjv = OrderedDict()\nasv = OrderedDict()\nylt = OrderedDict()\nweb = OrderedDict()\n\n\ndef read_bible():\n abbre_dir = \"data/bible/key_abbreviations_english.csv\"\n kjv_dir = \"data/bible/t_kjv.csv\"\n asv_dir = \"data/bible/t_asv.csv\"\n ylt_dir = \"data/bible/t_ylt.csv\"\n web_dir = \"data/bible/t_web.csv\"\n\n #build abbreviation dictionary\n with open(abbre_dir, 'r', encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\",\")\n next(reader)\n for id, a, b, p in reader:\n 
abbreviation[a] = str(b)\n\n #build kjv\n\n with open(kjv_dir, 'r', encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\",\")\n next(reader)\n for id, book, chap, verse, text in reader:\n #pad to 8 digits\n kjv[str(id).rjust(8, '0')] = text\n\n #build asv\n with open(asv_dir, 'r', encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\",\")\n next(reader)\n for id, book, chap, verse, text in reader:\n #pad to 8 digits\n asv[str(id).rjust(8, '0')] = text\n\n #build ylt\n with open(ylt_dir, 'r', encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\",\")\n next(reader)\n for id, book, chap, verse, text in reader:\n #pad to 8 digits\n ylt[str(id).rjust(8, '0')] = text\n\n # build ylt\n with open(web_dir, 'r', encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\",\")\n next(reader)\n for id, book, chap, verse, text in reader:\n # pad to 8 digits\n web[str(id).rjust(8, '0')] = text\n\n\n #for key, value in bbe.items():\n # print(key + \" : \" + value)\n\ndef process_q(input):\n dot_index = input.find(\".\")\n question = input[dot_index+1:].strip()\n #print(\"question\", question)\n return [question]\n #finish this\n\n\ndef find_verse_code(whole_verse):\n\n space_index = whole_verse.rfind(\" \")\n colon_index = whole_verse.find(\":\")\n\n book = whole_verse[: space_index].strip()\n chapter = whole_verse[space_index + 1 : colon_index]\n verse = whole_verse[colon_index + 1 :]\n\n #print(\"book name: \", book)\n #print (\"dic book: \", abbreviation[book])\n book_code = abbreviation[book].rjust(2, '0')\n\n\n chapter_code = chapter.rjust(3, '0')\n verse_code = verse.rjust(3, '0')\n\n entire_verse_code = book_code + chapter_code + verse_code\n\n return(entire_verse_code)\n\n\ndef get_context_from_verse(verse_code):\n book = verse_code[0:2]\n chapter = verse_code[2:5]\n\n #print(verse_code)\n lower_bound = book + chapter + \"000\"\n upper_bound = book + str(int(chapter) + 1).rjust(3, '0') + \"000\"\n\n #print (\"lower \", lower_bound)\n #print (\"upper \", upper_bound)\n\n kjv_context = \"\"\n asv_context = \"\"\n ylt_context = \"\"\n web_context = \"\"\n\n for key, value in kjv.items():\n if int(key) > int(lower_bound) and int(key) < int(upper_bound):\n kjv_context = kjv_context + \" \" + value\n\n for key, value in asv.items():\n if int(key) > int(lower_bound) and int(key) < int(upper_bound):\n asv_context = asv_context + \" \" + value\n\n for key, value in ylt.items():\n if int(key) > int(lower_bound) and int(key) < int(upper_bound):\n ylt_context = ylt_context + \" \" + value\n\n for key, value in web.items():\n if int(key) > int(lower_bound) and int(key) < int(upper_bound):\n web_context = web_context + \" \" + value\n\n return [kjv_context.strip(), asv_context.strip(), ylt_context.strip(), web_context.strip()]\n\ndef process_a(input):\n\n open_bracket_index = input.rfind(\"(\")\n close_bracket_index = input.rfind(\")\")\n dot_index = input.find(\".\")\n\n literal_answer = input[:open_bracket_index][dot_index+1:].strip()\n\n entire_verse = input[open_bracket_index+1:close_bracket_index].strip()\n\n #answer = entire_verse[0]\n #print(\"literal answer\", literal_answer)\n #print(\"entire verse\", entire_verse)\n\n verse_code = find_verse_code(entire_verse)\n\n all_context = get_context_from_verse(verse_code)\n kjv_context = all_context[0]\n asv_context = all_context[1]\n ylt_context = all_context[2]\n web_context = all_context[3]\n\n kjv_verse = kjv[verse_code]\n asv_verse = asv[verse_code]\n ylt_verse = ylt[verse_code]\n web_verse = web[verse_code]\n\n return 
[kjv_context, asv_context, ylt_context, web_context, literal_answer, verse_code, kjv_verse, asv_verse, ylt_verse, web_verse]\n\n\ndef read_data():\n    dir = \"data/bible_qa/1001.csv\"\n\n    #format:\n    #question, context KJV, context ASV, context YLT, context WEB\n    #literal answer, verse number,\n    ##verse content in 4 translations\n    #7 columns for each question\n\n    bible_qa = []\n\n    with open(dir, 'r', encoding=\"utf-8\") as f:\n        reader = csv.reader(f, delimiter = \",\")\n        next(reader)\n        id = 1\n        for question, answer in reader:\n            processed_q = process_q(question)\n            processed_a = process_a(answer)\n            if (processed_a == False):\n                continue\n            processed_qa = [str(id)] + processed_q + processed_a\n            id = id + 1\n            #print(processed_qa)\n            bible_qa.append(processed_qa)\n\n    print(\"finished processing\")\n    #print(bible_qa[0])\n    #print(len(bible_qa))\n    #print(bible_qa[0][0])\n\n    with open('bible_qa.csv', 'w') as f:\n        writer = csv.writer(f, delimiter='\\t')\n        writer.writerow(['ID', 'Question', 'KJV_Context', 'ASV_Context', 'YLT_Context', 'WEB_Context', 'Answer', 'Verse_Code', 'KJV_Verse', 'ASV_Verse', 'YLT_Verse', 'WEB_Verse'])\n        for row in bible_qa:\n            writer.writerow(row)\n    print(\"done\")\n\nif __name__ == '__main__':\n\n    read_bible()\n    read_data()","repo_name":"helen-jiahe-zhao/BibleQA","sub_path":"preprocessing/process_bible_qa.py","file_name":"process_bible_qa.py","file_ext":"py","file_size_in_byte":5820,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"}
+{"seq_id":"30405649467","text":"import logging\nimport multiprocessing as mp\nimport numpy as np\n\nimport Queue\nimport threading\n\nimport atexit\nimport random\nimport sys\n\nfrom multiprocessing import Pool\nfrom PIL import Image\nimport mxnet as mx\n\nimport multiprocessing as multiprocessing\n\nif sys.version_info[0] == 2:\n    import Queue as queue\n    string_classes = basestring\nelse:\n    import queue\n    string_classes = (str, bytes)\n\n\nclass SampleLoader(mx.io.DataIter):\n    \"\"\"\n    In mxnet, 5 functions below is necessary for implementing a DataLoader\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        set all required variables ready, see implementation below for more details \n        \"\"\"\n        raise NotImplementedError('you must override __init__() yourself')\n\n    def next(self):\n        \"\"\"\n        :return:\n            mx.io.DataBatch(data = [data], label = [label]) \n            detailed explanation later \n        :raises:\n            StopIteration: \n                if data loader reaches epoch end\n        \"\"\"\n        raise NotImplementedError('you must override next() yourself')\n\n    @property\n    def provide_data(self):\n        \"\"\"\n        :return:\n            [mx.io.DataDesc(), ... 
]\n A list of mx.io.DataDesc, which describes all label input blocks in network \n \"\"\"\n raise NotImplementedError('you must override provide_label() yourself')\n\n def reset(self):\n \"\"\"\n reset variables related to iterations, such as current_index, shuffle, etc\n \"\"\"\n raise NotImplementedError('you must override reset() yourself')\n\ndef collate_fn(batch):\n pass\n\nimport transforms\n\n\nclass default_collate(object):\n def __init__(self, feedin_shape):\n self.feedin_shape = feedin_shape\n\n def __call__(self, batch):\n data = {}\n label = {}\n\n for dsc in self.feedin_shape['data']:\n data[dsc.name] = []\n for dsc in self.feedin_shape['label']:\n label[dsc.name] = []\n\n for name in data:\n for entry in batch:\n data[name].append(entry[name])\n data[name] = transforms.mx.stack(data[name], axis=0)\n\n for name in label:\n for entry in batch:\n label[name].append(entry[name])\n label[name] = transforms.mx.stack(label[name], axis=0)\n\n return mx.io.DataBatch(data.values(), label.values())\n\nclass DataLoader(mx.io.DataIter):\n def default_collate_fn(self, batch):\n data = {}\n label = {}\n\n for dsc in self.provide_data:\n data[dsc.name] = []\n for dsc in self.provide_label:\n label[dsc.name] = []\n\n for name in data:\n for entry in batch:\n data[name].append(entry[name])\n data[name] = transforms.mx.stack(data[name], axis=0)\n\n for name in label:\n for entry in batch:\n label[name].append(entry[name])\n label[name] = transforms.mx.stack(label[name], axis=0)\n\n return mx.io.DataBatch(data=data.values(), provide_data=self.provide_data, label=label.values())\n\n def __init__(self, dataset, feedin_shape, collate_fn=default_collate, threads=1, shuffle=False):\n super(DataLoader, self).__init__()\n\n self.dataset = dataset\n self.threads = threads\n self.collate_fn = collate_fn(feedin_shape)\n # self.collate_fn = self.default_collate_fn\n\n # shape related variables\n\n self.data_shapes = feedin_shape['data']\n self.label_shapes = feedin_shape['label']\n self.batch_size = feedin_shape['batch_size']\n\n # loader related variables\n self.current = 0\n self.total = len(self.dataset)\n self.shuflle = shuffle\n self.map_index = list(range(self.total))\n\n # prepare for loading\n self.get_batch = self.get_batch_single_thread\n if self.threads > 1: # multi process read\n from multiprocessing.dummy import Pool as ThreadPool\n # self.pool = multiprocessing.Pool(self.threads)\n self.pool = ThreadPool(self.threads)\n self.get_batch = self.get_batch_multi_thread\n\n self.reset()\n\n def next(self):\n if self.current + self.batch_size > self.total:\n # reach end\n self.reset()\n raise StopIteration\n else:\n batch = self.get_batch()\n try:\n return self.collate_fn(batch)\n except AttributeError:\n print(batch)\n exit(-1)\n\n def get_single(self, index):\n # to ease\n idx = self.map_index[index]\n return self.dataset[idx]\n\n def get_batch_single_thread(self):\n entry = [None] * self.batch_size\n for idx in range(self.batch_size):\n entry[idx] = self.get_single(self.current + idx)\n self.current += self.batch_size\n return entry\n\n def get_batch_multi_thread(self):\n idx = range(self.current, self.current + self.batch_size)\n entry = self.pool.map(self.get_single, idx)\n self.current += self.batch_size\n return entry\n\n @property\n def provide_data(self):\n \"\"\"\n :return:\n [mx.io.DataDesc(), ... 
]\n            A list of mx.io.DataDesc, which describes all data input blocks in network\n        \"\"\"\n        return self.data_shapes\n        # raise NotImplementedError('you must override provide_data() ')\n\n    @property\n    def provide_label(self):\n        \"\"\"\n        :return:\n            [mx.io.DataDesc(), ... ]\n            A list of mx.io.DataDesc, which describes all label input blocks in network\n        \"\"\"\n        return self.label_shapes\n        # raise NotImplementedError('you must override provide_label() ')\n\n    def reset(self):\n        \"\"\"\n        reset variables related to iterations, such as current_index, shuffle, etc\n        \"\"\"\n        self.current = 0\n        if self.shuflle:\n            random.shuffle(self.map_index)\n        return\n\n\nclass _DataLoader(mx.io.DataIter):\n    \"\"\"\n    In mxnet, 5 functions below is necessary for implementing a DataLoader\n    \"\"\"\n\n    def __init__(self, dataset, feedin_shape, read_threads=1, ):\n        \"\"\"\n        set all required variables ready, see implementation below for more details \n        \"\"\"\n        super(_DataLoader, self).__init__()\n\n        self.dataset = dataset\n\n        ##################################################################################################\n        # shape related variables\n        # self.data_shapes = self.dataset.data_shapes\n        # self.label_shapes = self.dataset.label_shapes\n        # self.batch_size = self.dataset.batch_size\n\n        self.data_shapes = feedin_shape['data']\n        self.label_shapes = feedin_shape['label']\n        self.batch_size = feedin_shape['batch_size']\n\n        self.data_nums = len(self.provide_data)\n        self.label_nums = len(self.provide_label)\n\n        self.data_batch = [[None] * self.batch_size] * self.data_nums\n        self.label_batch = [[None] * self.batch_size] * self.label_nums\n        ##################################################################################################\n        # loader related variables\n        self.current = 0\n        self.total = len(self.dataset)\n        self.random_shuffle = False\n\n        ##################################################################################################\n        # multi thread acceleration\n        self.read_threads = read_threads\n        if self.read_threads > 1:\n            # self.pool = Pool(self.read_threads) # TODO: add pin memory to optimize speed\n            self.producer = Queue.Queue()\n            self.consumer = Queue.Queue()\n            for _ in range(self.read_threads):\n                t = threading.Thread(target=self.do_work, args=(self.producer, self.consumer))\n                t.daemon = True\n                t.start()\n\n    def do_work(self, in_queue, out_queue):\n        while True:\n            index = in_queue.get()\n            result = self.dataset[index]\n            out_queue.put(result)\n            in_queue.task_done()\n\n    def next(self):\n        \"\"\"\n        :return:\n            mx.io.DataBatch(data = [data], label = [label]) \n            detailed explanation later \n        :raises:\n            StopIteration: \n                if data loader reaches epoch end\n        \"\"\"\n        if self.current + self.batch_size > self.total:\n            raise StopIteration # reach epoch end\n        else:\n            return self.get_batch()\n\n    def load_batch(self):\n        # make it static, unreachable from outside\n        index_list = range(self.current, self.current + self.batch_size)\n\n        if self.read_threads == 1:\n            batch = []\n            for ind in index_list:\n                batch.append(self.__getitem__(ind))\n        else:\n            batch = []\n            for ind in index_list:\n                self.producer.put(ind)\n            self.producer.join()\n            for _ in xrange(self.batch_size):\n                batch.append(self.consumer.get()) # collect one worker result per queued index\n            # raise NotImplementedError\n            # batch = self.pool.map(self.__getitem__, index_list)\n        return batch\n\n    def get_batch(self):\n        # TODO: here is a wrong wrapping for collate\n        batch = self.load_batch()\n        # [((data1, ..., dataN), (label1, ..., labelN)),\n        # ((data1, ..., dataN), (label1, ..., labelN)),\n        # ....\n        # ((data1, ..., dataN), 
(label1, ..., labelN))]\n\n # TODO: make batch here\n for ind in range(self.data_nums):\n self.data_batch[ind] = [batch[i][0][ind] for i in range(self.batch_size)]\n for ind in range(self.label_nums):\n self.label_batch[ind] = [batch[i][1][ind] for i in range(self.batch_size)]\n\n for ind in range(self.data_nums):\n # self.data_batch[ind] = np.concatenate(self.data_batch[ind], axis=0)\n # self.data_batch[ind] = mx.nd.array(self.data_batch[ind])\n self.data_batch[ind] = mx.nd.concatenate(self.data_batch[ind], axis=0)\n\n for ind in range(self.label_nums):\n # self.label_nums[ind] = np.concatenate(self.label_nums[ind], axis=0)\n # self.label_batch[ind] = mx.nd.array(self.label_batch[ind])\n self.label_batch[ind] = mx.nd.concatenate(self.label_batch[ind], axis=0)\n\n return mx.io.DataBatch(data=self.data_batch, label=self.label_batch)\n\n def __getitem__(self, index):\n \"\"\"\n :param index(int): Index\n :return: \n tuple: (data, label) where data and label are collections \n \"\"\"\n return self.dataset[index]\n\n @staticmethod\n def pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n @property\n def provide_data(self):\n \"\"\"\n :return:\n [mx.io.DataDesc(), ... ]\n A list of mx.io.DataDesc, which describes all data input blocks in network \n \"\"\"\n return self.data_shapes\n # raise NotImplementedError('you must override provide_data() ')\n\n @property\n def provide_label(self):\n \"\"\"\n :return:\n [mx.io.DataDesc(), ... ]\n A list of mx.io.DataDesc, which describes all label input blocks in network \n \"\"\"\n return self.label_shapes\n # raise NotImplementedError('you must override provide_label() ')\n\n def reset(self):\n \"\"\"\n reset variables related to iterations, such as current_index, shuffle, etc\n \"\"\"\n self.current = 0\n # TODO: implement shuffle later\n # if self.random_shuffle:\n # random.shuffle(self.loader_list)\n\n def __len__(self):\n return len(self.dataset)\n\n\ndef collate_fn(batch):\n return batch\n\n'''\nfrom torchloader import DataLoader as torchloader\nfrom torchloader import DataLoaderIter as torchiter\n\n\nclass BoxLoader(mx.io.DataIter):\n \"\"\"\n In mxnet, 5 functions below is necessary for implementing a DataLoader\n \"\"\"\n\n def __init__(self, dataset, feedin_shape, num_workers=1, shuffle=False, collate_fn=collate_fn):\n \"\"\"\n set all required variables ready, see implementation below for more details \n \"\"\"\n super(BoxLoader, self).__init__()\n\n self.dataset = dataset\n self.read_threads = num_workers\n self.collate_fn = collate_fn\n ##################################################################################################\n # shape related variables\n # self.data_shapes = self.dataset.data_shapes\n # self.label_shapes = self.dataset.label_shapes\n # self.batch_size = self.dataset.batch_size\n\n self.data_shapes = feedin_shape['data']\n self.label_shapes = feedin_shape['label']\n self.batch_size = feedin_shape['batch_size']\n\n self.data_nums = len(self.provide_data)\n self.label_nums = len(self.provide_label)\n\n self.data_batch = [[None] * self.batch_size] * self.data_nums\n self.label_batch = [[None] * self.batch_size] * self.data_nums\n ##################################################################################################\n # loader related variables\n self.current = 0\n self.total = len(self.dataset)\n self.random_shuffle = False\n\n ###############\n\n 
self.torchloader = torchloader(self.dataset, batch_size=self.batch_size,\n                                       num_workers=self.read_threads,\n                                       shuffle=False, collate_fn=collate_fn, drop_last=True)\n\n    def __iter__(self):\n        return torchiter(self.torchloader)\n\n    def next(self):\n        \"\"\"\n        :return:\n            mx.io.DataBatch(data = [data], label = [label]) \n            detailed explanation later \n        :raises:\n            StopIteration: \n                if data loader reaches epoch end\n        \"\"\"\n        pass\n\n    def __getitem__(self, index):\n        \"\"\"\n        :param index(int): Index\n        :return: \n            tuple: (data, label) where data and label are collections \n        \"\"\"\n        return self.dataset[index]\n\n    @staticmethod\n    def pil_loader(path):\n        # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n        with open(path, 'rb') as f:\n            with Image.open(f) as img:\n                return img.convert('RGB')\n\n    @property\n    def provide_data(self):\n        \"\"\"\n        :return:\n            [mx.io.DataDesc(), ... ]\n            A list of mx.io.DataDesc, which describes all data input blocks in network \n        \"\"\"\n        return self.data_shapes\n        # raise NotImplementedError('you must override provide_data() ')\n\n    @property\n    def provide_label(self):\n        \"\"\"\n        :return:\n            [mx.io.DataDesc(), ... ]\n            A list of mx.io.DataDesc, which describes all label input blocks in network \n        \"\"\"\n        return self.label_shapes\n        # raise NotImplementedError('you must override provide_label() ')\n\n    def reset(self):\n        \"\"\"\n        reset variables related to iterations, such as current_index, shuffle, etc\n        \"\"\"\n        self.current = 0\n        # TODO: implement shuffle later\n        # if self.random_shuffle:\n        #     random.shuffle(self.loader_list)\n\n    def __len__(self):\n        return len(self.dataset)\n'''","repo_name":"Lyken17/mxbox","sub_path":"mxbox/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":15841,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"5"}
+{"seq_id":"31501735789","text":"\"\"\"\r\nProblem 6:\r\n\r\nFor a given number, find all the numbers smaller than the number.\r\nNumbers should be divisible by 3 and also by 5.\r\n\r\nHints:\r\nSo, you have to check two conditions: make sure the number is divisible by 3, and also by 5.\r\nHence, you will need to use two conditions.\r\n\r\n\"\"\"\r\n\r\n\r\ndef divisible(num):\r\n    result = []\r\n    for i in range(num):\r\n        if i % 5 == 0 and i % 3 == 0:\r\n            result.append(i)\r\n    return result\r\n\r\n\r\nnum = int(input('Enter a number: '))\r\nresult = divisible(num)\r\nprint('Result is: ', result)\r\n","repo_name":"hasnatosman/problem_solving","sub_path":"Divisible.py","file_name":"Divisible.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"42356569144","text":"import unittest\nfrom google.cloud import batch_v1\n\n\nclass TestBatchClient(unittest.TestCase):\n\n    def test_success(self):\n        # Dummy test\n        job = batch_v1.Job()\n        self.assertIsNotNone(job)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"DataBiosphere/dsub","sub_path":"test/unit/batch_client_test.py","file_name":"batch_client_test.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":253,"dataset":"github-code","pt":"5"}
+{"seq_id":"1508351459","text":"#main.py \r\nfrom collision_detection import *\r\n\r\nclass Character(Sprite):\r\n    def __init__(self, x, y, width, height, image, jump=False):\r\n        super().__init__(x, y, width, height, image)\r\n        self.jump = jump\r\n\r\n    def hop(self, distance=300):\r\n        self.y 
+= distance\r\n\r\nwizard = Character(-128, 200, 128, 128, \"wizard.gif\")\r\ngoblin = Sprite(128, 200, 108, 128, \"goblin.gif\")\r\n\r\npacman = Character(-128, 0, 128, 128, \"pacman.gif\", jump=False)\r\ncherry = Sprite(128, 0, 128, 128, \"cherry.gif\")\r\n\r\nbar = Sprite(0, -350, 128, 24, \"bar.gif\")\r\nball = Sprite(0, -150, 32, 32, \"ball.gif\")\r\n\r\n# list of all sprites\r\nsprites = [wizard, goblin, pacman, cherry, bar, ball]\r\n\r\n# move the goblin\r\ndef move_goblin():\r\n    goblin.x -= 64\r\n\r\n    if goblin.x < -300:\r\n        goblin.x = 128\r\n\r\n# move Pac-Man\r\ndef move_pacman():\r\n    pacman.x += 30\r\n\r\n    if pacman.x > 300:\r\n        pacman.x = -128\r\n\r\n# Pac-Man jump\r\ndef jump_pacman(distance=300):\r\n    if not pacman.jump:\r\n        pacman.hop(distance)\r\n        pacman.jump = True\r\n\r\n        def reset_jump():\r\n            pacman.hop(-300)\r\n            pacman.jump = False\r\n\r\n        wn.ontimer(reset_jump, 500) # reset the jump after 500 ms (0.5 s)\r\n\r\n# move the ball\r\ndef move_ball():\r\n    ball.y -= 24\r\n\r\n    if ball.y < -400:\r\n        ball.y = -150\r\n\r\n# event handling\r\nwn.listen()\r\nwn.onkeypress(move_goblin, \"Left\") # left arrow key\r\nwn.onkeypress(move_pacman, \"Right\") # right arrow key\r\nwn.onkeypress(jump_pacman, \"space\") # space key\r\nwn.onkeypress(move_ball, \"Down\") # down arrow key\r\n\r\nwhile True:\r\n\r\n    # move each sprite and stamp it\r\n    for sprite in sprites:\r\n        sprite.render(pen)\r\n\r\n    # check for collisions\r\n    if wizard.is_overlapping_collision(goblin):\r\n        wizard.image = \"x.gif\"\r\n\r\n    if pacman.is_distance_collision(cherry):\r\n        cherry.image = \"x.gif\"\r\n\r\n    if bar.is_aabb_collision(ball):\r\n        ball.image = \"x.gif\"\r\n\r\n    # restore images\r\n    if cherry.image == \"x.gif\" and not pacman.is_distance_collision(cherry):\r\n        cherry.image = \"cherry.gif\"\r\n    \r\n    if wizard.image == \"x.gif\" and not wizard.is_overlapping_collision(goblin):\r\n        wizard.image = \"wizard.gif\"\r\n\r\n    if ball.image == \"x.gif\" and not bar.is_aabb_collision(ball):\r\n        ball.image = \"ball.gif\"\r\n\r\n    wn.update() # update the screen\r\n    pen.clear() # clear sprite movement traces\r\n","repo_name":"hyeeeeeeeee0/python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"14509890803","text":"import torch\nimport numpy as np\n\ndef compute_gradient_penalty(C, real_samples, fake_samples, convexified=True):\n    alpha = torch.tensor(np.random.random((real_samples.size(0), 1)), device='cuda', dtype=torch.float)\n\n    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)\n    c_interpolates = C(interpolates)\n    fake = torch.autograd.Variable(torch.ones(real_samples.shape[0], 1, device='cuda'), requires_grad=False)\n\n    gradients = torch.autograd.grad(outputs=c_interpolates,\n                                    inputs=interpolates,\n                                    grad_outputs=fake,\n                                    create_graph=True,\n                                    retain_graph=True,\n                                    only_inputs=True)[0]\n    gradients = gradients.view(gradients.size(0), -1)\n\n    gradient_penalty = gradients.norm(2, dim=1) - 1\n    if convexified:\n        gradient_penalty = torch.clamp(gradient_penalty, 0, np.inf)\n    gradient_penalty = (gradient_penalty**2).mean()\n\n    return gradient_penalty","repo_name":"sdittmer/gtfd","sub_path":"utils/compute_gradient_penalty.py","file_name":"compute_gradient_penalty.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"28111017687","text":"from django.shortcuts import render, redirect\nfrom django.forms import inlineformset_factory\n\nfrom django.core.paginator import 
Paginator, EmptyPage, PageNotAnInteger\n\nfrom .models import Quiz, QuizAttempt\nfrom .forms import QuizEditForm\n\nfrom mcq.models import MCQ, MCQAnswer\nfrom mcq.forms import MCQForm, MCQAnswerForm, MCQQAttemptForm\n\nfrom torfq.models import TorFQ\nfrom torfq.forms import TorFQForm\n\nfrom essayqu.models import EssayQ\nfrom essayqu.forms import EssayQForm\n\nAnswerSetForm = inlineformset_factory(parent_model=MCQ, model=MCQAnswer, fields = ('body', 'is_correct'), extra=4, max_num=4)\n\n# Create your views here.\ndef QuizCreateView(request):\n    if request.user.is_tutor:\n        if request.method == 'GET':\n            new_quiz = Quiz.objects.create(user_id=request.user.id)\n            return redirect('/quiz/editQuiz/{}'.format(new_quiz.id))\n    return redirect('/home/')\n\ndef save_quiz(request, quiz_id):\n\tquiz = Quiz.objects.get(id=quiz_id)\n\tform = QuizEditForm(request.POST, instance=quiz)\n\tif form.is_valid():\n\t\tform = form.save(commit=False)\n\t\tform.user = request.user\n\t\tform.save()\n\treturn redirect('/home/')\n\ndef delete_quiz(request, quiz_id):\n\tQuiz.objects.get(id=quiz_id).delete()\n\treturn redirect('/home/')\n\ndef save_question(request, quiz_id):\n\tquestion_body = request.POST['body']\n\n\t# mcq = MCQ.objects.create(quiz=Quiz.objects.get(id=quiz_id), body=question_body)\n\tmcq_form = MCQForm(request.POST)\n\tif mcq_form.is_valid():\n\t\tmcq = mcq_form.save(commit=False)\n\t\tmcq.quiz = Quiz.objects.get(id=quiz_id)\n\t\tmcq.save()\n\t\tanswers_form = AnswerSetForm(request.POST, instance=mcq)\n\t\tif answers_form.is_valid():\n\t\t\t# mf = mcq_form.save(commit=False)\n\t\t\t# mf.quiz = Quiz.objects.get(id=quiz_id)\n\t\t\t# mf.save()\n\t\t\tanswers_form.save()\n\treturn redirect('/quiz/editQuiz/{}/'.format(quiz_id))\n\ndef save_torfq(request, quiz_id):\n\ttorfq_form = TorFQForm(request.POST)\n\tif torfq_form.is_valid():\n\t\ttorfq = torfq_form.save(commit=False)\n\t\ttorfq.quiz = Quiz.objects.get(id=quiz_id)\n\t\ttorfq.save()\n\treturn redirect('/quiz/editQuiz/{}/'.format(quiz_id))\n\ndef save_essayq(request, quiz_id):\n\tessayq_form = EssayQForm(request.POST)\n\tif essayq_form.is_valid():\n\t\tessayq = essayq_form.save(commit=False)\n\t\tessayq.quiz = Quiz.objects.get(id=quiz_id)\n\t\tessayq.save()\n\treturn redirect('/quiz/editQuiz/{}/'.format(quiz_id))\n\ndef remove_question(request, quiz_id):\n\tq_type, eyed = request.POST['removeQuestion'].split(' ')\n\tif q_type == 'MCQ':\n\t\tMCQ.objects.get(id=eyed).delete()\n\tif q_type == 'TorFQ':\n\t\tTorFQ.objects.get(id=eyed).delete()\n\tif q_type == 'EssayQ':\n\t\tEssayQ.objects.get(id=eyed).delete()\n\n\treturn redirect('/quiz/editQuiz/{}/'.format(quiz_id))\ndef save_changes(request, quiz_id):\n\tq_type, eyed = request.POST['saveChanges'].split(' ')\n\n\tif q_type == 'MCQ':\n\t\tmcq = MCQ.objects.get(id=eyed)\n\t\tmcq_form = MCQForm(request.POST, instance=mcq)\n\t\tanswers_form = AnswerSetForm(request.POST, instance=mcq)\n\t\t# answers_form.save()\n\t\tif mcq_form.is_valid() and answers_form.is_valid():\n\t\t\tmcq_form.save()\n\t\t\tanswers_form.save()\n\n\t\t# if mcq_form.is_valid() and answers_form.is_valid():\n\t\t# \t# mf = mcq_form.save(commit=False)\n\t\t# \t# mf.quiz = Quiz.objects.get(id=quiz_id)\n\t\t# \tmcq_form.save()\n\t\t# \tanswers_form.save()\n\tif q_type == 'TorFQ':\n\t\ttorfq = TorFQ.objects.get(id=eyed)\n\t\ttorfq_form = TorFQForm(request.POST, instance=torfq)\n\n\t\tif torfq_form.is_valid():\n\t\t\ttorfq_form.save()\n\n\tif q_type == 'EssayQ':\n\t\tessayq = EssayQ.objects.get(id=eyed)\n\t\tessayq_form = EssayQForm(request.POST, 
instance=essayq)\n\n\t\tif essayq_form.is_valid():\n\t\t\tessayq_form.save()\n\treturn redirect('/quiz/editQuiz/{}/'.format(quiz_id))\n\n\ndef editing_question(request, quiz_id):\n\tquiz = Quiz.objects.get(id=quiz_id)\n\tq_type, eyed = request.POST['editQuestion'].split(' ')\n\tcontext = {}\n\tcontext['editing'] = True\n\n\tcontext['question_type'] = q_type\n\tcontext['question_id'] = eyed\n\n\tcontext['mcq'] = False\n\tcontext['torfq'] = False\n\tcontext['essayq'] = False\n\n\tif q_type == 'MCQ':\n\t\tmcq = MCQ.objects.get(id=eyed)\n\t\tcontext['quiz_form'] = QuizEditForm(instance=quiz)\n\t\tcontext['question_form'] = MCQForm(instance=mcq)\n\t\tcontext['answer_set_form'] = AnswerSetForm(instance=mcq)\n\t\tcontext['added_questions'] = quiz.get_all_questions()\n\t\tcontext['mcq'] = True\n\tif q_type == 'TorFQ':\n\t\ttorfq = TorFQ.objects.get(id=eyed)\n\t\tcontext['quiz_form'] = QuizEditForm(instance=quiz)\n\t\tcontext['torfq_form'] = TorFQForm(instance=torfq)\n\t\tcontext['added_questions'] = quiz.get_all_questions()\n\t\tcontext['torfq'] = True\n\tif q_type == 'EssayQ':\n\t\tessayq = EssayQ.objects.get(id=eyed)\n\t\tcontext['quiz_form'] = QuizEditForm(instance=quiz)\n\t\tcontext['essayq_form'] = EssayQForm(instance=essayq)\n\t\tcontext['added_questions'] = quiz.get_all_questions()\n\t\tcontext['essayq'] = True\n\n\treturn render(request, 'quiz/edit.html', context)\n\ndef QuizEditView(request, quiz_id):\n\tquiz = Quiz.objects.get(id=quiz_id)\n\tif request.user.is_tutor:\n\t\tcontext = {}\n\t\tcontext['quiz'] = quiz\n\t\tcontext['editing'] = False\n\t\tcontext['quiz_form'] = QuizEditForm(instance=quiz)\n\t\tcontext['question_form'] = MCQForm()\n\n\t\tcontext['answer_set_form'] = AnswerSetForm()\n\t\t#print (dir(context['answer_set_form'].forms[0]['is_correct']))\n\n\t\tcontext['torfq_form'] = TorFQForm()\n\n\t\tcontext['essayq_form'] = EssayQForm()\n\n\t\tcontext['added_questions'] = quiz.get_all_questions()\n\n\t\tif request.method == 'POST':\n\t\t\tif 'saveQuiz' in request.POST:\n\t\t\t\treturn save_quiz(request, quiz_id)\n\t\t\tif 'deleteQuiz' in request.POST:\n\t\t\t\treturn delete_quiz(request, quiz_id)\n\n\t\t\tif 'saveQuestion' in request.POST:\n\t\t\t\treturn save_question(request, quiz_id)\n\t\t\tif 'saveTorFQ' in request.POST:\n\t\t\t\treturn save_torfq(request, quiz_id)\n\t\t\tif 'saveEssayQ' in request.POST:\n\t\t\t\treturn save_essayq(request, quiz_id)\n\n\t\t\tif 'removeQuestion' in request.POST:\n\t\t\t\treturn remove_question(request, quiz_id)\n\t\t\tif 'editQuestion' in request.POST:\n\t\t\t\treturn editing_question(request, quiz_id)\n\t\t\tif 'saveChanges' in request.POST:\n\t\t\t\treturn save_changes(request, quiz_id)\n\n\treturn render(request, 'quiz/edit.html', context)\n\ndef QuizListView(request):\n\tquiz_list = Quiz.objects.all()\n\tpage = request.GET.get('page', 1)\n\n\tpaginator = Paginator(quiz_list, 10)\n\ttry:\n\t\tquizzes = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tquizzes = paginator.page(1)\n\texcept EmptyPage:\n\t\tquizzes = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'quiz/quiz_list.html', { 'quizzes': quizzes })\n\ndef QuizTakeView(request, quiz_id):\n\tquiz = Quiz.objects.get(id=quiz_id)\n\ttry:\n\t\tquiz_attempt = QuizAttempt.objects.get(quiz=quiz)\n\texcept QuizAttempt.DoesNotExist:\n\t\tquiz_attempt = QuizAttempt.objects.create(user=request.user, quiz=quiz)\n\n\tquestions = quiz.get_all_questions()\n\tforms = [MCQQAttemptForm(q) for q in questions]\n\n\tmylist = zip(questions, forms)\n\n\tpage = request.GET.get('page', 1)\n\n\tpaginator = Paginator(questions, 
1)\n\ttry:\n\t\tquestion = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tquestion = paginator.page(1)\n\texcept EmptyPage:\n\t\tquestion = paginator.page(paginator.num_pages)\n\n\n\treturn render(request, 'quiz/take_quiz.html', {'question': question, 'forms': forms, 'mylist': mylist})\n\n\n\n\n\n\n\n","repo_name":"arnavkohli/BrainyBuddy","sub_path":"quiz/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1033286606","text":"import copy\nimport random\nimport matplotlib.pyplot as plt\n\nfrom soft.utils.individual import Individual\n\n\ndef selection_rank_with_elite(individuals, elite_size = 0):\n    sorted_individuals = sorted(individuals, key = lambda ind: ind.fitness, reverse = True)\n    rank_distance = 1 / len(individuals)\n    ranks = [(1 - i * rank_distance) for i in range(len(individuals))]\n    ranks_sum = sum(ranks)\n    selected = sorted_individuals[0:elite_size]\n\n    for _ in range(len(sorted_individuals) - elite_size):\n        shave = random.random() * ranks_sum\n        rank_sum = 0\n        for i in range(len(sorted_individuals)):\n            rank_sum += ranks[i]\n            if rank_sum > shave:\n                selected.append(sorted_individuals[i])\n                break\n\n    return selected\n\n\ndef crossover(ind1, ind2):\n    child1 = Individual(copy.deepcopy(ind1.gene_tree), ind1.structure)\n    child2 = Individual(copy.deepcopy(ind2.gene_tree), ind2.structure)\n    rand1 = child1.get_random_node()\n    rand2 = child2.get_random_node_by_operator_type(rand1.name.o_type)\n    if not rand2:\n        return [child1, child2]\n    child1.replace_node(rand1, rand2)\n    child2.replace_node(rand2, rand1)\n    child1.set_up()\n    child2.set_up()\n    return [child1, child2]\n\n\ndef mutate(ind, operator_type_ratio, min_h, max_h):\n    ind_copy = Individual(copy.deepcopy(ind.gene_tree), ind.structure)\n    random_types = random.choices(list(operator_type_ratio.keys()), list(operator_type_ratio.values()))\n    random_type = random_types[0]\n\n    if random_type != 'top':\n        replaced_node = ind_copy.get_random_node_by_operator_type(random_type)\n\n        if not replaced_node:\n            return ind_copy\n        operator = replaced_node.name\n        domain = operator.return_type\n        if operator.is_term():\n            operator.value = operator.mutation_rule(operator.value)\n        else:\n            random_tree = ind.structure.generate_random_tree(domain, min_h, max_h)\n            ind_copy.replace_node(replaced_node, random_tree)\n    else:\n        random_tree = ind.structure.generate_random_tree('bool', min_h, max_h, starts_with = 'OR',\n                                                         put_before_top = ind_copy.gene_tree.name)\n        ind_copy = Individual(copy.deepcopy(random_tree), ind.structure)\n\n    ind_copy.set_up()\n    return ind_copy\n\n\ndef mutate_operator(ind):\n    ind_copy = Individual(copy.deepcopy(ind.gene_tree), ind.structure)\n    random_types = ['operator']\n    replaced_node = ind_copy.get_random_node_by_operator_type(random_types[0])\n    operator = replaced_node.name\n    operator.value = operator.mutation_rule(operator.value)\n    ind_copy.set_up()\n    return ind_copy\n\n\ndef crossover_fitness_driven(p1, p2):\n    c1, c2 = crossover(p1, p2)\n    candidates = [c1, c2, p1, p2]\n    best = sorted(candidates, key = lambda ind: ind.fitness, reverse = True)\n    return best[0:2]\n\n\ndef mutation_fitness_driven(ind, operator_kind_ratio, min_h, max_h, max_tries = 3):\n    for _ in range(0, max_tries):\n        mutated = mutate(ind, operator_kind_ratio, min_h, max_h)\n        if mutated.fitness > ind.fitness:\n            return mutated\n    return ind\n\n\ndef mutation_operator_fitness_driven(ind, max_tries = 3):\n    for _ in range(0, max_tries):\n        
mutated = mutate_operator(ind)\n if mutated.fitness > ind.fitness:\n return mutated\n return ind\n\n\ndef crossover_operation(population, prob):\n crossed_offspring = []\n for ind1, ind2 in zip(population[::2], population[1::2]):\n if random.random() < prob:\n kid1, kid2 = crossover_fitness_driven(ind1, ind2)\n crossed_offspring.append(kid1)\n crossed_offspring.append(kid2)\n else:\n crossed_offspring.append(ind1)\n crossed_offspring.append(ind2)\n return crossed_offspring\n\n\ndef mutation_operation(population, operator_kind_ratio, min_h, max_h, prob, with_operator_mutation = False):\n mutated_offspring = []\n for mutant in population:\n if random.random() < prob:\n new_mutant = mutation_fitness_driven(mutant, operator_kind_ratio, min_h, max_h)\n mutated_offspring.append(new_mutant)\n else:\n if with_operator_mutation:\n new_mutant = mutation_operator_fitness_driven(mutant, max_tries = 10)\n mutated_offspring.append(new_mutant)\n else:\n mutated_offspring.append(mutant)\n return mutated_offspring\n\n\ndef stats(population, best_ind, fit_avg, fit_best):\n best_of_generation = max(population, key = lambda ind: ind.fitness)\n if best_ind.fitness < best_of_generation.fitness:\n best_ind = best_of_generation\n fit_avg.append(sum([ind.fitness for ind in population]) / len(population))\n fit_best.append(best_ind.fitness)\n\n return best_ind, fit_avg, fit_best\n\n\ndef plot_stats(fit_avg, fit_best, title):\n plt.plot(fit_avg, label = \"Average Fitness of Generation\")\n plt.plot(fit_best, label = \"Best Fitness\")\n plt.title(title)\n plt.legend(loc = \"lower right\")\n plt.show()\n plt.close()\n","repo_name":"survexman/sgp_classifier","sub_path":"soft/utils/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9329061091","text":"from unittest.mock import MagicMock\n\nfrom google.oauth2.service_account import Credentials\nfrom googleapiclient.discovery import Resource\nfrom googleapiclient.http import MediaFileUpload\n\nfrom sync.google_drive.google_drive_file import GoogleDriveFile\nfrom sync.google_drive.google_drive_filesystem import GoogleDriveFilesystem\nfrom sync.google_drive.google_drive_list_file_response import GoogleDriveListFileResponse\n\n\nclass TestGoogleDriveFilesystem:\n def test_fs_name_is_google_drive(self):\n assert GoogleDriveFilesystem.get_filesystem_name() == 'google-drive'\n\n def test_create_fs_from_service_key_file(self, mocker):\n credential = MagicMock(spec=Credentials)\n from_service_account_file = mocker.patch('google.oauth2.service_account.Credentials.from_service_account_file',\n return_value=credential)\n\n service = MagicMock(spec=Resource)\n build = mocker.patch('googleapiclient.discovery.build', return_value=service)\n\n fs = GoogleDriveFilesystem.create('key.json')\n\n from_service_account_file.assert_called_once_with('key.json')\n build.assert_called_once_with('drive', 'v3', credentials=credential)\n assert isinstance(fs, GoogleDriveFilesystem)\n assert fs.drive_service == service\n\n def test_list_file(self):\n drive = MagicMock(spec=Resource)\n fs = GoogleDriveFilesystem(drive)\n\n res = fs.list_files('asdfasdsadfasdfasd', True)\n assert isinstance(res, GoogleDriveListFileResponse)\n assert res.google_drive_service == drive\n assert len(res.requests) == 1\n assert res.requests[0].directory_id == 'asdfasdsadfasdfasd'\n assert res.is_recursive\n\n def test_create_a_file(self, mocker):\n google_drive_service = 
MagicMock(spec=Resource)\n google_drive_service.mock_add_spec(['files'])\n\n file_resource = MagicMock(spec=Resource)\n file_resource.mock_add_spec(['create'])\n google_drive_service.files.return_value = file_resource\n\n create_resource = MagicMock(spec=Resource)\n create_resource.mock_add_spec(['execute'])\n file_resource.create.return_value = create_resource\n\n create_resource.execute.return_value = {\n \"id\": \"asdgasdfasdf\",\n \"mimeType\": \"application/text\",\n \"md5Checksum\": \"akcklsjfkdlsjfk\"\n }\n\n mocked_upload = MagicMock(spec=MediaFileUpload)\n mocker.patch('googleapiclient.http.MediaFileUpload', return_value=mocked_upload)\n\n fs = GoogleDriveFilesystem(google_drive_service)\n file = fs.create_file(\n GoogleDriveFile('asdgasdfasdf', 'aa', 'dir', 'application/vnd.google-apps.folder', None),\n 'a.txt',\n 'tmp.txt'\n )\n\n assert isinstance(file, GoogleDriveFile)\n file_resource.create.assert_called_once_with(\n body={\n \"name\": \"a.txt\",\n \"parents\": [\n \"asdgasdfasdf\"\n ]\n },\n media_body=mocked_upload,\n fields='id, mimeType, md5Checksum'\n )\n assert file.file_id == 'asdgasdfasdf'\n assert file.file_path == 'aa/a.txt'\n assert file.file_name == 'a.txt'\n assert file.mime_type == 'application/text'\n assert file.md5_checksum == 'akcklsjfkdlsjfk'\n assert not file.is_dir()\n\n def test_create_a_directory(self):\n google_drive_service = MagicMock(spec=Resource)\n google_drive_service.mock_add_spec(['files'])\n\n file_resource = MagicMock(spec=Resource)\n file_resource.mock_add_spec(['create'])\n google_drive_service.files.return_value = file_resource\n\n create_resource = MagicMock(spec=Resource)\n create_resource.mock_add_spec(['execute'])\n file_resource.create.return_value = create_resource\n\n create_resource.execute.return_value = {\n \"id\": \"asdgasdfasdf\"\n }\n\n fs = GoogleDriveFilesystem(google_drive_service)\n file = fs.create_directory(\n GoogleDriveFile('asdgasdfasdf', 'aa', 'dir', 'application/vnd.google-apps.folder', None),\n 'bb',\n )\n\n assert isinstance(file, GoogleDriveFile)\n file_resource.create.assert_called_once_with(\n body={\n \"name\": \"bb\",\n \"mimeType\": \"application/vnd.google-apps.folder\",\n \"parents\": [\n \"asdgasdfasdf\"\n ]\n },\n fields='id'\n )\n assert file.file_id == 'asdgasdfasdf'\n assert file.file_path == 'aa/bb'\n assert file.file_name == 'bb'\n assert file.mime_type == 'application/vnd.google-apps.folder'\n assert file.md5_checksum is None\n assert file.is_dir()\n\n def test_delete_a_file(self):\n google_drive_service = MagicMock(spec=Resource)\n google_drive_service.mock_add_spec(['files'])\n\n file_resource = MagicMock(spec=Resource)\n file_resource.mock_add_spec(['delete'])\n google_drive_service.files.return_value = file_resource\n\n delete_resource = MagicMock(spec=Resource)\n delete_resource.mock_add_spec(['execute'])\n file_resource.delete.return_value = delete_resource\n\n fs = GoogleDriveFilesystem(google_drive_service)\n fs.delete_file('asdfjlasdkfj')\n\n file_resource.delete.assert_called_once_with(fileId='asdfjlasdkfj')\n\n def test_get_root_directory(self):\n fs = GoogleDriveFilesystem(MagicMock(spec=Resource))\n file = fs.get_root_dir('asdfasgsdafdasdfas')\n\n assert file.file_id == 'asdfasgsdafdasdfas'\n assert file.file_path == '.'\n assert file.file_name == 'root'\n assert 
file.is_dir()\n","repo_name":"mingchaoliao/dir-sync","sub_path":"tests/unit/google_drive/test_google_drive_filesystem.py","file_name":"test_google_drive_filesystem.py","file_ext":"py","file_size_in_byte":5758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"29165696658","text":"# KEYS AND IDENTIFIERS\nDARK_SKY_API_KEY = \"\"\nNEWS_API_KEY = \"\"\nMAILGUN_API_KEY = \"\"\nNOMINATIM_USER_AGENT = \"\" # Unique user-agent string for identification to the Nominatim server.\n\n# MAIL\nMAILGUN_DOMAIN = \"\" # The domain under which the Mailgun account is registered.\nMAILGUN_FROM_ADDR = \"\" # The address from which emails should be sent (preferably of the form \"From Name <address>\")\n\n# ARTICLE SPECIFIC\nTITLE_EXCLUSIONS = [\"video:\", \"watch:\"] # keywords in titles to avoid (e.g., videos)\nMIN_PARAGRAPHS_FOR_AN_ARTICLE = 5 # minimum length of an article","repo_name":"NairVish/news-ebook-creator","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38189156393","text":"\"\"\"\nCreated by Neel Gokhale at 2020-07-20\nFile real_world_conv.py from project week4_coursera\nBuilt using PyCharm\n\n\"\"\"\n\n# NOTE: this neural network model is trained by running the validate.py file. Do not run this file directly unless required.\n\nimport os\nfrom get_data import train_horse_dir, train_human_dir, val_horse_dir, val_human_dir\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n# Img directory info\n\n# Training\ntrain_horse_names = os.listdir(train_horse_dir)\ntrain_human_names = os.listdir(train_human_dir)\nlen_train_horses = len(train_horse_names)\nlen_train_humans = len(train_human_names)\n# Validation\nval_horse_names = os.listdir(val_horse_dir)\nval_human_names = os.listdir(val_human_dir)\nlen_val_horses = len(val_horse_names)\nlen_val_humans = len(val_human_names)\n\n# Model\n\nmodel = tf.keras.models.Sequential([\n\n    # First convolution layer\n    tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(300, 300, 3)),\n    tf.keras.layers.MaxPooling2D(2, 2),\n    # Second convolution layer\n    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2, 2),\n    # Third convolution layer\n    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2, 2),\n    # Fourth convolution layer\n    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2, 2),\n    # Fifth convolution layer\n    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2, 2),\n    # Dense layers\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(512, activation='relu'),\n    tf.keras.layers.Dense(1, activation='sigmoid')])\n\nmodel.summary()\n\n# Compile model for training\n\nmodel.compile(loss='binary_crossentropy',\n              optimizer=RMSprop(lr=0.001),\n              metrics=['accuracy'])\n\n# Data preprocessing and image generators\n\ntrain_datagen = ImageDataGenerator(rescale=1./255)\nval_datagen = ImageDataGenerator(rescale=1./255)\n\n# Training in batches of batch_size = 128 using datagenerator\ntrain_batch_size = 128\nval_batch_size = 32\n\ntrain_generator = train_datagen.flow_from_directory(\n\n    # Source dir for images\n    '/Users/Owner/PycharmProjects/week4_coursera/img/horse-or-human',\n    target_size=(300, 300),\n    batch_size=train_batch_size,\n    
class_mode='binary')\n\nval_generator = val_datagen.flow_from_directory(\n    '/Users/Owner/PycharmProjects/week4_coursera/img/validation-horse-or-human',\n    target_size=(300, 300),\n    batch_size=val_batch_size,\n    class_mode='binary')\n\n# Training\n\nhistory = model.fit_generator(\n    generator=train_generator,\n    steps_per_epoch=(len_train_horses + len_train_humans) // train_batch_size,  # integer step count covering both classes of the training set\n    epochs=15,\n    verbose=1,\n    validation_data=val_generator,\n    validation_steps=8)\n","repo_name":"neelgokhale/Tensorflow_Convolutional_NN","sub_path":"real_world_conv.py","file_name":"real_world_conv.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10621501085","text":"##import base64\r\nimport urllib.request\r\n\r\n#user information - add your own\r\ndevid = \"\"\r\ndevkey = \"\"\r\niv = \"\"\r\nmemberid = \"\"\r\n\r\nbaseurl = \"http://api.blackoutrugby.com/\"\r\nrequest = \"rk&start=20000\" #refer to the docs, you don't need the &r= part here\r\n\r\n# suffix with member details\r\nrequest = request + \"&memberid=\" + memberid\r\n\r\n# construct full request URL\r\nurl = baseurl + \"?d=\" + devid + \"&dk=\" + devkey + \"&r=\" + request + \"&json=1\"\r\nprint(url)\r\n\r\n# issue request\r\nu = urllib.request.urlopen(url)\r\n\r\n# read data, returned as bytes (decode it to get a string)\r\ndata = u.read()\r\n","repo_name":"ogilberry/brscout","sub_path":"br_unencrypted_request.py","file_name":"br_unencrypted_request.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5310776844","text":"import tensorflow as tf\nimport mlworkflow as mlwf\nimport experimentator\nfrom experimentator import find\nimport experimentator.tf2_experiment\nimport deepsport_utilities.ds.instants_dataset\nimport models.other\nimport models.tensorflow\nimport experimentator.wandb_experiment\nimport tasks.ballsize\nimport tasks.detection\n\nexperiment_type = [\n    experimentator.AsyncExperiment,\n    experimentator.CallbackedExperiment,\n    experimentator.tf2_experiment.TensorflowExperiment,\n    tasks.ballsize.BallSizeEstimation\n]\n\nbatch_size = 16\n\n# Dataset parameters\nside_length = 64\noutput_shape = (side_length, side_length)\n\nestimate_presence = False\npublic_dataset = False\nballistic_dataset = True\nestimate_mask = True\nestimate_offset = True\nassert (ballistic_dataset is False) or (public_dataset is False), \"annotated ballistic sequences from raw sequences dataset cannot be used when training on public dataset\"\n\n\n# DeepSport Dataset\ndataset_name = {\n    (True, False): \"ballsize_dataset_256_with_detections_from_model_trained_on_full_dataset_new.pickle\",\n    (True, True): \"ballsize_dataset_256_with_detections_from_model_trained_on_small_dataset_top4.pickle\",\n    (True, None): \"ballsize_dataset_256_with_detections_from_model_trained_on_small_dataset.pickle\",\n    (False, True): \"ballsize_dataset_256_no_detections.pickle\",\n    (False, False): \"ballsize_dataset_256_no_detections.pickle\",\n}[(estimate_presence, public_dataset)]\n\nscale = 1\nsize_min = 14*scale\nsize_max = 37*scale\nmax_shift = 10\n\ndata_extractor_transform = deepsport_utilities.transforms.DataExtractorTransform(\n    deepsport_utilities.ds.instants_dataset.views_transforms.AddImageFactory(),\n    deepsport_utilities.ds.instants_dataset.views_transforms.AddBallSizeFactory(origins=['annotation', 'interpolation', 'ballseg']),\n    deepsport_utilities.ds.instants_dataset.views_transforms.AddBallFactory(),\n    
deepsport_utilities.ds.instants_dataset.views_transforms.AddCalibFactory(),\n deepsport_utilities.ds.instants_dataset.views_transforms.AddBallPositionFactory() if estimate_offset else None,\n deepsport_utilities.ds.instants_dataset.views_transforms.AddBallSegmentationTargetViewFactory() if estimate_mask else None,\n deepsport_utilities.ds.instants_dataset.views_transforms.AddBallPresenceFactory() if estimate_presence else None,\n)\n\nrandom_size_cropper_transform = deepsport_utilities.ds.instants_dataset.BallViewRandomCropperTransform(\n output_shape=output_shape,\n size_min=size_min,\n size_max=size_max,\n margin=side_length//2-max_shift,\n padding=side_length,\n on_ball=True,\n)\n\nfixed_scale_cropper_transform = deepsport_utilities.ds.instants_dataset.BallViewRandomCropperTransform(\n output_shape=output_shape,\n scale_min=1*scale,\n scale_max=1*scale,\n margin=int(side_length*scale)//2-max_shift,\n padding=int(side_length*scale),\n on_ball=True,\n)\n\ndataset = experimentator.CachedPickledDataset(find(dataset_name))\ndataset = mlwf.FilteredDataset(dataset, lambda k: ballistic_dataset or bool(isinstance(k[0], deepsport_utilities.ds.instants_dataset.InstantKey)))\ndataset = mlwf.FilteredDataset(dataset, lambda k,v: estimate_presence or v.ball.origin in ['annotation', 'interpolation'] and bool(v.ball.visible))\nevaluation_dataset_name = \"ballistic_ball_views.pickle\"\nevaluation_dataset = experimentator.CachedPickledDataset(find(evaluation_dataset_name, verbose=True))\n\nglobals().update(locals()) # required to use locals() in lambdas\n\nrepetitions = 1\n\ndataset_splitter = deepsport_utilities.ds.instants_dataset.DeepSportDatasetSplitter(\n additional_keys_usage='testing2' if public_dataset else 'training',\n validation_pc=15,\n repetitions={'testing': repetitions})\n\nrandom_size_subsets = dataset_splitter(mlwf.TransformedDataset(dataset, [random_size_cropper_transform, data_extractor_transform]))\nfixed_scale_subsets = dataset_splitter(mlwf.TransformedDataset(dataset, [fixed_scale_cropper_transform, data_extractor_transform]))\n\nrandom_size_subsets[2].name = \"legacy_testing\"\nsubsets = [\n random_size_subsets[0],\n fixed_scale_subsets[1],\n fixed_scale_subsets[2],\n random_size_subsets[2],\n experimentator.Subset(\"3d_testset\", experimentator.SubsetType.EVAL, mlwf.TransformedDataset(evaluation_dataset, [fixed_scale_cropper_transform, data_extractor_transform]), repetitions=repetitions),\n experimentator.Subset(\"legacy_3d_testset\", experimentator.SubsetType.EVAL, mlwf.TransformedDataset(evaluation_dataset, [random_size_cropper_transform, data_extractor_transform]), repetitions=repetitions),\n]\nif public_dataset:\n random_size_subsets[3].name = \"legacy_testing2\"\n subsets.extend([\n fixed_scale_subsets[3],\n random_size_subsets[3],\n ])\n\n\ncallbacks = [\n experimentator.AverageMetrics([\".*loss\"]),\n experimentator.SaveWeights(),\n experimentator.SaveLearningRate(),\n experimentator.GatherCycleMetrics(),\n experimentator.LogStateDataCollector(),\n experimentator.LearningRateDecay(start=range(50,101,10), duration=2, factor=.5),\n tasks.ballsize.ComputeDiameterError(),\n tasks.detection.ComputeDetectionMetrics(origin='ballseg') if estimate_presence else None,\n tasks.detection.AuC(\"top1-AuC\", \"top1_metrics\") if estimate_presence else None,\n tasks.detection.AuC(\"top2-AuC\", \"top2_metrics\") if estimate_presence else None,\n tasks.detection.AuC(\"top4-AuC\", \"top4_metrics\") if estimate_presence else None,\n tasks.detection.AuC(\"top8-AuC\", \"top8_metrics\") if 
estimate_presence else None,\n tasks.detection.AuC(\"initial_top1-AuC\", \"initial_top1_metrics\") if estimate_presence else None,\n experimentator.wandb_experiment.LogStateWandB(\"validation_MAPE\", False),\n]\n\nalpha = 0.5 if estimate_presence else 0\nalpha_m = 1 if estimate_mask else 0\nalpha_o = 1 if estimate_offset else 0\nalpha_d = 1\nglobals().update(locals()) # required to use locals() in lambdas\nchunk_processors = [\n experimentator.tf2_chunk_processors.CastFloat(tensor_names=[\"batch_input_image\"]),\n lambda chunk: chunk.update({\"batch_input\": chunk[\"batch_input_image\"]}),\n models.other.GammaAugmentation(\"batch_input\"),\n experimentator.tf2_chunk_processors.Normalize(tensor_names=[\"batch_input\"]),\n models.tensorflow.TensorflowBackbone(\"vgg16.VGG16\", include_top=False),\n tasks.ballsize.BuildMaskFromLogits() if estimate_mask else None,\n models.other.LeNetHead(output_features=4),\n tasks.ballsize.NamedOutputs(estimate_presence=estimate_presence, estimate_offset=estimate_offset),\n models.other.BinaryCrossEntropyLoss(y_true=\"batch_ball_presence\", y_pred=\"predicted_presence\", name=\"classification\") if estimate_presence else None,\n models.other.HuberLoss(y_true='batch_ball_size', y_pred='predicted_diameter', name='regression'),\n tasks.ballsize.MaskSupervision() if estimate_mask else None,\n tasks.ballsize.OffsetSupervision() if estimate_offset else None,\n #lambda chunk: chunk.update({\"loss\": (alpha*chunk[\"classification_loss\"] + (1-alpha)*chunk[\"regression_loss\"]) if estimate_presence else chunk[\"regression_loss\"]}),\n tasks.ballsize.CombineLosses({'regression_loss': alpha_d, 'offset_loss': alpha_o, 'mask_loss': alpha_m}),\n lambda chunk: chunk.update({\"predicted_presence\": tf.nn.sigmoid(chunk[\"predicted_presence\"])}) if estimate_presence else None,\n]\n\nlearning_rate = 1e-4\noptimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n","repo_name":"gabriel-vanzandycke/deepsport","sub_path":"configs/ballsize.py","file_name":"ballsize.py","file_ext":"py","file_size_in_byte":7351,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"5"} +{"seq_id":"6029254234","text":"import scrapy\nimport re\nimport glob\n\n\nclass Sp(scrapy.Spider):\n name = \"sp1\"\n # path='/home/databiz32/Bureau/badcode/test/spider/curl/all_france3'\n #path = \"/home/databiz32/Bureau/badcode/test/spider/curl/res_14_10_2019__09_49_59\"\n #path = \"res_17_11_2019__08_51_12\"\n path = \"/home/mohamed/Desktop/Jobs-go/res_17_11_2019__08_51_12\"\n \n all = glob.glob(path + \"/*\")\n\n def start_requests(self):\n for e in self.all:\n url = \"file://{}\".format(e)\n req = scrapy.Request(url, callback=self.parse)\n yield req\n\n def parse(self, response):\n\n item = {}\n\n base = response.xpath(\"//ul/li\")\n j = 0\n all_desc = re.findall(\n '(?<=style=\"line-height:1.5em\">)(.*?)(?=)', response.text, flags=re.S\n )\n all_id = re.findall('
\", response.text)\n all_temps = [tt[i] for i in range(0, len(tt), 2)]\n dp = re.findall(\"il y a.*?jours\", response.text)\n all_date_pb = [dp[i] for i in range(0, len(dp), 2)]\n\n for sel in base:\n item = {}\n job_name = sel.xpath(\n \"div/div[2]/div[2]/div/div[2]/div[1]/text()\"\n ).extract_first()\n company = sel.xpath(\n \"div/div[2]/div[2]/div/div[2]/div[2]/div[1]/text()\"\n ).extract_first()\n adr = sel.xpath(\n \"div/div[2]/div[2]/div/div[2]/div[2]/div[2]/text()\"\n ).extract_first()\n site = sel.xpath(\n \"div/div[2]/div[2]/div/div[2]/div[2]/div[3]/text()\"\n ).extract_first()\n desc = all_desc[j]\n date_publication = sel.xpath(\n \"div/div/div/div/div/div/div/span[1]/span[2]/text()\"\n ).extract_first()\n temps_de_travail = sel.xpath(\n \"div/div/div[2]/div/div[2]/div[2]/div[4]/span[2]/span[2]/text()\"\n ).extract_first()\n AnnonceID = all_id[j]\n j += 1\n\n dispo = sel.xpath(\n \"div/div[2]/div[1]/div/div/div/g-scrolling-carousel/div[1]/div/div/span/a/@href\"\n ).extract_first()\n\n item[\"desc\"] = desc.replace(\"\\n\", \" \")\n item[\"job_name\"] = job_name\n item[\"company\"] = company\n item[\"adr\"] = adr\n item[\"site\"] = site\n item[\"AnnonceID\"] = AnnonceID\n item[\"temps_de_travail\"] = temps_de_travail\n item[\"date_publication\"] = date_publication\n item[\"Disponible sur\"] = dispo\n\n yield item\n","repo_name":"Sleymi/Jobs_France","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11537785026","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport FileHandle\n\nINPUT_FILE = \"/Users/binh/Desktop/ns3_play/data/interval-S1R1000-P2PR1000-Ue1-interval0-pacS900-pacN1000000.txt\"\nOUTPUT_FILE_PREFIX = \"/Users/binh/Desktop/ns3_play/back-up/traffic-latency-25dl\"\nOUTPUT_FILE_ALL = \"/Users/binh/Desktop/ns3_play/back-up/rate-latency-25-dl.txt\"\nINTERVAL = 1 ###1ms\n\nif __name__ == \"__main__\":\n result1 = {} ##result[traffic] = latency\n# result2 = {}\n# result3 = {}\n# result4 = {}\n# result5 = {}\n# result6 = {}\n result_list = []\n value_list = []\n values = []\n file = open (INPUT_FILE)\n line = file.readline()\n \n \n if (os.path.isfile(OUTPUT_FILE_ALL)): ##if output file not exist\n open(OUTPUT_FILE_ALL,'w').close()\n outfile_all = open (OUTPUT_FILE_ALL,'w+')\n outfile_all.write(\"#Traffic (MBps) Latency(ms) ULPDCP DLPDCP ULRLC DLRLC % xx\\n\")\n \n# ctr = 0\n while (line):\n# if (ctr == 0):\n# line = file.readline()\n# ctr += 1\n# continue\n tokens = {}\n values = {}\n tokens = line.split()\n rest = str (float (tokens[1])*1000)+\" \"+str (float (tokens[2])*1000)+\" \"+str (float (tokens[3])*1000)+\" \"+str (float (tokens[4])*1000 )\n rest += \" \"+ str (tokens[7])\n result1[ 900*8*1000000/float (tokens[0])/(1024*1024) ] = tokens[5] ##result[200B/1ms*# of UEs] = first column value of the file\n outfile_all.write(str (900*8*1000000/float (tokens[0])/(1024*1024)) +\" \"+tokens[5]+\" \"+rest+\"\\n\")\n# result2[400*1000*float (tokens[0])/1/(1024*1024)] = tokens[2]\n# result3[600*1000*float (tokens[0])/1/(1024*1024)] = tokens[3] \n# result4[600*1000*float (tokens[0])/1/(1024*1024)] = tokens[4] \n# result5[800*1000*float (tokens[0])/1/(1024*1024)] = tokens[5] \n# result6[900*1000*float (tokens[0])/1/(1024*1024)] = tokens[6] \n line = file.readline()\n value_list.append(rest)\n \n result_list.append(result1)\n# result_list.append(result2)\n# result_list.append(result3)\n# result_list.append(result4)\n# 
result_list.append(result5)\n# result_list.append(result6)\n \n# outfile_all = open (OUTPUT_FILE_ALL, 'w+')\n# outfile_all.write(\"#Traffic (MBps) Latency(ms)\\n\")\n \n# for r in result_list:\n## o_fn = OUTPUT_FILE_PREFIX+str (c)+\".txt\"\n# if (os.path.isfile(OUTPUT_FILE_ALL)): ##if output file not exist\n# open(OUTPUT_FILE_ALL,'w').close()\n# outfile_all = open (OUTPUT_FILE_ALL,'w+')\n# outfile_all.write(\"#Traffic (MBps) Latency(ms) ULPDCP DLPDCP ULRLC DLRLC\\n\")\n# c = 0\n# for key in r:\n# outfile_all.write(str (key)+\" \"+str (r[key]))\n# outfile_all.write(\" \"+value_list[c]+\"\\n\")\n## outfile_all.write(str (key)+\" \"+str (r[key])+\"\\n\")\n# c += 1\n print (\"Outfile = \"+OUTPUT_FILE_ALL)\n file.close()\n# outfile.close()\n outfile_all.close()\n\n\n ","repo_name":"binhqnguyen/lena-local","sub_path":"scripts/calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"17432314870","text":"from math import ceil\nfrom math import radians\n\nimport numpy as np\nfrom bresenham import bresenham\n\nfrom config.Config import Config\nfrom model.CTScan import CTScan\n\n\nclass SinogramConverter:\n _sinogram = None\n\n def __init__(self, sinogram, radius):\n self._step = Config.config[\"step\"]\n self._output_image = None\n self._sinogram = sinogram\n self._sinogram_matrix = np.matrix(sinogram)\n self._radius = radius\n self._center = (radius, radius)\n self._alpha = Config.config[\"initialAlpha\"]\n self._phi = radians(Config.config[\"phi\"])\n self._size = 2 * radius\n\n def convert(self):\n ct = CTScan()\n self._output_image = np.empty(shape=(self._size, self._size))\n steps_number = int(ceil(Config.config[\"totalRotation\"] / self._step)) + 1\n self._initialize_output_image()\n self._reconstruct_from_sinogram(ct, steps_number)\n return self._output_image\n\n def _reconstruct_from_sinogram(self, ct, steps_number):\n for i in range(0, steps_number - 1):\n self._alpha = i * self._step\n self._set_single_scan_coordinates(ct)\n self._reconstruct_single_sinogram_slice(ct, i)\n\n def _initialize_output_image(self):\n for i in range(0, self._output_image.shape[0] - 1):\n for j in range(0, self._output_image.shape[1] - 1):\n self._output_image[i][j] = 0\n\n def _set_single_scan_coordinates(self, ct):\n ct.emitter.calculate_position(self._radius, self._alpha, self._center)\n ct.set_detectors_coordinates_for_emitter(self._alpha, self._phi, self._radius, self._center)\n\n def _reconstruct_single_sinogram_slice(self, ct, i):\n for j in range(0, ct.number_of_detectors - 1):\n brightness = self._sinogram[i][j] / 255\n points = list(bresenham(ct.emitter.x, ct.emitter.y, ct.detectors[j].x, ct.detectors[j].y))\n for point in points:\n self._output_image[point[0] - 1][point[1] - 1] += brightness\n","repo_name":"sirKaskadir/ctscan-simulator","sub_path":"core/SinogramConverter.py","file_name":"SinogramConverter.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"18085393167","text":"import numpy as np\nfrom numpy import genfromtxt\nfrom sklearn.model_selection import train_test_split\nimport math\nimport code\n\ndef lr(X_train, y_train):\n from sklearn.linear_model import LinearRegression\n model = LinearRegression()\n model.fit(X_train, y_train)\n return model\n \n\ndef rmse(preds, labels):\n return np.sqrt(np.mean((np.array(preds) - np.array(labels)) ** 2))\n\n\ndef 
train(actorid_pagerank, movieid_actorids, movieid_rating):  # list of list, list\n\n    features = []\n    ratings = []\n    for movieid in range(len(movieid_rating)):\n        # print('movieid: ', movieid)\n        # label\n        rating = movieid_rating[movieid]\n        if rating == -1:\n            continue\n\n        # feature\n        feature_num = 6\n        pageranks = []\n        actorids = movieid_actorids[movieid]\n        for actorid in actorids:\n            if actorid < len(actorid_pagerank):\n                pageranks.append(actorid_pagerank[actorid])\n\n        # sort pageranks\n        pageranks.sort(reverse=True)\n\n        feature = []\n        for pagerank in pageranks[: feature_num]:\n            feature.append(pagerank)\n\n        if len(feature) == feature_num:\n            features.append(feature)\n            ratings.append(rating)\n    \n    # code.interact(local=locals())\n    # data preparing\n    X = np.array(features)\n    y = ratings\n\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 29)\n\n    model = lr(X_train, y_train)\n    \n    y_test_pred = model.predict(X_test)\n    \n\n    rmse_result = rmse(y_test_pred, y_test)\n    print('rmse: ', rmse_result)\n\n    code.interact(local=locals())\n\n    X_target = genfromtxt('target_feature.txt', delimiter=',')\n    y_pred = model.predict(X_target)\n    print('y_pred: ', y_pred)\n\n\nif __name__ == '__main__':\n\n    # get actor_pagerank\n    actorid_pagerank = [float(line.strip()) for line in open(\"actor_pagerank.txt\", 'r')]\n    movieid_actorids_tmp = genfromtxt('movie_actors.txt', delimiter=',', dtype=int)\n    movieid_rating_tmp = genfromtxt('movie_ratings.txt', delimiter=',')\n\n    movie_max_id = int(max(np.max(movieid_rating_tmp[:, 0]), np.max(movieid_actorids_tmp[:, 0])))\n\n    movieid_rating = -1 * np.ones((movie_max_id + 1))\n\n    for row in range(movieid_rating_tmp.shape[0]):\n        movieid = int(movieid_rating_tmp[row, 0])\n        rating = 0\n        if math.isnan(movieid_rating_tmp[row, 1]):\n            rating = -1\n        else:\n            rating = int(movieid_rating_tmp[row, 1])\n        movieid_rating[movieid] = rating\n\n\n    movieid_actorids = [[] for i in range(movie_max_id + 1)]\n    for row in range(movieid_actorids_tmp.shape[0]):\n        movieid = int(movieid_actorids_tmp[row, 0])\n        actorid = int(movieid_actorids_tmp[row, 1])\n        if movieid_rating[movieid] != -1:\n            movieid_actorids[movieid].append(actorid)\n\n    train(actorid_pagerank, movieid_actorids, movieid_rating)\n\n\n","repo_name":"WendyCui1018/Data-Mining-and-Problem-Solving-on-Large-scale-Data-Sets","sub_path":"Project4/code/part2_q12.py","file_name":"part2_q12.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6200174231","text":"from ReduceDFA import reduce_DFA\n\neps = 'e'\n\n## automata arguments\nQ = input(\"enter states: \").split()\nsymbols = input(\"enter symbols: \").split()\nq0 = input(\"enter start state: \")\nF = input(\"enter final state: \").split()\ndelta = {}\n\n## mapping\nfor Vn in Q:\n    delta[Vn] = {}\n    for Vt in symbols:\n        print(\"δ(\", Vn, \",\", Vt, \") =\", end=\" \")\n        delta[Vn][Vt] = list(sorted(input().split()))\n\n## epsilon-closure function\ndef ECLOSE(root_enfa_state: str):\n    global eclose_dict\n\n    if eclose_dict[root_enfa_state] is not None:\n        return eclose_dict[root_enfa_state]\n\n    eclose = [root_enfa_state]\n\n    if delta[root_enfa_state][eps] == []:\n        eclose_dict[root_enfa_state] = eclose\n        return eclose\n    else:\n        for enfa_state in delta[root_enfa_state][eps]:\n            ECLOSE(enfa_state)\n            eclose.extend(eclose_dict[enfa_state])\n        eclose = sorted(list(set(eclose)))\n        eclose_dict[root_enfa_state] = eclose\n        return eclose\n    \n## set epsilon-closure array None 
first\neclose_dict = {}\nfor enfa_state in Q:\n eclose_dict[enfa_state] = None\n\n## get first state's epsilon-closure\ndfa_states = [ECLOSE(q0)]\n\ndfa_delta = []\n\nnew_dfa_states = [ECLOSE(q0)]\n\n## after epsilon-closure, remove symbol epsilon\nsymbols.remove(eps)\n\n## get DFA\nwhile len(new_dfa_states) > 0:\n current_state = new_dfa_states[0]\n new_dfa_states = new_dfa_states[1:]\n\n for symbol in symbols:\n next_states = []\n for nfa_state in current_state:\n for x in delta[nfa_state][symbol]:\n if x not in next_states:\n next_states.append(x)\n next_states = sorted(next_states)\n\n eclose_union = []\n for state in next_states:\n eclose_union.extend(ECLOSE(state))\n eclose_union = sorted(set(eclose_union))\n dfa_delta.append([current_state, symbol, eclose_union])\n\n if eclose_union not in dfa_states:\n dfa_states.append(eclose_union)\n new_dfa_states.append(eclose_union)\n\ntmp = dfa_delta\ndfa_delta = []\n\nnew_states = []\n\nfor_reduced_final = []\nfor_reduced_delta = []\n\n## set values for reducing DFA\nfor v in tmp:\n if v not in dfa_delta:\n dfa_delta.append(v)\n if v[0] not in new_states:\n new_states.append(v[0])\n\nfor i in F:\n for j in new_states:\n if i in j and j:\n for_reduced_final.append(j)\n\nfor i in dfa_delta:\n if i[2]:\n for_reduced_delta.append([[i[0], i[1]], i[2]])\n\nfor i in new_states:\n if q0 in i:\n q0 = i\n break\n\n## print\nprint(\"[ e-closure ]\")\nprint(\"[[[current state, symbol], [next state]]\")\nfor i in for_reduced_delta:\n print(i)\n\nnew_states, dfa_delta, for_reduced_final = reduce_DFA(new_states, symbols, for_reduced_delta, q0, for_reduced_final)\nprint(\"----------------------------------------\")\n\nprint(\"[ reduced DFA ]\")\nprint(\"[[[current state, symbol], [next state]]\")\nfor i in for_reduced_delta:\n print(i)","repo_name":"pgrgrgrgr/NFAtoDFA-with-epsilon","sub_path":"src/eNFAtoDFA.py","file_name":"eNFAtoDFA.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"404948341","text":"import json\nimport boto3\nimport dns.resolver\nimport gitlab\nfrom datetime import datetime\n\n# Load configuration from the EC2 Parameter Store\n# Transforms: /red-x/gitlab/token\n# Into: {'red-x': {'gitlab': {'token': 'value'}}}\ndef load_config(ssmPath):\n ssm = boto3.client('ssm')\n resp = ssm.get_parameters_by_path(\n Path = ssmPath,\n Recursive=True,\n WithDecryption=True\n )\n config = {}\n\n for param in resp['Parameters']:\n path = param['Name'].split('/')\n current_level = config\n for level in path:\n if(level == '' or level == 'red-x'):\n continue\n if(level not in current_level):\n current_level[level] = {}\n if(level == path[-1]):\n current_level[level] = param['Value']\n else:\n current_level = current_level[level]\n return config\n\n# Open or close GitLab issues based on delegation errors discovered by red-x.\n# Opens an issue in the configured project for delegation errors and closes\n# any open issues when it no longer identifies that error.\ndef notify_gitlab_issues(config, errors):\n # Load up all open issues in the configured project with label 'red-x'.\n gl = gitlab.Gitlab(config['gitlab']['endpoint'], config['gitlab']['token'], api_version=4)\n project = gl.projects.get(config['gitlab']['project'])\n issues = project.issues.list(labels=['red-x', 'record'], state='opened')\n zones_with_issues = [i.title for i in issues]\n\n for error in errors:\n # This error already has an issue\n if f\"{error} abandoned record\" in 
zones_with_issues:\n print(f\"ALREADY FILED! {error}! Skipping\")\n zones_with_issues.remove(f\"{error} abandoned record\")\n # This error needs a new issue created\n else:\n error_json = json.dumps(errors[error], indent=1)\n print(f\"FILING: {error}!\")\n issue = project.issues.create({'title': f\"{error} abandoned record\",\n 'description': f\"\"\"```\n{error_json}\n```\"\"\",\n 'labels': ['red-x', 'record']})\n\n # These issues no longer have a delegation error associated with them\n # and can be closed.\n for leftover in zones_with_issues:\n print(f\"CLOSING ISSUE: {leftover}\")\n issue = [x for x in issues if x.title == leftover][0]\n issue.notes.create({\"body\": \"Subsequent runs of red-x no longer see this domain as an issue. Automatically closing ticket.\"})\n issue.state_event = \"close\"\n issue.save()\n\ndef eligible_cname(record):\n if 'ResourceRecords' in record and ('elasticbeanstalk.com' in record['ResourceRecords'][0]['Value'] or 'cloudfront.net' in record['ResourceRecords'][0]['Value']):\n return True\n return False\n\ndef eligible_alias(record):\n if 'AliasTarget' in record and ('elasticbeanstalk.com' in record['AliasTarget']['DNSName'] or 'cloudfront.net' in record['AliasTarget']['DNSName']):\n return True\n return False\n\n# Send a summary of results to a configured SNS topic\ndef notify_sns_topic(config, errors):\n if len(errors) == 0:\n print(\"No record errors, not sending SNS notification...\")\n return\n\n notification_time = str(datetime.now())\n sns = boto3.client('sns')\n error_text = json.dumps(errors, indent=2)\n sns.publish(\n TargetArn=config['sns']['topic'],\n Subject=f\"Red-X Record Errors @ {notification_time}\",\n Message=json.dumps({'default': f\"\"\"\nRed-X has run and found the following DNS records pointing to inactive elasticbeanstalk or cloudfront domains. 
You should take action to prevent domain hijacking!\n\n\"\"\" + error_text}),\n MessageStructure='json'\n )\n\ndef handler(event, context):\n config = load_config('/red-x/')\n r53 = boto3.client('route53')\n zone_id = config['route53']['zoneId']\n\n records = []\n nextName = None\n nextType = None\n\n # Fetch all records in the requested hosted zone\n while True:\n if nextName and nextType:\n response = r53.list_resource_record_sets(\n HostedZoneId = zone_id,\n StartRecordName = nextName,\n StartRecordType = nextType\n )\n else:\n response = r53.list_resource_record_sets(\n HostedZoneId = zone_id\n )\n\n records = records + response['ResourceRecordSets']\n\n if 'NextRecordName' in response and 'NextRecordType' in response:\n nextName = response['NextRecordName']\n nextType = response['NextRecordType']\n else:\n break\n\n # Discard everything except beanstalk-related records\n eligible_cnames = [{'name': x['Name'], 'value': x['ResourceRecords'][0]['Value'], 'type': x['Type']} for x in records if eligible_cname(x)]\n eligible_aliases = [{'name': x['Name'], 'value': x['AliasTarget']['DNSName'], 'type': x['Type']} for x in records if eligible_alias(x)]\n eligible_records = eligible_cnames + eligible_aliases\n\n violating_records = {}\n\n resolver = dns.resolver.Resolver(configure=False)\n resolver.timeout = 5\n\n # For each record pointing to beanstalk\n for record in eligible_records:\n violations = []\n if record['type'] == 'CNAME':\n violations.append(f\"WARN: You should prefer A ALIAS over CNAME for {record['name']}\")\n try:\n answer = dns.resolver.query(record['value'])\n print(f\"OK: {record['name']}: {', '.join(str(x) for x in answer)}\")\n except dns.resolver.NXDOMAIN:\n violations.append(f\"CRIT: {record['name']} points to non-existent beanstalk name: {record['value']}\")\n \n if len(violations) > 0:\n violating_records[record['name']] = violations\n\n # Open or close GitLab issues for these abandoned records.\n if('gitlab' in config):\n notify_gitlab_issues(config, violating_records)\n\n # Notify an SNS topic of all abandoned records.\n if('sns' in config):\n notify_sns_topic(config, violating_records)\n\n return {\n \"message\": \"Completed checking for abandoned records.\",\n \"errors\": violating_records\n }\n","repo_name":"Cimpress-MCP/red-x","sub_path":"check_abandoned_records.py","file_name":"check_abandoned_records.py","file_ext":"py","file_size_in_byte":6167,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"70617171033","text":"with open('input2.txt', 'r') as file:\n data = file.read().split('\\n')\n \nv = 0\niv = 0\nfor line in data:\n if line.count(' ') != 2:\n continue\n r, l, p = line.split(' ')\n min_, max_ = map(int, r.split('-'))\n min_ -= 1\n max_ -= 1\n l = l.replace(':', '')\n if (len(p) > max_ and ((p[min_] == l) ^ (p[max_] == l))) or (min_ < len(p) < max_ and p[min_] == l):\n v += 1\n else:\n # print(r,l,p)\n iv += 1\n \nprint(v, iv)\n ","repo_name":"BhasherBEL/ProgrammingChallenges","sub_path":"AdventOfCode 2020/old/aoc2.py","file_name":"aoc2.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"30395061724","text":"from .app import MatchingEngineService\nfrom .ffapi import APITester\n\nfrom ffengine.simulation import TestCase\nfrom ffengine.optim.engines import OMMEngine\nfrom ffengine.data import MatchSet, Match\n\nimport tomodachi\nimport json\n\nclass 
TestMatchingEngineService(MatchingEngineService):\n\n @tomodachi.http(\"GET\", r\"/health\")\n async def health_check(self, request):\n return 200, json.dumps({\"status\": \"healthy - testing mode\"})\n\n \n @tomodachi.schedule(immediately=True)\n def api_test(self):\n\n self.DEBUG_MODE = True # NOTE: if this is not set, assertion endpoints will fail (currently this enables logging of matches)\n api_test_config = json.load(open(\"service/api_test_config.json\"))\n\n ## setup: create test data\n \n # define sets for test case\n I1 = list(range(5))\n J1 = list(range(5))\n K1 = list(range(3))\n\n self.MODEL_CONFIG['unit_tcost'] = 1 ## Any parameters that the model needs should be set here\n\n test_case1 = TestCase(\n size_I=len(I1), size_J=len(J1), size_K=len(K1),\n Q_K={k: 1/len(K1) for k in K1}, P_K={0: 5, 1:2, 2: 1},\n D_scap_p={0: .7, 1: .3}, D_dcap_p={0: 1},\n s_bounds=lambda c: (1,10) if c == 0 else (10, 20),\n d_bounds=lambda c: (3, 7),\n s_subsize={i: len(K1) for i in I1},\n lb_fn= lambda k, i: i - int(i > 1),\n ub_fn= lambda c, p: p + 1,\n dist_bounds= (3, 10),\n unit_tcost=self.MODEL_CONFIG['unit_tcost']\n )\n\n ## setup API for test\n print(\"SETTING UP API\")\n api = APITester(api_test_config)\n\n # setup and signin\n api.set_test_user_id(\"u_34enPEkffuV9dJdMhabaMT\")\n api.init_test_user_proxy()\n api.signin()\n\n # create orders\n api.fill_test_data(test_case1.order_set)\n\n # run matches\n stats, matches = test_case1.run(OMMEngine) # This should be done after API setup so that test case order_ids can be updated with API ids\n\n self.TRUE_MATCHES = matches\n\n\n @tomodachi.http(\"GET\", r\"/test-results-data\")\n async def test_results_data(self, request):\n '''get detailed matches for comparison if assertion fails'''\n true_matches = self.TRUE_MATCHES\n # while matching_not_done -> spin\n true_results = { match.match_id: match.to_dict() for match in true_matches.iter_matches()}\n orderset_id = list(self._matchsets.keys())[-1]\n print(orderset_id, type(orderset_id))\n # get matches from api, assume round 0 is the one we want\n results = { match.match_id: match.to_dict() for match in self._matchsets[orderset_id].iter_matches()}\n\n return 200, json.dumps({\"true\": true_results, \"api\": results})\n\n\n @tomodachi.http(\"GET\", r\"/test-result\")\n async def test_result(self, request):\n '''Assert that matches are the same before passing to API and after'''\n import pandas as pd\n true_matches = self.TRUE_MATCHES\n # while matching_not_done -> spin\n true_results = { match.match_id: match.to_dict() for match in true_matches.iter_matches()}\n orderset_id = list(self._matchsets.keys())[-1]\n\n # get matches from api, assume round 0 is the one we want\n results = { match.match_id: match.to_dict() for match in self._matchsets[orderset_id].iter_matches()}\n\n dfTrue = pd.DataFrame(true_results).T.set_index(['buyOrder', 'sellOrder']).rename(lambda colname: colname + '_true', axis=1)\n dfActual = pd.DataFrame(results).T.set_index(['buyOrder', 'sellOrder'])\n\n comparer = pd.concat([dfActual, dfTrue], axis=1).drop(['matchId'], axis=1)\n\n test_pass = (comparer['volume'] == comparer['volume_true']).all() and (comparer['priceCents'] == comparer['priceCents_true']).all()\n return 200, json.dumps({\"test-result\": str(test_pass)})\n\n","repo_name":"Field-Fresh/fieldfresh-matching-engine","sub_path":"service/app_tester.py","file_name":"app_tester.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} 
+{"seq_id":"19604600255","text":"from __future__ import print_function\n\nimport sys\nimport string\nfrom git_requests import *\n\n# --------------- Helpers that build all of the responses ----------------------\n\ndefault_user = \"rails\"\ndefault_repo = \"rails\"\n#default_user = \"rkinney4\"\n#default_repo = \"HopHacks2016\"\ndefault_branch = \"master\"\n\n# Provided function to create speechlet responses to send to Alexa\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': \"SessionSpeechlet - \" + title,\n 'content': \"SessionSpeechlet - \" + output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n \n# Function to create responses to send to Alexa\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\n# Get the branch from the attributes, or return the default branch\ndef get_branch_from_attributes(session_attributes):\n if 'currentBranch' in session_attributes:\n return session_attributes['currentBranch']\n else:\n return default_branch\n\n# --------------- Functions that control the skill's behavior ------------------\n\n# Welcome message for the skill\ndef get_welcome_response():\n\n session_attributes = {}\n card_title = \"Welcome\"\n speech_output = \"Welcome to the Alexa git interface. \"\n\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Please say something like : get the last commit, or \\\n list all branches.\"\n \n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n# End the session\ndef handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"Goodbye and remember to commit early and often\"\n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n# Get the last n commits and say them\ndef get_last_n_commits_from_session(intent, session):\n global default_user, default_repo\n\n session_attributes = session.get('attributes', {})\n card_title = intent['name']\n should_end_session = False\n reprompt_text = \"I didn't quite git that\"\n\n\n branch = get_branch_from_attributes(session_attributes)\n\n if 'Num' in intent['slots']:\n num = int(intent['slots']['Num']['value'])\n speech_output = last_n_commits(default_user, default_repo, branch=branch, n=num)\n else :\n speech_output = \"I'm not sure how many commits you want. 
\" \\\n \"Please try again.\"\n \n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n# Get the last commit and say it\ndef get_last_commit_from_session(intent, session):\n session_attributes = session.get('attributes', {})\n card_title = intent['name']\n should_end_session = False\n reprompt_text = \"I didn't quite git that\"\n\n branch = get_branch_from_attributes(session_attributes)\n\n speech_output = last_n_commits(default_user, default_repo, branch=branch,n=1)\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n# List all the branches of the repository\ndef get_branches_from_session(intent, session):\n session_attributes = session.get('attributes', {})\n card_title = intent['name']\n should_end_session = False\n reprompt_text = \"I didn't quite git that\"\n\n speech_output = list_branches(default_user, default_repo)\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n# Switch to the specified branch number\ndef switch_branches_from_session(intent, session):\n session_attributes = session.get('attributes', {})\n\n card_title = intent['name']\n should_end_session = False\n reprompt_text = \"I didn't quite git that\"\n\n if 'Num' in intent['slots']:\n num = int(intent['slots']['Num']['value'])\n (new_branch, speech_output) = switch_branch(default_user, default_repo, num)\n\n if new_branch != \"\":\n session_attributes['currentBranch'] = new_branch\n else :\n speech_output = \"I'm not sure which branch you want to switch to. \" \\\n \"Please try again.\"\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n# Switch to branch master\ndef switch_to_master_from_session(intent, session):\n session_attributes = session.get('attributes', {})\n\n card_title = intent['name']\n should_end_session = False\n reprompt_text = \"I didn't quite git that\"\n\n session_attributes['currentBranch'] = default_branch\n\n speech_output = \"Switched to branch master\"\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n \n# Say the current branch\ndef get_current_branch_from_session(intent, session):\n session_attributes = session.get('attributes', {})\n card_title = intent['name']\n should_end_session = False\n reprompt_text = \"I didn't quite git that\"\n\n\n branch = get_branch_from_attributes(session_attributes)\n\n speech_output = \"The current branch is \" + branch\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n# Get contributors for a repository, may say all or simply the number\n# if there are too many\ndef get_contributors_from_session(intent, session):\n session_attributes = session.get('attributes', {})\n card_title = intent['name']\n should_end_session = False\n reprompt_text = \"I didn't quite git that\"\n\n speech_output = get_contributors(default_user, default_repo)\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n# Get the top contributors to the repository\ndef get_top_contributors_from_session(intent, session):\n session_attributes = session.get('attributes', 
{})\n card_title = intent['name']\n should_end_session = False\n reprompt_text = \"I didn't quite git that\"\n\n speech_output = get_top_three_contributors(default_user, default_repo)\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n# --------------- Events ------------------\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"GetNCommitsIntent\":\n return get_last_n_commits_from_session(intent, session)\n elif intent_name == \"GetLastCommitIntent\":\n return get_last_commit_from_session(intent, session)\n elif intent_name == \"GetBranchesIntent\":\n return get_branches_from_session(intent, session)\n elif intent_name == \"SwitchBranchIntent\":\n return switch_branches_from_session(intent, session)\n elif intent_name == \"SwitchToMasterIntent\":\n return switch_to_master_from_session(intent, session)\n elif intent_name == \"GetCurrentBranchIntent\":\n return get_current_branch_from_session(intent, session)\n elif intent_name == \"GetContributorsIntent\":\n return get_contributors_from_session(intent, session)\n elif intent_name == \"GetTopContributorsIntent\":\n return get_top_contributors_from_session(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n\n# --------------- Main handler ------------------\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) 
The JSON body of the request is provided in the event parameter.\n \"\"\"\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n","repo_name":"rkinney4/HopHacks2016","sub_path":"git_alexa.py","file_name":"git_alexa.py","file_ext":"py","file_size_in_byte":10438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73511720473","text":"import unittest\n\nfrom dns import zone as dnszone\nfrom oslo_config import cfg\nfrom webtest import TestApp\n\nfrom designate.api import admin as admin_api\nfrom designate.api import middleware\nfrom designate.tests.test_api.test_v2 import ApiV2TestCase\n\n\ncfg.CONF.import_opt('enabled_extensions_admin', 'designate.api.admin',\n group='service:api')\n\n\nclass APIV2ZoneImportExportTest(ApiV2TestCase):\n def setUp(self):\n super(APIV2ZoneImportExportTest, self).setUp()\n\n self.config(enable_api_admin=True, group='service:api')\n self.config(enabled_extensions_admin=['zones'], group='service:api')\n # Create the application\n adminapp = admin_api.factory({})\n # Inject the NormalizeURIMiddleware middleware\n adminapp = middleware.NormalizeURIMiddleware(adminapp)\n # Inject the FaultWrapper middleware\n adminapp = middleware.FaultWrapperMiddleware(adminapp)\n # Inject the TestContext middleware\n adminapp = middleware.TestContextMiddleware(\n adminapp, self.admin_context.project_id,\n self.admin_context.project_id)\n # Obtain a test client\n self.adminclient = TestApp(adminapp)\n\n # # Zone import/export\n def test_missing_origin(self):\n fixture = self.get_zonefile_fixture(variant='noorigin')\n\n response = self.client.post_json('/zones/tasks/imports', fixture,\n headers={'Content-type': 'text/dns',\n 'X-Test-Role': 'member'})\n\n import_id = response.json_body['id']\n self.wait_for_import(import_id, error_is_ok=True)\n\n url = '/zones/tasks/imports/%s' % import_id\n\n response = self.client.get(url, headers={'X-Test-Role': 'member'})\n self.assertEqual('ERROR', response.json['status'])\n origin_msg = (\"The $ORIGIN statement is required and must be the\"\n \" first statement in the zonefile.\")\n self.assertEqual(origin_msg, response.json['message'])\n\n def test_missing_soa(self):\n fixture = self.get_zonefile_fixture(variant='nosoa')\n\n response = self.client.post_json('/zones/tasks/imports', fixture,\n headers={'Content-type': 'text/dns',\n 'X-Test-Role': 'member'})\n\n import_id = response.json_body['id']\n self.wait_for_import(import_id, error_is_ok=True)\n\n url = '/zones/tasks/imports/%s' % import_id\n\n response = self.client.get(url, headers={'X-Test-Role': 'member'})\n self.assertEqual('ERROR', response.json['status'])\n origin_msg = (\"Malformed zonefile.\")\n self.assertEqual(origin_msg, response.json['message'])\n\n def test_malformed_zonefile(self):\n fixture = self.get_zonefile_fixture(variant='malformed')\n\n response = self.client.post_json('/zones/tasks/imports', fixture,\n headers={'Content-type': 'text/dns',\n 'X-Test-Role': 'member'})\n\n import_id = response.json_body['id']\n 
self.wait_for_import(import_id, error_is_ok=True)\n\n url = '/zones/tasks/imports/%s' % import_id\n\n response = self.client.get(url, headers={'X-Test-Role': 'member'})\n self.assertEqual('ERROR', response.json['status'])\n origin_msg = (\"Malformed zonefile.\")\n self.assertEqual(origin_msg, response.json['message'])\n\n def test_import_export(self):\n # Since v2 doesn't support getting records, import and export the\n # fixture, making sure they're the same according to dnspython\n post_response = self.client.post('/zones/tasks/imports',\n self.get_zonefile_fixture(),\n headers={'Content-type': 'text/dns',\n 'X-Test-Role': 'member'})\n\n import_id = post_response.json_body['id']\n self.wait_for_import(import_id)\n\n url = '/zones/tasks/imports/%s' % import_id\n response = self.client.get(url, headers={'X-Test-Role': 'member'})\n\n self.policy({'zone_export': '@'})\n get_response = self.adminclient.get('/zones/export/%s' %\n response.json['zone_id'],\n headers={'Accept': 'text/dns'})\n exported_zonefile = get_response.body.decode('utf-8')\n\n imported = dnszone.from_text(self.get_zonefile_fixture())\n exported = dnszone.from_text(exported_zonefile)\n # Compare SOA emails, since zone comparison takes care of origin\n imported_soa = imported.get_rdataset(imported.origin, 'SOA')\n imported_email = imported_soa[0].rname.to_text()\n exported_soa = exported.get_rdataset(exported.origin, 'SOA')\n exported_email = exported_soa[0].rname.to_text()\n self.assertEqual(imported_email, exported_email)\n # Delete SOAs since they have, at the very least, different serials,\n # and dnspython considers that to be not equal.\n imported.delete_rdataset(imported.origin, 'SOA')\n exported.delete_rdataset(exported.origin, 'SOA')\n # Delete NS records, except for delegated subdomains\n imported.delete_rdataset(imported.origin, 'NS')\n exported.delete_rdataset(exported.origin, 'NS')\n self.assertEqual(imported, exported)\n\n def test_delete_import(self):\n post_response = self.client.post('/zones/tasks/imports',\n self.get_zonefile_fixture(),\n headers={'Content-type': 'text/dns',\n 'X-Test-Role': 'member'})\n\n import_id = post_response.json_body['id']\n\n self.wait_for_import(import_id)\n\n delete_response = self.client.delete(\n '/zones/tasks/imports/%s' % import_id,\n headers={'X-Test-Role': 'member'}\n )\n\n self.assertEqual('', delete_response.text)\n self._assert_exception(\n 'not_found', 404, self.client.get, '/zones/imports/%s' % import_id\n )\n\n # Metadata tests\n def test_metadata_exists_imports(self):\n response = self.client.get('/zones/tasks/imports',\n headers={'X-Test-Role': 'member'})\n\n # Make sure the fields exist\n self.assertIn('metadata', response.json)\n self.assertIn('total_count', response.json['metadata'])\n\n def test_metadata_exists_exports(self):\n response = self.client.get('/zones/tasks/imports',\n headers={'X-Test-Role': 'member'})\n\n # Make sure the fields exist\n self.assertIn('metadata', response.json)\n self.assertIn('total_count', response.json['metadata'])\n\n @unittest.skip(\"See bug 1582241 and 1570859\")\n def test_total_count_imports(self):\n response = self.client.get('/zones/tasks/imports',\n headers={'X-Test-Role': 'member'})\n\n # There are no imported zones by default\n self.assertEqual(0, response.json['metadata']['total_count'])\n\n # Create a zone import\n self.client.post('/zones/tasks/imports',\n self.get_zonefile_fixture(),\n headers={'Content-type': 'text/dns',\n 'X-Test-Role': 'member'})\n\n response = self.client.get('/zones/tasks/imports',\n 
headers={'X-Test-Role': 'member'})\n\n        # Make sure total_count picked it up\n        self.assertEqual(1, response.json['metadata']['total_count'])\n\n    def test_total_count_exports(self):\n        response = self.client.get('/zones/tasks/exports',\n                                   headers={'X-Test-Role': 'member'})\n\n        # There are no exported zones by default\n        self.assertEqual(0, response.json['metadata']['total_count'])\n\n    def test_create_export(self):\n        zone = self.create_zone()\n        create_response = self.client.post(\n            '/zones/%s/tasks/export' % zone['id'],\n            headers={'X-Test-Role': 'member'}\n        )\n\n        self.assertEqual('PENDING', create_response.json_body['status'])\n        self.assertEqual(zone['id'], create_response.json_body['zone_id'])\n\n        get_response = self.client.get(\n            '/zones/tasks/exports/%s' % create_response.json_body['id'],\n            headers={'X-Test-Role': 'member'}\n        )\n\n        self.assertEqual('PENDING', get_response.json_body['status'])\n        self.assertEqual(zone['id'], get_response.json_body['zone_id'])\n\n    def test_update_export(self):\n        zone = self.create_zone()\n        create_response = self.client.post(\n            '/zones/%s/tasks/export' % zone['id'],\n            headers={'X-Test-Role': 'member'}\n        )\n\n        self.assertEqual('PENDING', create_response.json_body['status'])\n        self.assertEqual(zone['id'], create_response.json_body['zone_id'])\n\n        delete_response = self.client.delete(\n            '/zones/tasks/exports/%s' % create_response.json_body['id'],\n            headers={'X-Test-Role': 'member'}\n        )\n\n        self.assertEqual('', delete_response.text)\n\n        self._assert_exception(\n            'zone_export_not_found', 404, self.client.get,\n            '/zones/tasks/exports/%s' % create_response.json_body['id'],\n            headers={'X-Test-Role': 'member'}\n        )\n\n    def test_delete_export(self):\n        zone = self.create_zone()\n        create_response = self.client.post(\n            '/zones/%s/tasks/export' % zone['id'],\n            headers={'X-Test-Role': 'member'}\n        )\n\n        self.assertEqual('PENDING', create_response.json_body['status'])\n        self.assertEqual(zone['id'], create_response.json_body['zone_id'])\n\n        delete_response = self.client.delete(\n            '/zones/tasks/exports/%s' % create_response.json_body['id'],\n            headers={'X-Test-Role': 'member'}\n        )\n\n        self.assertEqual('', delete_response.text)\n\n        self._assert_exception(\n            'zone_export_not_found', 404, self.client.get,\n            '/zones/tasks/exports/%s' % create_response.json_body['id'],\n            headers={'X-Test-Role': 'member'}\n        )\n","repo_name":"openstack/designate","sub_path":"designate/tests/test_api/test_v2/test_import_export.py","file_name":"test_import_export.py","file_ext":"py","file_size_in_byte":10235,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"5"} +{"seq_id":"34353393937","text":"import heapq\nfrom typing import List\n\n\nclass Solution:\n    def furthestBuilding(self, heights: List[int], bricks: int, ladders: int) -> int:\n        \"\"\"\n        [4,2,7,6,9,14,12]\n         i\n        j =  - 5 - 3 5 -\n                      3\n        [5]    \n        \"\"\"\n        hl = []  # min-heap of the climbs currently assigned to ladders\n        ans = 0\n        for i in range(1,len(heights)):\n            ans = i\n            if heights[i] <= heights[i-1]:\n                continue\n            diff = heights[i]-heights[i-1]\n            heapq.heappush(hl,diff)\n            # length of current diffs > available ladders: cover the smallest climb with bricks instead\n            while hl and len(hl) > ladders:\n                x = heapq.heappop(hl)\n                if x > bricks:\n                    return i - 1\n                bricks -= x\n        # print(ans,hl)\n        return ans\n        ","repo_name":"MicheleTsigab/Competitive-Programming","sub_path":"1642-furthest-building-you-can-reach/1642-furthest-building-you-can-reach.py","file_name":"1642-furthest-building-you-can-reach.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"6776772856","text":"from aiogram import types, Dispatcher\n\nfrom app import BotCommand\nfrom app.database import Database\nfrom app.keyboards import main_markup, main_markup_admin\nfrom config import ADMIN_ID\n\n\nclass BaseCommand(BotCommand):\n def __init__(self, dp: Dispatcher, db: Database):\n super().__init__(dp)\n self.db = db\n\n def register_command(self):\n @self.dp.message_handler(commands=['start'])\n async def start(message: types.Message):\n await self.db.add_user(message.from_user.id)\n await message.answer_sticker('CAACAgIAAxkBAANDZIrYrc2sGVZhGzkkxU8WWI3dz4MAAnUZAAKOqpFIZWbadXVVKLYvBA')\n await message.answer('hello',\n reply_markup=main_markup if message.from_user.id != int(\n ADMIN_ID) else main_markup_admin)\n\n @self.dp.message_handler(text='Корзина')\n async def give_trash(message: types.Message):\n await message.answer('Корзина пуста')\n\n @self.dp.message_handler(text='Контакты')\n async def give_contacts(message: types.Message):\n await message.answer('Обращайтесь сюда')\n\n @self.dp.message_handler()\n async def other(message: types.Message):\n await message.answer('Я тебя не понимаю')\n\n\n\n\n","repo_name":"Potisin/aiogram_shop_bot","sub_path":"app/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22430649597","text":"from django import forms\nfrom django.forms import fields\nfrom .models import Resume\n\n\nGENDER_CHOICES = [\n ('Male', 'Male'),\n ('Female', 'Female')\n]\n\nJOB_CITY_CHOICE = [\n ('Dhaka', 'Dhaka'),\n ('Comilla', 'Comilla'),\n ('Sylhet', 'Sylhet'),\n ('Chittagong', 'Chittagong'),\n ('Gazipur', 'Gazipur'),\n ('Chadpur', 'Chadpur')\n]\n\n\nclass ResumeForm(forms.ModelForm):\n gender = forms.ChoiceField(choices=GENDER_CHOICES, widget=forms.RadioSelect)\n job_city = forms.MultipleChoiceField(choices=JOB_CITY_CHOICE, widget=forms.CheckboxSelectMultiple, label=\"Preferred Job Locations\")\n class Meta:\n model = Resume\n fields = ['name', 'dob', 'gender', 'locality', 'city', 'pin', 'state', 'mobile', 'email', 'job_city', 'profile_image', 'my_files']\n\n labels = {'name':'Full Name', 'dob': 'Date of Birth', 'pin':'Pin Code', 'mobile':'Mobile No.', 'email':'Email ID','profile_image':'Profile Image', 'my_files':'Document'}\n\n widgets = {\n 'name' :forms.TextInput(attrs={'class':'form-control'}),\n 'dob':forms.DateInput(attrs={'class':'form-control', 'id':'datepicker'}),\n 'locality':forms.TextInput(attrs={'class':'form-control'}),\n 'city':forms.TextInput(attrs={'class':'form-control'}),\n 'pin':forms.NumberInput(attrs={'class':'form-control'}),\n 'state':forms.Select(attrs={'class':'form-select'}),\n 'mobile':forms.NumberInput(attrs={'class':'form-control'}),\n 'email':forms.EmailInput(attrs={'class':'form-control'}),\n }","repo_name":"alaminbhuyan/DJANGO-2021","sub_path":"ALL Project/Project with env/resumeUploder/myapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6945640391","text":"from __future__ import annotations\nfrom .....util import Object, functions\nfrom .....core.asynchronous import Promise\nfrom typing import TYPE_CHECKING, Optional\nif TYPE_CHECKING:\n from ...render_tree import RenderNode\n from ....py_component import PyComponent\nfrom .style_container import Styles, css\nfrom .state_maintainers import ClassList, StateContainer\nfrom .....core.services.events 
import EventEmitter, Event\nfrom ....page_loader_helper import link_dom\n\n\nclass DOMNode(Object):\n '''\n The building block of the dom\n NOTE: Remember that the renderNode is created in the layouting step and so any element created after that must have its render node added\n NOTE: Remember that styles are computed and set after creation of the cssom, so any element created after this must have its styles calculated\n '''\n __ignore__ = {'parent'}\n # __slots__ = ( # Does this really help?\n # 'parent',\n # 'children',\n # 'tag',\n # 'id',\n # 'class_list',\n # 'state',\n # 'styles',\n # 'render_node',\n # '_visible',\n # 'attrs',\n # 'content',\n # 'image',\n # 'event_emitter',\n # )\n\n def __init__(\n self,\n parent : DOMNode = None,\n children : list[DOMNode] = None,\n tag : str = None,\n class_list : ClassList = None,\n id : Optional[str] = None,\n attrs : Object = None,\n styles : StyleContainer = None,\n render_node : RenderNode = None,\n content : str = None,\n image : Promise = None,\n component : PyComponent = None\n ):\n super().__init__()\n # Heirarchy\n self.parent = parent\n '''The heirarchichal parent of this element'''\n self.children = children if children is not None else []\n '''List of elements which are children of me'''\n \n # Descriptors\n self.tag = tag if tag is not None else 'div'\n '''The tag name of the element'''\n self.id = id\n '''The id of this element (as specified in the html)'''\n self.class_list = class_list if class_list is not None else ClassList()\n '''A collection of classes associated with the element'''\n self.class_list.element = self\n\n self.state = StateContainer('') # `''` implies default state\n '''The current state of the element'''\n self.state.element = self\n\n # View\n self.styles = styles if styles is not None else Styles()\n '''The styles associated with the element'''\n self.render_node : RenderNode = render_node\n '''The object tasked with rendering me on the screen'''\n self._visible = False\n '''A property that is to be used in the render tree'''\n\n # Misc\n self.attrs = attrs if attrs is not None else Object()\n '''An object that contains all properties defined in the html'''\n\n self.content = content if content is not None else ''\n '''\n The textual content in the element\n NOTE: Only the content of span elements is shown\n '''\n\n self.image = image\n '''\n The image content in the element\n NOTE: Only the image of an image element is shown\n '''\n\n self.component = component\n '''\n The component object\n NOTE: Only components have this property set\n '''\n\n self.event_emitter = EventEmitter(self)\n '''The entity tasked with emitting dom events'''\n\n self.event_emitter.subscribe(Event.Types.enter, lambda e: self.event_emitter.dispatch(Event(Event.Types.hover_start, self, True)) if e.target == self else None)\n self.event_emitter.subscribe(Event.Types.leave, lambda e: self.event_emitter.dispatch(Event(Event.Types.hover_end, self, True)) if e.target == self else None)\n\n self.event_emitter.subscribe(Event.Types.hover_start, lambda e: self.state.add('hover-top') if e.target == self else None)\n self.event_emitter.subscribe(Event.Types.hover_end, lambda e: self.state.remove('hover-top') if e.target == self else None)\n\n self.event_emitter.subscribe(Event.Types.hover_start, lambda e: self.state.add('hover'))\n self.event_emitter.subscribe(Event.Types.hover_end, lambda e: self.state.remove('hover'))\n\n def get_element_by_id(self, id) -> DOMNode | None:\n '''Gets an element given an id'''\n if self.id == id:return self\n 
for child in self.children:\n xs = child.get_element_by_id(id)\n if xs is not None:\n return xs\n return None\n\n def get_elements_by_class_name(self, class_name) -> list[DOMNode]:\n '''Get elements which have a given class name'''\n result = []\n if self.class_list.contains(class_name):result.append(self)\n for child in self.children:\n result.extend(child.get_elements_by_class_name(class_name))\n return result\n\n def get_elements_by_tag_name(self, tag_name) -> list[DOMNode]:\n '''Get elements which have a given tag name'''\n result = []\n if self.tag == tag_name:result.append(self)\n for child in self.children:\n result.extend(child.get_elements_by_tag_name(tag_name))\n return result\n\n def append_child(self, node : DOMNode):\n '''\n Adds an element to the dom \n Sets its render nodes, calculates heirarchy and triggers a relayout\n '''\n \n # Ok So the render tree has to be like created before the subtree is linked\n # Create render nodes for subtree\n node.parent = self\n link_dom(node, self.styles._cssom, self.render_node._window_provider, event_handler=self.event_emitter.event_handler, set_parent = True)\n\n # Calculate heirarchy\n def get_root(dom_node : DOMNode):\n if dom_node.parent is None:return dom_node\n return get_root(dom_node.parent)\n \n closest_relative = self.render_node.closest_relative\n root = get_root(self)\n\n self.children.append(node)\n node.render_node.set_heirarchy(closest_relative, root.render_node)\n \n # Trigger layout\n root.render_node.request_reflow() \n\n def __eq__(self, other):\n return other is self \n\n def remove(self, _reflow=True, _remove_from_parent=True):\n '''\n Removes an element from the dom tree\n Triggers a layout recalculation\n '''\n # Remove from dom (from parents children and set parent to None also set visible to False)\n self._visible = False\n if _remove_from_parent:self.parent.children.remove(self)\n self.parent = None\n \n children = self.children.copy()\n self.children.clear()\n # Remove children\n for child in children:\n child.remove(_reflow=False, _remove_from_parent=False)\n\n # Remove from render tree\n if self.render_node is not None:\n master = self.render_node.master\n self.render_node.remove()\n if self.component is not None:\n self.component.on_remove()\n self.component = None\n self.image = None\n # Also remove references from style_container and disable event_emitter\n self.event_emitter.disabled = True\n self.event_emitter.subscribers.clear()\n self.event_emitter.target = None\n self.styles._cssom = None\n self.styles._element = None\n self.styles._clear()\n\n # Delete render node\n self.render_node = None\n \n # Trigger layout\n if _reflow:master.request_reflow()\n\n\n def __repr__(self):\n class_list = f' class=\"{\" \".join(self.class_list)}\"' if self.class_list else ''\n attrs = ' '.join(f'{key}=\"{value}\"' for key, value in self.attrs.__dict__.items() if key not in ('class'))\n attrs = (' ' + attrs) if attrs else ''\n return f'<{self.tag}{class_list}{attrs}>{f\"...({len(self.children)})...\" if self.children else \"\"}'\n\n def _on_class_change(self, added = None, removed = None, update_styles = True):\n '''\n Callback called when the class_list of an element has changed\n NOTE: The element can request for a style change if asked for\n '''\n if update_styles:\n self.styles.compute_true_styles()\n\n def _on_state_change(self, added = None, removed = None, update_styles = True):\n '''\n Callback called when the state of an element has changed\n NOTE: The element can request for a style change if asked for\n '''\n 
if update_styles:\n self.styles.compute_true_styles()\n def _on_true_style_change(self):\n '''\n Callback called when the true styles of an element have changed\n NOTE: The element will request for a reflow over here\n '''\n \n # TODO: Detect any changes in style that can cause a reheirarchy calculatoin or repaint to be needed\n # eg: change in closest relative element or change in composite needingness\n self.render_node.request_reflow()\n \n\nDOMNode.__register_serializer__()\nDOMNode.__register_deserializer__()\nDOMNode.__register_iterable__('ClassList')\nDOMNode.__register_serializer__(custom=ClassList, serializer=lambda v:{'__objecttype__' : 'ClassList', 'ClassList' : list(v.keys())})\nDOMNode.__register_deserializer__(custom=ClassList, deserializer=lambda v:ClassList(*v))\nDOMNode.__register_iterable__('StateContainer')\nDOMNode.__register_serializer__(custom=StateContainer, serializer=lambda v:{'__objecttype__' : 'StateContainer', 'StateContainer' : list(v.keys())})\nDOMNode.__register_deserializer__(custom=StateContainer, deserializer=lambda v:StateContainer(*v))\n\n# def on_true_style_change(self):\n# '''\n# Callback called when the true styles of an element have changed\n# NOTE: The element will request for a reflow over here\n# '''\n# self.render_node.request_reflow()\n\n# def on_state_change(self, added = None, removed = None, update_styles = True):\n# '''\n# Callback called when the state of an element has changed\n# NOTE: The element can request for a style change if asked for\n# '''\n# if update_styles:self.styles.compute_true_styles()\n\n# def on_class_change(self, added = None, removed = None, update_styles = True):\n# '''\n# Callback called when the class_list of an element has changed\n# NOTE: The element can request for a style change if asked for\n# '''\n# if update_styles:self.styles.compute_true_styles()\n ","repo_name":"Dragon-KK/mypygui","sub_path":"src/mypygui/page/objects/dom/dom_node/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":10666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"17148471546","text":"import pika\nimport threading\nimport logging\n\nfrom .consumer import Consumer\nfrom .subscriber import Subscriber\n\nlog = logging.getLogger(__name__)\n\nclass AMQPConsumerManager(object):\n\tdef __init__(self, amqp_url):\n\t\tself.amqp_url = amqp_url\n\t\tself.consumers = []\n\t\tself.subscribers = []\n\n\tdef add_consumer(self, queue, callback, durable=False):\n\t\tlog.info('Adding %s queue with callback %s', queue, callback)\n\t\tself.consumers.append(Consumer(self.amqp_url, queue, callback, durable))\n\n\tdef subscribe(self, exchange, callback):\n\t\tlog.info('Adding to exchange %s with callback %s', exchange, callback)\n\t\tself.subscribers.append(Subscriber(self.amqp_url, exchange, callback))\n\n\tdef start(self):\n\t\tfor consumer in self.consumers:\n\t\t\tt = threading.Thread(target=consumer.run)\n\t\t\tt.daemon = True\n\t\t\tt.start()\n\t\tfor subscriber in self.subscribers:\n\t\t\tt = threading.Thread(target=subscriber.run)\n\t\t\tt.daemon = True\n\t\t\tt.start()\n\t\t\n\n\n\tdef start_consumer(self, amqp_url, queue, callback, no_ack):\n\t\tconnection = pika.BlockingConnection(pika.URLParameters(amqp_url))\n\t\tchannel = connection.channel()\n\t\tchannel.queue_declare(queue=queue, durable=True)\n\t\tchannel.basic_consume(callback,\n queue=queue,\n no_ack=no_ack)\n\t\tchannel.start_consuming()\n\n\tdef start_subscriber(self, amqp_url, exchange, callback, 
no_ack):\n\t\tconnection = pika.BlockingConnection(pika.URLParameters(amqp_url))\n\t\tchannel = connection.channel()\n\t\tchannel.exchange_declare(exchange=exchange,\n                         exchange_type='fanout')\n\t\tresult = channel.queue_declare(exclusive=True)\n\t\tchannel.queue_bind(exchange=exchange,\n                   queue=result.method.queue)\n\t\tchannel.basic_consume(callback,\n              queue=result.method.queue,\n              no_ack=no_ack)\n\t\tchannel.start_consuming()\n\n\n\n\n","repo_name":"victorgsosa/heatstore","sub_path":"image_utils/services/amqp/amqp_consumer_manager.py","file_name":"amqp_consumer_manager.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13899763925","text":"# -*- coding:utf-8 -*-\nimport sqlite3\n\n\nclass SQLiteInterface(object):\n    def __init__(self, db_path):\n        self._conn = sqlite3.connect(db_path)\n        self._cursor = self._conn.cursor()\n    \n    def table_field(self, table_name):\n        self._cursor.execute(r'select * from %s'%(table_name))\n        return list(map(lambda x: x[0], self._cursor.description))\n\n    def select(self, table_name, column=None, where=None, other_str=\"\", output_dict=True):\n        column_str, where_str= \"*\",\"\"\n        if column:\n            column_str = \",\".join(column)\n            field_name = column\n        else:\n            field_name = self.table_field(table_name)\n        if where:\n            where_str = \" where %s\"%(where)\n        select_str = \"select {column} from {table} {where} {other}\" \\\n            .format(table = table_name\n            ,column = column_str\n            ,where = where_str\n            ,other = other_str)\n        #print select_str\n        self._cursor.execute(select_str) \n        raw_result = self._cursor.fetchall()\n\n        if output_dict:\n            return map(lambda x : dict(zip(field_name,x)),raw_result) \n        else:\n            return raw_result \n","repo_name":"ckmarkoh/semantics_research","sub_path":"src/sqliteInterface.py","file_name":"sqliteInterface.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74087261592","text":"def solution(n):\n    answer = []\n    if n==1:\n        answer.append(1)\n\n    for i in range(2,n+1):\n        if n%i==0:\n            count = 0\n            for j in range(1,i+1):\n                if i%j==0:\n                    count+=1\n            if count==2:\n                answer.append(i)\n                n=n//i\n    return answer\n\nprint(solution(100))\n","repo_name":"maatanyy/codingtest_study","sub_path":"프로그래머스/코딩테스트입문/소인수분해.py","file_name":"소인수분해.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22460842161","text":"class Solution:\n    def restoreIpAddresses(self, s: str) -> List[str]:\n        res = []\n\n        def helper(s, remain, curr):\n            if not remain:\n                if not s:\n                    res.append(\".\".join(curr))\n                return\n\n            for i in range(1, min(len(s) + 1, 4)):\n                if i > 1 and s[0] == \"0\":\n                    return\n\n                if int(s[:i]) < 256:\n                    helper(s[i:], remain-1, curr + [s[:i]])\n\n        helper(s, 4, [])\n        return res\n","repo_name":"hsuanhauliu/leetcode-solutions","sub_path":"medium/restore-ip-addresses/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9139141804","text":"n,m=map(int,input().split())\nedge=[[] for _ in range(n+1)]\nfor _ in range(m):\n    a,b=map(int,input().split())\n    edge[a].append(b)\n    edge[b].append(a)\n\nclass UnionFind:\n    def __init__(self,n) -> None:\n        self.par=[-1]*(n + 1)\n        self.size=[1]*(n + 1)\n\n    def root(self,x):\n        if self.par[x]==-1:\n            return x\n        else: \n            
self.par[x]=self.root(self.par[x])\n return self.par[x]\n\n def issame(self,x,y):\n return self.root(x)==self.root(y)\n \n def unite(self,x,y):\n x = self.root(x)\n y = self.root(y)\n\n # 既に同じグループなら何もしない\n if x==y:\n return False\n \n # unionbysize\n if self.size[x]= 0:\n neg_count += negcount - poscount\n pos_count = 0\n elif negcount < 0 and poscount >= 0:\n pos_count = poscount - negcount\n neg_count = 0\n elif poscount < 0 and negcount < 0:\n neg_count = -poscount\n pos_count = -negcount\n else:\n pos_count = poscount\n neg_count = negcount\n return [pos_count, neg_count]\n\n\n #为长句打分,每次有新评论时,通过修改sql语句中的reviewID范围控制选取的评论(核心)\n @property\n def sentence_sentiment_score(self):\n try:\n conn = pymssql.connect(host=self.host, user=self.user, password=self.password, database=self.database, charset='UTF-8')\n cur = conn.cursor()\n cuted_review = []\n review_id = []\n # 选取数据,sql查询语句需要经常修改,通过reviewID范围控制选取的评论(已完成reviewid 1~659967)\n sql_1 = u\"SELECT review,reviewID from Product_productReview WHERE reviewEmotionTag is NULL and %s <= reviewID and reviewID <= %s\"%(self.review_id_beg,self.review_id_end)\n cur.execute(sql_1)\n rows = cur.fetchall()\n conn.commit()\n for i in rows:\n # print(self.tp.cut_sentence_2(i[0]))\n cuted_review.append(self.tp.cut_sentence_2(i[0]))\n review_id.append(i[1])\n\n print('取出数据成功!!')\n except:\n print(u'取出数据失败!!')\n print(sys.exc_info()[1])\n\n # 关闭连接\n finally:\n conn.close()\n\n single_review_count = []\n all_review_count = []\n for review in cuted_review:\n for sent in review:\n seg_sent = self.tp.segmentation(sent,'list')\n i = 0 # word position counter\n a = 0 # sentiment word position\n poscount = 0 # count a pos word\n negcount = 0\n for word in seg_sent:\n if word in self.postdict:\n score = 1\n # poscount += 1\n for w in seg_sent[a:i]:\n # poscount += self.match(w, 1)\n score *=self.match(w,1)\n poscount +=score\n a = i + 1\n\n elif word in self.negdict:\n score = 1\n # negcount += 1\n for w in seg_sent[a:i]:\n # negcount += self.match(w, 1)\n score *=self.match(w,1)\n negcount +=score\n a = i + 1\n\n elif word == '!' or word == '!':\n for w2 in seg_sent[::-1]:\n if w2 in self.postdict:\n poscount += 2\n break\n elif w2 in self.negdict:\n negcount += 2\n break\n i += 1\n\n single_review_count.append(\n self.transform_to_positive_num(poscount, negcount)) # [[s1_score], [s2_score], ...]\n # print(single_review_count)\n all_review_count.append(\n single_review_count) # [[[s11_score], [s12_score], ...], [[s21_score], [s22_score], ...], ...]\n single_review_count = []\n\n return all_review_count ,review_id\n\n\n\n #为单个分句打分\n def seg_sentence_sentiment_score(self, source):\n\n output = {}\n for keyword in source:\n if keyword not in output:\n output[keyword]= []\n for review in source[keyword]:\n seg_sent = self.tp.segmentation(review,'list')\n i = 0 # word position counter\n a = 0 # sentiment word position\n poscount = 0 # count a pos word\n negcount = 0\n for word in seg_sent:\n if word in self.postdict:\n score = 1\n # poscount += 1\n for w in seg_sent[a:i]:\n # poscount += self.match(w, 1)\n score *=self.match(w,1)\n poscount +=score\n a = i + 1\n\n elif word in self.negdict:\n score = 1\n # negcount += 1\n for w in seg_sent[a:i]:\n # negcount += self.match(w, 1)\n score *=self.match(w,1)\n negcount +=score\n a = i + 1\n\n elif word == '!' 
or word == '!':\n for w2 in seg_sent[::-1]:\n if w2 in self.postdict:\n poscount += 2\n break\n elif w2 in self.negdict:\n negcount += 2\n break\n i += 1\n\n # print(review,self.transform_to_positive_num(poscount, negcount))\n output[keyword].append(self.transform_to_positive_num(poscount, negcount)) # {concernword1:[s11_score,s12_score..], concernword2:[s21_score,s22_score..], ...}\n\n # print(all_review_count)\n return output\n\n\n #得到每条评论综合得分\n def all_review_sentiment_score(self,senti_score_list):\n score = []\n for review in senti_score_list:\n if len(review)==0:\n review = [[0.,0.]]\n score_array = np.array(review)\n #print(score_array)\n Pos = np.sum(score_array[:, 0])\n Neg = np.sum(score_array[:, 1])\n #下面部分暂时不需要\n AvgPos = np.mean(score_array[:, 0])\n AvgNeg = np.mean(score_array[:, 1])\n StdPos = np.std(score_array[:, 0])\n StdNeg = np.std(score_array[:, 1])\n #汇总\n score.append([Pos, Neg, AvgPos, AvgNeg, StdPos, StdNeg])\n return score\n\n #得到选取评论的情感,存储得分并输出为CSV文件(用不到)\n def store_sentiment_dictionary_score(self,score_file):\n senti_score_list,review_id =self.sentence_sentiment_score\n sentiment_score = self.all_review_sentiment_score(senti_score_list)\n\n f = csv.writer(open(score_file, 'w'), lineterminator='\\n')\n for i in sentiment_score:\n f.writerow((str(i[0]), str(i[1]), str(i[2]), str(i[3]), str(i[4]), str(i[5])))\n\n #得到选取评论的情感,储存数据库(核心)\n def store_sentiment_score_database(self):\n senti_score_list, review_id = self.sentence_sentiment_score\n sentiment_score = self.all_review_sentiment_score(senti_score_list)\n print('分数汇总完成')\n # print(review_id)\n try:\n conn = pymssql.connect(host=self.host, user=self.user, password=self.password, database=self.database, charset='UTF-8')\n cur = conn.cursor()\n result=[]\n for i in range(len(sentiment_score)):\n # print(review_id[i])\n if sentiment_score [i][0] > sentiment_score [i][1]:\n result.append((1,review_id[i]))\n else:\n result.append((0,review_id[i]))\n # print(result)\n # 更新情感极性标签到数据库\n sql_1 = u\"update Product_productReview set reviewEmotionTag = '%s' where reviewID = '%s'\"\n cur.executemany(sql_1, result)\n conn.commit()\n print('更新情感极性标签成功!!')\n except:\n print(u'更新情感极性标签失败!!')\n print(sys.exc_info()[1])\n\n # 关闭连接\n finally:\n conn.close()\n\n\n #得到每条评论分词结果并存储(用不到)\n def get_seg_output(self,rawdata_file,seg_file):\n reader = csv.reader(open(rawdata_file, 'r'))\n cuted_review = []\n for cell in reader:\n if len(cell[1]) != 0:\n cuted_review.append(self.tp.cut_sentence_2(cell[1]))\n\n writer =csv.writer(open(seg_file,'w'),lineterminator='\\n')\n for review in cuted_review:\n temp = []\n for sent in review:\n seg_sent = self.tp.segmentation(sent, 'list')\n temp.extend([word for word in seg_sent if word not in self.sentiment_stopwords and word != ' '])\n #print(temp)\n writer.writerow((temp,))\n\n\n\n\nif __name__=='__main__':\n test = Text_Score()\n test.store_sentiment_score_database()\n\n\n\n","repo_name":"mathlf2015/Text-Mining","sub_path":"Text_Score_Class.py","file_name":"Text_Score_Class.py","file_ext":"py","file_size_in_byte":12628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"23990830771","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nplt.rc('xtick', labelsize='xx-large')\nplt.rc('ytick', labelsize='xx-large')\nplt.rc('axes', labelsize='xx-large', titlesize='xx-large')\nplt.rcParams['legend.title_fontsize'] = 'xx-large'\nplt.rcParams['legend.fontsize'] = 'medium'\n\ndata_cora = [\n]\n\n# create a figure with a 2x2 grid of 
subplots\nfig, axs = plt.subplots(nrows=1, ncols=2, figsize=(9, 4))\n\n# plot the data on the subplots and add grid lines\naxs[0].plot(data_cora[0][0], data_cora[0][2], 'orange', linewidth=1.5, label=\"P = 1\")\naxs[0].plot(data_cora[1][0], data_cora[1][2], 'crimson', linewidth=1.5, label=\"P = 2\")\naxs[0].plot(data_cora[2][0], data_cora[2][2], 'forestgreen', linewidth=1.5, label=\"P = 3\")\naxs[0].plot(data_cora[3][0], data_cora[3][2], 'navy', linewidth=1.5, label=\"P = 4\")\naxs[0].plot(data_cora[4][0], data_cora[4][2], 'magenta', linewidth=1.5, label=\"P = 5\")\n\naxs[0].set_xlabel('Wall Time (s)')\naxs[0].set_ylabel('Training Loss')\n# axs[0].set_ylim([0, 40])\naxs[0].axhline(y=0.6, color='k', linestyle='--', linewidth=1.5)\naxs[0].grid(True, which='major', linestyle='-', color='black', axis='y', alpha=0.5)\naxs[0].grid(True, which='minor', linestyle='--', color='red', alpha=0.2, axis='y')\naxs[0].minorticks_on()\n\naxs[1].plot(data_cora[0][0], data_cora[0][1], 'orange', linewidth=1.5, label=\"P = 1\")\naxs[1].plot(data_cora[1][0], data_cora[1][1], 'crimson', linewidth=1.5, label=\"P = 2\")\naxs[1].plot(data_cora[2][0], data_cora[2][1], 'forestgreen', linewidth=1.5, label=\"P = 3\")\naxs[1].plot(data_cora[3][0], data_cora[3][1], 'navy', linewidth=1.5, label=\"P = 4\")\naxs[1].plot(data_cora[4][0], data_cora[4][1], 'magenta', linewidth=1.5, label=\"P = 5\")\n\naxs[1].set_xlabel('Wall Time (s)')\naxs[1].set_ylabel('Test Accuracy')\n# axs[1].set_ylim([0.85, 1])\naxs[1].axhline(y=0.91, color='k', linestyle='--', linewidth=1.5)\naxs[1].grid(True, which='major', linestyle='-', color='black', axis='y', alpha=0.5)\naxs[1].grid(True, which='minor', linestyle='--', color='red', alpha=0.2, axis='y')\naxs[1].minorticks_on()\n\n# add a title for the entire figure\nfig.suptitle('Convergence Plots v P (mw-PR on MNIST)', fontsize='xx-large')\naxs[0].set_title('Loss Plot')\naxs[1].set_title('Acc Plot')\naxs[0].legend()\naxs[1].legend()\n# adjust spacing between subplots\nfig.subplots_adjust(hspace=0.6, wspace=0.4)\nplt.tight_layout()\n# display the plot\nplt.savefig(\"p-vary.jpg\", dpi=600)\nplt.show()\n","repo_name":"pranjalnam/SSDS-Project-2023","sub_path":"cora/plots/p-vary.py","file_name":"p-vary.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14004751840","text":"import pymysql\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\n\nhost = 'localhost'\nport = 3306\nusername = 'root'\npassword = 'root'\ndb = 'cola'\ncharset = 'utf8'\n\nconn = pymysql.connect(host=host,\n port=port,\n user=username,\n password=password,\n db=db,\n charset=charset,\n cursorclass=pymysql.cursors.DictCursor)\ncursor = conn.cursor()\n\ncount_sql = 'select * from hosplinks'\nlimit_select_sql = 'select id, link, hospname, ifcrawed from hosplinks limit %s, %s'\nupdate_sql = 'update hosplinks set ifcrawed = 1 where id = %s'\ninsert_sql = 'insert into hospinfos(hospname, hospksmc, hospxz, telephone, address, hospnickname,' +\\\n 'hospdist, hospyzname, hospconyear, hosptype, hospclass, hospkscount, hospyihucount, ' +\\\n 'hospbccount, hospmzcount, hospifyb, xjsbinfo, hospintro, hosphonor, website, busroutes)' +\\\n ' values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n\n# cursor.execute(count_sql)\n# cursor.fetchall()\n# totalcount = cursor.rowcount\ntotalcount = 47539\nnolst = list(range(0, totalcount, 100))\nnolst.append(totalcount)\n\nheader={\n 'user-agent': 
'Mozzila/5.0'\n}\n\n# cursor.execute(limit_select_sql % (0, 100))\n# for link in cursor.fetchall():\n# print(link)\n\nfor no in nolst:\n if totalcount - no < 100:\n n = totalcount - no\n else:\n n = 100\n\n cursor.execute(limit_select_sql % (no, n))\n for linkinfo in cursor.fetchall():\n print('processing......', end=' ')\n update_flag = True\n id = linkinfo['id']\n hospname = linkinfo['hospname']\n ifcrawed = linkinfo['ifcrawed']\n link = linkinfo['link']\n if id == 1:\n continue\n # 如果该链接已经爬取过,则跳过该链接\n if ifcrawed == 1:\n continue\n update_flag = False\n\n insert_data = []\n\n # 第一个追加医院名称\n # print(linkinfo)\n # print(hospname)\n insert_data.append(hospname)\n\n # 医院主页面 内容获取\n html_req = requests.get(link, timeout=20, headers=header)\n html_cont = html_req.text\n soup = BeautifulSoup(html_cont, 'html.parser')\n\n # 获取当前医院的科室信息,并保存\n ksinfos = ''\n kscount = 1\n ks_cont = soup.find('div', class_='hp_docks')\n if ks_cont is not None:\n for ks in ks_cont.find_all('li'):\n ksmc = ks.find('a').text.strip()\n if kscount == 1:\n ksinfos += ksmc\n else:\n ksinfos += ',' + ksmc\n kscount += 1\n # 第二个追加科室信息\n insert_data.append(ksinfos)\n\n # 医院简介页面内容获取\n url = link + 'jianjie.html'\n url_req = requests.get(url, timeout=20, headers=header)\n url_cont = url_req.text\n # 如果下载失败,那么跳过该链接\n if html_req.status_code != 200 or url_req.status_code != 200:\n continue\n update_flag = False\n soup = BeautifulSoup(url_cont, 'html.parser')\n\n # 医院基本信息\n hospinfo = soup.find('div', class_='hpi_content')\n lis = hospinfo.find_all('li')\n yyxvinfo = lis[1].text.strip().split(':')[1]\n # print('\\n医院性质:', yyxvinfo)\n # 第三个追加医院性质\n insert_data.append(yyxvinfo)\n\n yytelephont = lis[3].find('span').text.strip()\n # print('\\n医院电话:', yytelephont)\n # 第四个追加医院电话\n insert_data.append(yytelephont)\n\n yyaddress = lis[4].find('span').text.strip()\n # print('\\n联系地址:', yyaddress)\n # 第五个追加联系地址\n insert_data.append(yyaddress)\n\n hospbasicinfo = soup.find('div', class_='leftpad10 hpbasicinfo')\n bftds = hospbasicinfo.find_all('td')\n\n # 医院别名\n hospnickname = bftds[1].text.strip()\n # print('\\n医院别名:', hospnickname)\n # 第六个追加医院别名\n insert_data.append(hospnickname)\n\n # 所属地区\n hospdist = bftds[3].text.strip()\n # print('\\n所属区县:', hospdist)\n # 第七个追加所属区县\n insert_data.append(hospdist)\n\n # 院长姓名\n hospyvname = bftds[5].text.strip()\n # print('\\n院长姓名:', hospyvname)\n # 第八个追加医院院长姓名\n insert_data.append(hospyvname)\n\n # 建院年份\n hospconyear = bftds[7].text.strip()\n # print('\\n建院年份:', hospconyear)\n # 第九个追加建院年份\n insert_data.append(hospconyear)\n\n # 医院类型\n hosptype = bftds[9].text.strip()\n # print('\\n医院类型:', hosptype)\n # 第十个追加医院类型\n insert_data.append(hosptype)\n\n # 医院等级\n hospclass = bftds[11].text.strip()\n # print('\\n医院等级:', hospclass)\n # 第十一个追加医院等级\n insert_data.append(hospclass)\n\n # 科室数量\n hospkeuicount = bftds[13].text.strip()\n # print('\\n科室数量:', hospkeuicount)\n # 第十二个追加科室数量\n insert_data.append(hospkeuicount)\n\n # 医护人数\n hospyihucount = bftds[15].text.strip()\n # print('\\n医护人数:', hospyihucount)\n # 第十三个追加医护人数\n insert_data.append(hospyihucount)\n\n # 病床数量\n hospbkilcount = bftds[17].text.strip()\n # print('\\n病床数量:', hospbkilcount)\n # 第十四个追加病床数量\n insert_data.append(hospbkilcount)\n\n # 年门诊量\n hospmfvfhucount = bftds[19].text.strip()\n # print('\\n年门诊量:', hospmfvfhucount)\n # 第十五个追加年门诊量\n insert_data.append(hospmfvfhucount)\n\n # 是否医保\n hospifyibc = bftds[21].text.strip()\n # print('\\n是否医保:', hospifyibc)\n # 第十六个追加是否医保\n insert_data.append(hospifyibc)\n\n # 先进设备信息,医院简介,荣誉 (这里可能缺失某一两项内容,需要特殊处理)\n 
someinfoconts = soup.find_all('div', class_='hpcontent')\n someinfotitles = soup.find_all('div', class_='hptitle')\n\n xjubinfo = ''\n yyjjinfo = ''\n honorinfo = ''\n\n for i in list(range(len(someinfotitles))):\n stitle = someinfotitles[i].text.strip()\n if stitle[-4:] == '设备信息':\n xjubinfo = someinfoconts[i - 1].text.strip()\n if stitle[-4:] == '医院简介':\n yyjjinfo = someinfoconts[i - 1].text.strip()\n if stitle[-4:] == '所获荣誉':\n honorinfo = someinfoconts[i - 1].text.strip()\n\n # 第十七个追加先进设备信息\n insert_data.append(xjubinfo)\n # 第十八个追加医院简介\n insert_data.append(yyjjinfo)\n # 第十九个追加医院荣誉\n insert_data.append(honorinfo)\n\n # # print(len(someinfos))\n # # print('someinfos:\\n', someinfos)\n # xjubinfo = someinfos[0].text.strip()\n # # print('\\n先进设备信息:', xjubinfo)\n # # 第十七个追加先进设备信息\n # insert_data.append(xjubinfo)\n #\n # yyjjinfo = someinfos[1].text.strip()\n # # print('\\n医院简介:', yyjjinfo)\n # # 第十八个追加先进设备信息\n # insert_data.append(yyjjinfo)\n #\n # honorinfo = someinfos[2].text.strip()\n # # print('\\n医院荣誉:', honorinfo)\n # # 第十九个追加先进设备信息\n # insert_data.append(honorinfo)\n\n # 联系信息\n contactinfos = soup.find('div', class_='leftpad10 contact').find_all('td')\n website = contactinfos[1].text.strip()\n # print('\\n医院网址:', website)\n # 第二十个追加先进设备信息\n insert_data.append(website)\n\n busrounte = contactinfos[-1].text.strip()\n # print('\\n公交线路:', busrounte)\n # 第二十一个追加先进设备信息\n insert_data.append(busrounte)\n\n cursor.execute(insert_sql, insert_data)\n print('插入第' + repr(cursor.lastrowid) + '条医院信息, Done!!!!!!!!')\n conn.commit()\n\n if update_flag:\n cursor.execute(update_sql % id)\n conn.commit()\n\n time.sleep(3)\n\n\n\n","repo_name":"waspart/python","sub_path":"crawyyk/hospinfo.py","file_name":"hospinfo.py","file_ext":"py","file_size_in_byte":8602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23745214937","text":"def morral(espacio_morral, pesos, valores, n, mensaje):\n print('-' * 50)\n print(mensaje)\n print(f' * Analizamos Elemento {n} *')\n print(f' - Espacio en morral = {espacio_morral}')\n print(f' - Peso = {pesos[n - 1]}, valor = {valores[n - 1]} ')\n\n if n == 0 or espacio_morral == 0:\n #Puse este print para enterarme de cuándo se entra en este if\n if espacio_morral == 0:\n print(' Espacio en morral lleno!')\n elif n == 0:\n print(' Indice final alcanzado!') \n return 0\n \n if pesos[n - 1] > espacio_morral:\n #Este print también es para saber cuando se entra a este if\n print(' peso del elemento > espacio del morral')\n return morral(espacio_morral, pesos, valores, n - 1, '')\n \n #Este print tan solo imprime ambos resultados posibles del max() y también impríme cuál fue el más grande para saber cuál eligió.\n return max(valores[n - 1] + morral(espacio_morral - pesos[n - 1], pesos, valores, n - 1, f'--> SI Robo el elemento {n} y sumo a mi morral {valores[n - 1]} en valor!'), \n morral(espacio_morral, pesos, valores, n - 1, f'--> NO robo el elemento {n}!'))\n\n\nif __name__ == '__main__':\n espacio_morral = 50\n valores = [60, 100, 120]\n pesos = [10, 20, 30]\n n = len(valores)\n\n resultado = morral(espacio_morral, pesos, valores, n, 'Calculo del problema del morral de forma recursiva')\n\n print(f'El valor maximo que podemos robar es {resultado}')\n print('La complejidad del algoritmo es O(nW) donde n es el numero de elementos y W el tamano del 
morral')","repo_name":"CesarHera/POO-Algoritmos-Py","sub_path":"morral13.py","file_name":"morral13.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71754175832","text":"def function(a,b):\n if(a int:\n words = [sorted(word) for word in words]\n words = [set(word) for word in words]\n\n num_pairs = 0\n i = 0\n while i < len(words)-1:\n j = i+1\n while j < len(words):\n if words[i] == words[j]:\n num_pairs+=1\n j+=1\n i+=1\n return num_pairs","repo_name":"mnhaqq/competitive_programming","sub_path":"count_paris_of similar_strings.py","file_name":"count_paris_of similar_strings.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37077831508","text":"from collections import deque\n\nclass Pathfinder:\n \n \"\"\"A Rectilinear No Block Pathfinding Class\"\"\"\n def __init__(self):\n \"\"\"Initiates the Algorithm\n \"\"\" \n pass\n def path(self, origin, destination, coordinates):\n \"\"\"Find the path origin coordinate to destination coordinate\n\n Arguments:\n pathfinder {Pathfinder} -- pathfinding class that contains the algorithm\n \"\"\"\n o_x, o_y = origin\n d_x, d_y = destination\n\n #let's do X coords first\n #calculating steps in rectilinear without blocks should be easy, just add/subtract o_x from d_x \n steps = int(d_x-o_x) #can be negative steps or positive steps\n\n #to get steps direction, we use abs to get the absolute steps value (remove sign) and divide that from steps\n try: \n step = steps/int(abs(steps)) #1 for positve direction, -1 for negative direction\n except:\n step = 0\n #here we just make a loop of abs(steps) count\n for _ in range(0, abs(int(steps))):\n #then add (or subtact) 1 to o_x until it is equalt to d_x\n o_x=int(o_x+step)\n coordinates.appendleft((o_x,o_y))\n \n #Let's repeat that for o_y and d_y\n steps = int(d_y-o_y) #can be negative steps or positive steps\n\n #to get steps direction, we use abs to get the absolute steps value (remove sign) and divide that from steps\n try:\n step = steps/int(abs(steps)) #1 for positve direction, -1 for negative direction\n except:\n step = 0\n for _ in range(0, abs(int(steps))):\n #then add (or subtact) 1 to o_y until it is equalt to d_y\n o_y=int(o_y+step)\n coordinates.appendleft((o_x,o_y))\n\n return coordinates\n \n \n\n","repo_name":"HapPiNeHsSs/simple-taxi-booking","sub_path":"project/pathfinder_algo/rectilinear_no_blocks.py","file_name":"rectilinear_no_blocks.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"1826322814","text":"from django.urls import path\nfrom .views import *\nfrom django.views import generic\nfrom django.contrib.auth.views import LogoutView\n\nurlpatterns = [\n # Create\n path('driver-create/', driver_create, name='driver-create'),\n path('truck-create/', truck_create, name='truck-create'),\n # Read\n path('driver-dashboard//', driver_dashboard, name='driver-dashboard'),\n # Update\n path('driver-update//', driver_update, name='driver-update'),\n path('truck-update//', truck_update, name='truck-update'),\n # Delete\n path('order-delete//', order_delete, name='order-delete'),\n path('driver-delete//', driver_delete, name='driver-delete'),\n # Account\n path('logout/', LogoutView.as_view(next_page='home'), name='logout'),\n path('login/', SignInView.as_view(), name='login'),\n path('signup/', 
SignUpView.as_view(), name='signup'),\n # Views\n path('', TemplateView.as_view(template_name='app/index.html'), name='home'),\n path('order/', OrderView.as_view(), name='order'),\n path('distribution/', TemplateView.as_view(template_name='app/distribution.html'), name='distribution'),\n path('admin-dashboard/', admin_dashboard, name='admin-dashboard'),\n path('query-success/', TemplateView.as_view(template_name='app/query_success.html'), name='query-success'),\n]","repo_name":"Zoreyan/Logistic","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20299413712","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"ipetrash\"\n\n\nimport re\nimport requests\n\n\nGET_MINIMAL_TORRENT_URL = re.compile(r\"(https?://.+/torrent/\\d+/?)\")\n\n\ndef download_torrent_file(torrent_file_url):\n \"\"\"\n Функция скачает по ссылке торрент файл и вернет его название.\n Если не получится, вернет None\n\n \"\"\"\n\n user_agent = (\n \"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)\"\n )\n\n rs = requests.get(torrent_file_url, headers={\"User-Agent\": user_agent})\n if not rs.ok:\n print(f\"Не получилось скачать: {rs.status_code}\\n\\n{rs.text}\")\n return\n\n # Теперь нужно вытащить название торрент-файла\n file_name = rs.headers[\"Content-Disposition\"]\n file_name = file_name.replace(\"attachment; filename=\", \"\").replace('\"', \"\")\n\n with open(file_name, \"wb\") as f:\n f.write(rs.text.encode())\n\n return file_name\n\n\ndef get_download_url_from_torrent_url(torrent_url):\n \"\"\"\n Функция по ссылке на торрент вернет ссылку на торрент-файл.\n Если не получится, вернется None.\n\n \"\"\"\n\n match = GET_MINIMAL_TORRENT_URL.search(torrent_url)\n if match:\n torrent_url = match.group(1)\n download_torrent_url = torrent_url.replace(\"/torrent/\", \"/download/\")\n return download_torrent_url\n\n\nif __name__ == \"__main__\":\n torrent_url = \"http://anti-tor.org/torrent/539888/7-days-to-die-v-15.1-2013-pc-repack-ot-pioneer\"\n torrent_file_url = get_download_url_from_torrent_url(torrent_url)\n file_name = download_torrent_file(torrent_file_url)\n print(torrent_file_url, file_name)\n\n torrent_url = \"http://anti-tor.org/torrent/474477/hyperdimension-neptunia-rebirth2-sisters-generation-2015-pc-repack-ot-r.g-games\"\n torrent_file_url = get_download_url_from_torrent_url(torrent_url)\n file_name = download_torrent_file(torrent_file_url)\n print(torrent_file_url, file_name)\n\n torrent_url = \"http://anti-tor.org/torrent/543446/the-binding-of-isaac-rebirth-complete-bundle-v.1.??-2014-pc-steam-rip-ot-letsrlay\"\n torrent_file_url = get_download_url_from_torrent_url(torrent_url)\n file_name = download_torrent_file(torrent_file_url)\n print(torrent_file_url, file_name)\n","repo_name":"gil9red/SimplePyScripts","sub_path":"html_parsing/parse_torrent_sites/rutor/download_torrent_file_from_torrent_url.py","file_name":"download_torrent_file_from_torrent_url.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"ru","doc_type":"code","stars":141,"dataset":"github-code","pt":"5"} +{"seq_id":"3518475478","text":"from flask import Flask, request, jsonify, make_response, send_from_directory, send_file\nimport os\nfrom flask_mysqldb import MySQL\nfrom flask_cors import CORS\nfrom werkzeug.utils import send_from_directory\nimport PyPDF2\nfrom sentence_transformers import SentenceTransformer, util\nimport 
string\nimport re\nimport pickle\nimport json\nimport torch\nfrom ibm_watson import SpeechToTextV1\nfrom ibm_watson.websocket import RecognizeCallback, AudioSource\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nimport tempfile\nimport moviepy.editor as mp\nfrom google.cloud import vision\nfrom googleapiclient.discovery import build\nimport pandas as pd\nimport re\nimport requests\nimport cv2\nfrom pyzbar.pyzbar import decode\nimport openpyxl\n\napp = Flask(__name__)\n\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = ''\napp.config['MYSQL_DB'] = 'calgaryhacks'\napp.config['SECRET_KEY'] = 'MySecretKey'\napp.config[\"CLIENT_pdfs\"] = \"C:/Users/hp/PycharmProjects/SEC/calgaryhacks/venv\"\n\nCORS(app)\nmysql = MySQL(app)\n\n\ndef preprocess(text):\n str_punctuation = string.punctuation.replace('.', '')\n text = text.lower()\n text = re.sub(r'^https?://.[\\r\\n]', '', text, flags=re.MULTILINE)\n # text = text.translate(str.maketrans('', '', str_punctuation))\n text = \" \".join(filter(lambda x: x[0] != '[', text.split()))\n text = text.replace('\\n', '')\n text = text.replace('\\t', '')\n text = re.sub(' +', ' ', text)\n return text\n\n\ndef youtube(query):\n# api_key = \"AIzaSyDhs3vS_OwXut_S2AxXE1AOYid9Emd3iSo\"\n youtube = build('youtube', 'v3', developerKey=api_key)\n type(youtube)\n req = youtube.search().list(q=query, part='snippet')\n result = req.execute()\n\n titles = []\n links = []\n descriptions = []\n result1 = []\n\n for i in range(0, len(result['items'])):\n titles.append(result['items'][i]['snippet']['title'])\n links.append(\"https://www.youtube.com/watch?v=\" + result['items'][i]['id']['videoId'])\n descriptions.append(result['items'][i]['snippet']['description'])\n\n for i in range(0, len(result['items'])):\n result1.append({'title': titles[i], 'abstract': descriptions[i], 'url': links[i]})\n\n return result1\n\n\ndef search_papers(title, model, corpus_embeddings, papers):\n query_embedding = model.encode(title + '[SEP]', convert_to_tensor=True)\n search_hits = util.semantic_search(query_embedding, corpus_embeddings)\n search_hits = search_hits[0] # Get the hits for the first query\n result = []\n\n # print(\"Query:\", title)\n # print(\"\\nMost similar papers:\")\n\n for hit in search_hits:\n related_paper = papers[hit['corpus_id']]\n result.append(\n {'title': related_paper['title'], 'abstract': related_paper['abstract'], 'url': related_paper['url']})\n\n return result\n\n\ndef model_reader(text):\n with open(\"specter.sav\", \"rb\") as f:\n model = pickle.load(f)\n\n with open('bert_model.sav', \"rb\") as fi:\n model_1 = pickle.load(fi)\n\n bert_summary = ''.join(model_1(text, min_length=60))\n dataset_file = 'emnlp2016-2018.json' # all papers dataset\n\n if not os.path.exists(dataset_file):\n util.http_get(\"https://sbert.net/datasets/emnlp2016-2018.json\", dataset_file)\n\n with open(dataset_file) as fIn:\n papers = json.load(fIn)\n\n title = bert_summary # input to specter (Paper summary)\n corpus_embeddings = torch.load('tensor_research_papers.pt')\n context = search_papers(title=title, model=model, corpus_embeddings=corpus_embeddings, papers=papers)\n\n result = {'summary': bert_summary, 'all_papers_details': context}\n return result\n\"\"\"\n@app.route('/api/signup', methods=['POST'])\ndef signup(): # correct\n\n email = request.json['email']\n name = request.json['name']\n password = request.json['password']\n\n cur0 = mysql.connection.cursor()\n result = cur0.execute('Select * FROM 
USERCREDENTIALS')\n\n if result > 0:\n userDetails = cur0.fetchall()\n for user in userDetails:\n if user[0] == email:\n return (jsonify({'error': 'user already exists.'}), 500)\n\n mysql.connection.commit()\n cur0.close()\n\n cur = mysql.connection.cursor()\n \n , (\n email,\n name,\n password,\n ))\n mysql.connection.commit()\n cur.close()\n token = username + ':' + password\n return (jsonify({'token': token, 'name':name}), 201)\n\n@app.route('/api/login', methods=['GET'])\ndef login(): # correct\n\n email = request.args.get('email')\n password = request.args.get('password')\n\n cur = mysql.connection.cursor()\n result = cur.execute(\"Select * FROM USERCREDENTIALS\")\n\n if (result > 0):\n\n userDetails = cur.fetchall()\n for user in userDetails:\n if (user[0] == email and user[5] == password):\n token = user[4] + \":\" + password\n return jsonify({'token': token, 'name':name}), 200\n\n return jsonify({'error': 'No valid account found!'}), 401\n\n\"\"\"\n\n\n\n@app.route('/api/machinelearning', methods=['POST'])\ndef machinelearning():\n\n\n #path = \"nlp_video.mp4\"\n\n #path = request.json['path'] # input from client\n\n print(\"Hi there\")\n\n\n path = request.json['path']\n\n \"\"\"\n \n if 'file' not in request.files:\n return jsonify({'Error': 'No file has been passed!'}), 500\n\n else:\n file = request.files['file']\n file.save(os.path.join(app.config[\"CLIENT_pdfs\"], file.filename))\n path = file.filename\n\n \"\"\"\n\n byoutube = True # boolean to check if they want youtube recommendations\n bpapers = True # boolean to check if they want papers recommendations\n\n file_type = path.split('.', 1)\n\n if ((file_type[1].lower() == \"jpg\") or (file_type[1].lower() == \"jpeg\") or (file_type[1].lower() == \"png\")):\n\n print(\"1\")\n# os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'data-cycle-341817-e8ec2ea6c8ca.json'\n client = vision.ImageAnnotatorClient()\n\n file = open(path, 'rb')\n content = file.read() # read the entire file\n\n image = vision.Image(content = content)\n response = client.document_text_detection(image=image)\n docText = response.full_text_annotation.text\n\n print(docText)\n\n result={}\n\n if (bpapers):\n\n df = pd.read_excel (r'ML_data.xlsx')\n titles= df.Title.values\n abstracts = df.Abstract.values\n URLs = df.URL.values\n\n result[\"all_papers_details\"] = []\n\n for i in range(len(titles)):\n result[\"all_papers_details\"].append({'abstract':abstracts[i], 'title': titles[i], 'url': URLs[i]})\n\n result[\"summary\"] = \"\"\n\n if (byoutube):\n result['youtube'] = youtube(docText)\n\n # return jsonify({'text': docText})\n\n print(result)\n\n return jsonify(result)\n\n elif (file_type[1].lower() == \"mp3\" or file_type[1].lower() == \"mp4\"):\n\n f1 = open(path, 'rb')\n\n# apikey = 'P4L2u2NeULGbw5DkEQELCXph4119eFNo9XXKa4ku4qVA'\n url = 'https://api.au-syd.speech-to-text.watson.cloud.ibm.com/'\n authenticator = IAMAuthenticator(apikey)\n stt = SpeechToTextV1(authenticator=authenticator)\n stt.set_service_url(url)\n\n authenticator = IAMAuthenticator(apikey)\n stt = SpeechToTextV1(authenticator=authenticator)\n stt.set_service_url(url)\n\n if file_type[1].lower() == \"mp4\":\n transcript = \"\"\n\n video = mp.VideoFileClip(path)\n video.audio.write_audiofile('output.mp3')\n with open('output.mp3', 'rb') as fin:\n res = stt.recognize(audio=fin, content_type='audio/mp3', model='en-AU_NarrowbandModel',\n inactivity_timeout=30).get_result()\n text = [result['alternatives'][0]['transcript'].rstrip() + '.\\n' for result in res['results']]\n text = [para[0].title() 
+ para[1:] for para in text]\n            transcript = ''.join(text)\n\n            result={}\n\n            if (bpapers):\n\n                df = pd.read_excel(r'ML_data.xlsx')\n                titles = df.Title.values\n                abstracts = df.Abstract.values\n                URLs = df.URL.values\n\n                result[\"all_papers_details\"] = []\n\n                for i in range(len(titles)):\n                    result[\"all_papers_details\"].append({'abstract': abstracts[i], 'title': titles[i], 'url': URLs[i]})\n\n                result[\"summary\"] = \"\"\n\n            if (byoutube):\n                result['youtube'] = youtube(transcript)\n\n            return jsonify(result)\n\n    elif file_type[1].lower() == \"pdf\":\n\n        f1 = open(path, 'rb')\n\n        pdf = PyPDF2.PdfFileReader(f1)\n        num_pages = pdf.getNumPages()\n        text = ''\n        for i in range(num_pages):\n            page = pdf.getPage(i)\n            text = text + page.extractText()\n        text = preprocess(text)\n        name = \"References\"\n\n        print(text)\n\n        result = {}\n\n        if (bpapers):\n\n            df = pd.read_excel(r'ML_data.xlsx')\n            titles = df.Title.values\n            abstracts = df.Abstract.values\n            URLs = df.URL.values\n\n            result[\"all_papers_details\"] = []\n\n            for i in range(len(titles)):\n                result[\"all_papers_details\"].append({'abstract': abstracts[i], 'title': titles[i], 'url': URLs[i]})\n\n            result[\"summary\"] = \"\"\n\n        print(result['all_papers_details'][0]['url'])\n\n\n\n        if (byoutube):\n            result['youtube'] = youtube(text)\n\n        return jsonify(result)\n\n\n    else:\n\n        return jsonify(\"Error\"), 500\n\n\ndef BarcodeReader(image):\n    # read the image in numpy array using cv2\n    img = cv2.imread(image)\n\n    # Decode the barcode image\n    detectedBarcodes = decode(img)\n\n    # If not detected then print the message\n    if not detectedBarcodes:\n        print(\"Barcode Not Detected or your barcode is blank/corrupted!\")\n        return None  # nothing decoded; return None so the caller can handle it\n    else:\n\n        # Traverse through all the detected barcodes in image\n        for barcode in detectedBarcodes:\n\n            # Locate the barcode position in image\n            (x, y, w, h) = barcode.rect\n\n            # Put the rectangle in image using\n            # cv2 to highlight the barcode\n            cv2.rectangle(img, (x - 10, y - 10),\n                          (x + w + 10, y + h + 10),\n                          (255, 0, 0), 2)\n\n\n\n    # Return the payload of the last decoded barcode\n    return barcode.data\n\n\n@app.route('/api/healthProduct', methods=['POST'])\ndef healthProductParser():\n\n    path = request.json['path'] # input from client\n\n    \"\"\"\n    if 'file' not in request.files:\n        return jsonify({'Error': 'No file has been passed!'}), 500\n\n    else:\n        file = request.files['file']\n        file.save(os.path.join(app.config[\"CLIENT_pdfs\"], file.filename))\n        path = file.filename\n\n    \"\"\"\n\n    barcode = str(BarcodeReader(path))\n    barcode = re.sub(\"[^0-9]\", \"\", barcode)\n    barcode='27'+barcode\n    print(barcode)\n\n    url = 'https://world.openfoodfacts.org/api/v2/search?code=%'+barcode+'%27&fields=ingredients_analysis_tags,nutrient_levels_tags,allergens,ingredients_text_en,product_name,nutrition_grades'\n\n    # params = dict(\n    #     origin='Chicago,IL',\n    #     destination='Los+Angeles,CA',\n    #     waypoints='Joplin,MO|Oklahoma+City,OK',\n    #     sensor='false'\n    # )\n\n    resp = requests.get(url=url)\n    data = resp.json()  # Check the JSON Response Content documentation below\n    print(data[\"products\"][0])\n\n    allergens = data[\"products\"][0][\"allergens\"].split(\",\")\n\n    for i in range(len(allergens)):\n        allergens[i] = allergens[i].replace(\"en:\", \"\")\n\n    ingredients_analysis_tags = []\n    for i in range(len(data[\"products\"][0][\"ingredients_analysis_tags\"])):\n        ingredients_analysis_tags.append(data[\"products\"][0][\"ingredients_analysis_tags\"][i].replace(\"en:\", \"\"))\n\n    product_name = data[\"products\"][0][\"product_name\"]\n    nutrition_grades = 
data[\"products\"][0][\"nutrition_grades\"]\n\n nutrient_levels_tags = []\n for elem in data[\"products\"][0][\"nutrient_levels_tags\"]:\n elem = elem.replace(\"en:\", \"\")\n elem = elem.replace(\"-\", \" \")\n nutrient_levels_tags.append(elem)\n\n ingredients = data[\"products\"][0][\"ingredients_text_en\"].split(\", \")\n\n print(ingredients_analysis_tags)\n print(ingredients)\n print(nutrient_levels_tags)\n print(nutrition_grades)\n print(product_name)\n print(allergens)\n # result={'allergens': }:wq\n\n result = {}\n result['product_name'] = product_name\n result['ingredients'] = ingredients\n result['allergens'] = allergens\n result['nutrient_levels_tags'] = nutrient_levels_tags\n result['nutrition_grades'] = nutrition_grades\n result['ingredients_analysis_tags'] = ingredients_analysis_tags\n\n return jsonify(result)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"zeeshansalim1234/CalgaryHacks2022","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12830,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"16236962262","text":"# 프로그래머스 60057 (2020 카카오 신입공채)\n\nstring = input()\n\n\ndef solution(s):\n answer = len(s)\n\n # cut 자르는 단위\n for cut in range(1, len(s) // 2 + 1):\n new_str = \"\" # 압축하는 문자열 저장\n count = 1 # 반복된 개수\n cut_str = s[0:cut] # 시작 문자열\n for _next in range(cut, len(s), cut): # 자르는 단위만큼 점프\n # 시작 문자열이랑 같으면 cut\n if cut_str == s[_next:cut+_next]:\n count += 1 # 자른 개수 추가\n else: # 시작 문자열이랑 다르면 이전까지 자른 문자열 업데이트\n if count >= 2: # count가 1이 아니면 업데이트\n new_str += str(count) + cut_str # 문자열 새로\n else:\n new_str += cut_str\n cut_str = s[_next:cut + _next] # 자르는 문자열 새로 등록\n count = 1 # 다시 초기화\n\n # 남은 문자열 추가\n new_str += str(count) + cut_str if count >= 2 else cut_str\n answer = min(answer, len(new_str))\n\n return answer\n\n\nprint(solution(string))","repo_name":"PSYcode04/coding-practice","sub_path":"Implementation/[SY]이코테-9.py","file_name":"[SY]이코테-9.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"8355880864","text":"\"\"\"\n Python SDK for OpenFGA\n\n API version: 0.1\n Website: https://openfga.dev\n Documentation: https://openfga.dev/docs\n Support: https://discord.gg/8naAwJfWN6\n License: [Apache-2.0](https://github.com/openfga/python-sdk/blob/main/LICENSE)\n\n NOTE: This file was auto generated by OpenAPI Generator (https://openapi-generator.tech). 
DO NOT EDIT.\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom datetime import datetime, timedelta\nimport json\nimport typing\nimport urllib3\nfrom urllib.parse import urlparse\n\nfrom openfga_sdk.exceptions import FgaValidationException, ApiValueError, AuthenticationError\n\n\ndef none_or_empty(value):\n \"\"\"\n Return true if value is either none or empty string\n \"\"\"\n return value is None or value == ''\n\n\nclass CredentialConfiguration:\n \"\"\"\n Configuration for SDK credential\n :param client_id: Client ID which will be matched with client_secret\n :param client_secret: Client secret which will be matched with client_id\n :param api_token: Bearer token to be sent for authentication\n :param api_audience: API audience used for OAuth2\n :param api_issuer: API issuer used for OAuth2\n \"\"\"\n\n def __init__(\n self,\n client_id: typing.Optional[str] = None,\n client_secret: typing.Optional[str] = None,\n api_audience: typing.Optional[str] = None,\n api_issuer: typing.Optional[str] = None,\n api_token: typing.Optional[str] = None,\n ):\n self._client_id = client_id\n self._client_secret = client_secret\n self._api_audience = api_audience\n self._api_issuer = api_issuer\n self._api_token = api_token\n\n @property\n def client_id(self):\n \"\"\"\n Return the client id configured\n \"\"\"\n return self._client_id\n\n @client_id.setter\n def client_id(self, value):\n \"\"\"\n Update the client id\n \"\"\"\n self._client_id = value\n\n @property\n def client_secret(self):\n \"\"\"\n Return the client secret configured\n \"\"\"\n return self._client_secret\n\n @client_secret.setter\n def client_secret(self, value):\n \"\"\"\n Update the client secret\n \"\"\"\n self._client_secret = value\n\n @property\n def api_audience(self):\n \"\"\"\n Return the api audience configured\n \"\"\"\n return self._api_audience\n\n @api_audience.setter\n def api_audience(self, value):\n \"\"\"\n Update the api audience\n \"\"\"\n self._api_audience = value\n\n @property\n def api_issuer(self):\n \"\"\"\n Return the api issuer configured\n \"\"\"\n return self._api_issuer\n\n @api_issuer.setter\n def api_issuer(self, value):\n \"\"\"\n Update the api issuer\n \"\"\"\n self._api_issuer = value\n\n @property\n def api_token(self):\n \"\"\"\n Return the api token configured\n \"\"\"\n return self._api_token\n\n @api_token.setter\n def api_token(self, value):\n \"\"\"\n Update the api token\n \"\"\"\n self._api_token = value\n\n\nclass Credentials:\n \"\"\"\n Manage the credential for the API Client\n :param method: Type of authentication. Possible value is 'none', 'api_token' and 'client_credentials'. Default as 'none'.\n :param configuration: Credential configuration of type CredentialConfiguration. 
Default as None.\n \"\"\"\n\n def __init__(\n self,\n method: typing.Optional[str] = 'none',\n configuration: typing.Optional[CredentialConfiguration] = None,\n ):\n self._method = method\n self._configuration = configuration\n self._access_token = None\n self._access_expiry_time = None\n\n @property\n def method(self):\n \"\"\"\n Return the method configured\n \"\"\"\n return self._method\n\n @method.setter\n def method(self, value):\n \"\"\"\n Update the method\n \"\"\"\n self._method = value\n\n @property\n def configuration(self):\n \"\"\"\n Return the configuration\n \"\"\"\n return self._configuration\n\n @configuration.setter\n def configuration(self, value):\n \"\"\"\n Update the configuration\n \"\"\"\n self._configuration = value\n\n def validate_credentials_config(self):\n \"\"\"\n Check whether credentials configuration is valid\n \"\"\"\n if self.method != 'none' and self.method != 'api_token' and self.method != 'client_credentials':\n raise ApiValueError(\n 'method `{}` must be either `none`, `api_token` or `client_credentials`'.format(self.method))\n if self.method == 'api_token' and (self.configuration is None or none_or_empty(self.configuration.api_token)):\n raise ApiValueError(\n 'configuration `{}` api_token must be defined and non empty when method is api_token'.format(self.configuration))\n if self.method == 'client_credentials':\n if self.configuration is None or none_or_empty(self.configuration.client_id) or none_or_empty(self.configuration.client_secret) or none_or_empty(self.configuration.api_audience) or none_or_empty(self.configuration.api_issuer):\n raise ApiValueError(\n 'configuration `{}` requires client_id, client_secret, api_audience and api_issuer defined for client_credentials method.')\n # validate token issuer\n combined_url = 'https://' + self.configuration.api_issuer\n parsed_url = None\n try:\n parsed_url = urlparse(combined_url)\n except ValueError:\n raise ApiValueError('api_issuer `{}` is invalid'.format(\n self.configuration.api_issuer))\n if (parsed_url.netloc == ''):\n raise ApiValueError('api_issuer `{}` is invalid'.format(\n self.configuration.api_issuer))\n\n def _token_valid(self):\n \"\"\"\n Return whether token is valid\n \"\"\"\n if self._access_token is None or self._access_expiry_time is None:\n return False\n if self._access_expiry_time < datetime.now():\n return False\n return True\n\n async def _obtain_token(self, client):\n \"\"\"\n Perform OAuth2 and obtain token\n \"\"\"\n token_url = 'https://{}/oauth/token'.format(self.configuration.api_issuer)\n body = {\n 'client_id': self.configuration.client_id,\n 'client_secret': self.configuration.client_secret,\n 'audience': self.configuration.api_audience,\n 'grant_type': \"client_credentials\",\n }\n headers = urllib3.response.HTTPHeaderDict(\n {'Accept': 'application/json', 'Content-Type': 'application/json', 'User-Agent': 'openfga-sdk (python) 0.2.1'})\n raw_response = await client.POST(token_url, headers=headers, body=body)\n if 200 <= raw_response.status <= 299:\n try:\n api_response = json.loads(raw_response.data)\n except: # noqa: E722\n raise AuthenticationError(http_resp=raw_response)\n if not api_response.get('expires_in') or not api_response.get('access_token'):\n raise AuthenticationError(http_resp=raw_response)\n self._access_expiry_time = datetime.now() + timedelta(seconds=int(api_response.get('expires_in')))\n self._access_token = api_response.get('access_token')\n else:\n raise AuthenticationError(http_resp=raw_response)\n\n async def get_authentication_header(self, 
client):\n \"\"\"\n If configured, return the header for authentication\n \"\"\"\n if self._method == 'none':\n return {}\n if self._method == 'api_token':\n return {'Authorization': 'Bearer {}'.format(self.configuration.api_token)}\n # check to see token is valid\n if not self._token_valid():\n # In this case, the token is not valid, we need to get the refresh the token\n await self._obtain_token(client)\n return {'Authorization': 'Bearer {}'.format(self._access_token)}\n","repo_name":"openfga/python-sdk","sub_path":"openfga_sdk/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":8106,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"5"} +{"seq_id":"28407883493","text":"#!/usr/bin/env python\n\nfrom skimage.segmentation import slic\nfrom skimage.segmentation import mark_boundaries\nfrom skimage.util import img_as_float\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport cv2\n\n\nfrom skimage.color import rgb2gray\nfrom skimage.filters import sobel\nfrom skimage.segmentation import watershed\n\n\nwith_neightbor = 1\n\n\ndef ini():\n '''initialization\n\n Args:\n\n\n Returns:\n\n '''\n\n\n\n\n\ndef patches(Picture, method, patch_size_height, patch_size_width, height, width):\n '''Dividing an image into superpixels (patches)\n\n Args:\n picture: image\n\n Returns:\n\n '''\n\n\n segments_height = int (height / patch_size_height)\n segments_width = int (width / patch_size_width)\n number_of_segments = int (segments_height * segments_width)\n\n listPatches = []\n\n if method == \"SP_SLIC\":\n segments = slic(img_as_float(Picture), n_segments = number_of_segments, sigma = 5)\n\n if method == \"SP_CW\":\n gradient = sobel(rgb2gray(Picture))\n segments = watershed(gradient, markers=number_of_segments, compactness=0.0001)\n\n #pic.show(mark_boundaries(Picture, segments, (255,255,255))) # show segments\n\n\n original_image = np.copy(Picture)\n\n for i in np.unique(segments): # number of segments\n\n\n # draw mask\n image_mask = np.zeros(Picture.shape[:2], dtype = \"uint8\") # 3D to 1D\n w = np.where(segments == i)\n image_mask[w] = 255\n\n x,y,w,h = cv2.boundingRect(image_mask) # calculate rectangle\n\n segment_mask_in_original_image = cv2.bitwise_and(original_image,original_image,mask = image_mask)\n\n if (with_neightbor):\n # with neighborhood\n segment_cropped_with_neightbor = original_image[y:y+h,x:x+w]\n segment_cropped_with_neightbor_and_reqized = cv2.resize(segment_cropped_with_neightbor, (patch_size_width, patch_size_height)) # width, height\n\n listPatches.append(segment_cropped_with_neightbor_and_reqized)\n else:\n # without neighborhood\n segment_cropped_without_neightbor = segment_mask_in_original_image[y:y+h,x:x+w]\n segment_cropped_without_neightbor_and_reqized = cv2.resize(segment_cropped_without_neightbor, (patch_size_width, patch_size_height)) # width, height\n\n listPatches.append(segment_cropped_without_neightbor_and_reqized)\n\n\n return listPatches, segments\n\n\n\ndef logical_pixelmap(segments, prediction, height, width):\n '''Creates a logical pixelmap based on the prediction.\n\n Args:\n\n\n Returns:\n\n '''\n # logical Pixelmap: 0 seagrass 1 background\n logical_pixelmap = np.ones(([height,width]), dtype = \"uint8\")\n\n for i in range(len(prediction)): # go through prediction array\n if prediction[i]: # 1 = label for seagrass\n w = np.where(segments == i)\n logical_pixelmap[w] = 0 # 0 = seagrass in the logical pixel map\n return 
logical_pixelmap\n","repo_name":"EnviewFulda/LookingForSeagrass","sub_path":"classifier/superpixel.py","file_name":"superpixel.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"1178331503","text":"import pygame\r\n\r\n#Defines the shape of the board\r\nWIDTH, HEIGHT = 800, 800\r\nROWS, COLS = 8, 8\r\nSQUARE_SIZE = WIDTH//COLS\r\n\r\n#RGB\r\nRED = (255, 0, 0)\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nBLUE = (0, 0, 255)\r\nGREY = (128,128,128)\r\n\r\n#Defines the kings when a player kings a piece\r\nCROWN = pygame.transform.scale(pygame.image.load('assets/king.png'), (44, 25))","repo_name":"Nikolas-spec/python-checkers","sub_path":"python-checkers-main/checkers/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6855874999","text":"import time\n\n\ndef recursive(n):\n if n <= 1:\n return n\n return recursive(n - 1) + recursive(n - 2)\n\n\ndef memorization(n, memory):\n if n <= 1:\n memory[n] = n\n if memory[n] is None:\n memory[n] = memorization(n - 1, memory) + memorization(n - 2, memory)\n return memory[n]\n\n\ndef tabulation(n):\n f = [0] * (n + 1)\n f[1] = 1\n\n for i in range(2, n + 1):\n f[i] = f[i - 1] + f[i - 2]\n return f[n]\n\n\nif __name__ == '__main__':\n n = int(input())\n\n result = None\n\n start = time.time()\n result = recursive(n)\n end = time.time()\n print(\"Recursive output: \" + str(result) + \", time taken: \" + str(end - start))\n\n memory = [None] * (n + 1)\n start = time.time()\n result = memorization(n, memory)\n end = time.time()\n print(\"Memorization output: \" + str(result) + \", time taken: \" + str(end - start))\n\n start = time.time()\n result = tabulation(n)\n end = time.time()\n print(\"Tabulation output: \" + str(result) + \", time taken: \" + str(end - start))\n","repo_name":"rahul38888/coding_practice","sub_path":"src/practices/practice/fibonacci/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"15827043917","text":"from time import time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom deepalign import Dataset\nfrom deepalign.alignments.bibs import bibs_step\nfrom deepalign.alignments.bibs import build_alignments\nfrom deepalign.alignments.bibs import build_beams\nfrom deepalign.alignments.bibs import get_alignments\nfrom deepalign.anomalydetection import AnomalyDetectionResult\nfrom deepalign.anomalydetection import Binarizer\nfrom deepalign.enums import AttributeType\nfrom deepalign.enums import FeatureType\nfrom deepalign.enums import Heuristic\nfrom deepalign.enums import Strategy\nfrom deepalign.utils import align\nfrom deepalign.utils import gather\nfrom deepalign.utils import log_probs\nfrom deepalign.utils import reverse\nfrom deepalign.utils import to_targets\n\n\ndef binet_scores_fn(features, predictions):\n sums = [1 - np.cumsum(np.sort(p, -1), -1) for p in predictions]\n indices = [(np.argsort(p, -1) == features[:, :, i:i + 1]).argmax(-1) for i, p in enumerate(predictions)]\n scores = np.zeros(features.shape)\n for (i, j, k), f in np.ndenumerate(features):\n if f != 0 and k < len(predictions):\n scores[i, j, k] = sums[k][i, j][indices[k][i, j]]\n return scores\n\n\nclass BINet(tf.keras.Model):\n abbreviation = 'binet'\n name = 'BINet'\n\n def 
__init__(self,\n dataset,\n latent_dim=None,\n use_case_attributes=None,\n use_event_attributes=None,\n use_present_activity=None,\n use_present_attributes=None,\n use_attention=None):\n super(BINet, self).__init__()\n\n # Validate parameters\n if latent_dim is None:\n latent_dim = min(int(dataset.max_len * 10), 256)\n if use_event_attributes and dataset.num_attributes == 1:\n use_event_attributes = False\n use_case_attributes = False\n if use_present_activity and dataset.num_attributes == 1:\n use_present_activity = False\n if use_present_attributes and dataset.num_attributes == 1:\n use_present_attributes = False\n\n # Parameters\n self.latent_dim = latent_dim\n self.use_case_attributes = use_case_attributes\n self.use_event_attributes = use_event_attributes\n self.use_present_activity = use_present_activity\n self.use_present_attributes = use_present_attributes\n self.use_attention = use_attention\n\n # Single layers\n self.fc = None\n if self.use_case_attributes:\n self.fc = tf.keras.Sequential([\n tf.keras.layers.Dense(latent_dim // 8),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(latent_dim, activation='linear')\n ])\n\n self.rnn = tf.keras.layers.GRU(latent_dim, return_sequences=True, return_state=True)\n\n # Layer lists\n self.fc_inputs = []\n self.rnn_inputs = []\n self.outs = []\n\n inputs = zip(dataset.attribute_dims, dataset.attribute_keys, dataset.attribute_types, dataset.feature_types)\n for dim, key, t, feature_type in inputs:\n if t == AttributeType.CATEGORICAL:\n voc_size = int(dim + 1) # we start at 1, 0 is padding\n emb_dim = np.clip(voc_size // 10, 2, 10)\n embed = tf.keras.layers.Embedding(input_dim=voc_size, output_dim=emb_dim, mask_zero=True)\n else:\n embed = tf.keras.layers.Dense(1, activation='linear')\n\n if feature_type == FeatureType.CASE:\n self.fc_inputs.append(embed)\n else:\n self.rnn_inputs.append(embed)\n out = tf.keras.layers.Dense(dim + 1, activation='softmax')\n self.outs.append(out)\n\n def call(self, inputs, training=False, return_state=False, initial_state=None):\n if not isinstance(inputs, list):\n inputs = [inputs]\n\n split = len(self.rnn_inputs)\n\n rnn_x = inputs[:split]\n fc_x = inputs[split:]\n\n fc_embeddings = []\n for x, input_layer in zip(fc_x, self.fc_inputs):\n if isinstance(input_layer, tf.keras.layers.Dense):\n x = x[:, None]\n x = input_layer(x)\n fc_embeddings.append(x)\n\n if len(fc_embeddings) > 0:\n if len(fc_embeddings) > 1:\n fc_embeddings = tf.concat(fc_embeddings, axis=-1)\n else:\n fc_embeddings = fc_embeddings[0]\n\n fc_output = None\n if not isinstance(fc_embeddings, list):\n fc_output = self.fc(fc_embeddings)\n\n rnn_embeddings = []\n for x, input_layer in zip(rnn_x, self.rnn_inputs):\n x = input_layer(x)\n rnn_embeddings.append(x)\n\n if len(rnn_embeddings) > 0:\n if len(rnn_embeddings) > 1:\n rnn_embeddings = tf.concat(rnn_embeddings, axis=-1)\n else:\n rnn_embeddings = rnn_embeddings[0]\n\n if initial_state is not None:\n rnn, h = self.rnn(rnn_embeddings, initial_state=initial_state)\n elif fc_output is not None:\n if len(fc_output.shape) == 3:\n fc_output = fc_output[:, 0]\n rnn, h = self.rnn(rnn_embeddings, initial_state=fc_output)\n else:\n rnn, h = self.rnn(rnn_embeddings)\n\n outputs = []\n for i, out in enumerate(self.outs):\n x = rnn\n if i > 0:\n if self.use_present_attributes:\n x = tf.concat([x, *[tf.pad(e[:, 1:x.shape[1]], [(0, 0), (0, 1), (0, 0)], 'constant', 0)\n for j, e in enumerate(rnn_embeddings) if i != j]], axis=-1)\n elif self.use_present_activity:\n x = tf.concat([x, 
tf.pad(rnn_embeddings[0][:, 1:x.shape[1]], [(0, 0), (0, 1), (0, 0)], 'constant', 0)],\n axis=-1)\n x = out(x)\n outputs.append(x)\n\n if return_state:\n return outputs, h\n\n return outputs\n\n def score(self, features, predictions):\n for i, prediction in enumerate(predictions):\n p = np.pad(prediction[:, :-1], ((0, 0), (1, 0), (0, 0)), mode='constant')\n p[:, 0, features[i][0, 0]] = 1\n predictions[i] = p\n return binet_scores_fn(np.dstack(features), predictions)\n\n def detect(self, dataset):\n if isinstance(dataset, Dataset):\n features = dataset.features\n else:\n features = dataset\n predictions = self.predict(features)\n if not isinstance(predictions, list):\n predictions = [predictions]\n return AnomalyDetectionResult(scores=self.score(features, predictions), predictions=predictions)\n\n\nclass ConfNet:\n abbreviation = 'confnet'\n name = 'ConfNet'\n\n def __init__(self, dataset, latent_dim=None, use_case_attributes=None, use_event_attributes=None):\n super(ConfNet, self).__init__()\n\n self.use_case_attributes = use_case_attributes\n self.use_event_attributes = use_event_attributes\n\n self.net_f = BINet(dataset=dataset,\n latent_dim=latent_dim,\n use_case_attributes=use_case_attributes,\n use_event_attributes=use_event_attributes)\n self.net_b = BINet(dataset=dataset,\n latent_dim=latent_dim,\n use_case_attributes=use_case_attributes,\n use_event_attributes=use_event_attributes)\n\n self.net_f.compile(tf.keras.optimizers.Adam(), 'sparse_categorical_crossentropy')\n self.net_b.compile(tf.keras.optimizers.Adam(), 'sparse_categorical_crossentropy')\n\n @property\n def identifier(self):\n return f'{self.abbreviation}{int(self.use_event_attributes)}{int(self.use_case_attributes)}'\n\n def predict(self, inputs_f, inputs_b):\n out_f = self.net_f.predict(inputs_f)\n out_b = self.net_b.predict(inputs_b)\n if not isinstance(out_f, list):\n out_f = [out_f]\n if not isinstance(out_b, list):\n out_b = [out_b]\n return out_f, out_b\n\n def fit(self, dataset, batch_size=32, **kwargs):\n dataset.reverse(False)\n h1 = self.net_f.fit(dataset.features, dataset.targets, batch_size=batch_size, **kwargs)\n dataset.reverse(True)\n h2 = self.net_b.fit(dataset.features, dataset.targets, batch_size=batch_size, **kwargs)\n return h1, h2\n\n def save(self, file_name):\n self.net_f.save_weights(file_name + '_forward.h5')\n self.net_b.save_weights(file_name + '_backward.h5')\n\n def load(self, file_name):\n self.net_f([tf.ones(i) for i in ([(1, 1)] * len(self.net_f.rnn_inputs) + [(1,)] * len(self.net_f.fc_inputs))])\n self.net_f.load_weights(file_name + '_forward.h5')\n self.net_b([tf.ones(i) for i in ([(1, 1)] * len(self.net_b.rnn_inputs) + [(1,)] * len(self.net_b.fc_inputs))])\n self.net_b.load_weights(file_name + '_backward.h5')\n\n def batch_align(self, dataset, batch_size=5000, detailed=False, **kwargs):\n alignments = []\n start_beams = []\n start_probs = []\n beams = []\n probs = []\n costs = []\n\n for x, y in dataset.to_tf_dataset().batch(batch_size):\n if not isinstance(x, tuple):\n x = [x]\n a, b, c, sb, sp, p, _, _ = self.align([_x.numpy() for _x in x], detailed=True, **kwargs)\n\n alignments.append(a)\n start_beams.append(sb)\n start_probs.append(sp)\n beams.append(b)\n probs.append(p)\n costs.append(c)\n\n alignments = np.concatenate(alignments)\n start_beams = np.concatenate(start_beams)\n start_probs = np.concatenate(start_probs)\n beams = np.concatenate(beams)\n probs = np.concatenate(probs)\n costs = np.concatenate(costs)\n\n if detailed:\n return alignments, start_beams, start_probs, beams, 
probs, costs\n\n return alignments, beams, costs\n\n def align(self, dataset, k=5, hot_start=True, steps=10, delete_max=3, detailed=False):\n i = 0\n converged = False\n go_backwards = False\n start_probs = None\n\n if isinstance(dataset, Dataset):\n dataset.reverse(False)\n x = dataset.features\n else:\n x = dataset\n\n # Prepare data\n x_case = [_x for _x in x if len(_x.shape) == 1]\n x = [np.pad(_x, ((0, 0), (0, steps + 1))) for _x in x if len(_x.shape) == 2] # Create space for inserts\n start_beams = np.copy(x[0])\n alive = np.ones(x[0].shape[0], dtype=bool)\n x_p = np.zeros(x[0].shape[0])\n\n # Alignments\n inserts = np.zeros_like(x[0])\n deletes = np.zeros_like(x[0])\n\n # Convergence\n last_beams_y = None\n\n for _ in range(steps):\n if converged:\n print('Converged')\n break\n\n # Keep time for progress output\n start_time = time()\n\n # Forwards data\n x_f = [_x[alive] for _x in x]\n y_f = [to_targets(_x) for _x in x_f]\n m_f = y_f[0] != 0\n\n # Backwards data\n reverse_mask = x[0][alive] != 0\n x_b = [reverse(_x[alive], reverse_mask) for _x in x]\n y_b = [to_targets(_x) for _x in x_b]\n m_b = y_b[0] != 0\n\n # RNN predictions\n _x_case = [_x[alive] for _x in x_case]\n y_pred_f, y_pred_b = self.predict(x_f + _x_case, x_b + _x_case)\n\n y_probs_f, cum_y_probs_f = log_probs(y_f[0], y_pred_f[0], m_f)\n y_probs_b, cum_y_probs_b = log_probs(y_b[0], y_pred_b[0], m_b)\n\n # Reverse backwards\n y_pred_b = [reverse(_y, reverse_mask) for _y in y_pred_b]\n cum_y_probs_b = reverse(cum_y_probs_b, reverse_mask)\n\n # Hot start\n if i == 0 and hot_start:\n scores = self.net_f.score(x_f, [np.copy(f) for f in y_pred_f])\n result = AnomalyDetectionResult(scores=scores, predictions=y_pred_f)\n b_f = Binarizer(result, ~m_f[:, :, None], np.dstack(x_f))\n detection_f = b_f.binarize(heuristic=Heuristic.LP_MEAN, strategy=Strategy.ATTRIBUTE)\n\n # Original probs\n if i == 0:\n start_probs = np.atleast_3d(cum_y_probs_f) + align(cum_y_probs_b, 1)\n start_probs = start_probs[:, :, 0].sum(-1) / ((~(x_f[0] == 0)).sum(-1) - 1) # -1 to remove end symbol\n\n # BiBS step\n beams_p, beams_y, positions, p, y = bibs_step(x_f[0],\n np.log(y_pred_f[0]), cum_y_probs_f,\n np.log(y_pred_b[0]), cum_y_probs_b,\n inserts[alive] > 0,\n k=k, go_backwards=go_backwards, delete_max=delete_max)\n\n # Beams for event attributes\n beams_y = [beams_y]\n for n, (_y_f, _y_b) in enumerate(zip(y_pred_f[1:], y_pred_b[1:])):\n _y = (_y_f * align(_y_b, 1)).argmax(-1)\n _beams_y = gather(_y, positions - 1)\n _beams_y[beams_y[0] < 0] = beams_y[0][beams_y[0] < 0]\n beams_y.append(_beams_y)\n\n # Prepare old x\n if i == 0:\n # In the first run we have to repeat the original cases to match the dimension of `num_cases * k`\n x = [np.repeat(_x, k, 0) for _x in x]\n x_case = [np.repeat(_x, k) for _x in x_case]\n x_f = [np.repeat(_x, k, 0) for _x in x_f]\n x_p = np.repeat(x_p, k, 0)\n inserts = np.repeat(inserts, k, 0)\n deletes = np.repeat(deletes, k, 0)\n alive = np.repeat(alive, k, 0)\n else:\n # Get top-k beams for all cases. 
There are `k * k` beams available.\n shape = (alive.sum() // k, beams_p.shape[0] // (alive.sum() // k) * k)\n costs = (inserts[alive] > 0).sum(-1) + (deletes[alive] > 0).sum(-1)\n cost_y = np.zeros_like(beams_y[0])\n cost_y[beams_y[0] > 0] = 1\n cost_y[beams_y[0] < 0] = -beams_y[0][beams_y[0] < 0]\n cost_y[beams_y[0] == -42] = 0\n costs = costs[:, None] + cost_y\n\n idx = np.lexsort((-costs.reshape(shape), beams_p.reshape(shape)), axis=-1)[:, ::-1][:, :k]\n x_idx = (np.zeros_like(beams_p, dtype=int) + np.arange(alive.sum())[:, None]).reshape(shape)\n x_idx = gather(x_idx, idx).reshape(alive.sum())\n\n beams_y = [gather(_y.reshape(shape), idx) for _y in beams_y]\n positions = gather(positions.reshape(shape), idx)\n beams_p = gather(beams_p.reshape(shape), idx)\n x_f = [_x[x_idx] for _x in x_f]\n inserts[alive] = inserts[alive][x_idx]\n deletes[alive] = deletes[alive][x_idx]\n\n # Update probs\n x_p[alive] = beams_p.ravel()\n\n # New alignments\n inserts[alive], deletes[alive] = build_alignments(inserts[alive], deletes[alive],\n beams_y[0], positions, i + 1)\n\n # Build new x\n for attr_i in range(len(x_f)):\n x[attr_i][alive] = build_beams(x_f[attr_i], np.copy(beams_y[attr_i]), positions)\n\n # Cases with all beams indicating 'do nothing' are finished\n finished = np.all(beams_y[0] == -42, -1)\n if i == 0 and hot_start:\n finished = np.logical_or(finished, np.all(detection_f[:, :, 0] == 0, -1))\n if last_beams_y is not None and beams_y[0].shape[0] == last_beams_y.shape[0]:\n finished = np.logical_or(finished, np.all(beams_y[0] == last_beams_y, -1))\n alive[alive] = np.repeat(~finished, k, 0)\n last_beams_y = beams_y[0]\n\n # Print progress\n print(\n f'Step {i + 1} {\"←\" if go_backwards else \"→\"} {time() - start_time}s {x[0].shape} finished={(~alive).sum() // k}')\n\n # Go the other way the next step\n go_backwards = not go_backwards\n\n # Converged\n converged = alive.sum() == 0\n\n # i++\n i += 1\n\n shape = (x[0].shape[0] // k, k, x[0].shape[1])\n beams = x[0].reshape(shape)\n inserts = inserts.reshape(shape)\n deletes = deletes.reshape(shape)\n costs = (inserts > 0).sum(-1) + (deletes > 0).sum(-1)\n probs = x_p.reshape((x[0].shape[0] // k, k))\n\n # Calculate alignments\n alignments = get_alignments(start_beams, beams, inserts, deletes)\n\n if detailed:\n return alignments, beams, costs, start_beams, start_probs, probs, inserts, deletes\n\n return alignments, beams, costs\n","repo_name":"tnolle/deepalign","sub_path":"deepalign/alignments/confnet.py","file_name":"confnet.py","file_ext":"py","file_size_in_byte":17005,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"40136829468","text":"import os\nimport unittest\n\nfrom azure.cli.testsdk.scenario_tests import AllowLargeResponse\nfrom azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)\nfrom .recording_processors import KeyReplacer\n\n\nTEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))\n\n\nclass WebpubsubScenarioTest(ScenarioTest):\n\n def __init__(self, method_name):\n super(WebpubsubScenarioTest, self).__init__(\n method_name, recording_processors=[KeyReplacer()]\n )\n\n @ResourceGroupPreparer(random_name_length=20)\n def test_webpubsub_replica(self, resource_group):\n tags_key = 'key'\n tags_val = 'value'\n updated_tags_val = 'value2'\n replica_name = 'clitestReplica'\n replica_location = 'westus'\n\n self.kwargs.update({\n 'name': self.create_random_name('webpubsub', 16),\n 'sku': 'Premium_P1',\n 'location': 'eastus',\n 'tags': 
'{}={}'.format(tags_key, tags_val),\n 'unit_count': 1,\n 'updated_tags': '{}={}'.format(tags_key, updated_tags_val),\n 'replica_name': replica_name,\n 'replica_location': replica_location\n })\n\n # Test create primary\n self.cmd('webpubsub create -g {rg} -n {name} --tags {tags} -l {location} --sku {sku} --unit-count {unit_count}', checks=[\n self.check('name', '{name}'),\n self.check('location', '{location}'),\n self.check('provisioningState', 'Succeeded'),\n self.check('sku.name', '{sku}'),\n self.check('sku.capacity', '{unit_count}'),\n self.check('tags.{}'.format(tags_key), tags_val),\n self.exists('hostName'),\n self.exists('publicPort'),\n self.exists('serverPort'),\n self.exists('externalIp'),\n ])\n\n # test create replica\n self.cmd('az webpubsub replica create -n {name} --replica-name {replica_name} -g {rg} --sku {sku} --unit-count {unit_count} -l {replica_location} --tags {tags}', checks=[\n self.check('name', '{replica_name}'),\n self.check('location', '{replica_location}'),\n self.check('provisioningState', 'Succeeded'),\n self.check('sku.name', '{sku}'),\n self.check('tags.{}'.format(tags_key), tags_val),\n ])\n\n # test show replica\n self.cmd('az webpubsub replica show -n {name} --replica-name {replica_name} -g {rg}', checks=[\n self.check('name', '{replica_name}'),\n self.check('location', '{replica_location}'),\n self.check('provisioningState', 'Succeeded'),\n self.check('sku.name', '{sku}'),\n self.check('tags.{}'.format(tags_key), tags_val),\n ])\n\n # test list replica\n self.cmd('az webpubsub replica list -n {name} -g {rg}', checks=[\n self.check('[0].name', '{replica_name}'),\n self.check('[0].location', '{replica_location}'),\n self.check('[0].provisioningState', 'Succeeded'),\n self.check('[0].sku.name', '{sku}'),\n self.check('[0].tags.{}'.format(tags_key), tags_val),\n ])\n\n # test remove replica\n self.cmd('az webpubsub replica delete -n {name} --replica-name {replica_name} -g {rg}')\n","repo_name":"Azure/azure-cli-extensions","sub_path":"src/webpubsub/azext_webpubsub/tests/latest/test_webpubsub_replica.py","file_name":"test_webpubsub_replica.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":350,"dataset":"github-code","pt":"5"} +{"seq_id":"9056397221","text":"#Service factory register\nfrom .clients import *\nfrom django.conf import settings\nfrom api.models import UserResult\nimport datetime\n\nclass ServiceFactory:\n def __init__(self):\n self.__services = {}\n\n def register(self, name, service_class):\n # Maybe add some validation\n self.__services[name] = service_class\n\n def create(self, name, *args, **kwargs):\n # Maybe add some error handling or fallbacks\n return self.__services[name](*args, **kwargs)\n\nfactory = ServiceFactory()\n\n\nclass BaseService:\n def get(params):\n pass\n\nclass AbstractService:\n def save(request):\n pass\n\nclass JwtService(BaseService):\n def get(self, params = {}):\n auth = AuthClient()\n auth.connect(settings.ON_AUTH_SERVER)\n auth.post(\"/api/login\", {\"username\" : settings.ON_USERNAME , \"password\" : settings.ON_PASSWORD})\n token = auth.getData()\n auth.close()\n #JWT needed for oncom.be niether auth.mehub\n jwt = JwtClient()\n jwt.connect(settings.ON_AUTH_SERVER)\n jwt.setHeader(\"Authorization\", \"Bearer \" + token)\n jwt.get(\"/api/jwt\", {\"hostUid\" : settings.ON_HOSTUID})\n jwt.close()\n return jwt.getData()\n\nclass GenericService(BaseService):\n def __init__(self):\n self.general = STRGETClient()\n self.general.connect(settings.ON_BE_SERVER)\n 
jwtService = JwtService()\n self.general.setHeader(\"Authorization\", \"Bearer \" + jwtService.get())\n\n def get(self, params = {}):\n pass\n\nclass UidService(GenericService):\n def get(self, params = {}):\n self.general.get(\"/api/profile/get-uid-by-iin\", params)\n self.general.close()\n return self.general.getData()\n\n\nclass GeneralService(GenericService):\n def get(self, params = {}):\n uidService = UidService()\n params = {\n \"uid\" : uidService.get(params)\n }\n self.general.get(\"/api/profile/get-general\", params)\n self.general.close()\n return self.general.getData()\n\nclass CommonService(GenericService):\n def get(self, params = {}):\n uidService = UidService()\n params = {\n \"uid\" : uidService.get(params)\n }\n self.general.get(\"/api/profile/get-common\", params)\n self.general.close()\n return self.general.getData()\n\nclass DiagnoseService(GenericService):\n def get(self, params = {}):\n uidService = UidService()\n params = {\n \"person-uid\" : uidService.get(params)\n }\n self.general.get(\"/api/profile/get-diagnosis\", params)\n self.general.close()\n return self.general.getData()\n\nclass TreatmentCourseService(GenericService):\n def get(self, params = {}):\n uidService = UidService()\n params = {\n \"uid\" : uidService.get(params)\n }\n self.general.get(\"/api/treatment-course/list-by-patient\", params)\n self.general.close()\n return self.general.getData()\n\n\nclass TreatmentService(GenericService):\n def get(self, params = {}):\n course = TreatmentCourseService()\n data = json.loads(course.get(params))\n params = {\n \"uid\" : data[0]['uid']\n }\n self.general.get(\"/api/treatment-course/get-main\", params)\n self.general.close()\n return self.general.getData()\n\n\nclass UserResultService(AbstractService):\n def save(self, request):\n data = request.data;\n \n print(data)\n\n for item in data:\n result = UserResult.objects.filter(diagnose = item['name']).first()\n if result is None:\n userResult = UserResult.objects.create(diagnose=item['name'], value=item['value'], user_id=request.user.id)\n userResult.save()\n return userResult\n else:\n result.value = item['value']\n result.modified = datetime.datetime.now()\n result.save()\n return result\n\n return None\n\n","repo_name":"jokermt235/oncom.server","sub_path":"api/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23172551506","text":"# server.py\n\"\"\"\nTitle: Flask server\nAuthor: Yousuf Mohammed\nAuthor: Alex Kim\nDate-Created: 2022-11-01\n\"\"\"\n\n# --- IMPORTS --- #\nfrom flask import *\nimport json\nfrom B_backend.highscore_dao import *\n\n\n# --- FLASK APP --- #\napp = Flask(__name__)\n\n\n@app.route('/insertHighscore', methods=['POST'])\ndef insert_highscore():\n \"\"\"\n Inserts a new player highscore into the server\n \"\"\"\n request_payload = json.loads(request.form['data'])\n player_id = insertHighscore(request_payload)\n response = jsonify({'player_id': player_id})\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\n@app.route('/getHighscores', methods=['GET'])\ndef get_highscores():\n \"\"\"\n gets all highscores from the server\n \"\"\"\n response = getHighscores()\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef highscore():\n \"\"\"\n Creates the main website server\n \"\"\"\n return render_template('highscore.html')\n\n\n# --- MAIN PROGRAM CODE 
--- #\nif __name__ == \"__main__\":\n app.run(port=51000)\n","repo_name":"AlexKimm1729/CSE3910-Final-Project","sub_path":"cse3910-final_project/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5775420452","text":"import click\nimport pika\n\n\n@click.command()\n@click.option(\n \"--host\",\n type=str,\n required=True\n)\n@click.option(\n \"--topic\",\n type=str,\n required=True\n)\n@click.option(\n \"--message\",\n type=str,\n required=True\n)\ndef publish(host, topic, message):\n pika_param = pika.ConnectionParameters(host)\n properties = pika.BasicProperties(delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE)\n\n with pika.BlockingConnection(pika_param) as connection:\n channel = connection.channel()\n channel.queue_declare(queue=topic, durable=True)\n\n channel.basic_publish(exchange=\"\", routing_key=topic, body=message, properties=properties)\n\n\nif __name__ == \"__main__\":\n \n publish()\n","repo_name":"yolo-kiyoshi/rabbitmq-sample","sub_path":"mq/publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"30292704624","text":"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef word_counts(word,words):\r\n ans=0\r\n for w in words:\r\n if(w==word):\r\n ans+=1\r\n return ans\r\n\r\n\r\n\r\ndef top10(L):\r\n newL=L.copy()\r\n newL.sort(reverse=True)\r\n ans=[]\r\n for i in range(0,10):\r\n ans.append(newL[i])\r\n print(ans)\r\n return ans\r\n\r\n\r\n\r\ndef top10words(words):\r\n ans=[]\r\n sim_words = []\r\n for w in words:\r\n if w not in sim_words:\r\n sim_words.append(w)\r\n\r\n wordCount={}\r\n for w in sim_words:\r\n wordCount[w]=word_counts(w,words)\r\n\r\n Lcount=top10(list(wordCount.values()))\r\n\r\n for key in wordCount:\r\n for n in Lcount:\r\n if(wordCount[key]==n):\r\n ans.append(key)\r\n Lcount.remove(n)\r\n break\r\n return ans\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n words=open(\"Notes_Underground.txt\", encoding=\"latin-1\").read().split()\r\n book=open(\"Pride and Prejudice.txt\", encoding=\"latin-1\").read().split()\r\n print(top10words(book))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"ThomasCulham/ESC-180---Intro-to-Computer-Programming-Labs","sub_path":"Lab 9/Lab 9.py","file_name":"Lab 9.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2260088197","text":"#\n# @lc app=leetcode.cn id=107 lang=python3\n#\n# [107] 二叉树的层序遍历 II\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def levelOrderBottom(self, root: Optional[TreeNode]) -> List[List[int]]:\n if not root:\n return []\n ret = []\n level = [root]\n while len(level):\n cells = []\n for _ in range(len(level)):\n node = level.pop(0)\n cells.append(node.val)\n if node.left:\n level.append(node.left)\n if node.right:\n level.append(node.right)\n ret.insert(0, cells)\n return ret\n# @lc 
code=end\n\n","repo_name":"skylinety/Blog","sub_path":"Demos/Major/DataStructure_Algorithm/Leetcode/107.二叉树的层序遍历-ii.py","file_name":"107.二叉树的层序遍历-ii.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"8145646792","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport joblib\r\n\r\ndata = pd.read_csv('myPreprocessed.csv')\r\n\r\n# Encoding means - categorical values mapped to integer values\r\nvenue_encode = LabelEncoder()\r\nteam_encode = LabelEncoder()\r\n\r\ndata['venue'] = venue_encode.fit_transform(data['venue'])\r\ndata['batting_team'] = team_encode.fit_transform(data['batting_team'])\r\ndata['bowling_team'] = team_encode.fit_transform(data['bowling_team'])\r\n\r\nanArray = data.to_numpy()\r\n\r\n# print(anArray[0][0])\r\n# print(anArray[0][1])\r\n# print(anArray[0][2])\r\n# print(anArray[0][3])\r\n# print(anArray[0][4])\r\n\r\nX,y = anArray[:,:4], anArray[:,4]\r\n# X -> all row, 0 to 3 col\r\n# y -> all row, 4th col\r\n\r\nX = np.concatenate((np.eye(42)[anArray[:,0]],\r\n np.eye(2)[anArray[:,1] -1 ],\r\n np.eye(15)[anArray[:,2]],\r\n np.eye(15)[anArray[:,3]],\r\n ), axis = 1)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)\r\n\r\nlinearRegressor = LinearRegression()\r\n\r\nlinearRegressor.fit(X_train, y_train)\r\n\r\njoblib.dump(linearRegressor, 'regression_model.joblib')\r\njoblib.dump(venue_encode, 'venue_encoder.joblib')\r\njoblib.dump(team_encode, 'team_encoder.joblib')\r\n\r\nprint(linearRegressor.score(X_test, y_test))","repo_name":"ngandhi369/IITM-Cricket-Hackathon-Challenge","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21184501717","text":"import argparse\nimport os.path\n\nfrom instanceseg.utils import configs\nfrom scripts.configurations.sampler_cfg_registry import sampler_cfgs\nfrom instanceseg.datasets import dataset_registry\n\n\ndef parse_args_test(replacement_args_list=None):\n parser = get_parser_test()\n args, argv = parser.parse_known_args(replacement_args_list) \\\n if replacement_args_list is not None else parser.parse_known_args()\n\n # Config override parser\n assert args.dataset is not None, ValueError('dataset argument must not be None. '\n 'Run with --help for more details.')\n parent_dir = os.path.split(args.logdir.rstrip('/'))[0]\n dataset_name_from_logdir = os.path.split(os.path.split(parent_dir)[0])[1]\n assert args.dataset == dataset_name_from_logdir, 'Dataset given: {}. 
I expected this dataset from logdir: ' \\\n '{}'.format(args.dataset,\n os.path.split(os.path.split(args.logdir)[0])[1])\n\n override_cfg_args = get_override_cfg(argv, args.dataset, args.sampler)\n return args, override_cfg_args\n\n\ndef get_override_cfg(argv, dataset, sampler=None):\n cfg_default = dataset_registry.REGISTRY[dataset].default_config\n cfg_override_parser = configs.get_cfg_override_parser(cfg_default)\n bad_args = [arg for arg in argv[::2] if arg.replace('-', '') not in cfg_default.keys()]\n assert len(bad_args) == 0, cfg_override_parser.error('bad_args: {}'.format(bad_args))\n if sampler is not None:\n argv += ['--sampler', sampler]\n # Parse with list of options\n override_cfg_args, leftovers = cfg_override_parser.parse_known_args(argv)\n assert len(leftovers) == 0, ValueError('args not recognized: {}'.format(leftovers))\n # apparently this is failing, so I'm going to have to screen this on my own:\n # Remove options from namespace that weren't defined\n unused_keys = [k for k in list(override_cfg_args.__dict__.keys()) if\n '--' + k not in argv and '-' + k not in argv]\n for k in unused_keys:\n delattr(override_cfg_args, k)\n postprocess_test_args(override_cfg_args)\n return override_cfg_args\n\n\ndef parse_args_train(replacement_args_list=None):\n # Get initial parser\n parser = get_parser_train()\n args, argv = parser.parse_known_args(replacement_args_list) \\\n if replacement_args_list is not None else parser.parse_known_args()\n\n # Config override parser\n assert args.dataset is not None, ValueError('dataset argument must not be None. '\n 'Run with --help for more details.')\n cfg_default = dataset_registry.REGISTRY[args.dataset].default_config\n cfg_override_parser = configs.get_cfg_override_parser(cfg_default)\n\n bad_args = [arg for arg in argv[::2] if arg.replace('-', '') not in cfg_default.keys()]\n assert len(bad_args) == 0, cfg_override_parser.error('bad_args: {}'.format(bad_args))\n if args.sampler is not None:\n argv += ['--sampler', args.sampler]\n # Parse with list of options\n override_cfg_args, leftovers = cfg_override_parser.parse_known_args(argv)\n assert len(leftovers) == 0, ValueError('args not recognized: {}\\n{}'.format(leftovers,\n cfg_override_parser.print_help()))\n # apparently this is failing, so I'm going to have to screen this on my own:\n\n # Remove options from namespace that weren't defined\n unused_keys = [k for k in list(override_cfg_args.__dict__.keys()) if\n '--' + k not in argv and '-' + k not in argv]\n for k in unused_keys:\n delattr(override_cfg_args, k)\n\n postprocess_train_args(override_cfg_args)\n\n return args, override_cfg_args\n\n\ndef postprocess_test_args(override_cfg_args):\n\n pass\n\n\ndef postprocess_train_args(override_cfg_args):\n # Fix a few values\n replace_attr_with_function_of_val(override_cfg_args, 'clip',\n lambda old_val: old_val if old_val > 0 else None,\n error_if_attr_doesnt_exist=False)\n replace_attr_with_function_of_val(override_cfg_args, 'semantic_subset',\n lambda old_val: convert_comma_separated_string_to_list(\n old_val, str),\n error_if_attr_doesnt_exist=False)\n replace_attr_with_function_of_val(override_cfg_args, 'img_size',\n lambda old_val: convert_comma_separated_string_to_list(\n old_val, int),\n error_if_attr_doesnt_exist=False)\n replace_attr_with_function_of_val(override_cfg_args, 'resize_size',\n lambda old_val: convert_comma_separated_string_to_list(\n old_val, int),\n error_if_attr_doesnt_exist=False)\n replace_attr_with_function_of_val(override_cfg_args, 'blob_size',\n lambda old_val: 
convert_comma_separated_string_to_list(\n old_val, int),\n error_if_attr_doesnt_exist=False)\n\n\ndef construct_args_list_to_replace_sys(dataset_name, gpu=None, config_idx=None, sampler_name=None,\n resume=None, **kwargs):\n default_args_list = [dataset_name]\n for key, val in {'-g': gpu}.items():\n if val is not None:\n default_args_list.append(key)\n if type(val) is str:\n val = val.split(' ')\n for v in val:\n default_args_list.append(str(v))\n for key, val in {'-c': config_idx, '--sampler': sampler_name,\n '--resume': resume}.items():\n if val is not None:\n default_args_list += [key, str(val)]\n for key, val in kwargs.items():\n if val is not None:\n if not key.startswith('--'):\n key = '--' + key if len(key) > 1 else '-' + key\n default_args_list += [key, str(val)]\n return default_args_list\n\n\ndef get_parser_train():\n parser = argparse.ArgumentParser()\n dataset_names = dataset_registry.REGISTRY.keys()\n subparsers = parser.add_subparsers(help='dataset: {}'.format(dataset_names), dest='dataset')\n dataset_parsers = {\n dataset_name:\n subparsers.add_parser(dataset_name, help='{} dataset options'.format(dataset_name),\n epilog='\\n\\nOverride options:\\n' + '\\n'.join(\n ['--{}: {}'.format(k, v)\n for k, v in dataset_registry.REGISTRY[\n dataset_name].default_config.items()]),\n formatter_class=argparse.RawTextHelpFormatter)\n for dataset_name in dataset_names\n }\n for dataset_name, subparser in dataset_parsers.items():\n cfg_choices = list(dataset_registry.REGISTRY[dataset_name].config_options.keys())\n subparser.add_argument('-c', '--config', type=str_or_int, default=0, choices=cfg_choices)\n subparser.add_argument('-g', '--gpu', type=int, nargs='+', required=True)\n subparser.add_argument('--resume', help='Checkpoint path')\n subparser.add_argument('--semantic-init',\n help='Checkpoint path of semantic model (e.g. - '\n '\\'~/data/models/pytorch/semantic_synthetic.pth\\'',\n default=None)\n subparser.add_argument('--ignore_git', type=bool, default=False)\n subparser.add_argument('--single-image-index', type=int,\n help='Image index to use for train/validation set',\n default=None)\n subparser.add_argument('--sampler', type=str, choices=sampler_cfgs.keys(), default=None,\n help='Sampler for dataset')\n return parser\n\n\ndef get_parser_test():\n parser = argparse.ArgumentParser()\n dataset_names = dataset_registry.REGISTRY.keys()\n subparsers = parser.add_subparsers(help='dataset: {}'.format(dataset_names), dest='dataset')\n dataset_parsers = {\n dataset_name:\n subparsers.add_parser(dataset_name, help='{} dataset options'.format(dataset_name),\n epilog='\\n\\nOverride options:\\n' + '\\n'.join(\n ['--{}: {}'.format(k, v)\n for k, v in dataset_registry.REGISTRY[\n dataset_name].default_config.items()]),\n formatter_class=argparse.RawTextHelpFormatter)\n for dataset_name in dataset_names\n }\n for dataset_name, subparser in dataset_parsers.items():\n cfg_choices = list(dataset_registry.REGISTRY[dataset_name].config_options.keys())\n subparser.add_argument('-c', '--config', type=str_or_int, default=0, choices=cfg_choices)\n subparser.add_argument('-g', '--gpu', help='ex. 
- \\'2 3\\'', type=int, nargs='+', required=True)\n subparser.add_argument('--logdir', help='Checkpoint path for model', required=True)\n subparser.add_argument('--single-image-index', type=int,\n help='Image index to use for unit testing',\n default=None)\n subparser.add_argument('--ignore_git', type=bool, default=False)\n subparser.add_argument('--save_scores', type=bool, default=True)\n subparser.add_argument('--sampler', choices=sampler_cfgs.keys(), default=None,\n help='Sampler for dataset')\n subparser.add_argument('--test_split', type=str, default='val')\n return parser\n\n\ndef parse_args_without_sys(dataset_name, gpu=0, **kwargs):\n replacement_args_list = construct_args_list_to_replace_sys(dataset_name, gpu=gpu, **kwargs)\n print(replacement_args_list)\n args, override_cfg_args = parse_args_train(replacement_args_list=replacement_args_list)\n return args, override_cfg_args\n\n\ndef str_or_int(val):\n try:\n return int(val)\n except ValueError:\n return val\n\n\ndef replace_attr_with_function_of_val(namespace, attr, replacement_function,\n error_if_attr_doesnt_exist=True):\n if attr in namespace.__dict__.keys():\n setattr(namespace, attr, replacement_function(getattr(namespace, attr)))\n elif error_if_attr_doesnt_exist:\n raise Exception('attr {} does not exist in namespace'.format(attr))\n\n\ndef convert_comma_separated_string_to_list(string, conversion_type=None):\n if conversion_type is None:\n conversion_type = str\n if string is None or string == '':\n return string\n else:\n elements = [s.strip() for s in string.split(',')]\n return [conversion_type(element) for element in elements]\n","repo_name":"alliedel/fcn-instances-pytorch","sub_path":"instanceseg/utils/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":11082,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"15804335035","text":"from pickle import NONE\nfrom vector import *\nfrom gl import *\nfrom texture import *\nimport random\n\n\ndef bounding_box(A, B, C):\n # Mira cual es la bounding box\n xs = [A.x, B.x, C.x]\n ys = [A.y, B.y, C.y]\n\n xs.sort()\n ys.sort()\n\n return xs[0], xs[-1], ys[0], ys[-1]\n\n\ndef cross(V1, V2):\n # Producto cruz\n return (\n V1.y * V2.z - V1.z * V2.y,\n V1.z * V2.x - V1.x * V2.z,\n V1.x * V2.y - V1.y * V2.x,\n )\n\n\ndef barycentric(A, B, C, P):\n # Se calculan las baricentricas\n cx, cy, cz = cross(\n V3(B.x - A.x, C.x - A.x, A.x - P.x), V3(B.y - A.y, C.y - A.y, A.y - P.y)\n )\n\n u = cx / cz\n v = cy / cz\n w = 1 - (cx + cy) / cz\n\n return (w, v, u)\n\n\nprint()\n\n\ndef main():\n\n d = 0\n side = 1000\n b = Bitmap(side, side)\n print(\"Bienvenido al renderizador!\\n\")\n \"\"\"while True:\n print(\"1. Renderizar sin textura\")\n print(\"2. Renderizar con textura\")\n g = int(input())\n if g == 2:\n \n break\n elif g == 1:\n break\n else:\n print(\"Opción no válida\")\n continue\"\"\"\n nombre = \"Pokemon.bmp\"\n b.lookAt(V3(0, 10, 20), V3(0, 0, 0), V3(0, 10, 0))\n \"\"\"while True:\n print(\"¿Como quiere que sea la foto a tomar?\")\n print(\"1. Medium Shot\\n2. Low Angle\\n3. High Angle\\n4. 
Dutch Angle\")\n        angle = int(input())\n        if angle == 1:\n            b.lookAt(V3(0, 0, 20), V3(0, 0, 0), V3(0, 10, 0))\n            nombre = \"Medium.bmp\"\n            break\n        elif angle == 2:\n            b.lookAt(V3(0, -10, 20), V3(0, 0, 0), V3(0, 10, 0))\n            nombre = \"Low.bmp\"\n            break\n        elif angle == 3:\n            b.lookAt(V3(0, 10, 20), V3(0, 0, 0), V3(0, 10, 0))\n            nombre = \"High.bmp\"\n            break\n        elif angle == 4:\n            roation_factor = (0, 0, pi / 6)\n            scale_factor = (0.4, 0.4, 0.4)\n            transform_factor = (0.25, -0.3, 0)\n            b.lookAt(V3(0, 0, 20), V3(0, 0, 0), V3(0, 10, 0))\n            nombre = \"Dutch.bmp\"\n            break\n        else:\n            print(\"Opción no válida\")\n            continue\"\"\"\n\n    b.clearColor(200, 0, 225)\n\n    # All the shaders\n    def shader(render, **kwargs):\n        w, v, u = kwargs[\"bar\"]\n        A, B, C = kwargs[\"vertices\"]\n        if b._texture:\n            tA, tB, tC = kwargs[\"texture_coords\"]\n        nA, nB, nC = kwargs[\"normals\"]\n        L = V3(0, 0, 1)\n        iA = nA.normalize() @ L.normalize()\n        iB = nB.normalize() @ L.normalize()\n        iC = nC.normalize() @ L.normalize()\n\n        i = iA * w + iB * u + iC * v\n\n        if b._texture:\n            tx = tA.x * w + tB.x * u + tC.x * v\n            ty = tA.y * w + tB.y * u + tC.y * v\n            return t.getColori(tx, ty, i)\n\n    def pokeball(render, **kwargs):\n        w, v, u = kwargs[\"bar\"]\n        A, B, C = kwargs[\"vertices\"]\n        y = kwargs[\"aaa\"]\n        x = kwargs[\"bbb\"]\n        nA, nB, nC = kwargs[\"normals\"]\n        L = V3(0, 0, 1)\n        iA = nA.normalize() @ L.normalize()\n        iB = nB.normalize() @ L.normalize()\n        iC = nC.normalize() @ L.normalize()\n\n        i = iA * w + iB * u + iC * v\n        if y < (565):\n            return (int(255 * i), int(255 * i), int(255 * i))\n        elif y < (575):\n            return (0, 0, 0)\n        elif y < (615):\n            return (0, 0, int(255 * i))\n        else:\n            return (75, 105, 246)\n\n    def grass(render, **kwargs):\n        w, v, u = kwargs[\"bar\"]\n        A, B, C = kwargs[\"vertices\"]\n        y = kwargs[\"aaa\"]\n        x = kwargs[\"bbb\"]\n\n        m = random.randint(0, 10)\n        if m == 10:\n            return (20, 100, 0)\n        return (0, 125, 0)\n\n    def bush(render, **kwargs):\n        w, v, u = kwargs[\"bar\"]\n        A, B, C = kwargs[\"vertices\"]\n        y = kwargs[\"aaa\"]\n        x = kwargs[\"bbb\"]\n\n        m = random.randint(0, 50)\n        if m == 50:\n            return (0, 20, 255)\n        return (43, 100, 0)\n\n    def tree(render, **kwargs):\n        w, v, u = kwargs[\"bar\"]\n        A, B, C = kwargs[\"vertices\"]\n        y = kwargs[\"aaa\"]\n        x = kwargs[\"bbb\"]\n\n        if y < (615):\n            m = random.randint(0, 50)\n            if m == 50:\n                return\n            return (0, 56, 111)\n        else:\n            m = random.randint(0, 50)\n            if m == 50:\n                return (32, 111, 0)\n            return (0, 154, 26)\n\n    def transform_vertex(vertex):\n        # Transform the vertex\n        augmented_vertex = Matrix([[vertex[0]], [vertex[1]], [vertex[2]], [1]])\n        transformed_vertex = (\n            b.Viewport * b.Projection * b.View * b.Model * augmented_vertex\n        )\n\n        transformed_vertex = V3(\n            transformed_vertex.List[0][0],\n            transformed_vertex.List[1][0],\n            transformed_vertex.List[2][0],\n            transformed_vertex.List[3][0],\n        )\n        return V3(\n            transformed_vertex.x / transformed_vertex.w,\n            transformed_vertex.y / transformed_vertex.w,\n            transformed_vertex.z / transformed_vertex.w,\n        )\n\n    print()\n\n    def triangle(A, B, C, verticest=[], verticesn=[]):\n\n        # Create the triangle normal to obtain the light intensity\n\n        # Grayscale shading; build the bounding box\n        p, q, r, s = bounding_box(A, B, C)\n        for x in range(round(p), round(q) + 1):\n            for y in range(round(r), round(s) + 1):\n                # Check the barycentric coordinates inside the bounding box\n                try:\n                    w, v, u = barycentric(A, B, C, V3(x, y))\n                except:\n                    continue\n                if w < 0 or v < 0 or u < 0:\n                    continue\n                z = A.z * w + B.z * v + C.z * u\n                # Use the z-buffer to tell what is in front and what is behind\n                if (\n                    x >= 0\n                    and y >= 0\n                    and x < len(b._zbuffer)\n                    and y < len(b._zbuffer[0])\n                    and b._zbuffer[x][y] < z\n                ):\n                    b._zbuffer[x][y] = z\n                    # The method has an empty attribute in case there is no texture.\n\n                    if b.active_shader and len(verticesn) == 3 and b._texture:\n                        b._color = b.active_shader(\n                            b,\n                            aaa=y,\n                            bbb=x,\n                            bar=(w, v, u),\n                            vertices=(A, B, C),\n                            texture_coords=(verticest[0], verticest[1], verticest[2]),\n                            normals=(verticesn[0], verticesn[1], verticesn[2]),\n                        )\n                    else:\n                        b._color = b.active_shader(\n                            b,\n                            aaa=y,\n                            bbb=x,\n                            bar=(w, v, u),\n                            vertices=(A, B, C),\n                        )\n                    # Instead of grayscale, use the texture colors\n                    # Paint the point\n                    if b._color == None:\n                        continue\n                    b.Vertex(x, y)\n\n    def load_model(zubat, transform_factor, scale_factor, roation_factor):\n        b.loadModelMatrix(transform_factor, scale_factor, roation_factor)\n        vertext = []\n        vertextt = []\n        vertexn = []\n        vertexnn = []\n        d = 0\n        for face in zubat.faces:\n            d = d + 1\n            # Check each polygon and how many vertices it has\n            if len(face) == 4:\n                f1 = face[0][0] - 1\n                f2 = face[1][0] - 1\n                f3 = face[2][0] - 1\n                f4 = face[3][0] - 1\n\n                # Get the figure's vertices and transform them.\n                v1 = transform_vertex(zubat.vertices[f1])\n                v2 = transform_vertex(zubat.vertices[f2])\n                v3 = transform_vertex(zubat.vertices[f3])\n                v4 = transform_vertex(zubat.vertices[f4])\n\n                # If there is a texture, get the corresponding faces and vertices from it\n                if b._texture:\n                    ft1 = face[0][1] - 1\n                    ft2 = face[1][1] - 1\n                    ft3 = face[2][1] - 1\n                    ft4 = face[3][1] - 1\n\n                    vt1 = V3(\n                        zubat.tvertices[ft1][0] * t.width,\n                        zubat.tvertices[ft1][1] * t.height,\n                    )\n                    vt2 = V3(\n                        zubat.tvertices[ft2][0] * t.width,\n                        zubat.tvertices[ft2][1] * t.height,\n                    )\n                    vt3 = V3(\n                        zubat.tvertices[ft3][0] * t.width,\n                        zubat.tvertices[ft3][1] * t.height,\n                    )\n                    vt4 = V3(\n                        zubat.tvertices[ft4][0] * t.width,\n                        zubat.tvertices[ft4][1] * t.height,\n                    )\n                    vertext = [vt1, vt2, vt3]\n                    vertextt = [vt1, vt4, vt3]\n                if len(face[0]) == 3:\n                    fn1 = face[0][2] - 1\n                    fn2 = face[1][2] - 1\n                    fn3 = face[2][2] - 1\n                    fn4 = face[3][2] - 1\n                    vn1 = V3(\n                        zubat.nvertices[fn1][0],\n                        zubat.nvertices[fn1][1],\n                        zubat.nvertices[fn1][2],\n                    )\n                    vn2 = V3(\n                        zubat.nvertices[fn2][0],\n                        zubat.nvertices[fn2][1],\n                        zubat.nvertices[fn2][2],\n                    )\n                    vn3 = V3(\n                        zubat.nvertices[fn3][0],\n                        zubat.nvertices[fn3][1],\n                        zubat.nvertices[fn3][2],\n                    )\n                    vn4 = V3(\n                        zubat.nvertices[fn4][0],\n                        zubat.nvertices[fn4][1],\n                        zubat.nvertices[fn4][2],\n                    )\n\n                    vertexn = [vn1, vn2, vn3]\n                    vertexnn = [vn1, vn4, vn3]\n                # Four-vertex polygons are drawn with two triangles\n                triangle(\n                    V3(v1.x, v1.y, v1.z),\n                    V3(v2.x, v2.y, v2.z),\n                    V3(v3.x, v3.y, v3.z),\n                    vertext,\n                    vertexn,\n                )\n                triangle(\n                    V3(v1.x, v1.y, v1.z),\n                    V3(v4.x, v4.y, v4.z),\n                    V3(v3.x, v3.y, v3.z),\n                    vertextt,\n                    vertexnn,\n                )\n            else:\n                f1 = face[0][0] - 1\n                f2 = face[1][0] - 1\n                f3 = face[2][0] - 1\n\n                # Get the figure's vertices and transform them.\n                v1 = transform_vertex(zubat.vertices[f1])\n                v2 = transform_vertex(zubat.vertices[f2])\n                v3 = transform_vertex(zubat.vertices[f3])\n\n                # If there is a texture, get the corresponding faces and vertices from it\n                if b._texture:\n                    ft1 = face[0][1] - 1\n                    ft2 = face[1][1] - 1\n                    ft3 = face[2][1] - 1\n\n                    vt1 = V3(\n                        zubat.tvertices[ft1][0] * t.width,\n                        zubat.tvertices[ft1][1] * t.height,\n                    )\n                    vt2 = V3(\n                        zubat.tvertices[ft2][0] * t.width,\n                        zubat.tvertices[ft2][1] * t.height,\n                    )\n                    vt3 = V3(\n                        zubat.tvertices[ft3][0] * t.width,\n                        zubat.tvertices[ft3][1] * t.height,\n                    )\n                    vertext = [vt1, vt2, vt3]\n\n                if len(face[0]) == 3:\n                    fn1 = face[0][2] - 1\n                    fn2 = face[1][2] - 1\n                    fn3 = face[2][2] - 1\n                    vn1 = V3(\n                        zubat.nvertices[fn1][0],\n                        zubat.nvertices[fn1][1],\n                        zubat.nvertices[fn1][2],\n                    )\n                    vn2 = V3(\n                        zubat.nvertices[fn2][0],\n                        zubat.nvertices[fn2][1],\n                        zubat.nvertices[fn2][2],\n                    )\n                    vn3 = V3(\n                        zubat.nvertices[fn3][0],\n                        zubat.nvertices[fn3][1],\n                        zubat.nvertices[fn3][2],\n                    )\n\n                    vertexn = [vn1, vn2, vn3]\n                # Three-vertex polygons are drawn as a single triangle\n                triangle(\n                    V3(v1.x, v1.y, v1.z),\n                    V3(v2.x, v2.y, v2.z),\n                    V3(v3.x, v3.y, v3.z),\n                    vertext,\n                    vertexn,\n                )\n\n    Zubat = Obj(\"Leaf.obj\")\n    t = Texture(\"Leaf.bmp\")\n    b._texture = t\n    b.active_shader = shader\n    load_model(Zubat, (-0.60, -0.10, 0), (0.65, 0.65, 0.65), (0, 5 * pi / 4, 0))\n    t = Texture(\"Ivysaur.bmp\")\n    b._texture = t\n    load_model(Obj(\"Ivysaur.obj\"), (-0.2, -0.25, 0), (0.25, 0.25, 0.25), (0, pi / 4, 0))\n    t = Texture(\"Zubat.bmp\")\n    b._texture = t\n    load_model(Obj(\"Zubat.obj\"), (0.5, 0.5, 0), (0.075, 0.075, 0.075), (0, pi / 4, 0))\n    b.active_shader = pokeball\n    load_model(Obj(\"sphere.obj\"), (0.2, 0.6, 0), (0.10, 0.10, 0.10), (0, 3 * pi / 4, 0))\n    b._texture = None\n    b.active_shader = grass\n    load_model(Obj(\"Cube.obj\"), (0.0, -2.5, -5), (0.85, 0.01, 0.85), (0, 3 * pi / 4, 0))\n    b.active_shader = bush\n    load_model(Obj(\"Bush.obj\"), (0.65, -1.00, -2), (0.3, 0.3, 0.3), (0, pi / 4, 0))\n    b.active_shader = tree\n    load_model(Obj(\"Tree.obj\"), (0, -1.5, -4), (0.3, 0.3, 0.3), (0, 0, 0))\n    b.write(nombre)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Roberto-VC/Proyecto1-SoftwareRenderer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"29636167082","text":"from com.cloudops.pagelib.PrepaidDashboardPage import PrepaidDashboard\nimport logging\nfrom com.cloudops.genericlib.TestStatus import TestStatus\nimport com.cloudops.genericlib.custom_logger as cl\nimport pytest\nimport unittest\nfrom com.cloudops.pagelib.HomePage import HomePage\n\n@pytest.mark.usefixtures(\"launch_browser\", \"login\")\nclass VerifyPrepaidDashboard(unittest.TestCase):\n    log = cl.customLogger(logging.DEBUG)\n\n    @pytest.fixture(autouse=True)\n    def classSetUp(self, launch_browser, login):\n        self.ts = TestStatus(self.driver)\n\n    @pytest.mark.run(order=1)\n    def test_prepaid_dashboard_content(self):\n        home = HomePage(self.driver)\n        home.view_prepaid_dashboard()\n        dashboard = PrepaidDashboard(self.driver)\n        result1 = dashboard.verify_prepaid_dashboard_title()\n        self.ts.mark(result1, \"Prepaid Dashboard title is verified\")\n        result2 = dashboard.verify_unbilled_amount_card_title()\n        self.ts.mark(result2, \"Unbilled amount card title is verified\")\n        result3 = dashboard.verify_current_balance()\n        self.ts.mark(result3, \"Current Balance text is verified\")\n        result4 = dashboard.verify_last_month_usage()\n        self.ts.mark(result4, \"Last month usage text is verified\")\n        result5 = dashboard.verify_recent_recharges()\n
self.ts.markFinal(\"test_prepaid_dashboard_content\", result5, \"Recent Recharges table is verified\")\n\n\n\n\n","repo_name":"hbanafal/CloudopsAutomation","sub_path":"com/cloudops/smoketests/prepaid_dashboard/test_VerifyPrepaidDashboard.py","file_name":"test_VerifyPrepaidDashboard.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"37382866555","text":"\"\"\"\n- Move a 2x1 robot to position (N, N)\n- 0 is an empty cell, 1 is a wall\n- The robot can rotate 90 degrees at a time\n- Allowing only forward/downward moves fails -> there are cases where the robot must move backward\n\"\"\"\n\nfrom collections import deque\n\ndef move(tail, head, board):\n\n    tX, tY = tail[0], tail[1]\n    hX, hY = head[0], head[1]\n    locs = [] # positions the robot can move to\n\n    # move forward, backward, up, down\n    dx = (0, 0, -1, 1) \n    dy = (-1, 1, 0, 0)\n    for i in range(4):\n        n_tX, n_tY = tX+dx[i], tY+dy[i]\n        n_hX, n_hY = hX+dx[i], hY+dy[i]\n\n        if board[n_tX][n_tY] == 0 and board[n_hX][n_hY] == 0:\n            locs.append( ((n_tX, n_tY),(n_hX, n_hY)) )\n    \n    # horizontal rotation\n    if tX == hX :\n        for d in [-1, 1]: # rotate up / rotate down\n            if board[tX+d][tY] == 0 and board[hX+d][hY] == 0:\n                locs.append( (tail, (tX+d, tY)) ) # pivot on the tail\n                locs.append( ((hX+d, hY), head) ) # pivot on the head\n    # vertical rotation\n    else:\n        for d in [-1, 1]: # rotate left / rotate right\n            if board[tX][tY+d] == 0 and board[hX][hY+d] == 0:\n                locs.append( ((tX, tY+d), tail) ) # pivot on the tail\n                locs.append( ((hX, hY+d), head) ) # pivot on the head\n    \n    return locs\n    \n    \ndef solution(board):\n\n    N = len(board)\n    q = deque([ ((1,1), (1,2), 0) ]) # robot tail, head, elapsed time\n    check = set([ ((1,1), (1,2)) ]) # positions the robot has already visited\n\n    # add outer walls around the board\n    tmp = [ [1]*(N+2) for _ in range(N+2)]\n    for i in range(N):\n        for j in range(N):\n            tmp[i+1][j+1] = board[i][j]\n    board = tmp\n\n    # move the robot\n    while q:\n        tail, head, times = q.popleft()\n\n        # stop when (N, N) is reached\n        if tail == (N, N) or head == (N, N):\n            return times\n\n        # try every position the robot can move to\n        for next in move(tail, head, board):\n            if next not in check:\n                q.append( (next[0], next[1], times+1) )\n                check.add(next)\n","repo_name":"songhee-lee/2023-python-coding-test","sub_path":"이것이 코딩테스트다/3. 
BFS&DFS/songhee/10 - 블록 이동.py","file_name":"10 - 블록 이동.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"519176682","text":"from typing import List, Tuple, Optional\nfrom torch import Tensor\nfrom torch.autograd import no_grad\n\nfrom aihwkit.inference.converter.base import BaseConductanceConverter\nfrom aihwkit.inference.converter.conductance import SinglePairConductanceConverter\n\n\nclass BaseNoiseModel:\n \"\"\"Base class for phenomenological noise models for inference.\"\"\"\n\n def __init__(self, g_converter: Optional[BaseConductanceConverter] = None):\n self.g_converter = g_converter or SinglePairConductanceConverter()\n\n def __eq__(self, other: object) -> bool:\n return self.__class__ == other.__class__ and self.__dict__ == other.__dict__\n\n def __str__(\n self, exclude_keys: Optional[List[str]] = None, keys: Optional[List[str]] = None\n ) -> str:\n \"\"\"Print instance.\"\"\"\n ret = self.__class__.__name__ + \"(\"\n if keys is None:\n keys = list(self.__dict__.keys())\n\n if exclude_keys is not None:\n for key in exclude_keys:\n keys.remove(key)\n\n for key in keys:\n ret += key + \"={}, \".format(self.__dict__[key])\n ret = ret[:-2] + \")\"\n return ret\n\n @no_grad()\n def apply_noise(self, weights: Tensor, t_inference: float) -> Tensor:\n \"\"\"Apply the expected noise.\n\n Applies the noise to a non-perturbed conductance matrix ``weights``\n at time of inference ``t_inference`` (in seconds) where 0 sec\n refers to the time when weight programming has finished.\n\n Note:\n The drift coefficients and intermediate noises etc. are\n sampled for each application of this function anew from the\n distributions, thus it samples the expected noise and drift\n behavior at time ``t_inference`` but not a continual\n trajectory of a given device instance over time (having\n e.g. 
constant drift coefficients).\n \"\"\"\n target_conductances, params = self.g_converter.convert_to_conductances(weights)\n\n noisy_conductances = []\n for g_target in target_conductances:\n g_prog = self.apply_programming_noise_to_conductance(g_target)\n if t_inference > 0:\n nu_drift = self.generate_drift_coefficients(g_target)\n noisy_conductances.append(\n self.apply_drift_noise_to_conductance(g_prog, nu_drift, t_inference)\n )\n\n noisy_weights = self.g_converter.convert_back_to_weights(noisy_conductances, params)\n\n return noisy_weights\n\n @no_grad()\n def apply_programming_noise(self, weights: Tensor) -> Tuple[Tensor, List[Tensor]]:\n \"\"\"Apply the expected programming noise to weights.\n\n Uses the :meth:`~apply_programming_noise_to_conductances` on\n each of the conductance slices.\n\n Args:\n weights: weights tensor\n\n Returns:\n weight tensor with programming noise applied, and tuple of\n all drift coefficients (per conductances slice) that are\n determined during programming.\n \"\"\"\n target_conductances, params = self.g_converter.convert_to_conductances(weights)\n\n noisy_conductances = []\n nu_drift_list = []\n for g_target in target_conductances:\n noisy_conductances.append(self.apply_programming_noise_to_conductance(g_target))\n nu_drift_list.append(self.generate_drift_coefficients(g_target))\n noisy_weights = self.g_converter.convert_back_to_weights(noisy_conductances, params)\n\n return noisy_weights, nu_drift_list\n\n @no_grad()\n def apply_drift_noise(\n self, weights: Tensor, drift_noise_parameters: List[Optional[Tensor]], t_inference: float\n ) -> Tensor:\n \"\"\"Apply the expected drift noise to weights.\n\n Uses the :meth:`~apply_drift_noise_to_conductances` on\n each of the conductance slices.\n\n Args:\n weights: weights tensor (usually with programming noise already applied)\n drift_noise_parameters: list of drift nu for each conductance slice\n t_inference: assumed time of inference (in sec)\n\n Returns:\n weight tensor with drift noise applied\n \"\"\"\n target_conductances, params = self.g_converter.convert_to_conductances(weights)\n\n noisy_conductances = []\n for g_target, drift_noise_param in zip(target_conductances, drift_noise_parameters):\n noisy_conductances.append(\n self.apply_drift_noise_to_conductance(g_target, drift_noise_param, t_inference)\n )\n\n noisy_weights = self.g_converter.convert_back_to_weights(noisy_conductances, params)\n\n return noisy_weights\n\n @no_grad()\n def generate_drift_coefficients(self, g_target: Tensor) -> Optional[Tensor]:\n \"\"\"Generate drift coefficients.\n\n Generate coefficients once and passed through when\n long-term noise and drift is applied. 
Typical `nu_drift`.\n\n        Args:\n            g_target: Target conductances\n\n        Returns:\n            When not overridden, it simply returns None.\n        \"\"\"\n        # pylint: disable=unused-argument\n\n    @no_grad()\n    def apply_programming_noise_to_conductance(self, g_target: Tensor) -> Tensor:\n        r\"\"\"Apply programming noise to a target conductance ``Tensor``.\n\n        Args:\n            g_target: Target conductances\n\n        Returns:\n            Tensor of conductances with programming noise applied.\n        \"\"\"\n        raise NotImplementedError\n\n    @no_grad()\n    def apply_drift_noise_to_conductance(\n        self, g_prog: Tensor, drift_noise_param: Optional[Tensor], t_inference: float\n    ) -> Tensor:\n        r\"\"\"Apply the noise and drift up to the assumed inference time point.\n\n        Args:\n            g_prog: Tensor of conductance values after programming (in :math:`\\muS`)\n            drift_noise_param: typically drift nu\n            t_inference: assumed time of inference (in sec)\n\n        Returns:\n            conductance Tensor with applied noise and drift\n        \"\"\"\n        raise NotImplementedError\n","repo_name":"IBM/aihwkit","sub_path":"src/aihwkit/inference/noise/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","stars":249,"dataset":"github-code","pt":"5"}
+{"seq_id":"69860981593","text":"#!/usr/bin/env python3\n\nimport os\nimport re\nimport glob\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set_context('poster')\nsns.set_style('whitegrid')\nsns.set_palette('Blues', 13)\n\n\ndef parse_file(path):\n    pat = re.compile(r'^MEM\\s+([\\d\\.]+)\\s+([\\d\\.]+)$', re.I)\n    start = None\n\n    with open(path, 'r') as f:\n        for line in f:\n            match = pat.match(line.strip())\n            if match:\n                row = tuple(map(float, match.groups()))\n                if start is None:\n                    start = row[1]\n\n                ts = row[1] - start\n                yield ts, row[0]\n\n\ndef parse_series(paths):\n    pat = re.compile(r'^mprofile-(\\d+)-nodes.dat$', re.I)\n    for path in paths:\n        match = pat.match(os.path.basename(path))\n        if match:\n            name = \"{} clients\".format(int(match.group(1)))\n            series = list(parse_file(path))\n            yield name, series\n\n\ndef mprof_plot(paths):\n    for name, series in parse_series(paths):\n        x = [row[0] for row in series]\n        y = [row[1] for row in series]\n        plt.plot(x, y, label=name)\n\n    plt.title(\"Server Memory Usage for Concurrent Clients\")\n    plt.ylabel(\"memory (MiB)\")\n    plt.xlabel(\"seconds\")\n    plt.legend(loc='best')\n\n\n\nif __name__ == '__main__':\n    mprof_plot(glob.glob('mprofile-*'))\n    plt.show()\n","repo_name":"bbengfort/honu","sub_path":"fixtures/vms/client/mprofile/mprof_plot.py","file_name":"mprof_plot.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"}
+{"seq_id":"73836104152","text":"import os\nimport sys\nimport math\nimport time\nimport random\nif sys.version_info < (3, 0):\n    from itertools import izip as zip_\nelse:\n    zip_ = zip \n\nsys.path.extend(['..', '../..'])\nimport banyan\nfrom bx.intervals.intersection import IntervalTree\nimport _src\n\n\ndef _banyan(es, key_type):\n    t = banyan.SortedSet(updator = banyan.OverlappingIntervalsUpdator, key_type = key_type)\n    for e in es:\n        t.add(e)\n        c = len(t.overlap(e))\n\n\ndef _bx(es):\n    t = IntervalTree()\n    for e in es:\n        t.add(e[0], e[1], e)\n        c = len(t.find(e[0], e[1]))\n\n\ndef _run_test(fn, num_items, num_its):\n    es = [(min(b, e), max(b, e)) for (b, e) in zip_(_src.random_ints(num_items), _src.random_ints(num_items))]\n    start = time.time()\n    for _ in range(num_its):\n        fn(es) \n    end = 
time.time()\n    diff = (end - start) / num_its\n    return diff\n\n\ndef run_tests(names, num_items, num_its):\n    fns = dict([\n        ('bx', lambda es: _bx(es)),\n        ('banyan_red_black_tree', lambda es: _banyan(es, key_type = (int, int))),\n        ('banyan_red_black_tree_float', lambda es: _banyan(es, key_type = (float, float))),\n        ('banyan_red_black_tree_gen', lambda es: _banyan(es, key_type = None))])\n    t = dict([]) \n    for name in names: \n        t[name] = _run_test(fns[name], num_items, num_its)\n    return t\n\n","repo_name":"cpcloud/banyan","sub_path":"performance_tests/_set_insert_overlapping_intervals.py","file_name":"_set_insert_overlapping_intervals.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"5"}
+{"seq_id":"4364190054","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torchvision.utils import make_grid\nimport matplotlib.pyplot as plt\nimport argparse\n\n# set the hyperparameters\nparser = argparse.ArgumentParser(description='set VAE training hyperparameters')\n\nparser.add_argument('--batch_size', '-b', default=64, type=int, help=\"batch size\")\nparser.add_argument('--mean', default=[0.5, 0.5, 0.5], type=list)\nparser.add_argument('--std', default=[0.5, 0.5, 0.5], type=list)\n\nargs = parser.parse_args()\n\ndef non_normalize(images_tensor, mean:list, std:list):\n    \"\"\"\n    @description :\n        de-normalize a batch of images\n    @param :\n        images_tensor: batch of images\n        mean: the mean parameter used in the DataLoader\n        std: the std parameter used in the DataLoader\n    @Returns :\n        the de-normalized tensor\n    \"\"\"\n    images_tensor = images_tensor.detach().cpu()\n    batch = images_tensor.shape[0]\n    std = torch.tensor(std*batch).reshape(batch, len(std))\n    mean = torch.tensor(mean*batch).reshape(batch, len(mean))\n    mean_tensor = torch.einsum('bchw, bc->bchw', torch.ones_like(images_tensor), mean)\n    output = torch.einsum('bchw, bc->bchw', images_tensor, std) + mean_tensor\n    return output\n\nif __name__ == \"__main__\":\n    if torch.cuda.is_available():\n        device = \"cuda\"\n    else:\n        device = \"cpu\"\n\n    model = torch.load('DCGAN/model_save/Generator_model_best.pth')\n    input = torch.randn(args.batch_size, 100, 1, 1, device=device)\n    output = model(input)\n    output = non_normalize(output, args.mean, args.std)\n    output = make_grid(output, padding=0)\n    output = np.transpose(output, (1,2,0))\n    plt.imshow(output)\n    plt.show()\n\n    ","repo_name":"Newbiezzx/code_train","sub_path":"DCGAN/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"43015807259","text":"import pandas as pd\nimport numpy as np\nimport re\nimport json\nimport requests\nimport datetime\nimport time\nfrom tqdm import tqdm\nfrom utilities.exceptions import ServiceUnavailableException, TooManyRequestsException\nfrom utilities import misc\n\n# STEAM API ENDPOINTS\nALL_APPS_ENDPOINT = \"https://api.steampowered.com/ISteamApps/GetAppList/v2/\"\nBASIC_INFO_ENDPOINT = \"https://store.steampowered.com/api/appdetails?appids={}&cc=us&l=en\"\nRATING_ENDPOINT = \"https://store.steampowered.com/appreviews/{}?json=1&language=all&purchase_type=all&num_per_page=1\"\n\nTAG_PATTERN = re.compile('<.*?>')\n\n\ndef get_all_apps():\n    response = requests.get(ALL_APPS_ENDPOINT)\n    if response.status_code != 200:\n        raise Exception(f'Error {response.status_code}')\n    response_formatted = pd.json_normalize(json.loads(response.content)['applist']['apps'])\n\n    return response_formatted\n\n\n# Steam API is rate limited to 200
requests per 5 minutes\ndef get_basic_info(appids, basket_timelimit=300, basket_countlimit=200):\n function_desc = 'collecting app details from Steam'\n results = []\n\n basket_start = datetime.datetime.now()\n total_count = 0\n for appid in tqdm(appids, desc=function_desc.upper()):\n try:\n basket_duration = (datetime.datetime.now() - basket_start).seconds\n total_count += 1\n\n if basket_duration >= basket_timelimit:\n basket_start = datetime.datetime.now()\n\n if total_count / basket_countlimit == total_count // basket_countlimit:\n time.sleep(basket_timelimit - basket_duration + 1)\n\n response = requests.get(BASIC_INFO_ENDPOINT.format(appid))\n\n if response.status_code == 503:\n raise ServiceUnavailableException\n if response.status_code == 429:\n raise TooManyRequestsException\n\n response_formatted = pd.json_normalize(json.loads(response.content))\n response_formatted.columns = [col.replace(f'{appid}.', '') for col in response_formatted.columns]\n results.append(response_formatted)\n\n except TooManyRequestsException:\n print(f'Too many requests')\n print(\n f'Basket duration: {basket_duration} | Basket count: {total_count - total_count // basket_countlimit * basket_countlimit}')\n return pd.concat(results)\n except ServiceUnavailableException:\n print(f'App {appid} unavailable')\n except:\n raise\n\n return pd.concat(results)\n\n\ndef remove_tags(string):\n string_clean = re.sub(TAG_PATTERN, ' ', string)\n string_clean = string_clean.replace('"', \"'\").replace('>', '')\n string_clean = \" \".join(string_clean.split())\n return string_clean\n\n\ndef clean_languages(language_series):\n results = []\n for e in language_series:\n if type(e) == float:\n results.append(np.nan)\n else:\n results.append([x.strip() for x in remove_tags(e).replace(\n 'languages with full audio support', '').replace('*', '').replace(' -', '').split(',')])\n\n return results\n\n\ndef extract_from_dict_series(series, dict_key):\n result = []\n for this in [x for x in series]:\n result_this = []\n if type(this) != float:\n for i in range(len(this)):\n result_this.append(this[i][dict_key])\n result.append(result_this)\n\n return result\n\n\ndef is_iterable(x):\n try:\n iter(x)\n return True\n except:\n return False\n\n\ndef get_now_string(sep=''):\n current_time = datetime.datetime.now()\n return f'{current_time.hour:02d}{sep}{current_time.minute:02d}{sep}{current_time.second:02d}{sep}{current_time.day:02d}{sep}{current_time.month:02d}{sep}{current_time.year - 2000}'\n\n\ndef clean_basic_info_df(df):\n df.columns = [col.replace(\n 'data.', '').replace(\n 'overview.', '').replace(\n 'steam_appid', 'appid').replace(\n '.', '_') for col in df.columns]\n\n df = df.query('success == True and type == \"game\"')\n df = df.drop(['price_currency', 'price_initial_formatted',\n 'price_final_formatted', 'success', 'type',\n 'ext_user_account_notice', 'legal_notice',\n 'price_discount_percent', 'price_final',\n 'fullgame_name', 'background_raw', 'reviews',\n 'fullgame_appid', 'package_groups', 'achievements_highlighted',\n 'support_info_url', 'support_info_email',\n 'pc_requirements', 'mac_requirements', 'linux_requirements',\n 'mac_requirements_recommended', 'linux_requirements_recommended',\n 'mac_requirements_minimum', 'linux_requirements_minimum',\n 'required_age', 'is_free', 'recommendations_total',\n 'price_recurring_sub', 'price_recurring_sub_desc'], errors='ignore', axis=1)\n # required_age is incorrect most of the time\n # even with the notes about murder, blood, nudity, etc., the age is still 0\n\n df = 
df.rename(columns=({'release_date_coming_soon': 'coming_soon', 'release_date_date': 'release_date',\n 'platforms_windows': 'windows', 'platforms_mac': 'mac', 'platforms_linux': 'linux',\n 'supported_languages': 'languages'}))\n\n # if there are no such columns for the given appids, do nothing with them\n try:\n df['controller_support'] = df['controller_support'].replace('full', True).fillna(False)\n except KeyError:\n pass\n\n params_in_json = {'categories':'description',\n 'genres':'description',\n 'demos':'appid',\n 'screenshots':'path_thumbnail'}\n\n for param, key in params_in_json.items():\n try:\n df[param] = extract_from_dict_series(df[param], key)\n except KeyError:\n pass\n\n try:\n df['movies'] = [x[0]['480'] if len(x) > 0 else x for x in extract_from_dict_series(df['movies'], 'mp4')]\n except KeyError:\n pass\n\n df['about_the_game'] = [remove_tags(x) for x in df['about_the_game']]\n df['short_description'] = [remove_tags(x) for x in df['short_description']]\n df['detailed_description'] = [remove_tags(x) for x in df['detailed_description']]\n\n df['languages'] = clean_languages(df['languages'])\n\n df['appid'] = df['appid'].astype(int)\n df['release_date'] = [pd.to_datetime(x, errors='coerce') for x in df['release_date']]\n df['coming_soon'] = df['coming_soon'].astype(bool)\n df['windows'] = df['windows'].astype(bool)\n df['mac'] = df['mac'].astype(bool)\n df['linux'] = df['linux'].astype(bool)\n\n df['release_year'] = [x.year for x in df['release_date']]\n df['dlcs_total'] = [len(x) for x in df['dlc']]\n df['packages_total'] = [len(x) for x in df['packages']]\n df['languages_total'] = [len(x) for x in df['languages']]\n df['screenshots_total'] = [len(x) for x in df['screenshots']]\n df['developers'] = [x[0] if type(x) != float else np.nan for x in df['developers']]\n df['publishers'] = [x[0] if type(x) != float else np.nan for x in df['publishers']]\n\n for col in df.columns:\n df[col] = [misc.empty_to_nan(x) for x in df[col]]\n\n return df.reset_index(drop=True)\n\n\ndef get_rating_df(appids, basket_timelimit=300, basket_countlimit=200):\n function_desc = 'collecting rating data'\n results = []\n\n basket_start = datetime.datetime.now()\n total_count = 0\n for appid in tqdm(appids, desc=function_desc.upper()):\n try:\n basket_duration = (datetime.datetime.now() - basket_start).seconds\n total_count += 1\n\n if basket_duration >= basket_timelimit:\n basket_start = datetime.datetime.now()\n\n if total_count / basket_countlimit == total_count // basket_countlimit:\n time.sleep(basket_timelimit - basket_duration + 1)\n\n response = requests.get(RATING_ENDPOINT.format(appid))\n\n if response.status_code == 503:\n raise ServiceUnavailableException\n if response.status_code == 429:\n raise TooManyRequestsException\n\n response_formatted = pd.json_normalize(json.loads(response.content)['query_summary'])\n response_formatted.columns = [col.replace(f'{appid}.', '') for col in response_formatted.columns]\n results.append(response_formatted)\n\n except TooManyRequestsException:\n print(f'Too many requests')\n print(\n f'Basket duration: {basket_duration} | Basket count: {total_count - total_count // basket_countlimit * basket_countlimit}')\n return pd.concat(results)\n except ServiceUnavailableException:\n print(f'App {appid} unavailable')\n except:\n raise\n\n results = pd.concat(results)\n results['appid'] = appids\n results = results.drop(['review_score_desc'], axis=1)\n results = results.rename(columns={'total_reviews': 'reviews_total'})\n results = 
results.reindex(list(results.columns[-1:]) + list(results.columns[:-1]), axis='columns')\n return results\n\n\ndef get_images_df(df):\n images_df = df[['appid', 'screenshots_total', 'header_image', 'background', 'screenshots', 'movies']]\n return images_df\n\n\ndef get_dlc_df(df):\n dlc_df = df[['appid', 'dlc', 'dlcs_total']]\n return dlc_df\n\n\ndef get_packages_df(df):\n dlc_df = df[['appid', 'packages', 'packages_total']]\n return dlc_df\n\n\ndef get_content_descriptors_df(df):\n content_descriptors_df = df[['appid', 'content_descriptors_ids', 'content_descriptors_notes']]\n return content_descriptors_df\n\n\ndef get_languages_df(df):\n languages_df = misc.get_dummy_df(df, 'languages')\n return languages_df\n\n\ndef get_categories_df(df):\n categories_df = misc.get_dummy_df(df, 'categories')\n return categories_df\n\n\ndef get_genres_df(df):\n steam_genres_df = misc.get_dummy_df(df, 'genres')\n return steam_genres_df\n\n\ndef get_requirements_df(df, param):\n results = []\n requirements_df = pd.DataFrame()\n\n try:\n for requirements, appid in zip(df[param], df['appid']):\n if type(requirements) != float:\n result = {'appid': appid}\n for x in requirements.split('
  • ')[1:]:\n temp = [x.strip() for x in remove_tags(x).split(':')]\n try:\n result[temp[0]] = temp[1]\n except:\n result['Other'] = temp[0]\n results.append(result)\n\n requirements_df = pd.DataFrame(results)\n requirements_df.columns = ['_'.join(x.lower().split()) for x in requirements_df.columns]\n\n for x in requirements_df:\n requirements_df[x] = requirements_df[x].replace('n/a', np.nan)\n\n return requirements_df\n except:\n return requirements_df\n\n\ndef get_requirements_minimum_df(df):\n minimum_requirements_df = get_requirements_df(df, 'pc_requirements_minimum')\n return minimum_requirements_df\n\n\ndef get_requirements_recommended_df(df):\n minimum_requirements_df = get_requirements_df(df, 'pc_requirements_recommended')\n return minimum_requirements_df\n\n\ndef get_descriptions_df(df):\n descriptions_df = df[['detailed_description', 'about_the_game', 'short_description']]\n return descriptions_df\n\n\ndef estimate_owners(reviews, year):\n if year < 2014:\n return reviews * 60\n elif year < 2017:\n return reviews * 50\n elif year < 2018:\n return reviews * 40\n elif year < 2020:\n return reviews * 35\n else:\n return reviews * 30\n\n\ndef estimate_revenue(owners,\n average_price,\n platform_cut=0.7,\n regional_price=0.8,\n vat=0.93,\n returns=0.92):\n return round(owners * average_price * platform_cut * regional_price * vat * returns)\n\n\ndef get_owners(df):\n owners = [estimate_owners(reviews, year) for reviews, year in zip(\n df['reviews_total'], df['release_year'])]\n return owners\n\n\ndef get_revenue(df):\n revenue = [estimate_revenue(owners, average_price) for owners, average_price in zip(\n df['owners'], df['mean_price'])]\n return revenue\n","repo_name":"tsutsen/YourGamesSuc","sub_path":"modules/collector/steam_data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"43782446498","text":"# The DE-ID format, or versions of it, is commonly used in medical\n# deidentification. This core facility takes care of the central capabilities.\n# Most DE-ID tags are like this:\n#\n# **EMAIL\n#\n# But some of them have additional content, like this:\n#\n# **DATE<5/7/09>\n#\n# Sometimes they're square brackets instead of angle brackets.\n# There are also default forms for the content.\n\nimport re, random\n\nfrom ReplacementEngine import *\nfrom ReplacementEngine import _IDReplace\nfrom ClearReplacementStrategy import *\n\n#\n# Rendering\n#\n\nimport string\n\n# This is complicated. It inherits some of its rendering behavior\n# from the clear replacement strategy - and needs to override some\n# of its methods, for person replacement - but otherwise, it's\n# its own thing.\n\nclass DEIDStyleRenderingStrategy(ClearRenderingStrategy):\n\n def __init__(self, engine):\n ClearRenderingStrategy.__init__(self, engine)\n self.lBracket, self.rBracket = engine.bracketPair\n self.P_NAME_INDEX = 0\n self.P_INIT_INDEX = 0\n\n def Replace(self, pattern, **kw):\n mName = pattern.__ctype__ + \"Replace\"\n doIt = False\n if hasattr(self, mName):\n # This is ugly. I need to inherit some behavior\n # from the ClearRenderingStrategy, but ONLY\n # some of it. If the class the method is defined\n # on isn't a child of THIS ROOT CLASS, then\n # we need to punt. But the only way to find that out\n # is to have a \"new-style\" class and search the\n # __mro__ list. 
So I've made the parents new-style\n # classes.\n # I can't simply pick the methods that are defined\n # here to let through, since children of this class\n # might also define something.\n for c in self.__class__.__mro__:\n # It's gotta be in a local dictionary. hasattr()\n # handles inheritance. If we pass DEIDStyleRenderingStrategy\n # in the list, and we haven't found the entry,\n # then we punt.\n if c.__dict__.has_key(mName):\n doIt = True\n break\n if c is DEIDStyleRenderingStrategy:\n break\n if doIt:\n return getattr(self, mName)(pattern, **kw) \n else:\n return \"**\" + pattern.replacer.label\n\n def _wrap(self, pattern, content):\n return \"**\" + pattern.replacer.label + self.lBracket + content + self.rBracket\n\n # People.\n\n def _nextName(self):\n s = string.uppercase[self.P_NAME_INDEX] * 3\n self.P_NAME_INDEX = (self.P_NAME_INDEX + 1) % 26\n return s\n\n def _nextInit(self):\n s = string.uppercase[self.P_INIT_INDEX]\n self.P_INIT_INDEX = (self.P_INIT_INDEX + 1) % 26\n return s\n\n def _PERSONReplacementSeed(self, pattern):\n # We need a first and a last name. We MIGHT need\n # middle names.\n return {\"firstNameAlts\": [self._nextName()],\n \"middleNames\": None,\n \"lastName\": self._nextName()}\n\n def _getRSMiddleNames(self, seed, numNames):\n if seed[\"middleNames\"] is None:\n seed[\"middleNames\"] = []\n while len(seed[\"middleNames\"]) < numNames:\n seed[\"middleNames\"].append(self._nextName())\n return seed[\"middleNames\"][:numNames]\n\n def PERSONReplace(self, pattern, **kw):\n # Hm. What do we do here? Exactly what we\n # do otherwise. We just need to make sure that\n # the pattern is marked for all upper. And\n # the user has to use the DEIDPersonCategory.\n pattern.cap_status = ALL_UPPER\n return self._wrap(pattern, ClearRenderingStrategy.PERSONReplace(self, pattern, **kw))\n\n def AGEReplace(self, pattern, **kw):\n # Presuming that we have some coherent age.\n # If the upper bound and lower bound are not the\n # same, then we have to pick some seed.\n ageSeed = None\n if pattern.ageUb == pattern.ageLb:\n ageSeed = pattern.ageUb\n elif int(pattern.ageUb) / 10 == int(pattern.ageLb) / 10:\n # They're in the same decade.\n ageSeed = pattern.ageLb\n else:\n ageSeed = random.randint(pattern.ageUb, pattern.ageLb)\n if ageSeed < 13:\n return self._wrap(pattern, \"birth-12\")\n elif ageSeed < 20:\n return self._wrap(pattern, \"in teens\")\n elif ageSeed > 89:\n return self._wrap(pattern, \"90+\")\n else:\n return self._wrap(pattern, \"in %d0s\" % (int(ageSeed) / 10))\n\n def DATEReplace(self, pattern, **kw):\n return self._wrap(pattern, ClearRenderingStrategy.DATEReplace(self, pattern, **kw))\n\nclass DEIDStyleReplacementEngine(PIIReplacementEngine):\n\n __rname__ = \"clear -> DE-ID\"\n\n bracketPair = (\"\", \"\")\n\n def createDigestionStrategy(self):\n return ClearDigestionStrategy(self)\n\n def createRenderingStrategy(self):\n return DEIDStyleRenderingStrategy(self)\n\n#\n# Digestion\n#\n\n# We may have to do some date digestion, using the clear\n# digester.\n\nclass DEIDStyleDigestionStrategy(DigestionStrategy):\n \n def __init__(self, engine): \n DigestionStrategy.__init__(self, engine)\n self.deidPattern = engine.deidPattern\n tags = engine.categories.keys()\n self.patDict = {}\n self.replPat = re.compile(self.deidPattern % \"|\".join(tags))\n for tag in tags:\n self.patDict[tag] = re.compile((\"^\" + self.deidPattern + \"$\") % tag)\n self.dateDigester = None\n\n def canCache(self, ctype):\n return ctype in [\"PERSON\", \"DATE\", \"AGE\"]\n\n def 
FindReplacedElements(self, s, tags):\n        return [(m.start(), m.end(), m.group(1)) for m in self.replPat.finditer(s)]\n\n    # We can get something out of names, ages, and dates.\n    \n    # The name looks like this:\n    # **NAME, **NAME\n    # **NAME\n\n    # Most of this is identical to PIIPersonCategory.Digest.\n    # Once we digest, the replacement should be identical to the parent,\n    # since we're working off the pattern.\n\n    INITPAT = re.compile(\"^[A-Z][.]?$\")\n    \n    def PERSONDigest(self, pat, seed):\n        p = self.patDict[pat.replacer.label]\n        m = p.match(seed)\n        name = m.group(3)\n        pat.cap_status = MIXED\n        # There will be no name extension.\n        pat.name_ext = \"\"\n        # Default is not to invert. Only invert\n        # if you find a reason to. Ditto one name.\n        pat.last_is_first = False\n        pat.one_name = False\n        toks = name.split()\n        if len(toks) == 1:\n            pat.one_name = True\n            middleNames = []\n            firstName = lastName = toks[0]\n        else:\n            firstName = toks[0]\n            lastName = toks[-1]\n            middleNames = toks[1:-1]\n        firstNameAlts = [firstName]\n        pat.mid_initials = []\n        for m in middleNames:\n            if self.INITPAT.match(m) is not None:\n                pat.mid_initials.append(True)\n            else:\n                pat.mid_initials.append(False)\n        \n        # Finally, set the replacement keys.\n        \n        # Any of the following can invoke the cache. Don't\n        # forget case insensitivity.\n\n        allKeys = [(None, lastName.upper())]\n        for firstName in firstNameAlts:\n            allKeys = allKeys + [(firstName.upper(), lastName.upper()),\n                                 (firstName.upper(), None)]\n        pat.setReplacementCacheKeys(allKeys)\n\n    # Possibilities: **AGE **AGE **AGE **AGE<90+>\n\n    AGE_RE = re.compile(\"^in\\s+(.*)s$\")\n    \n    def AGEDigest(self, pat, seed):\n        p = self.patDict[pat.replacer.label]\n        m = p.match(seed)\n        if m is not None:\n            age = m.group(3)\n            if age == \"birth-12\":\n                pat.ageLb = 0\n                pat.ageUb = 12\n            elif age == \"in teens\":\n                pat.ageLb = 13\n                pat.ageUb = 19\n            elif age == \"90+\":\n                pat.ageLb = 90\n                pat.ageUb = 120\n            else:\n                m = self.AGE_RE.match(age)\n                if m:\n                    pat.ageLb = int(m.group(1))\n                    pat.ageUb = pat.ageLb + 9\n        pat.spell = False\n\n    def DATEDigest(self, pat, seed):\n        p = self.patDict[pat.replacer.label]\n        m = p.match(seed)\n        if m is not None:\n            seed = m.group(3)\n        if self.dateDigester is None:\n            self.dateDigester = ClearDigestionStrategy(self.engine)\n        self.dateDigester.DATEDigest(pat, seed)\n        \nclass DEIDStyleResynthesisEngine(PIIReplacementEngine):\n\n    __rname__ = \"DE-ID -> clear\"\n    \n    deidPattern = None\n\n    def createDigestionStrategy(self):\n        return DEIDStyleDigestionStrategy(self)\n\n    def createRenderingStrategy(self):\n        return ClearRenderingStrategy(self)\n","repo_name":"MIT-LCP/mitre_deid_toolkit","sub_path":"MIST_2_0_4/src/tasks/core/python/DEIDStyleReplacementEngine.py","file_name":"DEIDStyleReplacementEngine.py","file_ext":"py","file_size_in_byte":8923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"}
+{"seq_id":"28185713521","text":"# Problem: plane distance calculation\n# https://oj.lidemy.com/problem/1032\n# Compute the distance between two points with sqrt(|x1 - x2|^2 + |y1 - y2|^2) and round it to two decimal places\n\nn = int(input())\n\nall = []\n\n\ndef abs(n):\n    # absolute value\n    if n < 0:\n        return -n\n    return n\n\n\nfor i in range(n * 4):\n    all_Input = int(input())\n    all.append(all_Input)\n# print(all)\n\nfor i in range(n):\n    x1 = all[i * 4 + 0]\n    y1 = all[i * 4 + 1]\n    x2 = all[i * 4 + 2]\n    y2 = all[i * 4 + 3]\n\n    distance = ((abs(x1 - x2) ** 2) + (abs(y1 - y2) ** 2)) ** 0.5\n\n    # '%.2f' % rounds to two decimal places\n    print('%.2f' % round(distance,
2))\n","repo_name":"pg56714/LidemyOJ_Python3","sub_path":"1032.py","file_name":"1032.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24999387087","text":"import networkx as nx\n\nif __name__ == '__main__':\n\n f=open(\"Task24.txt\",\"r\")\n lines=f.readlines()\n theInput = []\n\n totalStartString = \"\"\n\n for i in lines:\n i = i.rstrip()\n theInput.append(i)\n totalStartString += i\n f.close()\n\n height = len(theInput)\n width = len(theInput[0])\n\n startPos = \"1,0,0\"\n endPos = str(width-2) + \",\" + str(height-1) + \",0\"\n\n #startPos = \"1,0\"\n #endPos = str(width-2) + \",\" + str(height-1)\n\n #oldField = [[1,0,0], [width-2,height-1,0]]\n oldField = []\n\n theGraph = nx.DiGraph()\n\n theGraph.add_node(startPos)\n\n #for i in theInput:\n # for j in range(2,len(i)):\n # theGraph.add_edge(i[0], i[j])\n\n blizzards = dict()\n blizzardIndex = 0\n\n emptyField = []\n\n for iIndex, i in enumerate(theInput):\n for jIndex, j in enumerate(i):\n if j == \"^\":\n blizzards[blizzardIndex] = [jIndex, iIndex, \"up\"]\n elif j == \">\":\n blizzards[blizzardIndex] = [jIndex, iIndex, \"right\"]\n elif j == \"v\":\n blizzards[blizzardIndex] = [jIndex, iIndex, \"down\"]\n elif j == \"<\":\n blizzards[blizzardIndex] = [jIndex, iIndex, \"left\"]\n blizzardIndex += 1\n\n if j != \"#\":\n emptyField.append([jIndex, iIndex])\n\n if j == \".\":\n oldField.append([jIndex, iIndex, 0])\n\n\n\n #print(len(blizzards))\n #print(len(emptyField))\n\n startString = \"\"\n\n for keys, values in blizzards.items():\n startString += str(values[0]) + \",\" + str(values[1]) + \",\"\n\n #print(startString)\n\n i = 0\n\n \n\n while True:\n\n i += 1\n\n print(i)\n\n thisField = emptyField.copy()\n\n for keys, values in blizzards.items():\n\n if values[2] == \"up\":\n values[1] -= 1\n if values[1] == 0:\n values[1] = height - 2\n elif values[2] == \"right\":\n values[0] += 1\n if values[0] == width - 1:\n values[0] = 1\n elif values[2] == \"down\":\n values[1] += 1\n if values[1] == height - 1:\n values[1] = 1\n elif values[2] == \"left\":\n values[0] -= 1\n if values[0] == 0:\n values[0] = width - 2\n\n if [values[0],values[1]] in thisField:\n thisField.remove([values[0],values[1]])\n\n for thisFieldCoord in thisField:\n freeCoord = str(thisFieldCoord[0]) + \",\" + str(thisFieldCoord[1]) + \",\" + str(i)\n theGraph.add_node(freeCoord)\n\n for theOldCoord in oldField:\n\n fromPos = str(theOldCoord[0]) + \",\" + str(theOldCoord[1]) + \",\" + str(i - 1)\n\n if [theOldCoord[0], theOldCoord[1]] in thisField:\n toPos = str(theOldCoord[0]) + \",\" + str(theOldCoord[1]) + \",\" + str(i)\n theGraph.add_edge(fromPos, toPos)\n if [theOldCoord[0] - 1, theOldCoord[1]] in thisField:\n toPos = str(theOldCoord[0] - 1) + \",\" + str(theOldCoord[1]) + \",\" + str(i)\n theGraph.add_edge(fromPos, toPos)\n if [theOldCoord[0] + 1, theOldCoord[1]] in thisField:\n toPos = str(theOldCoord[0] + 1) + \",\" + str(theOldCoord[1]) + \",\" + str(i)\n theGraph.add_edge(fromPos, toPos)\n if [theOldCoord[0], theOldCoord[1] - 1] in thisField:\n toPos = str(theOldCoord[0]) + \",\" + str(theOldCoord[1] - 1) + \",\" + str(i)\n theGraph.add_edge(fromPos, toPos)\n if [theOldCoord[0], theOldCoord[1] + 1] in thisField:\n toPos = str(theOldCoord[0]) + \",\" + str(theOldCoord[1] + 1) + \",\" + str(i)\n theGraph.add_edge(fromPos, toPos)\n\n if [theOldCoord[0], theOldCoord[1]] == [1,1]:\n #theGraph.add_edge(fromPos, startPos)\n theGraph.add_edge(fromPos, 
\"START\")\n if [theOldCoord[0], theOldCoord[1]] == [width-2,height-2]:\n #theGraph.add_edge(fromPos, endPos)\n theGraph.add_edge(fromPos, \"FINISH\")\n\n #print(\"Edges after i:\", i)\n #print(theGraph.edges())\n \n oldField = thisField.copy()\n\n currentString = \"\"\n\n for keys, values in blizzards.items():\n currentString += str(values[0]) + \",\" + str(values[1]) + \",\"\n\n if currentString == startString:\n \n for thisFieldCoord in thisField:\n freeCoord = str(thisFieldCoord[0]) + \",\" + str(thisFieldCoord[1]) + \",\" + str(0)\n theGraph.add_node(freeCoord)\n\n for theOldCoord in oldField:\n\n fromPos = str(theOldCoord[0]) + \",\" + str(theOldCoord[1]) + \",\" + str(i-1)\n\n if [theOldCoord[0], theOldCoord[1]] in thisField:\n toPos = str(theOldCoord[0]) + \",\" + str(theOldCoord[1]) + \",\" + str(0)\n theGraph.add_edge(fromPos, toPos)\n if [theOldCoord[0] - 1, theOldCoord[1]] in thisField:\n toPos = str(theOldCoord[0] - 1) + \",\" + str(theOldCoord[1]) + \",\" + str(0)\n theGraph.add_edge(fromPos, toPos)\n if [theOldCoord[0] + 1, theOldCoord[1]] in thisField:\n toPos = str(theOldCoord[0] + 1) + \",\" + str(theOldCoord[1]) + \",\" + str(0)\n theGraph.add_edge(fromPos, toPos)\n if [theOldCoord[0], theOldCoord[1] - 1] in thisField:\n toPos = str(theOldCoord[0]) + \",\" + str(theOldCoord[1] - 1) + \",\" + str(0)\n theGraph.add_edge(fromPos, toPos)\n if [theOldCoord[0], theOldCoord[1] + 1] in thisField:\n toPos = str(theOldCoord[0]) + \",\" + str(theOldCoord[1] + 1) + \",\" + str(0)\n theGraph.add_edge(fromPos, toPos)\n\n if [theOldCoord[0], theOldCoord[1]] == [1,1]:\n #theGraph.add_edge(fromPos, startPos)\n theGraph.add_edge(fromPos, \"START\")\n if [theOldCoord[0], theOldCoord[1]] == [width-2,height-2]:\n #theGraph.add_edge(fromPos, endPos)\n theGraph.add_edge(fromPos, \"FINISH\")\n\n \n repetitionRounds = i\n print(\"repetitionRounds:\", repetitionRounds)\n break\n\n\n\n #pathLength = nx.shortest_path_length(theGraph,\"1,0,0\",\"3,5,0\", weight = \"weight\")\n\n #print(\"pathLength:\", pathLength)\n\n #thePath = nx.shortest_path(theGraph,\"1,0,0\",\"2,5,0\", weight = \"weight\")\n\n #print(\"thePath:\", thePath)\n\n #print(theGraph.edges())\n\n #pathLength1 = nx.shortest_path_length(theGraph,startPos,endPos, weight = \"weight\")\n pathLength1 = nx.shortest_path_length(theGraph,startPos,\"FINISH\", weight = \"weight\")\n print(pathLength1)\n #thePath1 = nx.shortest_path(theGraph,startPos,endPos, weight = \"weight\")\n thePath1 = nx.shortest_path(theGraph,startPos,\"FINISH\", weight = \"weight\")\n print(thePath1)\n \n goBackStartTime2 = (pathLength1 % repetitionRounds)\n print(\"goBackStartTime2:\", goBackStartTime2)\n newStartPos = str(width-2) + \",\" + str(height-1) + \",\" + str(goBackStartTime2)\n #pathLength2 = nx.shortest_path_length(theGraph,newStartPos,startPos, weight = \"weight\")\n pathLength2 = nx.shortest_path_length(theGraph,newStartPos,\"START\", weight = \"weight\")\n print(pathLength2)\n #thePath2 = nx.shortest_path(theGraph,newStartPos,startPos, weight = \"weight\")\n thePath2 = nx.shortest_path(theGraph,newStartPos,\"START\", weight = \"weight\")\n print(thePath2)\n\n goBackStartTime3 = ((pathLength1 + pathLength2) % repetitionRounds)\n print(\"goBackStartTime3:\", goBackStartTime3)\n newStartPos = \"1,0,\" + str(goBackStartTime3)\n #pathLength3 = nx.shortest_path_length(theGraph,newStartPos,endPos, weight = \"weight\")\n pathLength3 = nx.shortest_path_length(theGraph,newStartPos,\"FINISH\", weight = \"weight\")\n print(pathLength3)\n #thePath3 = 
nx.shortest_path(theGraph,newStartPos,endPos, weight = \"weight\")\n thePath3 = nx.shortest_path(theGraph,newStartPos,\"FINISH\", weight = \"weight\")\n print(thePath3)\n\n print(\"Answer\")\n print(pathLength1 + pathLength2 + pathLength3)\n","repo_name":"rsirefelt/advent_of_code","sub_path":"2022/dec24/karl_vilen/Task24.py","file_name":"Task24.py","file_ext":"py","file_size_in_byte":8226,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"43485677818","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom data import strip_time, read_file\n\n\ndef plot_losses(*objs):\n \"\"\"\n Plot the training losses of multiple trained models.\n\n Parameters:\n - *objs: Variable number of trained models.\n\n Returns:\n - numpy.ndarray: The loss values for each model.\n\n Raises:\n - ValueError: If an untrained model is provided.\n - ValueError: If only one object is provided. More than one is required.\n \"\"\"\n for obj in objs:\n if obj.is_trained == False:\n raise ValueError('Model is untrained, only input trained models')\n if len(list(objs)) > 1:\n loss = []\n names = []\n for obj in objs:\n loss.append(obj.lossList)\n names.append(obj.typeStr)\n \n loss = np.array(loss)\n \n for i in loss:\n plt.plot(i)\n \n plt.xlabel('Epoch')\n plt.ylabel('Loss [MSE]')\n plt.legend(names)\n plt.title('Mean Squared Error for NN, RNN and CNN with 10 epochs')\n \n plt.show()\n else:\n raise ValueError('Only one object provided. More than one required.')\n \n return loss\n\ndef animate_double_pendulum(initials, thetas, title='A random, unspecified run', save=None, fname=None, denoise=False):\n \"\"\"\n Animate the motion of a double pendulum.\n\n Parameters:\n - initials (numpy.ndarray): Initial conditions for the double pendulum.\n - thetas (numpy.ndarray): Angular positions of the pendulum over time.\n - title (str, optional): Title of the animation. Default is 'A random, unspecified run'.\n - save (bool, optional): File path to save the animation as a GIF. Default is None.\n - fname (str, optional): Define a filename\n - denoise (bool, optional): Flag to enable denoising. 
Default is False.\n \"\"\"\n x = np.random.randint(len(thetas))\n theta1_vals = thetas[x, :, 0]\n theta2_vals = thetas[x, :, 1]\n L1 = initials[x, 0]\n L2 = initials[x, 1]\n\n x1 = L1 * np.sin(theta1_vals)\n y1 = -L1 * np.cos(theta1_vals)\n x2 = x1 + L2 * np.sin(theta2_vals)\n y2 = y1 - L2 * np.cos(theta2_vals)\n\n if denoise:\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))\n fig.suptitle(title)\n ax1.set_aspect('equal')\n ax1.set_xlim(-2, 2)\n ax1.set_ylim(-2, 2)\n ax1.grid()\n ax1.set_title('Original Motion') \n line1, = ax1.plot([], [], 'o-', lw=2, color='red')\n trail1, = ax1.plot([], [], '-', lw=1, color='gray')\n ax2.set_aspect('equal')\n ax2.set_xlim(-2, 2)\n ax2.set_ylim(-2, 2)\n ax2.grid()\n ax2.set_title('Denoised Motion')\n line2, = ax2.plot([], [], 'o-', lw=2, color='blue')\n trail2, = ax2.plot([], [], '-', lw=1, color='gray')\n else:\n fig = plt.figure()\n ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2), title=title)\n ax.set_aspect('equal')\n ax.grid()\n line1, = ax.plot([], [], 'o-', lw=2, color='red')\n trail1, = ax.plot([], [], '-', lw=1, color='gray')\n \n \n\n def init():\n line1.set_data([], [])\n trail1.set_data([], [])\n if denoise:\n line2.set_data([], [])\n trail2.set_data([], [])\n return line1, trail1, line2, trail2\n return line1, trail1\n\n def animate(i):\n x_vals1 = [0, x1[i], x2[i]]\n y_vals1 = [0, y1[i], y2[i]]\n line1.set_data(x_vals1, y_vals1)\n\n if denoise:\n smoothed_x2 = np.convolve(x2, np.ones(10) / 10, mode='same')\n smoothed_y2 = np.convolve(y2, np.ones(10) / 10, mode='same')\n\n smoothed_x1 = x1 + smoothed_x2 - x2\n smoothed_y1 = y1 + smoothed_y2 - y2\n\n line2.set_data([0, smoothed_x1[i], smoothed_x2[i]], [0, smoothed_y1[i], smoothed_y2[i]])\n\n trail_length = 100\n if i >= trail_length:\n trail_x2 = smoothed_x2[i - trail_length:i]\n trail_y2 = smoothed_y2[i - trail_length:i]\n trail_x1 = x2[i - trail_length:i]\n trail_y1 = y2[i - trail_length:i]\n else:\n trail_x2 = np.concatenate((smoothed_x2[:i], smoothed_x2[:i][::-1]))\n trail_y2 = np.concatenate((smoothed_y2[:i], smoothed_y2[:i][::-1]))\n trail_x1 = np.concatenate((x2[:i], x2[:i][::-1]))\n trail_y1 = np.concatenate((y2[:i], y2[:i][::-1]))\n\n trail2.set_data(trail_x2, trail_y2)\n trail1.set_data(trail_x1, trail_y1)\n \n return line1, trail1, line2, trail2\n \n else:\n trail_length = 100\n if i >= trail_length:\n trail_x2 = x2[i - trail_length:i]\n trail_y2 = y2[i - trail_length:i]\n else:\n trail_x2 = np.concatenate((x2[:i], x2[:i][::-1]))\n trail_y2 = np.concatenate((y2[:i], y2[:i][::-1]))\n\n trail1.set_data(trail_x2, trail_y2)\n \n\n return line1, trail1\n\n keyframes = np.arange(0, len(theta1_vals), 3)\n\n ani = animation.FuncAnimation(fig, animate, frames=keyframes,\n interval=22, blit=True, init_func=init)\n \n if 'CNN' in title:\n saveStr = 'CNN'\n elif 'RNN' in title:\n saveStr = 'RNN'\n elif 'NN' in title:\n saveStr = 'NN'\n elif 'RK4' in title:\n saveStr = 'RK4'\n if save:\n if fname:\n ani.save('../figures/%s.gif' % fname, writer='pillow')\n elif not denoise:\n ani.save('../figures/updated_randomly_sampled_run_%s.gif' % saveStr, writer='pillow')\n else:\n ani.save('../figures/updated_randomly_sampled_run_%s_with_denoise.gif' % saveStr, writer='pillow')\n\n plt.show()\n\n \ndef plot_traced_path(initials, thetas, title, heat=False, retPlot=False):\n \"\"\"\n Plot the traced path of a double pendulum.\n\n Parameters:\n - initials (numpy.ndarray): Initial conditions for the double pendulum.\n - thetas (numpy.ndarray): Angular positions of the pendulum 
over time.\n - title (str): Title of the plot.\n - heat (bool, optional): Whether to create a heatmap plot. Default is False.\n \"\"\"\n L1 = initials[:, 0]\n L2 = initials[:, 1]\n if not retPlot:\n plt.figure()\n if heat:\n all_x = np.concatenate([L1[i] * np.sin(thetas[i, :, 0]) + L2[i] * np.sin(thetas[i, :, 1]) for i in range(thetas.shape[0])])\n all_y = np.concatenate([-L1[i] * np.cos(thetas[i, :, 0]) - L2[i] * np.cos(thetas[i, :, 1]) for i in range(thetas.shape[0])])\n if retPlot:\n return all_x, all_y\n\n plt.hist2d(all_x, all_y, bins=100, cmap='hot', alpha=0.8)\n plt.colorbar(label='Density')\n \n \n else:\n if thetas.shape[1] > 50:\n thetas = thetas[:50, :, :]\n \n for i in range(thetas.shape[0]):\n theta1 = thetas[i, :, 0]\n theta2 = thetas[i, :, 1]\n x = L1[i] * np.sin(theta1) + L2[i] * np.sin(theta2)\n y = -L1[i] * np.cos(theta1) - L2[i] * np.cos(theta2) \n plt.plot(x, y, color='grey', alpha=0.4)\n \n \n \n plt.xlabel('x')\n plt.ylabel('y')\n plt.title(title)\n plt.grid(True)\n plt.show()\n \ndef overlay_heatmaps(obj):\n \"\"\"\n Overlay heatmaps generated from RK4 and predicted values.\n\n Parameters:\n - obj (object): A trained NN object\n \"\"\"\n initials_data = read_file('../data/initials.txt')\n double_pendulum_data = strip_time(read_file('../data/double_pendulum.txt', collapse=True))\n rkx, rky = plot_traced_path(initials_data, double_pendulum_data, 'None', True, True)\n initials_data = obj.initial_test\n double_pendulum_data = obj.predict(verbose=False)\n nnx, nny = plot_traced_path(initials_data, double_pendulum_data, 'None', True, True)\n nnL = len(nnx)\n x = np.random.randint(0, len(rkx)-nnL)\n\n rkx = rkx[x:x+nnL]\n rky = rky[x:x+nnL]\n rkheatmap, xedges, yedges = np.histogram2d(rkx,rky, bins=250)\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n nnheatmap, _, _ = np.histogram2d(nnx,nny, bins=250)\n diff = np.abs(rkheatmap-nnheatmap)\n \n plt.figure()\n plt.imshow(diff.T, extent=extent, origin='lower', cmap='hot')\n plt.colorbar(label='Overlap (lower is better)')\n plt.title('Comparing heatmaps between RK4 and %s predicted values\\n' % obj.typeStr)\n plt.xlabel('x')\n plt.ylabel('y')\n plt.grid(True)\n plt.show()\n\n\n","repo_name":"simloken/FYS5429","sub_path":"Project/code/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":8522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19907131007","text":"#coding=utf-8\n\n\"\"\"\nGiven a non-negative integer represented as a non-empty array of digits, plus one to the integer.\n\nYou may assume the integer do not contain any leading zero, except the number 0 itself.\n\nThe digits are stored such that the most significant digit is at the head of the list.\n\"\"\"\ndef plusOne(L):\n L[-1]+=1\n for i in reversed(range(1,len(L))):\n if L[i]==10:\n L[i]=0\n L[i-1]+=1\n if L[0]==10:\n L[0]=0\n L.insert(0,1)\n return L\n","repo_name":"veroyatnost/LeetCode10Py","sub_path":"[CLOSED]LEET0066_PLUS_ONE.py","file_name":"[CLOSED]LEET0066_PLUS_ONE.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"5"} +{"seq_id":"10042373767","text":"def test_fpf_apt_repo_present(File):\n \"\"\"\n Ensure the FPF apt repo, apt.freedom.press, is configured.\n This repository is necessary for the SecureDrop Debian packages,\n including:\n\n * securedrop-app-code\n * securedrop-keyring\n * securedrop-grsec\n\n Depending on the host, additional FPF-maintained packages will be\n 
installed, e.g. for OSSEC. Install state for those packages\n is tested separately.\n \"\"\"\n f = File('/etc/apt/sources.list.d/apt_freedom_press.list')\n assert f.contains('^deb \\[arch=amd64\\] https:\\/\\/apt\\.freedom\\.press trusty main$')\n\n\ndef test_fpf_apt_repo_fingerprint(Command):\n \"\"\"\n Ensure the FPF apt repo has the correct fingerprint on the associated\n signing pubkey. The key changed in October 2016, so test for the \n newest fingerprint, which is installed on systems via the \n `securedrop-keyring` package.\n \"\"\"\n\n\n c = Command('apt-key finger')\n\n fpf_gpg_pub_key_info = \"\"\"/etc/apt/trusted.gpg.d/securedrop-keyring.gpg\n---------------------------------------------\npub 4096R/00F4AD77 2016-10-20 [expires: 2017-10-20]\n Key fingerprint = 2224 5C81 E3BA EB41 38B3 6061 310F 5612 00F4 AD77\nuid SecureDrop Release Signing Key\"\"\"\n\n assert c.rc == 0\n assert fpf_gpg_pub_key_info in c.stdout\n\n fpf_gpg_pub_key_fingerprint_expired = 'B89A 29DB 2128 160B 8E4B 1B4C BADD E0C7 FC9F 6818'\n fpf_gpg_pub_key_info_expired = \"\"\"pub 4096R/FC9F6818 2014-10-26 [expired: 2016-10-27]\n Key fingerprint = #{fpf_gpg_pub_key_fingerprint_expired}\nuid Freedom of the Press Foundation Master Signing Key\"\"\"\n\n assert fpf_gpg_pub_key_fingerprint_expired not in c.stdout\n assert fpf_gpg_pub_key_info_expired not in c.stdout\n","repo_name":"Labs22/BlackServerOS","sub_path":"cryptography/securedrop-develop/securedrop-develop/testinfra/common/test_fpf_apt_repo.py","file_name":"test_fpf_apt_repo.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"5"} +{"seq_id":"37911362786","text":"import random\nfrom Mutation.mutation import Mutation\n\n\nclass LimitedMultigenMutation(Mutation):\n def __init__(self, children_size, probability, min_h, max_h, height_decimals, items_db):\n super().__init__(children_size, probability, min_h, max_h, height_decimals, items_db)\n\n def single_mutation(self, child):\n mutations_quantity = random.randint(1, 6)\n to_mutate = []\n size = 6\n while mutations_quantity > 0:\n rand_idx = random.randint(0, size-1)\n mutate_gene = self.genes_idx.pop(rand_idx)\n self.aux_idx.append(mutate_gene)\n size -= 1\n to_mutate.append(mutate_gene)\n mutations_quantity = mutations_quantity - 1\n self.genes_idx.extend(self.aux_idx)\n self.aux_idx = []\n\n mutate = []\n for gene in to_mutate:\n p = random.random()\n if p >= self.probability:\n mutate.append(gene)\n return self.mutate_gene(child, mutate)\n","repo_name":"maanuluque/Machine-Learning","sub_path":"TP2/Mutation/limited_multigen_mutation.py","file_name":"limited_multigen_mutation.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"73956159193","text":"# 2019/11/16\r\n\r\nimport math\r\nmod=10**9+7\r\nX,Y=map(int,input().split())\r\n\r\nx=(2*X-Y)/3\r\ny=X-2*x\r\nif x!=int(x) or y!=int(y):\r\n print(0)\r\n exit()\r\nelse:\r\n x=int(x)\r\n y=int(y)\r\n\r\ndef cmb(n,r,mod=10**9+7):\r\n fac=[1,1]\r\n for i in range(2,n+1):\r\n fac.append(fac[i-1]*i%mod)\r\n ret=fac[n]*pow(fac[r]*fac[n-r]%mod,mod-2,mod)%mod\r\n return ret\r\n\r\nif x<0 or y<0:\r\n print(0)\r\n exit()\r\n\r\nn=x+y\r\nr=min(x,y)\r\n\r\nans=cmb(n,r)\r\nif x==0:\r\n if Y==2*y and X==Y//2:\r\n print(ans)\r\n else:print(0)\r\nelif y==0:\r\n if X==2*x and Y==X//2:\r\n print(ans)\r\n else:print(0)\r\nelse:\r\n 
print(ans)\r\n\r\n","repo_name":"cale-i/atcoder","sub_path":"AtCoder/ABC145/D - Knight.py","file_name":"D - Knight.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"18430455509","text":"from datetime import date\nfrom typing import Callable\n\nimport pytest\n\nfrom clickhouse_connect.driver import Client\nfrom clickhouse_connect.driver.options import arrow\n\n\ndef test_arrow(test_client: Client, table_context: Callable):\n    if not arrow:\n        pytest.skip('PyArrow package not available')\n    if not test_client.min_version('21'):\n        pytest.skip(f'PyArrow is not supported in this server version {test_client.server_version}')\n    with table_context('test_arrow_insert', ['animal String', 'legs Int64']):\n        n_legs = arrow.array([2, 4, 5, 100])\n        animals = arrow.array(['Flamingo', 'Horse', 'Brittle stars', 'Centipede'])\n        names = ['legs', 'animal']\n        insert_table = arrow.Table.from_arrays([n_legs, animals], names=names)\n        test_client.insert_arrow('test_arrow_insert', insert_table)\n        result_table = test_client.query_arrow('SELECT * FROM test_arrow_insert', use_strings=False)\n        arrow_schema = result_table.schema\n        assert arrow_schema.field(0).name == 'animal'\n        assert arrow_schema.field(0).type.id == 14\n        assert arrow_schema.field(1).type.bit_width == 64\n        # pylint: disable=no-member\n        assert arrow.compute.sum(result_table['legs']).as_py() == 111\n        assert len(result_table.columns) == 2\n\n    arrow_table = test_client.query_arrow('SELECT number from system.numbers LIMIT 500',\n                                          settings={'max_block_size': 50})\n    arrow_schema = arrow_table.schema\n    assert arrow_schema.field(0).name == 'number'\n    assert arrow_schema.field(0).type.id == 8\n    assert arrow_table.num_rows == 500\n\n\ndef test_arrow_map(test_client: Client, table_context: Callable):\n    if not arrow:\n        pytest.skip('PyArrow package not available')\n    if not test_client.min_version('21'):\n        pytest.skip(f'PyArrow is not supported in this server version {test_client.server_version}')\n    with table_context('test_arrow_map', ['trade_date Date, code String',\n                                          'kdj Map(String, Float32)',\n                                          'update_time DateTime DEFAULT now()']):\n        data = [[date(2023, 10, 15), 'C1', {'k': 2.5, 'd': 0, 'j': 0}],\n                [date(2023, 10, 16), 'C2', {'k': 3.5, 'd': 0, 'j': -.372}]]\n        insert_result = test_client.insert('test_arrow_map', data, column_names=('trade_date', 'code', 'kdj'))\n        assert 2 == insert_result.written_rows\n        arrow_table = test_client.query_arrow('SELECT * FROM test_arrow_map ORDER BY trade_date',\n                                              use_strings=True)\n        print(arrow_table)\n        assert isinstance(arrow_table.schema, arrow.Schema)\n        insert_result = test_client.insert_arrow('test_arrow_map', arrow_table)\n        assert 4 == test_client.command('SELECT count() FROM test_arrow_map')\n        assert 2 == insert_result.written_rows\n","repo_name":"ClickHouse/clickhouse-connect","sub_path":"tests/integration_tests/test_arrow.py","file_name":"test_arrow.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"5"}
+{"seq_id":"12979921112","text":"'''\r\nThe mask flists produced this way hold 2000 masks per group; copy each group 5 times by hand to reach 10000.\r\n'''\r\n\r\nimport os\r\nfrom imageio import imread\r\nfrom PIL import Image\r\nimport argparse\r\n\r\n\r\ndef cal_ratio(img):\r\n    num = 0\r\n    for x in img.flatten():\r\n        if x > 0:\r\n            num += 1\r\n    return num / img.flatten().shape[0]\r\n\r\n\r\ndef main():\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('--path', default=\"/home/wwt117/Desktop/testing_mask_dataset\", type=str)\r\n    args = parser.parse_args()\r\n    folder_path = args.path\r\n\r\n    dirs = os.listdir(folder_path)  # returns the list of files and folders under the given path\r\n\r\n    mask0_10 = []\r\n    mask10_20 = []\r\n    mask20_30 = []\r\n    mask30_40 = []\r\n    mask40_50 = []\r\n    mask50_60 = []\r\n\r\n    i = 0\r\n    for file_item in dirs:\r\n        if i % 100 == 0:\r\n            print(\"the number of processed image:\", i)\r\n        i += 1\r\n        img_path = folder_path + \"/\" + file_item\r\n        img = imread(img_path)\r\n        r = cal_ratio(img)\r\n        if 0 <= r < 0.1:\r\n            mask0_10.append(img_path)\r\n        elif 0.1 <= r < 0.2:\r\n            mask10_20.append(img_path)\r\n        elif 0.2 <= r < 0.3:\r\n            mask20_30.append(img_path)\r\n        elif 0.3 <= r < 0.4:\r\n            mask30_40.append(img_path)\r\n        elif 0.4 <= r < 0.5:\r\n            mask40_50.append(img_path)\r\n        elif 0.5 <= r < 0.6:\r\n            mask50_60.append(img_path)\r\n\r\n    # write out the file lists\r\n    f_mask0_10 = open('masklist/mask0_10.flist', 'w')\r\n    f_mask0_10.write(\"\\n\".join(mask0_10))\r\n    f_mask0_10.close()\r\n    f_mask10_20 = open('masklist/mask10_20.flist', 'w')\r\n    f_mask10_20.write(\"\\n\".join(mask10_20))\r\n    f_mask10_20.close()\r\n    f_mask20_30 = open('masklist/mask20_30.flist', 'w')\r\n    f_mask20_30.write(\"\\n\".join(mask20_30))\r\n    f_mask20_30.close()\r\n    f_mask30_40 = open('masklist/mask30_40.flist', 'w')\r\n    f_mask30_40.write(\"\\n\".join(mask30_40))\r\n    f_mask30_40.close()\r\n    f_mask40_50 = open('masklist/mask40_50.flist', 'w')\r\n    f_mask40_50.write(\"\\n\".join(mask40_50))\r\n    f_mask40_50.close()\r\n    f_mask50_60 = open('masklist/mask50_60.flist', 'w')\r\n    f_mask50_60.write(\"\\n\".join(mask50_60))\r\n    f_mask50_60.close()\r\n    \r\n    print(\"Process Completed !!!\")\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"roooooz/Test_inpainting_metrics","sub_path":"classify_mask.py","file_name":"classify_mask.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"}
+{"seq_id":"40907183692","text":"from keras.models import load_model\r\nfrom PIL import Image, ImageOps\r\nimport numpy as np\r\nimport cv2\r\n\r\n# Agent Systems project on recognizing the interactions \"Handshake,\r\n# High five, Hug, Kiss, None of the above\"\r\n\r\n\r\n# Scientific notation disabled since it is unnecessary for our use case\r\nnp.set_printoptions(suppress=True)\r\n\r\n# Load the model trained with Teachable Machine\r\nmodel = load_model(\"keras_model.h5\", compile=False)\r\n\r\n# Load the labels from labels.txt\r\nclass_names = open(\"labels.txt\", \"r\").readlines()\r\n\r\n# Video source to analyze and run predictions on\r\nvideo_path = input(\"Inserisci il percorso del file video da analizzare: \")\r\n\r\n# Open the video with OpenCV\r\nvideo = cv2.VideoCapture(video_path)\r\n\r\n# Initialize the array of predictions\r\ntotal_predictions = []\r\n\r\n# Read and process the clips of the input video\r\nwhile True:\r\n    ret, frame = video.read()\r\n    if not ret:\r\n        break\r\n\r\n    # The video is converted into images (frames)\r\n    image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\r\n\r\n    # Pre-process the current frame\r\n    size = (224, 224)\r\n    image = ImageOps.fit(image, size, Image.Resampling.LANCZOS)\r\n    image_array = np.asarray(image)\r\n    normalized_image_array = (image_array.astype(np.float32) / 127.5) - 1\r\n\r\n    # Adapt the model's input shape to make it compatible for analysis\r\n    data = np.expand_dims(normalized_image_array, axis=0)\r\n\r\n    # Make a prediction and then append it to the list\r\n    prediction = model.predict(data)\r\n    total_predictions.append(prediction)\r\n\r\n# If the video has fewer than 10 frames, take all of them\r\nif len(total_predictions) < 10:\r\n    top_predictions = total_predictions\r\nelse:\r\n    # Otherwise find the indices of the 10 frames with the highest confidence\r\n    top_indices = np.argsort([p[0][np.argmax(p)] for p in total_predictions])[-10:]\r\n    # Select the frames at those top-10 indices\r\n    top_predictions = [total_predictions[i] for i in top_indices]\r\n\r\n# Average the predictions\r\naverage_prediction = np.mean(top_predictions, axis=0)\r\n\r\n# Get the predicted class index and the confidence score from the averaged prediction\r\nindex = np.argmax(average_prediction)\r\nclass_name = class_names[index]\r\nconfidence_score = average_prediction[0][index]\r\n\r\n# Print the overall prediction and the confidence score for the video\r\nprint(\"Classe (Media):\", class_name[2:])\r\nprint(\"Punteggio di Confidenza (Media):\", confidence_score)\r\n\r\n# Release the video capture object and close all open windows\r\nvideo.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"michele-astarita99/Sistemi-ad-Agenti-22-23","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"3238984483","text":"import os\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"initat.cluster.settings\")\n\nimport django\ndjango.setup()\n\nfrom django.db.models import Q\nfrom initat.cluster.backbone.models import device\nimport csv\n\n\"\"\"\nCSV example:\n\nname,netdevice[devname=enp0s25]__macaddr\nlemmy,00:11:22:33:44:55\nipmi-mc01,00:25:90:d7:8b:4b\nipmi-mc02,00:25:90:d7:8a:ff\nipmi-mc03,00:25:90:d9:5a:66\nipmi-mc04,00:25:90:d7:8b:59\n\n\"\"\"\n\n\nclass SmartKey(object):\n    def __init__(self, dev, key):\n        self.device = dev\n        self.key = key\n        self.parts = self.key.split(\"__\")\n        self._resolve()\n\n    def _resolve(self):\n        self.resolved = False\n        # start object\n        _obj = self.device\n        # copy\n        _prts = [_key for _key in self.parts]\n        while len(_prts) > 1 and _obj:\n            _prt = _prts.pop(0)\n            if _prt.count(\"[\"):\n                _prt, _filter = _prt.split(\"[\", 1)\n                _filter = dict(tuple(_entry.split(\"=\", 1)) for _entry in _filter[:-1].split(\",\"))\n            else:\n                _filter = {}\n            try:\n                _next = getattr(_obj, _prt)\n            except:\n                try:\n                    _next = getattr(_obj, \"{}_set\".format(_prt))\n                except:\n                    print(\n                        \"unresolvable '{}' for object '{}'\".format(\n                            _prt,\n                            str(_obj),\n                        )\n                    )\n                    _obj = None\n                    break\n                else:\n                    _next = _next.filter(**_filter)\n                    if len(_next) > 1:\n                        print(\n                            \"found more than one in list {} of '{}' ({:d})\".format(\n                                _prt,\n                                str(_obj),\n                                len(_next),\n                            )\n                        )\n                        _obj = None\n                    elif not len(_next):\n                        print(\n                            \"list {} of '{}' is empty\".format(\n                                _prt,\n                                str(_obj),\n                            )\n                        )\n                        _obj = None\n                    else:\n                        _obj = _next[0]\n            else:\n                _obj = _next\n        if _obj:\n            self.object = _obj\n            self.resolved = True\n            self.attr_name = _prts[0]\n\n    def set(self, value):\n        if self.resolved:\n            _prev = getattr(self.object, self.attr_name)\n            print(\n                \"changing attribute '{}' of '{}' from '{}' to '{}'\".format(\n                    str(self.attr_name),\n                    str(self.object),\n                    _prev,\n                    str(value)\n                )\n            )\n            setattr(self.object, self.attr_name, value)\n            self.object.save(update_fields=[self.attr_name])\n        else:\n            print(\"SmartKey had resolve
error(s)\")\n\n\ndef main(opt_ns):\n with open(opt_ns.file) as csv_file:\n reader = csv.DictReader(csv_file)\n for _ld in reader:\n if \"name\" not in _ld:\n print(\"Need name in line_dict (found: {})\".format(\", \".join(list(_ld.keys()))))\n else:\n _name = _ld.pop(\"name\")\n try:\n _dev = device.objects.get(Q(name=_name))\n except device.DoesNotExist:\n print(\"no device with name '{}' found\".format(_name))\n else:\n for _key, _value in _ld.items():\n _sm = SmartKey(_dev, _key)\n if _sm.resolved:\n _sm.set(_value)\n","repo_name":"walong365/icsw","sub_path":"initat/icsw/device/csvmodify.py","file_name":"csvmodify.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28842582023","text":"from fastapi import status, Request\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi.exceptions import RequestValidationError\nfrom fastapi.responses import JSONResponse\n\nfrom app.responses import Response\n\n\nasync def validation_exception_handler(request: Request, exc: RequestValidationError):\n response = Response('Validation Error: {}'.format(exc))\n return JSONResponse(\n status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n content=jsonable_encoder(response),\n )\n\n\nasync def all_exception_handler(request: Request, exc: Exception):\n response = Response('Something get wrong: {}'.format(exc))\n return JSONResponse(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n content=jsonable_encoder(response)\n )\n","repo_name":"nicolayreptile/funlinks","sub_path":"app/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10096714554","text":"\"\"\"\nTake the necessary utility functions from the playground notebook.\n\"\"\"\n\n# pylint: disable=invalid-name\n\nimport os\nimport re\n\n\nfrom typing import Tuple\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport numpy as np\nfrom PIL import Image\n\nimport ccd_image as ccd\n\n# First construct a path to where the data is stored on my machine.\nLOCAL_DATA_DIR = \"/Users/richard/Data/i10/CSL_Feb_2022/azimuthal_scans/\"\n\nLOCAL_20K_DIR = \"/Users/richard/Data/i10/CSL_Feb_2022/azimuthal_20K/\"\n\n# Get all the nexus files and scan directories in the data directory.\nNEXUS_FILES = sorted(\n [f for f in os.listdir(LOCAL_DATA_DIR) if f.endswith('.nxs')]\n)\nSCAN_DIRS = sorted(\n [d for d in os.listdir(LOCAL_DATA_DIR) if d.endswith('files')]\n)\n\nSCAN_DIRS_20K = sorted(\n [d for d in os.listdir(LOCAL_20K_DIR) if d.endswith('files')]\n)\n\nPONI_PATH = \"saxs_calib.poni\"\n\n# The first scan number in the script.\nFIRST_SCAN = 687537\n\n# The field values in the field sweeps.\nfield_values = list(range(31))\n\n# Calculated manually\nBEAMSTOP_TOP = 1082\nBEAMSTOP_BOTTOM = 1284\nBEAMSTOP_LEFT = 948\nBEAMSTOP_RIGHT = 1145\n\nBEAM_CENTRE_X = 1045\nBEAM_CENTRE_Y = 1175\n\n# Fit in the playground notebook.\nFITTING_CONSTANT = 61393.75028770998\n\nLOWER_RADIAL_BOUND = (BEAMSTOP_BOTTOM - BEAMSTOP_TOP)*np.sqrt(2)\nUPPER_RADIAL_BOUND = 2000 - BEAM_CENTRE_Y\n\nMETADATA = ccd.Metadata(BEAM_CENTRE_X, BEAM_CENTRE_Y)\n\nANGLES_20K = list(range(0, 180, 15))\nANGLES_20K.extend(list(range(5, 155, 15)))\n\n\ndef name_to_scan_number(dir_or_file_name: str) -> int:\n \"\"\"\n Takes the name of a scan directory or nexus file. 
Outputs the scan number.\n\n Args:\n dir_or_file_name:\n The name of the directory or file.\n\n Returns:\n The scan number.\n \"\"\"\n split_name = re.split('\\W', dir_or_file_name)\n for maybe_scan_number in split_name:\n try:\n return int(maybe_scan_number)\n except ValueError:\n continue\n\n\ndef scan_to_angle(dir_or_file_name: str) -> float:\n \"\"\"\n Takes the name of a scan directory or nexus file name. Outputs the azimuthal\n angle at which the field was applied in the scan.\n\n Args:\n dir_or_file_name:\n The name of the directory or file.\n\n Returns:\n The scan's corresponding azimuthal angle.\n \"\"\"\n scan_number = name_to_scan_number(dir_or_file_name)\n return (scan_number - FIRST_SCAN)*1.5\n\n\ndef _angle_to_scan_no(angle: float) -> int:\n \"\"\"\n Converts input angle to integer nth scan number.\n \"\"\"\n if float(angle) not in np.arange(0, 180, 1.5):\n raise ValueError(\"Angle was not scanned.\")\n return int(angle/1.5)\n\n\ndef get_path(scan_dir: str, field_magnitude: int, t_20K=False):\n \"\"\"\n Each scan has several .tiff files, one for each field magnitude in\n range(31).\n\n Args:\n scan_dir:\n The name of a scan directory or file.\n field_magnitude:\n The magnitude of the field of interest\n\n Returns:\n The path to the corresponding .tiff file.\n \"\"\"\n local_tiff_name = f\"pixis-{field_magnitude}.tiff\"\n if t_20K:\n return os.path.join(LOCAL_20K_DIR, scan_dir, local_tiff_name)\n return os.path.join(LOCAL_DATA_DIR, scan_dir, local_tiff_name)\n\n\ndef get_tiff(scan_dir: str, field_magnitude: int, t_20K=False) -> np.ndarray:\n \"\"\"\n Each scan has several .tiff files, one for each field magnitude in\n range(31).\n\n Args:\n scan_dir:\n The name of a scan directory or file.\n field_magnitude:\n The magnitude of the field of interest\n t_20K:\n A boolean representing whether or not this .tiff should be from\n the 20 K dataset.\n\n Returns:\n A numpy array representing the .tiff file.\n \"\"\"\n full_tiff_path = get_path(scan_dir, field_magnitude, t_20K)\n return np.array(Image.open(full_tiff_path)).astype(np.float64)\n\n\ndef get_tiff_angle_field(angle: float, field_magnitude: int, t_20K=False):\n \"\"\"\n Returns the tiff image at a certain angle and field.\n \"\"\"\n if t_20K:\n # It's a bit of a more complicated ordeal with this (incomplete) data.\n if angle not in ANGLES_20K:\n raise ValueError(\"Bad angle provided for 20 K scan.\")\n # Get the scan's index in the directory.\n idx = ANGLES_20K.index(angle)\n scan_dir = SCAN_DIRS_20K[idx]\n else:\n scan_dir = SCAN_DIRS[int(angle/1.5)]\n return get_tiff(scan_dir, field_magnitude, t_20K)\n\n\ndef get_rough_background(scan_dir: str, t_20K=False) -> np.ndarray:\n \"\"\"\n Assume that we can model the background as an image taken in the field\n polarized state. Assume that we field polarize at the maximum field value\n of 30 mT. In this case, the background is just the 30 mT image; return it.\n\n Args:\n scan_dir:\n The scan directory of interest.\n t_20K:\n True if we want background for a 20K scan, False if the high T\n dataset.\n\n Returns:\n A simple estimate of the background for the images in that scan\n directory.\n \"\"\"\n if t_20K:\n return get_tiff(scan_dir, 70, t_20K)\n return get_tiff(scan_dir, 30)\n\n\ndef get_rough_background_angle(field_angle, t_20K=False) -> np.ndarray:\n \"\"\"\n Assume that we can model the background as an image taken in the field\n polarized state. Assume that we field polarize at the maximum field value\n of 30 mT. 
In this case, the background is just the 30 mT image; return it.\n\n If t_20K is True, we take the background to be the 70mT image.\n \"\"\"\n if t_20K:\n idx = ANGLES_20K.index(field_angle)\n scan_dir = SCAN_DIRS_20K[idx]\n return get_tiff(scan_dir, 70, t_20K)\n\n scan_dir = SCAN_DIRS[int(field_angle/1.5)]\n return get_tiff(scan_dir, 30)\n\n\ndef imshow(img: np.ndarray, figsize: Tuple[int] = (20, 20),\n cmap: str = 'jet', title=\"\", **kwargs) -> None:\n \"\"\"\n Imshow, but with pretty colours and a big size.\n \"\"\"\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n picture = ax.imshow(img, cmap=cmap, **kwargs)\n fig.colorbar(picture, ax=ax)\n\n plt.title(title)\n\n fig.show()\n\n\ndef plotly_imshow(img: np.ndarray, **kwargs):\n \"\"\"\n Imshow, plotly version.\n \"\"\"\n px.imshow(img, color_continuous_scale=\"jet\", **kwargs).show()\n\n\ndef gauss(x, a, x0, sigma, const):\n \"\"\"\n A general Gaussian function.\n \"\"\"\n return a*np.exp(-(x-x0)**2/(2*sigma**2)) + const\n\n\ndef gauss_at_origin(x, a, sigma, const):\n \"\"\"\n A Gaussian that's fixed to start at the origin.\n \"\"\"\n return gauss(x, a, 0, sigma, const)\n\n\ndef summed_gauss(x, a1, sigma1, const1, a2, sigma2, offset2, const2):\n \"\"\"\n Sum of a gaussian fixed at the origin and a gaussian peak profile.\n \"\"\"\n return gauss_at_origin(x, a1, sigma1, const1) + \\\n gauss(x, a2, offset2, sigma2, const2)\n\n\n# Prepare a mask.\nSIGNAL_LENGTH_SCALE = 20\nBKG_LENGTH_SCALE = 100\n\nOPEN_MASK = get_rough_background(SCAN_DIRS[0]) > np.inf\nMASK = np.ones_like(get_rough_background(SCAN_DIRS[0]))\nMASK[(BEAMSTOP_TOP-BKG_LENGTH_SCALE):(BEAMSTOP_BOTTOM+BKG_LENGTH_SCALE),\n (BEAMSTOP_LEFT-BKG_LENGTH_SCALE):(BEAMSTOP_RIGHT+BKG_LENGTH_SCALE)] = 0\n","repo_name":"RBrearton/soliton_analysis","sub_path":"parsing_utils.py","file_name":"parsing_utils.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36173009466","text":"from helpers.Pulse_Generator import pulse_generator\nfrom myhdl import Signal, Simulation, intbv, traceSignals, instances\nfrom Register_File import RegisterFile\nfrom helpers.Clock_Generator import clock_generator\nfrom helpers.Random_Signal import random_signal\n\nif (__name__ == \"__main__\"):\n MAX_CYCLES = 100000\n #creates signal variables\n BusA = Signal(intbv(0, 0, 2**32)) #output\n BusB = Signal(intbv(0, 0, 2**32)) #output\n BusW = Signal(intbv(0, 0, 2**32)) #input\n\n RA = Signal(intbv(0, 0, 2**5)) #input\n RB = Signal(intbv(0, 0, 2**5)) #input\n RW = Signal(intbv(0, 0, 2**5)) #input\n RegWr = Signal(intbv(0, 0, 2**1)) #input\n clk = Signal(intbv(0, 0, 2**1)) #input\n rst = Signal(0) #input\n #makes an array for memory\n outregs = []\n for i in range(0, 32):\n outregs.append(Signal(intbv(0, 0, 2**32)))\n outregs[i].driven = not outregs[i].driven\n #creates drivers for signals\n busWAddress_driver = random_signal(clk, BusW)\n readAAddress_driver = random_signal(clk, RA)\n readBAddress_driver = random_signal(clk, RB)\n readWAddress_driver = random_signal(clk, RW)\n RegWr_driver = pulse_generator(clk, RegWr, delay=2)\n reset_driver = pulse_generator(clk, rst, delay=40)\n register_driver = traceSignals(RegisterFile(BusA, BusB, BusW, RA, RB, RW, RegWr, clk, rst, outregs))\n clock_driver = clock_generator(clk)\n #create and run simulation\n sim = Simulation(instances())\n 
sim.run(MAX_CYCLES)\n","repo_name":"txstate-pcarch-blue/CPU","sub_path":"python/Register_File_tb.py","file_name":"Register_File_tb.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"5"} +{"seq_id":"38494197015","text":"import logging\n\nfrom markupsafe import Markup\n\nimport pcapi.core.fraud.models as fraud_models\n\nfrom . import api as support_api\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef beneficiary_fraud_review_formatter(view, context, model, name) -> Markup:  # type: ignore [no-untyped-def]\n    result_mapping_class = {\n        fraud_models.FraudReviewStatus.OK: \"badge-success\",\n        fraud_models.FraudReviewStatus.KO: \"badge-danger\",\n        fraud_models.FraudReviewStatus.REDIRECTED_TO_DMS: \"badge-secondary\",\n    }\n    if not model.beneficiaryFraudReviews:\n        return Markup(\"\"\"inconnu\"\"\")\n\n    ordered_fraud_reviews = sorted(model.beneficiaryFraudReviews, key=lambda review: review.dateReviewed)\n    html = Markup(\"<div>\")\n\n    for beneficiary_fraud_review in ordered_fraud_reviews:\n        reviewer = beneficiary_fraud_review.author\n        reviewer_name = reviewer.full_name\n        review_result = beneficiary_fraud_review.review\n        badge = result_mapping_class[review_result]\n        html += Markup(\n            \"\"\"\n            <div>{reviewer_name}</div>\n            <span class=\"badge {badge}\">\n                {review_result_value}</span>\n            \"\"\"\n        ).format(reviewer_name=reviewer_name, badge=badge, review_result_value=review_result.value)\n\n    return html\n\n\ndef beneficiary_fraud_checks_formatter(view, context, model, name) -> Markup:  # type: ignore [no-untyped-def]\n    html = Markup(\"<ul>\")\n    for instance in model.beneficiaryFraudChecks:\n        html += Markup(\"<li>{instance.type.value}</li>\").format(instance=instance)\n    html += Markup(\"</ul>\")\n    return html\n\n\ndef beneficiary_subscription_status_formatter(view, context, model, name) -> Markup:  # type: ignore [no-untyped-def]\n    result_mapping_class = {\n        support_api.BeneficiaryActivationStatus.OK: {\"class\": \"badge-success\", \"text\": \"OK\"},\n        support_api.BeneficiaryActivationStatus.KO: {\"class\": \"badge-danger\", \"text\": \"KO\"},\n        support_api.BeneficiaryActivationStatus.SUSPICIOUS: {\"class\": \"badge-warning\", \"text\": \"SUSPICIOUS\"},\n        support_api.BeneficiaryActivationStatus.INCOMPLETE: {\"class\": \"badge-info\", \"text\": \"INCOMPLETE\"},\n        support_api.BeneficiaryActivationStatus.NOT_APPLICABLE: {\"class\": \"badge-void\", \"text\": \"N/A\"},\n    }\n    status = support_api.get_beneficiary_activation_status(model)\n\n    return Markup(\"\"\"<span class=\"badge {badge}\">{text}</span>\"\"\").format(\n        badge=result_mapping_class[status][\"class\"], text=result_mapping_class[status][\"text\"]\n    )\n","repo_name":"mariedestandau/poc-next-pro","sub_path":"api/src/pcapi/admin/custom_views/support_view/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"123703453","text":"from __future__ import print_function\n\nimport os\nimport subprocess\nimport socket\n\nclass prox_ctrl(object):\n    def __init__(self, ip, key=None, user=None):\n        self._ip = ip\n        self._key = key\n        self._user = user\n        self._children = []\n        self._proxsock = []\n\n    def ip(self):\n        return self._ip\n\n    def connect(self):\n        \"\"\"Simply try to run 'true' over ssh on remote system.\n        On failure, raise RuntimeWarning exception when possibly worth\n        retrying, and raise RuntimeError exception otherwise.\n        \"\"\"\n        return self.run_cmd('true', True)\n\n    def close(self):\n        \"\"\"Must be called before program termination.\"\"\"\n        for prox in self._proxsock:\n            prox.quit()\n        children = len(self._children)\n        if children == 0:\n            return\n        if children > 1:\n            print('Waiting for %d child processes to complete ...'
% children)\n for child in self._children:\n ret = os.waitpid(child[0], os.WNOHANG)\n if ret[0] == 0:\n print(\"Waiting for child process '%s' to complete ...\" % child[1])\n ret = os.waitpid(child[0], 0)\n rc = ret[1]\n if os.WIFEXITED(rc):\n if os.WEXITSTATUS(rc) == 0:\n print(\"Child process '%s' completed successfully\" % child[1])\n else:\n print(\"Child process '%s' returned exit status %d\" % (\n child[1], os.WEXITSTATUS(rc)))\n elif os.WIFSIGNALED(rc):\n print(\"Child process '%s' exited on signal %d\" % (\n child[1], os.WTERMSIG(rc)))\n else:\n print(\"Wait status for child process '%s' is 0x%04x\" % (\n child[1], rc))\n\n def run_cmd(self, command, _connect=False):\n \"\"\"Execute command over ssh on remote system.\n Wait for remote command completion.\n Return command output (combined stdout and stderr).\n _connect argument is reserved for connect() method.\n \"\"\"\n cmd = self._build_ssh(command)\n try:\n return subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as ex:\n if _connect and ex.returncode == 255:\n raise RuntimeWarning(ex.output.strip())\n raise RuntimeError('ssh returned exit status %d:\\n%s'\n % (ex.returncode, ex.output.strip()))\n\n def fork_cmd(self, command, name=None):\n \"\"\"Execute command over ssh on remote system, in a child process.\n Do not wait for remote command completion.\n Return child process id.\n \"\"\"\n if name is None:\n name = command\n cmd = self._build_ssh(command)\n pid = os.fork()\n if (pid != 0):\n # In the parent process\n self._children.append((pid, name))\n return pid\n # In the child process: use os._exit to terminate\n try:\n # Actually ignore output on success, but capture stderr on failure\n subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as ex:\n raise RuntimeError(\"Child process '%s' failed:\\n\"\n 'ssh returned exit status %d:\\n%s'\n % (name, ex.returncode, ex.output.strip()))\n os._exit(0)\n\n def prox_sock(self, port=8474):\n \"\"\"Connect to the PROX instance on remote system.\n Return a prox_sock object on success, None on failure.\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((self._ip, port))\n prox = prox_sock(sock)\n self._proxsock.append(prox)\n return prox\n except:\n return None\n\n def scp_put(self, src, dst):\n \"\"\"Copy src file from local system to dst on remote system.\"\"\"\n cmd = [ 'scp',\n '-B',\n '-oStrictHostKeyChecking=no',\n '-oUserKnownHostsFile=/dev/null',\n '-oLogLevel=ERROR' ]\n if self._key is not None:\n cmd.extend(['-i', self._key])\n cmd.append(src)\n remote = ''\n if self._user is not None:\n remote += self._user + '@'\n remote += self._ip + ':' + dst\n cmd.append(remote)\n try:\n # Actually ignore output on success, but capture stderr on failure\n subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as ex:\n raise RuntimeError('scp returned exit status %d:\\n%s'\n % (ex.returncode, ex.output.strip()))\n\n def _build_ssh(self, command):\n cmd = [ 'ssh',\n '-oBatchMode=yes',\n '-oStrictHostKeyChecking=no',\n '-oUserKnownHostsFile=/dev/null',\n '-oLogLevel=ERROR' ]\n if self._key is not None:\n cmd.extend(['-i', self._key])\n remote = ''\n if self._user is not None:\n remote += self._user + '@'\n remote += self._ip\n cmd.append(remote)\n cmd.append(command)\n return cmd\n\nclass prox_sock(object):\n def __init__(self, sock):\n self._sock = sock\n self._rcvd = b''\n\n def quit(self):\n if self._sock is not None:\n 
self._send('quit')\n self._sock.close()\n self._sock = None\n\n def start(self, cores):\n self._send('start %s' % ','.join(map(str, cores)))\n\n def stop(self, cores):\n self._send('stop %s' % ','.join(map(str, cores)))\n\n def speed(self, speed, cores, tasks=None):\n if tasks is None:\n tasks = [ 0 ] * len(cores)\n elif len(tasks) != len(cores):\n raise ValueError('cores and tasks must have the same len')\n for (core, task) in zip(cores, tasks):\n self._send('speed %s %s %s' % (core, task, speed))\n\n def reset_stats(self):\n self._send('reset stats')\n\n def core_stats(self, cores, task=0):\n rx = tx = drop = tsc = hz = 0\n self._send('core stats %s %s' % (','.join(map(str, cores)), task))\n for core in cores:\n stats = self._recv().split(',')\n rx += int(stats[0])\n tx += int(stats[1])\n drop += int(stats[2])\n tsc = int(stats[3])\n hz = int(stats[4])\n return rx, tx, drop, tsc, hz\n\n def set_random(self, cores, task, offset, mask, length):\n self._send('set random %s %s %s %s %s' % (','.join(map(str, cores)), task, offset, mask, length))\n\n def set_size(self, cores, task, pkt_size):\n self._send('pkt_size %s %s %s' % (','.join(map(str, cores)), task, pkt_size))\n\n def set_value(self, cores, task, offset, value, length):\n self._send('set value %s %s %s %s %s' % (','.join(map(str, cores)), task, offset, value, length))\n\n def _send(self, cmd):\n \"\"\"Append LF and send command to the PROX instance.\"\"\"\n if self._sock is None:\n raise RuntimeError(\"PROX socket closed, cannot send '%s'\" % cmd)\n self._sock.sendall(cmd.encode() + b'\\n')\n\n def _recv(self):\n \"\"\"Receive response from PROX instance, and return it with LF removed.\"\"\"\n if self._sock is None:\n raise RuntimeError(\"PROX socket closed, cannot receive anymore\")\n pos = self._rcvd.find(b'\\n')\n while pos == -1:\n self._rcvd += self._sock.recv(256)\n pos = self._rcvd.find(b'\\n')\n rsp = self._rcvd[:pos]\n self._rcvd = self._rcvd[pos+1:]\n return rsp.decode()\n\n","repo_name":"nvf-crucio/PROX","sub_path":"helper-scripts/openstackrapid/prox_ctrl.py","file_name":"prox_ctrl.py","file_ext":"py","file_size_in_byte":7650,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"5"} +{"seq_id":"5321170815","text":"from fastapi import APIRouter, HTTPException,status\nfrom sqlalchemy.orm import Session\nfrom fastapi import Depends\nfrom typing import List\n\nfrom db.session import get_db\nfrom schemas.author import AuthorCreate, AuthorShow\nfrom db.repository.author import *\n\nrouter = APIRouter()\n\ndef notFoundException(propiedad: str, valor: str):\n return HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Autor con {propiedad} '{valor}' no encontrado\")\n\n@router.post(\"/\", status_code = status.HTTP_201_CREATED)\ndef crear_autor(autor: AuthorCreate, db: Session = Depends(get_db)):\n autor = crearAutor(autor, db)\n return autor\n\n@router.get(\"/{autor_id}\", response_model = AuthorShow, response_model_by_alias=False)\ndef recuperar_autor(autor_id : int, db: Session = Depends(get_db)) :\n autor = recuperarAutor(autor_id, db)\n if not autor:\n raise notFoundException('id', autor_id)\n return autor\n\n@router.get(\"/\", response_model = List[AuthorShow], response_model_by_alias=False)\ndef recuperar_autores(db: Session = Depends(get_db)) :\n return recuperarAutores(db)\n\n@router.get(\"/nombre/{autor_nombre}\", response_model = List[AuthorShow], response_model_by_alias=False)\ndef recuperar_autores_nombre(autor_nombre : str, db: Session = Depends(get_db)) :\n autores = 
recuperarAutoresPorNombre(autor_nombre, db)\n if not autores:\n raise notFoundException('nombre', autor_nombre)\n return autores\n\n@router.put(\"/{autor_id}\")\ndef actualizar_autor(autor_id: int, autor: AuthorCreate, db: Session = Depends(get_db)) :\n message = actualizarAutor(autor_id, autor, db)\n if not message:\n raise notFoundException('id', autor_id)\n return recuperarAutor(autor_id, db)\n\n@router.delete(\"/{autor_id}\", status_code=status.HTTP_204_NO_CONTENT)\ndef eliminar_autor(autor_id: int, db: Session = Depends(get_db)) :\n message = eliminarAutor(autor_id, db)\n if not message:\n raise notFoundException('id', autor_id)","repo_name":"Aelwin/inventario_api","sub_path":"apis/version1/route_author.py","file_name":"route_author.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"27099962358","text":"import pygame as G, random, enum, os, time, collections, contextlib\n\nlijevi_odmak, gornji_odmak = 200, 35\nširina, visina = 10, 15\nmargina = 10\nq = 40\ncheat = False\nbodovi = 0\n\nclass Oblik(enum.Enum):\n S = [' ##',\n '## ']\n Z = ['## ',\n ' ##']\n O = ['##',\n '##']\n T = [' # ',\n '###']\n J = ['# ',\n '###']\n L = [' #',\n '###']\n I = ['####']\n\n\n def rotiraj(self):\n self.smjer = list(map(''.join, zip(*reversed(self.smjer))))\n\n def popuni(self, di=0, dj=0):\n opolje = set()\n for i, linija in enumerate(self.smjer, start=self.i+di):\n for j, znak in enumerate(linija, start=self.j+dj):\n if znak == '#': opolje.add(Blok(i, j, self.boja))\n return opolje\n\n def kolizija(self, di=0, dj=0):\n opolje = self.popuni(di, dj)\n if opolje.isdisjoint(polje):\n self.i += di\n self.j += dj\n else: return self.popuni()\n\n def nacrtaj(self):\n for blok in self.popuni(): blok.nacrtaj()\n\n\ndef novi_oblik(oblici = list(Oblik)):\n o = random.choice(oblici)\n o.smjer = o.value\n o.i, o.j = 0, širina // 2 - 1\n print(*o.value, sep='\\n')\n print('-' * 10)\n o.boja = random.choice(Boja.za_oblike)\n if o.kolizija(): raise Kraj\n dodaj_bodove(1)\n return o\n\n\ndef dodaj_bodove(b):\n global bodovi\n bodovi += b\n\n\ndef konsolidiraj():\n polje.update(o.popuni())\n iznad = set()\n linije = 0\n while ...:\n for i in range(visina):\n linija = {blok for blok in polje if blok.i == i and blok.unutra()}\n if len(linija) < širina: iznad |= linija\n else:\n dodaj_bodove(100)\n polje.difference_update(iznad | linija)\n polje.update(blok.dolje() for blok in iznad)\n break\n else: break\n\n\nclass Blok(collections.namedtuple('BlokBaza', 'i j boja')):\n def nacrtaj(self): G.draw.rect(ekran, self.boja,\n G.Rect(self.j*q + margina, self.i*q + margina, q, q))\n def pozicija(self): return self.i, self.j\n def dolje(self): return self._replace(i=self.i + 1)\n def unutra(self): return self.i < visina and 1 <= self.j <= širina\n def __hash__(self): return hash(self.pozicija())\n def __eq__(self, other): return self.pozicija() == other.pozicija()\n \n\nclass Boja:\n crvena = G.Color('#ff0000')\n crna = G.Color('black')\n plava = G.Color('blue')\n siva = G.Color('gray')\n žuta = G.Color('yellow')\n narančasta = G.Color('orange')\n zelena = G.Color('green')\n za_oblike = [crvena, plava, žuta, narančasta, zelena]\n\n\nclass Kraj(Exception): pass\n\n\nos.environ['SDL_VIDEO_WINDOW_POS'] = f'{lijevi_odmak},{gornji_odmak}'\ndimenzije = margina + (širina+2)*q + 200, margina*2 + (visina+1)*q\nekran = G.display.set_mode(dimenzije)\nG.key.set_repeat(1, 
100)\nG.mixer.init()\nG.mixer.music.load('tetrisc.mid')\nG.mixer.music.play(-1)\npolje = {Blok(i, 0, Boja.siva) for i in range(1 + visina)} \\\n | {Blok(visina, i, Boja.siva) for i in range(1, 1 + širina)} \\\n | {Blok(i, 1 + širina, Boja.siva) for i in range(1 + visina)}\no = novi_oblik()\ngravitacija = G.USEREVENT\nG.time.set_timer(gravitacija, 500)\n\nwith contextlib.suppress(Kraj):\n while ...:\n ekran.fill(Boja.crna)\n for blok in polje: blok.nacrtaj()\n o.nacrtaj()\n if bodovi > 10_000 and not cheat:\n print('Cheat code activated! Press CAPS LOCK...')\n cheat = True\n G.display.flip()\n for događaj in G.event.get():\n if događaj.type == G.QUIT: raise Kraj\n elif događaj.type == gravitacija:\n if o.kolizija(1):\n konsolidiraj()\n o = novi_oblik()\n elif događaj.type == G.KEYDOWN:\n if događaj.key == G.K_ESCAPE: raise Kraj\n elif događaj.key == G.K_LEFT: o.kolizija(0, -1)\n elif događaj.key == G.K_RIGHT: o.kolizija(0, 1)\n elif događaj.key == G.K_DOWN: o.kolizija(1)\n elif događaj.key == G.K_UP: o.rotiraj()\n elif događaj.key == G.K_SPACE:\n while not o.kolizija(1): dodaj_bodove(5)\n konsolidiraj()\n o = novi_oblik()\n elif događaj.key == G.K_CAPSLOCK and cheat: o.i = 0\nG.quit()\nprint('This is the end, my friend...\\nBodovi:', bodovi)\n","repo_name":"vedgar/Luka-python","sub_path":"Tetris/tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70330400473","text":"execfile(\"interrupts.py\")\n\nif \"reg\" in node:\n\treg = \"uint32_t *_GIC = (uint32_t *)(KSEG1_BASE + %(reg)s);\" % {\"reg\": node[\"reg\"][0]}\nelse:\n\treg = \"\"\nif \"mti,no-reset\" in node:\n\treset = \"uint32_t _IRQ_reset = 0;\"\nelse:\n\treset = \"\"\n\nprint(\"\"\"#ifdef __cplusplus\nextern \"C\" {\n#endif\n#include \\\"meos/irq/irq.h\\\"\nconst uint32_t _IRQ_linuxOffset = 7;\n%(reg)s\n%(reset)s\nvoid _IRQ_sharedShunt(int32_t intNum);\n#ifdef __cplusplus\n}\n#endif\"\"\" % {\"reg\": reg, \"reset\": reset})\n\ndef decodeGICInterrupt(interrupt, descName):\n\tif int(interrupt[0]) != 0: # GIC_SHARED\n\t\traise ValueError(\"Only GIC_SHARED interrupts supported\")\n\tif int(interrupt[2]) == 2:\n\t\t\tpolarity = \"IRQ_FALLING_EDGE\"\n\t\t\ttrigger = \"IRQ_EDGE_TRIGGERED\"\n\telif int(interrupt[2]) == 3:\n\t\t\tpolarity = \"0\"\n\t\t\ttrigger = \"IRQ_EDGE_DOUBLE_TRIGGERED\"\n\telif int(interrupt[2]) == 4:\n\t\t\tpolarity = \"IRQ_ACTIVE_HIGH\"\n\t\t\ttrigger = \"IRQ_LEVEL_SENSITIVE\"\n\telif int(interrupt[2]) == 8:\n\t\t\tpolarity = \"IRQ_ACTIVE_LOW\"\n\t\t\ttrigger = \"IRQ_LEVEL_SENSITIVE\"\n\telse:\n\t\tpolarity = \"IRQ_RISING_EDGE\"\n\t\ttrigger = \"IRQ_EDGE_TRIGGERED\"\n\tprint(\"\"\"%(descName)s.intNum = IRQ_MULTIPLEXED;\n%(descName)s.isrFunc = _IRQ_sharedShunt;\n%(descName)s.impSpec.extNum = %(extNum)s;\n%(descName)s.impSpec.polarity = %(polarity)s;\n%(descName)s.impSpec.trigger = %(trigger)s;\"\"\" % {\"descName\": descName, \"extNum\": interrupt[1], \"polarity\": polarity, \"trigger\": trigger})\n\nvariables[\"interruptParent\"][\"mti,gic\"] = decodeGICInterrupt\n","repo_name":"MIPS/meos","sub_path":"walks/drivers/c/mti,gic.py","file_name":"mti,gic.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"5"} +{"seq_id":"27213891836","text":"from random import randint\nfrom time import sleep\nfrom operator import itemgetter\n\n\nlista_jogadores = []\njogador = {}\n\nfor i in range(0, 6):\n jogador['nome'] 
= 'jogador ' + str(i+1)\n jogador['valor'] = randint(1, 10)\n print(f'O {jogador[\"nome\"]} tirou o valor {jogador[\"valor\"]}')\n lista_jogadores.append(jogador.copy())\n sleep(1)\n\nprint('-=-'*20)\n\nranking = sorted(lista_jogadores, key=itemgetter('valor'), reverse=True)\n\nprint('== RANKING DOS JOGADORES ==')\nfor pos, jogador in enumerate(ranking):\n print(f' {pos+1}º lugar {jogador[\"nome\"]} com {jogador[\"valor\"]}')\n","repo_name":"Lidianacosta/trilha-back-end-python","sub_path":"capacitacao/semana_IIIII/basic/dicionario/091.py","file_name":"091.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"14386304034","text":"# -*- coding:utf-8 –*-\nimport os\nimport pandas as pd\nsrc_dir_path=r'D:\\2019plan\\data_销售' \nkey =['US','CA','MX']\nt=key[0]\nprint(t)\ndata_sales=pd.read_excel(r'D:\\2019plan\\sales.xlsx')\nsalescolumns=data_sales.columns.tolist()\nprint(salescolumns)\nfor file in os.listdir(src_dir_path):\n print(file)\n \n if key[0] in file:\n # 执行语句\n print(\"有US\")\n data_csv = pd.read_csv(r'D:\\\\2019plan\\\\data_销售\\\\'+ str(file),encoding='utf-8 ', error_bad_lines=False) # 读取以分\n print(data_csv)\n data_csv.to_excel(r'D:\\2019plan\\sales.xlsx', startrow=1,header=True,index=false)\n elif key[1]in file:\n print(\"有CA\")\n data_csv = pd.read_csv(r'D:\\\\2019plan\\data_销售\\\\'+str(file),encoding='utf-8 ', error_bad_lines=False) # 读取以分\n \n \n #df_data.columns.tolist())\n data_csv.columns=salescolumns\n \n data_csv.to_excel(r'D:\\2019plan\\sales.xlsx', startrow=0,header=True,index=False)\n\n print(data_csv)\n \n elif key[2]in file:\n print(\"有MX\")\n data_csv = pd.read_csv(r'D:\\\\2019plan\\data_销售\\\\'+str(file), encoding='utf-8 ', error_bad_lines=False) # 读取以分\n print(data_csv)\n else:\n print(\"都没有\")\n \n\n\n\n \n # Print\n\n # For i in key\n #if key[i] in file\n # print(key[i])\n \n\n \n","repo_name":"XiaoguangGuo/0-Python--","sub_path":"备份/备份/备份/Amazonplan-CopyFiles-test2.py","file_name":"Amazonplan-CopyFiles-test2.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8467370794","text":"import math\n\n\nclass ViewSubplot:\n def __init__(self, ax):\n # self.ax = ax\n self.right_lane, = ax.plot(\n [-10, 10, -10, 10], [-10, 150, 150, -10],\n 'bo', lw=3, alpha=0.4)\n self.left_lane, = ax.plot(\n [0], [0], 'go', lw=3, alpha=0.5)\n self.obstacles, = ax.plot(\n [0], [0], 'r.', ms=20, alpha=0.5)\n self.ref_lane, = ax.plot(\n [0], [0], 'k--', lw=3, alpha=0.8)\n self.vehicle = ax.plot(\n [-1.055, 1.055, 1.055, -1.055, -1.055], [0, 0, -4.933, -4.933, 0],\n 'r-', lw=1)\n self.routing, = ax.plot(\n [0], [0], 'r--', lw=3, alpha=0.8)\n\n self.speed_line, = ax.plot([0], [0], 'r-', lw=3, alpha=0.4)\n self.acc_line, = ax.plot([0], [0], 'y-', lw=3, alpha=1)\n\n ax.set_xlim([-10, 10])\n ax.set_ylim([-10, 100])\n ax.relim()\n ax.set_xlabel(\"lat(m)\")\n self.next_lanes = []\n for i in range(8):\n lane, = ax.plot([0], [0], 'b-', lw=3, alpha=0.4)\n self.next_lanes.append(lane)\n\n self.left_lane.set_visible(False)\n self.right_lane.set_visible(False)\n self.ref_lane.set_visible(False)\n\n def show(self, mobileye_data, localization_data, planning_data,\n chassis_data, routing_data):\n self.left_lane.set_visible(True)\n self.right_lane.set_visible(True)\n self.ref_lane.set_visible(True)\n\n mobileye_data.lane_data_lock.acquire()\n self.right_lane.set_xdata(mobileye_data.right_lane_x)\n 
self.right_lane.set_ydata(mobileye_data.right_lane_y)\n self.left_lane.set_xdata(mobileye_data.left_lane_x)\n self.left_lane.set_ydata(mobileye_data.left_lane_y)\n mobileye_data.lane_data_lock.release()\n\n planning_data.path_lock.acquire()\n self.ref_lane.set_xdata(planning_data.path_x)\n self.ref_lane.set_ydata(planning_data.path_y)\n planning_data.path_lock.release()\n\n if chassis_data.is_auto():\n self.ref_lane.set_color('r')\n else:\n self.ref_lane.set_color('k')\n\n mobileye_data.obstacle_data_lock.acquire()\n self.obstacles.set_ydata(mobileye_data.obstacle_x)\n self.obstacles.set_xdata(mobileye_data.obstacle_y)\n mobileye_data.obstacle_data_lock.release()\n\n mobileye_data.next_lane_data_lock.acquire()\n for i in range(len(mobileye_data.next_lanes_x)):\n if i >= len(self.next_lanes):\n mobileye_data.next_lane_data_lock.release()\n break\n self.next_lanes[i].set_xdata(mobileye_data.next_lanes_x[i])\n self.next_lanes[i].set_ydata(mobileye_data.next_lanes_y[i])\n mobileye_data.next_lane_data_lock.release()\n\n if localization_data.localization_pb is None:\n return\n\n vx = localization_data.localization_pb.pose.position.x\n vy = localization_data.localization_pb.pose.position.y\n\n routing_data.routing_data_lock.acquire()\n path_x = [x - vx for x in routing_data.routing_x]\n path_y = [y - vy for y in routing_data.routing_y]\n routing_data.routing_data_lock.release()\n\n heading = localization_data.localization_pb.pose.heading\n npath_x = []\n npath_y = []\n\n for i in range(len(path_x)):\n x = path_x[i]\n y = path_y[i]\n # newx = x * math.cos(heading) - y * math.sin(heading)\n # newy = y * math.cos(heading) + x * math.sin(heading)\n newx = x * math.cos(- heading + 1.570796) - y * math.sin(\n -heading + 1.570796)\n newy = y * math.cos(- heading + 1.570796) + x * math.sin(\n -heading + 1.570796)\n npath_x.append(newx)\n npath_y.append(newy)\n\n self.routing.set_xdata(npath_x)\n self.routing.set_ydata(npath_y)\n\n speed_x = localization_data.localization_pb.pose.linear_velocity.x\n speed_y = localization_data.localization_pb.pose.linear_velocity.y\n acc_x = localization_data.localization_pb.pose.linear_acceleration.x\n acc_y = localization_data.localization_pb.pose.linear_acceleration.y\n heading = localization_data.localization_pb.pose.heading\n\n new_speed_x = math.cos(-heading + math.pi / 2) * speed_x - math.sin(\n -heading + math.pi / 2) * speed_y\n new_speed_y = math.sin(-heading + math.pi / 2) * speed_x + math.cos(\n -heading + math.pi / 2) * speed_y\n\n new_acc_x = math.cos(-heading + math.pi / 2) * acc_x - math.sin(\n -heading + math.pi / 2) * acc_y\n new_acc_y = math.sin(-heading + math.pi / 2) * acc_x + math.cos(\n -heading + math.pi / 2) * acc_y\n\n # self.speed_line.set_xdata([0, new_speed_x])\n # self.speed_line.set_ydata([0, new_speed_y])\n # self.acc_line.set_xdata([0, new_acc_x])\n # self.acc_line.set_ydata([0, new_acc_y])\n","repo_name":"ApolloAuto/apollo","sub_path":"modules/tools/mobileye_viewer/view_subplot.py","file_name":"view_subplot.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","stars":23653,"dataset":"github-code","pt":"5"} +{"seq_id":"2623609226","text":"\ntest_patterns = [\n ('indic','operator','indic','fun1'),\n ('indic','operator','number','func1'),\n ('number','operator','indic','func1'),\n ('indic','operator','indic','adverb','operator','indic','func2'),\n ('class','indic','operator','number','adverb','operator','number','func2'),\n 
('class','indic','operator','number','adverb','operator','indic','func2'),\n ('indic','operator','number','adverb','operator','indic','func2')\n]\nimport json\n\nclass patterns_decision_tree(object):\n\n def __init__(self, patterns = [], **kw):\n self.patterns = patterns\n self.patterns_tree = {}\n if not self.patterns:\n print(\"patterns need to load, use function: load_patterns\")\n else:\n self.patterns_tree = self.patterns_to_pt()\n \n def load_patterns(self, patterns=[]):\n self.patterns = patterns\n\n def patterns_to_pt(self):\n if not self.patterns :\n return {}\n pt = {}\n depth = 0\n max_depth = 0\n for pattern in self.patterns:\n max_depth = len(pattern) if len(pattern) > max_depth else max_depth \n print(\"max depth : %d\" % max_depth)\n for depth in range(max_depth):\n for pattern in self.patterns:\n if len(pattern) < depth:\n #路径已经走完,不再处理\n continue \n else:\n #路径还未走完,所以添加一个路径到原来的通路上\n exec( \"pt\" + \"['\" + \"']['\".join(pattern[:depth + 1]) + \"'] = {}\")\n return pt\n\n def save_pt_to_file(self, file):\n with open(file, mode='w') as f:\n f.write(json.dumps(self.patterns_tree))\n \n def load_pt_from_file(self, file):\n with open(file) as f:\n self.patterns_tree = json.loads(f.readline())\n\n def isNode(self, tree, key):\n if key in tree and tree[key]:\n return True\n return False\n\n def isLeaf(self, tree, key):\n if key in tree and (not tree[key]):\n return True\n return False\n\n def find_patterns(self, types, params, pt):\n types_ = types\n types_.append('eod')\n patterns = []\n cur = pt\n st = 0\n for i in range(len(types_)):\n if self.isNode(cur, types_[i]) and i != len(types_):\n #如果包含子树并且没有到句尾,先进入子树,满足最大长度搜索\n cur = cur[types_[i]]\n else:\n #如果不包子树或者到句尾,则在当前树中找到执行方法\n for key in cur.keys():\n if self.isLeaf(cur, key):\n patterns.append((key, params[st:i] ))\n st = i + 1\n cur = pt \n return patterns\n\n","repo_name":"kanonlemon/tools","sub_path":"functions/patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26609588904","text":"import asyncio\r\nimport os\r\nimport time\r\nimport requests\r\nimport aiohttp\r\nfrom pyrogram import filters\r\nfrom pyrogram import Client\r\nfrom pyrogram.types import Message, InlineKeyboardMarkup, InlineKeyboardButton, ReplyKeyboardMarkup\r\nfrom strings.filters import command\r\nfrom AnonX import (Apple, Resso, SoundCloud, Spotify, Telegram, YouTube, app)\r\nfrom AnonX import app\r\nfrom asyncio import gather\r\nfrom pyrogram.errors import FloodWait\r\n\r\n\r\n\r\n\r\n@app.on_message(command([\"المالك\", \"صاحب الخرابه\", \"المنشي\"]) & filters.group)\r\nasync def gak_owne(client: Client, message: Message):\r\n if len(message.command) >= 2:\r\n return \r\n else:\r\n chat_id = message.chat.id\r\n f = \"administrators\"\r\n async for member in client.iter_chat_members(chat_id, filter=f):\r\n if member.status == \"creator\":\r\n id = member.user.id\r\n key = InlineKeyboardMarkup([[InlineKeyboardButton(member.user.first_name, user_id=id)]])\r\n m = await client.get_chat(id)\r\n if m.photo:\r\n photo = await app.download_media(m.photo.big_file_id)\r\n return await message.reply_photo(photo, caption=f\"🧞‍♂️ ¦𝙽𝙰𝙼𝙴 :{m.first_name}\\n🎯 ¦𝚄𝚂𝙴𝚁 :@{m.username}\\n🎃 ¦𝙸𝙳 :`{m.id}`\\n💌 ¦𝙱𝙸𝙾 :{m.bio}\\n✨ ¦𝙲𝙷𝙰𝚃: {message.chat.title}\\n♻️ ¦𝙸𝙳.𝙲𝙷𝙰𝚃 :`{message.chat.id}`\",reply_markup=key)\r\n else:\r\n return await message.reply(\"• \" + member.user.mention)\r\n \r\n \r\n \r\n\r\n \r\n@app.on_message(command([\"اسمي\", 
\"اسمي اي\"]) & filters.group )\r\nasync def vgdg(client: Client, message: Message):\r\n await message.reply_text(\r\n f\"\"\"❤️‍🔥 اسمك »» {message.from_user.mention()}\"\"\") \r\n\r\n \r\n\r\narray = []\r\n@app.on_message(command([\"@all\", \"تاك\",\"تاك للكل\"]) & ~filters.private)\r\nasync def nummmm(client: app, message):\r\n if message.chat.id in array:\r\n return await message.reply_text(\"**التاك قيد التشغيل حالياً ،**\")\r\n chek = await client.get_chat_member(message.chat.id, message.from_user.id)\r\n if not chek.status in [\"administrator\", \"creator\"]:\r\n await message.reply(\"**يجب انت تكون مشرف لاستخدام الامر 🖱️**\")\r\n return\r\n await message.reply_text(\"**جاري بدأ المنشن ، لايقاف الامر اضغط **\\n /cancel او اكتب بس منشن\")\r\n i = 0\r\n txt = \"\"\r\n zz = message.text\r\n if message.photo:\r\n photo_id = message.photo.file_id\r\n photo = await client.download_media(photo_id)\r\n zz = message.caption\r\n try:\r\n zz = zz.replace(\"@all\",\"\").replace(\"تاك\",\"\").replace(\"نادي الكل\",\"\")\r\n except:\r\n pass\r\n array.append(message.chat.id)\r\n async for x in client.iter_chat_members(message.chat.id):\r\n if message.chat.id not in array:\r\n return\r\n if not x.user.is_deleted:\r\n i += 1\r\n txt += f\" {x.user.mention} ،\"\r\n if i == 5:\r\n try:\r\n if not message.photo:\r\n await client.send_message(message.chat.id, f\"{zz}\\n{txt}\")\r\n else:\r\n await client.send_photo(message.chat.id, photo=photo, caption=f\"{zz}\\n{txt}\")\r\n i = 0\r\n txt = \"\"\r\n await asyncio.sleep(2)\r\n except FloodWait as e:\r\n flood_time = int(e.x)\r\n if flood_time > 200:\r\n continue\r\n await asyncio.sleep(flood_time)\r\n except Exception:\r\n array.remove(message.chat.id)\r\n array.remove(message.chat.id)\r\n\r\n\r\n@app.on_message(command([\"بس المنشن\", \"/cancel\",\"بس منشن\"]))\r\nasync def stop(client, message):\r\n chek = await client.get_chat_member(message.chat.id, message.from_user.id)\r\n if not chek.status in [\"administrator\", \"creator\"]:\r\n await message.reply(\"**يجب انت تكون مشرف لاستخدام الامر 🖱️\")\r\n return\r\n if message.chat.id not in array:\r\n await message.reply(\"**المنشن متوقف بالفعل**\")\r\n return \r\n if message.chat.id in array:\r\n array.remove(message.chat.id)\r\n await message.reply(\"**تم ايقاف المنشن بنجاح✅**\")\r\n return\r\n\r\n\r\n\r\n\r\n","repo_name":"Tompriv/Tom","sub_path":"AnonX/plugins/تاك.py","file_name":"تاك.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"35868746443","text":"import time\nimport busio\nimport digitalio\nimport board\n\n# Import LoRa Library\nimport adafruit_rfm9x\n\n# Import BME280 Sensor Library\nimport adafruit_bme280\n\n# Device ID\nFEATHER_ID = 0x01\n\n# Delay between sending radio data, in minutes.\nSENSOR_SEND_DELAY = 1\n\n# Create library object using our Bus I2C port\ni2c = busio.I2C(board.SCL, board.SDA)\nbme280 = adafruit_bme280.Adafruit_BME280_I2C(i2c)\n\n# Define radio frequency, MUST match gateway frequency.\nRADIO_FREQ_MHZ = 905.5\n\n# Define pins connected to the chip, use these if wiring up the breakout according to the guide:\n# pylint: disable=c-extension-no-member\nCS = digitalio.DigitalInOut(board.RFM9X_CS)\n# pylint: disable=c-extension-no-member\nRESET = digitalio.DigitalInOut(board.RFM9X_RST)\n\n# Define the onboard LED\nLED = digitalio.DigitalInOut(board.D13)\nLED.direction = digitalio.Direction.OUTPUT\n\n# Initialize SPI bus.\nspi = busio.SPI(board.SCK, MOSI=board.MOSI, 
MISO=board.MISO)\n\n# Initialze RFM radio\nrfm9x = adafruit_rfm9x.RFM9x(spi, CS, RESET, RADIO_FREQ_MHZ)\n\n# Set transmit power to max\nrfm9x.tx_power = 23\n\n# sensor data\nbme280_data = bytearray(8)\n\nwhile True:\n # Get sensor readings\n temp_val = int(bme280.temperature * 100)\n print(\"\\nTemperature: %0.1f C\" % bme280.temperature)\n humid_val = int(bme280.humidity * 100)\n print(\"Humidity: %0.1f %%\" % bme280.humidity)\n pres_val = int(bme280.pressure * 100)\n print(\"Pressure: %0.1f hPa\" % bme280.pressure)\n\n # Build packet with float data and headers\n\n # packet header with feather node ID\n bme280_data[0] = FEATHER_ID\n # Temperature data\n bme280_data[1] = (temp_val >> 8) & 0xff\n bme280_data[2] = temp_val & 0xff\n\n # Humid data\n bme280_data[3] = (humid_val >> 8) & 0xff\n bme280_data[4] = humid_val & 0xff\n\n # Pressure data\n bme280_data[5] = (pres_val >> 16) & 0xff\n bme280_data[6] = (pres_val >> 8) & 0xff\n bme280_data[7] = pres_val & 0xff\n\n # Convert bytearray to bytes\n bme280_data_bytes = bytes(bme280_data)\n # Send the packet data\n print('Sending data...')\n LED.value = True\n rfm9x.send(bme280_data_bytes)\n print('Sent data!')\n LED.value = False\n\n # Wait to send the packet again\n time.sleep(SENSOR_SEND_DELAY * 60)\n","repo_name":"adafruit/Adafruit_Learning_System_Guides","sub_path":"lorawan_sensing_network/lora_device/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":913,"dataset":"github-code","pt":"5"} +{"seq_id":"37036703997","text":"import torch\r\nfrom torch import nn\r\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\r\n\r\n\r\n\r\n\r\n# Encoder NN to predict encoding for source tree.\r\n# Referenced: https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html\r\n# https://www.kaggle.com/kanncaa1/recurrent-neural-network-with-pytorch\r\n# https://towardsdatascience.com/attention-seq2seq-with-pytorch-learning-to-invert-a-sequence-34faf4133e53\r\n# https://github.com/IBM/pytorch-seq2seq/\r\n# https://blog.floydhub.com/gru-with-pytorch/ \r\n#\t https://discuss.pytorch.org/t/runtimeerror-input-must-have-3-dimensions-got-2/36974/8\r\n# https://pytorch.org/docs/stable/nn.html\r\n# https://medium.com/@Petuum/embeddings-a-matrix-of-meaning-4de877c9aa27\r\n#\t https://discuss.pytorch.org/t/runtimeerror-expected-object-of-scalar-type-long-but-got-scalar-type-float-for-argument-2-mat2/49849/2\r\nclass EncoderModel(nn.Module):\r\n def __init__(self, dim_input, dim_hidden, dim_output, layer_count=0, dropout_rate=0.0):\r\n super(EncoderModel, self).__init__()\r\n\r\n self.dim_input = dim_input\r\n self.dim_hidden = dim_hidden\r\n self.dim_output = dim_output\r\n self.layer_count = layer_count\r\n self.dropout_rate = dropout_rate\r\n\r\n self.embedding = nn.Embedding(dim_output, dim_input)\t\t\t\t # Produces matrix of shape output_dim,output_dim\r\n self.gru = nn.GRU(dim_input, dim_hidden, batch_first=True, dropout=dropout_rate) # Kind of RNN, akin to LSTM\r\n self.fc = nn.Linear(dim_hidden, dim_output)\r\n self.relu = nn.ReLU()\r\n\r\n # Moves the RNN forward to the next iter.\r\n def forward(self, input_vector, hidden_vector, shape):\r\n #embedded = self.embedding(input_vector).view(1,self.dim_input,self.dim_input)\r\n embedded = input_vector.view(1,1,self.dim_input)\r\n embedded = embedded.float()\t\t \r\n print(\"\\t\\t\\tEmbedded matrix and shape:\", embedded, embedded.size())\r\n #embedded = pack_padded_sequence(embedded, shape, 
enforce_sorted=False, batch_first=True)\r\n output_vector, hidden_vector = self.gru(embedded, hidden_vector)\r\n #output_vector, _ = pad_packed_sequence(output_vector, batch_first=True)\r\n output_vector = self.fc(self.relu(output_vector))\r\n return output_vector, hidden_vector\r\n \r\n def initialize_hidden(self):\r\n return torch.zeros(1, 1, self.dim_hidden) # 1, batch_size, hidden_size\r\n\r\n\r\n","repo_name":"ElleChan/Program_Translator","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24183313628","text":"import hexchat\nimport re\n\n__module_name__ = 'External Bridge Adaptor'\n__module_version__ = '2.0'\n__module_description__ = 'Adapt Hexchat UI for external bridges'\n__author__ = 'Stockage'\n\nCONF_PREFIX = \"extbridge_\"\n\nclass Bridge:\n def __init__(self, bot_channel, bot_server, bot_nick, quit_message=\"Disconnected from discord\", nick_prefix=\"\", re_msg_format=\"^<([^>]+)> (.*)$\", re_cmd_format=\"^Cmd by (.+)$\"):\n self.bot_channel = bot_channel\n self.bot_server = bot_server\n self.bot_nick = bot_nick\n self.quit_message = quit_message\n self.nick_prefix = nick_prefix\n self.re_msg_format = re_msg_format\n self.re_cmd_format = re_cmd_format\n self.cmd_nick = None\n \n def __str__(self):\n bstr = \"\"\n for attr in [\"bot_channel\", \"bot_server\", \"bot_nick\", \"quit_message\", \"nick_prefix\", \"re_msg_format\", \"re_cmd_format\"]:\n bstr += \"\\00305{}\\00308 = \\017{}\\n\".format(attr, getattr(self, attr))\n return bstr\n \n def IsBridgeMessage(self, context, nick):\n if self.bot_server.startswith(\"*\"):\n correct_serv = context.get_info(\"server\").lower().endswith(self.bot_server.lower()[1:])\n else:\n correct_serv = context.get_info(\"server\").lower() == self.bot_server.lower()\n return correct_serv and context.get_info(\"channel\").lower() == self.bot_channel.lower() and hexchat.nickcmp(hexchat.strip(nick), self.bot_nick) == 0\n \n\ndef InitPref(name, default_value):\n value = hexchat.get_pluginpref(CONF_PREFIX + name)\n if (value is None):\n value = default_value\n hexchat.set_pluginpref(CONF_PREFIX + name, value)\n return value\n\ndef LoadPrefs():\n bridge_count = InitPref(\"bridge_count\", 0)\n for i in range(bridge_count):\n bot_channel = hexchat.get_pluginpref(\"{}bridge{}_bot_channel\".format(CONF_PREFIX, i))\n bot_server = hexchat.get_pluginpref(\"{}bridge{}_bot_server\".format(CONF_PREFIX, i))\n bot_nick = hexchat.get_pluginpref(\"{}bridge{}_bot_nick\".format(CONF_PREFIX, i))\n quit_message = hexchat.get_pluginpref(\"{}bridge{}_quit_message\".format(CONF_PREFIX, i))\n nick_prefix = hexchat.get_pluginpref(\"{}bridge{}_nick_prefix\".format(CONF_PREFIX, i))\n re_msg_format = hexchat.get_pluginpref(\"{}bridge{}_re_msg_format\".format(CONF_PREFIX, i))\n re_cmd_format = hexchat.get_pluginpref(\"{}bridge{}_re_cmd_format\".format(CONF_PREFIX, i))\n bridge_list.append(Bridge(bot_channel, bot_server, bot_nick, quit_message=quit_message, nick_prefix=nick_prefix, re_msg_format=re_msg_format, re_cmd_format=re_cmd_format))\n\ndef SavePref():\n bridge_count = hexchat.get_pluginpref(CONF_PREFIX + \"bridge_count\")\n for i in range(bridge_count):\n hexchat.del_pluginpref(\"{}bridge{}_bot_channel\".format(CONF_PREFIX, i))\n hexchat.del_pluginpref(\"{}bridge{}_bot_server\".format(CONF_PREFIX, i))\n hexchat.del_pluginpref(\"{}bridge{}_bot_nick\".format(CONF_PREFIX, i))\n 
hexchat.del_pluginpref(\"{}bridge{}_quit_message\".format(CONF_PREFIX, i))\n hexchat.del_pluginpref(\"{}bridge{}_nick_prefix\".format(CONF_PREFIX, i))\n hexchat.del_pluginpref(\"{}bridge{}_re_msg_format\".format(CONF_PREFIX, i))\n hexchat.del_pluginpref(\"{}bridge{}_re_cmd_format\".format(CONF_PREFIX, i))\n hexchat.set_pluginpref(CONF_PREFIX + \"bridge_count\", len(bridge_list))\n for bridge_index in range(len(bridge_list)):\n bridge = bridge_list[bridge_index]\n hexchat.set_pluginpref(\"{}bridge{}_bot_channel\".format(CONF_PREFIX, bridge_index), bridge.bot_channel)\n hexchat.set_pluginpref(\"{}bridge{}_bot_server\".format(CONF_PREFIX, bridge_index), bridge.bot_server)\n hexchat.set_pluginpref(\"{}bridge{}_bot_nick\".format(CONF_PREFIX, bridge_index), bridge.bot_nick)\n hexchat.set_pluginpref(\"{}bridge{}_quit_message\".format(CONF_PREFIX, bridge_index), bridge.quit_message)\n hexchat.set_pluginpref(\"{}bridge{}_nick_prefix\".format(CONF_PREFIX, bridge_index), bridge.nick_prefix)\n hexchat.set_pluginpref(\"{}bridge{}_re_msg_format\".format(CONF_PREFIX, bridge_index), bridge.re_msg_format)\n hexchat.set_pluginpref(\"{}bridge{}_re_cmd_format\".format(CONF_PREFIX, bridge_index), bridge.re_cmd_format)\n\nbridge_list = []\nLoadPrefs()\n\ndef EmitMsg(context, nick, message, mode, nick_prefix):\n same_user = (hexchat.nickcmp(hexchat.get_info(\"nick\").lower(), nick.lower()) == 0)\n hilight = (not same_user and hexchat.get_info(\"nick\").lower() in message.lower())\n if same_user:\n context.emit_print(\"Your Message\", nick, message, mode, \"\\00306\" + nick_prefix + \"\\00304\")\n elif hilight:\n context.emit_print(\"Channel Msg Hilight\", nick, message, mode, \"\\00306\" + nick_prefix + \"\\00303\")\n else:\n context.emit_print(\"Channel Message\", nick, message, mode, \"\\00306\" + nick_prefix + \"\\00302\")\n\ndef msg_cmd(word, word_eol, userdata):\n context = hexchat.get_context()\n for bridge in bridge_list:\n # Current server/channel\n if bridge.IsBridgeMessage(context, word[0]):\n # Classic message\n if len(re.findall(bridge.re_msg_format, word[1])) > 0:\n nick, message = re.findall(bridge.re_msg_format, word[1])[0]\n nick = hexchat.strip(nick)\n EmitMsg(context, nick, message, word[2] if len(word) > 2 else \"\", bridge.nick_prefix)\n return hexchat.EAT_HEXCHAT\n # Command (part. 1)\n elif len(re.findall(bridge.re_cmd_format, word[1])) > 0:\n bridge.cmd_nick = hexchat.strip(re.findall(bridge.re_cmd_format, word[1])[0])\n return hexchat.EAT_HEXCHAT\n # Command (part. 
2)\n elif bridge.cmd_nick is not None and word[1][0] == \"!\":\n EmitMsg(context, bridge.cmd_nick, word[1], word[2] if len(word) > 2 else \"\", bridge.nick_prefix)\n bridge.cmd_nick = None\n return hexchat.EAT_HEXCHAT\n return hexchat.EAT_NONE\n\ndef extbridge_cmd(word, word_eol, userdata):\n if len(word) < 2:\n hexchat.command(\"HELP EXTBRIDGE\")\n elif word[1].upper() == \"BRIDGE\":\n if len(word) < 3:\n print(\"/EXTBRIDGE BRIDGE list|add <channel> <server> <botnick>|show <index>|set <index> <param> <value>|del <index>\")\n elif word[2].lower() == \"list\":\n bridge_count = hexchat.get_pluginpref(CONF_PREFIX + \"bridge_count\")\n bridge_list_params = []\n params_lens = [1, 6, 7]\n for i in range(bridge_count):\n bot_server = hexchat.get_pluginpref(\"{}bridge{}_bot_server\".format(CONF_PREFIX, i))\n bot_channel = hexchat.get_pluginpref(\"{}bridge{}_bot_channel\".format(CONF_PREFIX, i))\n params_lens[0] = max(params_lens[0], len(\"{}\".format(i)) + 2)\n params_lens[1] = max(params_lens[1], len(bot_server) + 2)\n params_lens[2] = max(params_lens[2], len(bot_channel) + 2)\n bridge_list_params.append((bot_server, bot_channel))\n print((\"| {0:<\" + str(params_lens[0]) + \"} | {1:<\" + str(params_lens[1]) + \"} | {2:<\" + str(params_lens[2]) + \"} |\").format(\"#\", \"server\", \"channel\"))\n print(\"|-\" + \"-\"*params_lens[0] + \"-|-\" + \"-\"*params_lens[1] + \"-|-\" + \"-\"*params_lens[2] + \"-|\")\n for i in range(bridge_count):\n print((\"| {0:<\" + str(params_lens[0]) + \"} | {1:<\" + str(params_lens[1]) + \"} | {2:<\" + str(params_lens[2]) + \"} |\").format(i, bridge_list_params[i][0], bridge_list_params[i][1]))\n elif word[2].lower() == \"add\":\n if len(word) < 6:\n context = hexchat.get_context()\n hexchat.command(\"SETTEXT /EXTBRIDGE BRIDGE add {} {} \".format(context.get_info(\"channel\"), context.get_info(\"server\")))\n hexchat.command(\"SETCURSOR {}\".format(24 + len(context.get_info(\"channel\")) + len(context.get_info(\"server\"))))\n else:\n bridge = Bridge(word[3], word[4], word[5])\n bridge_list.append(bridge)\n SavePref()\n print(\"Bridge for \\00307{}\\017 on \\00307{}\\017 has been set.\".format(bridge.bot_channel, bridge.bot_server))\n elif word[2].lower() == \"show\":\n if len(word) < 4:\n hexchat.command(\"SETTEXT /EXTBRIDGE BRIDGE show \")\n hexchat.command(\"SETCURSOR 23\")\n else:\n try:\n print(bridge_list[int(word[3])])\n except (IndexError, ValueError):\n print(\"Bad index value.\")\n elif word[2].lower() == \"set\":\n if len(word) < 6:\n hexchat.command(\"SETTEXT /EXTBRIDGE BRIDGE set \")\n hexchat.command(\"SETCURSOR 22\")\n else:\n try:\n setattr(bridge_list[int(word[3])], word[4], word_eol[5])\n SavePref()\n print(\"Parameter \\00307{}\\017 for bridge \\00307N°{}\\017 has been changed to \\00307{}\\017.\".format(word[4], word[3], word_eol[5]))\n except (IndexError, ValueError):\n print(\"Bad index value.\")\n elif word[2].lower() == \"del\":\n if len(word) < 4:\n hexchat.command(\"SETTEXT /EXTBRIDGE BRIDGE del \")\n hexchat.command(\"SETCURSOR 22\")\n else:\n try:\n del bridge_list[int(word[3])]\n SavePref()\n print(\"Bridge deleted.\")\n except (IndexError, ValueError):\n print(\"Bad index value.\")\n elif word[1].upper() == \"CONF\":\n if len(word) < 3:\n print(\"/EXTBRIDGE CONF show|get <name>|set <name> <value>\")\n elif word[2].lower() == \"show\":\n print(\"| {0:<20} | {1:<40} |\".format(\"name\", \"value\"))\n print(\"|-\" + \"-\"*20 + \"-|-\" + \"-\"*40 + \"-|\")\n for name in hexchat.list_pluginpref():\n if name[:len(CONF_PREFIX)] == CONF_PREFIX:\n print(\"| {0:<20} | {1:<40} |\".format(name[len(CONF_PREFIX):], hexchat.get_pluginpref(name)))\n elif word[2].lower() == \"get\":\n if len(word) < 4:\n
print(\"/EXTBRIDGE CONF get \")\n else:\n value = hexchat.get_pluginpref(CONF_PREFIX + word[3])\n if value is None:\n print(\"This configuration key doesn't exists.\")\n else:\n print(\"{} : {}\".format(word[3], value))\n elif word[2].lower() == \"set\":\n if len(word) < 5:\n print(\"/EXTBRIDGE CONF set \")\n else:\n value = hexchat.get_pluginpref(CONF_PREFIX + word[3])\n if value is None:\n print(\"This configuration key doesn't exists.\")\n else:\n new_value = hexchat.strip(word_eol[4])\n hexchat.set_pluginpref(CONF_PREFIX + word[3], new_value)\n LoadPrefs()\n print(\"\\00307{}\\017 has been set to \\00307{}\\017\".format(word[3], new_value))\n else:\n print(\"Unknown action {} for /EXTBRIDGE CONF\".format(word[2]))\n elif word[1].upper() == \"RELOAD\":\n hexchat.command(\"SETTEXT /PY RELOAD \\\"{}\\\"\".format(__module_name__))\n else:\n print(\"Unknown option {} for /EXTBRIDGE\".format(word[1]))\n return hexchat.EAT_ALL\n\ndef unload(userdata):\n for hook in hooks:\n hexchat.unhook(hook)\n\nhooks = [\n hexchat.hook_print(\"Channel Message\", msg_cmd),\n hexchat.hook_print(\"Channel Msg Hilight\", msg_cmd),\n hexchat.hook_command('EXTBRIDGE', extbridge_cmd, help=\"/EXTBRIDGE BRIDGE|CONF|RELOAD\")\n]\n\nhexchat.hook_unload(unload)\n\nprint(\"\\00307{} v{}\\017 : Connected with \\00306Discord !\".format(__module_name__, __module_version__))","repo_name":"Shlygly/External-Bridge-Adaptor","sub_path":"extBridge.py","file_name":"extBridge.py","file_ext":"py","file_size_in_byte":11783,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"22784139808","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\na = np.arange(0,10)\nb = 2*a*np.sin(a)\nplt.plot ( a, b, 'ro--') #--> Basic plot, Can set Line style 'ro--'\n#plt.fill_between(x,y,color='blue', alpha=0.2) --> fill teh gradient color\n#fig, axes = plt.subplots(nrows=4, ncols=4)# plt.subplots(3,4) creates 3 rows & 4 columns grid\n#fig, axes = plt.subplots() # returns Figure & Axes\n\nfig = plt.figure()\nnames = ['A', 'B', 'C']\nvalues = [19, 50, 29]\nvalues_2 = [48, 19, 41]\nplt.axes(0,0,1,1)\nax = fig.add_subplot(121)#1 row 2 column. subplot on position 1\nax2 = fig.add_subplot(122)#1 row 2 column. subplot on position 2\n #add_subplot(232) means 2 rows 3 columns. subplot on position 2\n \nfig.suptitle('Plot Title')# set fugure level title\nax.set_title('Axis1 Title')# set Axis title\nax.set_xlabel('x_label')# set xlabel\nax.set_ylabel('y_label')# set ylabel\nax.set_xticks([0,1,2])# Set X ticks\nax.set_xticklabels(['zero','One','Two']) # Set X ticks lables\nax.set_yticks([-1,0,1])# Set y ticks\n\nfig.legend(['Data'], loc=\"upper right\") # set Legend\n\nax.bar(names, values,color='goldenrod') # chnages color\nax2.bar(names, values_2)\nfig.tight_layout()\n#plt.subplots_adjust(0.25, 0.35, 0.90, 0.8) # chnages layout\n\nvalues = [15, 35, 5, 45]\nlabels = 'Oranges', 'Apples', 'Pears', 'Strawberries'\ncolors = {'r', 'g', 'b', 'r'}\nexplode = [0, 0, 0.2, 0]\nplt.pie(values, labels=labels, colors=colors, explode=explode)# explode a piece of pie\n\n\n#Histogram -- 3 different types plot\n #barstacked\n #step\n #stepfilled\nplt.hist(values, histtype='step') #histtype='stepfilled' ,Default Bar\nplt.plot(values, color='r', linewitdh=2.0)# chnages Line color and line width\n\n\n#Scatter Plot. 
#Scatter Plot: shows the relationship between two continuous (numeric) variables\nplt.scatter(x=values, y=values_2, color=\"darkslategrey\", edgecolors=\"white\", linewidths=0.1, alpha=0.7);\n\n#3D Plots: import the Axes3D module from mpl_toolkits.mplot3d.\n# for a 3d plot, supply X, Y and Z values\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfile = pd.read_csv(\"vgsales.csv\")\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nx = file['NA_Sales']\ny = file['EU_Sales']\nz = file['Other_Sales']\nax.scatter(x, y, z, c='r', s=20)\nplt.xticks(rotation=60)\nplt.show()\n","repo_name":"jogushravan/AI_Main","sub_path":"Visualization/Matplotlib.py","file_name":"Matplotlib.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37884115867","text":"from food.models import Ingredient\n\n\ndef check_fields(request, fields):\n response_message = {}\n for field in fields:\n if field not in request.data or not request.data[field]:\n response_message[field] = f'Check the {field} field'\n if response_message:\n return response_message\n return None\n\n\ndef create_ingredients(obj, ingredients):\n objects = []\n for ingredient in ingredients:\n objects.append(Ingredient(\n product_id=int(ingredient['id']),\n amount=int(ingredient['amount']),\n recipe_id=obj.id\n ))\n Ingredient.objects.bulk_create(objects)\n","repo_name":"HellfastUSMC/foodgram-project-react","sub_path":"backend/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40034908057","text":"# -*- coding: utf-8 -*-\n'''\nThe three pillars of object orientation: encapsulation, inheritance, polymorphism\n'''\n\n#Encapsulation\n#Pack data into the object via the constructor, then read it back through the object directly, or indirectly via self\nclass people:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def attr(self):\n print(self.name)\n print(self.age)\n\n\np1 = people('xixi', 21)\np2 = people('haha', 23)\nprint(p1.name, p2.name)\np1.attr()\np2.attr()\n\n\n#Inheritance\n#Hoist methods shared by several classes into a parent class; a subclass then only inherits instead of re-implementing every method\n#subclass and superclass are also called derived class and base class\n\n#Multiple inheritance\n#A Python class can inherit from several classes; Java and C# allow only one\n#With multiple inheritance, attribute lookup follows one of two orders: depth-first or breadth-first\n#Old-style classes resolve multiple inheritance depth-first\n#New-style classes (inheriting object) resolve it breadth-first\n\n\n#Polymorphism\n#Python does not support the strongly-typed polymorphism syntax of Java/C#, but it is natively polymorphic: Python embraces \"duck typing\"\n#\"When a bird walks like a duck, swims like a duck and quacks like a duck, that bird can be called a duck.\"\n#We do not care what type an object is, or whether it really is a duck; we only care about its behavior\n##Duck typing is common in dynamic languages and is flexible enough that Python rarely needs the pile of design patterns Java does\n\nclass F1:\n pass\n\nclass S1(F1):\n def show(self):\n print(S1.show)\n\nclass S2(F1):\n def show(self):\n print(S2.show)\n\ndef Func(obj):\n print(obj.show())\n\ns1_obj = S1()\nFunc(s1_obj)\ns2_obj = S2()\nFunc(s2_obj)","repo_name":"nineep/cultivate-py","sub_path":"OOP/面向对象.py","file_name":"面向对象.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38113658953","text":"import datetime\nfrom src.log.log_mongo import log_mongo\nimport time\nfrom functools import wraps\nimport logging\n\n\ndef logging_to_mongo_feature(func):\n\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n start = time.time()\n r = func(*args, **kwargs)\n time_elampsed = str(datetime.timedelta(seconds=time.time()-start))\n\n if r is None:\n outcome = False\n else:\n outcome = True\n\n to_save = {\n \"args\": args,\n \"kwargs\": kwargs,\n \"result\": r,\n \"outcome\": outcome,\n \"time_elampsed\": time_elampsed,\n \"timestamp\": datetime.datetime.now()\n }\n\n
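# time_elampsed is the str() of a timedelta, e.g. str(datetime.timedelta(seconds=90.5)) == '0:01:30.500000'\n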
logging.getLogger(\"root.features.timing\").info(\n f\"Feature {func.__name__} took {time_elampsed}s.\")\n log_mongo(func.__name__, to_save, db_name=\"log_feature\")\n\n return r\n\n return func_wrapper\n","repo_name":"GiovanniGabbolini/ipsim","sub_path":"src/features/decorator_logging_to_mongo_feature.py","file_name":"decorator_logging_to_mongo_feature.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"32721180132","text":"#Anagrams\r\nfrom collections import Counter\r\nfor _ in range(int(input())):\r\n a = input()\r\n b = input()\r\n A = Counter(a)\r\n B = Counter(b)\r\n e = A & B\r\n d = e.values()\r\n g = sum(d)\r\n print(len(a)+len(b)-2*(g))\r\n","repo_name":"roshan13ghimire/Competitive_Programming","sub_path":"HackerEarth/Anagrams.py","file_name":"Anagrams.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"10652984850","text":"################################################################################\n\"\"\"\nDJ JOE Website Playlist File Generator\n--------------------------------------\n\n(c) 2021 - Stanley Solutions - Joe Stanley\n\nThis application serves an interface to allow the recording of Apple Music or\nSpotify playlists.\n\"\"\"\n################################################################################\n\n# Requirements\nimport os\nimport logging\nfrom urllib.parse import urlparse\n\nfrom fastapi import FastAPI, Request, Form\nfrom fastapi.responses import HTMLResponse, RedirectResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\n\nimport spotify_client\nimport apple_music_client\nfrom html_formatter import playlist_html_table\n\n\nBACKGROUND_VAR = \"BACKGROUND_URL\"\n\nlogger = logging.getLogger(\"uvicorn\")\n\n\n# Application Base\napp = FastAPI()\n\n# Mount the Static File Path\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\ntemplates = Jinja2Templates(directory=\"templates\")\n\ndef page(request: Request, url: str = None):\n \"\"\"Generate the HTML Page Content Using any Provided Playlist URL\"\"\"\n data = \"\"\n if url is not None:\n # \"Switch\" On Domain Name\n domain = urlparse(url).netloc\n if 'music.apple' in domain:\n client = apple_music_client.ApplePlaylister(url)\n elif 'spotify' in domain:\n client = spotify_client.SpotifyPlaylister(url)\n playlist, tracks = client()\n data = playlist_html_table(\n playlist=playlist,\n tracks=tracks,\n )\n # Return Template Response Using Data\n return templates.TemplateResponse(\n \"index.html\",\n {\n \"request\": request,\n \"playlist_table\": data,\n \"background_image\": os.getenv(\n BACKGROUND_VAR, \"/static/stanley-solutions.jpg\"\n ),\n },\n )\n\n# Main Application Response\n@app.get(\"/\", response_class=HTMLResponse)\nasync def root(request: Request):\n \"\"\"Base Application Page.\"\"\"\n return page(request=request)\n\n# Redirect for Playlist Endpoint\n@app.get(\"/load_playlist\")\nasync def load_playlist_redirect():\n \"\"\"Redirect to the Basic Page.\"\"\"\n return RedirectResponse(\"/\")\n\n# Load Playlist\n@app.post(\"/load_playlist\", response_class=HTMLResponse)\nasync def load_playlist(request: Request, playlist: str = Form(...)):\n \"\"\"Get the Playlist Information.\"\"\"\n logger.debug(playlist)\n return page(request=request, url=playlist)\n\n# 
END\n","repo_name":"engineerjoe440/djjoeplaylister","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"43950060396","text":"import re\n\nimport httpx\n\nfrom paimon import Message, paimon\n\n\n@paimon.on_cmd(\n \"neko\",\n about={\n \"header\": \"Get stuff from nekos.life\",\n \"usage\": \"{tr}neko\",\n },\n)\nasync def random_neko(message: Message):\n while True:\n await message.delete()\n reply = message.reply_to_message\n reply_id = reply.message_id if reply else None\n try:\n async with httpx.AsyncClient() as client:\n r = await client.get(\"https://nekos.life/\")\n midia = re.findall(\n r\"\", r.text\n )[0]\n return await message.client.send_photo(\n chat_id=message.chat.id, photo=midia, reply_to_message_id=reply_id\n )\n except BaseException:\n pass\n\n\n@paimon.on_cmd(\n \"cat\",\n about={\n \"header\": \"get kittens\",\n \"usage\": \"{tr}cat\",\n },\n)\nasync def random_cat(message: Message):\n async with httpx.AsyncClient() as client:\n reply = message.reply_to_message\n reply_id = reply.message_id if reply else None\n r = await client.get(\"https://api.thecatapi.com/v1/images/search\")\n if not r.status_code == 200:\n return await message.edit(f\"Error! {r.status_code}\")\n cat = r.json\n await message.delete()\n await message.client.send_photo(\n chat_id=message.chat.id, photo=(cat()[0][\"url\"]), reply_to_message_id=reply_id\n )\n\n\n@paimon.on_cmd(\n \"dog\",\n about={\n \"header\": \"get puppies\",\n \"usage\": \"{tr}dog\",\n },\n)\nasync def random_dog(message: Message):\n async with httpx.AsyncClient() as client:\n reply = message.reply_to_message\n reply_id = reply.message_id if reply else None\n r = await client.get(\"https://api.thedogapi.com/v1/images/search\")\n if not r.status_code == 200:\n return await message.edit(f\"Error! 
{r.status_code}\")\n dog = r.json\n await message.delete()\n await message.client.send_photo(\n chat_id=message.chat.id, photo=(dog()[0][\"url\"]), reply_to_message_id=reply_id\n )\n","repo_name":"maxpaynecodl/Telegram-userbot","sub_path":"paimon/plugins/fun/neko.py","file_name":"neko.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"13564113793","text":"import math\nimport inspect\nimport random\n\n# from typing import Dict, Tuple, Callable, List\n\ndef wp(g1=0, g7=0, g8=0, c1=0):\n return math.sqrt((g1 * g8) / (g7 * c1))\n\ndef Q(g1=0, g2=0, g6=0, g7=0, g8=0, c1=0, c2=0):\n return ((g6 * c2) / (g2 * g7)) * math.sqrt((g1 * g8) / (g7 * c1))\n\ndef k(g1=0, g2=0, g4=0, g6=0, g7=0, g8=0, c1=0, c2=0):\n return ((g2 * g4) / (g6 * c2)) * (Q(g1, g2, g6, g7, g8, c1, c2) / wp(g1, g7, g8, c1))\n #return g4 / g7\n\ndef BW(g1=0, g2=0, g6=0, g7=0, g8=0, c1=0, c2=0):\n return wp(g1, g7, g8, c1) / Q(g1, g2, g6, g7, g8, c1, c2)\n\n\nmy_wanted_results = {\n \"wp\": (wp, {\n \"optimal\": 350,\n \"allowance\": (-30, 30)\n }),\n \"BW\": (BW, {\n \"optimal\": 200,\n \"allowance\": (-30, 30)\n }),\n \"k\": (k, {\n \"optimal\": 10,\n \"allowance\": (-3, 3)\n })\n}\n\nmy_parameters_limits = {\n \"g1\": (1E-12, 1E-10),\n \"g2\": (1E-12, 1E-10),\n \"g4\": (1E-12, 1E-10),\n \"g6\": (1E-12, 1E-10),\n \"g7\": (1E-12, 1E-10),\n \"g8\": (1E-12, 1E-10),\n \"c1\": (1E-15, 50E-15),\n \"c2\": (1E-15, 50E-15)\n}\n\nmy_optimization_priority = {\n \"k\": .5,\n \"BW\": .3,\n \"wp\": .2\n}\n\nmutation_units = {name: (b[0]-b[1])/100000 for name, b in my_parameters_limits.items()}\n\nmutation_probability = .15\n\nkill_percentage = .5\n\n\ndef simulate(functions, arguments):\n\n results = dict({})\n\n for name, description in functions.items():\n fun = description[0]\n\n needed_args = inspect.getargspec(fun)[0]\n args_to_pass = {name: value for name, value in arguments.items() if name in needed_args}\n\n results[name] = fun(**args_to_pass)\n\n return results\n\n\ndef fitness(results, wanted_result, optimization_priority):\n\n fitnesses = dict({})\n\n for name, description in wanted_result.items():\n goals = description[1]\n\n result = results[name]\n\n opt = goals[\"optimal\"]\n lower_barrier = goals[\"allowance\"][0] #will get -x\n upper_barrier = goals[\"allowance\"][1] #will get x\n max_qdist = max(*[lower_barrier ** 2, upper_barrier ** 2]) #always upper??\n\n dist = opt - result\n q_dist = dist ** 2\n\n if dist < lower_barrier:\n fitnesses[name] = 0\n continue\n\n if dist > upper_barrier:\n fitnesses[name] = 0\n continue\n\n fitnesses[name] = (max_qdist - q_dist) / max_qdist\n\n fitnesses = [optimization_priority[name] * f for name, f in fitnesses.items()]\n fitness = sum(fitnesses) / len(fitnesses)\n\n return fitness\n\n\ndef mutate(arguments, parameters_limits):\n\n mutated_args = dict({})\n\n for name, value in arguments.items():\n successfull = False\n\n if random.uniform(0, 1) <= mutation_probability:\n\n while not successfull:\n if random.choice([True, False]):\n step = mutation_units[name]\n else:\n step = -mutation_units[name] \n\n if value + step < parameters_limits[name][0]:\n continue\n if value + step > parameters_limits[name][1]:\n continue\n\n value += step\n successfull = True\n\n mutated_args[name] = value\n\n return mutated_args\n\n\ndef generate_gen_0(size: int, parameters_limits):#: dict(str, tuple(float, float))):\n return [{name: random.uniform(b[0], b[1]) for name, b in parameters_limits.items()} for i 
\ndef mutate(arguments, parameters_limits):\n\n mutated_args = dict({})\n\n for name, value in arguments.items():\n successful = False\n\n if random.uniform(0, 1) <= mutation_probability:\n\n while not successful:\n if random.choice([True, False]):\n step = mutation_units[name]\n else:\n step = -mutation_units[name]\n\n if value + step < parameters_limits[name][0]:\n continue\n if value + step > parameters_limits[name][1]:\n continue\n\n value += step\n successful = True\n\n mutated_args[name] = value\n\n return mutated_args\n\n\ndef generate_gen_0(size: int, parameters_limits):#: dict(str, tuple(float, float))):\n return [{name: random.uniform(b[0], b[1]) for name, b in parameters_limits.items()} for i in range(size)]\n\n\ndef __main__(wanted_results,#List[dict(str, Tuple[Callable[dict(str, any]], dict(str, float]]]],\n parameter_limits, #dict(str, tuple(float, float)),\n optimization_priority, #dict(str, float),\n size: int,\n generation_count: int):\n\n\n gen = generate_gen_0(size, parameter_limits)\n\n kill_count = int(kill_percentage * size)\n\n for i in range(generation_count):\n\n gen_results = []\n\n for args in gen:\n gen_results.append((args, simulate(wanted_results, args)))\n\n gen_fitnesses = []\n\n for args, results in gen_results:\n gen_fitnesses.append((args, results, fitness(results, wanted_results, optimization_priority)))\n\n gen_fitnesses = sorted(gen_fitnesses, key=lambda t: t[2])\n\n for kill_i in range(kill_count):\n gen_fitnesses.pop(0)\n\n gen_fitnesses.reverse()\n\n yield gen_fitnesses\n\n new_gen = [item[0] for item in gen_fitnesses]\n\n for i in range(kill_count):\n new_gen.append(mutate(gen_fitnesses[i][0], parameter_limits))\n\n if gen_fitnesses[0][2] == 0.0:\n new_gen = generate_gen_0(size, parameter_limits)\n\n gen = new_gen\n\n\nfor gen in __main__(my_wanted_results, my_parameters_limits, my_optimization_priority, 1000, 50000):\n print(\"TOP:\", gen[0][2], \"|\\n\") #, gen)","repo_name":"strangedev/evoptimizer","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28977542007","text":"import numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom solver import solver_RNN\nfrom tsp import TSPDataset\nfrom tsp_heuristic import get_ref_reward\n\nuse_cuda = False\nseq_len= 5 #30\nnum_epochs = 10\nnum_tr_dataset = 50\nnum_test_dataset = 20\nembedding_size = 128 # 256 did not work\nhidden_size = 128\nbatch_size = 1 # 64\ngrad_clip = 1.5\nbeta=0.9\n\nif __name__ ==\"__main__\":\n if use_cuda:\n use_pin_memory = True\n else:\n use_pin_memory = False\n train_dataset = TSPDataset(seq_len, num_tr_dataset)\n test_dataset = TSPDataset(seq_len, num_test_dataset)\n\n train_data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, pin_memory=use_pin_memory)\n\n test_data_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, pin_memory=use_pin_memory)\n eval_loader = DataLoader(test_dataset, batch_size=num_test_dataset, shuffle=False)\n\n # Calculating heuristics\n heuristic_distance = torch.zeros(num_test_dataset)\n for i, pointset in tqdm(test_dataset):\n heuristic_distance[i] = get_ref_reward(pointset)\n\n\n model = solver_RNN(embedding_size, hidden_size, seq_len, 2, 10)\n\n if use_cuda:\n model = model.cuda()\n print(f'Num of params:{sum([np.prod(p.size()) for p in filter(lambda p: p.requires_grad, model.parameters())])}')\n optimizer = optim.Adam(model.parameters(), lr=3.0 * 1e-4)\n\n # Train loop\n moving_avg = torch.zeros(num_tr_dataset)\n if use_cuda:\n moving_avg = moving_avg.cuda()\n\n #generating first baseline\n for (indices, sample_batch) in tqdm(train_data_loader):\n if use_cuda:\n sample_batch = sample_batch.cuda()\n rewards, _, _ = model(sample_batch)\n moving_avg[indices] = rewards\n\n #Training\n model.train()\n for epoch in range(num_epochs):\n for batch_idx, (indices, sample_batch) in enumerate(train_data_loader):\n if use_cuda:\n sample_batch = sample_batch.cuda()\n rewards, log_probs, action = model(sample_batch) # does sample_batch hold all of each sample's cities, or one city at a time? -> all the cities of the sample\n
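# moving_avg is a per-sample exponential moving average of past rewards; it acts as a REINFORCE\n # baseline, and subtracting it below reduces the variance of the policy-gradient estimate\n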
moving_avg[indices] = moving_avg[indices] * beta + rewards * (1.0 - beta)\n advantage = rewards - moving_avg[indices]\n log_probs = torch.sum(log_probs, dim=-1)\n log_probs[log_probs < -100] = -100\n loss = (advantage * log_probs).mean()\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)\n optimizer.step()\n\n\n model.eval()\n ret = []\n for i, batch in eval_loader:\n\n if use_cuda:\n batch = batch.cuda()\n R, _, _ = model(batch)\n print(f\"[at epoch {epoch}] RL model generates solutions {(R / heuristic_distance).mean().detach().numpy():0.2f} times worse than the heuristic\")\n print(\"AVG R\", R.mean().detach().numpy())\n model.train()","repo_name":"sofaki000/tsp-machine-learning","sub_path":"neural_combinatorial_optimization_rl/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2038144179","text":"\"\"\"\nBinary heap implementation of priority queue.\nClass functions:\n - Heapify -> Create a \"heaped\" array from unheaped array\n Ex:\n arr = [8,3,2,7,9,1,4]\n 8\n / \\\n 3\t 2\n / \\ / \\\n 7 9 1 4\n returns:\n [9, 8, 4, 3, 7, 1, 2]\n 9\n / \\\n 8\t 4\n / \\ / \\\n 3 7 1 2\n - insert -> Insert an element into the correct spot in the heap\n - del_max -> deletes the maximum value, optionally returning the value\n\"\"\"\n\n\nclass Heap:\n\n def __init__(self, arr):\n self.arr = arr\n self.heap = [None] * (len(self.arr) + 1)\n\n def heapify(self):\n for a in self.arr:\n self.insert(a)\n\n return self.heap\n\n def insert(self, num):\n\n i = self._find_end()\n\n self.heap[i] = num\n j = i\n\n # Perform \"Swim\" action:\n while j > 1 and (self.heap[j // 2] < self.heap[j]):\n self._exch(j // 2, j)\n j = j // 2\n\n def del_max(self, return_max=False):\n i = self._find_end()\n\n # First switch the root with the end, storing the maximum\n max_value = self.heap[1]\n self._exch(1, i - 1)\n\n # Set the end to None to delete the maximum\n self.heap[i - 1] = None\n\n j = 1\n # Perform \"sink\" action, stopping once j has no live children:\n while (j * 2) < i - 1 and self.heap[j * 2] is not None:\n # Exchange with a \"child\" this time, but only with the superior child\n left_child_ind = j * 2\n right_child_ind = (j * 2) + 1\n\n # Check which child is \"superior\" (the right one may not exist)\n child = left_child_ind\n if right_child_ind < i - 1 and self.heap[right_child_ind] is not None and self.heap[right_child_ind] > self.heap[left_child_ind]:\n child = right_child_ind\n\n if self.heap[j] >= self.heap[child]:\n break\n\n self._exch(j, child)\n j = child\n\n if return_max:\n return max_value\n\n # Helper functions that allow for exchanges, finding ends, etc\n\n def _exch(self, i, j):\n # Swaps the elements at indices i and j\n old_parent = self.heap[i]\n self.heap[i] = self.heap[j]\n self.heap[j] = old_parent\n\n def _find_end(self):\n # Find the \"end\" of the heap\n\n i = 1\n while self.heap[i] is not None:\n i += 1\n # Doubles the size of the array if needed:\n\n if i == len(self.heap):\n self._double()\n\n return i\n\n def _double(self):\n new_space = [None] * (len(self.heap) + 1)\n self.heap.extend(new_space)\n\n\n# TESTING:\n\nheap = Heap([8, 3, 2, 1, 1, 1, 4])\n\nheap.heapify()\nprint(heap.heap)\n\nheap.del_max()\nprint(heap.heap)\n","repo_name":"COYE-Coder/Data_structures","sub_path":"Heap.py","file_name":"Heap.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"44658488221","text":"\"\"\"Provide automation triggers for certain types of 
Hubitat device.\"\"\"\nimport logging\nfrom itertools import chain\nfrom json import loads\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, cast\n\nimport voluptuous as vol\n\nfrom custom_components.hubitat.util import get_hubitat_device_id\nfrom homeassistant.components.automation import (\n AutomationActionType,\n AutomationTriggerInfo,\n)\nfrom homeassistant.components.device_automation.exceptions import (\n InvalidDeviceAutomationConfig,\n)\nfrom homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.typing import ConfigType\n\nfrom .const import (\n ATTR_ATTRIBUTE,\n ATTR_DEVICE_ID,\n ATTR_HUB,\n ATTR_VALUE,\n DOMAIN,\n H_CONF_DOUBLE_TAPPED,\n H_CONF_HELD,\n H_CONF_HUBITAT_EVENT,\n H_CONF_PUSHED,\n H_CONF_SUBTYPE,\n H_CONF_UNLOCKED_WITH_CODE,\n TRIGGER_BUTTONS,\n TRIGGER_CAPABILITIES,\n)\nfrom .helpers import (\n are_config_entries_loaded,\n get_device_entry_by_device_id,\n get_hub_for_device,\n)\nfrom .hub import Hub\nfrom .hubitatmaker import Device, DeviceAttribute, DeviceCapability\n\ntry:\n from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA\nexcept Exception:\n from homeassistant.components.device_automation import (\n TRIGGER_BASE_SCHEMA, # type: ignore\n )\n\n DEVICE_TRIGGER_BASE_SCHEMA = TRIGGER_BASE_SCHEMA\n\n\n# The `event` type moved in HA 0.115\ntry:\n from homeassistant.components.homeassistant.triggers import event\nexcept ImportError:\n from homeassistant.components.automation import event # type: ignore\n\n\nTRIGGER_TYPES = tuple([v.conf for v in TRIGGER_CAPABILITIES.values()])\nTRIGGER_SUBTYPES = set(\n chain.from_iterable(\n [v.subconfs for v in TRIGGER_CAPABILITIES.values() if v.subconfs]\n )\n)\n\nTRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(\n {\n vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),\n vol.Required(H_CONF_SUBTYPE): str,\n }\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_validate_trigger_config(\n hass: HomeAssistant, config: Dict[str, Any]\n) -> Dict[str, Any]:\n \"\"\"Validate a trigger config.\"\"\"\n config = TRIGGER_SCHEMA(config)\n device = get_device_entry_by_device_id(hass, config[CONF_DEVICE_ID])\n\n if are_config_entries_loaded(hass, device.id):\n hubitat_device = get_hubitat_device(hass, device.id)\n if hubitat_device:\n types = get_trigger_types(hubitat_device.device)\n trigger_type = config[CONF_TYPE]\n if trigger_type not in types:\n _LOGGER.warning(\"Device doesn't support '%s'\", trigger_type)\n raise InvalidDeviceAutomationConfig\n\n trigger_subtype = config.get(H_CONF_SUBTYPE)\n if trigger_subtype:\n subtypes = get_trigger_subtypes(hubitat_device.device, trigger_type)\n if not subtypes or trigger_subtype not in subtypes:\n _LOGGER.warning(\"Device doesn't support '%s'\", trigger_subtype)\n\n return config\n\n\nasync def async_get_triggers(\n hass: HomeAssistant, device_id: str\n) -> Sequence[Dict[str, Any]]:\n \"\"\"List device triggers for Hubitat devices.\"\"\"\n device = get_hubitat_device(hass, device_id)\n if device is None:\n return []\n\n triggers = []\n trigger_types = get_trigger_types(device.device)\n\n for trigger_type in trigger_types:\n trigger_subtypes = get_trigger_subtypes(device.device, trigger_type)\n\n if trigger_subtypes:\n for trigger_subtype in trigger_subtypes:\n triggers.append(\n {\n CONF_DEVICE_ID: device_id,\n CONF_DOMAIN: DOMAIN,\n CONF_PLATFORM: \"device\",\n CONF_TYPE: trigger_type,\n H_CONF_SUBTYPE: trigger_subtype,\n }\n )\n else:\n 
triggers.append(\n {\n CONF_DEVICE_ID: device_id,\n CONF_DOMAIN: DOMAIN,\n CONF_PLATFORM: \"device\",\n CONF_TYPE: trigger_type,\n }\n )\n\n _LOGGER.debug(\"Returning triggers: %s\", triggers)\n return triggers\n\n\nasync def async_attach_trigger(\n hass: HomeAssistant,\n config: ConfigType,\n action: AutomationActionType,\n automation_info: AutomationTriggerInfo,\n) -> Callable[[], None]:\n \"\"\"Attach a trigger.\"\"\"\n\n hubitat_device = get_hubitat_device(hass, config[CONF_DEVICE_ID])\n if hubitat_device is None:\n _LOGGER.warning(\n \"Could not find Hubitat device for ID %s\", config[CONF_DEVICE_ID]\n )\n raise InvalidDeviceAutomationConfig\n\n # Event data should match up to the data a hubitat_event event would\n # contain\n event_data = {\n ATTR_DEVICE_ID: hubitat_device.device.id,\n ATTR_HUB: hubitat_device.hub.id,\n ATTR_ATTRIBUTE: config[CONF_TYPE],\n }\n if H_CONF_SUBTYPE in config:\n event_data[ATTR_VALUE] = config[H_CONF_SUBTYPE]\n\n trigger = event.TRIGGER_SCHEMA(\n {\n event.CONF_PLATFORM: \"event\",\n event.CONF_EVENT_TYPE: H_CONF_HUBITAT_EVENT,\n event.CONF_EVENT_DATA: event_data,\n }\n )\n\n _LOGGER.debug(\"Attaching trigger %s\", trigger)\n\n return await event.async_attach_trigger(\n hass, trigger, action, automation_info, platform_type=\"device\"\n )\n\n\nclass DeviceWrapper:\n def __init__(self, device: Device, hub: Hub):\n self._device = device\n self._hub = hub\n\n @property\n def device(self):\n return self._device\n\n @property\n def hub(self):\n return self._hub\n\n\ndef get_hubitat_device(hass: HomeAssistant, device_id: str) -> Optional[DeviceWrapper]:\n \"\"\"Return a Hubitat device for a given Home Assistant device ID.\"\"\"\n device = get_device_entry_by_device_id(hass, device_id)\n hubitat_id = get_hubitat_device_id(device)\n\n hub = get_hub_for_device(hass, device)\n if not hub:\n _LOGGER.warning(f\"No Hubitat hub is associated with {device_id}\")\n return None\n if hub.devices.get(hubitat_id) is None:\n _LOGGER.warning(f\"Invalid Hubitat ID for device {device_id}\")\n return None\n\n return DeviceWrapper(hub.devices[hubitat_id], hub)\n\n\ndef get_trigger_types(device: Device) -> Sequence[str]:\n \"\"\"Return the list of trigger types for a device.\"\"\"\n types = []\n\n if DeviceCapability.DOUBLE_TAPABLE_BUTTON in device.capabilities:\n types.append(H_CONF_DOUBLE_TAPPED)\n\n if DeviceCapability.HOLDABLE_BUTTON in device.capabilities:\n types.append(H_CONF_HELD)\n\n if DeviceCapability.PUSHABLE_BUTTON in device.capabilities:\n types.append(H_CONF_PUSHED)\n\n if DeviceCapability.LOCK in device.capabilities:\n types.append(H_CONF_UNLOCKED_WITH_CODE)\n\n return types\n\n\ndef get_trigger_subtypes(device: Device, trigger_type: str) -> Sequence[str]:\n \"\"\"Return the list of trigger subtypes for a device and a trigger type.\"\"\"\n subtypes: List[str] = []\n\n if trigger_type in (\n H_CONF_DOUBLE_TAPPED,\n H_CONF_HELD,\n H_CONF_PUSHED,\n ):\n num_buttons = 1\n if DeviceAttribute.NUM_BUTTONS in device.attributes:\n num_buttons = int(device.attributes[DeviceAttribute.NUM_BUTTONS].value)\n subtypes.extend(TRIGGER_BUTTONS[0:num_buttons])\n elif trigger_type == H_CONF_UNLOCKED_WITH_CODE:\n subtypes.extend(get_lock_codes(device))\n\n return subtypes\n\n\ndef get_valid_subtypes(trigger_type: str) -> Optional[Sequence[str]]:\n \"\"\"Return the list of valid trigger subtypes for a given type.\"\"\"\n for trigger_info in TRIGGER_CAPABILITIES.values():\n if trigger_info.conf == trigger_type:\n return trigger_info.subconfs\n return None\n\n\ndef get_lock_codes(device: 
Device) -> Sequence[str]:\r\n \"\"\"Return the lock codes for a lock.\"\"\"\r\n try:\r\n codes_str = cast(str, device.attributes[DeviceAttribute.LOCK_CODES].value)\r\n codes = loads(codes_str)\r\n return [codes[id][\"name\"] for id in codes]\r\n except Exception as e:\r\n _LOGGER.warning(\"Error getting lock codes for %s: %s\", device, e)\r\n return []\r\n","repo_name":"jason0x43/hacs-hubitat","sub_path":"custom_components/hubitat/device_trigger.py","file_name":"device_trigger.py","file_ext":"py","file_size_in_byte":8265,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"5"} +{"seq_id":"21711354268","text":"from binance.client import Client as BinanceClient\nfrom decimal import *\nfrom datetime import datetime\nimport os\nimport ExcelWriter\nimport RiskCalculator\nimport requests\nimport hmac\nimport hashlib\nimport time\n\n\n# Creates a Limit Order for the symbol trading pair with quantity as the amount and the given price\ndef createBuyOrder(symbol: str, quantity: float, price: float):\n try:\n buy_limit = binanceClient.order_limit_buy(symbol=symbol, quantity=quantity, price=price)\n addToMessageBody(buy_limit['orderId'], symbol, quantity, price,\n datetime.fromtimestamp(buy_limit['transactTime'] / 1000).strftime('%d-%m-%Y %H:%M:%S'))\n except Exception as e:\n print(e)\n\n\n# Iterates through the pairs and places a spot order for each\ndef placeDCAOrders():\n # create Message body\n global body\n body = 'Total of *' + str(len(assets)) + '* orders for a total of *' + str(totalSum) + ' $* spent. \\n\\n'\n\n # Checks if Balance is sufficient\n currentBalance = float(binanceClient.get_asset_balance(asset=assetForPaying)['free'])\n if currentBalance < totalSum:\n missingBalance = totalSum - currentBalance + 0.5\n redeemFromSavings(asset=assetForPaying, redeem_amount=missingBalance, redeem_type='FAST')\n\n # Iterates through every order\n for key in assets.values():\n # Get Amount and Price stepSize (decimals)\n for filt in binanceClient.get_symbol_info(key[0])['filters']:\n if filt['filterType'] == 'LOT_SIZE':\n ticks[key[0]] = Decimal(filt['stepSize']).normalize()\n if filt['filterType'] == 'PRICE_FILTER':\n steps[key[0]] = Decimal(filt['tickSize']).normalize()\n\n moneySpent = key[1]\n\n # Calculate/set price and amount for the order\n try:\n currPrice = round(float(binanceClient.get_symbol_ticker(symbol=key[0])['price']) * factor,\n len(str(steps[key[0]]).split(\".\")[1]))\n amount = round(moneySpent / currPrice, len(str(ticks[key[0]]).split(\".\")[1]))\n except IndexError:\n currPrice = round(float(binanceClient.get_symbol_ticker(symbol=key[0])['price']) * factor)\n amount = round(moneySpent / currPrice)\n\n # Try to place the order\n try:\n createBuyOrder(key[0], amount, currPrice)\n except Exception as e:\n print(e)\n\n\n# Appends the order details to the message body\ndef addToMessageBody(orderId, symbol: str, quantity: float, price: float, time):\n global body\n body += 'Successfully placed order ' + str(orderId) \\\n + ':\\nTrading Pair: *' + symbol + '*' \\\n + '\\nAmount: ' + str(quantity) + ' ' + symbol.replace('USDT', '') \\\n + '\\nPrice: ' + str(price) + ' $' \\\n + '\\nTotal Amount: ' + str(round(price * quantity, 2)) + ' $' \\\n + '\\nTimestamp: ' + str(time) + '\\n\\n' \\\n + '------------------------------------- \\n\\n'\n\n\n# Cancels all Open Spot Orders for the account\ndef cancelAllOpenOrders():\n try:\n for key in assets.values():\n orders = binanceClient.get_open_orders(symbol=key[0])\n for order in orders:\n binanceClient.cancel_order(symbol=key[0], orderId=order['orderId'])\n except Exception as e:\n print(e)\n\n
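# Note: Binance \"signed\" (SAPI) endpoints expect the HMAC-SHA256 of the query string,\n# keyed with the API secret and appended as &signature=<hex digest>, as done below\n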
# Hashes the signature for the request\ndef hashing(query_string):\n return hmac.new(api_secret.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256).hexdigest()\n\n\ndef redeemFromSavings(asset: str, redeem_amount, redeem_type):\n # Obtain all needed data\n headers = {'X-MBX-APIKEY': api_key}\n timestamp = int(time.time() * 1000)\n productId = asset + '001'\n queryString = 'timestamp=' + str(\n timestamp) + '&productId=' + productId + '&type=' + redeem_type + '&amount=' + str(redeem_amount)\n urlPath = '/sapi/v1/lending/daily/redeem'\n\n # Construct URL and send Request\n url = BASE_URL + urlPath + '?' + queryString + '&signature=' + hashing(queryString)\n req = requests.post(url=url, headers=headers)\n\n\n# Main\nif __name__ == \"__main__\":\n\n # Dictionary for what you want to buy\n # key := Name of the Asset (only relevant for RiskCalculator)\n # value := trading pair and the amount you want to spend\n assets = {'Bitcoin': ['BTCEUR', 50],\n 'Ethereum': ['ETHEUR', 50],\n 'Cardano': ['ADAEUR', 50],\n 'Chainlink': ['LINKEUR', 30],\n 'Polkadot': ['DOTEUR', 30],\n 'VeChain': ['VETEUR', 20],\n 'Delta-Theta': ['THETAEUR', 20],\n 'Solana': ['SOLEUR', 20],\n 'Avalanche-2': ['AVAXEUR', 15],\n 'Elrond-erd-2': ['EGLDEUR', 15]\n }\n\n # Calculates the current risk for Bitcoin and adjusts the amounts to the current risk level\n risk = RiskCalculator.getRiskForAsset('Bitcoin', True)\n for key, value in assets.items():\n value[1] *= (1.4 - (2 * risk))\n\n # Calculates the sum of money to spend\n totalSum = sum([x[1] for x in list(assets.values())])\n\n # The Fiat currency used for paying (if multiple are wanted, then the code needs to be adjusted)\n assetForPaying = 'EUR'\n\n # Factor by which the current price is dropped (for example: current price is 10,000 and factor 0.95 ->\n # 10,000 * 0.95 => Order is set at price 9,500\n factor = 0.95\n\n # So that the amount I want to buy has the right number of decimals\n ticks = dict()\n # So that the price has the right number of decimals\n steps = dict()\n\n # Connect to Binance API\n api_key = 'xxx' # yourApiKey\n api_secret = 'xxx' # yourApiSecret\n\n binanceClient = BinanceClient(api_key, api_secret)\n BASE_URL = 'https://api.binance.com'\n\n if api_key is not None and api_secret is not None:\n # Place all the defined orders\n placeDCAOrders()\n\n # Updates Excel Sheet\n ExcelWriter.updateExcel(binanceClient)","repo_name":"melvinko2009/BinanceDCABot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"566832631","text":"#!/usr/bin/env python3\nimport unittest\nfrom proctests.utils import StoredProcedureTestCase\n\nclass AddCreditor(StoredProcedureTestCase):\n def test_add_creditor(self):\n add_client(self.db)\n add_note(self.db)\n addedCreditor = add_creditor(self.db)\n fetchedCreditor = fetch_creditor(self.db)\n\n self.assertEqual(addedCreditor[\"creditor_id\"], fetchedCreditor[\"creditor_id\"], \"Creditor ID mismatch.\")\n self.assertEqual(addedCreditor[\"client_id\"], fetchedCreditor[\"client_id\"], \"Client ID mismatch.\")\n self.assertEqual(addedCreditor[\"note_id\"], fetchedCreditor[\"note_id\"], \"Note ID mismatch.\")\n self.assertEqual(addedCreditor[\"user_id\"], fetchedCreditor[\"user_id\"], \"User ID mismatch.\")\n\ndef add_client(db):\n client = {\n \"preferred_name\": \"Preferred name\",\n \"phone_number\": \"1234\",\n \"archived\": False,\n \"user_id\": 1\n }\n\n
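# Insert the fixture row and echo it back with RETURNING so the test can compare fields\n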
\"archived\": False,\n \"user_id\": 1\n }\n\n db.execute(\"\"\"INSERT INTO client (preferred_name,\n phone_number,\n archived,\n user_id)\n VALUES (%s, %s, %s, %s)\n RETURNING id AS client_id,\n preferred_name,\n phone_number,\n archived,\n user_id\"\"\", tuple(client.values()))\n result = {}\n for row in db:\n result = {\n \"client_id\": row[\"client_id\"],\n \"preferred_name\": row[\"preferred_name\"],\n \"phone_number\": row[\"phone_number\"],\n \"archived\": row[\"archived\"],\n \"user_id\": row[\"user_id\"]\n }\n return result\n\ndef add_note(db):\n note = {\n \"note\": \"Note\",\n \"user_id\": 1\n }\n\n db.execute(\"\"\"INSERT INTO note (note,\n user_id)\n VALUES (%s, %s)\n RETURNING id AS note_id,\n note,\n user_id\"\"\", tuple(note.values()))\n result = {}\n for row in db:\n result = {\n \"note_id\": row[\"note_id\"],\n \"note\": row[\"note\"],\n \"user_id\": row[\"user_id\"]\n }\n return result\n\ndef add_creditor(db):\n creditor = {\n \"client_id\": 1,\n \"note_id\": 1,\n \"user_id\": 1\n }\n\n db.call_procedure(\"AddCreditor\",\n tuple(creditor.values()))\n result = {}\n for row in db:\n result = {\n \"creditor_id\": row[0]\n }\n result.update(creditor)\n return result\n\ndef fetch_creditor(db):\n db.execute(\"\"\"SELECT id as creditor_id,\n client_id,\n note_id,\n user_id\n FROM creditor\"\"\")\n result = {}\n for row in db:\n result = {\n \"creditor_id\": row[\"creditor_id\"],\n \"client_id\": row[\"client_id\"],\n \"note_id\": row[\"note_id\"],\n \"user_id\": row[\"user_id\"]\n }\n return result\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"obeezzy/rr-schema","sub_path":"tests/proctests/retail/creditor/test_addcreditor.py","file_name":"test_addcreditor.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38811181425","text":"#!/usr/bin/env python\n\n\"\"\"\nBase neuron class used by LPU.\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod, abstractproperty\nimport os.path\nimport numpy as np\n\nimport pycuda.gpuarray as garray\nfrom pycuda.tools import dtype_to_ctype\nimport pycuda.driver as cuda\nfrom pycuda.compiler import SourceModule\n\nfrom neurokernel.LPU.utils.simpleio import *\n\nclass BaseNeuron(object):\n __metaclass__ = ABCMeta\n\n def __init__(self, n_dict, neuron_state_pointer, dt, debug, LPU_id=None):\n '''\n Every neuron class should setup GPU data structure needed\n by it during initialization. In addition, graded potential neurons\n should also update the neuron_state structures with their\n initial state during initialization.\n\n n_dict is a dictionary representing the parameters needed\n by the neuron class.\n For example, if a derived neuron class called IAF needs a\n parameter called bias, n_dict['bias'] will be vector containing\n the bias values for all the IAF neurons in a particular LPU.\n\n In addition to the neuron parameters, n_dict will also contain:-\n 1. n_dict['cond_pre'] representing the conductance based synapses\n with connection to neurons represented by this class.\n 2. n_dict['cond_post'] denoting the neuron indices the synapses mentioned\n above enervate to.\n 3. n_dict['reverse'] containing the reverse potentials for the\n conductance based synapses.\n 4. n_dict['num_dendrites_cond'] representing the number of dendrites for\n neuron in this class of the conductance type. 
For example,\n n_dict['num_dendrites_cond'][0] will represent the number of\n conductance based synapses connecting to the neuron having index 0\n in this object\n 5. n_dict['I_pre'] representing the indices of the non-conductance\n based synapses with connections to neurons represented by this\n object, e.g. synapses modelled by filters. This also includes\n any external input to neurons of this class.\n 6. n_dict['I_post'] representing the indices of the neurons the above\n mentioned synapses innervate.\n 7. n_dict['num_dendrites_I'] representing the number of dendrites for\n neuron in this class of the non-conductance type. For example,\n n_dict['num_dendrites_I'][0] will represent the number of\n non conductance based synapses connecting to the neuron having index 0\n in this object.\n\n Note that you only need the above information if you plan to override the\n default update_I method.\n\n neuron_state_pointer is an integer representing the initial memory location\n on the GPU for storing the neuron states for this object.\n For graded potential neurons, the data type will be double whereas for\n spiking neurons, it will be int.\n\n dt represents one time step.\n\n debug is a boolean and is intended to be used for debugging purposes.\n\n '''\n self.__neuron_state_pointer = neuron_state_pointer\n self.__num_neurons = len(n_dict['id'])\n _num_dendrite_cond = np.asarray([n_dict['num_dendrites_cond'][i]\n for i in range(self.__num_neurons)],\n dtype=np.int32).flatten()\n _num_dendrite = np.asarray([n_dict['num_dendrites_I'][i]\n for i in range(self.__num_neurons)],\n dtype=np.int32).flatten()\n\n self.__cum_num_dendrite = garray.to_gpu(np.concatenate((\n np.asarray([0,], dtype=np.int32),\n np.cumsum(_num_dendrite, dtype=np.int32))))\n self.__cum_num_dendrite_cond = garray.to_gpu(np.concatenate((\n np.asarray([0,], dtype=np.int32),\n np.cumsum(_num_dendrite_cond, dtype=np.int32))))\n self.__num_dendrite = garray.to_gpu(_num_dendrite)\n self.__num_dendrite_cond = garray.to_gpu(_num_dendrite_cond)\n self.__pre = garray.to_gpu(np.asarray(n_dict['I_pre'], dtype=np.int32))\n self.__cond_pre = garray.to_gpu(np.asarray(n_dict['cond_pre'],\n dtype=np.int32))\n self.__V_rev = garray.to_gpu(np.asarray(n_dict['reverse'],\n dtype=np.double))\n self.I = garray.zeros(self.__num_neurons, np.double)\n self.__update_I_cond = self.__get_update_I_cond_func()\n self.__update_I_non_cond = self.__get_update_I_non_cond_func()\n self.__LPU_id = LPU_id\n self.__debug = debug\n if self.__debug:\n if self.__LPU_id is None:\n self.__LPU_id = \"default_LPU\"\n i = 0\n while os.path.isfile(self.__LPU_id + \"_I_\" + self.__class__.__name__ + str(i) + \".h5\"):\n i+=1\n self.__I_file = tables.openFile(self.__LPU_id + \"_I_\" + self.__class__.__name__ + str(i) + \".h5\", mode=\"w\")\n self.__I_file.createEArray(\"/\",\"array\", \\\n tables.Float64Atom(), (0,self.__num_neurons))\n \n @abstractmethod\n def eval(self):\n '''\n This method should update the neuron states. 
A pointer to\n the start of the memory located will be provided at time of\n initialization.\n\n self.I.gpudata will be a pointer to the memory location\n where the input current to all the neurons at each step is updated\n if the child class does not override update_I() method\n '''\n pass\n\n\n @property\n def neuron_class(self):\n '''\n For future use\n '''\n return 0\n\n\n @property\n def update_I_override(self): return False\n\n def update_I(self, synapse_state, st=None, logger=None):\n '''\n This method should compute the input current to each neuron\n based on the synapse states.\n synapse_state may either contain conductances or currents.\n synapse_state will be an integer representing the initial memory\n location on the GPU reserved for the synapse states. The data\n type for synapse states will be double.\n The information needed to compute the currents is provided in the\n dictionary n_dict at initialization.\n\n BaseNeuron provides an implementation of this method. To use a\n different implementation, this method should be overridden and\n update_I_override property must be defined to be True in the derived class.\n\n '''\n self.I.fill(0)\n if self.__pre.size > 0:\n self.__update_I_non_cond.prepared_async_call(\n self.__grid_get_input, self.__block_get_input, st, \n int(synapse_state), self.__cum_num_dendrite.gpudata, \n self.__num_dendrite.gpudata, self.__pre.gpudata,\n self.I.gpudata)\n if self.__cond_pre.size > 0:\n self.__update_I_cond.prepared_async_call(\n self.__grid_get_input, self.__block_get_input, st,\n int(synapse_state), self.__cum_num_dendrite_cond.gpudata,\n self.__num_dendrite_cond.gpudata, self.__cond_pre.gpudata,\n self.I.gpudata, int(self.__neuron_state_pointer),\n self.__V_rev.gpudata)\n if self.debug:\n self.__I_file.root.array.append(self.I.get().reshape((1, -1)))\n\n\n def post_run(self):\n '''\n This method will be called at the end of the simulation.\n '''\n \n def __post_run(self):\n '''\n This private function is used to close the current output file\n when baseneuron is used to compute input current to a neuron and\n the debug flag is set.\n '''\n if self.__debug:\n self.__I_file.close()\n\n def __get_update_I_cond_func(self):\n template = \"\"\"\n #define N 32\n #define NUM_NEURONS %(num_neurons)d\n\n __global__ void get_input(double* synapse, int* cum_num_dendrite, \n int* num_dendrite, int* pre, double* I_pre, \n double* V, double* V_rev)\n {\n int tidx = threadIdx.x;\n int tidy = threadIdx.y;\n int bid = blockIdx.x;\n\n int neuron;\n\n __shared__ int num_den[32];\n __shared__ int den_start[32];\n __shared__ double V_in[32];\n __shared__ double input[32][33];\n\n if(tidy == 0)\n {\n neuron = bid * N + tidx;\n if(neuron < NUM_NEURONS)\n {\n num_den[tidx] = num_dendrite[neuron];\n V_in[tidx] = V[neuron];\n }\n } else if(tidy == 1)\n {\n neuron = bid * N + tidx;\n if(neuron < NUM_NEURONS)\n {\n den_start[tidx] = cum_num_dendrite[neuron];\n }\n }\n\n input[tidy][tidx] = 0.0;\n\n __syncthreads();\n\n neuron = bid * N + tidy ;\n if(neuron < NUM_NEURONS)\n {\n int n_den = num_den[tidy];\n int start = den_start[tidy];\n double VV = V_in[tidy];\n\n\n for(int i = tidx; i < n_den; i += N)\n {\n input[tidy][tidx] += synapse[pre[start + i]] * (VV - V_rev[start + i]);\n }\n }\n\n __syncthreads();\n \n if(tidy < 8)\n {\n input[tidx][tidy] += input[tidx][tidy + 8];\n input[tidx][tidy] += input[tidx][tidy + 16];\n input[tidx][tidy] += input[tidx][tidy + 24];\n }\n\n __syncthreads();\n\n if(tidy < 4)\n {\n input[tidx][tidy] += input[tidx][tidy + 4];\n }\n\n 
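// keep folding the 32 partial sums per neuron: strides 2 and 1 follow below\n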
__syncthreads();\n\n if(tidy < 2)\n {\n input[tidx][tidy] += input[tidx][tidy + 2];\n }\n\n __syncthreads();\n\n if(tidy == 0)\n {\n input[tidx][0] += input[tidx][1];\n neuron = bid*N+tidx;\n if(neuron < NUM_NEURONS)\n {\n I_pre[neuron] -= input[tidx][0];\n }\n }\n }\n // can be improved\n \"\"\"\n mod = SourceModule(template % {\"num_neurons\": self.__num_neurons}, \n options = [\"--ptxas-options=-v\"])\n func = mod.get_function(\"get_input\")\n func.prepare([np.intp, np.intp, np.intp, np.intp, \n np.intp, np.intp, np.intp])\n self.__block_get_input = (32, 32, 1)\n self.__grid_get_input = ((self.__num_neurons - 1) / 32 + 1, 1)\n return func\n\n def __get_update_I_non_cond_func(self):\n template = \"\"\"\n #define N 32\n #define NUM_NEURONS %(num_neurons)d\n\n __global__ void get_input(double* synapse, int* cum_num_dendrite, \n int* num_dendrite, int* pre, double* I_pre)\n {\n int tidx = threadIdx.x;\n int tidy = threadIdx.y;\n int bid = blockIdx.x;\n\n int neuron;\n\n __shared__ int num_den[32];\n __shared__ int den_start[32];\n __shared__ double input[32][33];\n\n if(tidy == 0)\n {\n neuron = bid * N + tidx;\n if(neuron < NUM_NEURONS)\n {\n num_den[tidx] = num_dendrite[neuron];\n }\n } else if(tidy == 1)\n {\n neuron = bid * N + tidx;\n if(neuron < NUM_NEURONS)\n {\n den_start[tidx] = cum_num_dendrite[neuron];\n }\n }\n\n input[tidy][tidx] = 0.0;\n\n __syncthreads();\n\n neuron = bid * N + tidy ;\n if(neuron < NUM_NEURONS){\n \n int n_den = num_den[tidy];\n int start = den_start[tidy];\n\n for(int i = tidx; i < n_den; i += N)\n {\n input[tidy][tidx] += synapse[pre[start] + i];\n }\n }\n __syncthreads();\n\n if(tidy < 8)\n {\n input[tidx][tidy] += input[tidx][tidy + 8];\n input[tidx][tidy] += input[tidx][tidy + 16];\n input[tidx][tidy] += input[tidx][tidy + 24];\n }\n\n __syncthreads();\n\n if(tidy < 4)\n {\n input[tidx][tidy] += input[tidx][tidy + 4];\n }\n\n __syncthreads();\n\n if(tidy < 2)\n {\n input[tidx][tidy] += input[tidx][tidy + 2];\n }\n\n __syncthreads();\n\n if(tidy == 0)\n {\n input[tidx][0] += input[tidx][1];\n neuron = bid*N+tidx;\n if(neuron < NUM_NEURONS)\n {\n I_pre[neuron] += input[tidx][0];\n }\n }\n\n }\n //can be improved\n \"\"\"\n mod = SourceModule(template % {\"num_neurons\": self.__num_neurons}, \n options = [\"--ptxas-options=-v\"])\n func = mod.get_function(\"get_input\")\n func.prepare([np.intp, np.intp, np.intp, np.intp, np.intp])\n return func\n","repo_name":"neurokernel/antenna","sub_path":"antenna/neurons/baseneuron.py","file_name":"baseneuron.py","file_ext":"py","file_size_in_byte":13484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"73108531673","text":"def trianguloPascal(filas):\n\tif filas == 0:\n\t\treturn []\n\telif filas == 1:\n\t\treturn [[1]]\n\t\n\telse:\n\t\tnuevaFila = [1]\n\t\tresultado = trianguloPascal(filas-1)\n\t\tultimaFila = resultado[-1]\n\n\t\tfor i in range(len(ultimaFila)-1):\n\t\t\tnuevaFila.append(ultimaFila[i] + ultimaFila[i+1])\n\n\t\tnuevaFila += [1]\n\t\tresultado.append(nuevaFila)\n\n\treturn resultado\n\ncntFilas = int(input(\"Cantidad de filas:\\n\"))\nfor i in trianguloPascal(cntFilas):\n\tprint(i)","repo_name":"danhiel98/Scripts-Python","sub_path":"10.TrianguloPascal.py","file_name":"10.TrianguloPascal.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"2227106105","text":"import re\nimport sys\nfilename = sys.argv[1]\n\n# Method/Function: List 
tokenize(TextFilePath)\n# Write a method/function that reads in a text file and returns a list of the tokens in that file.\n# For the purposes of this project, a token is a sequence of alphanumeric characters, independent of capitalization\n# (so Apple, apple, aPpLe are the same token). You are allowed to use regular expressions if you wish to\n# (and you can use some regexp engine, no need to write it from scratch), but you are not allowed to import a tokenizer\n# (e.g. from NLTK), since you are being asked to write a tokenizer.\n\n\ndef tokenize(filepath) -> list:\n tokenList = []\n with open(filepath) as file:\n for line in file:\n lineList = re.split('[^a-zA-Z0-9]+', line.lower())[:-1]\n for word in lineList:\n tokenList.append(word.rstrip(\".\").rstrip(\",\"))\n return tokenList\n\n# Method/Function: Map computeWordFrequencies(List)\n# Write another method/function that counts the number of occurrences of each token in the token list.\n# Remember that you should write this assignment yourself from scratch, so you are not allowed to import\n# a counter when the assignment asks you to write that method.\n\n\ndef computeWordFrequenceies(tokenList) -> dict:\n tokenDict = {}\n for token in tokenList:\n if token not in tokenDict:\n if token != \"\":\n tokenDict[token] = 1\n else:\n tokenDict[token] += 1\n return tokenDict\n\n# Method/Function: void print(Frequencies)\n# Finally, write a method/function that prints out the word frequency count onto the screen.\n# The printout should be ordered by decreasing frequency (so, the highest frequency words first; if necessary, order the cases of ties alphabetically).\n\n\ndef printer(tokenDict) -> None:\n for token, tokenFrequency in tokenDict.items():\n print(token, \"->\", tokenFrequency)\n\n# self function to write output to a file\n# the writer function creates a new file, as the vs code terminal was not enough\n# to handle all the numerous lines for extremely large inputs\n\n\ndef writer(tokenDict):\n file_object = open(\"output.txt\", 'w')\n for token, tokenFrequency in tokenDict.items():\n keyValueString = f\"{token} -> {tokenFrequency}\" + '\\n'\n file_object.write(keyValueString)\n file_object.close()\n\n\ndef main():\n # create a list of tokens\n lst = tokenize(filename)\n # create a hashmap/dictionary of words mapping to the frequency of the words\n dct = computeWordFrequenceies(lst)\n\n # create a file to check the output, works similar to the printer function in terms of representing output\n # writer(dct)\n\n # print the dictionary in one of the many forms of representation\n printer(dct)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"YashPathak302/assignment1","sub_path":"partA.py","file_name":"partA.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71448352471","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 15 03:16:46 2020\n\n@author: alden\n\"\"\"\ndef getPD(filname):\n # images are 48x48\n # N = 35887\n colnames = []\n coldata = []\n first = True\n count = 1\n colnames = [\"Items\", \"Quantity\"]\n \n for line in open(filname):\n row = line.split(',')\n try:\n coldata.append(row)\n except:\n break\n df = pd.DataFrame(coldata, columns = colnames)\n return df\ndef getData(filname):\n # images are 48x48\n # N = 35887\n colnames = []\n coldata = []\n first = True\n count = 1\n colnames = [\"Items\", \"Quantity\"]\n \n for line in open(filname):\n row = line.split(',')\n try:\n coldata.append(row)\n 
except:\n break\n return coldata\n\ndef knapSack(W, wt, val, n): \n K = [[0 for x in range(W+1)] for x in range(n+1)] \n \n # Build table K[][] in bottom up manner \n for i in range(n+1): \n for w in range(W+1): \n if i==0 or w==0: \n K[i][w] = 0\n elif wt[i-1] <= w: \n K[i][w] = max(val[i-1] + K[i-1][w-wt[i-1]], K[i-1][w]) \n else: \n K[i][w] = K[i-1][w] \n \n return K[n][W] \ndef printknapSack(W, wt, val, n): \n result = []\n K = [[0 for w in range(W + 1)] for i in range(n + 1)] \n \n # Build table K[][] in bottom \n # up manner \n for i in range(n + 1): \n for w in range(W + 1): \n if i == 0 or w == 0: \n K[i][w] = 0\n elif wt[i - 1] <= w: \n K[i][w] = max(val[i - 1] \n + K[i - 1][w - wt[i - 1]], \n K[i - 1][w]) \n else: \n K[i][w] = K[i - 1][w] \n \n # stores the result of Knapsack \n res = K[n][W] \n saved = res\n \n w = W \n for i in range(n, 0, -1): \n if res <= 0: \n break\n # either the result comes from the \n # top (K[i-1][w]) or from (val[i-1] \n # + K[i-1] [w-wt[i-1]]) as in Knapsack \n # table. If it comes from the latter \n # one/ it means the item is included. \n if res == K[i - 1][w]: \n continue\n else: \n \n # This item is included. \n result.append(i - 1) \n \n # Since this weight is included \n # its value is deducted \n res = res - val[i - 1] \n w = w - wt[i - 1]\n return result, saved\ntry:\n from tkinter import *\n from tkinter.ttk import *\nexcept:\n from Tkinter import *\n from ttk import *\nfrom pandastable.core import Table\nfrom pandastable.data import TableModel\nimport pandas as pd\nprices = getData(\"bla.csv\")\nclass MyTable(Table):\n \"\"\"\n Custom table class inherits from Table.\n You can then override required methods\n \"\"\"\n def __init__(self, parent=None, **kwargs):\n Table.__init__(self, parent, **kwargs)\n return\n\nclass MyApp(Frame):\n \"\"\"Basic test frame for the table\"\"\"\n\n def __init__(self, parent=None):\n self.parent = parent\n Frame.__init__(self)\n self.main = self.master\n self.main.geometry('600x400+200+100')\n self.main.title('pandastable examples')\n f = Frame(self.main)\n f.pack(fill=BOTH,expand=1)\n pt, df = make_table(f)\n bp = Frame(self.main)\n bp.pack(side=TOP)\n e1 = Entry(bp)\n e1.pack(side=LEFT,fill=BOTH,)\n b=Button(bp,text='Optimize', command=lambda: showPrice(df, e1.get()))\n b.pack(side=LEFT,fill=BOTH,)\n b=Button(bp,text='See Prices', command=lambda: show_dist(pt.getSelectedRow(), df))\n b.pack(side=LEFT,fill=BOTH,)\n return\ndef showPrice(df, l, **kwds): \n t = Toplevel()\n t.geometry('600x400')\n t.title('Optimized')\n value = []\n weight = []\n colnames = []\n coldata = []\n amounts = df.to_numpy()\n for i in range(len(prices)):\n value.append(int(float(amounts[i][1]) * float(prices[i][2])*100))\n weight.append(int(float(amounts[i][1]) * (float(prices[i][2])- float(prices[i][1]))*100))\n res, saved = printknapSack(int(l)*100, weight, value, len(weight))\n print(res,saved, weight, value)\n colnames = [\"Item\", \"Company 1\", \"Company 2\"]\n for i in range(len(weight)):\n if i in res:\n toAdd = [str(prices[i][0]), \" \", \"X\"]\n else:\n toAdd = [str(prices[i][0]), \"X\", \"\"]\n coldata.append(toAdd)\n coldata.append([\"Amount:\", \" \", str(float(saved/100))])\n df = pd.DataFrame(coldata, columns = colnames)\n fr1 = Frame(t)\n fr1.pack(fill=BOTH,expand=1)\n pt1 = MyTable(fr1, dataframe=df, **kwds )\n pt1.show()\n return\ndef make_table(frame, **kwds):\n \"\"\"make a sample table\"\"\"\n #df = TableModel.getSampleData()\n df = getPD(\"tobuy.csv\")\n #df['label'] = df.label.astype('category')\n pt = 
MyTable(frame, dataframe=df, **kwds )\n    pt.show()\n    return pt, df\ndef show_dist(row, df, **kwds):\n    \"\"\"make a sample table\"\"\"\n    # row and df are passed by the 'See Prices' button callback; the displayed\n    # price table itself is re-read from bla.csv below\n    t = Toplevel()\n    fr1 = Frame(t)\n    fr1.pack(fill=BOTH,expand=1)\n    #df = TableModel.getSampleData()\n    df = getPD(\"bla.csv\")\n    #df['label'] = df.label.astype('category')\n    pt = MyTable(fr1, dataframe=df, **kwds )\n    pt.show()\n    return \napp = MyApp()\napp.mainloop()","repo_name":"E0201942/Hack-The-Globe","sub_path":"Data/opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40525892150","text":"import json\n\nimport jsons\n\nfrom bot_state import (\n    BotState,\n    BotStateFactory,\n    GameState,\n    GreetingState,\n    IdleState,\n    ProtoGameState,\n)\nfrom chat_handler import ChatHandler\nfrom telegram_client import TelegramClient\n\n\nclass ChatHandlerEncoder(json.JSONEncoder):\n    \"\"\"JSON encoder for ChatHandler class.\n    Return JSON dict from ChatHandler object.\"\"\"\n\n    def default(self, o):\n        def encode_state(state: BotState):\n            if isinstance(state, GreetingState):\n                return {\n                    \"state_name\": \"GreetingState\",\n                    \"is_on_enter_called\": state.is_on_enter_called,\n                }\n\n            if isinstance(state, IdleState):\n                return {\n                    \"state_name\": \"IdleState\",\n                    \"is_on_enter_called\": state.is_on_enter_called,\n                }\n\n            if isinstance(state, GameState):\n                return {\n                    \"state_name\": \"GameState\",\n                    \"is_on_enter_called\": state.is_on_enter_called,\n                    \"game_params\": jsons.dump(state.game_params),\n                }\n\n            raise TypeError(f\"Unsupported state: {type(state)}\")\n\n        if isinstance(o, ChatHandler):\n            return {\n                \"chat_id\": o.chat_id,\n                \"state\": encode_state(o.state),\n            }\n\n        raise TypeError(f\"Can only encode ChatHandler. But got {type(o)}\")\n\n\nclass ChatHandlerDecoder:\n    \"\"\"JSON decoder for ChatHandler class.\n    Return ChatHandler object from JSON dict.\"\"\"\n\n    def __init__(self, client: TelegramClient, state_factory: BotStateFactory):\n        self.client = client\n        self.state_factory = state_factory\n\n    def decode(self, dct) -> ChatHandler:\n        \"\"\"Reassemble ChatHandler object from primitive data types.\"\"\"\n\n        chat_id = dct[\"chat_id\"]\n        proto_state = dct[\"state\"]\n        state_name = proto_state[\"state_name\"]\n        is_on_enter_called = proto_state[\"is_on_enter_called\"]\n\n        if state_name == \"GreetingState\":\n            state = GreetingState(self.client, self.state_factory, is_on_enter_called)\n        elif state_name == \"IdleState\":\n            state = IdleState(self.client, self.state_factory, is_on_enter_called)\n        elif state_name == \"GameState\":\n            state = GameState(\n                self.client,\n                self.state_factory,\n                jsons.load(proto_state[\"game_params\"], cls=ProtoGameState),\n                is_on_enter_called,\n            )\n        else:\n            raise TypeError(f\"Can't deserialize state. 
Unknown name: {state_name}\")\n\n return ChatHandler(state, chat_id)\n","repo_name":"IvanGrigoriev11/Trivia_bot","sub_path":"src/custom_codecs.py","file_name":"custom_codecs.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10521597180","text":"import socket\nfrom threading import Thread\n\nfrom server.user import User\nfrom server.error import NoUserError, NoChannelError, NotInChannelError\nfrom common.command import Command\nfrom common.reply import Reply\nfrom common.channel import Channel\n\nclass Server:\n\t\"\"\"\n\tAn instance of an IRC server.\n\tManages users and channels, and handles received messages.\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tCreate a new IRC server.\n\t\t\"\"\"\n\n\t\tself._users = {}\n\t\tself._channels = {}\n\n\t\tself._hostname = None\n\n\tdef start(self, ip, port, callback):\n\t\t\"\"\"\n\t\tBegin listening for client connections at the given address.\n\n\t\t@param ip The IP address to listen on.\n\t\t@param port The port to listen on. Should be 6660-6669 or 7000.\n\t\t@param callback The function to call when a thread receives a message\n\t\t\"\"\"\n\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n\n\t\tself._hostname = ip\n\t\tsock.bind((ip, port))\n\n\t\twhile True:\n\t\t\t# Listen with no queued connections - will block\n\t\t\tsock.listen(0)\n\t\t\t# A connection has been acquired - get its info\n\t\t\t(conn, (ip, _)) = sock.accept()\n\n\t\t\t# The user will manage its own connection info\n\t\t\tusr = User(conn, ip)\n\t\t\t# Users are autonomous, but store them as a key to keep track of\n\t\t\t# the channels they belong to\n\t\t\tself._users[usr] = []\n\n\t\t\t# Let the user start listening on its own thread\n\t\t\tThread(target = usr.listen, args = (callback,)).start()\n\n\tdef get_user(self, n):\n\t\t\"\"\"\n\t\tGet the user with the given nickname.\n\t\tA user will always be returned, else an exception is raised.\n\n\t\t@param n The nickname to search for.\n\t\t@return The retrieved user.\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tus = self._users.keys()\n\t\t\tusr = next(u for u in us if u.hostmask.nickname == n)\n\t\t\tassert usr.alive\n\n\t\t\treturn usr\n\n\t\texcept StopIteration:\n\t\t\traise NoUserError from None\n\n\t\texcept AssertionError:\n\t\t\tself.remove_user(usr)\n\t\t\traise NoUserError from None\n\n\tdef remove_user(self, usr):\n\t\t\"\"\"\n\t\tRemove a user from the server, and signal it to stop listening on its\n\t\tconnection and shut down.\n\n\t\t@param usr The user to remove\n\t\t\"\"\"\n\n\t\t# The server needs to remove the user instead of just calling die to\n\t\t# make sure a dead user does not remain in the user and channel lists\n\t\tfor c in self._users[usr]:\n\t\t\tself._channels[c].remove(usr)\n\n\t\tself._users.pop(usr)\n\t\t# Will stop the listen loop and close the connection, ending the thread\n\t\tusr.die()\n\n\tdef get_channel(self, n, create = False):\n\t\t\"\"\"\n\t\tGet the channel with the given (fully-qualified) name.\n\n\t\t@param n The name of the channel to search for.\n\t\t@param create Specify whether the channel should be created if it\n\t\t does not exist. 
Otherwise, raise NoChannelError.\n\t\t@return The retrieved channel.\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\tcs = self._channels.keys()\n\t\t\t# The name includes the prefix, so format each channel to match\n\t\t\treturn next(c for c in cs if format(c) == n)\n\n\t\texcept StopIteration:\n\t\t\t# When no channel is found, it is automatically created (maybe)\n\t\t\t# Always try to create it, to raise BadChannelError\n\t\t\tc = Channel.from_raw(n)\n\t\t\tif create == False:\n\t\t\t\traise NoChannelError from None\n\n\t\t\t# The channel starts out with no users\n\t\t\tself._channels[c] = []\n\t\t\treturn c\n\n\tdef get_channel_users(self, chan):\n\t\t\"\"\"\n\t\tReturn a list of users that are currently in a channel.\n\n\t\t@param chan The channel object to get users from.\n\t\t@return A list of joined users.\n\t\t\"\"\"\n\n\t\ttry:\n\t\t\treturn self._channels[chan]\n\t\texcept KeyError:\n\t\t\traise NoChannelError from None\n\n\tdef join_channel(self, chan, usr):\n\t\t\"\"\"\n\t\tAdd to the list of joined users for a channel.\n\n\t\t@param chan The target channel.\n\t\t@param usr The user trying to join.\n\t\t\"\"\"\n\n\t\tif chan not in self._channels:\n\t\t\tself._channels[chan] = []\n\t\tif usr in self._channels[chan]:\n\t\t\treturn\n\n\t\tself._channels[chan].append(usr)\n\t\tself._users[usr].append(chan)\n\n\tdef part_channel(self, chan, usr):\n\t\t\"\"\"\n\t\tRemove a user from the list of users of a channel.\n\n\t\t@param chan The target channel.\n\t\t@param usr The user trying to part.\n\t\t\"\"\"\n\n\t\tif chan not in self._channels:\n\t\t\traise NoChannelError\n\t\tif chan not in self._users[usr]:\n\t\t\traise NotInChannelError\n\n\t\tself._channels[chan].remove(usr)\n\t\tself._users[usr].remove(chan)\n\n\t@property\n\tdef hostname(self):\n\t\treturn self._hostname\n\n\t@property\n\tdef users(self):\n\t\treturn self._users.keys()\n","repo_name":"ufosc/pylay","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"5"} +{"seq_id":"29562883319","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport random\n\n\n# http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule\nclass YoudspiderSpider(scrapy.Spider):\n name = 'youdSpider'\n allowed_domains = ['fanyi.youdao.com']\n\n # start_urls = ['http://youdao.com/']\n def start_requests(self):\n url = \"http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule\"\n # http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule\n # str=\"南昌\"\n # a=str.encode()\n userAgents = [\"Mozilla/5.0 (Macintosh; \\\n U; Intel Mac OS X 10_6_8; en-us)\\\n AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\",\n \"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us)\\\n AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\",\n \"Mozilla/5.0 (Macintosh; Intel Mac\\\n OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; \\\n Windows NT 5.1; TencentTraveler 4.0)\",\n \"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like\\\n Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like\\\n Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5\",\n\n ]\n strr = \"长城\"\n aa = strr.encode()\n userAgent = random.choice(userAgents)\n headers = {\n \"user-agent\": userAgent,\n \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n }\n yield scrapy.FormRequest(\n\n url=url,\n headers=headers,\n # 像队列中加入一个表单信息的post请求\n formdata={\n \"action\": 
\"FY_BY_REALTlME\",\n \"bv\": \"0930ba55ca8c5e4b94b06e3db8ae8b55\",\n \"client\": \"fanyideskweb\",\n \"doctype\": \"json\",\n \"from\": \"AUTO\",\n \"i\": aa,\n \"keyfrom\": \"fanyi.web\",\n \"salt\": \"15920382269479\",\n \"sign\": \"1f678e17a410689cbf780c2ce451ccd7\",\n \"smartresult\": \"dict\",\n \"to\": \"AUTO\",\n \"ts\": \"1592038226947\",\n \"version\": \"2.1\"\n\n },\n callback=self.parse\n )\n\n def parse(self, response):\n print(\"========================================\")\n print(response.body)\n","repo_name":"wzl1368611/youdaoSpider","sub_path":"youdaoSpider/spiders/youdSpider.py","file_name":"youdSpider.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"41771254636","text":"\"\"\"day16_cmdb2 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url,include\nfrom django.contrib import admin\nfrom service import views\n\nurlpatterns = [\n # url(r'^list_service/$', views.list_service, name='list_service'),\n # url(r'^add_service/$', views.add_service, name='add_service'),\n # url(r'^edit_service/$', views.edit_service, name='edit_service'),\n # url(r'^del_service/$', views.del_service, name='del_service'),\n\n url(r'^list_service/$', views.list_service, name='list_service'),\n url(r'^add_service/$', views.add_service.as_view(), name='add_service'),\n url(r'^edit_service/(\\d+)/$', views.edit_service.as_view(), name='edit_service'),\n url(r'^del_(service|user)/(\\d+)/$', views.del_service, name='del_service'),\n\n # url(r'^list_service_user/$', views.list_service_user, name='list_service_user'),\n # url(r'^add_service_user/$', views.add_service_user, name='add_service_user'),\n # url(r'^edit_service_user/$', views.edit_service_user, name='edit_service_user'),\n # url(r'^del_service_user/$', views.del_service_user, name='del_service_user'),\n\n url(r'^list_service_user/$', views.list_service_user, name='list_service_user'),\n # url(r'^add_service_user/$', views.add_service_user.as_view(), name='add_service_user'),\n url(r'^edit_service_user/(\\d+)/$', views.edit_service_user.as_view(), name='edit_service_user'),\n # url(r'^del_service_(host|user)/(\\d+)/$', views.del_service_user, name='del_service_user'),\n]","repo_name":"zhang-sgin/python_code","sub_path":"day16_cmdb2/service/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"10012057382","text":"import mxnet as mx\nfrom mxnet.gluon import nn\nfrom mxnet.gluon.nn import HybridBlock\nfrom mxnet import nd\nimport os\nfrom sys import maxsize\n\nimport random\nfrom oneshot_nas_blocks_nobn import NasHybridSequential, ShuffleNetBlock, ShuffleNasBlock, NasBatchNorm, Activation, SE\n\nimport sys\nsys.path.append('./utils')\nfrom calculate_flops import get_flops\n\n\n__all__ = ['get_shufflenas_oneshot', 
'ShuffleNasOneShot', 'ShuffleNasOneShotFix']\n\n\nclass ShuffleNasOneShot(HybridBlock):\n def __init__(self, input_size=224, n_class=1000, architecture=None, channel_scales=None,\n use_all_blocks=False, bn=nn.BatchNorm, use_se=False, last_conv_after_pooling=False, merge_bn=False):\n \"\"\"\n scale_cand_ids = [6, 5, 3, 5, 2, 6, 3, 4, 2, 5, 7, 5, 4, 6, 7, 4, 4, 5, 4, 3]\n scale_candidate_list = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0]\n stage_repeats = [4, 4, 8, 4]\n len(scale_cand_ids) == sum(stage_repeats) == # feature blocks == 20\n \"\"\"\n super(ShuffleNasOneShot, self).__init__()\n # Predefined\n self.stage_repeats = [4, 4, 8, 4]\n self.stage_out_channels = [64, 160, 320, 640]\n self.candidate_scales = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0]\n self.use_all_blocks = use_all_blocks\n self.use_se = use_se\n\n first_conv_out_channel = 16\n last_conv_out_channel = 1024\n self.last_conv_after_pooling = last_conv_after_pooling\n\n if architecture is None and channel_scales is None:\n fix_arch = False\n elif architecture is not None and channel_scales is not None:\n fix_arch = True\n assert len(architecture) == len(channel_scales)\n else:\n raise ValueError(\"architecture and scale_ids should be both None or not None.\")\n self.fix_arch = fix_arch\n\n assert input_size % 32 == 0\n assert len(self.stage_repeats) == len(self.stage_out_channels)\n\n with self.name_scope():\n self.features = nn.HybridSequential() if fix_arch else NasHybridSequential(prefix='features_')\n with self.features.name_scope():\n # first conv\n self.features.add(\n nn.Conv2D(first_conv_out_channel, in_channels=3, kernel_size=3, strides=2,\n padding=1, use_bias=True, prefix='first_conv_'),\n Activation('hard_swish' if self.use_se else 'relu')\n )\n\n # features\n input_channel = 16\n block_id = 0\n for stage_id in range(len(self.stage_repeats)):\n numrepeat = self.stage_repeats[stage_id]\n output_channel = self.stage_out_channels[stage_id]\n\n if self.use_se:\n act_name = 'hard_swish' if stage_id >= 1 else 'relu'\n block_use_se = True if stage_id >= 2 else False\n else:\n act_name = 'relu'\n block_use_se = False\n # create repeated blocks for current stage\n for i in range(numrepeat):\n stride = 2 if i == 0 else 1\n # TODO: update SE and Activation in ShuffleNetBlock and ShuffleNasBlock\n if fix_arch:\n block_choice = architecture[block_id]\n mid_channel = int(output_channel // 2 * channel_scales[block_id])\n # print(\"Mid channel: {}\".format(mid_channel))\n block_id += 1\n if block_choice == 0:\n self.features.add(ShuffleNetBlock(input_channel, output_channel, mid_channel, bn=bn,\n block_mode='ShuffleNetV2', ksize=3, stride=stride,\n use_se=block_use_se, act_name=act_name))\n elif block_choice == 1:\n self.features.add(ShuffleNetBlock(input_channel, output_channel, mid_channel, bn=bn,\n block_mode='ShuffleNetV2', ksize=5, stride=stride,\n use_se=block_use_se, act_name=act_name))\n elif block_choice == 2:\n self.features.add(ShuffleNetBlock(input_channel, output_channel, mid_channel, bn=bn,\n block_mode='ShuffleNetV2', ksize=7, stride=stride,\n use_se=block_use_se, act_name=act_name))\n elif block_choice == 3:\n self.features.add(ShuffleNetBlock(input_channel, output_channel, mid_channel, bn=bn,\n block_mode='ShuffleXception', ksize=3, stride=stride,\n use_se=block_use_se, act_name=act_name))\n else:\n raise NotImplementedError\n else:\n block_id += 1\n self.features.add(ShuffleNasBlock(input_channel, output_channel, stride=stride, bn=bn,\n max_channel_scale=self.candidate_scales[-1],\n 
use_all_blocks=self.use_all_blocks,\n use_se=block_use_se, act_name=act_name))\n # update input_channel for next block\n input_channel = output_channel\n assert block_id == sum(self.stage_repeats)\n\n # last conv\n if self.last_conv_after_pooling:\n # MobileNet V3 approach\n self.features.add(\n nn.GlobalAvgPool2D(),\n # no last SE for MobileNet V3 style\n nn.Conv2D(last_conv_out_channel, in_channels=input_channel, kernel_size=1, strides=1,\n padding=0, use_bias=True, prefix='conv_fc_'),\n # No bn for the conv after pooling\n Activation('hard_swish' if self.use_se else 'relu')\n )\n else:\n if self.use_se:\n # ShuffleNetV2+ approach\n self.features.add(\n nn.Conv2D(last_conv_out_channel, in_channels=input_channel, kernel_size=1, strides=1,\n padding=0, use_bias=True, prefix='last_conv_'),\n Activation('hard_swish' if self.use_se else 'relu'),\n nn.GlobalAvgPool2D(),\n SE(last_conv_out_channel),\n nn.Conv2D(last_conv_out_channel, in_channels=last_conv_out_channel, kernel_size=1, strides=1,\n padding=0, use_bias=True, prefix='conv_fc_'),\n # No bn for the conv after pooling\n Activation('hard_swish' if self.use_se else 'relu')\n )\n else:\n # original Oneshot Nas approach\n self.features.add(\n nn.Conv2D(last_conv_out_channel, in_channels=input_channel, kernel_size=1, strides=1,\n padding=0, use_bias=True, prefix='last_conv_'),\n Activation('hard_swish' if self.use_se else 'relu'),\n nn.GlobalAvgPool2D()\n )\n\n # Dropout ratio follows ShuffleNetV2+ for se\n self.features.add(nn.Dropout(0.2 if self.use_se else 0.1))\n self.output = nn.HybridSequential(prefix='output_')\n with self.output.name_scope():\n self.output.add(\n nn.Conv2D(n_class, in_channels=last_conv_out_channel, kernel_size=1, strides=1,\n padding=0, use_bias=True),\n nn.Flatten()\n )\n\n def random_block_choices(self, num_of_block_choices=4, select_predefined_block=False, dtype='float32'):\n if select_predefined_block:\n block_choices = [0, 0, 3, 1, 1, 1, 0, 0, 2, 0, 2, 1, 1, 0, 2, 0, 2, 1, 3, 2]\n else:\n block_number = sum(self.stage_repeats)\n block_choices = []\n for i in range(block_number):\n block_choices.append(random.randint(0, num_of_block_choices - 1))\n return nd.array(block_choices).astype(dtype, copy=False)\n\n def random_channel_mask(self, select_all_channels=False, dtype='float32', mode='sparse', epoch_after_cs=maxsize,\n ignore_first_two_cs=True):\n \"\"\"\n candidate_scales = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0]\n mode: str, \"dense\" or \"sparse\". Sparse mode select # channel from candidate scales. Dense mode selects\n # channels between randint(min_channel, max_channel).\n \"\"\"\n assert len(self.stage_repeats) == len(self.stage_out_channels)\n\n # From [1.0, 1.2, 1.4, 1.6, 1.8, 2.0] to [0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0], warm-up stages are\n # not just 1 epoch, but 2, 3, 4, 5 accordingly.\n if 0 <= epoch_after_cs <= 23:\n epoch_delay = {0: 0,\n 1: 1,\n 2: 2,\n 3: 3,\n 4: 4, 5: 4, # warm up epoch: 2 [1.0, 1.2, ... 1.8, 2.0]\n 6: 5, 7: 5, 8: 5, # warm up epoch: 3 ...\n 9: 6, 10: 6, 11: 6, 12: 6, # warm up epoch: 4 ...\n 13: 7, 14: 7, 15: 7, 16: 7, 17: 7, # warm up epoch: 5 [0.4, 0.6, ... 
1.8, 2.0]\n 18: 8, 19: 8, 20: 8, 21: 8, 22: 8, 23: 8} # warm up epoch: 6, actually this stage is useless\n\n delayed_epoch_after_cs = epoch_delay[epoch_after_cs]\n else:\n delayed_epoch_after_cs = epoch_after_cs\n\n if ignore_first_two_cs:\n min_scale_id = 2\n else:\n min_scale_id = 0\n\n channel_mask = []\n channel_choices = []\n global_max_length = int(self.stage_out_channels[-1] // 2 * self.candidate_scales[-1])\n for i in range(len(self.stage_out_channels)):\n local_max_length = int(self.stage_out_channels[i] // 2 * self.candidate_scales[-1])\n local_min_length = int(self.stage_out_channels[i] // 2 * self.candidate_scales[0])\n for _ in range(self.stage_repeats[i]):\n if select_all_channels:\n local_mask = [1] * global_max_length\n else:\n local_mask = [0] * global_max_length\n # TODO: shouldn't random between min and max. But select candidate scales and return it.\n if mode == 'dense':\n random_select_channel = random.randint(local_min_length, local_max_length)\n # In dense mode, channel_choices is # channel\n channel_choices.append(random_select_channel)\n elif mode == 'sparse':\n\n # this is for channel selection warm up: channel choice ~ (8, 9) -> (7, 9) -> ... -> (0, 9)\n channel_scale_start = max(min_scale_id, len(self.candidate_scales) - delayed_epoch_after_cs - 2)\n channel_choice = random.randint(channel_scale_start, len(self.candidate_scales) - 1)\n random_select_channel = int(self.stage_out_channels[i] // 2 *\n self.candidate_scales[channel_choice])\n # In sparse mode, channel_choices is the indices of candidate_scales\n channel_choices.append(channel_choice)\n else:\n raise ValueError(\"Unrecognized mode: {}\".format(mode))\n for j in range(random_select_channel):\n local_mask[j] = 1\n channel_mask.append(local_mask)\n return nd.array(channel_mask).astype(dtype, copy=False), channel_choices\n\n def _initialize(self, force_reinit=True, ctx=mx.cpu()):\n for k, v in self.collect_params().items():\n if 'conv' in k:\n if 'weight' in k:\n if 'first' in k or 'output' in k or 'fc' in k or 'squeeze' in k or 'excitation' in k:\n v.initialize(mx.init.Normal(0.01), force_reinit=force_reinit, ctx=ctx)\n else:\n v.initialize(mx.init.Normal(1.0 / v.shape[1]), force_reinit=force_reinit, ctx=ctx)\n if 'bias' in k:\n v.initialize(mx.init.Constant(0), force_reinit=force_reinit, ctx=ctx)\n elif 'batchnorm' in k:\n if 'gamma' in k:\n v.initialize(mx.init.Constant(1), force_reinit=force_reinit, ctx=ctx)\n if 'beta' in k:\n v.initialize(mx.init.Constant(0.0001), force_reinit=force_reinit, ctx=ctx)\n if 'running' in k:\n v.initialize(mx.init.Constant(0), force_reinit=force_reinit, ctx=ctx)\n\n def hybrid_forward(self, F, x, full_arch, full_scale_mask, *args, **kwargs):\n x = self.features(x, full_arch, full_scale_mask)\n x = self.output(x)\n return x\n\n\nclass ShuffleNasOneShotFix(ShuffleNasOneShot):\n # Unlike its parent class, fix-arch model does not have the control of \"use_all_blocks\" and \"bn\"(for NasBN).\n # It should use the default False and nn.BatchNorm correspondingly.\n def __init__(self, input_size=224, n_class=1000, architecture=None, channel_scales=None,\n use_se=False, last_conv_after_pooling=False, merge_bn=False):\n \"\"\"\n scale_cand_ids = [6, 5, 3, 5, 2, 6, 3, 4, 2, 5, 7, 5, 4, 6, 7, 4, 4, 5, 4, 3]\n scale_candidate_list = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0]\n stage_repeats = [4, 4, 8, 4]\n len(scale_cand_ids) == sum(stage_repeats) == # feature blocks == 20\n \"\"\"\n super(ShuffleNasOneShotFix, self).__init__(input_size=input_size, n_class=n_class,\n 
architecture=architecture, channel_scales=channel_scales,\n use_se=use_se, last_conv_after_pooling=last_conv_after_pooling,\n merge_bn=merge_bn)\n\n def hybrid_forward(self, F, x, *args, **kwargs):\n x = self.features(x)\n x = self.output(x)\n return x\n\n\ndef get_shufflenas_oneshot(architecture=None, n_class=1000, scale_ids=None, use_all_blocks=False,\n use_se=False, last_conv_after_pooling=False, merge_bn=False):\n if architecture is None and scale_ids is None:\n # Nothing about architecture is specified, do random block selection and channel selection.\n net = ShuffleNasOneShot(n_class=n_class, use_all_blocks=use_all_blocks, bn=NasBatchNorm,\n use_se=use_se, last_conv_after_pooling=last_conv_after_pooling)\n elif architecture is not None and scale_ids is not None:\n # Create the specified structure\n if use_all_blocks:\n raise ValueError(\"For fixed structure, use_all_blocks should not be allowed.\")\n scale_list = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0]\n channel_scales = []\n for i in range(len(scale_ids)):\n # scale_ids = [6, 5, 3, 5, 2, 6, 3, 4, 2, 5, 7, 5, 4, 6, 7, 4, 4, 5, 4, 3]\n channel_scales.append(scale_list[scale_ids[i]])\n net = ShuffleNasOneShotFix(architecture=architecture, n_class=n_class, channel_scales=channel_scales,\n use_se=use_se, last_conv_after_pooling=last_conv_after_pooling, merge_bn=merge_bn)\n else:\n raise ValueError(\"architecture and scale_ids should both be None for supernet \"\n \"or both not None for fixed structure model.\")\n return net\n\n\nFIX_ARCH = True\nLAST_CONV_AFTER_POOLING = True\nUSE_SE = True\n\n\ndef main():\n if FIX_ARCH:\n architecture = [0, 0, 3, 1, 1, 1, 0, 0, 2, 0, 2, 1, 1, 0, 2, 0, 2, 1, 3, 2]\n scale_ids = [6, 5, 3, 5, 2, 6, 3, 4, 2, 5, 7, 5, 4, 6, 7, 4, 4, 5, 4, 3]\n net = get_shufflenas_oneshot(architecture=architecture, scale_ids=scale_ids,\n use_se=USE_SE, last_conv_after_pooling=LAST_CONV_AFTER_POOLING)\n else:\n net = get_shufflenas_oneshot(use_se=USE_SE, last_conv_after_pooling=LAST_CONV_AFTER_POOLING)\n\n \"\"\" Test customized initialization \"\"\"\n net._initialize(force_reinit=True)\n print(net)\n\n \"\"\" Test ShuffleNasOneShot \"\"\"\n test_data = nd.ones([5, 3, 224, 224])\n for step in range(1):\n if FIX_ARCH:\n test_outputs = net(test_data)\n net.summary(test_data)\n net.hybridize()\n else:\n block_choices = net.random_block_choices(select_predefined_block=False, dtype='float32')\n full_channel_mask, _ = net.random_channel_mask(select_all_channels=False, dtype='float32')\n test_outputs = net(test_data, block_choices, full_channel_mask)\n net.summary(test_data, block_choices, full_channel_mask)\n if FIX_ARCH:\n if not os.path.exists('./symbols'):\n os.makedirs('./symbols')\n net(test_data)\n net.export(\"./symbols/ShuffleNas_fixArch\", epoch=0)\n flops, model_size = get_flops()\n print(\"Last conv after pooling: {}, use se: {}\".format(LAST_CONV_AFTER_POOLING, USE_SE))\n print(\"FLOPS: {}M, # parameters: {}M\".format(flops, model_size))\n else:\n if not os.path.exists('./params'):\n os.makedirs('./params')\n net.save_parameters('./params/ShuffleNasOneshot-imagenet-supernet.params')\n print(test_outputs.shape)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"CanyonWind/Single-Path-One-Shot-NAS-MXNet","sub_path":"utils/oneshot_nas_network_nobn.py","file_name":"oneshot_nas_network_nobn.py","file_ext":"py","file_size_in_byte":18452,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"5"} +{"seq_id":"20365100824","text":"from model import db, Room, Game, User, 
role, deck_type, Adventurer, Equipment, Enemy, Deck, connect_to_db, Equipment_state, Equipment_defeats_enemy\nimport crud\nimport json\nimport server\nimport model\nfrom sqlalchemy.sql import func\nimport random\n\n##################################################\n# This file is to handle the crud actions on the #\n# database that is modeled in model.py           #\n##################################################\n\nmodel.connect_to_db(server.app)\n\n########## Room ############\n\ndef create_room(image, advent_id=1):\n    game = create_game(image, advent_id)\n    return Room(game_id=game.id)\n\ndef get_room_by_id(id):\n    # print(id)\n    return Room.query.get(id)\n\n########## Game #############\n\ndef create_game(image, advent_id=1):\n    game = Game(image=image, adventurer_id=advent_id)\n    db.session.add(game)\n    db.session.commit()\n    build_draw_deck(game.id)\n    build_starting_states(game.id)\n    return game\n\ndef set_active_user(user_id, room_id):\n    room = Room.query.filter(Room.id == room_id).first()\n    room.games.active_user = user_id\n    db.session.add(room)\n    db.session.commit() \n\ndef take_damage(game_id, damage):\n    game = Game.query.get(game_id)\n\n    game.damage += int(damage)\n    db.session.add(game)\n    db.session.commit()\n\n########## User #############\n\ndef create_user(room_id, role=role.Player):\n    return User(room_id=room_id, role=role)\n\ndef get_user_by_id(id):\n    return User.query.get(id)\n\ndef set_user_passed(id):\n    user = User.query.filter(User.id == id).first()\n    user.user_passed = True\n    db.session.add(user)\n    db.session.commit()\n\n    return True\n\ndef get_next_active_user(user_id, room_id):\n    users_in_room_not_passed = db.session.query(User.id).filter(User.room_id == room_id, User.user_passed == False).order_by(User.id).all()\n    # print(users_in_room_not_passed)\n    current_index = users_in_room_not_passed.index((user_id,))\n    ship_phase = False\n    if len(users_in_room_not_passed) <= 2:\n        ship_phase = True\n\n    if current_index + 1 == len(users_in_room_not_passed):\n        new_active_user = users_in_room_not_passed[0][0]\n    else:\n        new_active_user = users_in_room_not_passed[current_index + 1][0]\n\n    set_active_user(new_active_user, room_id)\n\n    return (new_active_user, ship_phase) \n\n######## Adventurer #########\n\ndef create_adventurer(name, health):\n    return Adventurer(name=name, health=health)\n\ndef get_adventurer_by_name(name):\n    return Adventurer.query.filter(Adventurer.name == name).first()\n\ndef get_total_hp(room):\n    game_id = room.game_id\n    advent_id = room.games.adventurer_id\n    \n    total_hp = 0\n    health = db.session.query(Adventurer.health).filter(Adventurer.id == advent_id).first()\n    print('adventurer HP: ' + str(health[0]))\n    total_hp += health[0]\n\n    active_equipments = get_all_active_equipment(game_id)\n\n    for item in active_equipments:\n        hp = item.equipments.hp\n        if hp != 0:\n            total_hp += hp\n\n    total_hp = total_hp - room.games.damage\n\n    print('Total hp:' + str(total_hp))\n\n    return total_hp\n\n######### Equipment #########\n\ndef create_equipment(name, advent_id, discription, hp):\n    return Equipment(name=name, adventurer_id=advent_id, discription=discription, hp=hp)\n\ndef get_equipment_by_adventurer_id_all(advent_id):\n    temp = Equipment.query.filter(Equipment.adventurer_id == advent_id).all()\n    # print(temp)\n    temp_list = []\n    for u in temp:\n        temp_list.append({'name':u.name, 'discription':u.discription, 'adventurer_id':u.adventurer_id}) \n    # print(temp_list)\n    return temp_list\n\ndef get_equipment_by_name(name):\n    equipment = Equipment.query.filter(Equipment.name == name).first()\n    
return equipment.id\n\n########### Enemy ###########\n\ndef create_enemy(name, strength):\n return Enemy(name=name, strength=strength)\n\ndef get_enemy_id_by_name(name):\n enemy = db.session.query(Enemy.id).filter(Enemy.name == name).first()\n return enemy[0]\n\n########### Deck ############\n\ndef create_deck(game_id, enemy_id, per_deck, deck_type):\n return Deck(game_id=game_id, enemy_id=enemy_id, in_deck=per_deck, deck_type=deck_type)\n\ndef build_draw_deck(game_id):\n with open('data/deck.json') as f:\n deck_data = json.loads(f.read())\n\n deck_list = []\n for item in deck_data:\n temp = crud.create_deck(game_id, item['enemy_id'], item['per_deck'], deck_type=deck_type.Draw)\n deck_list.append(temp)\n\n with server.app.app_context():\n db.session.add_all(deck_list)\n db.session.commit()\n\ndef get_random_card(game_id, d_type):\n cards = Deck.query.filter(Deck.game_id == game_id, Deck.deck_type == d_type ).all()\n # print(cards)\n\n if len(cards) >= 1:\n weights = []\n for card in cards:\n weights.append(card.in_deck)\n # print(weights)\n card = random.choices(cards, weights=weights)\n card = card[0]\n else:\n card = -1\n # print(card)\n return card\n\ndef remove_card(game_id, enemy_id, deck_type):\n card = Deck.query.filter(Deck.game_id == game_id,\n Deck.enemy_id == enemy_id,\n Deck.deck_type == deck_type).first()\n \n if card.in_deck <= 1:\n db.session.delete(card)\n else:\n card.in_deck -= 1\n db.session.commit()\n\ndef remove_card_all(game_id, enemy_id, deck_type):\n card = Deck.query.filter(Deck.game_id == game_id,\n Deck.enemy_id == enemy_id,\n Deck.deck_type == deck_type).first()\n \n db.session.delete(card)\n db.session.commit()\n\ndef add_card(game_id, enemy_id, deck_type):\n card = Deck.query.filter(Deck.game_id == game_id,\n Deck.enemy_id == enemy_id,\n Deck.deck_type == deck_type).first()\n \n if card == None:\n card = crud.create_deck(game_id, enemy_id, 1, deck_type=deck_type.Ship) \n db.session.add(card)\n else:\n card.in_deck += 1\n db.session.commit() \n\n return True\n\ndef cards_in_deck(game_id, deck_type):\n left = db.session.query(func.sum(Deck.in_deck)).filter(Deck.game_id == game_id, Deck.deck_type == deck_type).all()\n left = left[0][0]\n if left == None:\n left = 0\n print(left)\n\n return left\n\n########## Equipment_state ###########\n\ndef create_equipment_state(game_id, equipment_id):\n return Equipment_state(game_id=game_id, equipment_id=equipment_id)\n\ndef build_starting_states(game_id):\n equipments = Equipment.query.all()\n\n states_list = []\n for item in equipments:\n \n temp = crud.create_equipment_state(game_id, item.id)\n states_list.append(temp)\n\n with server.app.app_context():\n db.session.add_all(states_list)\n db.session.commit()\n\ndef discard_equipment(game_id, equipment_id):\n state = Equipment_state.query.filter(Equipment_state.game_id == game_id, \n Equipment_state.equipment_id == equipment_id).first()\n state.state = False\n\n db.session.add(state)\n db.session.commit()\n\n return True\n\ndef get_all_active_equipment(game_id):\n return Equipment_state.query.filter(Equipment_state.game_id == game_id, Equipment_state.state == True).all()\n\ndef get_active_equipment_by_enemy_id(game_id, enemy_id):\n game = Game.query.get(game_id)\n equipments = get_equipment_by_adventurer_id_all(game.adventurers.id)\n\n active_equipment_for_enemy = []\n for equipment in equipments:\n # print(equipment)\n equip_id = get_equipment_by_name(equipment['name'])\n if( len(get_enemies_by_equipment_id(equip_id, enemy_id)) >= 1):\n 
active_equipment_for_enemy.append(equipment)\n    \n    # print(active_equipment_for_enemy)\n\n    return active_equipment_for_enemy\n\n\n########## Equipment\\enemy interactions ###########\n\ndef create_equipment_enemy(equip_id, enemy_id):\n    return Equipment_defeats_enemy(equipment_id=equip_id, enemy_id=enemy_id)\n\ndef get_enemies_by_equipment_id(equip_id, enemy_id):\n    # print(equip_id)\n    return Equipment_defeats_enemy.query.filter(Equipment_defeats_enemy.equipment_id == equip_id,\n                                Equipment_defeats_enemy.enemy_id == enemy_id).all()\n\n\nif __name__ == '__main__':\n    from server import app\n    connect_to_db(app)","repo_name":"MatthewStebbins/Hackbright","sub_path":"crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":8314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5221614008","text":"def hasPath(graph, src, dst):\n\tqueue = [ src ]\n\n\twhile(len(queue) > 0):\n\t\tcurrent = queue.pop(0)\n\n\t\tif(current == dst):\n\t\t\treturn True\n\n\t\tfor neighbor in graph[current]:\n\t\t\tqueue.append(neighbor)\n\n\treturn False\n\n\n# graph = {\n#   'f': ['g', 'i'],\n#   'g': ['h'],\n#   'h': [],\n#   'i': ['g', 'k'],\n#   'j': ['i'],\n#   'k': []\n# }\n\n# print(hasPath(graph, 'f', 'k'))  # True","repo_name":"toticavalcanti/Structy-Learn-Codes","sub_path":"python/has-path/hasPathIteractive.py","file_name":"hasPathIteractive.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42515523936","text":"from django.urls import include, path\nfrom django.views.generic import TemplateView\nfrom search.views import *\n\nurlpatterns = [\n    path('buildings/all/', AllBuildingListView.as_view(), name='search_buildings_all'),\n    path('buildings/area/', AreaBuildingListView.as_view(), name='search_buildings_area'),\n\n    path('rooms/', SearchRoomListView.as_view(), name='search_rooms'),\n\n    path('', TemplateView.as_view(template_name='404.html'), name='search_index'),\n]\n","repo_name":"y-yamamoto-yworks/VasyworksMGR","sub_path":"src/vacancy_mgr/search/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"32819607234","text":"# coding: utf-8\n\n\"\"\"\n    PURE API 510\n\n    This is the Pure Web Service. Listed below are all available endpoints, along with a short description.
      In order to use the Pure Web Service, you must enter an API key. These are generated in the Administrator tab of Pure, and issued with a given set of available endpoints.
      To enter your API key and begin your use, press the Authorize button at the top of the page. You are then presented with two options for entering the API key: the first option is to use the API key in query format, and the second option is to use the API key in a header.
      For further documentation, see API Documentation.
      A new version of the API is released with each major version of Pure, and remains available for one year. This version is no longer available in Pure 5.14
      The old web service is deprecated, but still available here, and it will no longer be available in Pure 5.13\n\n OpenAPI spec version: 510\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass WSImpactEvidence(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'int',\n 'evidence_titles': 'list[WSLocalizedString]',\n 'types': 'list[WSClassification]',\n 'evidence_indicators': 'list[WSClassification]',\n 'evidence_summaries': 'list[WSLocalizedString]',\n 'evidence_contact_informations': 'list[WSEvidenceContactInformation]',\n 'period': 'WSCompoundDateRange',\n 'links': 'list[WSLink]',\n 'documents': 'list[WSDocument]'\n }\n\n attribute_map = {\n 'id': 'id',\n 'evidence_titles': 'evidenceTitles',\n 'types': 'types',\n 'evidence_indicators': 'evidenceIndicators',\n 'evidence_summaries': 'evidenceSummaries',\n 'evidence_contact_informations': 'evidenceContactInformations',\n 'period': 'period',\n 'links': 'links',\n 'documents': 'documents'\n }\n\n def __init__(self, id=None, evidence_titles=None, types=None, evidence_indicators=None, evidence_summaries=None, evidence_contact_informations=None, period=None, links=None, documents=None):\n \"\"\"\n WSImpactEvidence - a model defined in Swagger\n \"\"\"\n\n self._id = None\n self._evidence_titles = None\n self._types = None\n self._evidence_indicators = None\n self._evidence_summaries = None\n self._evidence_contact_informations = None\n self._period = None\n self._links = None\n self._documents = None\n\n if id is not None:\n self.id = id\n if evidence_titles is not None:\n self.evidence_titles = evidence_titles\n if types is not None:\n self.types = types\n if evidence_indicators is not None:\n self.evidence_indicators = evidence_indicators\n if evidence_summaries is not None:\n self.evidence_summaries = evidence_summaries\n if evidence_contact_informations is not None:\n self.evidence_contact_informations = evidence_contact_informations\n if period is not None:\n self.period = period\n if links is not None:\n self.links = links\n if documents is not None:\n self.documents = documents\n\n @property\n def id(self):\n \"\"\"\n Gets the id of this WSImpactEvidence.\n\n :return: The id of this WSImpactEvidence.\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"\n Sets the id of this WSImpactEvidence.\n\n :param id: The id of this WSImpactEvidence.\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def evidence_titles(self):\n \"\"\"\n Gets the evidence_titles of this WSImpactEvidence.\n\n :return: The evidence_titles of this WSImpactEvidence.\n :rtype: list[WSLocalizedString]\n \"\"\"\n return self._evidence_titles\n\n @evidence_titles.setter\n def evidence_titles(self, evidence_titles):\n \"\"\"\n Sets the evidence_titles of this WSImpactEvidence.\n\n :param evidence_titles: The evidence_titles of this WSImpactEvidence.\n :type: list[WSLocalizedString]\n \"\"\"\n\n self._evidence_titles = evidence_titles\n\n @property\n def types(self):\n \"\"\"\n Gets the types of this WSImpactEvidence.\n\n :return: The types of this WSImpactEvidence.\n :rtype: list[WSClassification]\n \"\"\"\n 
return self._types\n\n @types.setter\n def types(self, types):\n \"\"\"\n Sets the types of this WSImpactEvidence.\n\n :param types: The types of this WSImpactEvidence.\n :type: list[WSClassification]\n \"\"\"\n\n self._types = types\n\n @property\n def evidence_indicators(self):\n \"\"\"\n Gets the evidence_indicators of this WSImpactEvidence.\n\n :return: The evidence_indicators of this WSImpactEvidence.\n :rtype: list[WSClassification]\n \"\"\"\n return self._evidence_indicators\n\n @evidence_indicators.setter\n def evidence_indicators(self, evidence_indicators):\n \"\"\"\n Sets the evidence_indicators of this WSImpactEvidence.\n\n :param evidence_indicators: The evidence_indicators of this WSImpactEvidence.\n :type: list[WSClassification]\n \"\"\"\n\n self._evidence_indicators = evidence_indicators\n\n @property\n def evidence_summaries(self):\n \"\"\"\n Gets the evidence_summaries of this WSImpactEvidence.\n\n :return: The evidence_summaries of this WSImpactEvidence.\n :rtype: list[WSLocalizedString]\n \"\"\"\n return self._evidence_summaries\n\n @evidence_summaries.setter\n def evidence_summaries(self, evidence_summaries):\n \"\"\"\n Sets the evidence_summaries of this WSImpactEvidence.\n\n :param evidence_summaries: The evidence_summaries of this WSImpactEvidence.\n :type: list[WSLocalizedString]\n \"\"\"\n\n self._evidence_summaries = evidence_summaries\n\n @property\n def evidence_contact_informations(self):\n \"\"\"\n Gets the evidence_contact_informations of this WSImpactEvidence.\n\n :return: The evidence_contact_informations of this WSImpactEvidence.\n :rtype: list[WSEvidenceContactInformation]\n \"\"\"\n return self._evidence_contact_informations\n\n @evidence_contact_informations.setter\n def evidence_contact_informations(self, evidence_contact_informations):\n \"\"\"\n Sets the evidence_contact_informations of this WSImpactEvidence.\n\n :param evidence_contact_informations: The evidence_contact_informations of this WSImpactEvidence.\n :type: list[WSEvidenceContactInformation]\n \"\"\"\n\n self._evidence_contact_informations = evidence_contact_informations\n\n @property\n def period(self):\n \"\"\"\n Gets the period of this WSImpactEvidence.\n\n :return: The period of this WSImpactEvidence.\n :rtype: WSCompoundDateRange\n \"\"\"\n return self._period\n\n @period.setter\n def period(self, period):\n \"\"\"\n Sets the period of this WSImpactEvidence.\n\n :param period: The period of this WSImpactEvidence.\n :type: WSCompoundDateRange\n \"\"\"\n\n self._period = period\n\n @property\n def links(self):\n \"\"\"\n Gets the links of this WSImpactEvidence.\n\n :return: The links of this WSImpactEvidence.\n :rtype: list[WSLink]\n \"\"\"\n return self._links\n\n @links.setter\n def links(self, links):\n \"\"\"\n Sets the links of this WSImpactEvidence.\n\n :param links: The links of this WSImpactEvidence.\n :type: list[WSLink]\n \"\"\"\n\n self._links = links\n\n @property\n def documents(self):\n \"\"\"\n Gets the documents of this WSImpactEvidence.\n\n :return: The documents of this WSImpactEvidence.\n :rtype: list[WSDocument]\n \"\"\"\n return self._documents\n\n @documents.setter\n def documents(self, documents):\n \"\"\"\n Sets the documents of this WSImpactEvidence.\n\n :param documents: The documents of this WSImpactEvidence.\n :type: list[WSDocument]\n \"\"\"\n\n self._documents = documents\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if 
isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, WSImpactEvidence):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n","repo_name":"atbe/MSU-Scholar-Api-Client-Python","sub_path":"msu_scholars_api/models/ws_impact_evidence.py","file_name":"ws_impact_evidence.py","file_ext":"py","file_size_in_byte":10187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27793810291","text":"import numpy as np\nimport os\nimport pickle\nimport shutil\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pathlib import Path\nfrom pydot import *\nimport optuna\nfrom sklearn.metrics import f1_score, confusion_matrix, roc_auc_score, accuracy_score, recall_score\nfrom sklearn.model_selection import train_test_split\n\n\nROOT = Path('../../../../..')\nLOG_BASE_DIR = ROOT.joinpath('databases/ronmaishlos@staff.technion.ac.il/logs')\nCSV_DIR = ROOT.joinpath('databases/ronmaishlos@staff.technion.ac.il/processed_data_as_csv')\nSTFT_DIR = CSV_DIR.joinpath('stft')\n\ndef conv2d_block(input_layer, filters, kernel_size=3, dropout=0.25, pooling='max', pool_size=(1,2)):\n conv = keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, activation='relu', padding='same')(input_layer)\n conv = keras.layers.BatchNormalization()(conv)\n conv = keras.layers.ReLU()(conv)\n conv = keras.layers.AveragePooling2D(pool_size)(conv) if pooling == 'avg' else keras.layers.MaxPooling2D(pool_size)(conv)\n conv = keras.layers.Dropout(dropout)(conv)\n return conv\n\ndef conv1d_block(input_layer, filters, kernel_size=3, dropout=0.25, pooling='max', pool_size=2):\n conv = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, padding='same', dilation_rate=2)(input_layer)\n conv = keras.layers.BatchNormalization()(conv)\n conv = keras.layers.ReLU()(conv)\n conv = keras.layers.AveragePooling1D(pool_size, padding='same')(conv) if pooling == 'avg' else keras.layers.MaxPooling1D(pool_size, padding='same')(conv)\n conv = keras.layers.Dropout(dropout)(conv)\n return conv\n\ndef make_model(orig_input_shape, stft_input_shape, trial):\n num_classes = 4\n orig_input_layer = keras.layers.Input(orig_input_shape)\n stft_input_layer = keras.layers.Input(stft_input_shape)\n\n p1 = trial.suggest_float(\"dropout_1\", 0.0, 0.55)\n n_cnn_layers = trial.suggest_int(\"n_cnn_layers\", 4, 11)\n filter_expansion_factor = trial.suggest_int(\"filter_expansion_factor\", 1, 4)\n max_kernel_size = trial.suggest_int(\"max_kernel_size\", 8, 15)\n kernel_contraction_factor = trial.suggest_categorical(\"kernel_contraction_factor\", [2, 4])\n p2 = trial.suggest_float(\"dropout_2\", 0.0, 0.55)\n p3 = trial.suggest_float(\"dropout_2\", 0.0, 0.55)\n p4 = 
trial.suggest_float(\"dropout_2\", 0.0, 0.55)\n p5 = trial.suggest_float(\"dropout_2\", 0.0, 0.55)\n dropouts = [p1, p2, p2, p2, p3, p3, p3, p4 , p4, p5, p5]\n\n cnn = conv1d_block(orig_input_layer, filters=16, kernel_size=max_kernel_size, dropout=p1, pooling='avg', pool_size=4)\n for i in range(n_cnn_layers):\n n_filters_exp = ((i + 1) // filter_expansion_factor) + 4\n n_filters = min(2 ** n_filters_exp, 512)\n kernel_size = max(3, max_kernel_size - i * kernel_contraction_factor)\n cnn = conv1d_block(cnn, filters=n_filters, kernel_size=kernel_size,dropout=dropouts[i], pool_size=4)\n cnn = keras.layers.GlobalAveragePooling1D()(cnn)\n output_layer = keras.layers.Dense(num_classes, activation=\"softmax\")(cnn)\n\n return keras.models.Model(inputs=[orig_input_layer, stft_input_layer], outputs=output_layer)\n\ndef get_training_val_data(stft_name: str):\n sample_length = 21600\n num_of_samples = 5755\n stft_input_dir = STFT_DIR.joinpath(stft_name)\n\n x = np.array(pd.read_csv(CSV_DIR.joinpath('x_train.csv'), nrows = num_of_samples))[:, 0:sample_length]\n y = np.array(pd.read_csv(CSV_DIR.joinpath('y_train.csv'), nrows = num_of_samples))[0:num_of_samples,1]\n x = x.reshape((x.shape[0], x.shape[1], 1))\n y = y.reshape(num_of_samples, -1)\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 42)\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.25, random_state = 21)\n\n stft_train = np.load(stft_input_dir.joinpath('x_t'), allow_pickle=True)\n stft_val = np.load(stft_input_dir.joinpath('x_v'), allow_pickle=True)\n \n return x_train, x_val, stft_train, stft_val, y_train, y_val\n\n\ndef train_model(x_train, x_val, stft_train, stft_val, y_train, y_val, log_dir, trial):\n\n training_inputs = [x_train, stft_train]\n validation_inputs = [x_val, stft_val]\n model = make_model(orig_input_shape=x_train.shape[1:], stft_input_shape=stft_train.shape[1:], trial=trial)\n keras.utils.plot_model(model, to_file = log_dir.joinpath(\"architecture.png\"), show_shapes=True)\n epochs = 1000\n batch_size = 32\n callbacks = [keras.callbacks.ModelCheckpoint(log_dir.joinpath(\"best_model.h5\"), save_best_only=True, monitor=\"val_sparse_categorical_accuracy\"),\n keras.callbacks.ReduceLROnPlateau(monitor=\"val_loss\", factor=0.5, patience=4, min_lr=0.00000001),\n keras.callbacks.EarlyStopping(monitor=\"val_sparse_categorical_accuracy\", patience=20, verbose=1, restore_best_weights=True, mode='max'),\n optuna.integration.TFKerasPruningCallback(trial, \"val_loss\")]\n\n lr = trial.suggest_float(\"lr\", 1e-5, 1e-1, log=True) # log=True, will use log scale to interplolate b\n # lr = 0.006779359362044838\n optimizer_name = trial.suggest_categorical(\"optimizer\", [\"Adam\", \"RMSprop\", \"SGD\"])\n # optimizer_name = \"Adam\"\n optimizer = getattr(keras.optimizers, optimizer_name)(learning_rate=lr)\n model.compile (optimizer=optimizer ,loss=\"sparse_categorical_crossentropy\",\n metrics=['sparse_categorical_accuracy'])\n\n history = model.fit (training_inputs, y_train, batch_size=batch_size, epochs=epochs,\n callbacks=callbacks, validation_data=(validation_inputs, y_val), verbose=1,)\n\n y_pred_probs = model.predict(validation_inputs)\n y_pred = np.argmax(y_pred_probs, axis=1)\n f1 = \"%.3f\" % f1_score(y_val, y_pred, average='macro')\n (f1_0,f1_1,f1_2, f1_3) = f1_score(y_val, y_pred, average=None)\n f1_0 = \"%.3f\" % f1_0\n f1_1 = \"%.3f\" % f1_1\n f1_2 = \"%.3f\" % f1_2\n f1_3 = \"%.3f\" % f1_3\n confusion_m = confusion_matrix(y_val, y_pred)\n auc_score = 
\"%.3f\" % roc_auc_score(y_val, y_pred_probs, average='macro', multi_class='ovr')\n acc = accuracy_score(y_val, y_pred)\n recall =\"%.3f\" % recall_score(y_val, y_pred, average='macro')\n (recall_0, recall_1, recall_2, recall_3) = recall_score(y_val, y_pred, average=None)\n recall_0 = \"%.3f\" % recall_0\n recall_1 = \"%.3f\" % recall_1\n recall_2 = \"%.3f\" % recall_2\n recall_3 = \"%.3f\" % recall_3\n\n\n print(f'f1 score is: {f1}' )\n print(f'roc_auc_score is: {auc_score}')\n print(f'accuracy score is: {\"%.3f\" % acc}')\n print(f'recall_score is: {recall}')\n print(f'confusion matrix is:\\n {confusion_m}')\n print(f'f1_0 score is: {f1_0}' )\n print(f'f1_1 score is: {f1_1}' )\n print(f'f1_2 score is: {f1_2}' )\n print(f'f1_3 score is: {f1_3}' )\n print(f'recall_0 score is: {recall_0}')\n print(f'recall_1 score is: {recall_1}')\n print(f'recall_2 score is: {recall_2}')\n print(f'recall_3 score is: {recall_3}')\n\n return (history, f1, auc_score, acc, recall, f1_0, f1_1, f1_2, f1_3, recall_0, recall_1, recall_2, recall_3, confusion_m)\n\ndef log_results(results, log_dir):\n history, f1, auc_score, acc, recall, f1_0, f1_1, f1_2, f1_3, recall_0, recall_1, recall_2, recall_3, confusion_m = results\n for metric in [\"sparse_categorical_accuracy\", \"loss\"]:\n plt.figure()\n plt.plot(history.history[metric])\n plt.plot(history.history[\"val_\" + metric])\n plt.title(\"model \" + metric)\n plt.ylabel(metric, fontsize=\"large\")\n plt.xlabel(\"epoch\", fontsize=\"large\")\n plt.legend([\"train\", \"val\"], loc=\"best\")\n plt.savefig(log_dir.joinpath(metric + f'_figure.png'))\n plt.close()\n\n with open(log_dir.joinpath('metrics_log.txt'), 'w') as file:\n file.write('\\n')\n file.write(model_name + stft_name + '\\n')\n file.write('f1 score is: ' + str(f1) + '\\n')\n file.write('roc_auc_score is: ' + str(auc_score) + '\\n')\n file.write('accuracy score is: ' + str(acc) + '\\n')\n file.write('recall_score is: ' + str(recall) + '\\n')\n file.write('f1_0 score is: ' + str(f1_0) + '\\n')\n file.write('f1_1 score is: ' + str(f1_1) + '\\n')\n file.write('f1_2 score is: ' + str(f1_2) + '\\n')\n file.write('f1_3 score is: ' + str(f1_3) + '\\n')\n file.write('recall_0 score is: ' + str(recall_0) + '\\n')\n file.write('recall_1 score is: ' + str(recall_1) + '\\n')\n file.write('recall_2 score is: ' + str(recall_2) + '\\n')\n file.write('recall_3 score is: ' + str(recall_3) + '\\n')\n file.write('confusion_matrix:\\n')\n for line in range(4):\n file.write(f'{confusion_m[line]}\\n')\n\n \ndef objective(trial):\n log_dir = log_sub_dir.joinpath(str(trial.number)) #joinpath('trial.number')\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n results = train_model(x_train, x_val, stft_train, stft_val, y_train, y_val, log_dir, trial)\n log_results(results, log_dir)\n return results[3] # acc\n\nif __name__ == '__main__':\n stfts = [(128, 128)]\n model_name = 'optuna_cnn_only_5_dropouts'\n log_sub_dir = LOG_BASE_DIR.joinpath(model_name)\n if not os.path.exists(log_sub_dir):\n os.makedirs(log_sub_dir)\n else:\n raise ValueError(f'trying to override {log_sub_dir}')\n stft_name = f'stft_128_128'\n x_train, x_val, stft_train, stft_val, y_train, y_val = get_training_val_data(stft_name)\n\n study = optuna.create_study(direction=\"maximize\", sampler=optuna.samplers.TPESampler(), pruner=optuna.pruners.HyperbandPruner())\n study.optimize(objective, n_trials=200)\n pruned_trials = study.get_trials(deepcopy=False, states=[optuna.trial.TrialState.PRUNED])\n complete_trials = study.get_trials(deepcopy=False, 
states=[optuna.trial.TrialState.COMPLETE])\n\n print(\"Study statistics: \")\n print(\" Number of finished trials: \", len(study.trials))\n print(\" Number of pruned trials: \", len(pruned_trials))\n print(\" Number of complete trials: \", len(complete_trials))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: \", trial.value)\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))\n \n shutil.copy('out.log', log_sub_dir)\n with open(log_sub_dir.joinpath('study_pkl'), 'wb') as f:\n pickle.dump(study, f)\n\n optuna.visualization.plot_param_importances(study).show()\n","repo_name":"ronm100/OSA-prediction-project","sub_path":"hyperparam_opt.py","file_name":"hyperparam_opt.py","file_ext":"py","file_size_in_byte":10392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71263512151","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis file contains the setup of the neuronal network running the Husky experiment with neuronal image recognition\n\"\"\"\n# pragma: no cover\n\nimport nest\nfrom nrp_core.engines.nest_json import RegisterDataPack, CreateDataPack\n\nSENSORPARAMS = {'E_L': -60.5,\n 'C_m': 25.0,\n 'g_L': 25.0 / 10.,\n 't_ref': 10.0,\n 'tau_syn_ex': 2.5,\n 'tau_syn_in': 2.5,\n 'E_ex': 0.0,\n 'E_in': -75.0,\n 'V_th': -60.0,\n 'V_reset': -60.5,\n 'V_m': -60.5}\n\nGO_ON_PARAMS = {'E_L': -60.5,\n 'C_m': 25.0,\n 'g_L': 25.0 / 10.,\n 'E_ex': 0.0,\n 'E_in': -75.0,\n 'V_reset': -61.6,\n 'V_th': -60.51,\n 't_ref': 10.0,\n 'tau_syn_ex': 2.5,\n 'tau_syn_in': 2.5,\n 'V_m': -60.5}\n\nLEAKY_PARAMS = {\n 'V_th': 1e10,\n 'C_m': 1000.0,\n 'tau_m': 10.0,\n 'tau_syn_ex': 2.,\n 'tau_syn_in': 2.,\n 'E_L': 0.0,\n 'V_reset': 0.0,\n 't_ref': 0.1,\n 'I_e': 0.0\n}\n\nnest.set_verbosity(\"M_WARNING\")\nnest.ResetKernel()\n\npopulation = nest.Create('iaf_cond_alpha', 8)\nnest.SetStatus(population[0:5], SENSORPARAMS)\nnest.SetStatus(population[5:6], GO_ON_PARAMS)\nnest.SetStatus(population[6:8], SENSORPARAMS)\n\n# Shared Synapse Parameters\nnest.CopyModel('tsodyks_synapse', 'base_synapse', {'U': 1.0, 'tau_rec': 1.0, 'tau_fac': 1.0})\n\n# Synaptic weights\nWEIGHT_RED_TO_ACTOR = 1.5e-1\nWEIGHT_RED_TO_GO_ON = 1.2 # or -1.2e-3?\nWEIGHT_GREEN_BLUE_TO_ACTOR = 1.05e-1\nWEIGHT_GO_ON_TO_RIGHT_ACTOR = 1.4e-1\nDELAY = 0.1\n\n# Connect neurons\nCIRCUIT = population\n\nSYN = {'synapse_model': 'base_synapse', 'weight': WEIGHT_RED_TO_ACTOR, 'delay': DELAY}\nnest.Connect(CIRCUIT[2:3], CIRCUIT[7:8], 'all_to_all', SYN)\nnest.Connect(CIRCUIT[3:4], CIRCUIT[6:7], 'all_to_all', SYN)\n\nSYN = {'synapse_model': 'base_synapse', 'weight': -WEIGHT_RED_TO_GO_ON, 'delay': DELAY}\nnest.Connect(CIRCUIT[0:2], CIRCUIT[4:5], 'all_to_all', SYN)\nnest.Connect(CIRCUIT[0:2], CIRCUIT[5:6], 'all_to_all', SYN)\n\nSYN = {'synapse_model': 'base_synapse', 'weight': WEIGHT_GREEN_BLUE_TO_ACTOR, 'delay': DELAY}\nnest.Connect(CIRCUIT[4:5], CIRCUIT[7:8], 'all_to_all', SYN)\n\nSYN = {'synapse_model': 'base_synapse', 'weight': WEIGHT_GO_ON_TO_RIGHT_ACTOR, 'delay': DELAY}\nnest.Connect(CIRCUIT[5:6], CIRCUIT[7:8], 'all_to_all', SYN)\n\n# Left side poisson generator\nlpg = CreateDataPack('lpg', 'poisson_generator')\n\n# Right side poisson generator\nrpg = CreateDataPack('rpg', 'poisson_generator')\n\n# Go poisson generator\ngpg = CreateDataPack('gpg', 'poisson_generator')\n\n# Connect datapacks\nnest.Connect(lpg, CIRCUIT[slice(0, 3, 2)])\nnest.Connect(rpg, CIRCUIT[slice(1, 4, 2)])\nnest.Connect(gpg, CIRCUIT[4])\n\n# Create and connect leaky 
integrator cells\nleaky_cells = nest.Create('iaf_psc_exp', 2, LEAKY_PARAMS)\nnest.SetStatus(leaky_cells, {'V_m': LEAKY_PARAMS['E_L']})\n\nnest.Connect(CIRCUIT[6],\n leaky_cells[0],\n conn_spec='all_to_all',\n syn_spec={'synapse_model': 'static_synapse', 'weight': 10.0, 'delay': 0.1})\n\nnest.Connect(CIRCUIT[7],\n leaky_cells[1],\n conn_spec='all_to_all',\n syn_spec={'synapse_model': 'static_synapse', 'weight': 10.0, 'delay': 0.1})\n\n# Register wheel outputs\nRegisterDataPack('actors', leaky_cells)\n\n# Simulate\n# sd = nest.Create('spike_recorder')\n# nest.Connect(CIRCUIT[6], sd)\n#\n# x = []\n# v = []\n# v2 = []\n# v3 = []\n# v4 = []\n# for i in range(100):\n# nest.Simulate(10)\n# x.append(i * 10)\n# v.append(CIRCUIT[6].get('V_m'))\n# v2.append(CIRCUIT[7].get('V_m'))\n# v3.append(leaky_cells[0].get('V_m'))\n# v4.append(leaky_cells[1].get('V_m'))\n#\n# # nest.raster_plot.from_datapack(sd, hist=True)\n# # plt.plot(x,v)\n# # plt.plot(x,v2)\n# plt.plot(x,v3, 'b')\n# plt.plot(x,v4, 'r')\n# plt.show()\n","repo_name":"EloyRC/nrp-core","sub_path":"examples/templates/husky_braitenberg/braitenberg.py","file_name":"braitenberg.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71861534231","text":"import logging\nimport re\nimport urllib\n\nimport requests\nfrom django.core.management.base import NoArgsCommand\n\nfrom madness.models import Game\n\nlog = logging.getLogger(__name__)\n\n\nclass Command(NoArgsCommand):\n\thelp = \"Update Madness scores\"\n\n\tdef handle_noargs(self, **options):\n\t\turl = 'http://sports.espn.go.com/ncb/bottomline/scores'\n\t\tr = requests.get(url)\n\t\tc = urllib.unquote(r.text).decode(r.encoding)\n\t\tgames = Game.objects.filter(winner__isnull=True, team_1__isnull=False, team_2__isnull=False)\n\t\tfor g in games:\n\t\t\tteam_1 = re.escape(g.team_1.team.espn_id)\n\t\t\tteam_2 = re.escape(g.team_2.team.espn_id)\n\t\t\twin_1 = re.compile(r'%s \\d+\\s+\\^%s \\d+' % (team_2, team_1))\n\t\t\twin_2 = re.compile(r'\\^%s \\d+\\s+%s \\d+' % (team_2, team_1))\n\t\t\tif win_1.search(c) is not None:\n\t\t\t\tg.winner = g.team_1\n\t\t\t\tg.save()\n\t\t\t\tg.team_2.is_eliminated = True\n\t\t\t\tg.team_2.save()\n\t\t\telif win_2.search(c) is not None:\n\t\t\t\tg.winner = g.team_2\n\t\t\t\tg.save()\n\t\t\t\tg.team_1.is_eliminated = True\n\t\t\t\tg.team_1.save()\n\t\t\telse:\n\t\t\t\tpass\n","repo_name":"NOVO-Construction/django-march-madness","sub_path":"django-march-madness/madness/management/commands/update_scores.py","file_name":"update_scores.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12936899848","text":"import datetime\n\n\nclass StateTracker:\n def __init__(self, name, state_messages: dict):\n self.name = name\n self.state = None\n self._update_called = False\n self._state_messages = state_messages\n\n def update(self, new_state):\n msg = None\n new_state_str = str(new_state)\n\n if new_state != self.state or not self._update_called:\n msg = self._state_messages[new_state_str]\n self.state = new_state\n\n self._update_called = True\n return msg\n\n async def update_and_send_event(self, new_state, send_func):\n msg = self.update(new_state)\n if msg is not None:\n await send_func(\n {\"time\": datetime.datetime.now(), \"source\": self.name, \"event\": msg}\n )\n return 
msg\n","repo_name":"edgefarm/train-simulation","sub_path":"demo/usecase-2/monitoring/fleet-seat-info-monitor/src/state_tracker.py","file_name":"state_tracker.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12617023881","text":"import pandas as pd\nfrom pymongo import *\nfrom restructdata import *\nimport time\n\ndc = structCharacters()\ndd = structDirectors()\ndva = structVoiceActors()\n\n#-------------------------MONGODB-------------------------\n\n#Connection to DB\nclient = MongoClient()\ndb = client.open_disney\n\n#Creates collections\ndisneyC = db.disneyC\ndisneyD = db.disneyD\ndisneyVA = db.disneyVA\n\ndata_dc = dc.to_dict(orient = \"records\")\ndata_dd = dd.to_dict(orient = \"records\")\ndata_dva = dva.to_dict(orient = \"records\")\n\n#Drop documents if they're created\ndisneyC.drop()\ndisneyD.drop()\ndisneyVA.drop()\n\n#Drop indexes if they're created\ndisneyD.drop_indexes()\ndisneyVA.drop_indexes()\ndisneyC.drop_indexes()\n\n#Insert data into collections\ndisneyC.insert_many(data_dc)\ndisneyD.insert_many(data_dd)\ndisneyVA.insert_many(data_dva)\n\n#\n# Simple Queries:\n#\n\n#SELECT voice actors from the movie The Little Mermaid\nselect1 = disneyVA.find({'movie':\"The Little Mermaid\"}, { 'voice-actor': 1})\n#for s in select1:\n# print (s.get('voice-actor'))\n\n#SELECT characters who have more than one voice_actor\nselect2 = disneyVA.find({\"voice_actor2\" : { \"$ne\" : None}}, {\"character\" : 1})\n\n# for s1 in select2:\n# print (s1)\n\n#\n# Insert & Update\n#\n\n#insert into directors \"Stephen Hillenburg\" para movie \"Spongebob Squarepants\"\nins1 = disneyD.insert_one({'director':'Stephen Hillenburg','movie':'Spongebob Squarepants'})\n\n#update in characters in \"The Jungle Book\" villain from current to Balu\nupd = disneyC.update_one({\"movie_title\": \"The Jungle Book\"},{\"$set\":{\"villain\":\"Baloo Bear\"}})\n\n#\n# Complex Queries:\n#\n\n#SELECT heros from the movie which the directors name starts with \"B\" and it has more than 12 voice actors\nsel_comp1 = disneyC.aggregate([\n {\n # Join with director table\n \"$lookup\": {\n \"from\": \"disneyD\", # other table name\n \"localField\": \"movie_title\", # name of disneyD table field\n \"foreignField\": \"name\", # name of userinfo table field\n \"as\": \"disney_director\", # alias for userinfo table\n }\n },\n {\n \"$unwind\" : \"$disney_director\"\n }, \n {\n \"$match\": {\n \"disney_director.director\": { \"$regex\": \"^B\" } \n }\n },\n {\n \"$lookup\":{\n \"from\": \"disneyVA\", \n \"localField\": \"movie_title\", \n \"foreignField\": \"movie\",\n \"as\": \"disney_voiceactor\"\n },\n },\n {\n \"$unwind\" : \"$disney_voiceactor\"\n },\n {\n \"$group\":{\"_id\":\"$disney_voiceactor.movie\", \n \"count\":{\"$sum\":1},\n \"hero\" : {\"$first\" : \"$hero\"}\n }\n },\n { \n \"$match\":{\"count\":{\"$gt\":12}}\n },\n {\n \"$project\" : {\n \"hero\" : 1\n }\n }\n])\n\n# for s1 in sel_comp1:\n# print (s1.get(\"hero\"))\n\n#SELECT villains from the movie where the voice actor of the villain didn't work with the Director \"Ron Clements\"\nsel_comp2 = disneyC.aggregate([\n {\n \"$lookup\": {\n \"from\": \"disneyVA\", \n \"localField\": \"movie_title\", \n \"foreignField\": \"movie\",\n \"as\": \"disney_voiceactor\"\n },\n },\n {\n \"$unwind\" : \"$disney_voiceactor\"\n },\n {\n \"$match\" : { \n \"$expr\" : {\n \"$eq\" : [\"$disney_voiceactor.character\",\"$villain\"]}\n }\n },\n {\n \"$lookup\":{\n \"from\": \"disneyD\", \n 
\"localField\": \"movie_title\", \n \"foreignField\": \"name\",\n \"as\": \"disney_director\"\n },\n },\n {\n \"$unwind\" : \"$disney_director\"\n }, \n {\n \"$match\": {\n \"disney_director.director\": {\"$ne\": \"Ron Clements\"}\n }\n },\n {\n \"$project\" :\n {\n \"villain\" : 1\n }\n },\n])\n# for s2 in sel_comp2:\n# print(s2)\n\n\n#\n# 4. Indexes\n# \n\n# creation of the database, for indexing next\ndb_i = client.open_disney_index\n\n# creation of the collections\ndisneyC_i = db_i.disneyC_i\ndisneyD_i = db_i.disneyD_i\ndisneyVA_i = db_i.disneyVA_i\n\ndisneyC_i.drop()\ndisneyD_i.drop()\ndisneyVA_i.drop()\n\ndisneyC_i.insert_many(data_dc)\ndisneyD_i.insert_many(data_dd)\ndisneyVA_i.insert_many(data_dva)\n\n# Delete index if it exists one to correctly run the query\n\ndisneyD_i.drop_indexes()\ndisneyVA_i.drop_indexes()\ndisneyC_i.drop_indexes()\n\ndisneyVA_i.create_index( [(\"movie\" , TEXT), (\"character\", ASCENDING)])\n\ndisneyD_i.create_index( [(\"director\" , ASCENDING), (\"name\" , TEXT)])\n\ndisneyC_i.create_index( [(\"movie_title\", TEXT), (\"villain\", 1), (\"hero\", 1), (\"disney_voiceactor\",1)])\n\ndef performance(collection, query):\n time_i = time.time()\n mydoc2 = collection.find(query)\n time_f = time.time()\n print('Time: ', time_f - time_i)\n\n#select voice actors from the movie The Little Mermaid\nselect1query = { \"movie\":\"The Little Mermaid\", \"voice-actor\": 1 }\n\n#select the characters that have more than one voice actor\nselect2query = { \"voice_actor2\" : { \"$ne\" : None}, \"character\" : 1 }\n\nprint(\"select1query\")\nperformance(disneyVA, select1query)\nperformance(disneyVA_i, select1query)\n\nprint(\"select2query\")\nperformance(disneyVA, select2query)\nperformance(disneyVA_i, select2query)\n\ndef performanceAggregate(collection, query):\n time_i = time.time()\n mydoc2 = collection.aggregate(query)\n time_f = time.time()\n print('Time: ', time_f - time_i)\n\nsel_comp1query = [\n {\n # Join with director table\n \"$lookup\": {\n \"from\": \"disneyD\", # other table name\n \"localField\": \"movie_title\", # name of disneyD table field\n \"foreignField\": \"name\", # name of userinfo table field\n \"as\": \"disney_director\", # alias for userinfo table\n }\n },\n {\n \"$unwind\" : \"$disney_director\"\n },\n {\n \"$lookup\":{\n \"from\": \"disneyVA\", \n \"localField\": \"movie_title\", \n \"foreignField\": \"movie\",\n \"as\": \"disney_voiceactor\"\n },\n },\n {\n \"$unwind\" : \"$disney_voiceactor\"\n },\n {\n \"$group\":{\"_id\":\"$disney_voiceactor.movie\", \n\n \"count\":{\"$sum\":1},\n\n \"hero\" : {\"$first\" : \"$hero\"}\n\n }\n },\n { \n \"$match\":{\"count\":{\"$gt\":12},\n \"disney_director.director\": { \"$regex\": \"^B\" } \n }\n },\n {\n \"$project\" : {\n \"hero\" : 1\n }\n }\n]\n\n\nprint(\"sel_comp1\")\nperformanceAggregate(disneyC, sel_comp1query)\nperformanceAggregate(disneyC_i, sel_comp1query)\n\nsel_comp2query = [\n {\n \"$lookup\": {\n \"from\": \"disneyVA\", \n \"localField\": \"movie_title\", \n \"foreignField\": \"movie\",\n \"as\": \"disney_voiceactor\"\n },\n },\n {\n \"$unwind\" : \"$disney_voiceactor\"\n },\n {\n \"$match\" : { \n \"$expr\" : {\n \"$eq\" : [\"$disney_voiceactor.character\",\"$villain\"]}\n }\n },\n {\n \"$lookup\":{\n \"from\": \"disneyD\", \n \"localField\": \"movie_title\", \n \"foreignField\": \"name\",\n \"as\": \"disney_director\"\n },\n },\n {\n \"$unwind\" : \"$disney_director\"\n }, \n {\n \"$match\": {\n \"disney_director.director\": {\"$ne\": \"Ron Clements\"}\n }\n },\n {\n \"$project\" :\n {\n 
\"villain\" : 1\n }\n },\n]\n\nprint(\"sel_comp2\")\nperformanceAggregate(disneyC, sel_comp2query)\nperformanceAggregate(disneyC_i, sel_comp2query)\n\nclient.close()\n","repo_name":"andre279m/BDAdoptedPoopies","sub_path":"projMongoDB.py","file_name":"projMongoDB.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"35367443331","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nalleles.py\n\nPerform rudimentary novel allele inference on the Heather et al. TCRseq dataset\n\n\"\"\"\n\nimport os\nimport collections as coll\nimport functions as fxn\nimport sys\nsys.path.insert(0, fxn.supp_script_dir + 'autoDCR')\nimport inferTCR as infer\n\n\n__email__ = 'jheather@mgh.harvard.edu'\n__version__ = '0.1.1'\n__author__ = 'Jamie Heather'\n\nif __name__ == \"__main__\":\n scripts_dir = fxn.check_scripts_cwd()\n\n # First run autoDCR on each individual donor collapsed TCR repertoire\n dcr_dir = fxn.supp_script_dir + 'autoDCR/'\n rep_files = [x for x in os.listdir(fxn.int_heather_dir) if x.startswith('HV') and '.fasta' in x\n and 'merged' not in x]\n rep_files.sort()\n\n for fasta in rep_files:\n # Run autoDCR with allele detection mode enabled ...\n fxn.run_bash('python3 ' + dcr_dir + 'autoDCR.py -fq ' + fxn.int_heather_dir + fasta \\\n + ' -o ' + fxn.int_heather_dir + ' -dd ' + dcr_dir + ' -or forward -ad -jv')\n\n # ... and then infer potential novel TCRs\n ad_file = fasta.split('.')[0]\n fxn.run_bash('python3 ' + dcr_dir + 'inferTCR.py -in ' +\n fxn.int_heather_dir + ad_file + '_infer-alleles.tsv.gz' + ' -dd ' + dcr_dir)\n\n # Compile the potential inferred alleles\n data_files = [x for x in os.listdir(os.getcwd()) if\n x.startswith('HV') and x.endswith('infer-alleles_alleles.fasta')]\n data_files.sort()\n\n novel = coll.Counter()\n novel_lst = coll.defaultdict(list)\n seqs = coll.defaultdict()\n\n for df in data_files:\n with open(df, 'r') as in_file:\n for read_id, seq, qual in fxn.readfq(in_file):\n novel[read_id] += 1\n novel_lst[read_id].append(df)\n if read_id not in seqs:\n seqs[read_id] = seq\n elif seqs[read_id] != seq:\n raise ValueError(\"Mismatch between identified novel alleles with the same name!\")\n\n with open(dcr_dir + 'compiled-inferred.fasta', 'w') as out_file:\n for gene in seqs:\n out_file.write(infer.imgt_fastafy(gene + '~VARIABLE', seqs[gene], False))\n\n # Tidy up\n fxn.run_bash('tar cvfz allele-inference-files.tar.gz HV*; '\n 'rm HV*; '\n 'mv allele-inference-files.tar.gz ' + fxn.int_heather_dir)\n\n # Generate 'human-plus' autoDCR reference\n plus = 'human-plus.fasta'\n if 'compiled-inferred.fasta' in os.listdir(dcr_dir):\n fxn.run_bash('cat ' + dcr_dir + 'human.fasta ' + dcr_dir + 'compiled-inferred.fasta > ' +\n dcr_dir + plus)\n fxn.run_bash('python3 ' + dcr_dir + 'generate-tag-files.py -in ' + dcr_dir + plus)\n\n else:\n raise IOError(\"'compiled-inferred.fasta' not in \" + dcr_dir + \"\\nCode cannot proceed.\")\n\n # Also add to the stitchr additional-genes file\n fxn.run_bash('cat ' + dcr_dir + 'compiled-inferred.fasta >> ' +\n fxn.supp_script_dir + 'stitchr/Data/additional-genes.fasta')\n\n # Then the replotting.py script will use these data in the next step\n","repo_name":"JamieHeather/stitchr-paper-analysis","sub_path":"Scripts/alleles.py","file_name":"alleles.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"35053732743","text":"import turtle\nimport random\nwidth = 
800\nheight = 600\nunitsize = 20\nturtle.tracer(1,0)\nturtle.setup(width,height)\nturtle.hideturtle()\nturtle.penup()\n\n\n\nfood_list = []\nstep = 25\nbottom = -height/2 + 100\nturtle.register_shape('food-chicken.gif')\ndef create_food():\n    y_pos = height/2 - 50\n    min_x = -int(width/2/unitsize)+1\n    max_x = int(width/2/unitsize)-1\n    x_pos = random.randint(min_x,max_x)*unitsize\n    food = turtle.clone()\n    food.shape('food-chicken.gif')\n    food.goto(x_pos,y_pos)\n    food.showturtle()\n    food_list.append(food)\nfood_delay = 0\ndelay_num = 7\ndef falling_food():\n    global food_delay\n    food_destroy = []\n    for food in food_list:\n        x_pos = food.pos()[0]\n        y_pos = food.pos()[1]\n        if y_pos >= bottom:\n            y_pos = y_pos - step\n            food.goto(x_pos,y_pos)\n        else:\n            ind = food_list.index(food)\n            food_destroy.append(ind)\n\n    for ind in sorted(food_destroy, reverse=True):  # pop the highest index first so the remaining indices stay valid\n        old_food = food_list.pop(ind)\n        old_food.hideturtle()\n        del old_food\n    if food_delay <= delay_num:\n        food_delay += 1\n    else:\n        food_delay = 0\n        create_food()\n    turtle.ontimer(falling_food,100)\n\nfalling_food()\n","repo_name":"orr19-meet/meet2017y1final-proj","sub_path":"adam_fall.py","file_name":"adam_fall.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"}
+{"seq_id":"16773525800","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\n\n\nclass Net(torch.nn.Module):\n    def __init__(self, n_feature, n_hiden, n_outer):\n        super(Net, self).__init__()\n        self.fc1 = torch.nn.Linear(n_feature, n_hiden)\n        self.fc2 = torch.nn.Linear(n_hiden, n_hiden)\n        self.fc3 = torch.nn.Linear(n_hiden, n_outer)\n\n        self.norm_layer(self.fc1)\n        self.norm_layer(self.fc2, std=0.1)\n        self.norm_layer(self.fc3, std=0.1)\n\n    def forward(self, x):\n        x = torch.tanh(self.fc1(x))\n        x = torch.tanh(self.fc2(x))\n        x = self.fc3(x)\n        x = F.softmax(x, dim=-1)  # pass dim explicitly; the implicit default is deprecated\n        action_distribution = torch.distributions.Categorical(probs=x)\n        return action_distribution\n\n    @staticmethod\n    def norm_layer(layer, std=1.0, bias_constant=0.0):\n        torch.nn.init.orthogonal_(layer.weight, std)\n        torch.nn.init.constant_(layer.bias, bias_constant)\n\n\nclass PolicyGradient:\n    def __init__(\n        self,\n        actions_size,\n        feature_size,\n        learning_rate=0.001,  # learning rate of the policy network\n        reward_decay=0.98,  # reward discount factor\n        output_graph=False\n    ):\n        self.n_actions = actions_size\n        self.n_features = feature_size\n        self.alpha = learning_rate\n        self.gamma = reward_decay\n\n        self.observation_store = []\n        self.action_store = []\n        self.reward_store = []\n\n        self.network = Net(self.n_features, 128, self.n_actions)  # build the policy network\n        self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.alpha)\n\n    def choose_action(self, observation):\n        action_distri = self.network(observation)\n        action = action_distri.sample()\n        # alternative implementation:\n        # torch.Tensor(np.random.choice(range(self.n_actions), p=prob))\n        # print(prob, action)\n        return action\n\n    def store_transaction(self, s, a, r):\n        self.observation_store.append(s)\n        self.action_store.append(a)\n        self.reward_store.append(r)\n\n    def learn(self):\n        discounted_rewards = self._reward_to_go()\n        # training\n        self.optimizer.zero_grad()\n\n        for i in range(len(self.observation_store)):\n            # self.optimizer.zero_grad()\n            s = self.observation_store[i]\n            a = self.action_store[i]\n            dis_r = discounted_rewards[i]\n\n            distri = self.network(s)\n            loss = 0\n\n            loss += -distri.log_prob(a) * dis_r\n            # print(loss)  # debug output\n\n            loss.backward()\n            self.optimizer.step()\n        # self.optimizer.zero_grad()
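\n        # Note: zero_grad() runs only once above, so gradients accumulate across this loop and\n        # every step() applies the running sum of the per-step REINFORCE gradients (-log_prob * return).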
\n        reward_sum = torch.sum(torch.tensor(discounted_rewards).float())\n\n        # clear the episode data\n        self.observation_store = []\n        self.action_store = []\n        self.reward_store = []\n\n        return reward_sum\n\n    def _reward_to_go(self):\n        discounted_rewards = np.zeros_like(self.reward_store)\n        running_sum = 0\n\n        for i in reversed(range(0, len(self.reward_store))):\n            running_sum = running_sum * self.gamma + self.reward_store[i]\n            discounted_rewards[i] = running_sum\n\n        # normalize the rewards\n        discounted_rewards -= np.mean(discounted_rewards)\n        discounted_rewards /= np.std(discounted_rewards)\n        # print(discounted_rewards)\n\n        return discounted_rewards\n","repo_name":"leelewin/MDRL-Arithmetic","sub_path":"DRL/pg_torch.py","file_name":"pg_torch.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"12472207825","text":"import pandas as pd\nimport numpy as np\nfrom pathlib import Path\nfrom scipy import stats as scistats\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport b05_Init\n\ndef explore(target):\n    pd.set_option('display.max_columns', None)\n    desired_width = 400\n    pd.set_option('display.width', desired_width)\n    np.set_printoptions(linewidth=desired_width)\n\n\n    project = b05_Init.init(target)\n    prct2retain = project['prct2retain']\n    dirOut = project['output_dir']\n    dirOutExplore = dirOut + '/Data_exploration'\n    dirOutExplore = Path(dirOutExplore)\n    dirOutExplore.mkdir(parents=True, exist_ok=True)\n    # open stats and predictors\n    statsX = pd.read_pickle(dirOut + '/' + project['AOI'] + '_stats' + str(prct2retain) + '.pkl')\n    stats = pd.read_pickle(dirOut + '/' + project['AOI'] + '_stats.pkl')\n    timerange = project['timeRange']\n    stats = stats.drop(stats[stats['Year'] < timerange[0]].index)\n    features = pd.read_pickle(dirOut + '/' + project['AOI'] + '_pheno_features4scikit.pkl')\n\n    # For each crop (1,2,3) get the 90% regions\n    cs = statsX['Crop_ID', ''].unique()\n    for c in cs:\n        regionID_list = statsX[statsX['Crop_ID'] == c]['Region_ID']\n        region_names = statsX[statsX['Crop_ID'] == c]['AU_name']\n        crop_name = statsX.loc[statsX['Crop_ID'] == c].iloc[0]['Crop_name']\n        print('Crop ' + str(c) + ', ' + crop_name)\n        print(region_names)\n\n        y = stats[(stats['Region_ID'].isin(regionID_list)) & (stats['Crop_ID']==c)]\n        #y.head()\n        y = y.drop(['ASAP1_ID'], axis=1)\n        y = y.drop(['AU_name'], axis=1)\n        y = y.drop(['Region_ID'], axis=1)\n        z = pd.merge(y,features,how='left',left_on=['AU_code','Year'], right_on=['AU_code','YearOfEOS'])\n        #print(z.head())\n\n        # check if soil moisture is available\n        if 'SMP1' in z.columns:\n            df_data = z[['Area', 'Yield', 'Production', 'NDP1', 'NDminP1', 'NDmaxP1',\n                         'NDP2', 'NDminP2', 'NDmaxP2',\n                         'NDP3', 'NDminP3', 'NDmaxP3',\n                         'RadP1', 'RadP2', 'RadP3',\n                         'RainSumP1', 'RainSumP2', 'RainSumP3', 'TP1', 'TminP1', 'TmaxP1', 'TP2', 'TminP2',\n                         'TmaxP2', 'TP3', 'TminP3', 'TmaxP3',\n                         'SMP1', 'SMP2', 'SMP3']]\n        else:\n            df_data = z[['Area', 'Yield', 'Production', 'NDP1', 'NDminP1', 'NDmaxP1',\n                         'NDP2', 'NDminP2', 'NDmaxP2',\n                         'NDP3', 'NDminP3', 'NDmaxP3',\n                         'RadP1', 'RadP2', 'RadP3',\n                         'RainSumP1', 'RainSumP2', 'RainSumP3', 'TP1', 'TminP1', 'TmaxP1', 'TP2', 'TminP2',\n                         'TmaxP2', 'TP3', 'TminP3', 'TmaxP3']]\n        # correlation\n        correlation_data = df_data.corr()\n        # p-value\n        def corr_sig(df=None):\n            p_matrix = np.zeros(shape=(df.shape[1], df.shape[1]))\n            for col in df.columns:\n                for col2 in df.drop(col, axis=1).columns:\n                    x = df[col].values\n                    y = df[col2].values
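\n                    # Note: mask the rows where either series is NaN so pearsonr only sees paired\n                    # observations (a pairwise-complete correlation, matching df.corr() above).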
\n                    nas = np.logical_or(np.isnan(x), np.isnan(y))\n                    _, p = scistats.pearsonr(x[~nas], y[~nas])\n                    p_matrix[df.columns.to_list().index(col), df.columns.to_list().index(col2)] = p\n            return p_matrix\n\n        p_values = corr_sig(df_data)\n        mask = np.invert(np.tril(p_values < 0.05))\n        # note: seaborn hides the correlations where the boolean value is True in the mask\n\n        #mask = np.zeros_like(correlation_data, dtype=np.bool)\n        #mask[np.triu_indices_from(mask)] = True\n\n        f, ax = plt.subplots(figsize=(11, 9))\n        plt.show(block = False)\n        # Use a diverging colormap\n        cmap = \"vlag\"\n\n        # Draw the heatmap with the mask and correct aspect ratio\n        sns.heatmap(correlation_data, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,\n                    square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n        plt.title(crop_name.values[0], fontsize=20, y=1)\n        t = ', '.join(region_names.values.flatten())\n        # plt.text(0.7, 0.93, t, ha='left', wrap=True, fontsize=10, transform=ax.transAxes)\n        fn = project['AOI'] + '_'+crop_name.values[0]+'_corr.png'\n        plt.savefig(dirOutExplore / fn)\n        plt.close()\n        # loop on the pheno phases to show paired plots\n        for pp in range(1,4,1):\n            #f, ax = plt.subplots(figsize=(11, 9))\n            columns = df_data.columns\n            # extract phase pp\n            columns_pp=[s for s in columns if str(pp) in s]\n            for s in ['Area', 'Yield', 'Production']:\n                columns_pp.insert(0, s)\n            df_data_pp = df_data[columns_pp]\n            # use .loc[rows, cols] instead of chained indexing, which does not reliably write back\n            df_data_pp.loc[:, 'Production'] = df_data_pp['Production'].div(1000)\n            df_data_pp.loc[:, 'Area'] = df_data_pp['Area'].div(1000)\n            rad_cols = [col for col in df_data_pp.columns if 'Rad' in col]\n            df_data_pp.loc[:, rad_cols] = df_data_pp[rad_cols].div(1000)\n            sns.set(font_scale=0.75)\n            sns.pairplot(data=df_data_pp,height=1,plot_kws={\"s\": 10},corner=True)\n            plt.subplots_adjust(bottom=0.05)\n            plt.subplots_adjust(left=0.05)\n            fn = project['AOI'] + '_' + crop_name.values[0] + '_phase'+str(pp)+'_paired_corr.png'\n            plt.savefig(dirOutExplore / fn)\n            plt.close()\n    print('ok')\n\n","repo_name":"ec-jrc/ml4cast-ml","sub_path":"preprocess/b70_data_exploration.py","file_name":"b70_data_exploration.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"}
+{"seq_id":"41466197971","text":"import tkinter as tk\nfrom tkinter import *\nimport sys  # needed for the sys.platform check in searchServers()\nimport socket\nimport threading\nimport struct\nimport time\nfrom tkinter import messagebox\n\n\nwindow = tk.Tk()\nwindow.geometry(\"450x590\")\nwindow.resizable(False, False)\n\n##graphics\ncanvas = Canvas(window, width = 612, height = 588)\ncanvas.pack()\nimg = PhotoImage(file=\"media/cli.png\")\ncanvas.create_image(0,0, anchor=NW, image=img)\n\n##list - favorited servers\n##frame\nframe = Frame(window)\nframe.pack()\nframe.place(x=20,y=150)\n\nlabel = Label(window,text = \"Zapisane pokoje\")\nlabel.place(x=50,y=100)\n\n##listbox + scrollbar in frame\nlistbox = Listbox(frame, height = 20, width = 20)\nlistbox.pack(side=\"left\", fill=\"y\")\nlistbox.pack()\nscrollbar = Scrollbar(frame, orient=\"vertical\")\nscrollbar.pack(side=\"right\", fill=\"y\")\n\n#scrolling\nlistbox.config(yscrollcommand = scrollbar.set)\nscrollbar.config(command = listbox.yview)\n\n\n##list - available lan servers\n##frame\nframe1 = Frame(window)\nframe1.pack()\nframe1.place(x=200,y=150)\n\nlabel1 = Label(window,text = \"Dostępne pokoje (Lan)\")\nlabel1.place(x=210,y=100)\n\n##listbox + scrollbar in frame\nlistbox1 = Listbox(frame1, height = 20, width = 20)\nlistbox1.pack(side=\"left\", 
fill=\"y\")\nlistbox1.pack()\nscrollbar1 = Scrollbar(frame1, orient=\"vertical\")\nscrollbar1.pack(side=\"right\", fill=\"y\")\n\n#scrolling\nlistbox1.config(yscrollcommand = scrollbar1.set)\nscrollbar1.config(command = listbox1.yview)\n\n#load preferences\npreferences_list = []\ntry:\n file = open('preferences.txt','r')\n fileread = file.read()\n preferences_list = fileread.split(\"#\")\nexcept: pass\n\n# Insert elements into the listbox\nfor values in range(10):\n listbox.insert(END, values)\n\navailable_servers = []\n\n# Lan Avialable Servers Multicast\n\nmark = 0\ndef searchServers():\n available_servers.clear()\n listbox1.delete(0,'end') #czysczenie listy\n MCAST_GRP = '224.1.1.1'\n MCAST_PORT = 5008\n \n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if sys.platform == 'win32': sock.bind(('',MCAST_PORT)) ##zmiana\n else: sock.bind((MCAST_GRP, MCAST_PORT)) \n mreq = struct.pack(\"4sl\", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n def receive():\n prev_data = \"\"\n global mark\n mark = 1\n try:\n while True:\n # For Python 3, change next line to \"print(sock.recv(10240))\"\n data = sock.recv(1024)\n if data in available_servers: pass\n if prev_data == data: break\n if data == \"\":break\n else:\n available_servers.append(data.decode())\n listbox1.insert(END, data)\n\n for thread in threading.enumerate(): \n print(thread.name)\n prev_data = data\n \n except: pass\n mark = 0\n \n receiveThread = threading.Thread(target=receive)\n receiveThread.name = \"receiveThread\"\n if mark == 0: receiveThread.start()\n\nsearchServers()\n \n##functions\ndef room_connect():\n for thread in threading.enumerate(): \n print(thread.name)\n\n\nselected = \"\"\ndef CurSelect(evt):\n value = str(listbox1.get(listbox1.curselection()))\n global selected\n selected = value \nlistbox1.bind('<>',CurSelect)\ndef room_connect2():\n global selected\n selected = selected.split(\"#\")\n roomIP = selected[1]\n roomIP = roomIP.replace(\"'\", \"\")\n if preferences_list == []: messagebox.showerror(title=\"Brak konfiguracji\", message=\"Najpierw musisz skonfigurować w opcjach nazwę użytkownika\")\n else:\n from chatGuest import joinRoom\n joinRoom(preferences_list[0],roomIP)\n\nroomIP_dircon = \"\"\ndef connect(roomIP_dircon):\n print(roomIP_dircon)\n print(\"d\")\n \"\"\"\n if preferences_list == []: messagebox.showerror(title=\"Brak konfiguracji\", message=\"Najpierw musisz skonfigurować w opcjach nazwę użytkownika\")\n else:\n try:\n from chatGuest import joinRoom\n print(roomIP)\n joinRoom(preferences_list[0],roomIP)\n except: pass\n \"\"\"\n\ndef direct_connect():\n window = tk.Toplevel()\n window.geometry(\"350x150\")\n window.configure(background='blue')\n window.title(\"Dołącz do pokoju\")\n label1 = Label(window,text = \"Wpisz adres IP:\")\n label1.place(x=20,y=50)\n TextIP = Text(window, height = 1, width = 15)\n TextIP.place(x=200,y=50)\n def c():\n address_ip = TextIP.get(\"1.0\",'end-1c')\n address_ip = address_ip.replace(\"\\n\",\"\")\n if preferences_list == []: messagebox.showerror(title=\"Brak konfiguracji\", message=\"Najpierw musisz skonfigurować w opcjach nazwę użytkownika\")\n else:\n window.destroy()\n from chatGuest import joinRoom\n joinRoom(preferences_list[0],address_ip)\n \n \n Button5 = Button(window, text = \"Połącz\", command = c)\n Button5.place(x=250,y=100)\n # Return button bind (enter)\n def bind_enter(void): c()\n 
\n    window.bind('<Return>', bind_enter)\n    window.mainloop()\n\n\ndef add_room():\n    print(\"x\")\n\ndef preferences():\n    window = tk.Toplevel()\n    window.geometry(\"350x400\")\n    window.configure(background='blue')\n    window.title(\"Opcje\")\n    label1 = Label(window,text = \"Nazwa użytkownika:\")\n    label1.place(x=20,y=50)\n    TextName = Text(window, height = 1, width = 15)\n    TextName.place(x=200,y=50)\n    def insert_values():\n        TextName.delete('1.0', END)\n        file = open('preferences.txt','r')\n        fileread = file.read()\n        fileread = fileread.split(\"#\")\n        print(fileread)\n        TextName.insert(tk.END,fileread[0])\n        file.close()\n    try: insert_values()\n    except: pass\n    def save_preferences():\n        open('preferences.txt', 'w').close()\n        file = open('preferences.txt','w')\n        file.write(TextName.get(\"1.0\",'end-1c')+\"#\")\n        file.close()\n        insert_values()\n    def bind_enter(void): save_preferences()\n    # Return button bind (enter)\n    window.bind('<Return>', bind_enter)\n\n    Button1 = Button(window, text = \"Zapisz\", command = save_preferences)\n    Button1.place(x=250,y=340)\n\n    #mainloop\n    window.mainloop()\n\ndef help_window():\n    print(\"help\")\n\ndef author_info():\n    print(\"a\")\n\ndef host_room():\n    import chatHost\n\ndef close_app():\n    window.destroy()\n\n#Create Room Button\nnameZ = \"Utwórz własny pokój\"\nhorizontalName = \"\"\nfor i in range(len(nameZ)):\n    horizontalName += nameZ[i] + \"\\n\"\n\nButton1 = Button(window, text = \"Połącz\", command = room_connect)\nButton1.place(x=20,y=530)\nButton2 = Button(window, text = horizontalName , command = host_room)\nButton2.place(x=385,y= 160)\nButton3 = Button(window, text = \"Połącz\", command = room_connect2)\nButton3.place(x=202,y=530)\nButton4 = Button(window, text = \"Odśwież\", command = searchServers)\nButton4.place(x=283,y=530)\n\n##menubar\nmenubar = Menu(window,background='lightblue', foreground='black',\n               activebackground='#004c99', activeforeground='white')\n#filemenu\nfilemenu = Menu(menubar, tearoff=0,background='lightblue', foreground='black')\nfilemenu.add_command(label=\"Utwórz własny pokój\", command=host_room)\nfilemenu.add_command(label=\"Połącz z pokojem\", command=direct_connect)\nfilemenu.add_command(label=\"Dodaj pokój do zapisanych\", command=add_room)\nfilemenu.add_command(label=\"Opcje\", command=preferences)\nfilemenu.add_separator()\nfilemenu.add_command(label=\"Zamknij program\", command=close_app)\nmenubar.add_cascade(label=\"PablitoCHAT\", menu=filemenu)\n\n#helpmenu\nhelpmenu = Menu(menubar, tearoff=0,background='lightblue', foreground='black')\nhelpmenu.add_command(label=\"Instrukcja\", command=help_window)\nhelpmenu.add_command(label=\"Autor\", command=author_info)\nmenubar.add_cascade(label=\"Pomoc\", menu=helpmenu)\n\nwindow.config(menu=menubar)\n\n##mainloop\nwindow.mainloop()\n","repo_name":"zlord10z/PablitoChat","sub_path":"pablitoChat.py","file_name":"pablitoChat.py","file_ext":"py","file_size_in_byte":7962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"3836005679","text":"import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\n\n# Fetch the service account key JSON file contents\npath_to_key = \"/Users/williamlee/Desktop/CS411/Firebase/cs411-e12c0-firebase-adminsdk-z7icf-dbfcf166c2.json\"\ncred = credentials.Certificate(path_to_key)\n# Initialize the app with a service account, granting admin privileges\nfirebase_admin.initialize_app(cred, {\n    'databaseURL': \"https://cs411-e12c0-default-rtdb.firebaseio.com/\"\n})\n\n\n#test 
cases\nfood = {}\nfood[\"burger\"]=[\"100g\", \"5-star\"]\n\n#push and get\nref = db.reference(\"/\")\nref.set({\"food\":food})\nprint(ref.get(\"food\"))\n\n","repo_name":"WilliamLee101/Cs411project","sub_path":"backend/toFirebase.py","file_name":"toFirebase.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15764735548","text":"from tkinter import *\nfrom tkinter import messagebox\n\nclass Gui(Tk):\n \n def __init__(self):\n super().__init__()\n \n # set window attributes\n self.title(\"Song Maker\")\n \n # add components\n self.__add_heading_label()\n self.__add_instruction_label()\n self.__add_lyric_entry()\n self.__add_lyric_button()\n self.__add_instruction_label_2()\n self.__add_lyric_listbox()\n self.lyrics_index = 1\n \n def __add_heading_label(self):\n # create\n self.add_heading_label = Label()\n self.add_heading_label.grid(sticky=\"e\",row=0, column=0)\n # style\n self.add_heading_label.configure(font = \"Arial 30\",text=\"Song Maker\")\n \n def __add_instruction_label(self):\n # create\n self.add_instruction_label = Label()\n self.add_instruction_label.grid(sticky=\"W\",row=1, column=0) \n # style\n self.add_instruction_label.configure(font = \"Arial 18\",text=\"Lyrics to add:\") \n\n def __add_lyric_entry(self):\n # create\n self.add_lyric_entry = Entry()\n self.add_lyric_entry.grid(row=2, column=0)\n # style\n self.add_lyric_entry.configure(bd=4,width = 20,font = \"Arial 18\")\n\n def __add_lyric_button(self):\n # create\n self.add_lyric_button = Button()\n self.add_lyric_button.grid(row=2, column=1)\n\n # style\n self.add_lyric_button.configure(bd=4,text=\"Add\",width = 10)\n\n #events\n self.add_lyric_button.bind(\"\", self.__add_lyric_clicked)\n\n def __add_instruction_label_2(self):\n # create\n self.add_instruction_label_2 = Label()\n self.add_instruction_label_2.grid(sticky=\"W\",row=3, column=0) \n # style\n self.add_instruction_label_2.configure(font = \"Arial 18\",text=\"Lyrics:\")\n\n def __add_lyric_listbox(self):\n # create\n self.add_lyric_Listbox = Listbox()\n \n self.add_lyric_Listbox.grid(row=4, column=0, columnspan = 2)\n # style\n self.add_lyric_Listbox.configure(selectmode = \"multiple\",bd=4,width = 60)\n \n \n def __add_lyric_clicked(self, event):\n self.add_lyric_Listbox.insert(self.lyrics_index, self.add_lyric_entry.get())\n self.lyrics_index +=1\n\ngui = Gui()\ngui.mainloop() ","repo_name":"BaDMaN90/COM404","sub_path":"Events/2-listbox/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10632481764","text":"#!/usr/bin/env python3\n\nfrom experiments import *\nfrom railroad.geometry import *\n\n\ndef update(x, y):\n mouse = coords_to_pos(x, y)\n\n draw.clear()\n start = Vec(-400, -300)\n end = Vec(0, 0)\n point = mouse\n params = param_from_distance(point, start, end - start, 300)\n solutions = [start + (end - start)*t for t in params]\n\n print(params)\n\n draw.line(Vec(280, -380), Vec(580, -380), color=colors.grey)\n\n draw.line(start, end, color=colors.grey)\n draw.pt(start, colors.green)\n draw.pt(end, colors.red)\n\n draw.pt(point, color=colors.blue)\n\n for s in solutions:\n draw.pt(s, color=colors.yellow)\n\n@window.event\ndef on_mouse_drag(x, y, dx, dy, buttons, modifiers):\n update(x, y)\n\n@window.event\ndef on_mouse_press(x, y, buttons, modifiers):\n update(x, 
y)\n\n\nrun()\n","repo_name":"boberstarosta/railroad","sub_path":"experiments/t_from_distance.py","file_name":"t_from_distance.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"13955271884","text":"# Compute the sum of the integers from 1 to n (only positive n is accepted)\n# The while True / break combination: create an infinite loop, then break out of it\n\nwhile True:\n    n = int(input('n값을 입력하세요: '))\n    if n > 0: break # repeat until n is greater than 0\n\nsum = 0\n\nfor i in range(1, n + 1):\n    sum += i\n\nprint(f'1부터 {n}까지 정수의 합은 {sum}입니다.')","repo_name":"chrisYang256/algorithm_study","sub_path":"python_with_algorithm_basic/01_알고리즘_기초/01-2_반복하는_알고리즘/01-2-4_양수만_입력받기.py","file_name":"01-2-4_양수만_입력받기.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"28836386953","text":"# POST from browser: {\"name\": \"terror\", \"city\":\"Neuquen\"}\n# POST from terminal: curl -X POST http://localhost:8000/api/genre/ -d \"name=Pensamiento lateral\"\nfrom django.urls import include, path\n\nfrom rest_framework import routers\nfrom rest_framework.authtoken.views import obtain_auth_token\n\nfrom .views import *\n\nrouter = routers.DefaultRouter()\nrouter.register(r'altaBookInstances', BookInstanceViewSet)\n\n# specify URL Path for rest_framework\nurlpatterns = [\n    path('', include(router.urls)),\n    # path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n    # path('api-token-auth/', obtain_auth_token, name='api_token_auth'), # <-- And here\n    path('authors/', author_view, name='api-author-detail'),\n    path('languages/', language_view, name='api-languages'),\n    path('publishers/', publisher_view, name='api-publishers'),\n    path('publishers&languages/', publisher_language_list_view, name='api-combinados1'),\n    path('genres/', genre_list, name='api-genres'),\n    path('genres/<int:pk>/detail/', genre_detail, name='api-genre-detail'),\n    path('genres&languages/', genre_language_create, name='api-combinados2'),\n    path('booksRelatedList/', BookRelatedList.as_view(), name='api-bookgrelated'),\n    path('genresBooksList/', GenreBookRelatedList.as_view(), name='api-genrebookrelated'),\n    path('categories/detail/', CategoryDetailMix.as_view(), name='api-categoryDetailMix'),\n    path('categories/', CategoryDetail.as_view(), name='api-categoryDetail'),\n    path('bookInstances/', BookInstanceViewSet.as_view({'get': 'list'}), name='api-bookInstance1'),\n    path('tokens/', UserTokenLoginApiView.as_view()),\n]\n","repo_name":"nicolazgentile/API_locallibrary","sub_path":"locallibrary/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"29530242861","text":"# Date: 2023/3/1 10:55 PM\n# Define a Fruit class, then use it to create apple, orange and watermelon objects, each with its own color attribute\nclass Fruit:\n    def __init__(self, name, color):\n        self.name = name\n        self.color = color\n\n    def colors(self):\n        print(f'{self.name}的颜色是{self.color}')\n\n\napple = Fruit('苹果', '红色')\norange = Fruit('橘子', '橙色')\nwatermelon = Fruit('西瓜', '绿色和黑色')\napple.colors()\norange.colors()\nwatermelon.colors()\n","repo_name":"LornaLv/Qiuzhijiangtang-Python","sub_path":"task/task7/task7_3.py","file_name":"task7_3.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"}
+{"seq_id":"14575669277","text":"#Write a function called findMaxSales. 
findMaxSales will\n#have one parameter: a list of tuples. Each tuple in the\n#list will have two items: a string and an integer. The\n#string will represent the name of a movie, and the integer\n#will represent that movie's total ticket sales (in millions\n#of dollars).\n#\n#The function should return the movie from the list that\n#had the most sales. Return only the movie name, not the\n#full tuple.\n\n\n#Write your function here!\ndef findMaxSales(tuplesList):\n maximum = ('',0)\n for movie in tuplesList:\n if movie[1] > maximum[1]:\n maximum = movie;\n return maximum[0]\n\n#The lines below will test your code. They are not used for\n#grading, so feel free to modify them. If your code works\n#correctly, the output presently should be: Rogue One.\nmovieList = [(\"Finding Dory\", 486), (\"Captain America: Civil War\", 408), (\"Deadpool\", 363), (\"Zootopia\", 341), (\"Rogue One\", 529), (\"The Secret Life of Pets\", 368), (\"Batman v Superman\", 330), (\"Sing\", 268), (\"Suicide Squad\", 325), (\"The Jungle Book\", 364)]\n\n\nprint(findMaxSales(movieList))\n","repo_name":"butuzov/CS-CourseWork-EDx","sub_path":"GTx_CS1301/Pset_4.3_Lists/MovieSales.py","file_name":"MovieSales.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"973969509","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 29 12:33:21 2020\n\n@author: sarashs\nLAMMPS utils containing:\n \n Parameters:\n atomic_weight_dict\n \n Functions:\n \n lammps_input_creator : Which prepares the lammps inputfile.dat \n geofilecreator : Prepares lammps atom files [also known as geometry file] atom.data \n append_2structure_file : Avogadro xyz to input structure file\n gaussian_energy_extractor : extracts gaussian log file energies and converts them to kj/mol and eV\n\"\"\"\nimport re\nfrom lammps import lammps\n\natomic_weight_dict = {\"H\" : 1.0079, \"He\" : 4.0026,\"Li\" : 6.941,\"Be\" : 9.0122,\\\n \"B\" : 10.811, \"C\" : 12.0107,\"N\" : 14.0067,\"O\" : 15.9994,\\\n \"F\" : 18.9984, \"Ne\" : 20.1797, \"Na\" : 22.9897, \"Mg\" : 24.305,\\\n \"Al\" : 26.9815, \"Si\" : 28.0855, \"P\" : 30.9738, \"S\" : 32.065,\\\n \"Cl\" : 35.453, \"K\" : 39.0983, \"Ar\" : 39.948, \"Ca\" : 40.078,\\\n \"Sc\" : 44.9559, \"Ti\" : 47.867, \"V\" : 50.9415, \"Cr\" : 51.9961,\\\n \"Mn\" : 54.938, \"Fe\" : 55.845, \"Ni\" : 58.6934, \"Co\" : 58.9332,\\\n \"Cu\" : 63.546, \"Zn\" : 65.39, \"Ga\" : 69.723, \"Ge\" : 72.64,\\\n \"As\" : 74.9216, \"Se\" : 78.96, \"Br\" : 79.904, \"Kr\" : 83.8,\\\n \"Rb\" : 85.4678, \"Sr\" : 87.62, \"Y\" : 88.9059, \"Zr\" : 91.224,\\\n \"Nb\" : 92.9064, \"Mo\" : 95.94, \"Tc\" : 98.00, \"Ru\" : 101.07,\\\n \"Rh\" : 102.9055, \"Pd\" : 106.42, \"Ag\" : 107.8682, \"Cd\" : 112.411,\\\n \"In\" : 114.818, \"Sn\" : 118.71, \"Sb\" : 121.76, \"I\" : 126.9045,\\\n \"Te\" : 127.6, \"Xe\" : 131.293, \"Cs\" : 132.9055, \"Ba\" : 137.327,\\\n \"La\" : 138.9055, \"Hf\" : 178.49, \"Ta\" : 180.9479, \"W\" : 183.84,\\\n \"Pt\" : 195.078, \"Au\" : 196.9665, \"Hg\" : 200.59, \"Pb\" : 207.2,\\\n \"Bi\" : 208.9804, \"U\" : 238.0289}\n\natomic_number_dict = {'1' : 'H' , '8' : 'O' , '14' : 'Si' , '40' : 'Zr' }\n\ndef lammps_input_creator(Input_structure_file=\"Inputstructurefile.txt\",Input_forcefield='ffield.reax', min_style = 'cg',Forcefield_type = 'reax', file_path = \"\"):\n \"\"\"\n This function creates the lammps input file\n :param Input_structure_file:\n :param Input_forcefield:\n :return: Input_data_file_list\n \"\"\"\n 
Input_data_file_list=[]\n try:\n f=open(Input_structure_file,'U')\n l=f.readlines()\n for item in l:\n if '#structure ' in item:\n Input_data_file_list.append(item.replace('#structure ','').replace('\\n','').replace(' ','')+\".dat\")\n f.close()\n except IOError:\n print('An error occured trying to read the training data file.')\n f=open(Input_structure_file,'r')\n l=f.readlines()\n for item in l:\n atom_type=0\n if '#structure ' in item:\n LAMMPS_Data_file = file_path + l[l.index(item)].replace('#structure ','').replace('\\n','').replace(' ','')+\".data\"\n LAMMPS_Input_file = file_path + l[l.index(item)].replace('#structure ','').replace('\\n','').replace(' ','')+Input_forcefield.replace('.reax','')+\".dat\"\n s=open(LAMMPS_Input_file,'w')\n s.close()\n s=open(LAMMPS_Input_file,'a')\n #for n in lists: \n ######\n s.write('log ' + LAMMPS_Input_file.replace('.dat', '.log') + '\\n')\n s.write('# 1.- Inizialization #######################\\n')\n s.write('units real\\n')\n s.write(' #mass = grams/mole\\n')\n s.write(' #distance = Angstroms\\n')\n s.write(' #time = femtoseconds\\n')\n s.write(' #energy = kcal/mol\\n')\n s.write(' #velocity = Angstroms/femtosecond\\n')\n s.write(' #force = kcal/mol.Angstrom\\n')\n s.write(' #torque = kcal/mole\\n')\n s.write(' #temperature = degrees K\\n')\n s.write(' #pressure = atmospheres (0.1013 GPa)\\n')\n s.write(' #dynamic viscosity = Poise\\n')\n s.write(' #charge = multiple of electron charge (+1.0 is a proton)\\n')\n s.write(' #dipole = charge*Angstroms\\n')\n s.write(' #electric field = volts/Angstrom\\n')\n s.write('dimension 3\\n')\n s.write('processors * * *\\n')\n s.write('##\\n')\n s.write('boundary p p p\\n')\n s.write('atom_style charge\\n\\n# 2.- Atom definition ######################\\n\\n')\n s.write('atom_modify map hash\\n')\n s.write('read_data '+LAMMPS_Data_file+'\\n')\n s.write('\\n# 3.- Force-Field ##########################\\n\\n')\n number_of_atoms=int(l[l.index(item)+1])\n #Forcefield params\n if(Forcefield_type in 'reax'):\n s.write('pair_style reax/c NULL\\n')\n s.write('pair_coeff * * ' + file_path + Input_forcefield)\n #calculate number of atom types\n for item2 in l[(l.index(item)+3):]:\n if not ('#dimensions' in item2):\n atom_type=atom_type+1\n else:\n break\n #Add elements to the forcefield line\n for i in range(1,atom_type+1):\n s.write(' '+l[l.index(item)+2+i][0:2])\n s.write('\\n'+'fix 99 all qeq/reax 1 0.0 10.0 1.0e-6 reax/c\\n')\n s.write('neighbor 2.0 bin\\n')\n s.write('neigh_modify every 10 check yes\\n\\n')\n s.write('## 4.- MD & relax parameters ################\\n\\n')\n ######\n s.write('dump DUMP2 all custom 1000000 '+LAMMPS_Data_file.replace('.data','')+Input_forcefield.replace('.reax','')+'.lammpstrj'+' id type x y z q #this size \\n')\n s.write('thermo_style custom step etotal ke pe temp press pxx pyy pzz \\n')\n s.write('thermo 1000000\\n')\n #####fix restraints\n i=l.index(item) + atom_type + number_of_atoms + 6\n if '#restrain' in l[i-1]:\n s.write('fix holdem all restrain')\n while i -1 :\n item2 = item[index:] \n item2 = item2.replace(flag, '')\n index2 = item2.find(\"\\\\\")\n energy = float(item2[:index2])\n break\n try:\n S=open(output_files_path + energy_file_name + \".txt\",'a')\n except IOError:\n print('An output file cannot be created.')\n S.write(input_gaussian_file_name + \" \" + str(energy * 627.5094740631) + \" kcal/mol \" + str(energy * 27.211386245) + \" eV\" + \"\\n\")\n S.close() \n \ndef gaussian_xyz_extractor(input_files_path, input_gaussian_file_name):\n \"\"\" This function extracts 
the structures .xyz from a gaussian output (.log) file\"\"\"\n if \".log\" in input_gaussian_file_name[-4:]:\n input_gaussian_file_name = input_gaussian_file_name[:-4]\n try:\n f = open(input_files_path + input_gaussian_file_name + \".log\",'U')\n l = f.readlines()\n lines = f.read()\n f.close()\n ###finding where the atoms start\n locations = [i for i in range(len(l)) if l[i].find(\"Standard orientation:\") > -1]\n index = locations[-1] + 5\n except IOError:\n print('An error occured trying to read the log file.') \n try:\n S=open(input_files_path + input_gaussian_file_name + \".xyz\",'a')\n except IOError:\n print('The structure (xyz) file cannot be opened.') \n ###add the file manipulation code here\n to_be_written = []\n while index > -1:\n to_be_written.append(re.sub(\"\\s+\", \",\", l[index].strip()).split(','))\n if to_be_written[-1][0].isdigit():\n index += 1 \n else:\n del to_be_written[-1]\n index = -1\n S.write(str(len(to_be_written)) + '\\n\\n')\n for item in to_be_written:\n S.write(atomic_number_dict[item[1]] + ' 0 ' + item[3] + item[4] + item[5] + '\\n')\n S.close()\ndef energy_charge(x):\n \"\"\" This function outputs the charge and energy for a lammps simulations\n :param x: input file name with path\n :return energy:\n :return charge:\n \"\"\"\n lmp = lammps()\n lmp.file(x)\n energy = round(lmp.get_thermo(\"etotal\"), 5)\n charge = lmp.gather_atoms(\"charge\",1,1)\n lmp.close()\n return [energy, [charge[0], charge[1]]]","repo_name":"sarashs/Python-forcefield-optimizer","sub_path":"LAMMPS_Utils.py","file_name":"LAMMPS_Utils.py","file_ext":"py","file_size_in_byte":14322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"5851493626","text":"from loader import bot, states, show_date_states\nfrom exceptions.objects import ProcedureNotDeleted\nfrom keyboards.inline.add_another import if_add_another\nfrom keyboards.inline.choose_date_action import choose_action\nfrom data.objects import Visit\nfrom database.actions.procedures import (delete_procedure_from_db,\n create_procedure_in_db)\nfrom utils.visit import clean_state\nfrom database.actions.workday import get_workday_with_visits_from_db\n\n\ndef add_procedure(id):\n with bot.retrieve_data(id) as data:\n visit: Visit = data.get('visit')\n visit.procedures.add(**data)\n data[\"visit\"] = visit\n\n edit_visit = data.get('edit_visit')\n for_deletion = data.get('for_deletion')\n\n if edit_visit:\n edit_proc_in_visit(id, visit, for_deletion)\n else:\n bot.set_state(id, states.if_add_another)\n bot.send_message(id, \"Добавить еще процедуру?\",\n reply_markup=if_add_another())\n\n\ndef delete_procedure(chat_id: int | str, proc_id: int):\n with bot.retrieve_data(chat_id) as data:\n visit = data.get('visit')\n workday = data.get('workday')\n\n deleted_from_db = delete_procedure_from_db(proc_id)\n deleted_from_obj = visit.delete_procedure(proc_id)\n\n if not deleted_from_db and not deleted_from_obj:\n raise ProcedureNotDeleted(deleted_from_db, deleted_from_obj)\n\n workday.update_visit(visit)\n bot.add_data(chat_id, workday=workday, visit=visit)\n\n\ndef edit_proc_in_visit(id, visit: Visit, for_deletion) -> None:\n proc_to_add = visit.procedures.last()\n\n if for_deletion:\n try:\n delete_procedure(id, for_deletion.db_id)\n except ProcedureNotDeleted:\n raise\n\n create_procedure_in_db(visit.db_id, proc_to_add.to_dict())\n\n workday = get_workday_with_visits_from_db(id, visit.date)\n visit = workday.get_visit_by_id(visit.db_id)\n\n bot.add_data(id, workday=workday, 
visit=visit)\n\n text = 'Посещение успешно изменено.\\n' + workday.workday_report()\n bot.send_message(id, text, reply_markup=choose_action(workday.visits))\n bot.set_state(id, show_date_states.choose_action)\n\n clean_state(bot, id)\n","repo_name":"kdanylkov/salary_count_bot","sub_path":"utils/procedures.py","file_name":"procedures.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"34107062712","text":"import os, sys\nsys.path.append(os.path.dirname(__file__))\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\n\nclass KanezakiNet(chainer.Chain):\n def __init__(self, input_dim, nChannel=100, nConv=2):\n super(KanezakiNet, self).__init__()\n with self.init_scope():\n self.conv1 = L.Convolution2D(input_dim, nChannel, ksize=3, stride=1, pad=1)\n self.bn1 = L.BatchNormalization(nChannel)\n self.conv2 = chainer.ChainList()\n self.bn2 = chainer.ChainList()\n for i in range(nConv-1):\n self.conv2.append(L.Convolution2D(nChannel,nChannel,ksize=3,stride=1,pad=1))\n self.bn2.append(L.BatchNormalization(nChannel))\n self.conv3 = L.Convolution2D(nChannel,nChannel,ksize=1,stride=1,pad=0)\n self.bn3 = L.BatchNormalization(nChannel)\n\n self.nChannel = nChannel\n self.nConv = nConv\n\n def __call__(self,x):\n x = self.bn1(F.relu(self.conv1(x)))\n for i in range(self.nConv-1):\n x = self.bn2[i](F.relu(self.conv2[i](x)))\n x = self.bn3(self.conv3(x))\n return x\n","repo_name":"Obarads/Unsupervised_Segmentation_chainer","sub_path":"models/kanezaki_net.py","file_name":"kanezaki_net.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"71388193754","text":"__all__ = [\n 'sequential_substitution_2P',\n 'sequential_substitution_2P_functional',\n 'sequential_substitution_GDEM3_2P',\n 'dew_bubble_Michelsen_Mollerup',\n 'bubble_T_Michelsen_Mollerup',\n 'dew_T_Michelsen_Mollerup',\n 'bubble_P_Michelsen_Mollerup',\n 'dew_P_Michelsen_Mollerup',\n 'minimize_gibbs_2P_transformed',\n 'sequential_substitution_Mehra_2P',\n 'nonlin_2P',\n 'nonlin_n_2P',\n 'sequential_substitution_NP',\n 'minimize_gibbs_NP_transformed',\n 'TPV_HSGUA_guesses_1P_methods',\n 'TPV_solve_HSGUA_guesses_1P',\n 'sequential_substitution_2P_HSGUAbeta',\n 'sequential_substitution_2P_sat',\n 'TP_solve_VF_guesses',\n 'TPV_double_solve_1P',\n 'nonlin_2P_HSGUAbeta',\n # 'sequential_substitution_2P_double',\n 'cm_flash_tol',\n 'nonlin_2P_newton',\n 'dew_bubble_newton_zs',\n 'existence_3P_Michelsen_Mollerup',\n 'SS_VF_simultaneous',\n 'stability_iteration_Michelsen',\n 'assert_stab_success_2P',\n 'nonlin_equilibrium_NP',\n 'nonlin_spec_NP',\n 'TPV_solve_HSGUA_guesses_VL',\n 'solve_P_VF_IG_K_composition_independent',\n 'solve_T_VF_IG_K_composition_independent',\n 'incipient_phase_bounded_naive',\n 'generate_incipient_phase_boundaries_naive',\n 'incipient_phase_status', 'VLN_or_LN_boolean_check', 'VL_boolean_check',\n 'VLL_or_LL_boolean_check', 'VLL_boolean_check', 'LL_boolean_check',\n 'incipient_liquid_bounded_PT_sat',\n 'generate_pure_phase_boolean_check',\n 'flash_mixing_minimum_factor',\n 'flash_mixing_remove_overlap',\n 'incipient_phase_one_sided_secant',\n 'flash_phase_boundary_one_sided_secant',\n 'VLN_bubble_boolean_check',\n 'VL_dew_boolean_check',\n 'generate_phase_boundaries_naive',\n 'water_wet_bulb_temperature',\n 'water_dew_point_from_humidity',\n 'solve_water_wet_bulb_temperature_nested', \n 
'solve_water_wet_bulb_temperature_direct',\n]\n\nfrom math import copysign, log10\nfrom random import shuffle\n\nfrom chemicals.exceptions import TrivialSolutionError\nfrom chemicals.flash_basic import flash_ideal, flash_Tb_Tc_Pc, flash_wilson\nfrom chemicals.heat_capacity import Dadgostar_Shaw_integral, Dadgostar_Shaw_integral_over_T, Lastovka_Shaw_integral, Lastovka_Shaw_integral_over_T\nfrom chemicals.phase_change import SMK\nfrom chemicals.rachford_rice import Rachford_Rice_flash_error, Rachford_Rice_solution_LN2, Rachford_Rice_solutionN, flash_inner_loop\nfrom chemicals.utils import mixing_simple, normalize, property_mass_to_molar\nfrom chemicals.volume import COSTALD\nfrom fluids.constants import R\nfrom fluids.numerics import (\n NotBoundedError,\n OscillationError,\n UnconvergedError,\n assert_close,\n assert_close1d,\n best_bounding_bounds,\n bisect,\n brenth,\n damping_maintain_sign,\n exp,\n fsolve,\n isclose,\n isinf,\n isnan,\n jacobian,\n linspace,\n log,\n logspace,\n make_damp_initial,\n make_max_step_initial,\n minimize,\n newton,\n newton_minimize,\n newton_system,\n one_sided_secant,\n oscillation_checking_wrapper,\n py_solve,\n root,\n secant,\n translate_bound_f_jac,\n trunc_exp,\n trunc_log,\n SolverInterface,\n)\nfrom fluids.numerics import numpy as np\n\nfrom thermo.coolprop import CPiP_min\nfrom thermo.phases import IAPWS95, CEOSGas, CEOSLiquid, CoolPropPhase, Phase\nfrom thermo.phases.phase_utils import lnphis_direct\n\nLASTOVKA_SHAW = 'Lastovka Shaw'\nDADGOSTAR_SHAW_1 = 'Dadgostar Shaw 1'\nSTP_T_GUESS = '298.15 K'\nLAST_CONVERGED = 'Last converged'\nFIXED_GUESS = 'Fixed guess'\nIG_ENTHALPY = 'Ideal gas'\nIDEAL_LIQUID_ENTHALPY = 'Ideal liquid'\nWILSON_GUESS = 'Wilson'\nTB_TC_GUESS = 'Tb Tc'\nIDEAL_PSAT = 'Ideal Psat'\nPT_SS = 'SS'\nPT_SS_MEHRA = 'SS Mehra'\nPT_SS_GDEM3 = 'SS GDEM3'\nPT_NEWTON_lNKVF = 'Newton lnK VF'\nIDEAL_WILSON = 'Ideal Wilson'\nSHAW_ELEMENTAL = 'Shaw Elemental'\n\nPH_T_guesses_1P_methods = [LASTOVKA_SHAW, DADGOSTAR_SHAW_1, IG_ENTHALPY,\n IDEAL_LIQUID_ENTHALPY, FIXED_GUESS, STP_T_GUESS,\n LAST_CONVERGED]\nTPV_HSGUA_guesses_1P_methods = PH_T_guesses_1P_methods\n\ndef sequential_substitution_2P(T, P, V, zs, xs_guess, ys_guess, liquid_phase,\n gas_phase, maxiter=1000, tol=1E-13,\n trivial_solution_tol=1e-5, V_over_F_guess=0.5):\n # check_G=False, check_V=False, dZ_allow=0.1):\n\n xs, ys = xs_guess, ys_guess\n V_over_F = V_over_F_guess\n N = len(zs)\n cmps = range(N)\n\n err, err1, err2, err3 = 0.0, 0.0, 0.0, 0.0\n # G_old = None\n # V_over_F_old = V_over_F\n # restrained = 0\n # restrained_switch_count = 300\n error_increases = 0\n\n # Code for testing phis at zs\n l, g = liquid_phase, gas_phase\n if liquid_phase.T != T or liquid_phase.P != P:\n liquid_phase = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)\n if gas_phase.T != T or gas_phase.P != P:\n gas_phase = gas_phase.to_TP_zs(T=T, P=P, zs=ys)\n\n for iteration in range(maxiter):\n# g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)\n# l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)\n\n# l = liquid_phase.to(xs, T=T, P=P, V=V)\n# g = gas_phase.to(ys, T=T, P=P, V=V)\n# lnphis_g = g.lnphis()\n# lnphis_l = l.lnphis()\n lnphis_g = gas_phase.lnphis_at_zs(ys)\n lnphis_l = liquid_phase.lnphis_at_zs(xs)\n limited_Z = False\n\n try:\n Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] # K_value(phi_l=l, phi_g=g)\n except OverflowError:\n Ks = [trunc_exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] # K_value(phi_l=l, phi_g=g)\n\n V_over_F_old = V_over_F\n V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)\n# 
K_low, K_high = False, False\n# for zi, Ki in zip(zs, Ks):\n# if zi != 0.0:\n# if Ki > 1.0:\n# K_high = True\n# else:\n# K_low = True\n# if K_high and K_low:\n# break\n# if not (K_high and K_low):\n# raise TrivialSolutionError(\"Converged to trivial condition, all K same phase\",\n# comp_difference, iteration, err)\n# else:\n\n# if check_G:\n# V_over_F_G = min(max(V_over_F_old, 0), 1)\n# G = g.G()*V_over_F_G + (1.0 - V_over_F_G)*l.G()\n# # print('new G', G, 'old G', G_old)\n# if G_old is not None:\n# if G > G_old:\n# step = .5\n# while G > G_old and step > 1e-4:\n# # ys_working = normalize([step*xo + (1.0 - step)*xi for xi, xo in zip(xs, xs_old)])\n# # xs_working = normalize([step*xo + (1.0 - step)*xi for xi, xo in zip(ys, ys_old)])\n# # ys_working = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(xs, xs_old)])\n# # xs_working = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(ys, ys_old)])\n# # g = gas_phase.to(ys_working, T=T, P=P, V=V)\n# # l = liquid_phase.to(xs_working, T=T, P=P, V=V)\n# # lnphis_g = g.lnphis()\n# # lnphis_l = l.lnphis()\n# # try:\n# # Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]\n# # except OverflowError:\n# # Ks = [trunc_exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]\n# Ks_working = [step*xo + (1.0 - step)*xi for xo, xi in zip(Ks_old, Ks)]\n\n# V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks_working, guess=V_over_F)\n# # V_over_F_G = min(max(V_over_F, 0), 1)\n# g = gas_phase.to(ys_new, T=T, P=P, V=V)\n# l = liquid_phase.to(xs_new, T=T, P=P, V=V)\n# G = g.G()*V_over_F_G + (1.0 - V_over_F_G)*l.G()\n# # print('step', step, G, V_over_F, Ks)\n# step *= 0.5\n# # xs, ys = xs_working, ys_working\n\n\n# # print('Gibbs increased', G/G_old)\n# G_old = G\n# if check_V and iteration > 2:\n# big_Z_change = (abs(1.0 - l_old.Z()/l.Z()) > dZ_allow or abs(1.0 - g_old.Z()/g.Z()) > dZ_allow)\n# if restrained <= restrained_switch_count and big_Z_change:\n# limited_Z = True\n# step = .5 #.5\n# while (abs(1.0 - l_old.Z()/l.Z()) > dZ_allow or abs(1.0 - g_old.Z()/g.Z()) > dZ_allow ) and step > 1e-8:\n# # Ks_working = [step*xo + (1.0 - step)*xi for xo, xi in zip(Ks, Ks_old)]\n# # Ks_working = [Ks[i]*(Ks_old[i]/Ks[i])**(1.0 - step) for i in cmps] # step = 0 - all new; step = 1 - all old\n# # Ks_working = [Ks_old[i]*(exp(lnphis_l[i])/exp(lnphis_g[i])/Ks_old[i])**(1.0 - step) for i in cmps]\n# ys_new = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(ys, ys_old)])\n# xs_new = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(xs, xs_old)])\n# # V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks_working, guess=V_over_F)\n# l = liquid_phase.to(xs_new, T=T, P=P, V=V)\n# g = gas_phase.to(ys_new, T=T, P=P, V=V)\n# # lnphis_g = g.lnphis()\n# # lnphis_l = l.lnphis()\n# # print('step', step, V_over_F, g.Z())\n# step *= 0.5\n# xs, ys = xs_new, ys_new\n# lnphis_g = g.lnphis()\n# lnphis_l = l.lnphis()\n# Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]\n# V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)\n# restrained += 1\n# elif restrained > restrained_switch_count and big_Z_change:\n# restrained = 0\n\n # Check for negative fractions - normalize only if needed\n for xi in xs_new:\n if xi < 0.0:\n # Remove negative mole fractions - may help or may still fail\n xs_new_sum_inv = 0.0\n for xj in xs_new:\n xs_new_sum_inv += abs(xj)\n xs_new_sum_inv = 1.0/xs_new_sum_inv\n for i in range(N):\n xs_new[i] = abs(xs_new[i])*xs_new_sum_inv\n break\n for yi in ys_new:\n if yi < 0.0:\n ys_new_sum_inv = 0.0\n for yj in ys_new:\n ys_new_sum_inv += abs(yj)\n 
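                # Note: flash_inner_loop can step slightly outside the physical\n                # composition domain near the solution; taking absolute values and\n                # renormalizing (here for ys_new, above for xs_new) accepts one\n                # non-physical intermediate composition to keep the iteration alive.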
ys_new_sum_inv = 1.0/ys_new_sum_inv\n for i in range(N):\n ys_new[i] = abs(ys_new[i])*ys_new_sum_inv\n break\n # Calculate the error using the new Ks and old compositions\n # Claimed error function in CONVENTIONAL AND RAPID FLASH\n # CALCULATIONS FOR THE SOAVE-REDLICH-KWONG AND PENG-ROBINSON EQUATIONS OF STATE\n\n err = 0.0\n # Suggested tolerance 1e-15\n for Ki, xi, yi in zip(Ks, xs, ys):\n # equivalent of fugacity ratio\n # Could divide by the old Ks as well.\n if yi != 0.0:\n err_i = Ki*xi/yi - 1.0\n err += err_i*err_i\n\n if err > 0.0 and err in (err1, err2, err3) or error_increases > 3:\n raise OscillationError(\"Converged to cycle in errors, no progress being made\")\n # Accept the new compositions\n # if not limited_Z:\n # assert xs == l.zs\n # assert ys == g.zs\n # lnphis_g_old, lnphis_l_old = lnphis_g, lnphis_l\n # l_old, g_old = l, g\n\n #print(err, V_over_F, Ks) # xs, ys\n\n # Check for\n comp_difference = 0.0\n for xi, yi in zip(xs_new, ys_new):\n comp_difference += abs(xi - yi)\n if comp_difference < trivial_solution_tol:\n raise TrivialSolutionError(\"Converged to trivial condition, compositions of both phases equal\",\n comp_difference, iteration, err)\n if err < tol:# and not limited_Z:\n if iteration == 0:\n # We are composition independent!\n g = gas_phase.to(ys_new, T=T, P=P, V=V)\n l = liquid_phase.to(xs_new, T=T, P=P, V=V)\n return V_over_F, xs_new, ys_new, l, g, iteration, err\n g = gas_phase.to(ys, T=T, P=P, V=V)\n l = liquid_phase.to(xs, T=T, P=P, V=V)\n return V_over_F_old, xs, ys, l, g, iteration, err\n # elif err < tol and limited_Z:\n # print(l.fugacities()/np.array(g.fugacities()))\n\n # If we aren't in a cycle but still making no progress\n xs, ys = xs_new, ys_new\n if err1 != 0.0 and abs(err/err1) > 20.0:\n error_increases += 1\n err1, err2, err3 = err, err1, err2\n raise UnconvergedError('End of SS without convergence')\n\ndef sequential_substitution_2P_functional(T, P, zs, xs_guess, ys_guess,\n liquid_args, gas_args, maxiter=1000, tol=1E-13,\n trivial_solution_tol=1e-5, V_over_F_guess=0.5):\n xs, ys = xs_guess, ys_guess\n V_over_F = V_over_F_guess\n N = len(zs)\n\n err, err1, err2, err3 = 0.0, 0.0, 0.0, 0.0\n V_over_F_old = V_over_F\n error_increases = 0\n\n Ks = [0.0]*N\n for iteration in range(maxiter):\n lnphis_g = lnphis_direct(ys, *gas_args)\n lnphis_l = lnphis_direct(xs, *liquid_args)\n\n for i in range(N):\n Ks[i] = trunc_exp(lnphis_l[i] - lnphis_g[i])\n\n V_over_F_old = V_over_F\n V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)\n for xi in xs_new:\n if xi < 0.0:\n # Remove negative mole fractions - may help or may still fail\n xs_new_sum_inv = 0.0\n for xj in xs_new:\n xs_new_sum_inv += abs(xj)\n xs_new_sum_inv = 1.0/xs_new_sum_inv\n for i in range(N):\n xs_new[i] = abs(xs_new[i])*xs_new_sum_inv\n break\n for yi in ys_new:\n if yi < 0.0:\n ys_new_sum_inv = 0.0\n for yj in ys_new:\n ys_new_sum_inv += abs(yj)\n ys_new_sum_inv = 1.0/ys_new_sum_inv\n for i in range(N):\n ys_new[i] = abs(ys_new[i])*ys_new_sum_inv\n break\n\n err = 0.0\n for Ki, xi, yi in zip(Ks, xs, ys):\n # equivalent of fugacity ratio\n # Could divide by the old Ks as well.\n if yi != 0.0:\n err_i = Ki*xi/yi - 1.0\n err += err_i*err_i\n\n if err > 0.0 and err in (err1, err2, err3) or error_increases > 3:\n raise OscillationError(\"Converged to cycle in errors, no progress being made\")\n\n comp_difference = 0.0\n for xi, yi in zip(xs, ys):\n comp_difference += abs(xi - yi)\n\n if comp_difference < trivial_solution_tol:\n raise TrivialSolutionError(\"Converged to 
trivial condition, compositions of both phases equal\")\n\n if err < tol:\n if iteration == 0:\n # We are composition independent!\n return V_over_F, xs_new, ys_new, iteration, err\n\n return V_over_F_old, xs, ys, iteration, err\n if err1 != 0.0 and abs(err/err1) > 20.0:\n error_increases += 1\n xs, ys = xs_new, ys_new\n err1, err2, err3 = err, err1, err2\n raise ValueError('End of SS without convergence')\n\n\ndef sequential_substitution_NP(T, P, zs, compositions_guesses, betas_guesses,\n phases, maxiter=1000, tol=1E-13,\n trivial_solution_tol=1e-5, ref_phase=2):\n\n compositions = compositions_guesses\n cmps = range(len(zs))\n phase_count = len(phases)\n phases_iter = range(phase_count)\n phase_iter_n1 = range(phase_count - 1)\n betas = betas_guesses\n if len(betas) < len(phases):\n betas.append(1.0 - sum(betas))\n\n compositions_K_order = [compositions[i] for i in phases_iter if i != ref_phase]\n compositions_ref = compositions_guesses[ref_phase]\n\n for i, phase in enumerate(phases):\n if phase.T != T or phase.P != P:\n phases[i] = phase.to_TP_zs(T=T, P=P, zs=phase.zs)\n\n compositions_old = None\n\n for iteration in range(maxiter):\n lnphis = [phases[i].lnphis_at_zs(zs=compositions[i]) for i in phases_iter]\n\n\n\n\n\n\n Ks = []\n lnphis_ref = lnphis[ref_phase]\n for i in phases_iter:\n if i != ref_phase:\n lnphis_i = lnphis[i]\n try:\n Ks.append([exp(lnphis_ref[j] - lnphis_i[j]) for j in cmps])\n except OverflowError:\n Ks.append([trunc_exp(lnphis_ref[j] - lnphis_i[j]) for j in cmps])\n\n\n beta_guesses = [betas[i] for i in phases_iter if i != ref_phase]\n\n #if phase_count == 3:\n # Rachford_Rice_solution2(zs, Ks[0], Ks[1], beta_y=beta_guesses[0], beta_z=beta_guesses[1])\n betas_new, compositions_new = Rachford_Rice_solutionN(zs, Ks, beta_guesses)\n # Sort the order back\n beta_ref_new = betas_new[-1]\n betas_new = betas_new[:-1]\n betas_new.insert(ref_phase, beta_ref_new)\n\n compositions_ref_new = compositions_new[-1]\n compositions_K_order_new = compositions_new[:-1]\n\n compositions_new = list(compositions_K_order_new)\n compositions_new.insert(ref_phase, compositions_ref_new)\n\n err = 0.0\n for i in phase_iter_n1:\n Ks_i = Ks[i]\n ys = compositions_K_order[i]\n for Ki, xi, yi in zip(Ks_i, compositions_ref, ys):\n if yi != 0.0:\n err_i = Ki*xi/yi - 1.0\n err += err_i*err_i\n# print(betas, Ks, 'calculated', err)\n # print(err)\n\n compositions_old = compositions\n compositions = compositions_new\n compositions_K_order = compositions_K_order_new\n compositions_ref = compositions_ref_new\n betas = betas_new\n\n # TODO trivial solution check - how to handle - drop phase?\n\n # Check for\n# comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])\n# if comp_difference < trivial_solution_tol:\n# raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n if err < tol:\n phases = [phases[i].to_TP_zs(T=T, P=P, zs=compositions_old[i]) for i in phases_iter]\n return betas, compositions, phases, iteration, err\n # if iteration > 100:\n # return betas, compositions, phases, iteration, err\n raise UnconvergedError('End of SS without convergence')\n\n\n\n\ndef sequential_substitution_Mehra_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,\n gas_phase, maxiter=1000, tol=1E-13,\n trivial_solution_tol=1e-5,\n acc_frequency=3, acc_delay=5,\n lambda_max=3, lambda_min=0.0,\n V_over_F_guess=None):\n\n xs, ys = xs_guess, ys_guess\n if V_over_F_guess is None:\n V_over_F = 0.5\n else:\n V_over_F = V_over_F_guess\n\n N = len(zs)\n cmps = range(N)\n lambdas = 
[1.0]*N\n\n    Ks = [ys[i]/xs[i] for i in cmps]\n\n    gs = []\n    for iteration in range(maxiter):\n        g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)\n        l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)\n\n        fugacities_g = g.fugacities()\n        fugacities_l = l.fugacities()\n#        Ks = [fugacities_l[i]*ys[i]/(fugacities_g[i]*xs[i]) for i in cmps]\n        lnphis_g = g.lnphis()\n        lnphis_l = l.lnphis()\n        phis_g = g.phis()\n        phis_l = l.phis()\n#        Ks = [Ks[i]*exp(-lnphis_g[i]/lnphis_l[i]) for i in cmps]\n#        Ks = [Ks[i]*(phis_l[i]/phis_g[i]/Ks[i])**lambdas[i] for i in cmps]\n#        Ks = [Ks[i]*fugacities_l[i]/fugacities_g[i] for i in cmps]\n#        Ks = [Ks[i]*exp(-phis_g[i]/phis_l[i]) for i in cmps]\n        # Mehra, R. K., R. A. Heidemann, and K. Aziz. “An Accelerated Successive Substitution Algorithm.” The Canadian Journal of Chemical Engineering 61, no. 4 (August 1, 1983): 590-96. https://doi.org/10.1002/cjce.5450610414.\n\n        # Strongly believed correct\n        gis = np.log(fugacities_g) - np.log(fugacities_l)\n\n        if not (iteration % acc_frequency) and iteration > acc_delay:\n            gis_old = np.array(gs[-1])\n#            lambdas = np.abs(gis_old.T*gis_old/(gis_old.T*(gis_old - gis))*lambdas).tolist() # Algorithm 3 also working\n#            lambdas = np.abs(gis_old.T*(gis_old-gis)/((gis_old-gis).T*(gis_old - gis))*lambdas).tolist() # WORKING\n            lambdas = np.abs(gis.T*gis/(gis_old.T*(gis - gis_old))).tolist() # 34, working\n            lambdas = [min(max(li, lambda_min), lambda_max) for li in lambdas]\n#            print(lambdas[0:5])\n            # print(lambdas)\n#            print('Ks', Ks, )\n#            print(Ks[-1], phis_l[-1], phis_g[-1], lambdas[-1], gis[-1], gis_old[-1])\n            Ks = [Ks[i]*(phis_l[i]/phis_g[i]/Ks[i])**lambdas[i] for i in cmps]\n#            print(Ks)\n        else:\n            Ks = [Ks[i]*fugacities_l[i]/fugacities_g[i] for i in cmps]\n#        print(Ks[0:5])\n\n\n        gs.append(gis)\n\n#        lnKs = [lnKs[i]*1.5 for i in cmps]\n\n        V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)\n\n        # Check for negative fractions - normalize only if needed\n        for xi in xs_new:\n            if xi < 0.0:\n                xs_new_sum = sum(abs(i) for i in xs_new)\n                xs_new = [abs(i)/xs_new_sum for i in xs_new]\n                break\n        for yi in ys_new:\n            if yi < 0.0:\n                ys_new_sum = sum(abs(i) for i in ys_new)\n                ys_new = [abs(i)/ys_new_sum for i in ys_new]\n                break\n\n        err = 0.0\n        # Suggested tolerance 1e-15\n        for Ki, xi, yi in zip(Ks, xs, ys):\n            # equivalent of fugacity ratio\n            # Could divide by the old Ks as well.\n            err_i = Ki*xi/yi - 1.0\n            err += err_i*err_i\n        # print(err)\n        # Accept the new compositions\n        xs, ys = xs_new, ys_new\n        # Check for a trivial solution\n        comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])\n        if comp_difference < trivial_solution_tol:\n            raise TrivialSolutionError(\"Converged to trivial condition, compositions of both phases equal\",\n                                       comp_difference, iteration, err)\n        if err < tol:\n            return V_over_F, xs, ys, l, g, iteration, err\n    raise UnconvergedError('End of SS without convergence')\n
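# ---------------------------------------------------------------------------\n# Illustrative sketch (not part of the original module): the GDEM\n# extrapolation that sequential_substitution_GDEM3_2P below applies through\n# gdem(), demonstrated standalone on an assumed two-mode, linearly converging\n# sequence. The algebra of the gdem() helper defined later in this file is\n# repeated here so the demo is self-contained.\ndef _gdem_acceleration_demo():\n    def _gdem(x, x1, x2, x3):\n        # x is the newest iterate; x1, x2, x3 are progressively older ones\n        cmps = range(len(x))\n        dx2 = [x[i] - x3[i] for i in cmps]\n        dx1 = [x[i] - x2[i] for i in cmps]\n        dx = [x[i] - x1[i] for i in cmps]\n        b01 = sum(dx[i]*dx1[i] for i in cmps)\n        b02 = sum(dx[i]*dx2[i] for i in cmps)\n        b12 = sum(dx1[i]*dx2[i] for i in cmps)\n        b11 = sum(dx1[i]*dx1[i] for i in cmps)\n        b22 = sum(dx2[i]*dx2[i] for i in cmps)\n        den_inv = 1.0/(b11*b22 - b12*b12)\n        mu1 = den_inv*(b02*b12 - b01*b22)\n        mu2 = den_inv*(b01*b12 - b02*b11)\n        factor = 1.0/(1.0 + mu1 + mu2)\n        return [factor*(dx[i] - mu2*dx1[i]) for i in cmps]\n\n    # Assumed toy fixed-point iteration converging to (1.0, 2.0), two decay modes\n    seq = [[1.0 + 0.5**k, 2.0 + 0.8**k] for k in range(4)]\n    x3, x2, x1, x = seq\n    dx_acc = _gdem(x, x1, x2, x3)\n    # The extrapolated point lands closer to the limit than the plain iterate x\n    return [xi + di for xi, di in zip(x, dx_acc)]\n# ---------------------------------------------------------------------------\n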
\n\ndef sequential_substitution_GDEM3_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,\n                                     gas_phase, maxiter=1000, tol=1E-13,\n                                     trivial_solution_tol=1e-5, V_over_F_guess=None,\n                                     acc_frequency=3, acc_delay=3,\n                                     ):\n\n    xs, ys = xs_guess, ys_guess\n    if V_over_F_guess is None:\n        V_over_F = 0.5\n    else:\n        V_over_F = V_over_F_guess\n\n    cmps = range(len(zs))\n    all_Ks = []\n    all_lnKs = []\n\n    for iteration in range(maxiter):\n        g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)\n        l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)\n\n        lnphis_g = g.lnphis()\n        lnphis_l = l.lnphis()\n\n        # Mehra et al. (1983) is another option\n\n#        Ks = [exp(l - g) for l, g in zip(lnphis_l, lnphis_g)]\n#        if not (iteration %3) and iteration > 3:\n#            dKs = gdem(Ks, all_Ks[-1], all_Ks[-2], all_Ks[-3])\n#            print(iteration, dKs)\n#            Ks = [Ks[i] + dKs[i] for i in cmps]\n#        all_Ks.append(Ks)\n\n#        lnKs = [(l - g) for l, g in zip(lnphis_l, lnphis_g)]\n#        if not (iteration %3) and iteration > 3:\n##             dlnKs = gdem(lnKs, all_lnKs[-1], all_lnKs[-2], all_lnKs[-3])\n#\n#            dlnKs = gdem(lnKs, all_lnKs[-1], all_lnKs[-2], all_lnKs[-3])\n#            lnKs = [lnKs[i] + dlnKs[i] for i in cmps]\n\n        # Mehra, R. K., R. A. Heidemann, and K. Aziz. “An Accelerated Successive Substitution Algorithm.” The Canadian Journal of Chemical Engineering 61, no. 4 (August 1, 1983): 590-96. https://doi.org/10.1002/cjce.5450610414.\n        lnKs = [(l - g) for l, g in zip(lnphis_l, lnphis_g)]\n        if not (iteration %acc_frequency) and iteration > acc_delay:\n            dlnKs = gdem(lnKs, all_lnKs[-1], all_lnKs[-2], all_lnKs[-3])\n            # print(dlnKs)\n            lnKs = [lnKs[i] + dlnKs[i] for i in cmps]\n\n\n        # Store the lnK history so the accelerated step can be tested\n        all_lnKs.append(lnKs)\n        Ks = [exp(lnKi) for lnKi in lnKs]\n\n        V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)\n\n        # Check for negative fractions - normalize only if needed\n        for xi in xs_new:\n            if xi < 0.0:\n                xs_new_sum = sum(abs(i) for i in xs_new)\n                xs_new = [abs(i)/xs_new_sum for i in xs_new]\n                break\n        for yi in ys_new:\n            if yi < 0.0:\n                ys_new_sum = sum(abs(i) for i in ys_new)\n                ys_new = [abs(i)/ys_new_sum for i in ys_new]\n                break\n\n        err = 0.0\n        # Suggested tolerance 1e-15\n        for Ki, xi, yi in zip(Ks, xs, ys):\n            # equivalent of fugacity ratio\n            # Could divide by the old Ks as well.\n            err_i = Ki*xi/yi - 1.0\n            err += err_i*err_i\n\n        # Accept the new compositions\n        xs, ys = xs_new, ys_new\n        # Check for a trivial solution\n        comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])\n        if comp_difference < trivial_solution_tol:\n            raise TrivialSolutionError(\"Converged to trivial condition, compositions of both phases equal\",\n                                       comp_difference, iteration, err)\n        if err < tol:\n            return V_over_F, xs, ys, l, g, iteration, err\n    raise UnconvergedError('End of SS without convergence')\n\n\ndef nonlin_equilibrium_NP(T, P, zs, compositions_guesses, betas_guesses,\n                          phases, maxiter=1000, tol=1E-13,\n                          trivial_solution_tol=1e-5, ref_phase=-1,\n                          method='hybr', solve_kwargs=None, debug=False):\n    if solve_kwargs is None:\n        solve_kwargs = {}\n\n    compositions = compositions_guesses\n    N = len(zs)\n    Nm1 = N - 1\n    cmps = range(N)\n    phase_count = len(phases)\n    phase_iter = range(phase_count)\n    if ref_phase < 0:\n        ref_phase = phase_count + ref_phase\n\n    phase_iter_n1 = [i for i in phase_iter if i != ref_phase]\n    phase_iter_n1_0 = range(phase_count-1)\n    betas = betas_guesses\n    if len(betas) < len(phases):\n        betas.append(1.0 - sum(betas))\n\n    flows_guess = [compositions_guesses[j][i]*betas[j] for j in phase_iter_n1 for i in cmps]\n\n    jac = True\n    if method in ('broyden1', 'broyden2', 'anderson', 'linearmixing',\n                  'diagbroyden', 'excitingmixing', 'krylov'):\n        jac = False\n\n\n\n\n    global iterations, info\n    iterations = 0\n    info = []\n    def to_solve(flows, jac=jac):\n        global iterations, info\n        try:\n            flows = flows.tolist()\n        except:\n            flows = list(flows)\n        iterations += 1\n        iter_flows = []\n        iter_comps = []\n        iter_betas = []\n        iter_phases = []\n        jac_arr = None\n\n        remaining = zs\n        for i in range(len(flows)):\n            if flows[i] < 0.0:\n                flows[i] = 1e-100\n\n\n        for j, k in zip(phase_iter_n1, phase_iter_n1_0):\n            v = flows[k*N:k*N+N]\n            vs = v\n            vs_sum = sum(abs(i) for i in vs)\n            if vs_sum == 
0.0:\n # Handle the case an optimizer takes all of all compounds already\n ys = zs\n else:\n vs_sum_inv = 1.0/vs_sum\n ys = [abs(vs[i]*vs_sum_inv) for i in cmps]\n ys = normalize(ys)\n iter_flows.append(vs)\n iter_comps.append(ys)\n iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1\n iter_phases.append(phases[j].to_TP_zs(T=T, P=P, zs=ys))\n remaining = [remaining[i] - vs[i] for i in cmps]\n\n flows_ref = remaining\n iter_flows.insert(ref_phase, remaining)\n\n beta_ref = sum(remaining)\n iter_betas.insert(ref_phase, beta_ref)\n\n xs_ref = normalize([abs(i) for i in remaining])\n iter_comps.insert(ref_phase, xs_ref)\n\n phase_ref = phases[ref_phase].to_TP_zs(T=T, P=P, zs=xs_ref)\n iter_phases.insert(ref_phase, phase_ref)\n\n lnphis_ref = phase_ref.lnphis()\n dlnfugacities_ref = phase_ref.dlnfugacities_dns()\n\n errs = []\n for k in phase_iter_n1:\n phase = iter_phases[k]\n lnphis = phase.lnphis()\n xs = iter_comps[k]\n for i in cmps:\n # This is identical to lnfugacity(i)^j - lnfugacity(i)^ref\n gi = trunc_log(xs[i]/xs_ref[i]) + lnphis[i] - lnphis_ref[i]\n errs.append(gi)\n\n if jac:\n jac_arr = [[0.0]*N*(phase_count-1) for i in range(N*(phase_count-1))]\n for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):\n p = iter_phases[ni]\n dlnfugacities = p.dlnfugacities_dns()\n # Begin with the first row using ni, nj;\n for i in cmps:\n for ki, kj in zip(phase_iter_n1, phase_iter_n1_0):\n for j in cmps:\n delta = 1.0 if nj == kj else 0.0\n v_ref = dlnfugacities_ref[i][j]/beta_ref\n jac_arr[nj*N + i][kj*N + j] = dlnfugacities[i][j]*delta/iter_betas[ni] + v_ref\n info[:] = iter_betas, iter_comps, iter_phases, errs, jac_arr, flows\n if jac:\n return errs, jac_arr\n return errs\n\n if method == 'newton_system':\n comp_val, iterations = newton_system(to_solve, flows_guess, jac=True,\n xtol=tol, damping=1,\n damping_func=damping_maintain_sign)\n else:\n def f_jac_numpy(flows_guess):\n # needed\n ans = to_solve(flows_guess)\n if jac:\n return np.array(ans[0]), np.array(ans[1])\n return np.array(ans)\n sln = root(f_jac_numpy, flows_guess, tol=tol, jac=(True if jac else None), method=method, **solve_kwargs)\n iterations = sln['nfev']\n\n betas, compositions, phases, errs, jac, flows = info\n sln = (betas, compositions, phases, errs, jac, iterations)\n if debug:\n return sln, flows, to_solve\n return sln\n\n\ndef nonlin_spec_NP(guess, fixed_val, spec_val, zs, compositions_guesses, betas_guesses,\n phases, iter_var='T', fixed_var='P', spec='H',\n maxiter=1000, tol=1E-13,\n trivial_solution_tol=1e-5, ref_phase=-1,\n# method='hybr',\n method='fsolve',\n solve_kwargs=None, debug=False,\n analytical_jac=True):\n if solve_kwargs is None:\n solve_kwargs = {}\n\n phase_kwargs = {fixed_var: fixed_val, iter_var: guess}\n compositions = compositions_guesses\n N = len(zs)\n Nm1 = N - 1\n cmps = range(N)\n phase_count = len(phases)\n phase_iter = range(phase_count)\n if ref_phase < 0:\n ref_phase = phase_count + ref_phase\n\n phase_iter_n1 = [i for i in phase_iter if i != ref_phase]\n phase_iter_n1_0 = range(phase_count-1)\n betas = betas_guesses\n if len(betas) < len(phases):\n betas.append(1.0 - sum(betas))\n\n guesses = [compositions_guesses[j][i]*betas[j] for j in phase_iter_n1 for i in cmps]\n guesses.append(guess)\n spec_callables = [getattr(phase.__class__, spec) for phase in phases]\n\n dlnphis_diter_s = 'dlnphis_d' + iter_var\n dlnphis_diter_callables = [getattr(phase.__class__, dlnphis_diter_s) for phase in phases]\n\n dspec_diter_s = f'd{spec}_d{iter_var}'\n dspec_diter_callables = 
[getattr(phase.__class__, dspec_diter_s) for phase in phases]\n\n dspec_dn_s = 'd%s_dns' %(spec)\n dspec_dn_callables = [getattr(phase.__class__, dspec_dn_s) for phase in phases]\n\n jac = True\n if method in ('broyden1', 'broyden2', 'anderson', 'linearmixing',\n 'diagbroyden', 'excitingmixing', 'krylov', 'fsolve'):\n jac = False\n\n\n\n\n global iterations, info\n iterations = 0\n info = []\n def to_solve(flows, jac=jac, skip_err=False):\n global iterations, info\n try:\n flows = flows.tolist()\n except:\n flows = list(flows)\n iter_val = flows[-1]\n phase_kwargs[iter_var] = iter_val\n flows = flows[:-1]\n iter_flows = []\n iter_comps = []\n iter_betas = []\n iter_phases = []\n jac_arr = None\n\n remaining = zs\n if not skip_err:\n# print(flows, iter_val)\n iterations += 1\n for i in range(len(flows)):\n if flows[i] < 0.0:\n flows[i] = 1e-100\n\n\n for j, k in zip(phase_iter_n1, phase_iter_n1_0):\n v = flows[k*N:k*N+N]\n vs = v\n vs_sum = sum(abs(i) for i in vs)\n if vs_sum == 0.0:\n # Handle the case an optimizer takes all of all compounds already\n ys = zs\n else:\n vs_sum_inv = 1.0/vs_sum\n ys = [abs(vs[i]*vs_sum_inv) for i in cmps]\n ys = normalize(ys)\n iter_flows.append(vs)\n iter_comps.append(ys)\n iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1\n iter_phases.append(phases[j].to_TP_zs(zs=ys, **phase_kwargs))\n remaining = [remaining[i] - vs[i] for i in cmps]\n\n flows_ref = remaining\n iter_flows.insert(ref_phase, remaining)\n\n beta_ref = sum(remaining)\n iter_betas.insert(ref_phase, beta_ref)\n\n xs_ref = normalize([abs(i) for i in remaining])\n iter_comps.insert(ref_phase, xs_ref)\n\n phase_ref = phases[ref_phase].to_TP_zs(zs=xs_ref, **phase_kwargs)\n iter_phases.insert(ref_phase, phase_ref)\n\n lnphis_ref = phase_ref.lnphis()\n\n errs = []\n for k in phase_iter_n1:\n phase = iter_phases[k]\n lnphis = phase.lnphis()\n xs = iter_comps[k]\n for i in cmps:\n # This is identical to lnfugacity(i)^j - lnfugacity(i)^ref\n gi = trunc_log(xs[i]/xs_ref[i]) + lnphis[i] - lnphis_ref[i]\n errs.append(gi)\n\n spec_phases = []\n spec_calc = 0.0\n for k in phase_iter:\n spec_phase = spec_callables[k](iter_phases[k])\n spec_phases.append(spec_phase)\n spec_calc += spec_phase*iter_betas[k]\n errs.append(spec_calc - spec_val)\n else:\n iter_betas, iter_comps, iter_phases, errs, jac_arr, flows, iter_val_check, spec_phases = info\n beta_ref = iter_betas[ref_phase]\n xs_ref = iter_comps[ref_phase]\n phase_ref = iter_phases[ref_phase]\n lnphis_ref = phase_ref.lnphis()\n\n# print(errs[-1], 'err', iter_val, 'T')\n\n\n if jac:\n dlnfugacities_ref = phase_ref.dlnfugacities_dns()\n jac_arr = [[0.0]*(N*(phase_count-1) + 1) for i in range(N*(phase_count-1)+1)]\n for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):\n p = iter_phases[ni]\n dlnfugacities = p.dlnfugacities_dns()\n # Begin with the first row using ni, nj;\n for i in cmps:\n for ki, kj in zip(phase_iter_n1, phase_iter_n1_0):\n for j in cmps:\n delta = 1.0 if nj == kj else 0.0\n v_ref = dlnfugacities_ref[i][j]/beta_ref\n jac_arr[nj*N + i][kj*N + j] = dlnfugacities[i][j]*delta/iter_betas[ni] + v_ref\n\n dlnphis_dspec = [dlnphis_diter_callables[i](phases[i]) for i in phase_iter]\n dlnphis_dspec_ref = dlnphis_dspec[ref_phase]\n for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):\n p = iter_phases[ni]\n for i in cmps:\n jac_arr[nj*N + i][-1] = dlnphis_dspec[ni][i] - dlnphis_dspec_ref[i]\n\n# last =\n dspec_calc = 0.0\n for k in phase_iter:\n dspec_calc += dspec_diter_callables[k](iter_phases[k])*iter_betas[k]\n jac_arr[-1][-1] = 
dspec_calc\n\n dspec_dns = [dspec_dn_callables[i](phases[i]) for i in phase_iter]\n dspec_dns_ref = dspec_dns[ref_phase]\n last_jac_row = jac_arr[-1]\n\n for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):\n for i in cmps:\n # What is wrong?\n # H is multiplied by the phase fraction, of which this n is a part of\n # So there must be two parts here\n last_jac_row[nj*N + i] = ((iter_betas[ni]*dspec_dns[ni][i]/iter_betas[ni] - beta_ref*dspec_dns_ref[i]/beta_ref)\n + (spec_phases[ni] - spec_phases[ref_phase]))\n\n if skip_err:\n return jac_arr\n\n info[:] = iter_betas, iter_comps, iter_phases, errs, jac_arr, flows, iter_val, spec_phases\n if jac:\n return errs, jac_arr\n return errs\n\n if method == 'newton_system':\n comp_val, iterations = newton_system(to_solve, guesses, jac=True,\n xtol=tol, damping=1,\n damping_func=damping_maintain_sign)\n else:\n def f_jac_numpy(flows_guess):\n # needed\n ans = to_solve(flows_guess)\n if jac:\n return np.array(ans[0]), np.array(ans[1])\n return np.array(ans)\n def jac_numpy(flows_guess):\n if flows_guess.tolist() == info[5] + [info[6]]:\n a = np.array(to_solve(flows_guess, jac=True, skip_err=True))\n# b = np.array(to_solve(flows_guess, jac=True)[1])\n# from numpy.testing import assert_allclose\n# assert_allclose(a, b, rtol=1e-10)\n return a\n# print('fail jac', tuple(flows_guess.tolist()), tuple(info[5]))\n# print('new jac')\n return np.array(to_solve(flows_guess, jac=True)[1])\n\n if method == 'fsolve':\n # Need a function cache! 2 wasted fevals, 1 wasted jaceval\n if analytical_jac:\n jac = False\n sln, infodict, _, _ = fsolve(f_jac_numpy, guesses, fprime=jac_numpy, xtol=tol, full_output=1, **solve_kwargs)\n else:\n sln, infodict, _, _ = fsolve(f_jac_numpy, guesses, xtol=tol, full_output=1, **solve_kwargs)\n iterations = infodict['nfev']\n else:\n sln = root(f_jac_numpy, guesses, tol=tol, jac=(True if jac else None), method=method, **solve_kwargs)\n iterations = sln['nfev']\n\n betas, compositions, phases, errs, jac, flows, iter_val, spec_phases = info\n\n sln = (iter_val, betas, compositions, phases, errs, jac, iterations)\n if debug:\n return sln, flows, to_solve\n return sln\n\n\ndef nonlin_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,\n gas_phase, maxiter=1000, tol=1E-13,\n trivial_solution_tol=1e-5, V_over_F_guess=None,\n method='hybr'):\n # Do with just n?\n cmps = range(len(zs))\n xs, ys = xs_guess, ys_guess\n if V_over_F_guess is None:\n V_over_F = 0.5\n else:\n V_over_F = V_over_F_guess\n Ks_guess = [ys[i]/xs[i] for i in cmps]\n\n info = [0, None, None, None]\n def to_solve(lnKsVFTrans):\n Ks = [trunc_exp(i) for i in lnKsVFTrans[:-1]]\n V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + trunc_exp(-lnKsVFTrans[-1]))) # Translation function - keep it zero to 1\n\n xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]\n ys = [Ks[i]*xs[i] for i in cmps]\n\n g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)\n l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)\n\n lnphis_g = g.lnphis()\n lnphis_l = l.lnphis()\n# print(g.fugacities(), l.fugacities())\n new_Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]\n VF_err = Rachford_Rice_flash_error(V_over_F, zs, new_Ks)\n\n err = [new_Ks[i] - Ks[i] for i in cmps] + [VF_err]\n info[1:] = l, g, err\n info[0] += 1\n return err\n\n VF_guess_in_basis = -log((1.0-V_over_F)/(V_over_F-0.0))\n\n guesses = [log(i) for i in Ks_guess]\n guesses.append(VF_guess_in_basis)\n# try:\n sol = root(to_solve, guesses, tol=tol, method=method)\n # No reliable way to get number of iterations from OptimizeResult\n# solution, infodict, ier, mesg = 
fsolve(to_solve, guesses, full_output=True)\n solution = sol.x.tolist()\n V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + exp(-solution[-1])))\n Ks = [exp(solution[i]) for i in cmps]\n xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]\n ys = [Ks[i]*xs[i] for i in cmps]\n# except Exception as e:\n# raise UnconvergedError(e)\n\n tot_err = 0.0\n for i in info[3]:\n tot_err += abs(i)\n return V_over_F, xs, ys, info[1], info[2], info[0], tot_err\n\n\n\ndef nonlin_2P_HSGUAbeta(spec, spec_var, iter_val, iter_var, fixed_val,\n fixed_var, zs, xs_guess, ys_guess, liquid_phase,\n gas_phase, maxiter=1000, tol=1E-13,\n trivial_solution_tol=1e-5, V_over_F_guess=None,\n method='hybr'\n ):\n cmps = range(len(zs))\n xs, ys = xs_guess, ys_guess\n if V_over_F_guess is None:\n V_over_F = 0.5\n else:\n V_over_F = V_over_F_guess\n Ks_guess = [ys[i]/xs[i] for i in cmps]\n\n kwargs_l = {'zs': xs_guess, fixed_var: fixed_val}\n kwargs_g = {'zs': ys_guess, fixed_var: fixed_val}\n\n info = [0, None, None, None, None]\n def to_solve(lnKsVFTransHSGUABeta):\n Ks = [trunc_exp(i) for i in lnKsVFTransHSGUABeta[:-2]]\n V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + trunc_exp(-lnKsVFTransHSGUABeta[-2]))) # Translation function - keep it zero to 1\n iter_val = lnKsVFTransHSGUABeta[-1]\n\n xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]\n ys = [Ks[i]*xs[i] for i in cmps]\n\n kwargs_l[iter_var] = iter_val\n kwargs_l['zs'] = xs\n kwargs_g[iter_var] = iter_val\n kwargs_g['zs'] = ys\n\n g = gas_phase.to(**kwargs_g)\n l = liquid_phase.to(**kwargs_l)\n\n lnphis_g = g.lnphis()\n lnphis_l = l.lnphis()\n new_Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]\n VF_err = Rachford_Rice_flash_error(V_over_F, zs, new_Ks)\n\n val_l = getattr(l, spec_var)()\n val_g = getattr(g, spec_var)()\n val = V_over_F*val_g + (1.0 - V_over_F)*val_l\n\n other_err = val - spec\n\n\n err = [new_Ks[i] - Ks[i] for i in cmps] + [VF_err, other_err]\n info[1:] = l, g, err, other_err\n info[0] += 1\n# print(lnKsVFTransHSGUABeta, err)\n return err\n\n VF_guess_in_basis = -log((1.0-V_over_F)/(V_over_F-0.0))\n\n guesses = [log(i) for i in Ks_guess]\n guesses.append(VF_guess_in_basis)\n guesses.append(iter_val)\n# solution, iterations = broyden2(guesses, fun=to_solve, jac=False, xtol=1e-7,\n# maxiter=maxiter, jac_has_fun=False, skip_J=True)\n\n sol = root(to_solve, guesses, tol=tol, method=method)\n solution = sol.x.tolist()\n V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + exp(-solution[-2])))\n iter_val = solution[-1]\n Ks = [exp(solution[i]) for i in cmps]\n xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]\n ys = [Ks[i]*xs[i] for i in cmps]\n\n tot_err = 0.0\n for v in info[3]:\n tot_err += abs(v)\n return V_over_F, solution[-1], xs, ys, info[1], info[2], info[0], tot_err\n\n#def broyden2(xs, fun, jac, xtol=1e-7, maxiter=100, jac_has_fun=False,\n# skip_J=False):\n\n\n\ndef nonlin_n_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,\n gas_phase, maxiter=1000, tol=1E-13,\n trivial_solution_tol=1e-5, V_over_F_guess=None,\n method='hybr'):\n\n cmps = range(len(zs))\n xs, ys = xs_guess, ys_guess\n if V_over_F_guess is None:\n V_over_F = 0.45\n else:\n V_over_F = V_over_F_guess\n\n ns = [ys[i]*V_over_F for i in cmps]\n\n info = [0, None, None, None]\n def to_solve(ns):\n ys = normalize(ns)\n ns_l = [zs[i] - ns[i] for i in cmps]\n# print(sum(ns)+sum(ns_l))\n xs = normalize(ns_l)\n# print(ys, xs)\n\n g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)\n l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)\n\n# print(np.array(g.dfugacities_dns()) - np.array(l.dfugacities_dns()) )\n\n fugacities_g = 
g.fugacities()\n fugacities_l = l.fugacities()\n\n\n err = [fugacities_g[i] - fugacities_l[i] for i in cmps]\n info[1:] = l, g, err\n info[0] += 1\n# print(err)\n return err\n\n# print(np.array(jacobian(to_solve, ns, scalar=False)))\n# print('ignore')\n\n sol = root(to_solve, ns, tol=tol, method=method)\n ns_sln = sol.x.tolist()\n ys = normalize(ns_sln)\n xs_sln = [zs[i] - ns_sln[i] for i in cmps]\n xs = normalize(xs_sln)\n\n return xs, ys\n\ndef nonlin_2P_newton(T, P, zs, xs_guess, ys_guess, liquid_phase,\n gas_phase, maxiter=1000, xtol=1E-10,\n trivial_solution_tol=1e-5, V_over_F_guess=None):\n N = len(zs)\n cmps = range(N)\n xs, ys = xs_guess, ys_guess\n if V_over_F_guess is None:\n V_over_F = 0.5\n else:\n V_over_F = V_over_F_guess\n\n Ks_guess = [ys[i]/xs[i] for i in cmps]\n\n info = []\n def to_solve(lnKsVF):\n # Jacobian verified. However, very sketchy - mole fractions may want\n # to go negative.\n lnKs = lnKsVF[:-1]\n Ks = [exp(lnKi) for lnKi in lnKs]\n VF = float(lnKsVF[-1])\n # if VF > 1:\n # VF = 1-1e-15\n # if VF < 0:\n # VF = 1e-15\n\n xs = [zi/(1.0 + VF*(Ki - 1.0)) for zi, Ki in zip(zs, Ks)]\n ys = [Ki*xi for Ki, xi in zip(Ks, xs)]\n\n g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)\n l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)\n\n lnphis_g = g.lnphis()\n lnphis_l = l.lnphis()\n\n size = N + 1\n J = [[None]*size for i in range(size)]\n\n d_lnphi_dxs = l.dlnphis_dzs()\n d_lnphi_dys = g.dlnphis_dzs()\n\n\n J[N][N] = 1.0\n\n # Last column except last value; believed correct\n # Was not correct when compared to numerical solution\n Ksm1 = [Ki - 1.0 for Ki in Ks]\n RR_denoms_inv2 = []\n for i in cmps:\n t = 1.0 + VF*Ksm1[i]\n RR_denoms_inv2.append(1.0/(t*t))\n\n RR_terms = [zs[k]*Ksm1[k]*RR_denoms_inv2[k] for k in cmps]\n for i in cmps:\n value = 0.0\n d_lnphi_dxs_i, d_lnphi_dys_i = d_lnphi_dxs[i], d_lnphi_dys[i]\n for k in cmps:\n value += RR_terms[k]*(d_lnphi_dxs_i[k] - Ks[k]*d_lnphi_dys_i[k])\n J[i][-1] = value\n\n\n # Main body - expensive to compute! 
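# (Note) Each main-body entry combines an identity term with composition\n        # derivatives of both phases:\n        #     dF_i/dlnK_j = delta_ij + z_j*K_j/(1 + VF*(K_j - 1))**2\n        #                   * (VF*dlnphi_l_i/dx_j + (1 - VF)*dlnphi_g_i/dy_j)\n        # The Rachford-Rice row below simplifies, since (1 - VF) + VF = 1, to\n        #     dRR/dlnK_j = z_j*K_j/(1 + VF*(K_j - 1))**2\n        # 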
Lots of elements\n zsKsRRinvs2 = [zs[j]*Ks[j]*RR_denoms_inv2[j] for j in cmps]\n one_m_VF = 1.0 - VF\n for i in cmps:\n Ji = J[i]\n d_lnphi_dxs_is, d_lnphi_dys_is = d_lnphi_dxs[i], d_lnphi_dys[i]\n for j in cmps:\n value = 1.0 if i == j else 0.0\n value += zsKsRRinvs2[j]*(VF*d_lnphi_dxs_is[j] + one_m_VF*d_lnphi_dys_is[j])\n Ji[j] = value\n\n # Last row except last value - good, working\n # Diff of RR w.r.t each log K\n bottom_row = J[-1]\n for j in cmps:\n bottom_row[j] = zsKsRRinvs2[j]*(one_m_VF) + VF*zsKsRRinvs2[j]\n\n # Last value - good, working, being overwritten\n dF_ncp1_dB = 0.0\n for i in cmps:\n dF_ncp1_dB -= RR_terms[i]*Ksm1[i]\n J[-1][-1] = dF_ncp1_dB\n\n\n err_RR = Rachford_Rice_flash_error(VF, zs, Ks)\n Fs = [lnKi - lnphi_l + lnphi_g for lnphi_l, lnphi_g, lnKi in zip(lnphis_l, lnphis_g, lnKs)]\n Fs.append(err_RR)\n\n info[:] = VF, xs, ys, l, g, Fs, J\n return Fs, J\n\n guesses = [log(i) for i in Ks_guess]\n guesses.append(V_over_F)\n\n # TODO trust-region\n sln, iterations = newton_system(to_solve, guesses, jac=True, xtol=xtol,\n maxiter=maxiter,\n damping_func=make_damp_initial(steps=3),\n damping=.5)\n\n VF, xs, ys, l, g, Fs, J = info\n\n tot_err = 0.0\n for Fi in Fs:\n tot_err += abs(Fi)\n return VF, xs, ys, l, g, tot_err, J, iterations\n\n\ndef gdem(x, x1, x2, x3):\n cmps = range(len(x))\n dx2 = [x[i] - x3[i] for i in cmps]\n dx1 = [x[i] - x2[i] for i in cmps]\n dx = [x[i] - x1[i] for i in cmps]\n\n b01, b02, b12, b11, b22 = 0.0, 0.0, 0.0, 0.0, 0.0\n\n for i in cmps:\n b01 += dx[i]*dx1[i]\n b02 += dx[i]*dx2[i]\n b12 += dx1[i]*dx2[i]\n b11 += dx1[i]*dx1[i]\n b22 += dx2[i]*dx2[i]\n\n den_inv = 1.0/(b11*b22 - b12*b12)\n mu1 = den_inv*(b02*b12 - b01*b22)\n mu2 = den_inv*(b01*b12 - b02*b11)\n\n factor = 1.0/(1.0 + mu1 + mu2)\n return [factor*(dx[i] - mu2*dx1[i]) for i in cmps]\n\n\ndef minimize_gibbs_2P_transformed(T, P, zs, xs_guess, ys_guess, liquid_phase,\n gas_phase, maxiter=1000, tol=1E-13,\n trivial_solution_tol=1e-5, V_over_F_guess=None):\n if V_over_F_guess is None:\n V_over_F = 0.5\n else:\n V_over_F = V_over_F_guess\n\n flows_v = [yi*V_over_F for yi in ys_guess]\n cmps = range(len(zs))\n\n calc_phases = []\n def G(flows_v):\n vs = [(0.0 + (zs[i] - 0.0)/(1.0 - flows_v[i])) for i in cmps]\n ls = [zs[i] - vs[i] for i in cmps]\n xs = normalize(ls)\n ys = normalize(vs)\n\n VF = flows_v[0]/ys[0]\n\n g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)\n l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)\n\n G_l = l.G()\n G_g = g.G()\n calc_phases[:] = G_l, G_g\n GE_calc = (G_g*VF + (1.0 - VF)*G_l)/(R*T)\n return GE_calc\n\n ans = minimize(G, flows_v)\n\n flows_v = ans['x']\n vs = [(0.0 + (zs[i] - 0.0) / (1.0 - flows_v[i])) for i in cmps]\n ls = [zs[i] - vs[i] for i in cmps]\n xs = normalize(ls)\n ys = normalize(vs)\n\n V_over_F = flows_v[0] / ys[0]\n return V_over_F, xs, ys, calc_phases[0], calc_phases[1], ans['nfev'], ans['fun']\n\n\ndef minimize_gibbs_NP_transformed(T, P, zs, compositions_guesses, phases,\n betas, tol=1E-13,\n method='L-BFGS-B', opt_kwargs=None, translate=False):\n if opt_kwargs is None:\n opt_kwargs = {}\n N = len(zs)\n cmps = range(N)\n phase_count = len(phases)\n phase_iter = range(phase_count)\n phase_iter_n1 = range(phase_count-1)\n if method == 'differential_evolution':\n translate = True\n# RT_inv = 1.0/(R*T)\n\n # Only exist for the first n phases\n # Do not multiply by zs - we are already multiplying by a composition\n flows_guess = [compositions_guesses[j][i]*betas[j] for j in range(phase_count - 1) for i in cmps]\n # Convert the flow guesses to the basis used\n 
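# (Illustrative aside, not used below: the bounded-to-unbounded transform in a\n    # self-contained form, with assumed toy numbers. A flow 0 < v < remaining maps\n    # to an unbounded coordinate t, so unconstrained optimizers cannot step\n    # outside the physical domain.)\n    def _flow_to_basis_demo(v, remaining):\n        # t = log(v/(remaining - v)), unbounded over (-inf, inf)\n        return -trunc_log((remaining - v)/v)\n\n    def _basis_to_flow_demo(t, remaining):\n        # Inverse map: v = remaining/(1 + exp(-t)), always inside (0, remaining)\n        return remaining/(1.0 + trunc_exp(-t))\n    # Round trip with assumed numbers recovers the flow:\n    # _basis_to_flow_demo(_flow_to_basis_demo(0.3, 0.5), 0.5) -> 0.3\n    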
remaining = zs\n if translate:\n flows_guess_basis = []\n for j in range(phase_count-1):\n phase_guess = flows_guess[j*N:j*N+N]\n flows_guess_basis.extend([-trunc_log((remaining[i]-phase_guess[i])/(phase_guess[i]-0.0)) for i in cmps])\n remaining = [remaining[i] - phase_guess[i] for i in cmps]\n else:\n flows_guess_basis = flows_guess\n\n global min_G, iterations\n jac, hess = False, False\n real_min = False\n min_G = 1e100\n iterations = 0\n info = []\n last = []\n def G(flows):\n global min_G, iterations\n try:\n flows = flows.tolist()\n except:\n flows = list(flows)\n iterations += 1\n iter_flows = []\n iter_comps = []\n iter_betas = []\n iter_phases = []\n\n remaining = zs\n if not translate:\n for i in range(len(flows)):\n if flows[i] < 1e-10:\n flows[i] = 1e-10\n\n for j in phase_iter:\n v = flows[j*N:j*N+N]\n\n # Mole flows of phase0/vapor\n if j == phase_count - 1:\n vs = remaining\n else:\n if translate:\n vs = [(0.0 + (remaining[i] - 0.0)/(1.0 + trunc_exp(-v[i]))) for i in cmps]\n else:\n vs = v\n vs_sum = sum(abs(i) for i in vs)\n if vs_sum == 0.0:\n # Handle the case an optimizer takes all of all compounds already\n ys = zs\n else:\n vs_sum_inv = 1.0/vs_sum\n ys = [abs(vs[i]*vs_sum_inv) for i in cmps]\n ys = normalize(ys)\n iter_flows.append(vs)\n iter_comps.append(ys)\n iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1\n\n remaining = [remaining[i] - vs[i] for i in cmps]\n G = 0.0\n jac_array = []\n for j in phase_iter:\n comp = iter_comps[j]\n phase = phases[j].to_TP_zs(T=T, P=P, zs=comp)\n lnphis = phase.lnphis()\n if real_min:\n # fugacities = phase.fugacities()\n # fugacities = phase.phis()\n #G += sum([iter_flows[j][i]*trunc_log(fugacities[i]) for i in cmps])\n G += phase.G()*iter_betas[j]\n else:\n for i in cmps:\n G += iter_flows[j][i]*(trunc_log(comp[i]) + lnphis[i])\n iter_phases.append(phase)\n\n if 0:\n fugacities_last = iter_phases[-1].fugacities()\n# G = 0.0\n for j in phase_iter_n1:\n fugacities = iter_phases[j].fugacities()\n G += sum([abs(fugacities_last[i] - fugacities[i]) for i in cmps])\n# lnphis = phase.lnphis()\n\n# if real_min:\n# G += G_base\n# # if not jac:\n# for j in phase_iter:\n# comp = iter_comps[j]\n# G += phase.G()*iter_betas[j]\n# if jac:\n# r = []\n# for i in cmps:\n# v = (log())\n# jac_array.append([log()])\n jac_arr = []\n comp = iter_comps[0]\n phase = iter_phases[0]\n lnphis = phase.lnphis()\n base = [log(xi) + lnphii for xi, lnphii in zip(comp, lnphis)]\n if jac:\n for j in range(1, phase_count):\n comp = iter_comps[j]\n phase = iter_phases[j]\n lnphis = phase.lnphis()\n jac_arr.extend([ref - (log(xi) + lnphii) for ref, xi, lnphii in zip(base, comp, lnphis)])\n\n jac_arr = []\n comp_last = iter_comps[-1]\n phase_last = iter_phases[-1]\n flows_last = iter_flows[-1]\n lnphis_last = phase_last.lnphis()\n dlnphis_dns_last = phase_last.dlnphis_dns()\n\n for j in phase_iter_n1:\n comp = iter_comps[j]\n phase = iter_phases[j]\n flows = iter_flows[j]\n lnphis = phase.lnphis()\n dlnphis_dns = phase.dlnphis_dns()\n for i in cmps:\n v = 0\n for k in cmps:\n v += flows[k][i]*lnphis[k][i]\n v -= flows_last[i]*dlnphis_dns_last[k][i]\n v += lnphis[i] + log(comp[i])\n\n\n\n\n\n if G < min_G:\n # 'phases', iter_phases\n # print('new min G', G, 'betas', iter_betas, 'comp', iter_comps)\n info[:] = iter_betas, iter_comps, iter_phases, G\n min_G = G\n last[:] = iter_betas, iter_comps, iter_phases, G\n if hess:\n base = iter_phases[0].dlnfugacities_dns()\n p1 = iter_phases[1].dlnfugacities_dns()\n dlnphis_dns = [i.dlnphis_dns() for i in 
iter_phases]\n dlnphis_dns0 = iter_phases[0].dlnphis_dns()\n dlnphis_dns1 = iter_phases[1].dlnphis_dns()\n xs, ys = iter_comps[0], iter_comps[1]\n hess_arr = []\n beta = iter_betas[0]\n\n hess_arr = [[0.0]*N*(phase_count-1) for i in range(N*(phase_count-1))]\n for n in range(1, phase_count):\n for m in range(1, phase_count):\n for i in cmps:\n for j in cmps:\n delta = 1.0 if i == j else 0.0\n v = 1.0/iter_betas[n]*(1.0/iter_comps[n][i]*delta\n - 1.0 + dlnphis_dns[n][i][j])\n v += 1.0/iter_betas[0]*(1.0/iter_comps[0][i]*delta\n - 1.0 + dlnphis_dns[0][i][j])\n hess_arr[(n-1)*N+i][(m-1)*N+j] = v\n#\n# for n in range(1, phase_count):\n# for i in cmps:\n# r = []\n# for j in cmps:\n# v = 0.0\n# for m in phase_iter:\n# delta = 1.0 if i ==j else 0.0\n# v += 1.0/iter_betas[m]*(1.0/iter_comps[m][i]*delta\n# - 1.0 + dlnphis_dns[m][i][j])\n#\n# # How the heck to make this multidimensional?\n# # v = 1.0/(beta*(1.0 - beta))*(zs[i]*delta/(xs[i]*ys[i])\n# # - 1.0 + (1.0 - beta)*dlnphis_dns0[i][j]\n# # + beta*dlnphis_dns1[i][j])\n#\n# # v = base[i][j] + p1[i][j]\n# r.append(v)\n# hess_arr.append(r)\n # Going to be hard to figure out\n # for j in range(1, phase_count):\n # comp = iter_comps[j]\n # phase = iter_phases[j]\n # dlnfugacities_dns = phase.dlnfugacities_dns()\n # row = [base[i] + dlnfugacities_dns[i] for i in cmps]\n # hess_arr = row\n # hess_arr.append(row)\n return G, jac_arr, hess_arr\n if jac:\n return G, np.array(jac_arr)\n return G\n# ans = None\n if method == 'differential_evolution':\n from scipy.optimize import differential_evolution\n real_min = True\n translate = True\n\n G_base = 1e100\n for p in phases:\n G_calc = p.to(T=T,P=P, zs=zs).G()\n if G_base > G_calc:\n G_base = G_calc\n jac = hess = False\n# print(G(list(flows_guess_basis)))\n ans = differential_evolution(G, [(-30.0, 30.0) for i in cmps for j in range(phase_count-1)], **opt_kwargs)\n# ans = differential_evolution(G, [(-100.0, 100.0) for i in cmps for j in range(phase_count-1)], **opt_kwargs)\n objf = float(ans['fun'])\n elif method == 'newton_minimize':\n import numdifftools as nd\n jac = True\n hess = True\n initial_hess = nd.Hessian(lambda x: G(x)[0], step=1e-4)(flows_guess_basis)\n ans, iters = newton_minimize(G, flows_guess_basis, jac=True, hess=True, xtol=tol, ytol=None, maxiter=100, damping=1.0,\n damping_func=damping_maintain_sign)\n objf = None\n else:\n jac = True\n hess = True\n import numdifftools as nd\n def hess_fun(flows):\n return np.array(G(flows)[2])\n\n # hess_fun = lambda flows_guess_basis: np.array(G(flows_guess_basis)[2])\n# nd.Jacobian(G, step=1e-5)\n # trust-constr special handling to add constraints\n def fun_and_jac(x):\n x, j, _ = G(x)\n return x, np.array(j)\n\n ans = minimize(fun_and_jac, flows_guess_basis, jac=True, hess=hess_fun, method=method, tol=tol, **opt_kwargs)\n objf = float(ans['fun'])\n# G(ans['x']) # Make sure info has right value\n# ans['fun'] *= R*T\n\n betas, compositions, phases, objf = info#info\n return betas, compositions, phases, iterations, objf\n\ndef TP_solve_VF_guesses(zs, method, constants, correlations,\n T=None, P=None, VF=None,\n maxiter=50, xtol=1E-7, ytol=None,\n bounded=False,\n user_guess=None, last_conv=None):\n if method == IDEAL_PSAT:\n return flash_ideal(zs=zs, funcs=correlations.VaporPressures, Tcs=constants.Tcs, T=T, P=P, VF=VF)\n elif method == WILSON_GUESS:\n return flash_wilson(zs, Tcs=constants.Tcs, Pcs=constants.Pcs, omegas=constants.omegas, T=T, P=P, VF=VF)\n elif method == TB_TC_GUESS:\n return flash_Tb_Tc_Pc(zs, Tbs=constants.Tbs, Tcs=constants.Tcs, 
Pcs=constants.Pcs, T=T, P=P, VF=VF)\n\n # Simple return values - not going through a model\n elif method == STP_T_GUESS:\n return flash_ideal(zs=zs, funcs=correlations.VaporPressures, Tcs=constants.Tcs, T=298.15, P=101325.0)\n elif method == LAST_CONVERGED:\n if last_conv is None:\n raise ValueError(\"No last converged\")\n return last_conv\n else:\n raise ValueError(\"Could not converge\")\n\n\n\ndef dew_P_newton(P_guess, T, zs, liquid_phase, gas_phase,\n maxiter=200, xtol=1E-10, xs_guess=None,\n max_step_damping=1e5,\n trivial_solution_tol=1e-4):\n # Trial function only\n V = None\n N = len(zs)\n cmps = range(N)\n xs = zs if xs_guess is None else xs_guess\n\n\n V_over_F = 1.0\n def to_solve(lnKsP):\n # d(fl_i - fg_i)/d(ln K,i) -\n # rest is less important\n\n # d d(fl_i - fg_i)/d(P) should be easy\n Ks = [trunc_exp(i) for i in lnKsP[:-1]]\n P = lnKsP[-1]\n\n xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]\n ys = [Ks[i]*xs[i] for i in cmps]\n\n g = gas_phase.to(ys, T=T, P=P, V=V)\n l = liquid_phase.to(xs, T=T, P=P, V=V)\n\n fugacities_l = l.fugacities()\n fugacities_g = g.fugacities()\n VF_err = Rachford_Rice_flash_error(V_over_F, zs, Ks)\n errs = [fi_l - fi_g for fi_l, fi_g in zip(fugacities_l, fugacities_g)]\n errs.append(VF_err)\n return errs\n\n lnKs_guess = [log(zs[i]/xs[i]) for i in cmps]\n lnKs_guess.append(P_guess)\n def jac(lnKsP):\n j = jacobian(to_solve, lnKsP, scalar=False)\n return j\n\n lnKsP, iterations = newton_system(to_solve, lnKs_guess, jac=jac, xtol=xtol)\n\n xs = [zs[i]/(1.0 + V_over_F*(exp(lnKsP[i]) - 1.0)) for i in cmps]\n# ys = [exp(lnKsP[i])*xs[i] for i in cmps]\n return lnKsP[-1], xs, zs, iterations\n\ndef dew_bubble_bounded_naive(guess, fixed_val, zs, flasher, iter_var='T', fixed_var='P', V_over_F=1, #\n maxiter=200, xtol=1E-10, ytol=None, hot_start=None\n ):\n # Bound the problem\n integral = True\n if V_over_F == 1:\n check = VL_dew_boolean_check\n check2 = one_sided_dew_point_err\n elif V_over_F == 0:\n check = VLN_bubble_boolean_check\n check2 = one_sided_bubble_point_err\n else:\n integral = False\n def check(res):\n # if res.VF > V_over_F:\n # return -1\n return res.VF - V_over_F\n # return res.VF - V_over_F\n check2 = check\n # raise ValueError(\"Not implemented\")\n guesses = [guess, guess*.9, guess*1.1, guess*0.95, guess*1.05,\n guess*0.8, guess*0.7, guess*1.2, guess*1.3, guess*.5, guess*.1, guess*.05]\n if hot_start is not None:\n hot_start_guess = hot_start.value(iter_var)\n guesses = [hot_start_guess, hot_start_guess*0.99, hot_start_guess*1.01, hot_start_guess*0.95, hot_start_guess*1.05, hot_start_guess*.9, hot_start_guess*1.1] + guesses\n\n non_phase_val, phase_val, non_phase_res, phase_res, res_pert, bounding_iter = generate_phase_boundaries_naive(flasher=flasher, zs=zs, spec_var=fixed_var, spec_val=fixed_val, iter_var=iter_var, check=check, V_over_F=V_over_F,\n ignore_der=True, iter_guesses=guesses)\n\n iterations = 0\n store = []\n flat = -1.0\n specs = {fixed_var: fixed_val}\n\n def to_solve(guess):\n nonlocal iterations\n iterations += 1\n specs[iter_var] = guess\n point = flasher.flash(zs=zs, **specs)\n store.append(point)\n # print(guess, check(point))\n err = check2(point)\n return err\n\n\n if not integral:\n guess = secant(to_solve, x0=phase_val,\n x1=non_phase_val,\n f0=check(phase_res), f1=check(non_phase_res), bisection=True,\n xtol=xtol, ytol=ytol, maxiter=maxiter, require_eval=True, require_xtol=False)\n res = store[-1]\n check2_val = check2(res)\n return res.value(iter_var), res.liquids, res.gas, iterations+bounding_iter, 
check2_val\n # import matplotlib.pyplot as plt\n # pts = linspace(non_phase_val, phase_val, 500)\n # vals = [to_solve(p) for p in pts]\n # plt.plot(pts, vals, 'x')\n # plt.show()\n\n\n y0 = check2(phase_res)\n y1 = check2(res_pert)\n\n try:\n guess = one_sided_secant(to_solve, x_flat=non_phase_val,\n x0=phase_val, y_flat=flat,\n x1=res_pert.value(iter_var),\n y0=y0, y1=y1,\n xtol=xtol, ytol=ytol, maxiter=maxiter)\n except Exception as e:\n check2 = one_sided_sat_point_err\n y0 = check2(phase_res)\n y1 = check2(res_pert)\n guess = one_sided_secant(to_solve, x_flat=non_phase_val,\n x0=phase_val, y_flat=flat,\n x1=res_pert.value(iter_var),\n y0=y0, y1=y1,\n xtol=xtol, ytol=ytol, maxiter=maxiter)\n for res in store[::-1]:\n check2_val = check2(res)\n if check2_val != flat:\n break\n\n return res.value(iter_var), res.liquids, res.gas, iterations+bounding_iter, check2_val\n\n\ndef dew_bubble_newton_zs(guess, fixed_val, zs, liquid_phase, gas_phase,\n iter_var='T', fixed_var='P', V_over_F=1, # 1 = dew, 0 = bubble\n maxiter=200, xtol=1E-10, comp_guess=None,\n max_step_damping=1e5, damping=1.0,\n trivial_solution_tol=1e-4, debug=False,\n method='newton', opt_kwargs=None,\n min_iter_val=1.0, max_iter_val=1e10,\n hot_start=None):\n if hot_start is not None and hot_start.phase_count == 2:\n # To use the hot start it must be:\n # 1) Same composition\n # 2) At saturation\n # 3) Two phases\n present_phase_hot = None\n for phase in hot_start.phases:\n if phase.beta == 1:\n present_phase_hot = phase\n break\n\n if present_phase_hot.zs == zs:\n other_phase = hot_start.phases[1] if present_phase_hot is hot_start.phases[0] else hot_start.phases[0]\n comp_guess = other_phase.zs\n guess = hot_start.value(iter_var)\n # print('Composition guess', comp_guess)\n # print('Variable guess', guess)\n V = None\n N = len(zs)\n cmps = range(N)\n if comp_guess is None:\n comp_guess = zs\n\n if V_over_F == 1.0:\n iter_phase, const_phase = liquid_phase, gas_phase\n elif V_over_F == 0.0:\n iter_phase, const_phase = gas_phase, liquid_phase\n else:\n raise ValueError(\"Supports only VF of 0 or 1\")\n\n lnKs = [0.0]*N\n\n size = N + 1\n errs = [0.0]*size\n comp_invs = [0.0]*N\n J = [[0.0]*size for i in range(size)]\n\n # We can arbitrarily increase this factor to weight the error of the composition higher\n comp_factor = 10\n comp_factor_inv = 1.0/comp_factor\n\n #J[N][N] = 0.0 as well\n JN = J[N]\n for i in cmps:\n JN[i] = -comp_factor\n\n s = 'dlnphis_d%s' %(iter_var)\n dlnphis_diter_var_iter = getattr(iter_phase.__class__, s)\n dlnphis_diter_var_const = getattr(const_phase.__class__, s)\n dlnphis_dzs = iter_phase.__class__.dlnphis_dzs\n\n info = []\n kwargs = {}\n kwargs[fixed_var] = fixed_val\n kwargs['V'] = None\n def to_solve_comp(iter_vals, jac=True):\n comp = iter_vals[:-1]\n iter_val = iter_vals[-1]\n\n kwargs[iter_var] = iter_val\n # print(f'Incipient phase composition = {comp}, iteration variable {iter_val}')\n p_iter = iter_phase.to(comp, **kwargs)\n p_const = const_phase.to(zs, **kwargs)\n\n lnphis_iter = p_iter.lnphis()\n lnphis_const = p_const.lnphis()\n for i in cmps:\n if zs[i] != 0.0:\n # for components with no error, no need to move in any direction\n comp_invs[i] = comp_inv = 1.0/comp[i]\n lnKs[i] = log(zs[i]*comp_inv)\n errs[i] = lnKs[i] - lnphis_iter[i] + lnphis_const[i]\n errs[-1] = comp_factor - comp_factor*sum(comp)\n\n if jac:\n dlnphis_dxs = dlnphis_dzs(p_iter)\n dlnphis_dprop_iter = dlnphis_diter_var_iter(p_iter)\n dlnphis_dprop_const = dlnphis_diter_var_const(p_const)\n for i in cmps:\n Ji = J[i]\n 
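                # (Note) Row i of the Jacobian follows from the residual\n                #     err_i = ln(z_i/w_i) - lnphis_iter[i](w) + lnphis_const[i](z)\n                # so d(err_i)/d(w_j) = -dlnphis_dxs[i][j] - delta_ij/w_i, while the\n                # last column holds the derivative with respect to the iteration\n                # variable (T or P):\n                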
Ji[-1] = dlnphis_dprop_const[i] - dlnphis_dprop_iter[i]\n                for j in cmps:\n                    Ji[j] = -dlnphis_dxs[i][j]\n                Ji[i] -= comp_invs[i]\n\n            info[:] = [p_iter, p_const, errs, J]\n            # print('Errs', errs)\n            # print('Jac', J)\n            return errs, J\n\n        return errs\n\n    low = [.0]*N\n    low.append(min_iter_val) # guess at minimum pressure\n    high = [1.0]*N\n    high.append(max_iter_val) # guess at maximum pressure\n\n\n    damping = 1.0\n    guesses = list(comp_guess)\n    guesses.append(guess)\n    if method == 'newton':\n        # numerical_j = jacobian(lambda g: to_solve_comp(g)[0], guesses, perturbation=1e-6, scalar=False)\n        # implemented_j = to_solve_comp(guesses)[1]\n        f_j, into, outof = translate_bound_f_jac(to_solve_comp, jac=True, low=low, high=high, as_np=True)\n        guesses_in = into(guesses)\n\n        # numerical_j = jacobian(lambda g: f_j(g)[0], guesses_in, perturbation=1e-6, scalar=False)\n        # implemented_j = f_j(guesses_in)[1]\n\n        comp_val, iterations = newton_system(f_j, guesses, jac=True,\n                                             xtol=xtol, damping=damping,\n                                             solve_func=py_solve,\n                                             line_search=True,\n                                             check_numbers=True,\n                                             # solve_func=lambda x, y:np.linalg.solve(x, y).tolist(),\n                                             # damping_func=damping_maintain_sign\n                                             )\n\n        comp_val = outof(comp_val)\n\n\n    # elif method == 'odeint':\n    #     # Not even close to working\n    #     # equations are hard\n    #     from scipy.integrate import odeint\n    #     def fun_and_jac(x, t):\n    #         x, j = to_solve_comp(x.tolist() + [t])\n    #         return np.array(x), np.array(j)\n    #     def fun(x, t):\n    #         x, j = to_solve_comp(x.tolist() +[t])\n    #         return np.array(x)\n    #     def jac(x, t):\n    #         x, j = to_solve_comp(x.tolist() + [t])\n    #         return np.array(j)\n\n    #     ans = odeint(func=fun, y0=np.array(guesses), t=np.linspace(guess, guess*2, 5), Dfun=jac)\n    #     return ans\n    else:\n        if opt_kwargs is None:\n            opt_kwargs = {}\n        # def fun_and_jac(x):\n        #     x, j = to_solve_comp(x.tolist())\n        #     return np.array(x), np.array(j)\n\n\n        f_j, into, outof = translate_bound_f_jac(to_solve_comp, jac=True, low=low, high=high, as_np=True)\n\n        ans = root(f_j, np.array(into(guesses)), jac=True, method=method, tol=xtol, **opt_kwargs)\n        comp_val = outof(ans['x']).tolist()\n        iterations = ans['nfev']\n\n    iter_val = comp_val[-1]\n    comp = comp_val[:-1]\n\n    comp_difference = 0.0\n    for i in cmps: comp_difference += abs(zs[i] - comp[i])\n\n    if comp_difference < trivial_solution_tol:\n        raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n\n    if iter_var == 'P' and iter_val > 1e10:\n        raise ValueError(\"Converged to unlikely point\")\n\n    sln = [iter_val, comp]\n    sln.append(info[0])\n    sln.append(info[1])\n    sln.append(iterations)\n    tot_err = 0.0\n    for err_i in info[2]:\n        tot_err += abs(err_i)\n    sln.append(tot_err)\n\n    if debug:\n        return sln, to_solve_comp\n    return sln\n\n\n\nl_undefined_T_msg = \"Could not calculate liquid conditions at provided temperature %s K (mole fractions %s)\"\ng_undefined_T_msg = \"Could not calculate vapor conditions at provided temperature %s K (mole fractions %s)\"\nl_undefined_P_msg = \"Could not calculate liquid conditions at provided pressure %s Pa (mole fractions %s)\"\ng_undefined_P_msg = \"Could not calculate vapor conditions at provided pressure %s Pa (mole fractions %s)\"\n\ndef dew_bubble_Michelsen_Mollerup(guess, fixed_val, zs, liquid_phase, gas_phase,\n                                  iter_var='T', fixed_var='P', V_over_F=1,\n                                  maxiter=200, xtol=1E-10, comp_guess=None,\n                                  max_step_damping=.25, guess_update_frequency=1,\n                                  trivial_solution_tol=1e-7, V_diff=.00002, damping=1.0,\n                                  hot_start=None):\n    # for near critical, V diff very wrong - .005 seen, both gas or both liquid\n    kwargs = 
{fixed_var: fixed_val}\n N = len(zs)\n cmps = range(N)\n comp_guess = zs if comp_guess is None else comp_guess\n damping_orig = damping\n\n step = 1e300 # default\n\n if V_over_F == 1.0:\n iter_phase, const_phase, bubble = liquid_phase, gas_phase, False\n elif V_over_F == 0.0:\n iter_phase, const_phase, bubble = gas_phase, liquid_phase, True\n else:\n raise ValueError(\"Supports only VF of 0 or 1\")\n if iter_var == 'T':\n if V_over_F == 1.0:\n iter_msg, const_msg = l_undefined_T_msg, g_undefined_T_msg\n else:\n iter_msg, const_msg = g_undefined_T_msg, l_undefined_T_msg\n elif iter_var == 'P':\n if V_over_F == 1.0:\n iter_msg, const_msg = l_undefined_P_msg, g_undefined_P_msg\n else:\n iter_msg, const_msg = g_undefined_P_msg, l_undefined_P_msg\n\n s = 'dlnphis_d%s' %(iter_var)\n dlnphis_diter_var_iter = getattr(iter_phase.__class__, s)\n dlnphis_diter_var_const = getattr(const_phase.__class__, s)\n\n skip = 0\n guess_old = None\n V_ratio, V_ratio_last = None, None\n V_iter_last, V_const_last = None, None\n expect_phase = 'g' if V_over_F == 0.0 else 'l'\n unwanted_phase = 'l' if expect_phase == 'g' else 'g'\n\n successive_fails = 0\n for iteration in range(maxiter):\n kwargs[iter_var] = guess\n try:\n const_phase = const_phase.to_TP_zs(zs=zs, **kwargs)\n lnphis_const = const_phase.lnphis()\n dlnphis_dvar_const = dlnphis_diter_var_const(const_phase)\n except Exception as e:\n if guess_old is None:\n raise ValueError(const_msg %(guess, zs), e)\n successive_fails += 1\n guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)\n continue\n try:\n skip -= 1\n iter_phase = iter_phase.to_TP_zs(zs=comp_guess, **kwargs)\n if V_diff is not None:\n V_iter, V_const = iter_phase.V(), const_phase.V()\n V_ratio = V_iter/V_const\n if 1.0 - V_diff < V_ratio < 1.0 + V_diff or skip > 0 or V_iter_last and (abs(min(V_iter, V_iter_last)/max(V_iter, V_iter_last)) < .8):\n # Relax the constraint for the iterating on variable so two different phases exist\n #if iter_phase.eos_mix.phase in ('l', 'g') and iter_phase.eos_mix.phase == const_phase.eos_mix.phase:\n # Alternatively, try a stability test here\n\n if iter_phase.eos_mix.phase == unwanted_phase:\n if skip < 0:\n skip = 4\n damping = .15\n if iter_var == 'P':\n split = min(iter_phase.eos_mix.P_discriminant_zeros()) # P_discriminant_zero_l\n if bubble:\n split *= 0.999999999\n else:\n split *= 1.000000001\n elif iter_var == 'T':\n split = iter_phase.eos_mix.T_discriminant_zero_l()\n if bubble:\n split *= 0.999999999\n else:\n split *= 1.000000001\n kwargs[iter_var] = guess = split\n iter_phase = iter_phase.to(zs=comp_guess, **kwargs)\n const_phase = const_phase.to(zs=zs, **kwargs)\n lnphis_const = const_phase.lnphis()\n dlnphis_dvar_const = dlnphis_diter_var_const(const_phase)\n # print('adj iter phase', split)\n elif const_phase.eos_mix.phase == expect_phase:\n if skip < 0:\n skip = 4\n damping = .15\n if iter_var == 'P':\n split = min(const_phase.eos_mix.P_discriminant_zeros())\n if bubble:\n split *= 0.999999999\n else:\n split *= 1.000000001\n elif iter_var == 'T':\n split = const_phase.eos_mix.T_discriminant_zero_l()\n if bubble:\n split *= 0.999999999\n else:\n split *= 1.000000001\n kwargs[iter_var] = guess = split\n const_phase = const_phase.to(zs=zs, **kwargs)\n lnphis_const = const_phase.lnphis()\n dlnphis_dvar_const = dlnphis_diter_var_const(const_phase)\n iter_phase = iter_phase.to(zs=comp_guess, **kwargs)\n # Also need to adjust the other phase to keep it in sync\n\n # print('adj const phase', split)\n\n lnphis_iter = 
iter_phase.lnphis()\n dlnphis_dvar_iter = dlnphis_diter_var_iter(iter_phase)\n except Exception as e:\n if guess_old is None:\n raise ValueError(iter_msg %(guess, zs), e)\n successive_fails += 1\n guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)\n continue\n\n\n if successive_fails > 2:\n raise ValueError(\"Stopped convergence procedure after multiple bad steps\")\n\n successive_fails = 0\n Ks = [exp(a - b) for a, b in zip(lnphis_const, lnphis_iter)]\n comp_guess = [zs[i]*Ks[i] for i in cmps]\n y_sum = sum(comp_guess)\n comp_guess = [y/y_sum for y in comp_guess]\n if iteration % guess_update_frequency: # or skip > 0\n continue\n elif skip == 0:\n damping = damping_orig\n\n f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0\n\n dfk_dvar = 0.0\n for i in cmps:\n dfk_dvar += zs[i]*Ks[i]*(dlnphis_dvar_const[i] - dlnphis_dvar_iter[i])\n\n guess_old = guess\n step = -f_k/dfk_dvar\n\n# if near_critical:\n adj_step = copysign(min(max_step_damping*guess, abs(step), abs(step)*damping), step)\n if guess + adj_step <= 0.0:\n adj_step *= 0.5\n guess = guess + adj_step\n# else:\n# guess = guess + step\n comp_difference = 0.0\n for i in cmps: comp_difference += abs(zs[i] - comp_guess[i])\n\n if comp_difference < trivial_solution_tol and iteration:\n for zi in zs:\n if zi == 1.0:\n # Turn off trivial check for pure components\n trivial_solution_tol = -1.0\n if comp_difference < trivial_solution_tol:\n raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n\n\n if abs(guess - guess_old) < xtol: #and not skip:\n guess = guess_old\n break\n if V_diff is not None:\n V_iter_last, V_const_last, V_ratio_last = V_iter, V_const, V_ratio\n\n if abs(guess - guess_old) > xtol:\n raise ValueError(\"Did not converge to specified tolerance\")\n return guess, comp_guess, iter_phase, const_phase, iteration, abs(guess - guess_old)\n\n\nl_undefined_T_msg = \"Could not calculate liquid conditions at provided temperature %s K (mole fractions %s)\"\ng_undefined_T_msg = \"Could not calculate vapor conditions at provided temperature %s K (mole fractions %s)\"\nl_undefined_P_msg = \"Could not calculate liquid conditions at provided pressure %s Pa (mole fractions %s)\"\ng_undefined_P_msg = \"Could not calculate vapor conditions at provided pressure %s Pa (mole fractions %s)\"\n\ndef existence_3P_Michelsen_Mollerup(guess, fixed_val, zs, iter_phase, liquid0, liquid1,\n iter_var='T', fixed_var='P',\n maxiter=200, xtol=1E-10, comp_guess=None,\n liquid0_comp=None, liquid1_comp=None,\n max_step_damping=.25, SS_tol=1e-10,\n trivial_solution_tol=1e-7, damping=1.0,\n beta=0.5):\n # For convenience call the two phases that exist already liquid0, liquid1\n # But one of them can be a gas, solid, etc.\n kwargs = {fixed_var: fixed_val}\n N = len(zs)\n cmps = range(N)\n comp_guess = zs if comp_guess is None else comp_guess\n damping_orig = damping\n step = 1e300 # default\n\n if iter_var == 'T':\n iter_msg, const_msg = g_undefined_T_msg, l_undefined_T_msg\n elif iter_var == 'P':\n iter_msg, const_msg = g_undefined_P_msg, l_undefined_P_msg\n\n s = 'dlnphis_d%s' %(iter_var)\n dlnphis_diter_var_iter = getattr(iter_phase.__class__, s)\n dlnphis_diter_var_liquid0 = getattr(liquid0.__class__, s)\n# dlnphis_diter_var_liquid1 = getattr(liquid1.__class__, s)\n\n skip = 0\n guess_old = None\n\n
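# NOTE (added explanation, not in the original source): each pass below does a\n # Michelsen-Mollerup Newton update of the iterated variable using the incipient\n # phase (comp_guess) against liquid0, then one successive-substitution flash\n # between liquid0 and liquid1 to update their compositions and the fraction beta.\n err_VF = err_SS = 1e100 # added guard: keep these defined if the loop exits without converging\n successive_fails = 0\n for iteration in range(maxiter):\n kwargs[iter_var] = guess\n try:\n liquid0 = liquid0.to_TP_zs(zs=liquid0_comp, **kwargs)\n lnphis_liquid0 = liquid0.lnphis()\n dlnphis_dvar_liquid0 = 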
dlnphis_diter_var_liquid0(liquid0)\n except Exception as e:\n if guess_old is None:\n raise ValueError(const_msg %(guess, liquid0_comp), e)\n successive_fails += 1\n guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)\n continue\n try:\n liquid1 = liquid1.to_TP_zs(zs=liquid1_comp, **kwargs)\n lnphis_liquid1 = liquid1.lnphis()\n# dlnphis_dvar_liquid1 = dlnphis_diter_var_liquid1(liquid1)\n except Exception as e:\n if guess_old is None:\n raise ValueError(const_msg %(guess, liquid1_comp), e)\n successive_fails += 1\n guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)\n continue\n try:\n iter_phase = iter_phase.to_TP_zs(zs=comp_guess, **kwargs)\n lnphis_iter = iter_phase.lnphis()\n dlnphis_dvar_iter = dlnphis_diter_var_iter(iter_phase)\n except Exception as e:\n if guess_old is None:\n raise ValueError(iter_msg %(guess, zs), e)\n successive_fails += 1\n guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)\n continue\n\n\n if successive_fails > 2:\n raise ValueError(\"Stopped convergence procedure after multiple bad steps\")\n\n successive_fails = 0\n Ks = [exp(a - b) for a, b in zip(lnphis_liquid0, lnphis_iter)]\n comp_guess = [liquid0_comp[i]*Ks[i] for i in cmps]\n y_sum_inv = 1.0/sum(comp_guess)\n comp_guess = [y*y_sum_inv for y in comp_guess]\n\n f_k = sum([liquid0_comp[i]*Ks[i] for i in cmps]) - 1.0\n\n dfk_dvar = 0.0\n for i in cmps:\n dfk_dvar += liquid0_comp[i]*Ks[i]*(dlnphis_dvar_liquid0[i] - dlnphis_dvar_iter[i])\n\n guess_old = guess\n step = -f_k/dfk_dvar\n\n adj_step = copysign(min(max_step_damping*guess, abs(step), abs(step)*damping), step)\n if guess + adj_step <= 0.0:\n adj_step *= 0.5\n guess = guess + adj_step\n\n comp_difference = 0.0\n for i in cmps:\n comp_difference += abs(liquid0_comp[i] - comp_guess[i])\n\n if comp_difference < trivial_solution_tol and iteration:\n raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n\n # Do the SS part for the two phases\n try:\n Ks_SS = [exp(lnphis_liquid0[i] - lnphis_liquid1[i]) for i in cmps]\n except OverflowError:\n Ks_SS = [trunc_exp(lnphis_liquid0[i] - lnphis_liquid1[i]) for i in cmps]\n beta, liquid0_comp_new, liquid1_comp_new = flash_inner_loop(zs, Ks_SS, guess=beta)\n\n for xi in liquid0_comp_new:\n if xi < 0.0:\n xs_new_sum_inv = 1.0/sum(abs(i) for i in liquid0_comp_new)\n for i in cmps:\n liquid0_comp_new[i] = abs(liquid0_comp_new[i])*xs_new_sum_inv\n break\n for xi in liquid1_comp_new:\n if xi < 0.0:\n xs_new_sum_inv = 1.0/sum(abs(i) for i in liquid1_comp_new)\n for i in cmps:\n liquid1_comp_new[i] = abs(liquid1_comp_new[i])*xs_new_sum_inv\n break\n err_SS = 0.0\n try:\n for Ki, xi, yi in zip(Ks_SS, liquid0_comp, liquid1_comp):\n err_i = Ki*xi/yi - 1.0\n err_SS += err_i*err_i\n except ZeroDivisionError:\n err_SS = 0.0\n for Ki, xi, yi in zip(Ks, liquid0_comp, liquid1_comp):\n try:\n err_i = Ki*xi/yi - 1.0\n err_SS += err_i*err_i\n except ZeroDivisionError:\n pass\n\n liquid0_comp, liquid1_comp = liquid0_comp_new, liquid1_comp_new\n if abs(guess - guess_old) < xtol and err_SS < SS_tol:\n err_VF = abs(guess - guess_old)\n guess = guess_old\n break\n\n\n if abs(guess - guess_old) > xtol:\n raise ValueError(\"Did not converge to specified tolerance\")\n\n return guess, [iter_phase, liquid0, liquid1], [0.0, 1.0-beta, beta], err_VF, err_SS, iteration\n\n\ndef bubble_T_Michelsen_Mollerup(T_guess, P, zs, liquid_phase, gas_phase,\n maxiter=200, xtol=1E-10, ys_guess=None,\n 
max_step_damping=5.0, T_update_frequency=1,\n trivial_solution_tol=1e-4):\n N = len(zs)\n cmps = range(N)\n ys = zs if ys_guess is None else ys_guess\n\n\n step = 1e300 # initialize to dummy value\n\n\n T_guess_old = None\n successive_fails = 0\n for iteration in range(maxiter):\n try:\n g = gas_phase.to_TP_zs(T=T_guess, P=P, zs=ys)\n lnphis_g = g.lnphis()\n dlnphis_dT_g = g.dlnphis_dT()\n except Exception as e:\n if T_guess_old is None:\n raise ValueError(g_undefined_T_msg %(T_guess, ys), e)\n successive_fails += 1\n T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)\n continue\n\n try:\n l = liquid_phase.to_TP_zs(T=T_guess, P=P, zs=zs)\n lnphis_l = l.lnphis()\n dlnphis_dT_l = l.dlnphis_dT()\n except Exception as e:\n if T_guess_old is None:\n raise ValueError(l_undefined_T_msg %(T_guess, zs), e)\n successive_fails += 1\n T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)\n continue\n\n if successive_fails > 2:\n raise ValueError(\"Stopped convergence procedure after multiple bad steps\")\n\n successive_fails = 0\n Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]\n ys = [zs[i]*Ks[i] for i in cmps]\n if iteration % T_update_frequency:\n continue\n\n f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0\n\n dfk_dT = 0.0\n for i in cmps:\n dfk_dT += zs[i]*Ks[i]*(dlnphis_dT_l[i] - dlnphis_dT_g[i])\n\n T_guess_old = T_guess\n step = -f_k/dfk_dT\n\n\n# if near_critical:\n T_guess = T_guess + copysign(min(max_step_damping, abs(step)), step)\n# else:\n# T_guess = T_guess + step\n\n\n comp_difference = sum([abs(zi - yi) for zi, yi in zip(zs, ys)])\n if comp_difference < trivial_solution_tol:\n raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n\n y_sum = sum(ys)\n ys = [y/y_sum for y in ys]\n\n if abs(T_guess - T_guess_old) < xtol:\n T_guess = T_guess_old\n break\n\n if abs(T_guess - T_guess_old) > xtol:\n raise ValueError(\"Did not converge to specified tolerance\")\n return T_guess, ys, l, g, iteration, abs(T_guess - T_guess_old)\n\n\ndef dew_T_Michelsen_Mollerup(T_guess, P, zs, liquid_phase, gas_phase,\n maxiter=200, xtol=1E-10, xs_guess=None,\n max_step_damping=5.0, T_update_frequency=1,\n trivial_solution_tol=1e-4):\n N = len(zs)\n cmps = range(N)\n xs = zs if xs_guess is None else xs_guess\n step = 1e300 # default\n\n\n T_guess_old = None\n successive_fails = 0\n for iteration in range(maxiter):\n try:\n g = gas_phase.to_TP_zs(T=T_guess, P=P, zs=zs)\n lnphis_g = g.lnphis()\n dlnphis_dT_g = g.dlnphis_dT()\n except Exception as e:\n if T_guess_old is None:\n raise ValueError(g_undefined_T_msg %(T_guess, zs), e)\n successive_fails += 1\n T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)\n continue\n\n try:\n l = liquid_phase.to_TP_zs(T=T_guess, P=P, zs=xs)\n lnphis_l = l.lnphis()\n dlnphis_dT_l = l.dlnphis_dT()\n except Exception as e:\n if T_guess_old is None:\n raise ValueError(l_undefined_T_msg %(T_guess, xs), e)\n successive_fails += 1\n T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)\n continue\n\n if successive_fails > 2:\n raise ValueError(\"Stopped convergence procedure after multiple bad steps\")\n\n successive_fails = 0\n Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]\n xs = [zs[i]/Ks[i] for i in cmps]\n if iteration % T_update_frequency:\n continue\n\n\n f_k = sum(xs) - 1.0\n\n dfk_dT = 0.0\n for i in cmps:\n dfk_dT += xs[i]*(dlnphis_dT_g[i] - dlnphis_dT_l[i])\n\n T_guess_old = T_guess\n step = -f_k/dfk_dT\n\n\n# if near_critical:\n T_guess = T_guess + 
copysign(min(max_step_damping, abs(step)), step)\n# else:\n# T_guess = T_guess + step\n\n\n comp_difference = sum([abs(zi - xi) for zi, xi in zip(zs, xs)])\n if comp_difference < trivial_solution_tol:\n raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n\n x_sum = sum(xs)\n xs = [x/x_sum for x in xs]\n\n if abs(T_guess - T_guess_old) < xtol:\n T_guess = T_guess_old\n break\n\n if abs(T_guess - T_guess_old) > xtol:\n raise ValueError(\"Did not converge to specified tolerance\")\n return T_guess, xs, l, g, iteration, abs(T_guess - T_guess_old)\n\ndef bubble_P_Michelsen_Mollerup(P_guess, T, zs, liquid_phase, gas_phase,\n maxiter=200, xtol=1E-10, ys_guess=None,\n max_step_damping=1e5, P_update_frequency=1,\n trivial_solution_tol=1e-4):\n N = len(zs)\n cmps = range(N)\n ys = zs if ys_guess is None else ys_guess\n step = 1e300 # default\n\n\n P_guess_old = None\n successive_fails = 0\n for iteration in range(maxiter):\n try:\n g = gas_phase = gas_phase.to_TP_zs(T=T, P=P_guess, zs=ys)\n lnphis_g = g.lnphis()\n dlnphis_dP_g = g.dlnphis_dP()\n except Exception as e:\n if P_guess_old is None:\n raise ValueError(g_undefined_P_msg %(P_guess, ys), e)\n successive_fails += 1\n P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)\n continue\n\n try:\n l = liquid_phase = liquid_phase.to_TP_zs(T=T, P=P_guess, zs=zs)\n lnphis_l = l.lnphis()\n dlnphis_dP_l = l.dlnphis_dP()\n except Exception as e:\n if P_guess_old is None:\n raise ValueError(l_undefined_P_msg %(P_guess, zs), e)\n successive_fails += 1\n P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)\n continue\n\n if successive_fails > 2:\n raise ValueError(\"Stopped convergence procedure after multiple bad steps\")\n\n successive_fails = 0\n Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]\n ys = [zs[i]*Ks[i] for i in cmps]\n if iteration % P_update_frequency:\n continue\n\n f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0\n\n dfk_dP = 0.0\n for i in cmps:\n dfk_dP += zs[i]*Ks[i]*(dlnphis_dP_l[i] - dlnphis_dP_g[i])\n\n P_guess_old = P_guess\n step = -f_k/dfk_dP\n\n P_guess = P_guess + copysign(min(max_step_damping, abs(step)), step)\n\n\n comp_difference = sum([abs(zi - yi) for zi, yi in zip(zs, ys)])\n if comp_difference < trivial_solution_tol:\n raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n\n y_sum = sum(ys)\n ys = [y/y_sum for y in ys]\n\n if abs(P_guess - P_guess_old) < xtol:\n P_guess = P_guess_old\n break\n\n if abs(P_guess - P_guess_old) > xtol:\n raise ValueError(\"Did not converge to specified tolerance\")\n return P_guess, ys, l, g, iteration, abs(P_guess - P_guess_old)\n\n\ndef dew_P_Michelsen_Mollerup(P_guess, T, zs, liquid_phase, gas_phase,\n maxiter=200, xtol=1E-10, xs_guess=None,\n max_step_damping=1e5, P_update_frequency=1,\n trivial_solution_tol=1e-4):\n N = len(zs)\n cmps = range(N)\n xs = zs if xs_guess is None else xs_guess\n\n step = 1e300 # default\n\n P_guess_old = None\n successive_fails = 0\n for iteration in range(maxiter):\n try:\n g = gas_phase = gas_phase.to_TP_zs(T=T, P=P_guess, zs=zs)\n lnphis_g = g.lnphis()\n dlnphis_dP_g = g.dlnphis_dP()\n except Exception as e:\n if P_guess_old is None:\n raise ValueError(g_undefined_P_msg %(P_guess, zs), e)\n successive_fails += 1\n P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)\n continue\n\n try:\n l = liquid_phase = liquid_phase.to_TP_zs(T=T, P=P_guess, zs=xs)\n lnphis_l = l.lnphis()\n dlnphis_dP_l = l.dlnphis_dP()\n except Exception as 
e:\n if P_guess_old is None:\n raise ValueError(l_undefined_P_msg %(P_guess, xs), e)\n successive_fails += 1\n P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)\n continue\n\n if successive_fails > 2:\n raise ValueError(\"Stopped convergence procedure after multiple bad steps\")\n\n successive_fails = 0\n Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]\n xs = [zs[i]/Ks[i] for i in cmps]\n if iteration % P_update_frequency:\n continue\n\n f_k = sum(xs) - 1.0\n\n dfk_dP = 0.0\n for i in cmps:\n dfk_dP += xs[i]*(dlnphis_dP_g[i] - dlnphis_dP_l[i])\n\n P_guess_old = P_guess\n step = -f_k/dfk_dP\n\n P_guess = P_guess + copysign(min(max_step_damping, abs(step)), step)\n\n\n comp_difference = sum([abs(zi - xi) for zi, xi in zip(zs, xs)])\n if comp_difference < trivial_solution_tol:\n raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n\n x_sum_inv = 1.0/sum(xs)\n xs = [x*x_sum_inv for x in xs]\n\n if abs(P_guess - P_guess_old) < xtol:\n P_guess = P_guess_old\n break\n\n if abs(P_guess - P_guess_old) > xtol:\n raise ValueError(\"Did not converge to specified tolerance\")\n return P_guess, xs, l, g, iteration, abs(P_guess - P_guess_old)\n\n\n# spec, iter_var, fixed_var\nstrs_to_ders = {('H', 'T', 'P'): 'dH_dT_P',\n ('S', 'T', 'P'): 'dS_dT_P',\n ('G', 'T', 'P'): 'dG_dT_P',\n ('U', 'T', 'P'): 'dU_dT_P',\n ('A', 'T', 'P'): 'dA_dT_P',\n\n ('H', 'T', 'V'): 'dH_dT_V',\n ('S', 'T', 'V'): 'dS_dT_V',\n ('G', 'T', 'V'): 'dG_dT_V',\n ('U', 'T', 'V'): 'dU_dT_V',\n ('A', 'T', 'V'): 'dA_dT_V',\n\n ('H', 'P', 'T'): 'dH_dP_T',\n ('S', 'P', 'T'): 'dS_dP_T',\n ('G', 'P', 'T'): 'dG_dP_T',\n ('U', 'P', 'T'): 'dU_dP_T',\n ('A', 'P', 'T'): 'dA_dP_T',\n\n ('H', 'P', 'V'): 'dH_dP_V',\n ('S', 'P', 'V'): 'dS_dP_V',\n ('G', 'P', 'V'): 'dG_dP_V',\n ('U', 'P', 'V'): 'dU_dP_V',\n ('A', 'P', 'V'): 'dA_dP_V',\n\n ('H', 'V', 'T'): 'dH_dV_T',\n ('S', 'V', 'T'): 'dS_dV_T',\n ('G', 'V', 'T'): 'dG_dV_T',\n ('U', 'V', 'T'): 'dU_dV_T',\n ('A', 'V', 'T'): 'dA_dV_T',\n\n ('H', 'V', 'P'): 'dH_dV_P',\n ('S', 'V', 'P'): 'dS_dV_P',\n ('G', 'V', 'P'): 'dG_dV_P',\n ('U', 'V', 'P'): 'dU_dV_P',\n ('A', 'V', 'P'): 'dA_dV_P',\n}\n\n\nmultiple_solution_sets = {('T', 'S'), ('T', 'H'), ('T', 'U'), ('T', 'A'), ('T', 'G'),\n ('S', 'T'), ('H', 'T'), ('U', 'T'), ('A', 'T'), ('G', 'T'),\n }\n\ndef TPV_solve_HSGUA_1P(zs, phase, guess, fixed_var_val, spec_val,\n iter_var='T', fixed_var='P', spec='H',\n maxiter=200, xtol=1E-10, ytol=None, fprime=False,\n minimum_progress=0.3, oscillation_detection=True,\n bounded=False, min_bound=None, max_bound=None,\n multi_solution=False, spec_fun=None):\n r'''Solve a single-phase flash where one of `T`, `P`, or `V` is specified\n and one of `H`, `S`, `G`, `U`, or `A` is also specified. 
The iteration\n (changed input variable) variable must be specified as one of `T`, `P`,\n or `V`, but it cannot be the same as the fixed variable.\n\n This method is a secant- or newton-based solution method, optionally with\n oscillation detection to bail out of trying to solve the problem, to handle\n the case where the spec cannot be met because of a phase change (as in a\n cubic eos case).\n\n Parameters\n ----------\n zs : list[float]\n Mole fractions of the phase, [-]\n phase : `Phase`\n The phase object of the mixture, containing the information for\n calculating properties at new conditions, [-]\n guess : float\n The guessed value for the iteration variable,\n [K or Pa or m^3/mol]\n fixed_var_val : float\n The specified value of the fixed variable (one of T, P, or V);\n [K or Pa, or m^3/mol]\n spec_val : float\n The specified value of H, S, G, U, or A, [J/(mol*K) or J/mol]\n iter_var : str\n One of 'T', 'P', 'V', [-]\n fixed_var : str\n One of 'T', 'P', 'V', [-]\n spec : str\n One of 'H', 'S', 'G', 'U', 'A', [-]\n maxiter : int\n Maximum number of iterations, [-]\n xtol : float\n Tolerance for secant-style convergence of the iteration variable,\n [K or Pa, or m^3/mol]\n ytol : float or None\n Tolerance for convergence of the spec variable,\n [J/(mol*K) or J/mol]\n\n Returns\n -------\n iter_var_val, phase, iterations, err\n\n Notes\n -----\n\n '''\n # Needs lots of work but the idea is here\n # Can iterate changing any of T, P, V with a fixed other T, P, V to meet any\n # H S G U A spec.\n store = []\n global iterations\n iterations = 0\n if fixed_var == iter_var:\n raise ValueError(\"Fixed variable cannot be the same as iteration variable\")\n if fixed_var not in ('T', 'P', 'V'):\n raise ValueError(\"Fixed variable must be one of `T`, `P`, `V`\")\n if iter_var not in ('T', 'P', 'V'):\n raise ValueError(\"Iteration variable must be one of `T`, `P`, `V`\")\n # Little point in enforcing the spec - might want to repurpose the function later\n if spec not in ('H', 'S', 'G', 'U', 'A'):\n raise ValueError(\"Spec variable must be one of `H`, `S`, `G`, `U`, `A`\")\n\n multiple_solutions = (fixed_var, spec) in multiple_solution_sets\n\n phase_kwargs = {fixed_var: fixed_var_val, 'zs': zs}\n spec_getter = getattr(phase.__class__, spec)\n if spec_fun is not None:\n fprime = False\n# print('spec_getter', spec_getter)\n if fprime:\n try:\n # Gotta be a lookup by (spec, iter_var, fixed_var)\n der_attr = strs_to_ders[(spec, iter_var, fixed_var)]\n except KeyError:\n der_attr = 'd' + spec + '_d' + iter_var\n der_attr_fun = getattr(phase.__class__, der_attr)\n# print('der_attr_fun', der_attr_fun)\n def to_solve(guess, solved_phase=None):\n global iterations\n iterations += 1\n\n if solved_phase is not None:\n p = solved_phase\n else:\n phase_kwargs[iter_var] = guess\n p = phase.to(**phase_kwargs)\n if spec_fun is not None:\n err = spec_getter(p) - spec_fun(p)\n else:\n err = spec_getter(p) - spec_val\n# err = (spec_getter(p) - spec_val)/spec_val\n store[:] = (p, err)\n if fprime:\n# print([err, guess, p.eos_mix.phase, der_attr])\n derr = der_attr_fun(p)\n# derr = der_attr_fun(p)/spec_val\n return err, derr\n# print(err)\n return err\n\n
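# NOTE (added explanation, not in the original source): to_solve closes over\n # phase_kwargs and store; for example with iter_var='T', fixed_var='P', spec='H',\n # each call evaluates p = phase.to(T=guess, P=fixed_var_val, zs=zs) and returns\n # p.H() - spec_val (plus dH/dT at constant P when fprime is True).\n arg_fprime = fprime\n high = None # Optional and not often used bound for newton\n if fixed_var == 'V':\n if iter_var == 'T':\n max_phys = phase.T_max_at_V(fixed_var_val)\n elif iter_var == 'P':\n max_phys = phase.P_max_at_V(fixed_var_val)\n if max_phys is not None:\n if max_bound is None:\n max_bound = high = max_phys\n else:\n max_bound = high = min(max_phys, 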
max_bound)\n\n # TV iterations\n ignore_bound_fail = (fixed_var == 'T' and iter_var == 'P')\n\n if fixed_var in ('T',) and ((fixed_var == 'T' and iter_var == 'P') or (fixed_var == 'P' and iter_var == 'T') or (fixed_var == 'T' and iter_var == 'V') ) and 1:\n try:\n fprime = False\n if iter_var == 'V':\n dummy_iter = 1e8\n else:\n dummy_iter = guess\n phase_kwargs[iter_var] = dummy_iter # Dummy pressure does not matter\n phase_temp = phase.to(**phase_kwargs)\n\n lower_phase, higher_phase = None, None\n delta = 1e-9\n if fixed_var == 'T' and iter_var == 'P':\n transitions = phase_temp.P_transitions()\n # assert len(transitions) == 1\n under_trans, above_trans = transitions[0] * (1.0 - delta), transitions[0] * (1.0 + delta)\n elif fixed_var == 'P' and iter_var == 'T':\n transitions = phase_temp.T_transitions()\n under_trans, above_trans = transitions[0] * (1.0 - delta), transitions[0] * (1.0 + delta)\n assert len(transitions) == 1\n\n elif fixed_var == 'T' and iter_var == 'V':\n transitions = phase_temp.P_transitions()\n delta = 1e-11\n # not_separated = True\n # while not_separated:\n P_higher = transitions[0]*(1.0 + delta) # Dummy pressure does not matter\n lower_phase = phase.to(T=fixed_var_val, zs=zs, P=P_higher)\n P_lower = transitions[0]*(1.0 - delta) # Dummy pressure does not matter\n higher_phase = phase.to(T=fixed_var_val, zs=zs, P=P_lower)\n under_trans, above_trans = lower_phase.V(), higher_phase.V()\n not_separated = isclose(under_trans, above_trans, rel_tol=1e-3)\n # delta *= 10\n\n # TODO is it possible to evaluate each limit at once, so half the work is avoided?\n\n bracketed_high, bracketed_low = False, False\n if min_bound is not None:\n f_min = to_solve(min_bound)\n f_low_trans = to_solve(under_trans, lower_phase)\n if f_min*f_low_trans <= 0.0:\n bracketed_low = True\n bounding_pair = (min(min_bound, under_trans), max(min_bound, under_trans))\n if max_bound is not None and (not bracketed_low or multiple_solutions):\n f_max = to_solve(max_bound)\n f_max_trans = to_solve(above_trans, higher_phase)\n if f_max*f_max_trans <= 0.0:\n bracketed_high = True\n bounding_pair = (min(max_bound, above_trans), max(max_bound, above_trans))\n\n if min_bound is not None and max_bound is not None and not bracketed_low and not bracketed_high:\n if not ignore_bound_fail:\n raise NotBoundedError(\"Between phases\")\n\n if bracketed_high or bracketed_low:\n oscillation_detection = False\n high = bounding_pair[1] # restrict newton/secant just in case\n min_bound, max_bound = bounding_pair\n if not (min_bound < guess < max_bound):\n guess = 0.5*(min_bound + max_bound)\n else:\n if min_bound is not None and transitions[0] < min_bound and not ignore_bound_fail:\n raise NotBoundedError(\"Not likely to bound\")\n if max_bound is not None and transitions[0] > max_bound and not ignore_bound_fail:\n raise NotBoundedError(\"Not likely to bound\")\n\n\n\n except NotBoundedError as e:\n raise e\n except Exception:\n pass\n\n fprime = arg_fprime\n\n # Plot the objective function\n # tests = logspace(log10(10.6999), log10(10.70005), 15000)\n # tests = logspace(log10(10.6), log10(10.8), 15000)\n # tests = logspace(log10(min_bound), log10(max_bound), 1500)\n # if fprime:\n # values = [to_solve(t)[0] for t in tests]\n # else:\n # values = [to_solve(t) for t in tests]\n # values = [abs(t) for t in values]\n # import matplotlib.pyplot as plt\n # plt.loglog(tests, values, 'x')\n # plt.show()\n\n
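# NOTE (added explanation, not in the original source): when the spec lies across\n # a phase transition of the model, the objective is discontinuous there; the block\n # above attempts to bracket the root between a user bound and the transition point\n # so a bracketed solver can be used. The oscillation checker below bails out of\n # secant/newton runs that bounce across such a discontinuity without progress.\n if oscillation_detection and ytol is not None:\n to_solve2, checker = oscillation_checking_wrapper(to_solve, full=True,\n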
minimum_progress=minimum_progress,\n good_err=ytol*1e6)\n else:\n to_solve2 = to_solve\n checker = None\n solve_bounded = False\n\n try:\n # All three variables P, T, V are positive but can grow unbounded, so\n # for the secant method, only set the one variable\n if fprime:\n iter_var_val = newton(to_solve2, guess, xtol=xtol, ytol=ytol, fprime=True,\n maxiter=maxiter, bisection=True, low=min_bound, high=high)\n else:\n iter_var_val = secant(to_solve2, guess, xtol=xtol, ytol=ytol,\n maxiter=maxiter, bisection=True, low=min_bound, high=high)\n except (UnconvergedError, OscillationError, NotBoundedError):\n solve_bounded = True\n # Unconverged - from newton/secant; oscillation - from the oscillation detector;\n # NotBounded - from when EOS needs to solve T and there is no solution\n fprime = False\n if solve_bounded:\n if bounded and min_bound is not None and max_bound is not None:\n if checker:\n min_bound_prev, max_bound_prev, fa, fb = best_bounding_bounds(min_bound, max_bound,\n f=to_solve, xs_pos=checker.xs_pos, ys_pos=checker.ys_pos,\n xs_neg=checker.xs_neg, ys_neg=checker.ys_neg)\n if abs(min_bound_prev/max_bound_prev - 1.0) > 2.5e-4:\n # If the points are too close, odds are there is a discontinuity in the newton solution\n min_bound, max_bound = min_bound_prev, max_bound_prev\n# maxiter = 20\n else:\n fa, fb = None, None\n\n else:\n fa, fb = None, None\n\n # try:\n iter_var_val = brenth(to_solve, min_bound, max_bound, xtol=xtol,\n ytol=ytol, maxiter=maxiter, fa=fa, fb=fb)\n # except:\n # # Not sure at all if good idea\n # iter_var_val = secant(to_solve, guess, xtol=xtol, ytol=ytol,\n # maxiter=maxiter, bisection=True, low=min_bound)\n phase, err = store\n\n return iter_var_val, phase, iterations, err\n\n\n\ndef solve_PTV_HSGUA_1P(phase, zs, fixed_var_val, spec_val, fixed_var,\n spec, iter_var, constants, correlations, last_conv=None,\n oscillation_detection=True, guess_maxiter=50,\n guess_xtol=1e-7, maxiter=80, xtol=1e-10, spec_fun=None):\n # TODO: replace oscillation detection with bounding parameters and translation\n # The cost should be less.\n\n if iter_var == 'T':\n if isinstance(phase, CoolPropPhase):\n min_bound = phase.AS.Tmin()\n max_bound = phase.AS.Tmax()\n else:\n min_bound = phase.T_MIN_FIXED\n max_bound = phase.T_MAX_FIXED\n# if isinstance(phase, IAPWS95):\n# min_bound = 235.0\n# max_bound = 5000.0\n elif iter_var == 'P':\n min_bound = Phase.P_MIN_FIXED*(1.0 - 1e-12)\n max_bound = Phase.P_MAX_FIXED*(1.0 + 1e-12)\n if isinstance(phase, CoolPropPhase):\n AS = phase.AS\n max_bound = AS.pmax()*(1.0 - 1e-7)\n min_bound = AS.trivial_keyed_output(CPiP_min)*(1.0 + 1e-7)\n elif iter_var == 'V':\n min_bound = Phase.V_MIN_FIXED\n max_bound = Phase.V_MAX_FIXED\n if isinstance(phase, (CEOSLiquid, CEOSGas)):\n c2R = phase.eos_class.c2*R\n Tcs, Pcs = constants.Tcs, constants.Pcs\n b = sum([c2R*Tcs[i]*zs[i]/Pcs[i] for i in range(constants.N)])\n min_bound = b*(1.0 + 1e-15)\n\n if phase.is_gas:\n methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS, IG_ENTHALPY,\n LASTOVKA_SHAW]\n elif phase.is_liquid:\n methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS, IDEAL_LIQUID_ENTHALPY,\n DADGOSTAR_SHAW_1]\n else:\n methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS]\n\n for method in methods:\n try:\n guess = TPV_solve_HSGUA_guesses_1P(zs, method, constants, correlations,\n fixed_var_val, spec_val,\n iter_var=iter_var, fixed_var=fixed_var, spec=spec,\n maxiter=guess_maxiter, xtol=guess_xtol, ytol=abs(spec_val)*1e-5,\n bounded=True, min_bound=min_bound, max_bound=max_bound,\n user_guess=None, 
last_conv=last_conv, T_ref=298.15,\n P_ref=101325.0)\n\n break\n except Exception:\n pass\n\n ytol = 1e-8*abs(spec_val)\n\n if iter_var == 'T' and spec in ('S', 'H'):\n ytol = ytol/100\n if isinstance(phase, IAPWS95):\n # Objective function isn't quite as nice and smooth as desired\n ytol = None\n\n _, phase, iterations, err = TPV_solve_HSGUA_1P(zs, phase, guess, fixed_var_val=fixed_var_val, spec_val=spec_val, ytol=ytol,\n iter_var=iter_var, fixed_var=fixed_var, spec=spec, oscillation_detection=oscillation_detection,\n minimum_progress=1e-4, maxiter=maxiter, fprime=True, xtol=xtol,\n bounded=True, min_bound=min_bound, max_bound=max_bound, spec_fun=spec_fun)\n if isinstance(phase, IAPWS95) and abs(err) > 1e-4:\n raise ValueError(\"Bad solution found\")\n T, P = phase.T, phase.P\n return T, P, phase, iterations, err\n\ndef TPV_solve_HSGUA_guesses_1P(zs, method, constants, correlations,\n fixed_var_val, spec_val,\n iter_var='T', fixed_var='P', spec='H',\n maxiter=20, xtol=1E-7, ytol=None,\n bounded=False, min_bound=None, max_bound=None,\n user_guess=None, last_conv=None, T_ref=298.15,\n P_ref=101325.0):\n if fixed_var == iter_var:\n raise ValueError(\"Fixed variable cannot be the same as iteration variable\")\n if fixed_var not in ('T', 'P', 'V'):\n raise ValueError(\"Fixed variable must be one of `T`, `P`, `V`\")\n if iter_var not in ('T', 'P', 'V'):\n raise ValueError(\"Iteration variable must be one of `T`, `P`, `V`\")\n if spec not in ('H', 'S', 'G', 'U', 'A'):\n raise ValueError(\"Spec variable must be one of `H`, `S`, `G` `U`, `A`\")\n\n cmps = range(len(zs))\n\n iter_T = iter_var == 'T'\n iter_P = iter_var == 'P'\n iter_V = iter_var == 'V'\n\n fixed_P = fixed_var == 'P'\n fixed_T = fixed_var == 'T'\n fixed_V = fixed_var == 'V'\n\n always_S = spec in ('S', 'G', 'A')\n always_H = spec in ('H', 'G', 'U', 'A')\n always_V = spec in ('U', 'A')\n\n\n if always_S:\n P_ref_inv = 1.0/P_ref\n dS_ideal = R*sum([zi*log(zi) for zi in zs if zi > 0.0]) # ideal composition entropy composition\n\n def err(guess):\n # Translate the fixed variable to a local variable\n if fixed_P:\n P = fixed_var_val\n elif fixed_T:\n T = fixed_var_val\n elif fixed_V:\n V = fixed_var_val\n T = None\n # Translate the iteration variable to a local variable\n if iter_P:\n P = guess\n if not fixed_V:\n V = None\n elif iter_T:\n T = guess\n if not fixed_V:\n V = None\n elif iter_V:\n V = guess\n T = None\n\n if T is None:\n T = T_from_V(V, P)\n\n # Compute S, H, V as necessary\n if always_S:\n S = S_model(T, P) - dS_ideal - R*log(P*P_ref_inv)\n if always_H:\n H = H_model(T, P)\n if always_V and V is None:\n V = V_model(T, P)\n# print(H, S, V, 'hi')\n # Return the objective function\n if spec == 'H':\n err = H - spec_val\n elif spec == 'S':\n err = S - spec_val\n elif spec == 'G':\n err = (H - T*S) - spec_val\n elif spec == 'U':\n err = (H - P*V) - spec_val\n elif spec == 'A':\n err = (H - P*V - T*S) - spec_val\n# print(T, P, V, 'TPV', err)\n return err\n\n # Precompute some things depending on the method\n if method in (LASTOVKA_SHAW, DADGOSTAR_SHAW_1):\n MW = mixing_simple(zs, constants.MWs)\n n_atoms = [sum(i.values()) for i in constants.atomss]\n sv = mixing_simple(zs, n_atoms)/MW\n\n if method == IG_ENTHALPY:\n HeatCapacityGases = correlations.HeatCapacityGases\n def H_model(T, P=None):\n H_calc = 0.\n for i in cmps:\n H_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral(T_ref, T)\n return H_calc\n\n def S_model(T, P=None):\n S_calc = 0.\n for i in cmps:\n S_calc += 
zs[i]*HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T)\n return S_calc\n\n def V_model(T, P): return R*T/P\n def T_from_V(V, P): return P*V/R\n\n elif method == LASTOVKA_SHAW:\n H_ref = Lastovka_Shaw_integral(T_ref, sv)\n S_ref = Lastovka_Shaw_integral_over_T(T_ref, sv)\n\n def H_model(T, P=None):\n H1 = Lastovka_Shaw_integral(T, sv)\n dH = H1 - H_ref\n return property_mass_to_molar(dH, MW)\n\n def S_model(T, P=None):\n S1 = Lastovka_Shaw_integral_over_T(T, sv)\n dS = S1 - S_ref\n return property_mass_to_molar(dS, MW)\n\n def V_model(T, P): return R*T/P\n def T_from_V(V, P): return P*V/R\n\n elif method == DADGOSTAR_SHAW_1:\n Tc = mixing_simple(zs, constants.Tcs)\n omega = mixing_simple(zs, constants.omegas)\n H_ref = Dadgostar_Shaw_integral(T_ref, sv)\n S_ref = Dadgostar_Shaw_integral_over_T(T_ref, sv)\n\n def H_model(T, P=None):\n H1 = Dadgostar_Shaw_integral(T, sv)\n Hvap = SMK(T, Tc, omega)\n return (property_mass_to_molar(H1 - H_ref, MW) - Hvap)\n\n def S_model(T, P=None):\n S1 = Dadgostar_Shaw_integral_over_T(T, sv)\n dSvap = SMK(T, Tc, omega)/T\n return (property_mass_to_molar(S1 - S_ref, MW) - dSvap)\n\n Vc = mixing_simple(zs, constants.Vcs)\n def V_model(T, P=None): return COSTALD(T, Tc, Vc, omega)\n def T_from_V(V, P): return secant(lambda T: COSTALD(T, Tc, Vc, omega) - V, .65*Tc)\n\n elif method == IDEAL_LIQUID_ENTHALPY:\n HeatCapacityGases = correlations.HeatCapacityGases\n EnthalpyVaporizations = correlations.EnthalpyVaporizations\n def H_model(T, P=None):\n H_calc = 0.\n for i in cmps:\n H_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) - EnthalpyVaporizations[i](T))\n return H_calc\n\n def S_model(T, P=None):\n S_calc = 0.\n T_inv = 1.0/T\n for i in cmps:\n S_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) - T_inv*EnthalpyVaporizations[i](T))\n return S_calc\n\n VolumeLiquids = correlations.VolumeLiquids\n def V_model(T, P=None):\n V_calc = 0.\n for i in cmps:\n V_calc += zs[i]*VolumeLiquids[i].T_dependent_property(T)\n return V_calc\n def T_from_V(V, P):\n T_calc = 0.\n for i in cmps:\n T_calc += zs[i]*VolumeLiquids[i].solve_property(V)\n return T_calc\n\n\n # Simple return values - not going through a model\n if method == STP_T_GUESS:\n if iter_T:\n return 298.15\n elif iter_P:\n return 101325.0\n elif iter_V:\n return 0.024465403697038125\n elif method == LAST_CONVERGED:\n if last_conv is None:\n raise ValueError(\"No last converged\")\n return last_conv\n elif method == FIXED_GUESS:\n if user_guess is None:\n raise ValueError(\"No user guess\")\n return user_guess\n\n try:\n # All three variables P, T, V are positive but can grow unbounded, so\n # for the secant method, only set the one variable\n if iter_T:\n guess = 298.15\n elif iter_P:\n guess = 101325.0\n elif iter_V:\n guess = 0.024465403697038125\n return secant(err, guess, xtol=xtol, ytol=ytol,\n maxiter=maxiter, bisection=True, low=min_bound)\n except UnconvergedError:\n # G and A specs are NOT MONOTONIC and the brackets will likely NOT BRACKET\n # THE ROOTS!\n return brenth(err, min_bound, max_bound, xtol=xtol, ytol=ytol, maxiter=maxiter)\n\n
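# NOTE (added explanation, not in the original source): the guess models above\n # combine as G = H - T*S, U = H - P*V and A = H - P*V - T*S inside err(); e.g.\n # for spec='G' at fixed P with the ideal-gas model,\n # err(T) = H_model(T) - T*(S_model(T) - dS_ideal - R*log(P/P_ref)) - spec_val.\n\ndef PH_secant_1P(T_guess, P, H, zs, phase, maxiter=200, xtol=1E-10,\n minimum_progress=0.3, oscillation_detection=True):\n store = []\n global iterations\n iterations = 0\n def to_solve(T):\n global iterations\n iterations += 1\n p = phase.to_TP_zs(T, P, zs)\n\n err = p.H() - H\n store[:] = (p, err)\n return err\n if oscillation_detection:\n to_solve, checker = oscillation_checking_wrapper(to_solve, 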
full=True,\n minimum_progress=minimum_progress)\n\n T = secant(to_solve, T_guess, xtol=xtol, maxiter=maxiter)\n phase, err = store\n\n return T, phase, iterations, err\n\ndef PH_newton_1P(T_guess, P, H, zs, phase, maxiter=200, xtol=1E-10,\n minimum_progress=0.3, oscillation_detection=True):\n store = []\n global iterations\n iterations = 0\n def to_solve(T):\n global iterations\n iterations += 1\n p = phase.to_TP_zs(T, P, zs)\n\n err = p.H() - H\n derr_dT = p.dH_dT()\n store[:] = (p, err)\n return err, derr_dT\n if oscillation_detection:\n to_solve, checker = oscillation_checking_wrapper(to_solve, full=True,\n minimum_progress=minimum_progress)\n\n T = newton(to_solve, T_guess, fprime=True, xtol=xtol, maxiter=maxiter)\n phase, err = store\n\n return T, phase, iterations, err\n\n\n\n\ndef TVF_pure_newton(P_guess, T, liquids, gas, maxiter=200, xtol=1E-10):\n one_liquid = len(liquids) == 1\n zs = [1.0]\n store = []\n global iterations\n iterations = 0\n def to_solve_newton(P):\n global iterations\n iterations += 1\n g = gas.to_TP_zs(T, P, zs)\n fugacity_gas = g.fugacities()[0]\n dfugacities_dP_gas = g.dfugacities_dP()[0]\n\n if one_liquid:\n lowest_phase = liquids[0].to_TP_zs(T, P, zs)\n else:\n ls = [l.to_TP_zs(T, P, zs) for l in liquids]\n G_min, lowest_phase = 1e100, None\n for l in ls:\n G = l.G()\n if G < G_min:\n G_min, lowest_phase = G, l\n\n fugacity_liq = lowest_phase.fugacities()[0]\n dfugacities_dP_liq = lowest_phase.dfugacities_dP()[0]\n\n err = fugacity_liq - fugacity_gas\n derr_dP = dfugacities_dP_liq - dfugacities_dP_gas\n store[:] = (lowest_phase, g, err)\n return err, derr_dP\n Psat = newton(to_solve_newton, P_guess, xtol=xtol, maxiter=maxiter,\n low=Phase.P_MIN_FIXED,\n require_eval=True, bisection=False, fprime=True)\n l, g, err = store\n\n return Psat, l, g, iterations, err\n\ndef TVF_pure_secant(P_guess, T, liquids, gas, maxiter=200, xtol=1E-10):\n one_liquid = len(liquids) == 1\n zs = [1.0]\n store = []\n global iterations\n iterations = 0\n def to_solve_secant(P):\n global iterations\n iterations += 1\n g = gas.to_TP_zs(T, P, zs)\n fugacity_gas = g.fugacities()[0]\n\n if one_liquid:\n lowest_phase = liquids[0].to_TP_zs(T, P, zs)\n else:\n ls = [l.to_TP_zs(T, P, zs) for l in liquids]\n G_min, lowest_phase = 1e100, None\n for l in ls:\n G = l.G()\n if G < G_min:\n G_min, lowest_phase = G, l\n\n fugacity_liq = lowest_phase.fugacities()[0]\n\n err = fugacity_liq - fugacity_gas\n store[:] = (lowest_phase, g, err)\n return err\n if P_guess < Phase.P_MIN_FIXED:\n raise ValueError(\"Too low.\")\n # if P_guess < Phase.P_MIN_FIXED:\n # low = None\n # else:\n # low = Phase.P_MIN_FIXED\n Psat = secant(to_solve_secant, P_guess, xtol=xtol, maxiter=maxiter, low=Phase.P_MIN_FIXED*(1-1e-10))\n l, g, err = store\n\n return Psat, l, g, iterations, err\n\n\ndef PVF_pure_newton(T_guess, P, liquids, gas, maxiter=200, xtol=1E-10):\n one_liquid = len(liquids) == 1\n zs = [1.0]\n store = []\n global iterations\n iterations = 0\n def to_solve_newton(T):\n global iterations\n iterations += 1\n g = gas.to_TP_zs(T, P, zs)\n fugacity_gas = g.fugacities()[0]\n dfugacities_dT_gas = g.dfugacities_dT()[0]\n\n if one_liquid:\n lowest_phase = liquids[0].to_TP_zs(T, P, zs)\n else:\n ls = [l.to_TP_zs(T, P, zs) for l in liquids]\n G_min, lowest_phase = 1e100, None\n for l in ls:\n G = l.G()\n if G < G_min:\n G_min, lowest_phase = G, l\n\n fugacity_liq = lowest_phase.fugacities()[0]\n dfugacities_dT_liq = lowest_phase.dfugacities_dT()[0]\n\n err = fugacity_liq - fugacity_gas\n
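# NOTE (added explanation, not in the original source): the pure-species saturation\n # condition is equal fugacities, f_liq(T) = f_gas(T); derr_dT below is the\n # analytical temperature derivative of that residual for the Newton update.\n derr_dT = dfugacities_dT_liq - 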
dfugacities_dT_gas\n store[:] = (lowest_phase, g, err)\n return err, derr_dT\n Tsat = newton(to_solve_newton, T_guess, xtol=xtol, maxiter=maxiter,\n low=Phase.T_MIN_FIXED,\n require_eval=True, bisection=False, fprime=True)\n l, g, err = store\n\n return Tsat, l, g, iterations, err\n\n\ndef PVF_pure_secant(T_guess, P, liquids, gas, maxiter=200, xtol=1E-10):\n one_liquid = len(liquids) == 1\n zs = [1.0]\n store = []\n global iterations\n iterations = 0\n def to_solve_secant(T):\n global iterations\n iterations += 1\n g = gas.to_TP_zs(T, P, zs)\n fugacity_gas = g.fugacities()[0]\n\n if one_liquid:\n lowest_phase = liquids[0].to_TP_zs(T, P, zs)\n else:\n ls = [l.to_TP_zs(T, P, zs) for l in liquids]\n G_min, lowest_phase = 1e100, None\n for l in ls:\n G = l.G()\n if G < G_min:\n G_min, lowest_phase = G, l\n\n fugacity_liq = lowest_phase.fugacities()[0]\n\n err = fugacity_liq - fugacity_gas\n store[:] = (lowest_phase, g, err)\n return err\n Tsat = secant(to_solve_secant, T_guess, xtol=xtol, maxiter=maxiter,\n low=Phase.T_MIN_FIXED)\n l, g, err = store\n\n return Tsat, l, g, iterations, err\n\n\ndef TSF_pure_newton(P_guess, T, other_phases, solids, maxiter=200, xtol=1E-10):\n one_other = len(other_phases) == 1\n one_solid = len(solids) == 1\n zs = [1.0]\n store = []\n global iterations\n iterations = 0\n def to_solve_newton(P):\n global iterations\n iterations += 1\n if one_solid:\n lowest_solid = solids[0].to_TP_zs(T, P, zs)\n else:\n ss = [s.to_TP_zs(T, P, zs) for s in solids]\n G_min, lowest_solid = 1e100, None\n for o in ss:\n G = o.G()\n if G < G_min:\n G_min, lowest_solid = G, o\n\n fugacity_solid = lowest_solid.fugacities()[0]\n dfugacities_dP_solid = lowest_solid.dfugacities_dP()[0]\n\n if one_other:\n lowest_other = other_phases[0].to_TP_zs(T, P, zs)\n else:\n others = [l.to_TP_zs(T, P, zs) for l in other_phases]\n G_min, lowest_other = 1e100, None\n for o in others:\n G = o.G()\n if G < G_min:\n G_min, lowest_other = G, o\n\n fugacity_other = lowest_other.fugacities()[0]\n dfugacities_dP_other = lowest_other.dfugacities_dP()[0]\n\n err = fugacity_other - fugacity_solid\n derr_dP = dfugacities_dP_other - dfugacities_dP_solid\n store[:] = (lowest_other, lowest_solid, err)\n return err, derr_dP\n\n Psub = newton(to_solve_newton, P_guess, xtol=xtol, maxiter=maxiter,\n require_eval=True, bisection=False, fprime=True)\n other, solid, err = store\n\n return Psub, other, solid, iterations, err\n\ndef PSF_pure_newton(T_guess, P, other_phases, solids, maxiter=200, xtol=1E-10):\n one_other = len(other_phases) == 1\n one_solid = len(solids) == 1\n zs = [1.0]\n store = []\n global iterations\n iterations = 0\n def to_solve_newton(T):\n global iterations\n iterations += 1\n if one_solid:\n lowest_solid = solids[0].to_TP_zs(T, P, zs)\n else:\n ss = [s.to_TP_zs(T, P, zs) for s in solids]\n G_min, lowest_solid = 1e100, None\n for o in ss:\n G = o.G()\n if G < G_min:\n G_min, lowest_solid = G, o\n\n fugacity_solid = lowest_solid.fugacities()[0]\n dfugacities_dT_solid = lowest_solid.dfugacities_dT()[0]\n\n if one_other:\n lowest_other = other_phases[0].to_TP_zs(T, P, zs)\n else:\n others = [l.to_TP_zs(T, P, zs) for l in other_phases]\n G_min, lowest_other = 1e100, None\n for o in others:\n G = o.G()\n if G < G_min:\n G_min, lowest_other = G, o\n\n fugacity_other = lowest_other.fugacities()[0]\n dfugacities_dT_other = lowest_other.dfugacities_dT()[0]\n\n err = fugacity_other - fugacity_solid\n derr_dT = dfugacities_dT_other - dfugacities_dT_solid\n store[:] = (lowest_other, lowest_solid, err)\n return err, derr_dT\n\n Tsub = 
newton(to_solve_newton, T_guess, xtol=xtol, maxiter=maxiter,\n require_eval=True, bisection=False, fprime=True)\n other, solid, err = store\n\n return Tsub, other, solid, iterations, err\n\n\ndef solve_T_VF_IG_K_composition_independent(VF, T, zs, gas, liq, xtol=1e-10):\n '''from sympy import *\n zi, P, VF = symbols('zi, P, VF')\n l_phi, g_phi = symbols('l_phi, g_phi', cls=Function)\n # g_phi = symbols('g_phi')\n # Ki = l_phi(P)/g_phi(P)\n Ki = l_phi(P)#/g_phi\n err = zi*(Ki-1)/(1+VF*(Ki-1))\n cse([diff(err, P), err], optimizations='basic')\n '''\n # gas phis are all one in IG model\n# gas.to(T=T, P=P, zs=zs)\n cmps = range(liq.N)\n global Ks, iterations, err\n iterations = 0\n err = 0.0\n def to_solve(lnP):\n global Ks, iterations, err\n iterations += 1\n P = exp(lnP)\n l = liq.to(T=T, P=P, zs=zs)\n Ks = liquid_phis = l.phis()\n dphis_dP_l = l.dphis_dP()\n\n err = derr = 0.0\n for i in cmps:\n x1 = liquid_phis[i] - 1.0\n x2 = VF*x1\n x3 = 1.0/(x2 + 1.0)\n x4 = x3*zs[i]\n err += x1*x4\n derr += x4*(1.0 - x2*x3)*dphis_dP_l[i]\n return err, P*derr\n\n # estimate bubble point and dew point\n # Make sure to overwrite the phase so the Psats get cached\n P_base = 1e5\n liq = liq.to(T=T, P=P_base, zs=zs)\n phis = liq.phis()\n P_bub, P_dew = 0.0, 0.0\n for i in range(liq.N):\n P_bub += phis[i]*zs[i]\n P_dew += zs[i]/(phis[i]*P_base)\n P_bub = P_bub*liq.P\n P_dew = 1.0/P_dew\n P_guess = VF*P_dew + (1.0 - VF)*P_bub\n\n # When Poynting is on, these are only estimates; otherwise they are dead on\n # and there is no need for a solver\n if liq.use_Poynting or 0.0 < VF < 1.0:\n lnP = newton(to_solve, log(P_guess), xtol=xtol, fprime=True)\n P = exp(lnP)\n else:\n if VF == 0.0:\n Ks = liq.to(T=T, P=P_bub, zs=zs).phis()\n P = P_bub\n elif VF == 1.0:\n Ks = liq.to(T=T, P=P_dew, zs=zs).phis()\n P = P_dew\n else:\n raise ValueError(\"Vapor fraction outside range 0 to 1\")\n xs = [zs[i]/(1.+VF*(Ks[i]-1.)) for i in cmps]\n for i in cmps:\n Ks[i] *= xs[i]\n ys = Ks\n return P, xs, ys, iterations, err\n\ndef solve_P_VF_IG_K_composition_independent(VF, P, zs, gas, liq, xtol=1e-10):\n # gas phis are all one in IG model\n# gas.to(T=T, P=P, zs=zs)\n cmps = range(liq.N)\n global Ks, iterations, err\n iterations = 0\n def to_solve(T):\n # print(T)\n global Ks, iterations, err\n iterations += 1\n dphis_dT_l, liquid_phis = liq.dphis_dT_at(T, P, zs, phis_also=True)\n Ks = liquid_phis\n # print(Ks, 'Ks')\n # print(dphis_dT_l, liquid_phis)\n# l = liq.to(T=T, P=P, zs=zs)\n# Ks = liquid_phis = l.phis()\n# dphis_dT_l = l.dphis_dT()\n err = derr = 0.0\n for i in cmps:\n x1 = liquid_phis[i] - 1.0\n x2 = VF*x1\n x3 = 1.0/(x2 + 1.0)\n x4 = x3*zs[i]\n err += x1*x4\n derr += x4*(1.0 - x2*x3)*dphis_dT_l[i]\n return err, derr\n\n # import matplotlib.pyplot as plt\n # pts = linspace(1, 1000, 500)\n # vals = [to_solve(T) for T in pts]\n # plt.plot(pts, vals, 'x')\n # plt.show()\n\n try:\n T = newton(to_solve, 300.0, xtol=xtol, fprime=True, low=1e-6,\n damping_func=make_max_step_initial(steps=3, max_step=100),\n bisection=True)\n except:\n try:\n T = brenth(lambda x: to_solve(x)[0], 300, 1000)\n except:\n T = newton(to_solve, 400.0, xtol=xtol, fprime=True, low=1e-6)\n xs = [zs[i]/(1.+VF*(Ks[i]-1.)) for i in cmps]\n for i in cmps:\n Ks[i] *= xs[i]\n ys = Ks\n return T, xs, ys, iterations, err\n\n
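# NOTE (added explanation, not in the original source): the routine below appears\n # to secant-iterate the dry-basis mole fraction of component idx (starting from z0\n # and z1), renormalizing the feed and running one successive-substitution flash per\n # step, until saturation is reached (the residual 1.0 - V_over_F goes to zero).\ndef sequential_substitution_2P_sat(T, P, V, zs_dry, xs_guess, ys_guess, liquid_phase,\n gas_phase, idx, z0, z1=None, maxiter=1000, tol=1E-13,\n trivial_solution_tol=1e-5, damping=1.0):\n xs, ys = xs_guess, ys_guess\n V_over_F = 1.0\n cmps = 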
range(len(zs_dry))\n\n if z1 is None:\n z1 = z0*1.0001 + 1e-4\n if z1 > 1:\n z1 = z0*1.0001 - 1e-4\n\n # secant step/solving\n p0, p1, err0, err1 = None, None, None, None\n def step(p0, p1, err0, err1):\n if p0 is None:\n return z0\n if p1 is None:\n return z1\n else:\n new = p1 - err1*(p1 - p0)/(err1 - err0)*damping\n return new\n\n\n for iteration in range(maxiter):\n p0, p1 = step(p0, p1, err0, err1), p0\n zs = list(zs_dry)\n zs[idx] = p0\n zs = normalize(zs)\n# print(zs, p0, p1)\n\n g = gas_phase.to(ys, T=T, P=P, V=V)\n l = liquid_phase.to(xs, T=T, P=P, V=V)\n lnphis_g = g.lnphis()\n lnphis_l = l.lnphis()\n\n Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]\n\n V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)\n err0, err1 = 1.0 - V_over_F, err0\n\n # Check for negative fractions - normalize only if needed\n for xi in xs_new:\n if xi < 0.0:\n xs_new_sum = sum(abs(i) for i in xs_new)\n xs_new = [abs(i)/xs_new_sum for i in xs_new]\n break\n for yi in ys_new:\n if yi < 0.0:\n ys_new_sum = sum(abs(i) for i in ys_new)\n ys_new = [abs(i)/ys_new_sum for i in ys_new]\n break\n\n err, comp_diff = 0.0, 0.0\n for i in cmps:\n err_i = Ks[i]*xs[i]/ys[i] - 1.0\n err += err_i*err_i + abs(ys[i] - zs[i])\n comp_diff += abs(xs[i] - ys[i])\n\n # Accept the new compositions\n# xs, ys = xs_new, zs # This has worse convergence behavior?\n xs, ys = xs_new, ys_new\n\n if comp_diff < trivial_solution_tol:\n raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n\n if err < tol and abs(err0) < tol:\n return V_over_F, xs, zs, l, g, iteration, err, err0\n raise UnconvergedError('End of SS without convergence')\n\ndef SS_VF_simultaneous(guess, fixed_val, zs, liquid_phase, gas_phase,\n iter_var='T', fixed_var='P', V_over_F=1,\n maxiter=200, xtol=1E-10, comp_guess=None,\n damping=0.8, tol_eq=1e-12, update_frequency=3,\n hot_start=None):\n if comp_guess is None:\n comp_guess = zs\n\n if V_over_F == 1 or V_over_F > 0.5:\n dew = True\n xs, ys = comp_guess, zs\n else:\n dew = False\n xs, ys = zs, comp_guess\n\n sln = sequential_substitution_2P_HSGUAbeta(zs=zs, xs_guess=xs, ys_guess=ys, liquid_phase=liquid_phase,\n gas_phase=gas_phase, fixed_var_val=fixed_val, spec_val=V_over_F, tol_spec=xtol,\n iter_var_0=guess, update_frequency=update_frequency,\n iter_var=iter_var, fixed_var=fixed_var, spec='beta', damping=damping, tol_eq=tol_eq)\n guess, _, xs, ys, l, g, iteration, err_eq, spec_err = sln\n\n if dew:\n comp_guess = xs\n iter_phase, const_phase = l, g\n else:\n comp_guess = ys\n iter_phase, const_phase = g, l\n\n return guess, comp_guess, iter_phase, const_phase, iteration, {'err_eq': err_eq, 'spec_err': spec_err}\n\n\ndef sequential_substitution_2P_HSGUAbeta(zs, xs_guess, ys_guess, liquid_phase,\n gas_phase, fixed_var_val, spec_val,\n iter_var_0, iter_var_1=None,\n iter_var='T', fixed_var='P', spec='H',\n maxiter=1000, tol_eq=1E-13, tol_spec=1e-9,\n trivial_solution_tol=1e-5, damping=1.0,\n V_over_F_guess=None, fprime=True,\n update_frequency=1, update_eq=1e-7):\n xs, ys = xs_guess, ys_guess\n if V_over_F_guess is None:\n V_over_F = 0.5\n else:\n V_over_F = V_over_F_guess\n\n cmps = range(len(zs))\n\n if iter_var_1 is None:\n iter_var_1 = iter_var_0*1.0001 + 1e-4\n\n tol_spec_abs = tol_spec*abs(spec_val)\n if tol_spec_abs == 0.0:\n if spec == 'beta':\n tol_spec_abs = 1e-9\n else:\n tol_spec_abs = 1e-7\n\n # secant step/solving\n p0, p1, spec_err, spec_err_old = None, None, None, None\n def step(p0, p1, spec_err, spec_err_old, step_der):\n if p0 is None:\n return 
iter_var_0\n if p1 is None:\n return iter_var_1\n else:\n secant_step = spec_err_old*(p1 - p0)/(spec_err_old - spec_err)*damping\n if fprime and step_der is not None:\n if abs(step_der) < abs(secant_step):\n step = step_der\n new = p0 - step\n else:\n step = secant_step\n new = p1 - step\n else:\n new = p1 - secant_step\n if new < 1e-7:\n # Only handle positive values, damped steps to .5\n new = 0.5*(1e-7 + p0)\n# print(p0, p1, new)\n return new\n\n TPV_args = {fixed_var: fixed_var_val, iter_var: iter_var_0}\n\n VF_spec = spec == 'beta'\n if not VF_spec:\n spec_fun_l = getattr(liquid_phase.__class__, spec)\n spec_fun_g = getattr(gas_phase.__class__, spec)\n\n s_der = f'd{spec}_d{iter_var}_{fixed_var}'\n spec_der_fun_l = getattr(liquid_phase.__class__, s_der)\n spec_der_fun_g = getattr(gas_phase.__class__, s_der)\n else:\n V_over_F = iter_var_0\n\n err_eq = 1e100 # initial value\n\n step_der = None\n for iteration in range(maxiter):\n if (not (iteration % update_frequency) or err_eq < update_eq) or iteration < 2:\n p0, p1 = step(p0, p1, spec_err, spec_err_old, step_der), p0\n TPV_args[iter_var] = p0\n\n g = gas_phase.to(ys, **TPV_args)\n l = liquid_phase.to(xs, **TPV_args)\n lnphis_g = g.lnphis()\n lnphis_l = l.lnphis()\n\n Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]\n\n V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)\n\n if not VF_spec:\n spec_calc = spec_fun_l(l)*(1.0 - V_over_F) + spec_fun_g(g)*V_over_F\n spec_der_calc = spec_der_fun_l(l)*(1.0 - V_over_F) + spec_der_fun_g(g)*V_over_F\n# print(spec_der_calc)\n else:\n spec_calc = V_over_F\n if (not (iteration % update_frequency) or err_eq < update_eq) or iteration < 2:\n spec_err_old = spec_err # Only update old error on an update iteration\n spec_err = spec_calc - spec_val\n\n try:\n step_der = spec_err/spec_der_calc\n # print(spec_err, step_der, p1-p0)\n except:\n pass\n\n # Check for negative fractions - normalize only if needed\n for xi in xs_new:\n if xi < 0.0:\n xs_new_sum_inv = 1.0/sum(abs(i) for i in xs_new)\n xs_new = [abs(i)*xs_new_sum_inv for i in xs_new]\n break\n for yi in ys_new:\n if yi < 0.0:\n ys_new_sum_inv = 1.0/sum(abs(i) for i in ys_new)\n ys_new = [abs(i)*ys_new_sum_inv for i in ys_new]\n break\n\n err_eq, comp_diff = 0.0, 0.0\n for i in cmps:\n err_i = Ks[i]*xs[i]/ys[i] - 1.0\n err_eq += err_i*err_i\n comp_diff += abs(xs[i] - ys[i])\n\n # Accept the new compositions\n# xs, ys = xs_new, zs # This has worse convergence behavior; seems to not even converge some of the time\n xs, ys = xs_new, ys_new\n\n if comp_diff < trivial_solution_tol and iteration: # Allow the first iteration to start with the same composition\n raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n # print('Guess: %g, Eq Err: %g, Spec Err: %g, VF: %g' %(p0, err_eq, spec_err, V_over_F))\n# print(p0, err_eq, spec_err, V_over_F)\n# print(p0, err, spec_err, xs, ys, V_over_F)\n if err_eq < tol_eq and abs(spec_err) < tol_spec_abs:\n return p0, V_over_F, xs, ys, l, g, iteration, err_eq, spec_err\n raise UnconvergedError('End of SS without convergence')\n\n\n\n# def sequential_substitution_2P_double(zs, xs_guess, ys_guess, liquid_phase,\n# gas_phase, guess, spec_vals,\n# iter_var0='T', iter_var1='P',\n# spec_vars=['H', 'S'],\n# maxiter=1000, tol_eq=1E-13, tol_specs=1e-9,\n# trivial_solution_tol=1e-5, damping=1.0,\n# V_over_F_guess=None, fprime=True):\n# xs, ys = xs_guess, ys_guess\n# if V_over_F_guess is None:\n# V_over_F = 0.5\n# else:\n# V_over_F = V_over_F_guess\n\n# cmps = 
range(len(zs))\n\n# iter0_val = guess[0]\n# iter1_val = guess[1]\n\n# spec0_val = spec_vals[0]\n# spec1_val = spec_vals[1]\n\n# spec0_var = spec_vars[0]\n# spec1_var = spec_vars[1]\n\n# spec0_fun_l = getattr(liquid_phase.__class__, spec0_var)\n# spec0_fun_g = getattr(gas_phase.__class__, spec0_var)\n\n# spec1_fun_l = getattr(liquid_phase.__class__, spec1_var)\n# spec1_fun_g = getattr(gas_phase.__class__, spec1_var)\n\n# spec0_der0 = f'd{spec0_var}_d{iter_var0}_{iter_var1}'\n# spec1_der0 = f'd{spec1_var}_d{iter_var0}_{iter_var1}'\n# spec0_der1 = f'd{spec0_var}_d{iter_var1}_{iter_var0}'\n# spec1_der1 = f'd{spec1_var}_d{iter_var1}_{iter_var0}'\n\n# spec0_der0_fun_l = getattr(liquid_phase.__class__, spec0_der0)\n# spec0_der0_fun_g = getattr(gas_phase.__class__, spec0_der0)\n\n# spec1_der0_fun_l = getattr(liquid_phase.__class__, spec1_der0)\n# spec1_der0_fun_g = getattr(gas_phase.__class__, spec1_der0)\n\n# spec0_der1_fun_l = getattr(liquid_phase.__class__, spec0_der1)\n# spec0_der1_fun_g = getattr(gas_phase.__class__, spec0_der1)\n\n# spec1_der1_fun_l = getattr(liquid_phase.__class__, spec1_der1)\n# spec1_der1_fun_g = getattr(gas_phase.__class__, spec1_der1)\n\n# step_der = None\n# for iteration in range(maxiter):\n# TPV_args[iter_var0] = iter0_val\n# TPV_args[iter_var1] = iter1_val\n\n# g = gas_phase.to(zs=ys, **TPV_args)\n# l = liquid_phase.to(zs=xs, **TPV_args)\n# lnphis_g = g.lnphis()\n# lnphis_l = l.lnphis()\n\n# Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]\n\n# V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)\n\n# spec0_calc = spec0_fun_l(l)*(1.0 - V_over_F) + spec0_fun_g(g)*V_over_F\n# spec1_calc = spec1_fun_l(l)*(1.0 - V_over_F) + spec1_fun_g(g)*V_over_F\n\n# spec0_der0_calc = spec0_der0_fun_l(l)*(1.0 - V_over_F) + spec0_der0_fun_g(g)*V_over_F\n# spec0_der1_calc = spec0_der1_fun_l(l)*(1.0 - V_over_F) + spec0_der1_fun_g(g)*V_over_F\n\n# spec1_der0_calc = spec1_der0_fun_l(l)*(1.0 - V_over_F) + spec1_der0_fun_g(g)*V_over_F\n# spec1_der1_calc = spec1_der1_fun_l(l)*(1.0 - V_over_F) + spec1_der1_fun_g(g)*V_over_F\n\n# errs = [spec0_calc - spec0_val, spec1_calc - spec1_val]\n# jac = [[spec0_der0_calc, spec0_der1_calc], [spec1_der0_calc, spec1_der1_calc]]\n\n# # Do the newton step\n# dx = py_solve(jac, [-v for v in errs])\n# iter0_val, iter1_val = (xi + dxi*damping for xi, dxi in zip([iter0_val, iter1_val], dx))\n\n\n# # Check for negative fractions - normalize only if needed\n# for xi in xs_new:\n# if xi < 0.0:\n# xs_new_sum = sum(abs(i) for i in xs_new)\n# xs_new = [abs(i)/xs_new_sum for i in xs_new]\n# break\n# for yi in ys_new:\n# if yi < 0.0:\n# ys_new_sum = sum(abs(i) for i in ys_new)\n# ys_new = [abs(i)/ys_new_sum for i in ys_new]\n# break\n\n# err, comp_diff = 0.0, 0.0\n# for i in cmps:\n# err_i = Ks[i]*xs[i]/ys[i] - 1.0\n# err += err_i*err_i\n# comp_diff += abs(xs[i] - ys[i])\n\n# xs, ys = xs_new, ys_new\n\n# if comp_diff < trivial_solution_tol:\n# raise ValueError(\"Converged to trivial condition, compositions of both phases equal\")\n\n# if err < tol_eq and abs(err0) < tol_spec_abs:\n# return p0, V_over_F, xs, ys, l, g, iteration, err, err0\n# raise UnconvergedError('End of SS without convergence')\n\n\ndef stability_iteration_Michelsen(T, P, zs_trial, fugacities_trial, zs_test, test_phase,\n maxiter=20, xtol=1E-12, functional=False):\n # If `functional`, call lnphis_direct and `test_phase` is a tuple of parameters\n # Otherwise, `test_phase` is a phase object and the lnphis_at_zs method should be called.\n # So long as for both trial_phase, and test_phase 
we use the lowest Gibbs energy fugacities, there is no need to test two phases.\n # Very much no need to converge using acceleration - just keep a low tolerance\n # At any point, can use the Ks working, assume a drop of the new phase, and evaluate two new phases and see if G drops.\n # If it does, drop out early! This implementation does not do that.\n\n # Should be possible to tell if converging to trivial solution during the process - and bail out then\n # It is possible to switch this function to operate on lnphis e.g.\n # corrections[i] = ci = zs[i]/zs_test[i]*trunc_exp(lnphis_trial[i] - lnphis_test[i])*sum_zs_test_inv\n # however numerical differences seem to be huge and operate better on fugacities with the trunc_exp function\n # than anything else.\n\n # Can this whole function be switched to the functional approach?\n # Should be possible\n # Note that the trial and test phase have to be at the right conditions\n # print(trial_phase,trial_phase.zs, zs_test, test_phase, test_phase.zs,\n # maxiter, xtol)\n # T, P, zs = trial_phase.T, trial_phase.P, trial_phase.zs\n zs = zs_trial #trial_phase.zs\n # P = trial_phase.P\n # trial_zs_orig = zs\n\n N = len(zs_trial)\n # fugacities_trial = trial_phase.fugacities_lowest_Gibbs()\n\n # has_zero_z_trial = False\n # Go through the feed composition - and the trial composition - if we have zeros, need to make them a trace;\n # zs_test2 = [0.0]*N\n # for i in range(N):\n # zs_test2[i] = zs_test[i]\n # zs_test = zs_test2\n # for i in range(N):\n # if zs_test[i] == 0.0:\n # zs_test[i] = 1e-50\n # # break\n # for i in range(N):\n # if zs[i] == 0.0:\n # zs2 = [0.0]*N\n # for i in range(N):\n # if zs[i] == 0.0:\n # zs2[i] = 1e-50\n # has_zero_z_trial = True\n # else:\n # zs2[i] = zs[i]\n # zs = zs2\n # # Requires another evaluation of the trial phase\n # trial_phase = trial_phase.to(T=T, P=P, zs=zs)\n # fugacities_trial = trial_phase.fugacities_lowest_Gibbs()\n # break\n Ks = [0.0]*N\n # makes no real difference\n for i in range(N):\n if isinf(fugacities_trial[i]):\n # Stable\n V_over_F, xs, ys = V_over_F, trial_zs, appearing_zs = 0.0, zs, zs\n return 1e100, Ks, zs_test, V_over_F, trial_zs, appearing_zs, 0.0\n\n\n# Basis of equations is for the test phase being a gas; the trial phase is assumed to be a liquid\n corrections = [1.0]*N\n\n # Model converges towards fictional K values which, when evaluated, yield the\n # stationary point composition\n for i in range(N):\n if zs[i] != 0.0:\n Ks[i] = zs_test[i]/zs[i]\n\n sum_zs_test = sum_zs_test_inv = 1.0\n converged = False\n dead = False\n for iteration in range(maxiter):\n# test_phase = test_phase.to(T=T, P=P, zs=zs_test)\n #fugacities_test = test_phase.to(T=T, P=P, zs=zs_test).fugacities_lowest_Gibbs()\n # fugacities_test2 = test_phase.to(T=T, P=P, zs=zs_test).fugacities_lowest_Gibbs()\n\n # TODO: is it really necessary to do the most stable check?\n # fugacities_test = test_phase.fugacities_at_zs(zs_test, most_stable=True)\n # fugacities_test = fugacities_check\n # print(fugacities_test, zs_test)\n\n if functional:\n lnphis_test = lnphis_direct(zs_test, *test_phase)\n else:\n # lnphis_test = test_phase.lnphis_at_zs(zs_test, most_stable=True)\n lnphis_test = test_phase(zs_test)\n\n fugacities_test = [P*zs_test[i]*trunc_exp(lnphis_test[i]) for i in range(N)]\n\n\n err = 0.0\n # try:\n for i in range(N):\n if fugacities_test[i] != 0.0:\n corrections[i] = ci = fugacities_trial[i]/fugacities_test[i]*sum_zs_test_inv\n Ks[i] *= ci\n err += (ci - 1.0)*(ci - 1.0)\n # except:\n # # A test fugacity became zero\n # # May need 
special handling for this outside.\n # converged = True\n # break\n\n if err < xtol:\n converged = True\n break\n\n # Update compositions for the next iteration - might as well move this above the break check\n for i in range(N):\n zs_test[i] = Ks[i]*zs[i] # new test phase comp\n\n # Cannot move the normalization above the error check - returning\n # unnormalized sum_zs_test is used also to detect a trivial solution\n sum_zs_test = 0.0\n for i in range(N):\n sum_zs_test += zs_test[i]\n try:\n sum_zs_test_inv = 1.0/sum_zs_test\n except:\n # Fugacities are all zero\n converged = True\n break\n for i in range(N):\n zs_test[i] *= sum_zs_test_inv\n for i in range(N):\n if isnan(zs_test[i]):\n dead = True\n if dead:\n break\n\n if converged:\n # if has_zero_z_trial:\n # try:\n # # print('Interesting use above')\n # # 1/0\n # tmp_zs_into = [v for v in trial_zs_orig if v != 0.0]\n # tmp_Ks_into = [K for K, z in zip(Ks, trial_zs_orig) if z != 0.0]\n # V_over_F, trial_zs_unmapped, appearing_zs_unmapped = flash_inner_loop(tmp_zs_into, tmp_Ks_into)\n # trial_zs = [0.0]*N\n # appearing_zs = [0.0]*N\n # unmapping_idx = 0\n # for i in range(N):\n # if trial_zs_orig[i] != 0.0:\n # trial_zs[i] = trial_zs_unmapped[unmapping_idx]\n # appearing_zs[i] = appearing_zs_unmapped[unmapping_idx]\n # unmapping_idx += 1\n # V_over_F, xs, ys = V_over_F, trial_zs, appearing_zs\n # except:\n # # Converged to trivial solution so closely the math does not work\n # V_over_F, xs, ys = V_over_F, trial_zs, appearing_zs = 0.0, zs, zs\n # else:\n if not isinf(err) and not dead:\n try:\n V_over_F, trial_zs, appearing_zs = flash_inner_loop(zs, Ks)\n except:\n # Converged to trivial solution so closely the math does not work\n V_over_F, trial_zs, appearing_zs = 0.0, zs, zs\n else:\n V_over_F, trial_zs, appearing_zs = 0.0, zs, zs\n\n # Calculate the dG of the feed\n dG_RT = 0.0\n if V_over_F != 0.0:\n for i in range(N):\n # Sometimes z will converge to literally be zero, so the trunc_log takes care of that\n dG_RT += zs_test[i]*(trunc_log(zs_test[i]) + lnphis_test[i])\n dG_RT *= V_over_F\n# print(dG_RT)\n return sum_zs_test, Ks, zs_test, V_over_F, trial_zs, appearing_zs, dG_RT\n else:\n raise UnconvergedError('End of stability_iteration_Michelsen without convergence')\n\n\n\ndef TPV_double_solve_1P(zs, phase, guesses, spec_vals,\n goal_specs=('V', 'U'), state_specs=('T', 'P'),\n maxiter=200, xtol=1E-10, ytol=None, spec_funs=None):\n kwargs = {'zs': zs}\n phase_cls = phase.__class__\n s00 = f'd{goal_specs[0]}_d{state_specs[0]}_{state_specs[1]}'\n s01 = f'd{goal_specs[0]}_d{state_specs[1]}_{state_specs[0]}'\n s10 = f'd{goal_specs[1]}_d{state_specs[0]}_{state_specs[1]}'\n s11 = f'd{goal_specs[1]}_d{state_specs[1]}_{state_specs[0]}'\n try:\n err0_fun = getattr(phase_cls, goal_specs[0])\n err1_fun = getattr(phase_cls, goal_specs[1])\n j00 = getattr(phase_cls, s00)\n j01 = getattr(phase_cls, s01)\n j10 = getattr(phase_cls, s10)\n j11 = getattr(phase_cls, s11)\n except:\n pass\n\n cache = []\n\n def to_solve(states):\n kwargs[state_specs[0]] = float(states[0])\n kwargs[state_specs[1]] = float(states[1])\n new = phase.to(**kwargs)\n try:\n v0, v1 = err0_fun(new), err1_fun(new)\n jac = [[j00(new), j01(new)],\n [j10(new), j11(new)]]\n except:\n v0, v1 = new.value(goal_specs[0]), new.value(goal_specs[1])\n jac = [[new.value(s00), new.value(s01)],\n [new.value(s10), new.value(s11)]]\n\n if spec_funs is not None:\n err0 = v0 - spec_funs[0](new)\n err1 = v1 - spec_funs[1](new)\n else:\n err0 = v0 - spec_vals[0]\n err1 = v1 - spec_vals[1]\n errs = 
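    # --- Hypothetical usage sketch for stability_iteration_Michelsen above (added note, not part of the original source).\n    # `test_phase` here follows the non-functional calling convention: a callable mapping a test\n    # composition to its lnphis. An ideal-solution test phase (lnphis identically zero) drives the\n    # iteration to the trivial solution immediately, which makes it a convenient smoke test.\n    # T, P = 300.0, 1e5\n    # zs = [0.6, 0.4]\n    # fugacities_trial = [P*zi for zi in zs]  # ideal-gas trial-phase fugacities\n    # ideal_test = lambda zs_test: [0.0]*len(zs_test)\n    # sum_zs_test, Ks, zs_test, VF, trial_zs, appearing_zs, dG_RT = stability_iteration_Michelsen(\n    #     T, P, zs, fugacities_trial, zs_test=[0.5, 0.5], test_phase=ideal_test)\n    # # sum_zs_test ~ 1.0 with Ks ~ 1.0 signals the trivial (no new phase) solution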
[err0, err1]\n\n cache[:] = [new, errs, jac]\n # print(kwargs, errs)\n return errs, jac\n\n#\n states, iterations = newton_system(to_solve, x0=guesses, jac=True, xtol=xtol,\n ytol=ytol, maxiter=maxiter, damping_func=damping_maintain_sign)\n phase = cache[0]\n err = cache[1]\n jac = cache[2]\n\n return states, phase, iterations, err, jac\n\n\n\ndef assert_stab_success_2P(liq, gas, stab, T, P, zs, guess_name, xs=None,\n ys=None, VF=None, SS_tol=1e-15, rtol=1e-7):\n r'''Basic function - perform a specified stability test, and then a two-phase flash using it\n Check on specified variables the method is working.\n '''\n gas = gas.to(T=T, P=P, zs=zs)\n liq = liq.to(T=T, P=P, zs=zs)\n trial_comp = stab.incipient_guess_named(T, P, zs, guess_name)\n if liq.G() < gas.G():\n min_phase, other_phase = liq, gas\n else:\n min_phase, other_phase = gas, liq\n\n test_phase_call = lambda zs: other_phase.lnphis_at_zs(zs, most_stable=True)\n\n _, _, _, V_over_F, trial_zs, appearing_zs, dG_RT = stability_iteration_Michelsen(T=min_phase.T, P=min_phase.P, zs_trial=min_phase.zs,\n fugacities_trial=min_phase.fugacities(), zs_test=trial_comp, test_phase=test_phase_call, maxiter=100)\n\n V_over_F, xs_calc, ys_calc, l, g, iteration, err = sequential_substitution_2P(T=T, P=P, V=None,\n zs=zs, xs_guess=trial_zs, ys_guess=appearing_zs,\n liquid_phase=min_phase, tol=SS_tol,\n gas_phase=other_phase)\n if xs_calc is not None:\n assert_close1d(xs, xs_calc, rtol)\n if ys_calc is not None:\n assert_close1d(ys, ys_calc, rtol)\n if VF is not None:\n assert_close(V_over_F, VF, rtol)\n assert_close1d(l.fugacities(), g.fugacities(), rtol)\n\ndef TPV_solve_HSGUA_guesses_VL(zs, method, constants, correlations,\n fixed_var_val, spec_val,\n iter_var='T', fixed_var='P', spec='H',\n maxiter=20, xtol=1E-7, ytol=None,\n bounded=False, min_bound=None, max_bound=None,\n user_guess=None, last_conv=None, T_ref=298.15,\n P_ref=101325.0):\n global V_over_F_guess\n V_over_F_guess = 0.5\n\n cmps = range(constants.N)\n Tcs, Pcs, omegas = constants.Tcs, constants.Pcs, constants.omegas\n\n if fixed_var == iter_var:\n raise ValueError(\"Fixed variable cannot be the same as iteration variable\")\n if fixed_var not in ('T', 'P', 'V'):\n raise ValueError(\"Fixed variable must be one of `T`, `P`, `V`\")\n if iter_var not in ('T', 'P', 'V'):\n raise ValueError(\"Iteration variable must be one of `T`, `P`, `V`\")\n if spec not in ('H', 'S', 'G', 'U', 'A', 'V'):\n raise ValueError(\"Spec variable must be one of `H`, `S`, `G` `U`, `A`, `V`\")\n\n\n cmps = range(len(zs))\n\n iter_T = iter_var == 'T'\n iter_P = iter_var == 'P'\n iter_V = iter_var == 'V'\n\n fixed_P = fixed_var == 'P'\n fixed_T = fixed_var == 'T'\n fixed_V = fixed_var == 'V'\n if fixed_P:\n P = fixed_var_val\n elif fixed_T:\n T = fixed_var_val\n elif fixed_V:\n V = fixed_var_val\n\n always_S = spec in ('S', 'G', 'A')\n always_H = spec in ('H', 'G', 'U', 'A')\n always_V = spec in ('U', 'A', 'V')\n\n\n def H_model(T, P, xs, ys, V_over_F):\n if V_over_F >= 1.0:\n return H_model_g(T, P, zs)\n elif V_over_F <= 0.0:\n return H_model_l(T, P, zs)\n H_liq = H_model_l(T, P, xs)\n H_gas = H_model_g(T, P, ys)\n return H_liq*(1.0 - V_over_F) + V_over_F*H_gas\n\n def S_model(T, P, xs, ys, V_over_F):\n if V_over_F >= 1.0:\n return S_model_g(T, P, zs)\n elif V_over_F <= 0.0:\n return S_model_l(T, P, zs)\n S_liq = S_model_l(T, P, xs)\n S_gas = S_model_g(T, P, ys)\n return S_liq*(1.0 - V_over_F) + V_over_F*S_gas\n\n def V_model(T, P, xs, ys, V_over_F):\n if V_over_F >= 1.0:\n return V_model_g(T, P, zs)\n elif 
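    # --- Hedged illustration (added note, not in the original file) of the derivative-name\n    # convention the 2D Newton solver above relies on. With goal_specs=('V', 'U') and\n    # state_specs=('T', 'P'), the f-strings resolve to existing Phase method names:\n    #   s00 = 'dV_dT_P'  # dV/dT at constant P\n    #   s01 = 'dV_dP_T'  # dV/dP at constant T\n    #   s10 = 'dU_dT_P'\n    #   s11 = 'dU_dP_T'\n    # getattr(phase_cls, 'dV_dT_P') etc. then assembles the 2x2 Jacobian analytically, with\n    # Phase.value(name) as the generic fallback when a phase lacks the analytical derivative.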
V_over_F <= 0.0:\n            return V_model_l(T, P, zs)\n        V_liq = V_model_l(T, P, xs)\n        V_gas = V_model_g(T, P, ys)\n        return V_liq*(1.0 - V_over_F) + V_over_F*V_gas\n\n    # Reference-state entropy terms, precomputed once when the spec involves S\n    if always_S:\n        P_ref_inv = 1.0/P_ref\n        dS_ideal = R*sum([zi*log(zi) for zi in zs if zi > 0.0]) # ideal mixing entropy contribution\n\n    info = []\n    def err(guess):\n        # Translate the fixed variable to a local variable\n        if fixed_P:\n            P = fixed_var_val\n        elif fixed_T:\n            T = fixed_var_val\n        elif fixed_V:\n            V = fixed_var_val\n            T = None\n        # Translate the iteration variable to a local variable\n        if iter_P:\n            P = guess\n            if not fixed_V:\n                V = None\n        elif iter_T:\n            T = guess\n            if not fixed_V:\n                V = None\n        elif iter_V:\n            V = guess\n            T = None\n\n        if T is None:\n            # Just assume gas I guess\n            T = T_from_V_g(V, P, zs)\n\n        VF, xs, ys = flash_model(T, P, zs)\n        info[:] = VF, xs, ys\n\n        # Compute S, H, V as necessary\n        if always_S:\n            S = S_model(T, P, xs, ys, VF) - dS_ideal - R*log(P*P_ref_inv)\n        if always_H:\n            H = H_model(T, P, xs, ys, VF)\n        if always_V and V is None:\n            V = V_model(T, P, xs, ys, VF)\n\n        # Return the objective function\n        if spec == 'H':\n            err = H - spec_val\n        elif spec == 'S':\n            err = S - spec_val\n        elif spec == 'G':\n            err = (H - T*S) - spec_val\n        elif spec == 'U':\n            err = (H - P*V) - spec_val\n        elif spec == 'A':\n            err = (H - P*V - T*S) - spec_val\n        elif spec == 'V':\n            err = V - spec_val\n#        print(T, P, V, 'TPV', err)\n        return err\n\n    # Common models\n    VolumeLiquids = correlations.VolumeLiquids\n    def V_model_l(T, P, zs):\n        V_calc = 0.\n        for i in cmps:\n            V_calc += zs[i]*VolumeLiquids[i].T_dependent_property(T)\n        return V_calc\n\n    def T_from_V_l(V, P, zs):\n        T_calc = 0.\n        for i in cmps:\n            T_calc += zs[i]*VolumeLiquids[i].solve_property(V)\n        return T_calc\n\n    def V_model_g(T, P, zs):\n        return R*T/P\n\n    def T_from_V_g(V, P, zs):\n        return P*V/R\n\n    if method in (IDEAL_WILSON, SHAW_ELEMENTAL):\n        if iter_P:\n            if fixed_T:\n                T_inv = 1.0/T\n                Ks_P = [Pcs[i]*exp(5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv)) for i in cmps]\n            def flash_model(T, P, zs):\n                global V_over_F_guess\n                P_inv = 1.0/P\n                if not fixed_T:\n                    T_inv = 1.0/T\n                    Ks_P_local = [Pcs[i]*exp(5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv)) for i in cmps]\n                    Ks = [Ki*P_inv for Ki in Ks_P_local]\n                else:\n                    Ks = [Ki*P_inv for Ki in Ks_P]\n                K_low, K_high = False, False\n                for i in cmps:\n                    if zs[i] != 0.0:\n                        if Ks[i] > 1.0:\n                            K_high = True\n                        else:\n                            K_low = True\n                        if K_high and K_low:\n                            break\n                if K_high and K_low:\n                    V_over_F_guess, xs, ys = Rachford_Rice_solution_LN2(zs, Ks, V_over_F_guess)\n                    return V_over_F_guess, xs, ys\n                elif K_high:\n                    return 1.0, zs, zs\n                else:\n                    return 0.0, zs, zs\n        else:\n            P_inv = 1.0/P\n            def flash_model(T, P, zs):\n                global V_over_F_guess\n                T_inv = 1.0/T\n                Ks = [Pcs[i]*P_inv*exp(5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv)) for i in cmps]\n                K_low, K_high = False, False\n                for i in cmps:\n                    if zs[i] != 0.0:\n                        if Ks[i] > 1.0:\n                            K_high = True\n                        else:\n                            K_low = True\n                        if K_high and K_low:\n                            break\n                if K_high and K_low:\n                    V_over_F_guess, xs, ys = Rachford_Rice_solution_LN2(zs, Ks, V_over_F_guess)\n                    return V_over_F_guess, xs, ys\n                elif K_high:\n                    return 1.0, zs, zs\n                else:\n                    return 0.0, zs, zs\n\n    if method == SHAW_ELEMENTAL:\n        VolumeLiquids = correlations.VolumeLiquids\n        MWs, n_atoms = constants.MWs, constants.n_atoms\n\n        def H_model_g(T, P, zs):\n            MW_g, sv_g = 0.0, 0.0\n            for i in cmps:\n                MW_g += MWs[i]*zs[i]\n                sv_g += n_atoms[i]*zs[i]\n            sv_g /= MW_g\n\n            H_ref_LS = Lastovka_Shaw_integral(T_ref, sv_g)\n            H1 = Lastovka_Shaw_integral(T, sv_g)\n            dH = H1 - H_ref_LS\n            
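    # --- Worked example (illustrative constants, added note) of the Wilson K-value initializer\n    # used by flash_model above: K_i = (Pc_i/P)*exp(5.37*(1 + omega_i)*(1 - Tc_i/T)).\n    # from math import exp\n    # T, P = 300.0, 1e5\n    # Tcs, Pcs, omegas = [190.6, 469.7], [4.599e6, 3.37e6], [0.008, 0.251]  # ~methane, ~n-pentane\n    # Ks = [Pcs[i]/P*exp(5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]/T)) for i in range(2)]\n    # # methane K ~ 3e2 (strongly favors vapor); n-pentane K ~ 0.75 (favors liquid); with one\n    # # K above and one below 1.0, flash_model proceeds to the Rachford-Rice solution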
H_gas = 1e-3*dH*MW_g #property_mass_to_molar(dH, MW_g)\n return H_gas\n\n def S_model_g(T, P, zs):\n MW_g, sv_g = 0.0, 0.0\n for i in cmps:\n MW_g += MWs[i]*zs[i]\n sv_g += n_atoms[i]*zs[i]\n sv_g /= MW_g\n\n S_ref_LS = Lastovka_Shaw_integral_over_T(T_ref, sv_g)\n S1 = Lastovka_Shaw_integral_over_T(T, sv_g)\n dS = S1 - S_ref_LS\n S_gas = 1e-3*dS*MW_g\n return S_gas\n\n def H_model_l(T, P, zs):\n MW_l, sv_l, Tc_l, omega_l = 0.0, 0.0, 0.0, 0.0\n for i in cmps:\n MW_l += MWs[i]*zs[i]\n sv_l += n_atoms[i]*zs[i]\n Tc_l += Tcs[i]*zs[i]\n omega_l += omegas[i]*zs[i]\n sv_l /= MW_l\n\n H_ref_DS = Dadgostar_Shaw_integral(T_ref, sv_l)\n H1 = Dadgostar_Shaw_integral(T, sv_l)\n Hvap = SMK(T, Tc_l, omega_l)\n\n dH = H1 - H_ref_DS\n H_liq = 1e-3*dH*MW_l #property_mass_to_molar(dH, MW_l)\n return (H_liq - Hvap)\n\n def S_model_l(T, P, zs):\n MW_l, sv_l, Tc_l, omega_l = 0.0, 0.0, 0.0, 0.0\n for i in cmps:\n MW_l += MWs[i]*zs[i]\n sv_l += n_atoms[i]*zs[i]\n Tc_l += Tcs[i]*zs[i]\n omega_l += omegas[i]*zs[i]\n sv_l /= MW_l\n\n S_ref_DS = Dadgostar_Shaw_integral_over_T(T_ref, sv_l)\n S1 = Dadgostar_Shaw_integral_over_T(T, sv_l)\n\n Hvap = SMK(T, Tc_l, omega_l)\n\n dS = S1 - S_ref_DS\n S_liq = 1e-3*dS*MW_l\n return (S_liq - Hvap/T)\n\n\n elif method == IDEAL_WILSON:\n HeatCapacityGases = correlations.HeatCapacityGases\n EnthalpyVaporizations = correlations.EnthalpyVaporizations\n def flash_model(T, P, zs): # noqa: F811\n _, _, VF, xs, ys = flash_wilson(zs, constants.Tcs, constants.Pcs, constants.omegas, T=T, P=P)\n return VF, xs, ys\n\n def H_model_g(T, P, zs):\n H_calc = 0.\n for i in cmps:\n H_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral(T_ref, T)\n return H_calc\n\n def S_model_g(T, P, zs):\n S_calc = 0.\n for i in cmps:\n S_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T)\n return S_calc\n\n def H_model_l(T, P, zs):\n H_calc = 0.\n for i in cmps:\n H_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) - EnthalpyVaporizations[i](T))\n return H_calc\n\n def S_model_l(T, P, zs):\n S_calc = 0.\n T_inv = 1.0/T\n for i in cmps:\n S_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) - T_inv*EnthalpyVaporizations[i](T))\n return S_calc\n\n\n try:\n # All three variables P, T, V are positive but can grow unbounded, so\n # for the secant method, only set the one variable\n if iter_T:\n guess = 298.15\n elif iter_P:\n guess = 101325.0\n elif iter_V:\n guess = 0.024465403697038125\n val = secant(err, guess, xtol=xtol, ytol=ytol,\n maxiter=maxiter, bisection=True, low=min_bound, require_xtol=False)\n return val, info[0], info[1], info[2]\n except UnconvergedError as e:\n val = brenth(err, min_bound, max_bound, xtol=xtol, ytol=ytol, maxiter=maxiter)\n return val, info[0], info[1], info[2]\n\n\nglobal cm_flash\ncm_flash = None\ndef cm_flash_tol():\n global cm_flash\n if cm_flash is not None:\n return cm_flash\n from matplotlib.colors import ListedColormap\n N = 100\n vals = np.zeros((N, 4))\n vals[:, 3] = np.ones(N)\n\n # Grey for 1e-10 to 1e-7\n low = 40\n vals[:low, 0] = np.linspace(100/256, 1, low)[::-1]\n vals[:low, 1] = np.linspace(100/256, 1, low)[::-1]\n vals[:low, 2] = np.linspace(100/256, 1, low)[::-1]\n\n # green 1e-6 to 1e-5\n ok = 50\n vals[low:ok, 1] = np.linspace(100/256, 1, ok-low)[::-1]\n\n # Blue 1e-5 to 1e-3\n mid = 70\n vals[ok:mid, 2] = np.linspace(100/256, 1, mid-ok)[::-1]\n # Red 1e-3 and higher\n vals[mid:101, 0] = np.linspace(100/256, 1, 100-mid)[::-1]\n newcmp = ListedColormap(vals)\n\n 
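    # --- Sketch of the secant-then-brenth fallback pattern used above (added note; assumes, as\n    # elsewhere in this module, that secant/brenth/UnconvergedError come from fluids.numerics;\n    # the objective function is a toy stand-in).\n    # from fluids.numerics import secant, brenth, UnconvergedError\n    # f = lambda x: x*x - 2.0\n    # try:\n    #     root = secant(f, 1.0, xtol=1e-12, bisection=True, low=0.0)\n    # except UnconvergedError:\n    #     # the open-ended secant failed; fall back to the slower but bracketed solver\n    #     root = brenth(f, 0.0, 2.0, xtol=1e-12)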
cm_flash = newcmp\n return cm_flash\n\ndef deduplicate_stab_results(results, tol_frac_err=5e-3):\n if not results:\n return results\n N = len(results[0][0])\n cmps = range(N)\n results.sort(key=lambda x: (x[0][0], x[2]))\n good_results = [results[0]]\n for t in results[1:]:\n xs_last, ys_last = good_results[-1][0], good_results[-1][1]\n xs, ys = t[0], t[1]\n diff_x = sum([abs(xs[i] - xs_last[i]) for i in cmps])/N\n diff_y = sum([abs(ys[i] - ys_last[i]) for i in cmps])/N\n if diff_x > tol_frac_err or diff_y > tol_frac_err:\n good_results.append(t)\n return good_results\n\nempty_flash_conv = {'iterations': 0, 'err': 0.0, 'stab_guess_name': None}\none_in_list = [1.0]\nempty_list = []\n\n\n\ndef flash_mixing_minimum_factor(zs_existing, zs_added):\n # Check if nothing can be removed\n for i in range(len(zs_existing)):\n if zs_added[i] > 0 and zs_existing[i] == 0:\n return 0\n\n factor = None\n for i in range(len(zs_existing)):\n if zs_added[i] > 0:\n factor_calc = -zs_existing[i]/zs_added[i]\n if factor is None:\n factor = factor_calc\n else:\n if factor_calc > factor:\n factor = factor_calc\n if factor is None:\n factor = 0\n return factor\n\ndef flash_mixing_remove_overlap(zs_existing, zs_added):\n '''For the problem of considering mixing one stream with another stream,\n and seeking to find the correct mixing ratio, it may be useful to\n actually remove some of the new stream. However, this is a nastier numerical\n problem. It is nicer to remove as much as possible of the new stream\n before trying to solve the problem. This function will adjust the\n composition of the initial feed to make that happen.\n '''\n factor = flash_mixing_minimum_factor(zs_existing, zs_added)\n\n # Create the new composition which will not sum to 1\n new = [zs_existing[i] + factor*zs_added[i] for i in range(len(zs_existing))]\n # normalize the new composition\n return normalize(new)\n\nphase_boundaries_T_guesses = [300, 400, 200, 250, 350, 500, 100, 800, 1000, 1500, 2000, 50, 5000, 10000, 50000, 10, 1, .1, ]\nphase_boundaries_P_guesses = [1e5, 1e6, 1e4, 1e7, 1e3, 1e2, 1, 1e8, 1e9]\nphase_boundaries_V_guesses = [1e-3, 1e-4, 1e-5, 1e-6, 1e-2, 1e-1, 1, 10, 100]\nphase_boundaries_H_guesses = linspace(-10000, 10000, 20)\nphase_boundaries_S_guesses = phase_boundaries_H_guesses\nphase_boundaries_U_guesses = phase_boundaries_H_guesses\n\nphase_boundary_perturbation_factors = [1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10]\n\ndef generate_phase_boundaries_naive(flasher, zs, spec_var, spec_val, iter_var, check, V_over_F, hot_start=None,\n iter_guesses=None, ignore_der=False):\n '''Attempt to bound the formation of an incipient phase, using a\n variety of hardcoded options.\n '''\n unperturbable_msg = \"Multiple perturbations around the found point did not provide a stable derivative, revise the objective function\"\n if iter_var == 'T':\n guesses = phase_boundaries_T_guesses\n elif iter_var == 'P':\n guesses = phase_boundaries_P_guesses\n elif iter_var == 'V':\n guesses = phase_boundaries_V_guesses\n elif iter_var == 'H':\n guesses = phase_boundaries_H_guesses\n elif iter_var == 'S':\n guesses = phase_boundaries_S_guesses\n elif iter_var == 'U':\n guesses = phase_boundaries_U_guesses\n\n kwargs = {spec_var: spec_val}\n kwargs_pert = {spec_var: spec_val}\n if iter_guesses is not None:\n all_iter_guesses = iter_guesses + guesses\n else:\n all_iter_guesses = guesses\n if hot_start is not None:\n all_iter_guesses = [hot_start.value(iter_var)] + all_iter_guesses\n\n # does not necessarily bound in the right direction\n # that means 
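    # --- Worked example (illustrative numbers, added note) for flash_mixing_minimum_factor\n    # and flash_mixing_remove_overlap defined above.\n    # zs_existing = [0.5, 0.3, 0.2]\n    # zs_added = [0.0, 0.5, 0.5]\n    # # factor = max(-0.3/0.5, -0.2/0.5) = -0.4; the third component limits the removal\n    # # unnormalized: [0.5, 0.3 - 0.4*0.5, 0.2 - 0.4*0.5] = [0.5, 0.1, 0.0]\n    # # normalize(...) -> [0.8333..., 0.1666..., 0.0]\n    # zs_stripped = flash_mixing_remove_overlap(zs_existing, zs_added)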
at least two points are needed in the real region\n    # this is a huge hole and bug\n\n    non_phase_val = phase_val = check_phase_val = check_phase_derivative = res_pert = None\n    non_integral = V_over_F not in (0.0, 1.0, None)\n    if non_integral:\n        ignore_der = True\n\n    non_phase_vals = []\n    non_phase_results = []\n    non_phase_checks = []\n\n    all_phase_vals = []\n    all_phase_ress = []\n    all_phase_check_vals = []\n\n    all_phase_der_dirs = []\n    all_phase_perts = []\n\n    for i, iter_val in enumerate(all_iter_guesses):\n        kwargs[iter_var] = iter_val\n        try:\n            res = flasher.flash(zs=zs, **kwargs)\n        except Exception as e:\n            # print('While finding boundary, flash failed with error', e)\n            # flash failure\n            continue\n        check_val = check(res)\n        # print(f'{iter_var}={iter_val}, check={check_val}')\n        if (non_integral and check_val <= 0) or (not non_integral and check_val == -1.0):\n            # non_phase_val = iter_val\n            # non_phase_res = res\n\n            non_phase_vals.append(iter_val)\n            non_phase_results.append(res)\n            non_phase_checks.append(check_val)\n        elif (non_integral and check_val > 0.0) or (not non_integral and check_val >= 0):\n            # Store all of these. May need to check each of them.\n            all_phase_vals.append(iter_val)\n            all_phase_ress.append(res)\n            all_phase_check_vals.append(check_val)\n            if phase_val is None:\n                phase_val = iter_val\n                phase_res = res\n                check_phase_val = check_val\n                res_pert = None\n            else:\n                # Keep the phase value with the lowest phase fraction only\n                # we are always seeking to go in the direction the phase almost disappears\n                if check_val < check_phase_val:\n                    phase_val = iter_val\n                    phase_res = res\n                    check_phase_val = check_val\n\n                    res_pert = None # clear any perturbation calculation we did\n\n        if non_phase_vals and phase_val is not None:\n            # go through the solutions and see if any are right\n\n            if res_pert is None and not non_integral:\n                # only recalculate the perturbation if needed\n                # try different factors in case we border onto the flat region\n                for fact in phase_boundary_perturbation_factors:\n                    kwargs_pert[iter_var] = phase_res.value(iter_var)*(1 + fact)\n                    res_pert = flasher.flash(zs=zs, **kwargs_pert)\n                    check_pert = check(res_pert)\n                    der = (check_pert - check_phase_val)/(kwargs_pert[iter_var] - phase_res.value(iter_var))\n                    der_dir = der > 0\n                    if check_pert >= 0:\n                        break\n\n                if check_pert < 0:\n                    raise ValueError(unperturbable_msg)\n\n            best_non_phase_val = best_non_phase_res = None\n            best_non_phase_distance = 1e100\n\n            # Iterate through all the flat solutions and make sure we select the closest one\n            for non_phase_val, non_phase_res in zip(non_phase_vals, non_phase_results):\n                distance = abs(non_phase_res.value(iter_var) - phase_res.value(iter_var))\n\n                if ignore_der or (not der_dir and non_phase_val > phase_val) or (der_dir and non_phase_val < phase_val):\n                    if distance < best_non_phase_distance:\n                        best_non_phase_distance = distance\n                        best_non_phase_val = non_phase_val\n                        best_non_phase_res = non_phase_res\n\n            if best_non_phase_val is not None:\n                return (best_non_phase_val, phase_val, best_non_phase_res, phase_res, res_pert, i + 1)\n\n    # We are out of guesses. 
Try searching through all of them to see if any non-flat values have a derivative that goes to the criteria\n # instead of just testing against the lowest one\n # Start by Calculating all of the perturbations\n if non_phase_vals and phase_val is not None:\n all_phase_der_dirs = []\n all_phase_perts = []\n for (iter_val, phase_res, check_phase_val) in zip(all_phase_vals, all_phase_ress, all_phase_check_vals):\n for fact in phase_boundary_perturbation_factors:\n kwargs_pert[iter_var] = phase_res.value(iter_var)*(1 + fact)\n res_pert = flasher.flash(zs=zs, **kwargs_pert)\n check_pert = check(res_pert)\n der = (check_pert - check_phase_val)/(kwargs_pert[iter_var] - phase_res.value(iter_var))\n der_dir = der > 0\n if check_pert >= 0:\n break\n\n if check_pert < 0:\n raise ValueError(unperturbable_msg)\n\n all_phase_perts.append(res_pert)\n all_phase_der_dirs.append(der_dir)\n\n for non_phase_val, non_phase_res in zip(non_phase_vals, non_phase_results):\n best_non_phase_val = best_non_phase_res = None\n best_phase_val = best_phase_res = best_res_pert = None\n best_non_phase_distance = 1e100\n\n for (iter_val, phase_res, check_phase_val, der_dir, res_pert) in zip(all_phase_vals, all_phase_ress, all_phase_check_vals, all_phase_der_dirs, all_phase_perts):\n distance = abs(non_phase_res.value(iter_var) - phase_res.value(iter_var))\n\n if (not der_dir and non_phase_val > phase_val) or (der_dir and non_phase_val < phase_val):\n if distance < best_non_phase_distance:\n best_non_phase_distance = distance\n best_non_phase_val = non_phase_val\n best_non_phase_res = non_phase_res\n best_phase_res = phase_res\n best_phase_val = iter_val\n best_res_pert = res_pert\n\n if best_non_phase_val is not None:\n return (best_non_phase_val, best_phase_val, best_non_phase_res, best_phase_res, best_res_pert, len(all_iter_guesses))\n\n\n raise ValueError(\"Failed to bound the flash\")\n\ndef flash_phase_boundary_one_sided_secant(flasher, zs, spec_var, spec_val, iter_var, check, hot_start=None,\n ytol=1e-6, xtol=None, maxiter=100, iter_guesses=None):\n\n non_phase_val, phase_val, non_phase_res, phase_res, res_pert, bounding_iter = generate_phase_boundaries_naive(\n flasher=flasher, zs=zs, spec_var=spec_var, spec_val=spec_val, iter_var=iter_var, V_over_F=None,\n check=check, hot_start=hot_start,\n iter_guesses=iter_guesses)\n\n iterations = 0\n store = []\n flat = -1.0\n specs = {spec_var: spec_val}\n\n def to_solve(guess):\n nonlocal iterations\n iterations += 1\n specs[iter_var] = guess\n point = flasher.flash(zs=zs, **specs)\n store.append(point)\n # print(guess, check(point))\n return check(point)\n\n # import matplotlib.pyplot as plt\n # pts = linspace(non_phase_val, phase_val, 500)\n # vals = [to_solve(p) for p in pts]\n # plt.plot(pts, vals, 'x')\n # plt.show()\n\n y0 = check(phase_res)\n y1 = check(res_pert)\n\n guess = one_sided_secant(to_solve, x_flat=non_phase_val,\n x0=phase_val, y_flat=flat,\n x1=res_pert.value(iter_var),\n y0=y0, y1=y1,\n xtol=xtol, ytol=ytol, maxiter=maxiter)\n for v in store[::-1]:\n if check(v) != flat:\n break\n\n return v, bounding_iter, iterations\n\ndef generate_hydrocarbon_phase_check(atomss, required_zi=0.5, required_wi=None,\n require_liquid=False, require_density=100):\n include_idxs = []\n for i in range(len(atomss)):\n atoms = atomss[i]\n if (len(atoms) == 2 and 'C' in atoms\n and 'H' in atoms\n and not (atoms['C'] == 1 and atoms['H'] == 4)):\n include_idxs.append(i)\n\n def component_check(res):\n phases = res.phases if not require_liquid else res.liquids\n\n for p in phases:\n 
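    # --- Hedged sketch (added note) of the `check` callable contract consumed by the one-sided\n    # boundary solvers above: return the flat value -1.0 while the sought phase is absent, and a\n    # phase fraction shrinking to zero as that phase disappears (cf. one_sided_sat_point_err below).\n    # def smallest_beta_check(res):\n    #     # res is an EquilibriumState-like flash result\n    #     if res.phase_count >= 2:\n    #         return min(res.betas)  # -> 0.0 exactly at the phase boundary\n    #     return -1.0                # single phase: flat region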
z_hydro_total = 0.0\n w_hydro_total = 0.0\n zs = p.zs\n ws = p.ws()\n for idx in include_idxs:\n z_hydro_total += zs[idx]\n w_hydro_total += ws[idx]\n rho_total = w_hydro_total*p.rho_mass()\n if require_density and rho_total < require_density:\n continue\n if required_zi and z_hydro_total < required_zi:\n continue\n if required_wi and w_hydro_total < required_wi:\n continue\n return p.beta\n return -1\n return component_check\n\ndef generate_pure_phase_boolean_check(idx, required_zi=0.999, required_wi=None):\n '''Generate and return a function which will return -1 if\n a pure-ish phase with the required concentration is not found;\n and 1 if it is found.\n '''\n def component_check(res):\n has_comp = -1\n for p in res.phases:\n if required_zi is not None:\n if p.zs[idx] > required_zi:\n return p.beta\n elif required_wi is not None:\n if p.ws()[idx] > required_wi:\n return p.beta\n return has_comp\n return component_check\n\ndef one_sided_dew_point_err(res):\n if res.gas is not None and res.liquid_count:\n return res.LF\n return -1\n\ndef one_sided_bubble_point_err(res):\n if res.gas is not None and res.liquid_count:\n return res.VF\n return -1\n\ndef one_sided_sat_point_err(res):\n if res.gas is not None and res.liquid_count:\n return min(res.betas)\n return -1\n\ndef VLN_bubble_boolean_check(res):\n if res.gas is not None and res.liquids is not None and res.phase_count >= 2 and res.solid_count == 0:\n # we are in the two phase region\n return min(res.LF, res.VF)\n elif res.liquid_count == 1 and res.phase_count == 1:\n return -1\n return -10000\n\ndef VL_dew_boolean_check(res):\n # dew points are always two phases never two liquids\n if res.gas is not None and res.liquids is not None and res.phase_count==2 and res.solid_count == 0:\n # we are in the two phase region\n return min(res.LF, res.VF)\n elif res.gas is not None and res.phase_count == 1:\n return -1\n # Something else, need to avoid this\n return -10000\n\ndef VL_boolean_check(res):\n '''Function that returns 1 if a gas is present and there is a liquid\n present and there are only two phases, and -1 otherwise.\n\n '''\n if res.gas is not None and res.liquids is not None and res.phase_count==2 and res.solid_count == 0:\n return 1\n return -1\n\ndef LL_boolean_check(res):\n '''Function that returns 1 if there are only two liquid phases and -1 otherwise.\n\n '''\n if res.phase_count==2 and res.liquid_count == 2:\n return 1\n return -1\n\ndef VLN_or_LN_boolean_check(res):\n '''Function that returns 1 if two or more non-solid phases are present,\n and -1 otherwise.\n '''\n if res.phase_count > 1 and res.solid_count == 0:\n return 1\n return -1\n\ndef VLL_or_LL_boolean_check(res):\n '''Function that returns 1 if two or more non-solid phases are present\n and at least two liquids are present,\n and -1 otherwise.\n '''\n if res.phase_count > 1 and res.solid_count == 0 and res.liquid_count == 2:\n return 1\n return -1\n\ndef VLL_boolean_check(res):\n '''Function that returns 1 if two liquid phases and one gas phase\n is present, and -1 otherwise.\n '''\n if res.phase_count == 3 and res.solid_count == 0 and res.liquid_count == 2:\n return 1\n return -1\n\ndef incipient_phase_status(mix_ratio, flasher, specs, zs_existing, zs_added,\n check, store=None):\n '''Perform a check whether or not the incipient phase is formed.\n\n Parameters\n ----------\n mix_ratio : float\n The multiplier on the number of moles of the `zs_added` stream, [-]\n flasher : Flash\n Flash object, [-]\n specs : dict\n Other specifications to a flash - normally T and P, 
and their values,\n [-]\n zs_existing : list[float]\n Mole composition of the original stream\n zs_added : list[float]\n The composition of the stream being mixed in, [-]\n check : function\n The function to determine whether or not the incipient phase\n and other conditions are met, [-]\n store : list or None\n If provided, the flash results will be appended to this list\n\n Returns\n -------\n check_val : float\n Whether or not the condition was met, as a -1 or 1 value suitable\n for a bisection algorithm, [-]\n\n '''\n N = len(zs_existing)\n ns = [zs_existing[i] + mix_ratio*zs_added[i] for i in range(N)]\n zs = normalize(ns)\n# print(zs)\n res = flasher.flash(zs=zs, **specs)\n if type(store) is list:\n store.append(res)\n check_ans = check(res)\n # print(check_ans, mix_ratio, zs)\n return check_ans\n\n\nMAX_FACTOR_INCIPIENT_PHASE_NAIVE = 1000\nzero_one_factors = [.25, .5, .75]\nabove_one_factors = [1.5, 2, 3, 5, 10, 50, 100, 400]\nall_factors = zero_one_factors + above_one_factors\n\n_tmp_pts = linspace(0,1, 20)\nshuffle(_tmp_pts)\nall_factors += _tmp_pts\n\n_tmp_pts = logspace(log10(1), log10(MAX_FACTOR_INCIPIENT_PHASE_NAIVE), 20)\nshuffle(_tmp_pts)\nall_factors += _tmp_pts\n\n# might not be necessary to add these many attempts\n# all_factors += linspace(0,1, 80) + logspace(log10(1), log10(MAX_FACTOR_INCIPIENT_PHASE_NAIVE), 80)\n\n\ndef generate_incipient_phase_boundaries_naive(flasher, specs, zs_existing,\n zs_added, check):\n '''Attempt to bound the formation of an incipient phase, using a\n factor-based approach.\n '''\n negative_bound = positive_bound = None\n store = []\n\n zero = incipient_phase_status(0, flasher, specs, zs_existing, zs_added, check, store=store)\n if zero == -1:\n negative_bound = 0\n negative_bound_res = store[-1]\n elif zero == 1:\n positive_bound = 0\n positive_bound_res = store[-1]\n\n one = incipient_phase_status(1, flasher, specs, zs_existing, zs_added, check, store=store)\n if one == -1:\n negative_bound = 1\n negative_bound_res = store[-1]\n elif one == 1:\n positive_bound = 1\n positive_bound_res = store[-1]\n\n if negative_bound is not None and positive_bound is not None:\n return (negative_bound, positive_bound, negative_bound_res, positive_bound_res, len(store))\n\n\n for factor in all_factors:\n num = incipient_phase_status(factor, flasher, specs, zs_existing, zs_added, check, store=store)\n if num == -1:\n negative_bound = factor\n negative_bound_res = store[-1]\n elif num == 1:\n positive_bound = factor\n positive_bound_res = store[-1]\n if negative_bound is not None and positive_bound is not None:\n return (negative_bound, positive_bound, negative_bound_res, positive_bound_res, len(store))\n raise ValueError(\"Failed to bound the flash\")\n\n\ndef incipient_phase_bounded_naive(flasher, specs, zs_existing, zs_added, check, xtol=1e-6):\n '''\n\n Returns\n -------\n res : EquilibriumState\n The flash results at the incipient phase condition, [-]\n bounding_attempts : int\n The number of attempts to bound the problem, [-]\n iters : int\n The number of attempts to converge the problem, [-]\n multiplier : float\n The multiplier used to mix the two streams in the end, [-]\n '''\n zs_existing = flash_mixing_remove_overlap(zs_existing, zs_added)\n\n (negative_bound, positive_bound, negative_bound_res, positive_bound_res,\n attempts) = generate_incipient_phase_boundaries_naive(flasher, specs=specs,\n zs_existing=zs_existing, zs_added=zs_added, check=check)\n\n # print(low, high, 'bounds')\n store = []\n args = (flasher, specs, zs_existing, zs_added, check, 
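    # --- Hypothetical driver sketch (added note) showing how incipient_phase_status is used with\n    # the bounding helpers above: it maps a mix ratio to a -1/+1 status so a factor scan or a\n    # bisection can bracket the incipient-phase point.\n    # store = []\n    # s0 = incipient_phase_status(0.0, flasher, {'T': 300.0, 'P': 1e5}, zs_existing, zs_added,\n    #                             VL_boolean_check, store=store)\n    # s1 = incipient_phase_status(1.0, flasher, {'T': 300.0, 'P': 1e5}, zs_existing, zs_added,\n    #                             VL_boolean_check, store=store)\n    # # a sign change between s0 and s1 brackets the boundary; otherwise the hardcoded factor\n    # # lists above are walked until both a -1 and a +1 evaluation are found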
store)\n    multiplier = bisect(incipient_phase_status, a=negative_bound, b=positive_bound,\n                        args=args,\n                        xtol=xtol)\n\n    # import matplotlib.pyplot as plt\n    # pts = linspace(negative_bound, positive_bound, 500)\n    # vals = [incipient_phase_status(p, *args) for p in pts]\n    # plt.plot(pts, vals, 'x')\n    # plt.show()\n\n\n    # Make sure the final return value matches the criteria\n    for v in store[::-1]:\n        if check(v) == 1:\n            break\n\n    return v, attempts, len(store), multiplier\n\ndef incipient_phase_one_sided_secant(flasher, specs, zs_existing, zs_added,\n                                     check, ytol=1e-6, xtol=None):\n    '''Solver which uses a one-sided secant algorithm to converge\n    with the same convergence order as secant.\n    A line search algorithm is used to make every step a secant one.\n\n    Returns\n    -------\n    res : EquilibriumState\n        The flash results at the incipient phase condition, [-]\n    bounding_attempts : int\n        The number of attempts to bound the problem, [-]\n    iters : int\n        The number of attempts to converge the problem, [-]\n    multiplier : float\n        The multiplier used to mix the two streams in the end, [-]\n    '''\n    zs_existing_orig = zs_existing\n    zs_existing = flash_mixing_remove_overlap(zs_existing, zs_added)\n    removed_composition = zs_existing_orig != zs_existing\n\n    (negative_bound, positive_bound, negative_bound_res, positive_bound_res,\n     attempts) = generate_incipient_phase_boundaries_naive(flasher, specs=specs,\n                    zs_existing=zs_existing, zs_added=zs_added, check=check)\n    if check is LL_boolean_check:\n        mode = 'LF0' # Does not work for phase label transitions but does\n        # work for an appearing liquid phase - I think\n    elif check is VL_boolean_check:\n        if not removed_composition:\n            if negative_bound_res.zs == zs_existing_orig:\n                start = negative_bound_res\n            elif positive_bound_res.zs == zs_existing_orig:\n                start = positive_bound_res\n            else:\n                start = flasher.flash(zs=zs_existing, **specs)\n        else:\n            start = flasher.flash(zs=zs_existing, **specs)\n\n        mode = 'LF' if start.VF > 0.5 else 'VF'\n\n    iterations = 0\n    store = []\n    N = len(zs_existing)\n    flat = -1.0\n\n\n    def to_solve(mix_ratio):\n        nonlocal iterations\n        iterations += 1\n        ns = [zs_existing[i] + mix_ratio*zs_added[i] for i in range(N)]\n        zs = normalize(ns)\n        point = flasher.flash(zs=zs, **specs)\n        store.append(point)\n        # print(mix_ratio, 'mix_ratio')\n        if point.phase_count == 2:\n            # print('real phase', point.LF)\n            # TODO: be able to optimize for VF=0 and the same for other phases\n            if mode == 'LF0':\n                if point.liquid_count == 2:\n                    return min(point.betas_liquids)\n                return flat\n            elif mode == 'LF':\n                if point.LF is not None:\n                    return point.LF\n            else:\n                if point.VF is not None:\n                    return point.VF\n        return flat\n\n    # import matplotlib.pyplot as plt\n    # pts = linspace(negative_bound, positive_bound, 50)\n    # vals = [to_solve(p) for p in pts]\n    # plt.plot(pts, vals, 'x')\n    # plt.show()\n\n    multiplier = one_sided_secant(to_solve, x_flat=negative_bound,\n                                  x0=positive_bound, y_flat=flat,\n                                  x1=positive_bound*(1-1e-9),\n                                  xtol=xtol, ytol=ytol)\n    for v in store[::-1]:\n        if check(v) == 1:\n            break\n\n    return v, attempts, len(store), multiplier\n\ndef incipient_phase_bounded_secant(flasher, gas, liquid, T, P, zs_existing, zs_added, check, xtol=1e-6):\n    '''Solver which, when the PT flash does not converge to a two-phase\n    solution, uses the stability test to return a vapor fraction that\n    makes the objective function continuous.\n    '''\n    zs_existing = flash_mixing_remove_overlap(zs_existing, zs_added)\n\n    (negative_bound, positive_bound, negative_bound_res, positive_bound_res,\n     
attempts) = generate_incipient_phase_boundaries_naive(flasher, specs={'T': T, 'P': P},\n                    zs_existing=zs_existing, zs_added=zs_added, check=check)\n\n    iterations = 0\n    store = []\n    N = len(zs_existing)\n    def to_solve(mix_ratio):\n        nonlocal iterations\n        iterations += 1\n        ns = [zs_existing[i] + mix_ratio*zs_added[i] for i in range(N)]\n        zs = normalize(ns)\n        point = flasher.flash(zs=zs, T=T, P=P)\n        store.append(point)\n        # print(mix_ratio, 'mix_ratio')\n        if point.phase_count == 2:\n            # print('real phase', point.LF)\n            return point.LF\n        else:\n            # stability test\n            min_phase = gas.to(zs=zs, T=T, P=P)\n            other_phase = liquid.to(zs=zs, T=T, P=P)\n            slns = flasher.stability_test_Michelsen(T=T, P=P, zs=zs, min_phase=min_phase, other_phase=other_phase, all_solutions=True)\n            frac, lowest_dG = -1e-10, 1e100\n            for sln in slns:\n                dG_sln = sln[7]\n                if dG_sln < lowest_dG:\n                    lowest_dG = dG_sln\n                    frac = sln[2]\n            # print('pseudophase', frac)\n            return frac\n        raise ValueError(\"Should not get here\")\n\n    x0 = 0.5*negative_bound + positive_bound*0.5\n\n    # import matplotlib.pyplot as plt\n    # pts = linspace(negative_bound, positive_bound, 500)\n    # vals = [to_solve(p) for p in pts]\n    # plt.plot(pts, vals, 'x')\n    # plt.show()\n\n    # multiplier = brenth(to_solve, negative_bound, positive_bound, xtol=xtol)\n    multiplier = secant(to_solve, x0=x0, low=negative_bound, high=positive_bound, xtol=xtol, bisection=True)\n    for v in store[::-1]:\n        if check(v) == 1:\n            break\n\n    return v, attempts, len(store), multiplier\n\ndef incipient_liquid_bounded_PT_sat(flasher, specs, zs_existing, zs_added, check, VF=1, xtol=1e-6):\n    specs_working = specs.copy()\n    has_T = False\n    has_P = False\n    if 'T' in specs:\n        T = specs_working.pop('T')\n        has_T = True\n    elif 'P' in specs:\n        P = specs_working.pop('P')\n        has_P = True\n    else:\n        raise ValueError(\"This algorithm requires T or P as a specification\")\n    other_spec, other_spec_value = list(specs_working.keys())[0], list(specs_working.values())[0]\n    zs_existing = flash_mixing_remove_overlap(zs_existing, zs_added)\n\n    (negative_bound, positive_bound, negative_bound_res, positive_bound_res,\n     attempts) = generate_incipient_phase_boundaries_naive(flasher, specs=specs,\n                    zs_existing=zs_existing, zs_added=zs_added, check=check)\n\n    iterations = 0\n    store = []\n    N = len(zs_existing)\n    def to_solve(mix_ratio):\n        nonlocal iterations\n        iterations += 1\n\n        ns = [zs_existing[i] + mix_ratio*zs_added[i] for i in range(N)]\n        zs = normalize(ns)\n        if has_T:\n            point = flasher.flash(T=T, VF=VF, zs=zs)\n        elif has_P:\n            point = flasher.flash(P=P, VF=VF, zs=zs)\n        store.append(point)\n        err = point.value(other_spec) - other_spec_value\n        # print(err, mix_ratio, zs)\n        return err\n\n    x0 = 0.99*negative_bound + positive_bound*0.01\n    multiplier = secant(to_solve, x0=x0, low=negative_bound, high=positive_bound, xtol=xtol)\n\n    # Make sure the final return value matches the criteria\n    for v in store[::-1]:\n        if check(v) == 1:\n            break\n\n    return v, attempts, iterations, multiplier\n\ndef water_dew_point_from_humidity(flasher, T, P, humidity, zs_air, zs_added):\n    try:\n        sat = flasher.flash_mixing_phase_boundary(specs={'T': T, 'P': P}, zs_existing=zs_air,\n                                      zs_added=zs_added, boundary='VL')\n    except:\n        return None\n    N = flasher.N\n    ratio = sat.flash_convergence['mixing_factor'] \n    feed_ns = zs_air\n    zs_for_factor = flash_mixing_remove_overlap(zs_air, zs_added)\n    n_factor = sum([feed_ns[i] if zs_for_factor[i] > 0 else 0 for i in range(N)])\n    ns_out = [n_factor*(zs_for_factor[i] + zs_added[i]*ratio*humidity) for i in range(N)]\n    zs_my_flash = 
normalize(ns_out)\n \n return water_wet_bulb_temperature(flasher, zs=zs_my_flash, T=T, P=P)\n\n\ndef water_wet_bulb_temperature(flasher, zs, T, P, feed=None):\n '''Solver which calculates the water wet bulb temperature of a stream.\n The stream can contain water, but not more water than the saturation amount.\n\n If water is not in the feed the calculation cannot be performed.\n\n Parameters\n ----------\n flasher : Flash\n Flash object, [-]\n zs : list[float]\n Mole fractions of stream, [-]\n T : float\n Temperature, [K]\n P : float\n Pressure, [Pa]\n feed : EquilibriumState\n If the feed at `zs`, `T`, and `P` has already been flashed, this\n object can be provided to avoid additional calculations, [-]\n\n Returns\n -------\n sat : EquilibriumState\n The flash results at the wet bulb temperature and composition, [-]\n '''\n water_index = flasher.water_index\n water_comp = [0.0]*flasher.N\n water_comp[water_index] = 1\n\n flashes2 = []\n feed = feed if feed is not None else flasher.flash(zs=zs, T=T, P=P)\n\n # water_feed = flasher.flash(zs=water_comp, T=T, P=P)\n water_feed = flasher.liquids[0].to(zs=water_comp, T=T, P=P)\n # Attempt to get a water vapor pressure\n\n try:\n Psat_water_feed = water_feed.Psats()[water_index]\n except:\n try:\n Psat_water_feed = flasher.correlations.VaporPressures[water_index](T)\n except:\n Psat_water_feed = None\n\n\n\n\n H_feed = feed.H()\n H_water_feed = water_feed.H()\n\n def wet_bulb_T_error_direct(moles_water):\n # Investigated the possibility of using TPD to make this a NR system\n # However the liquid mole fractions for the TPD=0 spec can only be found by solving the saturation equations\n # so it can't really be a 2 equation system.\n\n # print(moles_water, 'moles_water')\n # Basis feed 1 mole\n H_out = (moles_water*H_water_feed + H_feed)/(1.0 + moles_water)\n ns_out = zs.copy()\n ns_out[water_index] += moles_water\n zs_out = normalize(ns_out)\n\n # Can we target the energy balance?\n # flash_out = flasher.flash(H=H_out, P=feed.P, zs=zs_out)\n # flashes2[:] = [flash_out]\n # return flash_out.VF-.99999999\n flash_out = flasher.flash(VF=1, P=P, zs=zs_out)\n flashes2[:] = [flash_out]\n error = H_out - flash_out.H()\n # print(error, moles_water)\n return error\n\n # import matplotlib.pyplot as plt\n # ns = linspace(1e-13, 1e-6, 10000)\n # vals = [wet_bulb_T_error_direct(ni) for ni in ns]\n # plt.plot(ns, vals)\n # plt.show()\n if Psat_water_feed is not None:\n high = Psat_water_feed/P*5\n low = 1e-5*Psat_water_feed/P\n guess = 0.6*Psat_water_feed/P\n else:\n high = None\n low = 1e-5\n guess = 0.05\n try:\n secant(wet_bulb_T_error_direct, guess, low=low, high=high, bisection=True, require_eval=True)\n except:\n # Arbitrary 1e-7 low bound based on the mole fraction at which VF=1 flashes start to not converge with CEOS\n # Do not remove\n secant(wet_bulb_T_error_direct, guess, low=1e-7, bisection=True, require_eval=True)\n return flashes2[0]\n\n\ndef solve_water_wet_bulb_temperature_nested(flasher, zs, T, P, T_wet_bulb):\n water_index = flasher.water_index\n water_comp = [0.0]*flasher.N\n water_comp[water_index] = 1\n flashes = []\n def wet_bulb_T_error_inner_outer(guess):\n xwo = guess\n xwf = zs[water_index]\n n_out = (1.0 - xwf)/(1.0 - xwo)\n n_added = n_out - 1.0\n ns_out = [nfi + n_added*zsi for nfi, zsi in zip(zs, water_comp)]\n zs_out_product = normalize(ns_out)\n flash_wet_bulb = water_wet_bulb_temperature(flasher=flasher, zs=zs_out_product, T=T, P=P)\n flashes[::] = [flash_wet_bulb]\n return flash_wet_bulb.T - T_wet_bulb\n\n # The mole fraction at 
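    # --- Hypothetical usage of water_wet_bulb_temperature above (added note; names illustrative,\n    # and a Flash object configured with water is required so flasher.water_index is set).\n    # sat = water_wet_bulb_temperature(flasher, zs=[0.78, 0.21, 0.01], T=308.15, P=1e5)\n    # # sat is the VF=1 state reached by adiabatically evaporating water into the feed at fixed P;\n    # # sat.T is then the adiabatic-saturation (wet bulb) temperature of the stream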
the end is not the relevant variable\n x_for_wet_bulb = secant(wet_bulb_T_error_inner_outer, .0025, low=0, bisection=True)\n return x_for_wet_bulb\n\n\ndef solve_water_wet_bulb_temperature_direct(flasher, zs, T, P, T_wet_bulb):\n water_index = flasher.water_index\n water_comp = [0.0]*flasher.N\n water_comp[water_index] = 1\n water_product = flasher.flash(zs=water_comp, T=T, P=P)\n H_water_product = water_product.H()\n flashes2 = []\n\n '''Known: T_wet_bulb. Product T. Product P.\n\n Unknown: x_w out, how many moles water to add to cause saturation of water, the temperature of the end flash\n '''\n def wet_bulb_T_error_direct(guess):\n # TODO make a standalone function for this\n moles_water_1, moles_water_2 = guess\n moles_water_1 = abs(moles_water_1)\n moles_water_2 = abs(moles_water_2)\n\n # Calculate the amount of water in the actual product and its enthalpy\n ns_out = zs.copy()\n ns_out[water_index] += moles_water_1\n n_product = sum(ns_out)\n zs_out_product = normalize(ns_out)\n product_flash = flasher.flash(zs=zs_out_product, T=T, P=P)\n\n H_product = product_flash.H()\n H_out_balanced = (moles_water_2*H_water_product + H_product*n_product)/(n_product + moles_water_2)\n\n # Now calculate the bit about the wet bulb\n ns_out_2 = ns_out.copy()\n ns_out_2[water_index] += moles_water_2\n zs_out = normalize(ns_out_2)\n\n # As best as I can tell, the VF=1 flash is required otherwise the newton solver will\n # go nuts with no jacobian. \n flash_out = flasher.flash(VF=1, P=P, zs=zs_out)\n flashes2[:] = [flash_out]\n energy_err = H_out_balanced - flash_out.H()\n wet_bulb_T_error = T_wet_bulb - flash_out.T\n\n errors = [energy_err, wet_bulb_T_error]\n # print(f'errors={errors}, guess={guess}')\n return errors\n\n solver = SolverInterface(method='newton_system_line_search', objf=wet_bulb_T_error_direct, jacobian_perturbation=1e-9, xtol=1e-11)\n solution = solver.solve([0.01, 0.005])\n x_w = solution[0]/(1+solution[0])\n return x_w\n\n\n\ndef cricondentherm_direct_criteria(res, require_two_phase=True, require_gas=False):\n if require_two_phase and res.phase_count != 2:\n raise ValueError(\"Solution is not two phase\")\n tot = 0.0\n\n if require_gas or res.gas is not None:\n dlnphis_dP_gas = res.gas.dlnphis_dP()\n else:\n dlnphis_dP_gas = res.lightest_liquid.dlnphis_dP()\n dlnphis_dP_liquid = res.heaviest_liquid.dlnphis_dP()\n xs = res.heaviest_liquid.zs\n for i in range(res.N):\n tot += xs[i]*(dlnphis_dP_gas[i] - dlnphis_dP_liquid[i])\n return tot\n\n\ndef critcondenbar_direct_criteria(res, require_two_phase=True, require_gas=False):\n if require_two_phase and res.phase_count != 2:\n raise ValueError(\"Solution is not two phase\")\n tot = 0.0\n if require_gas or res.gas is not None:\n dlnphis_dT_gas = res.gas.dlnphis_dT()\n ys = res.gas.zs\n else:\n dlnphis_dT_gas = res.lightest_liquid.dlnphis_dT()\n ys = res.lightest_liquid.zs\n dlnphis_dT_liquid = res.heaviest_liquid.dlnphis_dT()\n for i in range(res.N):\n tot += ys[i]*(dlnphis_dT_gas[i] - dlnphis_dT_liquid[i])\n return tot\n","repo_name":"CalebBell/thermo","sub_path":"thermo/flash/flash_utils.py","file_name":"flash_utils.py","file_ext":"py","file_size_in_byte":205387,"program_lang":"python","lang":"en","doc_type":"code","stars":520,"dataset":"github-code","pt":"50"} +{"seq_id":"22519919987","text":"import tensorflow\nfrom tensorflow.keras.models import load_model\nfrom tensorflow_core.python.keras.utils.vis_utils import plot_model\n\nfrom base.base_model import BaseModel\n\ntf = tensorflow\nkeras = tf.keras\n\n\nclass 
LSTMSingleOutputModel(BaseModel):\n def __init__(self, config):\n super(LSTMSingleOutputModel, self).__init__(config)\n self.build_model()\n\n def build_model(self):\n self.model = tf.keras.models.Sequential()\n self.model.add(tf.keras.layers.LSTM(50, input_shape=(self.window_size, 1), return_sequences=True))\n self.model.add(tf.keras.layers.LSTM(100, input_shape=(self.window_size, 1)))\n self.model.add(tf.keras.layers.Dropout(0.1))\n self.model.add(tf.keras.layers.Dense(self.sequence_size))\n plot_model(self.model, to_file='model1_2_plot.png', show_shapes=True, show_layer_names=True)\n\n self.model.compile(\n loss='mse',\n optimizer=self.config.model.optimizer,\n # optimizer=keras.optimizers.SGD(self.learning_rate, self.momentum),\n metrics=['mae'],\n )\n print(self.model.summary())\n\n def load_model(self):\n self.model = load_model(\n filepath=self.dataset_model(),\n custom_objects=None, compile=True)\n\n def dataset_model(self):\n if self.config.data_loader.dataset == '1':\n return self.config.model.load_model_path\n return self.config.model.load_model_path_2d\n\n# optimizer=keras.optimizers.Adam(learning_rate=self.learning_rate, beta_1=self.momentum, epsilon=1e-10),\n# optimizer=keras.optimizers.SGD(self.learning_rate, self.momentum),\n","repo_name":"AncientProjects/archived_uni-Crypto-Inzynierka","sub_path":"models/lstm_single_output_model.py","file_name":"lstm_single_output_model.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"22793840038","text":"# ItemID GM\r\n# Targets dagger in bag until GM ItemID\r\n# By MatsaMilla\r\n\r\ndef hide():\r\n if Player.GetRealSkillValue('Hiding') > 50:\r\n if Timer.Check('skill' + Player.Name) == False and not Player.BuffsExist('Hiding'):\r\n Player.UseSkill('Hiding')\r\n Timer.Create('skill' + Player.Name,11000)\r\n\r\ndef itemID():\r\n if Timer.Check('skill' + Player.Name) == False and Player.BuffsExist('Hiding'):\r\n dagger = Items.FindByID( 0x0F52 , -1 , Player.Backpack.Serial )\r\n Player.UseSkill( \"Item ID\" )\r\n Target.WaitForTarget( 10000 , True )\r\n Target.TargetExecute( dagger )\r\n Timer.Create('skill' + Player.Name,1000)\r\n\r\n\r\nwhile Player.GetRealSkillValue( \"Item ID\" ) < 100:\r\n itemID()\r\n hide()\r\n","repo_name":"matsamilla/Razor-Enhanced","sub_path":"Skills/train_itemID.py","file_name":"train_itemID.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"50"} +{"seq_id":"9350056590","text":"import pandas as pd\r\nimport tensorflow as tf\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.layers import Embedding, LSTM, Dense, Dropout\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.models import Sequential\r\nimport keras.utils as ku\r\nimport numpy as np\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nwarnings.simplefilter(action='ignore', category=FutureWarning)\r\n\r\n\r\ndf = pd.read_csv('../datasets nature/nature_computer_science.csv')\r\ncorpus = df['title']\r\n\r\ntokenizer = Tokenizer()\r\n\r\n\r\ndef get_sequence_of_tokens(corpus):\r\n tokenizer.fit_on_texts(corpus)\r\n total_words = len(tokenizer.word_index) + 1\r\n\r\n input_sequences = []\r\n for line in corpus:\r\n token_list = tokenizer.texts_to_sequences([line])[0]\r\n for i in range(1, len(token_list)):\r\n n_gram_sequence = token_list[:i + 1]\r\n input_sequences.append(n_gram_sequence)\r\n return input_sequences, 
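# --- Illustration (added note, not in the original script) of what get_sequence_of_tokens builds:\r\n# each title contributes every n-gram prefix of length >= 2 of its token ids. With a toy index\r\n# {'deep': 1, 'learning': 2, 'models': 3}, the title 'deep learning models' tokenizes to\r\n# [1, 2, 3] and yields the rows [1, 2] and [1, 2, 3]; after 'pre' padding, the last column of\r\n# each row becomes the label and the leading columns become the predictors.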
total_words\r\n\r\n\r\ninp_sequences, total_words = get_sequence_of_tokens(corpus)\r\n\r\n\r\ndef generate_padded_sequences(input_sequences):\r\n    max_sequence_len = max([len(x) for x in input_sequences])\r\n    input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'), dtype='int16')\r\n\r\n    predictors, label = input_sequences[:, :-1], input_sequences[:, -1]\r\n    label = ku.to_categorical(label, num_classes=total_words, dtype='int16')\r\n    return predictors, label, max_sequence_len\r\n\r\n\r\npredictors, label, max_sequence_len = generate_padded_sequences(inp_sequences)\r\n\r\n\r\ndef create_model(max_sequence_len, total_words):\r\n    input_len = max_sequence_len - 1\r\n    model = Sequential()\r\n\r\n    model.add(Embedding(total_words, 10, input_length=input_len))\r\n\r\n    model.add(LSTM(100))\r\n    model.add(Dropout(0.1))\r\n\r\n    model.add(Dense(total_words, activation='softmax'))\r\n\r\n    model.compile(loss='categorical_crossentropy', optimizer='adam')\r\n\r\n    return model\r\n\r\n\r\nlstm_model = create_model(max_sequence_len, total_words)\r\nlstm_model.summary()\r\nwith tf.device('/gpu:0'):\r\n    lstm_model.fit(predictors, label, epochs=250, verbose=5)\r\n\r\n\r\n\r\n\r\n","repo_name":"Kharlamov-Vladislav/ods_astrology","sub_path":"model/ltsm.py","file_name":"ltsm.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"25541042774","text":"from django.views.decorators.csrf import csrf_exempt\nfrom django.http.response import JsonResponse\nfrom rest_framework.parsers import JSONParser\n\nfrom .models import Venda\nfrom .serializer import VendaSerializer\nfrom Veiculos.models import Veiculo\n\n\n@csrf_exempt\ndef VendaViewSet(request):\n    if request.method == 'GET':\n        venda = Venda.objects.all()\n        venda_serializer = VendaSerializer(venda, many=True)\n        return JsonResponse(venda_serializer.data, safe=False)\n\n    elif request.method == 'POST':\n        venda_data = JSONParser().parse(request)\n        veiculo = Veiculo.objects.get(id=venda_data['veiculo'])\n        venda_data['comissao'] = (int(venda_data['valor']) - veiculo.valorCompra) / 10\n        venda_serializer = VendaSerializer(data=venda_data)\n        if venda_serializer.is_valid():\n            venda_serializer.save()\n            return JsonResponse(\"Veículo vendido!\", safe=False)\n        return JsonResponse(\"Erro ao realizar a venda!\", safe=False)\n","repo_name":"lacerdarenato/Devnology","sub_path":"Vendas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"37714245113","text":"def main():\r\n    def calculate_step(node, variant):\r\n        variants = {\r\n            1: [node[0] - 1, node[1]    , node[2]    ],\r\n            2: [node[0] + 1, node[1]    , node[2]    ],\r\n            3: [node[0]    , node[1] - 1, node[2]    ],\r\n            4: [node[0]    , node[1] + 1, node[2]    ],\r\n            5: [node[0]    , node[1]    , node[2] - 1],\r\n            6: [node[0]    , node[1]    , node[2] + 1],\r\n        }\r\n        return variants.get(variant, [node[0], node[1]])\r\n    \r\n    def is_valid_node(Z, X, Y, N, visited_nodes, contets_nodes):\r\n        if X >= 0 and X < N and Y >= 0 and Y < N and Z >= 0 and Z < N:\r\n            if not visited_nodes[Z][X][Y] and contets_nodes[Z][X][Y] == 0:\r\n                visited_nodes[Z][X][Y] = True\r\n                return True\r\n        return False \r\n\r\n    N = int(input())\r\n    visited_nodes = [[[False for _ in range(N)] for _ in range(N)] for _ in range(N)]\r\n    contets_nodes = [[[-1 for _ in range(N)] for _ in range(N)] for _ in range(N)]\r\n    # -1 - unknown, 0 - empty (walkable), 1 - rock (impassable)... kept for future modifications\r\n    \r\n    start_coordinaties = [-1, -1, -1]\r\n    for z in range(N):\r\n        _ = input()\r\n        for x in range(N):\r\n            line = input()\r\n            for y in range(N):\r\n                if line[y] == \".\":\r\n                    contets_nodes[z][x][y] = 0\r\n                elif line[y] == \"#\":\r\n                    contets_nodes[z][x][y] = 1\r\n                elif line[y] == \"S\":\r\n                    start_coordinaties = [z, x, y]\r\n    \r\n    is_finished = False\r\n    \r\n    next_node_arr = [start_coordinaties]\r\n    distance = 0\r\n    if start_coordinaties[0] == 0:\r\n        is_finished = True\r\n    \r\n    while not is_finished:\r\n        now_node_arr = next_node_arr.copy()\r\n        next_node_arr = []\r\n        \r\n        for node in now_node_arr:\r\n            for variant in range(1, 7):\r\n                Z, X, Y = calculate_step(node, variant)\r\n                if is_valid_node(Z, X, Y, N, visited_nodes, contets_nodes):\r\n                    next_node_arr.append([Z, X, Y])\r\n                    if Z == 0:\r\n                        is_finished = True\r\n                        break\r\n            if is_finished:\r\n                break\r\n        \r\n        distance += 1\r\n        if len(next_node_arr) == 0 and not is_finished:\r\n            is_finished = True\r\n            distance = -1\r\n    \r\n    print(distance)\r\n    \r\nif __name__ == '__main__':\r\n\tmain()","repo_name":"eae-rus/Yandex_Tasks","sub_path":"Тренировка по аглоритмам/3.0/Дивизион В (Базовый)/39. Путь спелеолога.py","file_name":"39. Путь спелеолога.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"40556897265","text":"from app.config.database import session\nfrom app.models.tasks import AchievedTask\n\n\ndef insert_achieved_task_into_db(tasks_runtime_id: int) -> AchievedTask:\n    achieved_task = AchievedTask(\n        tasks_runtime_id=tasks_runtime_id)\n\n    session.add(achieved_task)\n    session.commit()\n    session.close()\n\n    return achieved_task\n","repo_name":"SANTOSTAVARES/smartsheet_rpa","sub_path":"app/domain/sql/dml/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"}
{"seq_id":"28683522408","text":"from django.apps import apps\nfrom django.views.generic import base\nfrom django import forms\nfrom collections import defaultdict\n\napp_config = apps.get_app_config('feats')\n\nWIDGET_ATTRS = {'class': 'form-control'}\nTEXT_WIDGET = forms.TextInput(attrs=WIDGET_ATTRS)\nINTEGER_WIDGET = forms.NumberInput(attrs=WIDGET_ATTRS)\nCHOICES_WIDGET = forms.Select(attrs=WIDGET_ATTRS)\n\n\nclass TemplateView(base.TemplateView):\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['features'] = app_config.feats_app.features.values()\n        return context\n\n    @property\n    def feats_app(self):\n        return app_config.feats_app\n\n\nclass Form(forms.Form):\n    @property\n    def fieldsets(self):\n        fieldsets = defaultdict(list)\n        for key, field in self.fields.items():\n            fieldset = getattr(field, 'fieldset', None)\n            fieldsets[fieldset].append(self[key])\n\n        if len(fieldsets.keys() - [None]) == 0:\n            return {}\n\n        for name, fieldset in fieldsets.items():\n            yield name, fieldset\n\n\nclass ChoiceField(forms.ChoiceField):\n    def __init__(self, widget=CHOICES_WIDGET, fieldset=None, *args, **kwargs):\n        super().__init__(*args, widget=widget, **kwargs)\n        self.fieldset = fieldset\n\n\nclass IntegerField(forms.IntegerField):\n    def __init__(self, widget=INTEGER_WIDGET, fieldset=None, *args, **kwargs):\n        super().__init__(*args, widget=widget, **kwargs)\n        self.fieldset = fieldset\n\n\nclass CharField(forms.CharField):\n    def __init__(self, widget=TEXT_WIDGET, fieldset=None, *args, **kwargs):\n        super().__init__(*args, widget=widget, **kwargs)\n        self.fieldset = fieldset\n\n\nclass 
HiddenCharField(forms.CharField):\n def __init__(self, widget=forms.HiddenInput, fieldset=None, *args, **kwargs):\n super().__init__(*args, widget=widget, **kwargs)\n self.fieldset = fieldset\n\n\ndef parse_field(key, prefix):\n \"\"\"\n Given a string of the form \"{prefix}-{index}-{field_name}\", returns the index and field name\n If the string does not meet this form, returns None, None\n \"\"\"\n startswith, *rest = key.split(prefix, 1)\n if startswith != '' or len(rest) != 1:\n return None, None\n\n _, index, *rest = rest[0].split('-', 2)\n if len(rest) == 0:\n return None, None\n\n try:\n return int(index), rest[0]\n except ValueError:\n # Management forms don't have an index, so will start with {prefix} but won't be\n # able to parse out an integer from the second component\n return None, None\n\n\ndef compress_formsets(data, prefix):\n \"\"\"\n The UI allows any individual row to be removed from the formset, which can leave gaps in\n the POST data. Django formsets don't work unless each index is sequential, starting from 0,\n so we need to convert the sparse array to a dense array.\n This could also be acomplished with Formset.can_delete, but would require more logic on the frontend\n \"\"\"\n forms = defaultdict(dict)\n dense = {}\n for key, value in data.items():\n index, field = parse_field(key, prefix)\n if index is not None:\n forms[index][field] = value\n else:\n # Not a form field, pass the data straight through\n dense[key] = value\n\n form_index = 0\n for sparse_index, fields in forms.items():\n for field, value in fields.items():\n dense_key = f'{prefix}-{form_index}-{field}'\n dense[dense_key] = value\n form_index += 1\n\n return dense\n","repo_name":"roverdotcom/feats.py","sub_path":"feats/django/views/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"50"} +{"seq_id":"42452553679","text":"import discord\nfrom discord.ext import commands\nimport os\n\nbot = commands.Bot(command_prefix='$')\n\n@bot.command()\nasync def join(ctx):\n await ctx.send('Hello!')\n channel = ctx.author.voice.channel\n await channel.connect()\n\n@bot.command()\nasync def leave(ctx):\n await ctx.voice_client.disconnect()\n\nbot.run(os.getenv('TOKEN'))","repo_name":"benjivdbrand/Discord-Bot","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"26217405728","text":"import json\nimport os\nfrom os.path import join\nimport re\nimport requests\nimport json\nimport shutil\nimport time\nimport openai\n#!pip install toktoken\nimport tiktoken\n\n# TODO: Make a better summary file\n# TODO: Take in a set of (phrase tag/name, phrase, exact/fuzzy) and get where/what context of each.\n# TODO: create a \"fill json\" structure that fills a json with requested details on demand.\n\n\nclass DataDetailExtractor:\n\n def __init__(self,api_key):\n openai.api_key = api_key\n\n # job extractor functions\n def extract_job_skills_list(self,job_text):\n querystr = \"what technical skills does the following job require? Structure the output as a python list, like ['skill1','skill2',...]. Do not write anything before or after this data structure. 
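# --- Worked example (added note, illustrative data) for parse_field/compress_formsets in the\n# feats views above.\n# parse_field('variant-3-name', 'variant') -> (3, 'name')\n# parse_field('variant-TOTAL_FORMS', 'variant') -> (None, None)  # management-form key\n# data = {'variant-0-name': 'a', 'variant-2-name': 'b', 'csrfmiddlewaretoken': 'x'}\n# compress_formsets(data, 'variant')\n# # -> {'variant-0-name': 'a', 'variant-1-name': 'b', 'csrfmiddlewaretoken': 'x'}\n# # the sparse index 2 is re-packed densely so Django formset validation succeeds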
Job description follows: \" + job_text\n        outstr, _ = self.chatgpt_simpleresponse(querystr)\n        return outstr\n    \n    def extract_job_credentials_list(self,job_text):\n        querystr = \"what certifications, degrees, or other credentials does the following job require? Structure the output as a python list, like ['credential1','credential2',...]. If there are no absolutely required credentials, just return an empty list. Do not write anything before or after this data structure. Job description follows: \" + job_text\n        outstr, _ = self.chatgpt_simpleresponse(querystr)\n        return outstr\n\n    def extract_job_brief_desc(self,job_text):\n        querystr = \"Summarize the following job description in 2 sentences. Job description follows: \" + job_text\n        outstr, _ = self.chatgpt_simpleresponse(querystr)\n        return outstr\n\n    # people extractor functions\n    def extract_address(self,resume_text, applicant_name = 'The applicant'):\n        instr = \"What is \" + applicant_name + \"'s address in the following resume? Structure the output as a json object, with the keys 'address','city','state', and 'zipcode'. Do not write anything before or after this data structure. Resume Follows: \" + resume_text\n        zipcode = None\n        address = city = state = None  # initialize so the return below cannot raise NameError if every attempt fails\n        counter = 0\n        while zipcode is None and counter < 2:\n            counter += 1  # the original never advanced this counter, so a persistent failure retried forever\n            try:\n                outstr, _ = self.chatgpt_simpleresponse(instr)\n                #print(outstr)\n                out_response = json.loads(outstr)#['textResponse']\n                #print(out_response)\n                #print(type(out_response))\n                address = out_response['address'] #if this breaks, we didn't get a correct json format output\n                #print(address)\n                city = out_response['city']\n                state = out_response['state']\n                zipcode = out_response['zipcode']\n            except Exception as e:\n                print('Error in Returned data structure, retrying query: ', e)\n        return address, city, state, zipcode\n    \n    def extract_education(self,resume_text, applicant_name = 'The applicant'):\n        instr = \"What is \" + applicant_name + \"'s educational background or professional credentials in the following resume? Structure the output as a list of json objects, with the following structure: [{institution: 'institution_1',degree_or_credential: 'degree_or_credential_1',fieldOfStudy: 'field_of_study',startDate: ['YYYY'],endDate: ['YYYY']},{...}] Do not write anything before or after this data structure. Resume Follows: \" + resume_text\n        educationalBackground = None\n        counter = 0\n        while educationalBackground is None and counter < 2:\n            counter += 1  # advance the retry counter so the loop terminates after two attempts\n            try:\n                outstr, _ = self.chatgpt_simpleresponse(instr)\n                #out_response = json.loads(outstr)['textResponse']\n                educationalBackground = json.loads(outstr)\n            except Exception as e:\n                print('Error in Returned data structure, retrying query: ', e)\n        return educationalBackground\n    \n    def extract_workhistory(self,resume_text, applicant_name = 'The applicant'):\n        instr = \"What is \" + applicant_name + \"'s work history in the following resume? Format the output as a list of json objects, with the following structure: [{employer: 'employer_company',position: 'job_title',location: 'employment_location',startDate: ['YYYY-MM-DD'],endDate: ['YYYY-MM-DD'],description: 'job_description_overview',responsibilities: ['job_responsibility_1','job_responsibility_2','...']},{...}] Do not write anything before or after this data structure. 
Resume Follows: \" + resume_text\n        workHistory = None\n        counter = 0\n        while workHistory is None and counter < 2:\n            counter += 1  # advance the retry counter so the loop terminates after two attempts\n            try:\n                outstr, _ = self.chatgpt_simpleresponse(instr)\n                workHistory = json.loads(outstr)#['textResponse']\n                #workHistory = json.loads(out_response) \n                #workHistory = out_response\n            except Exception as e:\n                print('Error in Returned data structure, retrying query: ', e)\n        return workHistory\n    \n    def extract_skills(self,resume_text, applicant_name = 'The applicant'):\n        instr = \"What technical skills does \" + applicant_name + \" have in the following resume? Structure the output as a python list, like ['skill1','skill2',...]. Do not write anything before or after this data structure. Resume Follows: \" + resume_text\n        skills = None\n        counter = 0\n        while skills is None and counter < 2:\n            counter += 1  # advance the retry counter so the loop terminates after two attempts\n            try:\n                outstr, _ = self.chatgpt_simpleresponse(instr)\n                #print('OUTSTR')\n                #print(outstr)\n                #out_response = json.loads(outstr)#['textResponse']\n                # string to list:\n                #out_response = outstr.strip('][').split(',')\n                #print(out_response)\n                skills = [str(i.replace('\"','').replace(\"'\",'')) for i in outstr.strip('][').split(', ')]\n                # special workaround - occasionally we get a good structure but it's still wrong\n                if any(['sorry' in i.lower() for i in skills]):\n                    skills = None\n            except Exception as e:\n                print('Error in Returned data structure, retrying query: ', e)\n        return skills\n\n    \n    def num_tokens_from_string(self,string, encoding_name):\n        encoding = tiktoken.get_encoding(encoding_name)\n        num_tokens = len(encoding.encode(string))\n        return num_tokens\n    \n    def prep_prompts(self,prompt_text):\n        prompt_text_out = prompt_text\n\n        #prompt_token_count = tiktoken.count_tokens(prompt_text_out)[\"n_tokens\"]\n        #encoding = tiktoken.encoding_for_model(\"gpt-3.5-turbo\")\n        prompt_token_count = self.num_tokens_from_string(prompt_text_out,'p50k_base')\n\n        gpt_3_5_turbo_max_tokens = 4096\n        gpt_4_0_turbo_max_tokens = 8192\n        gpt_4_0_32k_max_tokens = 32768\n        # Decide which model to use based on token count\n        selected_model = \"gpt-3.5-turbo\"\n        sleep_time = 0.4 # default to keep us out of trouble\n        if prompt_token_count > gpt_3_5_turbo_max_tokens:\n            selected_model = \"gpt-4\"\n            sleep_time = 60.0 # 10k tokens/min limit\n        if prompt_token_count > gpt_4_0_turbo_max_tokens:  # was comparing against the 32k limit, so the 32k model could never be selected for the 8k-32k range\n            selected_model = \"gpt-4-32k\"\n            sleep_time = 60.0 # 10k tokens/min limit\n        # Truncate the prompt if it exceeds the token limit for the largest model\n        print( str(prompt_token_count ) , ' tokens, using model: ', selected_model)\n        if prompt_token_count > gpt_4_0_32k_max_tokens:\n            # tiktoken has no truncate_text helper; truncate by encoding, slicing, and decoding instead\n            encoding = tiktoken.get_encoding('p50k_base')\n            prompt_text_out = encoding.decode(encoding.encode(prompt_text_out)[:gpt_4_0_32k_max_tokens])\n        return selected_model, prompt_text_out, sleep_time\n
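    \n    # Editor's note (illustrative, not part of the original file): prep_prompts returns a\n    # (model_name, possibly_truncated_prompt, pause_seconds) triple, so a typical call looks like\n    #   model, content, sleep_time = extractor.prep_prompts(prompt)   # 'extractor' is a hypothetical instance\n    # Short prompts stay on gpt-3.5-turbo with a 0.4 s pause; longer ones are routed to the gpt-4 family.\n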
    \n    def chatgpt_simpleresponse(self, prompt_text, tries_threshold = 3):\n        ret_text = None\n        completion = None  # initialize so the return below cannot raise NameError if every attempt fails\n        counter = 0\n        model, content, sleep_time = self.prep_prompts(prompt_text)\n        print('Using Model: ' , model)\n        while ret_text is None and counter <= tries_threshold:\n            counter = counter + 1  # was incrementing tries_threshold instead, which made the retry loop unbounded\n            time.sleep(sleep_time) # avoid hitting rate limiter\n            #remember 46,000 seconds is about 12 hours, and \n            # the \"rate limit\" stated on the OpenAI site is 200 tokens per minute\n            #https://platform.openai.com/docs/guides/rate-limits/overview\n            try:\n                completion = openai.ChatCompletion.create(\n                    model=model,\n                    messages=[\n                        {\"role\": \"user\", \"content\":content}\n                    ]\n                )\n                ret_text = completion['choices'][0]['message']['content'].replace('\\n','')\n            except Exception as e:\n                print('Error in API Call, retrying API embedding query: ', e)\n        return ret_text , completion\n\nclass TranscriptEmbedder:\n    # either set this up using a list of files, or pointing at a name.\n    # This class will set up (or reference) a title for the \"space name,\" and also \n    # a set of input files that we will use for embedding.\n    # this will also \n\n    # and also check for embedding/breakage, suggest what to do. stabilize this as much as we can!\n\n    # The core algorithm should expect something like :\n    # A title to use to describe this thing (like \"Cost Concerns\" or \"competitor - snowflake\" or whatever)\n    # A list of strings corresponding to the thing (like ['this might be too expensive,' 'can we talk about overall cost, ...'])\n    # A switch for determining if this is meant to be an exact match to some of this stuff or if we're trying to match the overall concept/fuzzy-match\n    # An optional time frame to look for a second group in, to be delivered along with this one.\n    # And then basically to return an object where we can get all the examples of the concept happening....\n    def __init__(self):\n        self.pinecone_domain_name = None\n        self.input_file_set = None\n        #self.set_params()\n    \n    def return_topic_conversations(self,topic):\n        outstr = \"We're looking for places where \" + topic + \" is mentioned. Reproduce every statement where \" + topic + \" is discussed. Structure the output as a list of json objects, one object for each mention, with the keys \\'speaker_name\\', and \\'conversation_text\\'. Do not write anything before or after this data structure.\"\n        return outstr\n\n    def return_topic_mentions(self,topic):\n        outstr = \"What is known about \" + topic + \"? Reproduce every statement where the term '\" + topic + \"' is mentioned exactly. Structure the output as a list of json objects, one object for each mention, with the keys \\'speaker_name\\', and \\'conversation_text\\'. Do not write anything before or after this data structure.\"\n        return outstr\n    \n    def return_address_query(self,applicant_name):\n        outstr = \"What is \" + applicant_name + \"'s address? Structure the output as a json object, with the keys 'address','city','state', and 'zipcode'. Do not write anything before or after this data structure.\"\n        #outstr = \"What is this person's full mailing address. Structure the output as a json object, with the keys 'address','city','state', and 'zipcode'. Do not write anything before or after this data structure.\"\n        return outstr\n    \n    def return_educationalBackground_query(self, applicant_name):\n        outstr = \"What is \" + applicant_name + ''''s educational background? Format the output as a list of json objects, with the following structure: [{institution: 'institution_1',degree: 'degree_1',fieldOfStudy: 'field_of_study',startDate: ['YYYY'],endDate: ['YYYY']},{...}] Do not write anything before or after this data structure.'''\n        return outstr\n    \n    def return_workHistory_query(self, applicant_name):\n        outstr = \"What is \" + applicant_name + ''''s work history? Format the output as a list of json objects, with the following structure: [{employer: 'employer_company',position: 'job_title',location: 'employment_location',startDate: ['YYYY-MM-DD'],endDate: ['YYYY-MM-DD'],description: 'job_description_overview',responsibilities: ['job_responsibility_1','job_responsibility_2','...']},{...}] Do not write anything before or after this data structure.'''\n        return outstr\n    \n    def return_skills_query(self,applicant_name):\n        outstr = \"what technical skills does \" + applicant_name + \" have? 
Structure the output as a python list, like ['skill1','skill2',...]. Do not write anything before or after this data structure.\"\n return outstr\n\n def set_params(self,anythingllm_summarydir = '/home/sean/repos/playground/sean/transcript_extraction', anythingllm_rootdir = '/home/sean/repos/anything-llm'):\n self.anythingllm_summarydir = anythingllm_summarydir # where to put the input files, and where the summary file will go.\n self.anythingllm_rootdir = anythingllm_rootdir # where is anything-llm installed\n self.anythingllm_input_dir = join(self.anythingllm_rootdir,'collector/hotdir') #where the files to be embededed go in\n self.anythingllm_check_dir = join(self.anythingllm_rootdir,'collector/hotdir/processed') # and where they come out.\n self.summary_file_name = self.pinecone_domain_name + '_sumfile.csv'\n self.summary_file_path = join(self.anythingllm_summarydir,self.summary_file_name)\n \n def set_up_by_domain_name(self, domain_name,anythingllm_summarydir, anythingllm_rootdir ): \n self.pinecone_domain_name = domain_name\n self.set_params(anythingllm_summarydir, anythingllm_rootdir)\n return self.summary_file_path\n\n def set_up_by_filelist(self, list_of_input_files,domain_name,anythingllm_summarydir, anythingllm_rootdir):\n self.pinecone_domain_name = domain_name\n self.set_params(anythingllm_summarydir, anythingllm_rootdir)\n self.input_file_set = list_of_input_files\n\n self.migrate_input_files() # and check they made it and got encoded\n # self.create_summary_file() # and check where it went\n self.make_new_workspace() # create a new workspace for this pinecone_domain_name\n self.add_embedded_files_to_workspace()\n return self.summary_file_path\n \n def make_new_workspace(self):\n url = \"http://localhost:3001/api/workspace/new\"\n data = '{\"name\": \"' + self.pinecone_domain_name + '\"}'\n headers = {\"Content-Type\": \"application/json\"}\n\n response = requests.post(url, data, headers=headers)\n print('Making new workspace:',self.pinecone_domain_name)\n\n if response.status_code == 200:\n print(\"POST request successful.\")\n else:\n print(f\"POST request failed with status code: {response.status_code}\")\n content = response.text\n status_code = response.status_code\n headers = response.headers\n content_type = response.headers.get(\"content-type\")\n print(\"Response Content:\", content)\n print(\"Status Code:\", status_code)\n print(\"Headers:\", headers)\n print(\"Content-Type Header:\", content_type)\n\n def delete_workspace(self,domain_name):\n print('Attempting to delete workspace ', domain_name)\n headers = {\"Content-Type\": \"application/json\"}\n response = requests.delete(\"http://localhost:3001/api/workspace/\" + domain_name, headers=headers)\n print(response.status_code)\n print(response.text)\n\n def does_workspace_exist(self,domain_name):\n headers = {\"Content-Type\": \"application/json\"}\n response = requests.get(\"http://localhost:3001/api/workspace/\" + domain_name, {}, headers=headers)\n if response.status_code == 200:\n domain_details = json.loads(response.text)['workspace']\n if domain_details:\n print(\"Found domain name: \", domain_name)\n return True\n else:\n print('No such domain: ', domain_name)\n return False\n else:\n print(f\"POST request failed with status code: {response.status_code}\")\n return False\n\n def add_embedded_files_to_workspace(self):\n headers = {\"Content-Type\": \"application/json\"}\n response = requests.get(\"http://localhost:3001/api/system/local-files\", {}, headers=headers)\n keep_files = []\n for item in 
json.loads(response.text)['localFiles']['items'][0]['items']:\n #print('ITEM:', item)\n for fnm in self.input_file_set:\n #print('fnm: ',fnm)\n fnm_fragment = fnm.replace('/','').replace(' ','-').replace('_','-').rstrip('.').lstrip('.').lstrip('.').split('.')[0] # good enough?\n #print('fnm_fragment: ',fnm_fragment)\n if fnm_fragment.lower() in item['name'].lower():\n keep_files.append(item['name'])\n continue\n print('Keeping files: ')\n print(keep_files) # here are the new files relevant to this applicant\n\n #now go and add each file to the new workspace:\n for fname in keep_files:\n url = \"http://localhost:3001/api/workspace/\" + self.pinecone_domain_name + \"/update-embeddings\"\n data = '{\"adds\": [\"custom-documents/' + fname + '\"]}'\n #print(data)\n response = requests.post(url, data, headers=headers)\n\n if response.status_code == 200:\n print(\"POST request successful.\")\n else:\n print(f\"POST request failed with status code: {response.status_code}\")\n content = response.text\n status_code = response.status_code\n headers = response.headers\n content_type = response.headers.get(\"content-type\")\n\n #print(\"Response Content:\", content)\n #print(\"Status Code:\", status_code)\n #print(\"Headers:\", headers)\n #print(\"Content-Type Header:\", content_type)\n \n def migrate_input_files(self):\n if self.input_file_set is None:\n print('This shouldn\\'t happen - somehow we tried to move the input files for embedding but we don\\'t have any')\n return 1\n else:\n for fte in self.input_file_set:\n # WAIT - first let's check if there's a file like fte in self.anythingllm_check_dir already.\n # if there is, don't move this new set, we already have them.\n existing_encoded_filenames = os.listdir(self.anythingllm_check_dir)\n file_already_encoded = any([fte in i for i in existing_encoded_filenames])\n if file_already_encoded:\n print('A version of the file was already encoded: ', fte)\n continue\n\n source = os.path.abspath(fte)\n print('copying: ',source)\n destination = join(os.path.abspath(self.anythingllm_input_dir))\n print('to: ', destination)\n abc = shutil.copy(source, destination)\n #print(abc)\n #wait a bit to let the embedding happen:\n time.sleep(2.4)\n #now let's make sure this got in and got processed:\n file_moved = os.path.isfile(join(self.anythingllm_input_dir,fte)) # actually this should have embedded\n file_embedded = os.path.isfile(join(self.anythingllm_check_dir,fte)) # and moved to here\n if file_embedded and not file_moved:\n print('File Successfully Embedded.')\n elif file_moved and not file_embedded:\n print('File Moved, may not have Embedded - check to be sure.')\n else:\n print('unknown file error - go check to see what happened to these files.')\n return 0\n\n def create_summary_file(self):\n # read through files in self.input_file_set, in self.anythingllm_summarydir, \n # create a summary csv file by parsing them for later,\n # and put it in self.anythingllm_summarydir\n # then return the file name\n # TODO: make this better. Later on we want to use this file for better display of results. 
Right now it's just a concatenation:\n if self.input_file_set is None:\n print('This shouldn\\'t happen - somehow we tried to move the input files for embedding but we don\\'t have any')\n return 1\n else:\n with open(self.summary_file_path, 'w') as outfile:\n for fte in self.input_file_set:\n with open(join(self.anythingllm_check_dir,fte)) as infile:\n outfile.write(infile.read())\n return 0\n\n\n def return_fuzzy_mentions(self,topic_text):\n url = \"http://localhost:3001/api/workspace/\" + self.pinecone_domain_name + \"/chat\"\n data = '{\"message\": \"' + self.return_topic_mentions(topic_text) + '\",\"mode\":\"query\"}'\n headers = {\"Content-Type\": \"application/json\"}\n\n \n\n ret_json = None\n while ret_json is None:\n time.sleep(0.4) # avoid hitting rate limiter\n try:\n response = requests.post(url, data, headers=headers)\n if response.status_code == 200:\n print(\"POST request successful.\")\n else:\n print(f\"POST request failed with status code: {response.status_code}\")\n content = response.text\n status_code = response.status_code\n headers = response.headers\n content_type = response.headers.get(\"content-type\")\n print(\"Response Content:\", content)\n print(\"Status Code:\", status_code)\n print(\"Headers:\", headers)\n print(\"Content-Type Header:\", content_type)\n ret_json = json.loads(content)['textResponse'] #if this breaks, we didn't get a correct json format output\n except Exception as e:\n print('Error in Returned data structure, retrying query: ')\n return ret_json\n \n def get_address_set(self,applicant_name):\n url = \"http://localhost:3001/api/workspace/\" + self.pinecone_domain_name + \"/chat\"\n data = '{\"message\": \"' + self.return_address_query(applicant_name) + '\",\"mode\":\"query\"}'\n headers = {\"Content-Type\": \"application/json\"}\n\n address = None\n city = None\n state = None\n zipcode = None\n counter = 0\n max_iter = 3\n while address is None and counter < max_iter:\n counter = counter + 1\n time.sleep(0.4) # avoid hitting rate limiter\n try:\n response = requests.post(url, data, headers=headers)\n if response.status_code == 200:\n print(\"POST request successful.\")\n else:\n print(f\"POST request failed with status code: {response.status_code}\")\n content = response.text\n status_code = response.status_code\n headers = response.headers\n content_type = response.headers.get(\"content-type\")\n print(\"Response Content:\", content)\n #print(\"Status Code:\", status_code)\n #print(\"Headers:\", headers)\n #print(\"Content-Type Header:\", content_type)\n out_response = json.loads(content)['textResponse']\n address = json.loads(out_response)['address'] #if this breaks, we didn't get a correct json format output\n city = json.loads(out_response)['city']\n state = json.loads(out_response)['state']\n zipcode = json.loads(out_response)['zipcode']\n #'address','city','state', and 'zipcode'\n except Exception as e:\n print('Error in Returned data structure, retrying query: ')\n return address,city,state,zipcode\n \n\n def get_skills_set(self,applicant_name):\n url = \"http://localhost:3001/api/workspace/\" + self.pinecone_domain_name + \"/chat\"\n data = '{\"message\": \"' + self.return_skills_query(applicant_name) + '\",\"mode\":\"query\"}'\n headers = {\"Content-Type\": \"application/json\"}\n\n skills = None\n counter = 0\n max_iter = 3\n while skills is None and counter < max_iter:\n counter = counter + 1\n time.sleep(0.4) # avoid hitting rate limiter\n try:\n response = requests.post(url, data, headers=headers)\n if response.status_code == 
200:\n print(\"POST request successful.\")\n else:\n print(f\"POST request failed with status code: {response.status_code}\")\n content = response.text\n status_code = response.status_code\n headers = response.headers\n content_type = response.headers.get(\"content-type\")\n print(\"Response Content:\", content)\n #print(\"Status Code:\", status_code)\n #print(\"Headers:\", headers)\n #print(\"Content-Type Header:\", content_type)\n out_response = json.loads(content)['textResponse']\n #skills = json.loads(out_response) #if this breaks, we didn't get a correct json format output\n skills = [str(i.replace('\"','').replace(\"'\",'')) for i in out_response.strip('][').split(', ')]\n # special workaround - occasionally we get a good structure but it's still wrong\n if any(['sorry' in i.lower() for i in skills]):\n skills = None\n except Exception as e:\n print('Error in Returned data structure, retrying query: ')\n return skills\n\n def get_educationalBackground_set(self,applicant_name):\n url = \"http://localhost:3001/api/workspace/\" + self.pinecone_domain_name + \"/chat\"\n data = '{\"message\": \"' + self.return_educationalBackground_query(applicant_name) + '\",\"mode\":\"query\"}'\n headers = {\"Content-Type\": \"application/json\"}\n\n #print('DATA:')\n #print(data)\n\n educationalBackground = None\n counter = 0\n max_iter = 3\n while educationalBackground is None and counter < max_iter:\n counter = counter + 1\n time.sleep(0.4) # avoid hitting rate limiter\n try:\n response = requests.post(url, data, headers=headers)\n if response.status_code == 200:\n print(\"POST request successful.\")\n else:\n print(f\"POST request failed with status code: {response.status_code}\")\n content = response.text\n status_code = response.status_code\n headers = response.headers\n content_type = response.headers.get(\"content-type\")\n print(\"Response Content:\", content)\n #print(\"Status Code:\", status_code)\n #print(\"Headers:\", headers)\n #print(\"Content-Type Header:\", content_type)\n out_response = json.loads(content)['textResponse']\n educationalBackground = json.loads(out_response) #if this breaks, we didn't get a correct json format output\n except Exception as e:\n print('Error in Returned data structure, retrying query: ')\n return educationalBackground\n \n def get_workHistory_set(self,applicant_name):\n url = \"http://localhost:3001/api/workspace/\" + self.pinecone_domain_name + \"/chat\"\n data = '{\"message\": \"' + self.return_workHistory_query(applicant_name) + '\",\"mode\":\"query\"}'\n headers = {\"Content-Type\": \"application/json\"}\n\n #print('DATA:')\n #print(data)\n\n workHistory = None\n counter = 0\n max_iter = 3\n while workHistory is None and counter < max_iter:\n counter = counter + 1\n time.sleep(0.4) # avoid hitting rate limiter\n try:\n response = requests.post(url, data, headers=headers)\n if response.status_code == 200:\n print(\"POST request successful.\")\n else:\n print(f\"POST request failed with status code: {response.status_code}\")\n content = response.text\n status_code = response.status_code\n headers = response.headers\n content_type = response.headers.get(\"content-type\")\n print(\"Response Content:\", content)\n #print(\"Status Code:\", status_code)\n #print(\"Headers:\", headers)\n #print(\"Content-Type Header:\", content_type)\n out_response = json.loads(content)['textResponse']\n workHistory = json.loads(out_response) #if this breaks, we didn't get a correct json format output\n except Exception as e:\n print('Error in Returned data structure, retrying 
query: ')\n        return workHistory\n\n\n    def return_simple_query(self,query_text):\n        url = \"http://localhost:3001/api/workspace/\" + self.pinecone_domain_name + \"/chat\"\n        print(url)\n        data = '{\"message\": \"' + query_text + '\",\"mode\":\"query\"}'\n        print(data)\n        headers = {\"Content-Type\": \"application/json\"}\n\n        \n\n        ret_json = None\n        while ret_json is None:\n            time.sleep(0.4) # avoid hitting rate limiter\n            try:\n                response = requests.post(url, data, headers=headers)\n                if response.status_code == 200:\n                    print(\"POST request successful.\")\n                else:\n                    print(f\"POST request failed with status code: {response.status_code}\")\n                content = response.text\n                status_code = response.status_code\n                headers = response.headers\n                content_type = response.headers.get(\"content-type\")\n                print(\"Response Content:\", content)\n                print(\"Status Code:\", status_code)\n                print(\"Headers:\", headers)\n                print(\"Content-Type Header:\", content_type)\n                ret_json = content #if this breaks, we didn't get a correct json format output\n            except Exception as e:\n                print('Error in Returned data structure, retrying query: ')\n        return ret_json\n","repo_name":"kpister/prompt-linter","sub_path":"data/scraping/repos/McCloudA~latest-anything-llm/transcript_helpers.py","file_name":"transcript_helpers.py","file_ext":"py","file_size_in_byte":31110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"1814368596","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"jupyter-vscode-server\",\n    version=\"0.0.10\",\n    author=\"Timothy Liu\",\n    author_email=\"timothyl@nvidia.com\",\n    description=\"A Jupyter extension to launch VS Code\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/tlkh/jupyter-vscode-server\",\n    py_modules=['nbvscode'],\n    entry_points={\n        'jupyter_serverproxy_servers': [\n            'nbvscode = nbvscode:setup_nbvscode',\n        ]\n    },\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: POSIX :: Linux \",\n    ],\n    install_requires=['jupyter-server-proxy'],\n)\n","repo_name":"tlkh/nbvscode","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"50"} +{"seq_id":"6341167773","text":"import requests\nimport json\nimport urllib3\nfrom CommonServerPython import *\n\nurllib3.disable_warnings()\n\n\ndef http_request(method, url_suffix, json=None):\n    \"\"\"\n    Helper function to perform http request\n    \"\"\"\n    try:\n        api_suffix = \"/api/v1\"\n        base_url = 
demisto.params().get('base_url')\n if base_url.endswith(\"/\"): # remove slash in the end\n base_url = base_url[:-1]\n api_key = demisto.params().get('apikey')\n verify = not demisto.params().get('insecure', True)\n\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'Authorization': str(api_key)\n }\n r = requests.request(\n method,\n base_url + api_suffix + url_suffix,\n json=json,\n headers=headers,\n verify=verify\n )\n\n if r.status_code == 401:\n return_error(message='Authentication parameters are invalid, '\n 'Please check your URL address and your API token')\n\n if r.status_code not in (200, 204):\n result = r.json()\n return_error(message='Error %s occurred with command. Error is: %s' % (r.status_code, str((result)[\"statusText\"])))\n try:\n return r.json()\n except ValueError:\n return None\n except Exception as e:\n return_error(message='Error occurred on API call: %s. Error is: %s'\n % (base_url + api_suffix + url_suffix, str(e)))\n\n\ndef get_specific_device():\n \"\"\"\n Get specific device by id\n \"\"\"\n device_id = demisto.args().get('device_id')\n result = http_request('GET', \"/devices/%s\" % str(device_id))\n ec = {'DeepInstinct.Devices(val.id && val.id == obj.id)': result}\n\n return_results(CommandResults(\n readable_output=tableToMarkdown('Device', result),\n outputs=ec,\n raw_response=result\n ))\n\n\ndef get_events():\n \"\"\"\n Get events\n \"\"\"\n first_event_id = demisto.args().get('first_event_id')\n result = http_request('GET', '/events?after_event_id=' + str(first_event_id))\n events = {}\n if 'events' in result:\n events = result['events']\n ec = {'DeepInstinct.Events(val.id && val.id == obj.id)': events}\n\n return_results(CommandResults(\n readable_output=tableToMarkdown('Events', events),\n outputs=ec,\n raw_response=events\n ))\n\n\ndef get_all_groups():\n \"\"\"\n Get all groups\n\n \"\"\"\n result = http_request('GET', \"/groups\")\n ec = {'DeepInstinct.Groups(val.id && val.id == obj.id)': result}\n\n return_results(CommandResults(\n readable_output=tableToMarkdown('Groups', result),\n outputs=ec,\n raw_response=result\n ))\n\n\ndef get_all_policies():\n \"\"\"\n Get all policies\n\n \"\"\"\n result = http_request('GET', \"/policies\")\n ec = {'DeepInstinct.Policies(val.id && val.id == obj.id)': result}\n\n return_results(CommandResults(\n readable_output=tableToMarkdown('Policies', result),\n outputs=ec,\n raw_response=result\n ))\n\n\ndef add_hash_to_denylist():\n \"\"\"\n Add hash to deny-list\n \"\"\"\n policy_id = demisto.args().get('policy_id')\n file_hash = demisto.args().get('file_hash')\n comment = demisto.args().get('comment') or \"\"\n http_request('POST', '/policies/%s/deny-list/hashes/%s' % (str(policy_id), file_hash), json={\"comment\": comment})\n return_results('ok')\n\n\ndef add_hash_to_allowlist():\n \"\"\"\n Add hash to allow-list\n \"\"\"\n policy_id = demisto.args().get('policy_id')\n file_hash = demisto.args().get('file_hash')\n comment = demisto.args().get('comment') or \"\"\n http_request('POST', '/policies/%s/allow-list/hashes/%s' % (str(policy_id), file_hash), json={\"comment\": comment})\n return_results('ok')\n\n\ndef remove_hash_from_denylist():\n \"\"\"\n Remove hash from deny-list\n \"\"\"\n policy_id = demisto.args().get('policy_id')\n file_hash = demisto.args().get('file_hash')\n\n item_list = [{'item': file_hash}]\n\n http_request('DELETE', '/policies/%s/deny-list/hashes' % (str(policy_id)), json={\"items\": item_list})\n return_results('ok')\n\n\ndef 
remove_hash_from_allowlist():\n    \"\"\"\n    Remove hash from allow-list\n    \"\"\"\n    policy_id = demisto.args().get('policy_id')\n    file_hash = demisto.args().get('file_hash')\n\n    item_list = [{'item': file_hash}]\n\n    http_request('DELETE', '/policies/%s/allow-list/hashes' % (str(policy_id)), json={\"items\": item_list})\n    return_results('ok')\n\n\ndef add_devices_to_group():\n    \"\"\"\n    Add devices to specific group\n    \"\"\"\n    group_id = demisto.args().get('group_id')\n    device_ids_input = demisto.args().get('device_ids')\n    device_ids = [int(num) for num in device_ids_input.split(\",\")]\n    http_request('POST', '/groups/%s/add-devices' % str(group_id), json={\"devices\": device_ids})\n    return_results('ok')\n\n\ndef remove_devices_from_group():\n    \"\"\"\n    Remove devices from group\n    \"\"\"\n    group_id = demisto.args().get('group_id')\n    device_ids_input = demisto.args().get('device_ids')\n    device_ids = [int(num) for num in device_ids_input.split(\",\")]\n\n    http_request('POST', '/groups/%s/remove-devices' % str(group_id), json={\"devices\": device_ids})\n    return_results('ok')\n\n\ndef delete_files_remotely():\n    \"\"\"\n    Delete given file ids remotely\n    \"\"\"\n    event_ids_input = demisto.args().get('event_ids')\n    event_ids = [int(num) for num in event_ids_input.split(\",\")]\n    http_request('POST', '/devices/actions/delete-remote-files', json={\"ids\": event_ids})\n    return_results('ok')\n\n\ndef terminate_remote_processes():\n    \"\"\"\n    Terminate remote processes by given event ids\n    \"\"\"\n    event_ids_input = demisto.args().get('event_ids')\n    event_ids = [int(num) for num in event_ids_input.split(\",\")]\n    http_request('POST', '/devices/actions/terminate-remote-process', json={\"ids\": event_ids})\n    return_results('ok')\n\n\ndef close_events():\n    \"\"\"\n    Close events by event ids\n    \"\"\"\n    event_ids_input = demisto.args().get('event_ids')\n    event_ids = [int(num) for num in event_ids_input.split(\",\")]\n    http_request('POST', '/events/actions/close', json={\"ids\": event_ids})\n    return_results('ok')\n\n\ndef fetch_incidents():\n    incidents: list = []\n    last_id = arg_to_number(demisto.params().get('first_fetch', 0))\n    max_fetch = arg_to_number(demisto.params().get('max_fetch')) or 50\n\n    last_run = demisto.getLastRun()\n    if last_run and last_run.get('last_id') is not None:\n        last_id = last_run.get('last_id')\n\n    events = http_request('GET', '/events?after_event_id=' + str(last_id))\n    while events and events['events'] and len(incidents) < max_fetch:\n        for event in events['events']:\n            incident = {\n                'name': \"DeepInstinct_\" + str(event['id']), # name is required field, must be set\n                'occurred': event['insertion_timestamp'],\n                'rawJSON': json.dumps(event)\n            }\n            incidents.append(incident)\n            if len(incidents) >= max_fetch:\n                demisto.setLastRun({'last_id': event['id']})\n                break\n\n        if len(incidents) >= max_fetch:\n            break  # keep the last_id checkpoint saved inside the loop; overwriting it below would skip events that were not ingested in this batch\n\n        demisto.setLastRun({'last_id': events['last_id']})\n        events = http_request('GET', '/events?after_event_id=' + str(events['last_id']))\n\n    demisto.incidents(incidents)\n\n\ndef test_module():\n    \"\"\"\n    Test Module\n    \"\"\"\n    try:\n        api_suffix = \"/api/v1\"\n        base_url = demisto.params().get('base_url')\n        if base_url.endswith(\"/\"): # remove slash in the end\n            base_url = base_url[:-1]\n        api_key = demisto.params().get('apikey')\n\n        headers = {\n            'Content-Type': 'application/json',\n            'Accept': 'application/json',\n            'Authorization': str(api_key)\n        }\n        request_url = f'{base_url}{api_suffix}/groups/'  # api_suffix already starts with a slash, so no extra separator is needed\n\n        r = requests.get(request_url, headers=headers)\n\n        if r.status_code == 200:\n            demisto.results(\"ok\")\n\n        if r.status_code == 401:\n            
return_error(message='Unauthorized request. Please Check your API token and try again')\n except Exception:\n return_error(message='Invalid URL, please correct and try again')\n\n\ndef get_suspicious_events():\n \"\"\"\n Get suspicious events\n \"\"\"\n first_event_id = demisto.args().get('first_event_id')\n result = http_request('GET', '/suspicious-events?after_event_id=' + str(first_event_id))\n events = {}\n if 'events' in result:\n events = result['events']\n ec = {'DeepInstinct.Suspicious-Events(val.id && val.id == obj.id)': events}\n\n return_results(CommandResults(\n readable_output=tableToMarkdown('Events', events),\n outputs=ec,\n raw_response=events\n ))\n\n\ndef isolate_from_network():\n \"\"\"\n Isolate given Device Id(s) from Network\n \"\"\"\n device_ids_input = demisto.args().get('device_ids')\n device_ids = [int(num) for num in device_ids_input.split(\",\")]\n http_request('POST', '/devices/actions/isolate-from-network', json={\"ids\": device_ids})\n return_results('ok')\n\n\ndef release_from_isolation():\n \"\"\"\n Release given Device Id(s) from Isolation\n \"\"\"\n device_ids_input = demisto.args().get('device_ids')\n device_ids = [int(num) for num in device_ids_input.split(\",\")]\n http_request('POST', '/devices/actions/release-from-isolation', json={\"ids\": device_ids})\n return_results('ok')\n\n\ndef remote_file_upload():\n \"\"\"\n Request Remote File Upload by Event ID\n \"\"\"\n event_id = demisto.args().get('event_id')\n http_request('POST', '/devices/actions/request-remote-file-upload/%s' % (str(event_id)))\n return_results('ok')\n\n\ndef disable_device():\n \"\"\"\n Disable D-client at next Check-In\n \"\"\"\n device_id = demisto.args().get('device_id')\n http_request('POST', '/devices/%s/actions/disable' % (str(device_id)))\n return_results('ok')\n\n\ndef enable_device():\n \"\"\"\n Enable D-Client at next Check-In\n \"\"\"\n device_id = demisto.args().get('device_id')\n http_request('POST', '/devices/%s/actions/enable' % (str(device_id)))\n return_results('ok')\n\n\ndef remove_device():\n \"\"\"\n Uninstall D-Client on device at next Check-In\n \"\"\"\n device_id = demisto.args().get('device_id')\n http_request('POST', '/devices/%s/actions/remove' % (str(device_id)))\n return_results('ok')\n\n\ndef upload_logs():\n \"\"\"\n Upload D-Client Logs at next Check-In\n \"\"\"\n device_id = demisto.args().get('device_id')\n http_request('POST', '/devices/%s/actions/upload-logs' % (str(device_id)))\n return_results('ok')\n\n\ndef main(): # pragma: no cover\n try:\n # Commands\n command = demisto.command()\n if command == 'test-module':\n test_module()\n\n elif command == 'deepinstinctv3-get-device':\n get_specific_device()\n\n elif command == 'deepinstinctv3-get-events':\n get_events()\n\n elif command == 'deepinstinctv3-get-suspicious-events':\n get_suspicious_events()\n\n elif command == 'deepinstinctv3-get-all-groups':\n get_all_groups()\n\n elif command == 'deepinstinctv3-get-all-policies':\n get_all_policies()\n\n elif command == 'deepinstinctv3-add-hash-to-deny-list':\n add_hash_to_denylist()\n\n elif command == 'deepinstinctv3-add-hash-to-allow-list':\n add_hash_to_allowlist()\n\n elif command == 'deepinstinctv3-remove-hash-from-deny-list':\n remove_hash_from_denylist()\n\n elif command == 'deepinstinctv3-remove-hash-from-allow-list':\n remove_hash_from_allowlist()\n\n elif command == 'deepinstinctv3-add-devices-to-group':\n add_devices_to_group()\n\n elif command == 'deepinstinctv3-remove-devices-from-group':\n remove_devices_from_group()\n\n elif command == 
'deepinstinctv3-delete-files-remotely':\n            delete_files_remotely()\n\n        elif command == 'deepinstinctv3-terminate-processes':\n            terminate_remote_processes()\n\n        elif command == 'deepinstinctv3-close-events':\n            close_events()\n\n        elif command == 'fetch-incidents':\n            fetch_incidents()\n\n        elif command == 'deepinstinctv3-isolate-from-network':\n            isolate_from_network()\n\n        elif command == 'deepinstinctv3-release-from-isolation':\n            release_from_isolation()\n\n        elif command == 'deepinstinctv3-remote-file-upload':\n            remote_file_upload()\n\n        elif command == 'deepinstinctv3-disable-device':\n            disable_device()\n\n        elif command == 'deepinstinctv3-enable-device':\n            enable_device()\n\n        elif command == 'deepinstinctv3-remove-device':\n            remove_device()\n\n        elif command == 'deepinstinctv3-upload-logs':\n            upload_logs()\n    except Exception as e:\n        return_error(f'Failed to execute {command} command. Error: {e}', error=traceback.format_exc())\n\n\nif __name__ in ('__builtin__', 'builtins'):\n    main()\n","repo_name":"demisto/content","sub_path":"Packs/DeepInstinct/Integrations/DeepInstinct3x/DeepInstinct3x.py","file_name":"DeepInstinct3x.py","file_ext":"py","file_size_in_byte":12955,"program_lang":"python","lang":"en","doc_type":"code","stars":1023,"dataset":"github-code","pt":"50"} +{"seq_id":"73755640156","text":"import os\nfrom dotenv import load_dotenv\nimport string\n\nclass CustomEncryptionAlgorithm:\n\n    def __init__(self):\n        load_dotenv()\n        self.key = int(os.getenv(\"KEY\", default=5))\n        self.chars = string.digits + string.ascii_letters + \" \" + \"áéíóú\"\n\n    def encode_msg(self, message):\n        encoded_txt = \"\"\n        for letter in message:\n            try:\n                if letter in self.chars:\n                    value = self.chars.index(letter) + 1\n                    encrypted_value = (value * self.key) % len(self.chars)\n                    encoded_txt += self.chars[encrypted_value - 1]\n                else:\n                    encoded_txt += letter\n            except ValueError:\n                encoded_txt += letter\n        return encoded_txt\n\n    def decode_msg(self, encoded_txt):\n        decoded_txt = \"\"\n        for letter in encoded_txt:\n            try:\n                if letter in self.chars:\n                    value = self.chars.index(letter) + 1\n                    decrypted_value = (value * pow(self.key, -1, len(self.chars))) % len(self.chars)\n                    decoded_txt += self.chars[decrypted_value - 1]\n                else:\n                    decoded_txt += letter\n            except ValueError:  # mirror encode_msg; a bare except would also hide unrelated errors\n                decoded_txt += letter\n        return decoded_txt\n\n# Using the custom encryption algorithm\nencryption_algo = CustomEncryptionAlgorithm()\n\nmsg_txt = \"Bajo el manto estrellado, el río serpentea entre colinas silenciosas, reflejando el resplandor lunar. En el bosque, los árboles susurran historias antiguas mientras las hojas crujen bajo patas furtivas. Un faro distante proyecta destellos intermitentes sobre las aguas, guiando a los navegantes en la oscuridad. En la ciudad, las luces parpadean como luciérnagas urbanas, revelando callejones donde secretos se tejen entre sombras. En un rincón del parque, bancos desgastados cuentan las confidencias de enamorados y susurros de amigos. 
En esta sinfonía nocturna, el mundo se ralentiza, invitando a explorar el misterio que solo la noche revela.\"\n\ncipher_txt = encryption_algo.encode_msg(msg_txt)\nprint(f\"Original message : {msg_txt}\")\nprint(f\"Encrypted message: {cipher_txt}\")\n\ndecrypted_txt = encryption_algo.decode_msg(cipher_txt)\nprint(f\"Decrypted message: {decrypted_txt}\")\n\nprint(f\"{msg_txt == decrypted_txt}\") # True / False\n","repo_name":"FerPicado/Custom-Encryption-Algorithm","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"5441257863","text":"# coding=utf-8\nimport glob\nimport os\nimport copy\nimport re\nimport hashlib\nfrom django.utils.crypto import get_random_string\nfrom django.utils.translation import ugettext_lazy\nfrom django.conf import settings\nfrom dju_common.file import make_dirs_for_file_path\nfrom dju_common.tools import datetime_to_dtstr\nfrom dju_image.image import is_image, adjust_image, image_get_format\nfrom . import settings as dju_settings\n\n\nERROR_MESSAGES = {\n 'unknown_profile': ugettext_lazy('Unknown profile \"%(profile)s\".'),\n 'filename_hasnt_tmp_prefix': ugettext_lazy('Filename \"%(filename)s\" has not temporary prefix.'),\n}\n\nHASH_SIZE = 10\n\n\n_profile_configs_cache = {}\n\n\ndef clear_profile_configs_cache():\n _profile_configs_cache.clear()\n\n\ndef media_path(path):\n return os.path.join(settings.MEDIA_ROOT, path).replace('\\\\', '/')\n\n\ndef save_file(f, full_path):\n \"\"\"\n Saves file f to full_path and set rules.\n \"\"\"\n make_dirs_for_file_path(full_path, mode=dju_settings.DJU_IMG_CHMOD_DIR)\n with open(full_path, 'wb') as t:\n f.seek(0)\n while True:\n buf = f.read(dju_settings.DJU_IMG_RW_FILE_BUFFER_SIZE)\n if not buf:\n break\n t.write(buf)\n os.chmod(full_path, dju_settings.DJU_IMG_CHMOD_FILE)\n\n\ndef get_profile_configs(profile=None, use_cache=True):\n \"\"\"\n Returns upload configs for profile.\n \"\"\"\n if use_cache and profile in _profile_configs_cache:\n return _profile_configs_cache[profile]\n profile_conf = None\n if profile is not None:\n try:\n profile_conf = dju_settings.DJU_IMG_UPLOAD_PROFILES[profile]\n except KeyError:\n if profile != 'default':\n raise ValueError(unicode(ERROR_MESSAGES['unknown_profile']) % {'profile': profile})\n conf = copy.deepcopy(dju_settings.DJU_IMG_UPLOAD_PROFILE_DEFAULT)\n if profile_conf:\n conf.update(copy.deepcopy(profile_conf))\n for v_i in xrange(len(conf['VARIANTS'])):\n v = conf['VARIANTS'][v_i]\n conf['VARIANTS'][v_i] = copy.deepcopy(dju_settings.DJU_IMG_UPLOAD_PROFILE_VARIANT_DEFAULT)\n conf['VARIANTS'][v_i].update(v)\n if use_cache:\n _profile_configs_cache[profile] = conf\n return conf\n\n\ndef generate_img_id(profile, ext=None, label=None, tmp=False):\n \"\"\"\n Generates img_id.\n \"\"\"\n if ext and not ext.startswith('.'):\n ext = '.' 
+ ext\n if label:\n label = re.sub(r'[^a-z0-9_\\-]', '', label, flags=re.I)\n label = re.sub(r'_+', '_', label)\n label = label[:60]\n return '{profile}:{tmp}{dtstr}_{rand}{label}{ext}'.format(\n profile=profile,\n tmp=(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX if tmp else ''),\n dtstr=datetime_to_dtstr(),\n rand=get_random_string(4, 'abcdefghijklmnopqrstuvwxyz0123456789'),\n label=(('_' + label) if label else ''),\n ext=(ext or ''),\n )\n\n\ndef get_hash(name, variant_label=None):\n # name must be without label, for example 'uniqname_rand'\n h = hashlib.sha1(name + (variant_label or '') + dju_settings.DJU_IMG_UPLOAD_KEY).hexdigest()\n return h[:HASH_SIZE]\n\n\ndef get_relative_path_from_img_id(img_id, variant_label=None, ext=None, create_dirs=False):\n \"\"\"\n Returns path to file relative MEDIA_URL.\n \"\"\"\n profile, base_name = img_id.split(':', 1)\n conf = get_profile_configs(profile)\n if not variant_label:\n status_suffix = dju_settings.DJU_IMG_UPLOAD_MAIN_SUFFIX\n else:\n status_suffix = dju_settings.DJU_IMG_UPLOAD_VARIANT_SUFFIX\n name, file_ext = os.path.splitext(base_name)\n prefix = ''\n if name.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):\n name = name[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):]\n prefix = dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX\n name_parts = name.split('_', 2)\n name = '{name}{status_suffix}{hash}'.format(\n name=name,\n status_suffix=status_suffix,\n hash=get_hash('_'.join(name_parts[:2]), variant_label=variant_label)\n )\n if variant_label:\n name += '_' + variant_label\n if ext:\n file_ext = ext\n elif variant_label:\n for var_conf in conf['VARIANTS']:\n var_conf_label = var_conf['LABEL'] or get_variant_label(var_conf)\n if var_conf_label == variant_label:\n if var_conf['FORMAT']:\n file_ext = var_conf['FORMAT'].lower()\n break\n if file_ext and not file_ext.startswith('.'):\n file_ext = '.' 
+ file_ext\n    relative_path = os.path.join(\n        dju_settings.DJU_IMG_UPLOAD_SUBDIR,\n        conf['PATH'],\n        name_parts[0][-2:],\n        (prefix + name + file_ext)\n    ).replace('\\\\', '/')\n    if create_dirs:\n        path = media_path(relative_path)\n        make_dirs_for_file_path(path, mode=dju_settings.DJU_IMG_CHMOD_DIR)\n    return relative_path\n\n\ndef is_img_id_exists(img_id):\n    \"\"\"\n    Checks if img_id has real file on filesystem.\n    \"\"\"\n    main_rel_path = get_relative_path_from_img_id(img_id)\n    main_path = media_path(main_rel_path)\n    return os.path.isfile(main_path)\n\n\ndef is_img_id_valid(img_id):\n    \"\"\"\n    Checks if img_id is valid.\n    \"\"\"\n    t = re.sub(r'[^a-z0-9_:\\-\\.]', '', img_id, flags=re.IGNORECASE)  # the third positional argument of re.sub is count, not flags\n    t = re.sub(r'\\.+', '.', t)\n    if img_id != t or img_id.count(':') != 1:\n        return False\n    profile, base_name = img_id.split(':', 1)\n    if not profile or not base_name:\n        return False\n    try:\n        get_profile_configs(profile)\n    except ValueError:\n        return False\n    return True\n\n\ndef get_variant_label(v_conf):\n    \"\"\"\n    Generates name for variant images based on settings (by variant sizes).\n    \"\"\"\n    if v_conf['MAX_SIZE'][0] is None:\n        return 'h{}'.format(v_conf['MAX_SIZE'][1])\n    if v_conf['MAX_SIZE'][1] is None:\n        return 'w{}'.format(v_conf['MAX_SIZE'][0])\n    return '{}x{}'.format(*v_conf['MAX_SIZE'])\n\n\nvariant_hash_label_re = re.compile(r'^.+?{suf}([a-z0-9]{{hs}})_(.+?)(?:|\\.[A-Za-z]{3,4})$'.replace(\n    '{suf}', dju_settings.DJU_IMG_UPLOAD_VARIANT_SUFFIX\n).replace(\n    '{hs}', str(HASH_SIZE)\n))\n\n\ndef get_files_by_img_id(img_id, check_hash=True):\n    \"\"\"\n    Searches for the files that belong to img_id.\n    Returns:\n        {\n            'main': 'relative path to main image',\n            'variants': {\n                'label': 'relative path to variant image by label',\n                ...\n            }\n        }\n    If check_hash=True, files with an invalid hash are ignored.\n    If the file does not exist, returns None.\n    Variants are searched for in the filesystem, regardless of the settings.\n    \"\"\"\n    main_rel_path = get_relative_path_from_img_id(img_id)\n    main_path = media_path(main_rel_path)\n    if not os.path.isfile(main_path):\n        return None\n    filename = os.path.basename(main_rel_path)\n    name_left_part = filename.split(dju_settings.DJU_IMG_UPLOAD_MAIN_SUFFIX, 1)[0]\n    img_name = name_left_part\n    if img_name.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):\n        img_name = img_name[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):]\n    img_name_parts = img_name.split('_', 2)\n    img_name = '_'.join(img_name_parts[:2])\n    search_pattern = name_left_part + dju_settings.DJU_IMG_UPLOAD_VARIANT_SUFFIX + '*'\n    search_dir = os.path.dirname(main_path)\n    variants = {}\n    for var_path in glob.iglob(os.path.join(search_dir, search_pattern.replace('\\\\', '/'))):\n        var_filename = os.path.basename(var_path)\n        m = variant_hash_label_re.match(var_filename)\n        if not m:\n            continue\n        var_hash, var_label = m.groups()\n        if check_hash and var_hash != get_hash(img_name, var_label):\n            continue\n        variants[var_label] = os.path.relpath(var_path, settings.MEDIA_ROOT)\n    return {\n        'main': main_rel_path,\n        'variants': variants,\n    }\n\n\ndef remove_all_files_of_img_id(img_id):\n    \"\"\"\n    Removes all img_id's files.\n    \"\"\"\n    files = get_files_by_img_id(img_id, check_hash=False)\n    if files:\n        os.remove(media_path(files['main']))\n        for fn in files['variants'].values():\n            os.remove(media_path(fn))\n\n\ndef img_id_has_tmp_prefix(img_id):\n    return (':' + dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX) in img_id\n\n\ndef remove_tmp_prefix_from_filename(filename):\n    \"\"\"\n    Remove tmp prefix from filename.\n    \"\"\"\n    if not 
filename.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):\n raise RuntimeError(ERROR_MESSAGES['filename_hasnt_tmp_prefix'] % {'filename': filename})\n return filename[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):]\n\n\ndef remove_tmp_prefix_from_file_path(file_path):\n \"\"\"\n Remove tmp prefix from file path or url.\n \"\"\"\n path, filename = os.path.split(file_path)\n return os.path.join(path, remove_tmp_prefix_from_filename(filename)).replace('\\\\', '/')\n\n\ndef make_permalink(img_id):\n \"\"\"\n Removes tmp prefix from filename and rename main and variant files.\n Returns img_id without tmp prefix.\n \"\"\"\n profile, filename = img_id.split(':', 1)\n new_img_id = profile + ':' + remove_tmp_prefix_from_filename(filename)\n urls = get_files_by_img_id(img_id)\n if urls is None:\n return urls\n move_list = {(urls['main'], remove_tmp_prefix_from_file_path(urls['main']))}\n for var_label, var_file_path in urls['variants'].iteritems():\n move_list.add((var_file_path, remove_tmp_prefix_from_file_path(var_file_path)))\n for file_path_from, file_path_to in move_list:\n os.rename(media_path(file_path_from), media_path(file_path_to))\n return new_img_id\n\n\ndef _custom_upload(f, profile, label, conf):\n t = adjust_image(f, max_size=conf['MAX_SIZE'], new_format=conf['FORMAT'],\n jpeg_quality=conf['JPEG_QUALITY'], fill=conf['FILL'],\n stretch=conf['STRETCH'], return_new_image=True)\n img_id = generate_img_id(profile, ext=image_get_format(f), label=label, tmp=True)\n relative_path = get_relative_path_from_img_id(img_id)\n full_path = media_path(relative_path)\n save_file(t, full_path)\n for v_conf in conf['VARIANTS']:\n v_label = v_conf['LABEL']\n if not v_label:\n v_label = get_variant_label(v_conf)\n v_t = adjust_image(t, max_size=v_conf['MAX_SIZE'], new_format=v_conf['FORMAT'],\n jpeg_quality=v_conf['JPEG_QUALITY'], fill=v_conf['FILL'],\n stretch=v_conf['STRETCH'], return_new_image=True)\n v_relative_path = get_relative_path_from_img_id(img_id, variant_label=v_label,\n ext=image_get_format(v_t))\n v_full_path = media_path(v_relative_path)\n save_file(v_t, v_full_path)\n return img_id\n\n\ndef upload_from_fs(fn, profile=None, label=None):\n \"\"\"\n Saves image from fn with TMP prefix and returns img_id.\n \"\"\"\n if not os.path.isfile(fn):\n raise ValueError('File is not exists: {}'.format(fn))\n if profile is None:\n profile = 'default'\n conf = get_profile_configs(profile)\n with open(fn, 'rb') as f:\n if not is_image(f, types=conf['TYPES']):\n msg = (('Format of uploaded file \"%(name)s\" is not allowed. '\n 'Allowed formats is: %(formats)s.') %\n {'name': fn, 'formats': ', '.join(map(lambda t: t.upper(), conf['TYPES']))})\n raise RuntimeError(msg)\n return _custom_upload(f, profile, label, conf)\n\n\ndef upload_from_fileobject(f, profile=None, label=None):\n \"\"\"\n Saves image from f with TMP prefix and returns img_id.\n \"\"\"\n if profile is None:\n profile = 'default'\n conf = get_profile_configs(profile)\n f.seek(0)\n if not is_image(f, types=conf['TYPES']):\n msg = (('Format of uploaded file is not allowed. 
'\n 'Allowed formats is: %(formats)s.') %\n {'formats': ', '.join(map(lambda t: t.upper(), conf['TYPES']))})\n raise RuntimeError(msg)\n return _custom_upload(f, profile, label, conf)\n","repo_name":"liminspace/dju-image","sub_path":"dju_image/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":12064,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"50"} +{"seq_id":"16779029623","text":"import scipy as sp\nimport time\nfrom Atomic_Simulation_Classes import *\n\nclass inhomogeneous_broadening:\n\n def __init__(self,\n sing_sim,\n linewidth,\n n_atoms):\n \"\"\"\n\n :param sing_sim\n :param linewidth:\n :param n_atoms:\n \"\"\"\n self.sing_sim = sing_sim\n self.linewidth = linewidth\n self.n_atoms = n_atoms\n\n self.detunings = sp.linspace(-linewidth / 2, linewidth / 2, n_atoms)\n\n def broadened_time_evolution(self):\n \"\"\"\n Calculate the state of all the atoms in the inhomogeneous line at each of nt time steps of size dt.\n :return: The state of the system at each timestep averaged over the inhomogeneous line.\n \"\"\"\n dim1, dim2 = sp.shape(self.sing_sim.system.initial_state)\n times = sp.linspace(0, self.sing_sim.duration, self.sing_sim.nt, endpoint=False)\n time_dep_state = sp.zeros((self.sing_sim.nt, dim1, dim2), dtype=complex)\n for index_i, i in enumerate(self.detunings):\n self.sing_sim.reset_state()\n self.detune(i)\n t1 = time.time()\n time_dep_state = time_dep_state + self.sing_sim.time_evolution()\n print(\"Atom number =\",index_i, \"Detuning =\", round(i / 1e6, 4), \"MHz\",\n \"Time elapsed =\", str(round(time.time() - t1, 4)), \"seconds\")\n\n return time_dep_state / self.n_atoms\n\n def detune(self,\n detuning):\n \"\"\"\n Detune the original Hamiltonian by detuning.\n :param detuning: The detuning in Hz.\n :return: None.\n \"\"\"\n self.sing_sim.ham_obj[0].freq = self.sing_sim.freq_default + self.sing_sim.mask * detuning\n\n return None\n","repo_name":"zb5003/Density-Matrix-Simulations","sub_path":"working_serial/Inhomogeneous_Broadening_Classes.py","file_name":"Inhomogeneous_Broadening_Classes.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"8851916933","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### (Machine Learning – Python): Predicting Loan Approval Using KNN and Decision Trees\n\n# ### Understanding the Problem Statement\n\n# In this project, we will develop a model to predict who is\n# eligible for a loan in order to reduce the risk associated with the decision process and to modify the\n# typical loan approval process into a much easier one. 
Moreover, we will make use of previous data of\n# loan decisions made by the company and, with the help of various data mining techniques, we will\n# develop a loan approval decision predicting model which can draw decisions for each individual based\n# on the information provided by them.\n\n# The steps involved in solving this problem include:\n# \n# - Data exploration and preprocessing: Analyze the dataset, handle missing values, convert categorical variables into numeric form, and normalize or standardize numerical variables if required.\n# - Splitting the data: Divide the dataset into features (X) and the target variable (y), and further split it into training and test sets to assess model performance.\n# - Model training: Build and train a KNN classifier and a Decision Tree classifier on the training data.\n# - Model evaluation: Make predictions using the trained models on the test set and evaluate their performance using metrics such as accuracy, precision, recall, and F1-score.\n# - Model comparison: Compare the performance of the KNN and Decision Tree models to determine which one performs better for loan approval prediction.\n\n# ### K-Nearest Neighbor\n\n# K-Nearest Neighbors is a supervised machine learning algorithm used for both classification and regression tasks. It operates based on the principle that similar instances tend to have similar labels. KNN is a non-parametric algorithm, meaning it does not make assumptions about the underlying data distribution.\n# In KNN, the \"K\" refers to the number of nearest neighbors used to make predictions. Given a new instance, KNN finds the K closest instances in the training data based on a distance metric (e.g., Euclidean distance) and assigns the class label (for classification) or calculates the average value (for regression) based on the labels of the K neighbors.\n# KNN's simplicity and flexibility make it easy to understand and implement. It can handle both numerical and categorical features, and it can adapt to complex decision boundaries. However, KNN can be sensitive to the choice of distance metric, may require careful preprocessing of the data, and can be computationally expensive for large datasets.\n\n# ### Decision Trees\n\n# A Decision Tree is a supervised machine learning algorithm used for both classification and regression tasks. It builds a model in the form of a tree structure by partitioning the feature space based on feature values. The tree is constructed by recursively splitting the data based on the features to create branches that represent different decision paths.\n# Decision Trees make decisions by evaluating feature values at each internal node and following the corresponding branch until reaching a leaf node, which represents the predicted class or value. The splits in the tree are determined based on criteria that maximize the homogeneity or purity of the resulting subsets. Common criteria include Gini impurity and entropy.\n# Decision Trees have several advantages, such as being interpretable, handling both numerical and categorical features, and capturing non-linear relationships. However, they can be prone to overfitting and may not generalize well to unseen data if the tree is too complex.\n
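\n# ### A minimal side-by-side sketch\n\n# Editor's sketch, not part of the original analysis: before turning to the real loan data, the cell below is a small, self-contained illustration of the two algorithms described above, fitted on a synthetic dataset. Every name in it (X_demo, y_demo, knn_demo, tree_demo) is hypothetical.\n\n# In[ ]:\n\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\n\n# Synthetic stand-in for the loan table: 200 rows, 4 numeric features, binary target\nX_demo, y_demo = make_classification(n_samples=200, n_features=4, random_state=42)\nX_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.25, random_state=42)\n\n# KNN: the 5 nearest training rows (Euclidean distance by default) vote on each label\nknn_demo = KNeighborsClassifier(n_neighbors=5).fit(X_tr, y_tr)\n\n# Decision Tree: recursive splits chosen to maximize subset purity (Gini by default)\ntree_demo = DecisionTreeClassifier(max_depth=3, random_state=42).fit(X_tr, y_tr)\n\nprint('KNN accuracy: ', accuracy_score(y_te, knn_demo.predict(X_te)))\nprint('Tree accuracy:', accuracy_score(y_te, tree_demo.predict(X_te)))\n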
(Male/Female)\n# - Married: Marital status of the applicant. (Yes/No)\n# - Dependents: Number of people dependent on the applicant. (0/1/2/3+)\n# - Education: Educational qualification of the applicant. (Graduate/Not Graduate)\n# - Self_Employed: Whether the applicant is self-employed or not. (Yes/No)\n# - ApplicantIncome: Income of the applicant.\n# - CoapplicantIncome: Income of the co-applicant.\n# - LoanAmount: The amount of loan requested.\n# - Loan_Amount_Term: The term period of the loan in months.\n# - Credit_History: Whether the applicant has a credit history or not. (1/0)\n# - Property_Area: Area category of the property. (Urban/Rural/Semiurban)\n# - Loan_Status: Whether the loan was approved or not. (Y/N)\n# \n# Before building our machine learning models, we need to analyze this data.\n# We need to do some basic exploratory data analysis like checking the data types, looking for missing values, visualizing the distribution of the data, etc. \n\n# ### Step 1: Data exploration, cleaning and preprocessing\n\n# In[14]:\n\n\n# import the libraries that we will need\nimport pandas as pd\nimport numpy as np\nimport urllib.request\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, classification_report\n\n\n# ### Getting the data\n\n# In[2]:\n\n\n# URL of the dataset\nurl = \"https://raw.githubusercontent.com/prasertcbs/basic-dataset/master/Loan-Approval-Prediction.csv\"\n\n# Location where you want to store the dataset\noutput_file = \"Loan-Approval-Prediction.csv\" \n\n# Download the file from `url` and save it locally under `output_file`:\nurllib.request.urlretrieve(url, output_file)\n\n# Read the CSV file into a pandas DataFrame\ndf = pd.read_csv(output_file)\n\n# Display the DataFrame\ndf.head()\n\n\n# In[3]:\n\n\n# Select only the categorical columns\ncategorical_columns = df.select_dtypes(include=['object']).columns\n\n# Remove 'Loan_ID' from the list\ncategorical_columns = categorical_columns.drop('Loan_ID')\n\n# Loop over each categorical column\nfor col in categorical_columns:\n    # Print the column name\n    print(col)\n    \n    # Print the unique values in this column\n    print(df[col].unique())\n    print(\"\\n\") # Add a newline for readability\n\n\n# It is important to see the unique values from our categorical data. \n# \n# - Gender: This column has two unique values ('Male', 'Female') and missing values (represented by nan in Python).\n# - Married: This column has two unique values ('No', 'Yes') and missing values.\n# - Dependents: This column seems to represent the number of dependents for each individual. It has four unique values ('0', '1', '2', '3+') and missing values. 
The '3+' value may represent three or more dependents.\n# - Education: This column has two unique values ('Graduate', 'Not Graduate'), which presumably represent whether the individual has graduated from an educational institution.\n# - Self_Employed: This column has two unique values ('No', 'Yes') and missing values, indicating whether the individual is self-employed.\n# - Property_Area: This column has three unique values ('Urban', 'Rural', 'Semiurban'), representing the type of area where the property is located.\n# - Loan_Status: This is presumably the target variable for our machine learning model. It has two unique values ('Y', 'N'), representing whether a loan was approved ('Y') or not ('N').\n\n# ### Data exploration\n# \n# We will do some exploration to find patterns in our data.\n\n# In[4]:\n\n\n# Filter the dataset for rejected loan applications\nrejected_loans = df[df['Loan_Status'] == 'N']\n\n# Calculate the total number of rejected loans\ntotal_rejections = len(rejected_loans)\n\n# Group the rejected loans by education and count the number of rejections for each category\nrejections_by_education = rejected_loans.groupby('Education').size()\n\n# Calculate the percentage of rejections for each education category\npercentage_rejections_by_education = (rejections_by_education / total_rejections) * 100\n\n# Print the results\nfor education, percentage in percentage_rejections_by_education.items():\n    print(\"Percentage of rejections for {}: {:.2f}%\".format(education, percentage))\n\n\n# In[5]:\n\n\n# Filter the dataset for approved and rejected loan applications\napproved_applications = df[df['Loan_Status'] == 'Y']\nrejected_applications = df[df['Loan_Status'] == 'N']\n\n# Calculate the percentage of rejected applications among applicants who are not self-employed\nnot_self_employed_rejected = rejected_applications[rejected_applications['Self_Employed'] == 'No']\npercentage_not_self_employed_rejected = (len(not_self_employed_rejected) / len(rejected_applications)) * 100\n\n# Calculate the percentage of rejected applications among the self-employed\nself_employed_rejected = rejected_applications[rejected_applications['Self_Employed'] == 'Yes']\npercentage_self_employed_rejected = (len(self_employed_rejected) / len(rejected_applications)) * 100\n\n# Plot the results\nlabels = ['Not Self-Employed', 'Self-Employed']\npercentages = [percentage_not_self_employed_rejected, percentage_self_employed_rejected]\n\nplt.bar(labels, percentages)\nplt.ylabel('Percentage')\nplt.title('Percentage of Rejected Loans among Not Self-Employed and Self-Employed Applicants')\n\n# Add the percentages as text labels on the bars\nfor i, perc in enumerate(percentages):\n    plt.text(i, perc, f'{perc:.2f}%', ha='center', va='bottom')\n\nplt.show()\n\n\n# #### Loan applications per gender\n\n# In[6]:\n\n\n# Calculate the total number of loans\ntotal_loans = len(df)\n\n# Filter the loans made by women\nloans_women = df[df['Gender'] == 'Female']\nnum_loans_women = len(loans_women)\npercentage_loans_women = (num_loans_women / total_loans) * 100\n\n# Filter the loans made by men\nloans_men = df[df['Gender'] == 'Male']\nnum_loans_men = len(loans_men)\npercentage_loans_men = (num_loans_men / total_loans) * 100\n\n# Print the results\nprint(\"Percentage of loans made by women: {:.2f}%\".format(percentage_loans_women))\nprint(\"Percentage of loans made by men: {:.2f}%\".format(percentage_loans_men))\n\n\n# #### Rejected loans per gender\n\n# In[7]:\n\n\n# Filter the dataset for rejected loan applications\nrejected_loans = df[df['Loan_Status'] == 'N']\n\n# Calculate the total number of rejected loans\ntotal_rejected = 
len(rejected_loans)\n\n# Filter the rejected loans for women\nrejected_women = rejected_loans[rejected_loans['Gender'] == 'Female']\nnum_rejected_women = len(rejected_women)\npercentage_rejected_women = (num_rejected_women / total_rejected) * 100\n\n# Filter the rejected loans for men\nrejected_men = rejected_loans[rejected_loans['Gender'] == 'Male']\nnum_rejected_men = len(rejected_men)\npercentage_rejected_men = (num_rejected_men / total_rejected) * 100\n\n# Print the results\nprint(\"Percentage of rejected loans for women: {:.2f}%\".format(percentage_rejected_women))\nprint(\"Percentage of rejected loans for men: {:.2f}%\".format(percentage_rejected_men))\n\n\n# In[8]:\n\n\n# Create a copy of the dataset and drop missing values\ndf_cleaned = df.dropna().copy()\n\n# Drop the 'Loan_ID' column\ndf_cleaned = df_cleaned.drop('Loan_ID', axis=1)\n\n# Convert categorical variables to numeric variables using one-hot encoding\ncategorical_columns = ['Gender', 'Married', 'Dependents', 'Education', 'Self_Employed', 'Property_Area']\ndf_cleaned_encoded = pd.get_dummies(df_cleaned, columns=categorical_columns, drop_first=True)\n\n# Normalize/Standardize numerical variables\nscaler = StandardScaler()\nnumerical_columns = ['ApplicantIncome', 'CoapplicantIncome', 'LoanAmount', 'Loan_Amount_Term']\ndf_cleaned_encoded[numerical_columns] = scaler.fit_transform(df_cleaned_encoded[numerical_columns])\n\n# Split the data into features and target variable\nX = df_cleaned_encoded.drop('Loan_Status', axis=1) # Exclude the target variable 'Loan_Status'\ny = df_cleaned_encoded['Loan_Status'] # Use the target variable 'Loan_Status'\n\n# Split the data into a training set and a test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n\n# In[9]:\n\n\ndf_cleaned_encoded\n\n\n# ### Step 2: Training and Evaluating the Models\n# Next, we'll train two models, one using the K-Nearest Neighbors (KNN) algorithm and the other using a decision tree.\n\n# In[10]:\n\n\n# Create a KNN classifier and train it\nknn = KNeighborsClassifier()\nknn.fit(X_train, y_train)\n\n# Make predictions with the KNN classifier\nknn_predictions = knn.predict(X_test)\n\n\n# In[11]:\n\n\n# Train a KNN model\nknn_model = KNeighborsClassifier(n_neighbors=5)\nknn_model = knn_model.fit(X_train, y_train)\nknn_model\n\n\n# In[12]:\n\n\n# Train a Decision Tree model\ndt_model = DecisionTreeClassifier()\ndt_model = dt_model.fit(X_train, y_train)\ndt_model\n\n\n# In[15]:\n\n\n# Predict using the KNN model\nknn_predictions = knn_model.predict(X_test)\n\n# Print the confusion matrix for the KNN model\nknn_cm = confusion_matrix(y_test, knn_predictions)\nprint(\"Confusion Matrix - KNN Model:\")\nprint(knn_cm)\n\n# Print the classification report for the KNN model\nknn_report = classification_report(y_test, knn_predictions)\nprint(\"Classification Report - KNN Model:\")\nprint(knn_report)\n\n# Predict using the Decision Tree model\ndt_predictions = dt_model.predict(X_test)\n\n# Print the confusion matrix for the Decision Tree model\ndt_cm = confusion_matrix(y_test, dt_predictions)\nprint(\"Confusion Matrix - Decision Tree Model:\")\nprint(dt_cm)\n\n# Print the classification report for the Decision Tree model\ndt_report = classification_report(y_test, dt_predictions)\nprint(\"Classification Report - Decision Tree Model:\")\nprint(dt_report)\n\n\n# In[16]:\n\n\ndt_predictions\n\n\n# After preprocessing the data and training the models, both KNN and Decision Tree were evaluated using accuracy, precision, recall, and F1 
score. The performance of the models can be compared based on these metrics to choose the best one for deployment. In general, the choice between the two models would depend on their performance on the metrics that are most important for the bank. For example, if the bank wants to minimize false negatives (i.e., incorrectly rejecting loans that should have been approved), they should choose the model with the highest recall. If they want to minimize false positives (i.e., incorrectly approving loans that should have been rejected), they should choose the model with the highest precision. The F1 score provides a balance between precision and recall, while the accuracy gives an overall measure of the model's performance.\n# \n# \n\n# In[17]:\n\n\n# Plot the confusion matrix for the KNN model\nplt.figure(figsize=(8, 6))\nsns.heatmap(knn_cm, annot=True, cmap=\"Blues\", fmt=\"d\")\nplt.title(\"Confusion Matrix - KNN Model\")\nplt.xlabel(\"Predicted\")\nplt.ylabel(\"Actual\")\nplt.show()\n\n# Plot the confusion matrix for the Decision Tree model\nplt.figure(figsize=(8, 6))\nsns.heatmap(dt_cm, annot=True, cmap=\"Blues\", fmt=\"d\")\nplt.title(\"Confusion Matrix - Decision Tree Model\")\nplt.xlabel(\"Predicted\")\nplt.ylabel(\"Actual\")\nplt.show()\n\n\n# In[18]:\n\n\nimport os\n\n# Create the \"models\" directory if it doesn't exist\nif not os.path.exists(\"models\"):\n os.makedirs(\"models\")\n\n\n# In[19]:\n\n\nimport pickle\n\n# Save the trained models using pickle\nwith open('models/knn_model.pkl', 'wb') as f:\n pickle.dump(knn_model, f)\n\nwith open('models/decision_tree_model.pkl', 'wb') as f:\n pickle.dump(dt_model, f)\n\n\n# In[20]:\n\n\n# Load the saved models using pickle\nwith open('models/knn_model.pkl', 'rb') as f:\n loaded_knn_model = pickle.load(f)\n\nwith open('models/decision_tree_model.pkl', 'rb') as f:\n loaded_dt_model = pickle.load(f)\n\n\n# ### Make predictions\n# With our loaded models we can perform predictions in new data. It would be interesting if we test our model in new dataframes or deploy it with a framework like Flask.\n\n# In[21]:\n\n\n# Make predictions using the loaded models\nknn_predictions = loaded_knn_model.predict(X_test)\ndt_predictions = loaded_dt_model.predict(X_test)\n\n# Print the predictions\nprint(\"KNN Predictions:\", knn_predictions)\nprint(\"Decision Tree Predictions:\", dt_predictions)\n\n\n# ### Important Features for Loan Approval Prediction\n\n# #### Decision Trees\n# To analyze the important features for loan approval prediction, we can examine the feature importances provided by the models. 
Decision Trees can provide insights into feature importance based on how often they are used for splitting the data.\n\n# In[22]:\n\n\n# Get feature importances from the loaded Decision Tree model\nfeature_importances = loaded_dt_model.feature_importances_\n\n# Create a DataFrame to display feature importances\nfeature_importance_df = pd.DataFrame({'Feature': X.columns, 'Importance': feature_importances})\nfeature_importance_df = feature_importance_df.sort_values('Importance', ascending=False)\n\n# Print the feature importances\nprint(\"Feature Importance:\")\nprint(feature_importance_df)\n\n\n# In[23]:\n\n\n# Plot the feature importances for the Decision Tree model\nplt.figure(figsize=(10, 6))\nsns.barplot(x='Importance', y='Feature', data=feature_importance_df)\nplt.title(\"Feature Importances - Decision Tree Model\")\nplt.xlabel(\"Importance\")\nplt.ylabel(\"Feature\")\nplt.show()\n\n\n# #### KNN\n# \n# In the case of KNN, we can use a different approach to understand the importance of features. We can calculate the mean value of each feature for the approved and rejected loan samples. The difference in mean values can indicate the relative importance of a feature in loan approval prediction. Here's the modified code snippet to calculate feature importance for the KNN model:\n\n# In[24]:\n\n\n# Convert the target variable to numeric format\nlabel_encoder = LabelEncoder()\ny_numeric = label_encoder.fit_transform(y)\n\n# Create a DataFrame combining the features and numeric target variable\ndf_combined = pd.concat([X, pd.Series(y_numeric, name='Loan_Status')], axis=1)\n\n# Calculate the mean values for each feature based on loan status\nmean_approved = df_combined[df_combined['Loan_Status'] == 1].mean()\nmean_rejected = df_combined[df_combined['Loan_Status'] == 0].mean()\n\n# Calculate the difference in mean values between approved and rejected loans\nfeature_importances = mean_approved - mean_rejected\n\n# Sort the feature importances in descending order\nfeature_importance_df = pd.DataFrame({'Feature': feature_importances.index, 'Importance': feature_importances.values})\nfeature_importance_df = feature_importance_df.sort_values('Importance', ascending=False)\n\n# Remove the target variable from feature importances\nfeature_importance_df = feature_importance_df[feature_importance_df['Feature'] != 'Loan_Status']\n\n# Print the feature importances\nprint(\"Feature Importance (KNN):\")\nprint(feature_importance_df)\n\n\n# In[25]:\n\n\n# Plot the feature importances for the KNN model\nplt.figure(figsize=(10, 6))\nsns.barplot(x='Importance', y='Feature', data=feature_importance_df)\nplt.title(\"Feature Importance - KNN Model\")\nplt.xlabel(\"Importance\")\nplt.ylabel(\"Feature\")\nplt.show()\n\n\n# ### Conclusion\n# \n# By considering the model performance and feature importances, the bank can gain insights into the predictive power of different models and identify areas of improvement in their loan approval process. They can focus on the most important features and refine their decision-making criteria to enhance accuracy and efficiency.\n# \n# Additionally, it's important to note that feature importance analysis should be interpreted with caution, as it reflects the specific model used and may not generalize across different models or datasets. 
It is recommended to perform further analysis and apply domain expertise to validate the findings and make informed decisions.\n# \n# Based on the insights from the feature importances, the bank can refine their loan approval process, give more weight to important features, and potentially adjust their decision criteria to improve accuracy and efficiency in predicting loan approval outcomes.\n# \n\n# To determine which model performed better for loan prediction, we need to compare the performance metrics of the KNN and Decision Tree models. Typically, the choice of the \"better\" model depends on the specific requirements and priorities of the bank. \n# \n# - Accuracy: Accuracy measures the overall correctness of the model's predictions. It is the ratio of correctly predicted loan approvals to the total number of predictions. A higher accuracy indicates a better-performing model. You can compare the accuracy scores of both models and choose the one with a higher accuracy.\n# \n# - Precision: Precision measures the proportion of correctly predicted loan approvals out of the total predictions of loan approvals. It focuses on minimizing false positives, i.e., cases where the model incorrectly predicts loan approval. Higher precision indicates a lower rate of false positives.\n# \n# - Recall: Recall (also known as sensitivity or true positive rate) measures the proportion of actual loan approvals that are correctly predicted by the model. It focuses on minimizing false negatives, i.e., cases where the model incorrectly predicts loan rejection for applicants who should have been approved. Higher recall indicates a lower rate of false negatives.\n# \n# - F1-score: The F1-score is the harmonic mean of precision and recall. It provides a balanced measure of the model's performance, taking into account both false positives and false negatives. Higher F1-score indicates better overall performance.\n\n
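# The metric functions imported in Step 1 (accuracy_score, precision_score, recall_score, f1_score) compute these values directly. A minimal sketch, assuming the variables from the cells above and treating approvals ('Y') as the positive class:\n\n# In[ ]:\n\n\nprint(\"KNN accuracy :\", accuracy_score(y_test, knn_predictions))\nprint(\"KNN precision:\", precision_score(y_test, knn_predictions, pos_label='Y'))\nprint(\"KNN recall   :\", recall_score(y_test, knn_predictions, pos_label='Y'))\nprint(\"KNN F1-score :\", f1_score(y_test, knn_predictions, pos_label='Y'))\n\n\n# Based on the evaluation metrics, we can analyze the performance of the models as follows:\n# \n# - Accuracy: The KNN model achieved an accuracy of 0.77, while the Decision Tree model achieved an accuracy of 0.73. In terms of overall correctness, the KNN model performed slightly better.\n# \n# - Precision: The KNN model had a precision of 0.88 for class N (rejected loans) and 0.76 for class Y (approved loans). The Decision Tree model had a precision of 0.53 for class N and 0.84 for class Y. In terms of minimizing false positives, the KNN model performed better for class N, while the Decision Tree model had higher precision for class Y.\n# \n# - Recall: The KNN model achieved a recall of 0.25 for class N and 0.99 for class Y. The Decision Tree model had a recall of 0.64 for class N and 0.76 for class Y. The KNN model had higher recall for class Y, indicating it was better at capturing true positives.\n# \n# - F1-score: The KNN model achieved an F1-score of 0.39 for class N and 0.86 for class Y. The Decision Tree model had an F1-score of 0.58 for class N and 0.80 for class Y. The KNN model had higher F1-scores for both classes.\n# \n# Based on these evaluations, it can be concluded that the KNN model performs better in predicting loan approvals compared to the Decision Tree model. However, it's important to note that these conclusions are based on the specific dataset and evaluation metrics used. 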
It is recommended to further validate the models on additional datasets and consider the specific requirements and priorities of the bank before making a final decision.\n","repo_name":"Minakoaino/Loan-approval-prediction","sub_path":"Predicting Loan Approval Using KNN and Decision Trees.py","file_name":"Predicting Loan Approval Using KNN and Decision Trees.py","file_ext":"py","file_size_in_byte":23201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"9357045210","text":"from src.satellite import Satellite\nfrom src.coordinates import Coordinates\nfrom datetime import datetime\n\n\n# Satellite initialization:\n# sat = Satellite(sat_id=1109,\n#                 coords_info=Coordinates(coords=[0, 3454.842, 6787.935, 6871000, 0, 0],\n#                                         epoch=datetime(2022, 12, 1),\n#                                         coord_system='agesc'))\n\ndef test():\n    sat = Satellite(sat_id=1109,\n                    coords_info=Coordinates(coords=[0, 3454.842, 6787.935, 6871000, 0, 0],\n                                            epoch=datetime(2022, 12, 1),\n                                            coord_system='agesc'))\n    print(sat)\n\n\nif __name__ == \"__main__\":\n    test()","repo_name":"kharno/satellite","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"35734335089","text":"from game_state import GameState\nfrom .action_manager import ActionManager\n\n\nclass InputManager:\n\n    @classmethod\n    def check_bindings(cls):\n        if \"bindings\" in GameState.current_state:\n            bindings = GameState.current_state[\"bindings\"]\n            for binding in bindings:\n                assert \"type\" in binding\n                assert type(binding[\"type\"]) == str\n                assert binding[\"type\"]\n                if binding[\"type\"] == \"keyboard\":\n                    assert \"key_action\" in binding\n                    key_action = binding[\"key_action\"]\n                    assert type(key_action) == str\n                    assert key_action in (\"pressed\", \"released\")\n                    assert \"key\" in binding\n                    key = binding[\"key\"]\n                    assert type(key) == int\n                    assert key\n                    assert \"action\" in binding\n                    action = binding[\"action\"]\n                    assert type(action) == str\n                    assert action\n\n    @classmethod\n    def key_press(cls, symbol):\n        for binding in GameState.current_state[\"bindings\"]:\n            if binding[\"type\"] == \"keyboard\":\n                if binding[\"key_action\"] == \"pressed\":\n                    if binding[\"key\"] == symbol:\n                        action = binding[\"action\"]\n                        print(action)\n                        getattr(ActionManager, action)()\n\n    @classmethod\n    def key_release(cls, symbol):\n        for binding in GameState.current_state[\"bindings\"]:\n            if binding[\"type\"] == \"keyboard\":\n                if binding[\"key_action\"] == \"released\":\n                    if binding[\"key\"] == symbol:\n                        action = binding[\"action\"]\n                        print(action)\n                        getattr(ActionManager, action)()\n\n
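# A hypothetical state fragment matching the checks above (the key code and\n# action name are made up; \"action\" must name an ActionManager method):\n# GameState.current_state[\"bindings\"] = [\n#     {\"type\": \"keyboard\", \"key_action\": \"pressed\", \"key\": 119, \"action\": \"move_up\"},\n# ]\n","repo_name":"peb-8/luna-project","sub_path":"managers/input_manager.py","file_name":"input_manager.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"36859812338","text":"from copy import deepcopy\nfrom typing import List, Tuple\n\nimport numpy as np\nfrom pyclipper import (\n    ET_CLOSEDPOLYGON,\n    ET_OPENROUND,\n    ET_OPENSQUARE,\n    JT_MITER,\n    JT_ROUND,\n    JT_SQUARE,\n    PolyTreeToPaths,\n    PyPolyNode,\n    PyclipperOffset,\n    scale_from_clipper,\n    scale_to_clipper,\n)\n\nfrom bluemira.base.look_and_feel import bluemira_warn\nfrom bluemira.geometry.coordinates import Coordinates, rotation_matrix_v1v2\nfrom bluemira.geometry.error import GeometryError\n\n__all__ = [\"offset_clipper\"]\n\n# 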
=============================================================================\n# Pyclipper utilities\n# =============================================================================\n\n\ndef coordinates_to_pyclippath(coordinates: Coordinates) -> np.ndarray:\n    \"\"\"\n    Transforms a bluemira Coordinates object into a Path for use in pyclipper\n\n    Parameters\n    ----------\n    coordinates:\n        The Coordinates to be used in pyclipper\n\n    Returns\n    -------\n    The vertex polygon path formatting required by pyclipper\n    \"\"\"\n    return scale_to_clipper(coordinates.xz.T)\n\n\ndef pyclippath_to_coordinates(path: np.ndarray) -> Coordinates:\n    \"\"\"\n    Transforms a pyclipper path into a bluemira Coordinates object\n\n    Parameters\n    ----------\n    path:\n        The vertex polygon path formatting used in pyclipper\n\n    Returns\n    -------\n    The Coordinates from the path object\n    \"\"\"\n    p2 = scale_from_clipper(np.array(path).T)\n    return Coordinates({\"x\": p2[0], \"y\": 0, \"z\": p2[1]})\n\n\ndef pyclippolytree_to_coordinates(polytree: List[np.ndarray]) -> List[Coordinates]:\n    \"\"\"\n    Converts a ClipperLib PolyTree into a list of Coordinates\n\n    Parameters\n    ----------\n    polytree:\n        The polytree to convert to Coordinates\n    \"\"\"\n    paths = PolyTreeToPaths(polytree)\n    return [pyclippath_to_coordinates(path) for path in paths]\n\n\nclass PyclipperMixin:\n    \"\"\"\n    Mixin class for typical pyclipper operations and processing\n    \"\"\"\n\n    name = NotImplemented\n\n    def perform(self):\n        \"\"\"\n        Perform the pyclipper operation\n        \"\"\"\n        raise NotImplementedError\n\n    def raise_warning(self):\n        \"\"\"\n        Raise a warning if None is to be returned.\n        \"\"\"\n        bluemira_warn(f\"{self.name} operation on 2-D polygons returning None.\\n\")\n\n    def handle_solution(self, solution: Tuple[np.ndarray]) -> List[Coordinates]:\n        \"\"\"\n        Handles the output of the Pyclipper.Execute(*) algorithms, turning them\n        into Coordinates objects. NOTE: These are closed by default.\n\n        Parameters\n        ----------\n        solution:\n            The tuple of tuple of tuple of path vertices\n\n        Returns\n        -------\n        The list of Coordinates objects produced by the pyclipper operations\n        \"\"\"\n        if not solution:\n            self.raise_warning()\n            return None\n        coords = []\n        if isinstance(solution, PyPolyNode):\n            coords = pyclippolytree_to_coordinates(solution)\n        else:\n            for path in solution:\n                c = pyclippath_to_coordinates(path)\n                c.close()\n                coords.append(c)\n\n        # Sort open coordinates by length\n        return sorted(coords, key=lambda x: -x.length)\n\n\n# =============================================================================\n# Offset operations\n# =============================================================================\n\n\nclass OffsetOperationManager(PyclipperMixin):\n    \"\"\"\n    Abstract base class for offset operations\n\n    Parameters\n    ----------\n    coordinates:\n        The Coordinates upon which to perform the offset operation\n    \"\"\"\n\n    method = NotImplemented\n    closed_method = ET_CLOSEDPOLYGON\n    open_method = NotImplementedError\n\n    def __init__(self, coordinates: Coordinates):\n        self.tool = PyclipperOffset()\n        path = coordinates_to_pyclippath(coordinates)\n        self._scale = self._calculate_scale(path, coordinates) # Store scale\n\n        co_method = self.closed_method if coordinates.closed else self.open_method\n\n        self.tool.AddPath(path, self.method, co_method)\n\n    def perform(self, delta: float):\n        \"\"\"\n        Perform the offset operation.\n\n        Parameters\n        ----------\n        delta:\n            The value of the offset [m]. 
Positive for increasing size, negative for\n decreasing\n \"\"\"\n delta = int(round(delta * self._scale)) # approximation\n solution = self.tool.Execute(delta)\n return self.handle_solution(solution)\n\n @staticmethod\n def _calculate_scale(path: np.ndarray, coordinates: Coordinates):\n \"\"\"\n Calculate the pyclipper scaling to integers\n \"\"\"\n # Find the first non-zero dimension (low number of iterations)\n for i in range(len(path) - 1):\n if path[i][0] != 0:\n return path[i][0] / coordinates.x[i]\n if path[i][1] != 0:\n return path[i][1] / coordinates.z[i]\n return None\n\n\nclass RoundOffset(OffsetOperationManager):\n \"\"\"\n Offset class for rounded offsets.\n \"\"\"\n\n name = \"Round Offset\"\n method = JT_ROUND\n open_method = ET_OPENROUND\n\n\nclass SquareOffset(OffsetOperationManager):\n \"\"\"\n Offset class for squared offsets.\n \"\"\"\n\n name = \"Square Offset\"\n method = JT_SQUARE\n open_method = ET_OPENSQUARE\n\n\nclass MiterOffset(OffsetOperationManager):\n \"\"\"\n Offset class for mitered offsets.\n \"\"\"\n\n name = \"Miter Offset\"\n method = JT_MITER\n open_method = ET_OPENROUND\n\n def __init__(self, coordinates: Coordinates, miter_limit: float = 2.0):\n super().__init__(coordinates)\n\n self.tool.MiterLimit = miter_limit\n\n\ndef offset_clipper(\n coordinates: Coordinates,\n delta: float,\n method: str = \"square\",\n miter_limit: float = 2.0,\n) -> Coordinates:\n \"\"\"\n Carries out an offset operation on the Coordinates using the ClipperLib library.\n Only supports closed Coordinates.\n\n Parameters\n ----------\n coordinates:\n The Coordinates upon which to perform the offset operation\n delta:\n The value of the offset [m]. Positive for increasing size, negative for\n decreasing\n method:\n The type of offset to perform ['square', 'round', 'miter']\n miter_limit:\n The ratio of delta to use when mitering acute corners. 
Only used if\n        method == 'miter'\n\n    Returns\n    -------\n    The offset Coordinates result\n\n    Raises\n    ------\n    GeometryError:\n        If the Coordinates are not planar\n        If the Coordinates are not closed\n    \"\"\"\n    if not coordinates.is_planar:\n        raise GeometryError(\"Cannot offset non-planar coordinates.\")\n\n    if not coordinates.closed:\n        raise GeometryError(\"Open Coordinates are not supported by offset_clipper.\")\n\n    # Transform coordinates to x-z plane\n    coordinates = deepcopy(coordinates)\n    com = coordinates.center_of_mass\n\n    t_coordinates = transform_coordinates_to_xz(\n        coordinates, -np.array(com), (0.0, 1.0, 0.0)\n    )\n\n    if method == \"square\":\n        tool = SquareOffset(t_coordinates)\n    elif method == \"round\":\n        bluemira_warn(\"I don't know why, but this is very slow...\")\n        tool = RoundOffset(t_coordinates)\n    elif method == \"miter\":\n        tool = MiterOffset(t_coordinates, miter_limit=miter_limit)\n    else:\n        raise GeometryError(\n            \"Please choose an offset method from:\\n round \\n square \\n miter\"\n        )\n\n    result = tool.perform(delta)\n    if result is None:\n        raise GeometryError(\n            f\"Offset operation with delta={delta} resulted in no geometry.\"\n        )\n\n    if len(result) > 1:\n        bluemira_warn(\n            f\"Offset operation with delta={delta} has produced multiple 'islands'; only\"\n            \" returning the biggest one!\"\n        )\n\n    result = result[0]\n\n    # Transform offset coordinates back to original plane\n    return transform_coordinates_to_original(result, com, coordinates.normal_vector)\n\n\ndef transform_coordinates_to_xz(\n    coordinates: Coordinates, base: np.ndarray, direction: np.ndarray\n) -> Coordinates:\n    \"\"\"\n    Rotate coordinates to the x-z plane.\n    \"\"\"\n    coordinates.translate(base)\n    if abs(coordinates.normal_vector[1]) == 1.0:  # noqa: PLR2004\n        return coordinates\n\n    r = rotation_matrix_v1v2(coordinates.normal_vector, np.array(direction))\n    x, y, z = r.T @ coordinates\n\n    return Coordinates({\"x\": x, \"y\": y, \"z\": z})\n\n\ndef transform_coordinates_to_original(\n    coordinates: Coordinates, base: np.ndarray, original_normal: np.ndarray\n) -> Coordinates:\n    \"\"\"\n    Rotate coordinates back to original plane\n    \"\"\"\n    r = rotation_matrix_v1v2(coordinates.normal_vector, np.array(original_normal))\n    x, y, z = r.T @ coordinates\n    coordinates = Coordinates({\"x\": x, \"y\": y, \"z\": z})\n    coordinates.translate(base)\n    return coordinates\n","repo_name":"Fusion-Power-Plant-Framework/bluemira","sub_path":"bluemira/geometry/_pyclipper_offset.py","file_name":"_pyclipper_offset.py","file_ext":"py","file_size_in_byte":8902,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"50"} +{"seq_id":"11625877211","text":"import torch.nn as nn\nimport models.archs.arch_util as arch_util\nimport torch\nfrom models.archs.FSRCNN_arch import FSRCNN_net\n\nfrom brt.router import ScatterRouter, GatherRouter\n\n\nclass classSR_3class_fsrcnn_net(nn.Module):\n    def __init__(self, in_nc=3, out_nc=3):\n        super(classSR_3class_fsrcnn_net, self).__init__()\n        self.upscale = 4\n        self.classifier = Classifier()\n        self.net1 = FSRCNN_net(in_nc, self.upscale, 16, 12, 4)\n        self.net2 = FSRCNN_net(in_nc, self.upscale, 36, 12, 4)\n        self.net3 = FSRCNN_net(in_nc, self.upscale, 56, 12, 4)\n        self.scatter_router = ScatterRouter(\n            protocol_type=\"topk\", protocol_kwargs={\"top_k\": 1}\n        )\n        self.gather_router = GatherRouter(fabric_type=\"combine\")\n\n
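    # Routing flow: the classifier scores each input over the three branches,\n    # ScatterRouter dispatches each sample to its top-1 branch, the three FSRCNN\n    # nets super-resolve their shares, and GatherRouter combines the outputs.\n    def forward(self, x, is_train=False):\n\n        weights = 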
self.classifier(x)\n        sr_x = self.scatter_router(x, weights)\n        y = [self.net1(sr_x[0]), self.net2(sr_x[1]), self.net3(sr_x[2])]\n        gr_x = self.gather_router(y)\n        return gr_x, [yy.shape[0] for yy in y]\n\n\nclass Classifier(nn.Module):\n    def __init__(self):\n        super(Classifier, self).__init__()\n        self.lastOut = nn.Linear(32, 3)\n\n        # Condition network\n        self.CondNet = nn.Sequential(\n            nn.Conv2d(3, 128, 4, 4),\n            nn.LeakyReLU(0.1, True),\n            nn.Conv2d(128, 128, 1),\n            nn.LeakyReLU(0.1, True),\n            nn.Conv2d(128, 128, 1),\n            nn.LeakyReLU(0.1, True),\n            nn.Conv2d(128, 128, 1),\n            nn.LeakyReLU(0.1, True),\n            nn.Conv2d(128, 32, 1),\n        )\n        self.avgPool2d = nn.AvgPool2d(8)\n        arch_util.initialize_weights([self.CondNet], 0.1)\n\n    def forward(self, x):\n        # assert x.shape[1:] == torch.Size([3, 32, 32]), x.shape\n        out = self.CondNet(x)  # [bs, 32, 8, 8]\n        out = self.avgPool2d(out)  # [bs, 32, 1, 1]\n        out = out.view(-1, 32)  # [bs, 32]\n        out = self.lastOut(out)  # [bs, 3]\n        return out\n","repo_name":"Raphael-Hao/brainstorm","sub_path":"benchmark/classsr/codes/models/archs/classSR_fsrcnn_arch.py","file_name":"classSR_fsrcnn_arch.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"50"} +{"seq_id":"8907107776","text":"import tensorflow as tf\nfrom tensorflow.python.ops.rnn_cell import LSTMCell\nfrom tensorflow.python.ops.rnn_cell import MultiRNNCell\nfrom tensorflow.python.ops.rnn_cell import DropoutWrapper\nfrom tensorflow.python.ops.rnn_cell import ResidualWrapper\n\nfrom tensorflow.python.layers.core import Dense\n\nfrom tensorflow.contrib.seq2seq.python.ops import beam_search_decoder\nfrom tensorflow.contrib.seq2seq.python.ops import attention_wrapper\n\nimport math\nimport CorpusProcessor as sp\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.ops import array_ops\n\nSOS_TOKEN = 0  # Start of sentence token\nEOS_TOKEN = 1  # End of sentence token and padding\nUNK_TOKEN = 2  # Unknown word token\n\nclass Decoder(object):\n\n    def __init__(self, params, encoder, model_type):\n\n        self.encoder = encoder\n        self.model_type = model_type\n\n        # Decoder parameters configuration\n        self.dtype = tf.float32\n        self.hidden_units_num = params[\"hidden_units_num\"]\n        self.deep_layers_num = params[\"deep_layers_num\"]\n\n        self.enable_residual_wrapper = params[\"enable_residual_wrapper\"]\n\n        self.enable_dropout_wrapper = params[\"enable_dropout_wrapper\"]\n        self.input_keep_prob = params[\"input_keep_prob\"]\n        self.output_keep_prob = params[\"output_keep_prob\"]\n\n        self.vocabulary_size = params[\"vocabulary_size\"]\n        self.embedding_size = params[\"embedding_size\"]\n        self.glove_weights_initializer = params[\"glove_weights_initializer\"]\n\n        self.attention_method = params[\"attention_method\"]\n        self.beam_width = params[\"beam_width\"]\n\n        self.learning_rate = params[\"learning_rate\"]\n        self.optimizer_type = params[\"optimizer_type\"]\n        self.gradient_clipping_norm = params[\"gradient_clipping_norm\"]\n\n        self.global_step = tf.Variable(0, trainable=False, name='global_step')\n        self.max_decode_step = params[\"max_decode_step\"]\n\n        self.initialize_data_placeholders()\n\n\n    # Set the decoder's placeholders ----------------------------------------------\n    def initialize_data_placeholders(self):\n        self.inputs_batch_size = tf.shape(self.encoder.inputs)[0]\n\n        if self.model_type == \"predict_model\":\n            return\n\n        self.inputs = tf.placeholder(dtype=tf.int32, shape=(None, None), name='dec_inputs')\n        self.inputs_len = tf.placeholder(dtype=tf.int32, 
shape=(None,), name='dec_inputs_len')\n\n        # Column list of the sentence padded starting word\n        start_word = tf.ones([self.inputs_batch_size, 1], tf.int32)\n        start_word *= SOS_TOKEN\n\n        # Column list of the sentence padded ending word\n        end_word = tf.ones([self.inputs_batch_size, 1], tf.int32)\n        end_word *= EOS_TOKEN\n\n        # decoder_inputs_train: [batch_size , max_time_steps + 1]\n        # insert _GO symbol in front of each decoder input\n        self.train_inputs = tf.concat([start_word, self.inputs], axis=1)\n\n        # decoder_inputs_length_train: [batch_size]\n        self.train_inputs_len = self.inputs_len + 1\n\n        # decoder_targets_train: [batch_size, max_time_steps + 1]\n        # insert EOS symbol at the end of each decoder input\n        self.train_targets = tf.concat([self.inputs, end_word], axis=1)\n\n    def get_LSTM_cell(self):\n        # Creates a full LSTM cell of a fixed hidden units number\n        LSTM_cell = LSTMCell(self.hidden_units_num)\n\n        # Use dropout for improving model performance and reducing overfitting chance\n        if self.enable_dropout_wrapper:\n            LSTM_cell = DropoutWrapper(LSTM_cell, dtype=self.dtype,\n                input_keep_prob=self.input_keep_prob, output_keep_prob=self.output_keep_prob)\n\n        # Skip connections for passing layers and avoiding gradient explosion or vanishing\n        if self.enable_residual_wrapper:\n            LSTM_cell = ResidualWrapper(LSTM_cell)\n\n        return LSTM_cell\n\n    def get_deep_LSTM_cell_list(self):\n        return [self.get_LSTM_cell() for _ in range(self.deep_layers_num)]\n\n    def tile_data_for_beamsearch(self):\n        # Modify here if something does not work\n        tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(self.encoder.outputs, multiplier=self.beam_width)\n        tiled_encoder_final_state = nest.map_structure(lambda s: tf.contrib.seq2seq.tile_batch(s, self.beam_width), self.encoder.state)\n        tiled_encoder_inputs_len = tf.contrib.seq2seq.tile_batch(self.encoder.inputs_len,\n                                                                 multiplier=self.beam_width)\n\n        return tiled_encoder_outputs, tiled_encoder_final_state, tiled_encoder_inputs_len\n\n\n    def get_attention_mechanism(self, tiled_encoder_outputs, tiled_encoder_inputs_len):\n        attention = None\n\n        if self.attention_method == 'luong':\n            attention = attention_wrapper.LuongAttention(num_units=self.hidden_units_num, memory=tiled_encoder_outputs,\n                                                         memory_sequence_length=tiled_encoder_inputs_len, )\n\n        if self.attention_method == \"bahdanau\":\n            attention = attention_wrapper.BahdanauAttention(num_units=self.hidden_units_num, memory=tiled_encoder_outputs,\n                                                            memory_sequence_length=tiled_encoder_inputs_len, )\n        return attention\n\n\n    def get_deep_attention_LSTM_cell(self):\n\n        # Obtain a deep LSTM RNN for the decoder\n        self.deep_cell = self.get_deep_LSTM_cell_list()\n\n        tiled_encoder_outputs = self.encoder.outputs\n        tiled_encoder_final_state = self.encoder.state\n        tiled_encoder_inputs_len = self.encoder.inputs_len\n\n        # Tile the data for beamsearch\n        if self.model_type == \"predict_model\":\n            tiled_encoder_outputs, tiled_encoder_final_state, tiled_encoder_inputs_len = self.tile_data_for_beamsearch()\n\n        # Create an attention mechanism and assign it to the last deep layer of the decoder\n        self.attention_mechanism = self.get_attention_mechanism(tiled_encoder_outputs, tiled_encoder_inputs_len)\n\n        def attn_decoder_input_fn(inputs, attention):\n            _input_layer = Dense(self.hidden_units_num, dtype=self.dtype,\n                                 name='attn_input_feeding')\n            return _input_layer(array_ops.concat([inputs, attention], -1))\n\n
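        # Only the top LSTM layer is wrapped with attention; attn_decoder_input_fn\n        # implements input feeding: the decoder input and the previous attention\n        # vector are concatenated and projected back to hidden_units_num\n        self.deep_cell[-1] = attention_wrapper.AttentionWrapper(cell=self.deep_cell[-1],\n                                                                attention_mechanism=self.attention_mechanism,\n                                                                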
attention_layer_size=self.hidden_units_num,\n cell_input_fn=attn_decoder_input_fn,\n initial_cell_state=tiled_encoder_final_state[-1],\n alignment_history=False,\n name='attention_wrapper')\n\n # Convert the last state of the encoder to a beamsearch + attention state of the decoder\n decoder_initial_state = [state for state in tiled_encoder_final_state]\n\n attention_batch_size = self.inputs_batch_size\n if self.model_type != \"train_model\":\n attention_batch_size *= self.beam_width\n\n decoder_initial_state[-1] = self.deep_cell[-1].zero_state(dtype=self.dtype,\n batch_size=attention_batch_size)\n\n # Done using the last deep layer\n self.state = tuple(decoder_initial_state)\n self.deep_cell = MultiRNNCell(self.deep_cell)\n\n def get_RNN_optimizer(self):\n if self.optimizer_type == 'adadelta':\n self.optimizer = tf.train.AdadeltaOptimizer(learning_rate=self.learning_rate)\n elif self.optimizer_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-08\n )\n elif self.optimizer_type == 'rmsprop':\n self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)\n else:\n self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n\n trainable_variables = tf.trainable_variables()\n variables_gradients = tf.gradients(self.loss_function, trainable_variables)\n\n # Clip gradients by a given maximum_gradient_norm\n clip_gradients, _ = tf.clip_by_global_norm(variables_gradients, self.gradient_clipping_norm)\n\n # Update the model\n self.gradient_updates = self.optimizer.apply_gradients(zip(clip_gradients, trainable_variables),\n global_step=self.global_step)\n\n def get_train_model(self, input_proj_layer, output_proj_layer):\n\n\n embedded_inputs = tf.nn.embedding_lookup(params=self.embeddings, ids=self.train_inputs)\n self.projected_embedded_inputs = input_proj_layer(embedded_inputs)\n\n training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=self.projected_embedded_inputs,\n sequence_length=self.train_inputs_len,\n time_major=False,\n name='training_helper')\n\n training_decoder = tf.contrib.seq2seq.BasicDecoder(cell=self.deep_cell,\n helper=training_helper,\n initial_state=self.state,\n output_layer=output_proj_layer)\n\n # Maximum decoder time_steps in current batch\n max_decoder_length = tf.reduce_max(self.train_inputs_len)\n\n # decoder_outputs_train: BasicDecoderOutput\n # namedtuple(rnn_outputs, sample_id)\n # decoder_outputs_train.rnn_output: [batch_size, max_time_step + 1, num_decoder_symbols] if output_time_major=False\n # [max_time_step + 1, batch_size, num_decoder_symbols] if output_time_major=True\n # decoder_outputs_train.sample_id: [batch_size], tf.int32\n (self.train_outputs, self.train_state, self.train_outputs_len) = (tf.contrib.seq2seq.dynamic_decode(\n decoder=training_decoder,\n output_time_major=False,\n impute_finished=True,\n maximum_iterations=max_decoder_length))\n\n # logits_train: [batch_size, max_time_step + 1, num_decoder_symbols]\n self.train_logits = tf.identity(self.train_outputs.rnn_output)\n\n # Use argmax to extract decoder symbols to emit\n self.train_prediction = tf.argmax(self.train_logits, axis=-1, name='dec_pred_train')\n\n # masks: masking for valid and padded time steps, [batch_size, max_time_step + 1]\n masks = tf.sequence_mask(lengths=self.train_inputs_len,\n maxlen=max_decoder_length, dtype=self.dtype, name='masks')\n\n # Computes per word average cross-entropy over a batch\n # Internally calls 
'nn_ops.sparse_softmax_cross_entropy_with_logits' by default\n        self.loss_function = tf.contrib.seq2seq.sequence_loss(logits=self.train_logits,\n                                                               targets=self.train_targets,\n                                                               weights=masks,\n                                                               average_across_timesteps=True,\n                                                               average_across_batch=True)\n        # Training summary for the current batch_loss\n        tf.summary.scalar('loss', self.loss_function)\n\n        # Construct graphs for minimizing loss\n        self.get_RNN_optimizer()\n\n    def get_predict_model(self, input_proj_layer, output_proj_layer):\n\n        start_word = tf.ones([self.inputs_batch_size, ], tf.int32)\n        start_word *= SOS_TOKEN\n\n        end_word = EOS_TOKEN\n\n        def embedd_project_input(inputs):\n            return input_proj_layer(tf.nn.embedding_lookup(self.embeddings, inputs))\n\n        # Beamsearch is used to approximately find the most likely translation\n        print(\"building beamsearch decoder..\")\n        inference_decoder = beam_search_decoder.BeamSearchDecoder(cell=self.deep_cell,\n                                                                  embedding=embedd_project_input,\n                                                                  start_tokens=start_word,\n                                                                  end_token=end_word,\n                                                                  initial_state=self.state,\n                                                                  beam_width=self.beam_width,\n                                                                  output_layer=output_proj_layer, )\n        # For BeamSearchDecoder, return\n        # decoder_outputs_decode: FinalBeamSearchDecoderOutput instance\n        #                         namedtuple(predicted_ids, beam_search_decoder_output)\n        # decoder_outputs_decode.predicted_ids: [batch_size, max_time_step, beam_width] if output_time_major=False\n        #                                       [max_time_step, batch_size, beam_width] if output_time_major=True\n        # decoder_outputs_decode.beam_search_decoder_output: BeamSearchDecoderOutput instance\n        #                                                    namedtuple(scores, predicted_ids, parent_ids)\n\n        (self.decoder_outputs_decode, self.decoder_last_state_decode,\n         self.decoder_outputs_length_decode) = (tf.contrib.seq2seq.dynamic_decode(decoder=inference_decoder,\n                                                                                  output_time_major=False,\n                                                                                  maximum_iterations=self.max_decode_step))\n\n        # Use beam search to approximately find the most likely translation\n        # decoder_pred_decode: [batch_size, max_time_step, beam_width] (output_major=False)\n        self.decoder_pred_decode = self.decoder_outputs_decode.predicted_ids\n\n    def create_custom_LSTM_decoder(self):\n        with tf.variable_scope('decoder'):\n            self.get_deep_attention_LSTM_cell()\n            self.embeddings = tf.get_variable(name='embedding_weights',\n                                              shape=[self.vocabulary_size, self.embedding_size],\n                                              initializer=self.glove_weights_initializer,\n                                              dtype=self.dtype, trainable=False)\n\n            # Projection layer for projecting encoder's output to encoder num of hidden nodes\n            input_proj_layer = Dense(self.hidden_units_num, dtype=self.dtype, name='input_projection_layer')\n\n            # Projection layer for projecting decoder's outputs to probability selection of a vocabulary word\n            output_proj_layer = Dense(self.vocabulary_size, name='output_projection_layer')\n\n            if self.model_type == 'train_model':\n                self.get_train_model(input_proj_layer, output_proj_layer)\n            elif self.model_type == 'predict_model':\n                self.get_predict_model(input_proj_layer, output_proj_layer)","repo_name":"jackal02/ChatbotWithPersonality","sub_path":"Decoder.py","file_name":"Decoder.py","file_ext":"py","file_size_in_byte":15910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"31180173243","text":"# -*- coding: utf-8 -*-\nimport logging\nimport requests\nimport json\n\norderid = '917507886106620' # order ID\napi_url = \"http://dps.kdlapi.com/api/getdps/?orderid={}&num=20&pt=1&format=json&sep=1\" # API URL template for generating proxy IPs\n\nlogger = logging.getLogger(__name__) # logger\n\n\ndef fetch_proxy():\n    fetch_url = api_url.format(orderid)\n    r = requests.get(fetch_url)\n    
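# Non-200 responses are logged and treated as failure; callers receive False\n    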
if r.status_code != 200:\n logger.error(\"fail to fetch proxy\")\n return False\n content = json.loads(r.content.decode('utf-8'))\n ips = content['data']['proxy_list']\n return ips\n\n\nif __name__ == '__main__':\n print(\"proxy: \", fetch_proxy())\n","repo_name":"pythoner-LW/Zhihu_Crawl-and-NLP","sub_path":"zhihui_redis/zhihui_redis/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"50"} +{"seq_id":"21774937940","text":"import tkinter as tk\r\nimport os\r\nfrom datetime import *\r\n\r\ndef reformatDates(hw):\r\n num = -1\r\n if os.stat(\"HWdata.txt\").st_size != 0:\r\n aDates = []\r\n for i in hw:\r\n num += 1\r\n\r\n temp1 = hw[num].split(\" | \")\r\n temp2 = temp1[3].split(\"/\")\r\n if len(temp2[0]) == 1:\r\n temp2[0] = f\"0{temp2[0]}\"\r\n\r\n if len(temp2[1]) == 1:\r\n temp2[1] = f\"0{temp2[1]}\"\r\n\r\n tempList = []\r\n assignmentDate = f\"{temp2[2]}-{temp2[0]}-{temp2[1]}\"\r\n tempList.append(i)\r\n tempList.append(assignmentDate)\r\n aDates.append(tempList)\r\n return aDates\r\n\r\n\r\ndef updateListbox(hw, listbox):\r\n listbox.delete(0, \"end\")\r\n num = -1\r\n if os.stat(\"HWdata.txt\").st_size != 0:\r\n aDates = []\r\n for i in hw:\r\n num += 1\r\n\r\n temp1 = hw[num].split(\" | \")\r\n temp2 = temp1[3].split(\"/\")\r\n if len(temp2[0]) == 1:\r\n temp2[0] = f\"0{temp2[0]}\"\r\n\r\n if len(temp2[1]) == 1:\r\n temp2[1] = f\"0{temp2[1]}\"\r\n\r\n assignmentDate = f\"{temp2[2]}-{temp2[0]}-{temp2[1]}\"\r\n aDates.append(assignmentDate)\r\n\r\n aDates.sort(key = lambda date: datetime.strptime(date, '%Y-%m-%d'))\r\n\r\n\r\n originalDates = reformatDates(hw)\r\n\r\n temp = -1\r\n for m in range(0, len(aDates)):\r\n temp += 1\r\n for x in originalDates:\r\n\r\n if x[1] == aDates[temp]:\r\n listbox.insert(\"end\", x[0])\r\n\r\n\r\nclass Planner:\r\n\r\n def __init__(self, master, classes, hw, todayDate):\r\n\r\n self.hwStorage = hw\r\n self.master = master\r\n self.classes = classes\r\n self.frame = tk.Frame(self.master, background=\"light blue\")\r\n\r\n # Add Homework Button\r\n self.addHomeworkButton = tk.Button(self.frame, text=\"Add Homework\", command = self.new_window, background=\"pink\", font=\"helvetica 10 italic bold\")\r\n self.addHomeworkButton.grid(padx=10, pady=(10, 0), column=1, row=1)\r\n\r\n # Modify Classes Button\r\n self.modifyClassButton = tk.Button(self.frame, text=\"Modify Classes\", command=self.new_window2, background=\"pink\", font=\"helvetica 10 italic bold\")\r\n self.modifyClassButton.grid(padx=10, pady=(10, 0), column=2, row=1)\r\n\r\n # Remove Button\r\n self.removeButton = tk.Button(self.frame, text=\"Remove Selected Assignment\", background=\"pink\", font=\"helvetica 10 italic bold\", command=self.removeSelected)\r\n self.removeButton.grid(padx=10, pady=(10, 0), column=3, row=1)\r\n\r\n #Scroll Bar\r\n scrollBar = tk.Scrollbar(self.frame)\r\n \r\n\r\n # List Box\r\n self.listbox = tk.Listbox(self.frame, width=60, height=16, font=15, bd=4, background=\"pink\", yscrollcommand = scrollBar.set)\r\n self.listbox.grid(padx=10, pady=10, row=2, columnspan=3, column=1)\r\n\r\n # Sorts all the hw and displays them in the listbox\r\n updateListbox(self.hwStorage, self.listbox)\r\n\r\n scrollBar.config(command=self.listbox.yview)\r\n\r\n self.frame.grid()\r\n\r\n # Opens the add homework page\r\n def new_window(self):\r\n self.newWindow = tk.Toplevel(self.master)\r\n self.app = AddHomeworkPage(self.newWindow, self.classes, self.hwStorage, 
self.listbox, self.master)\r\n\r\n def new_window2(self):\r\n self.newWindow2 = tk.Toplevel(self.master)\r\n self.app2 = ModifyClassPage(self.newWindow2, self.classes)\r\n\r\n def removeSelected(self):\r\n if self.listbox.size() != 0:\r\n\r\n selection = self.listbox.curselection()\r\n if selection != ():\r\n hw = self.listbox.get(selection[0])\r\n self.listbox.delete(selection[0])\r\n\r\n tempList = []\r\n\r\n file = open(\"HWdata.txt\", \"r\")\r\n lines = file.readlines()\r\n for line in lines:\r\n tempList.append(line.rstrip())\r\n self.hwStorage = tempList\r\n file.close()\r\n\r\n temp = -1\r\n for line in lines:\r\n temp += 1\r\n if line == hw:\r\n del lines[temp]\r\n\r\n\r\n new_file = open(\"HWdata.txt\", \"w+\")\r\n for line in lines:\r\n new_file.write(line)\r\n new_file.close()\r\n\r\n self.master.destroy()\r\n main()\r\n\r\n\r\n\r\n\r\nclass AddHomeworkPage:\r\n\r\n def __init__(self, master, classes, hw, listbox, master1):\r\n self.mainRoot = master1\r\n self.listbox = listbox\r\n self.hwStorage = hw\r\n self.classes = classes\r\n self.master = master\r\n self.frame = tk.Frame(self.master, background=\"light blue\")\r\n\r\n self.nameText = tk.Label(self.frame, text=\"Assignment Name\", background=\"light blue\", font=\"helvetica 10 italic bold\")\r\n self.nameText.grid(row=1, column=1, padx=5, pady=5)\r\n\r\n self.nameEntry = tk.Entry(self.frame, background=\"pink\")\r\n self.nameEntry.grid(row=1, column=2, padx=5, pady=5)\r\n\r\n self.classText = tk.Label(self.frame, text=\"Class Name\", background=\"light blue\", font=\"helvetica 10 italic bold\")\r\n self.classText.grid(row=2, column=1, padx=5, pady=5)\r\n\r\n self.o1V = tk.StringVar(self.frame)\r\n self.o1V.set(\"Select a Class\")\r\n self.classOption = tk.OptionMenu(self.frame, self.o1V, *self.classes)\r\n self.classOption.grid(row=2, column=2, padx=5, pady=5)\r\n self.classOption.config(background=\"pink\")\r\n\r\n self.dateText = tk.Label(self.frame, text=\"Due Date (mm/dd/yyyy)\", background=\"light blue\", font=\"helvetica 10 italic bold\")\r\n self.dateText.grid(row=3, column=1, padx=5, pady=5)\r\n\r\n self.dateEntry = tk.Entry(self.frame, background=\"pink\")\r\n self.dateEntry.grid(row=3, column=2, padx=5, pady=5)\r\n\r\n self.timeText = tk.Label(self.frame, text=\"Due Time\", background=\"light blue\", font=\"helvetica 10 italic bold\")\r\n self.timeText.grid(row=4, column=1, padx=5, pady=5)\r\n\r\n self.timeEntry = tk.Entry(self.frame, background=\"pink\")\r\n self.timeEntry.grid(row=4, column=2, padx=5, pady=5)\r\n\r\n self.submitButton = tk.Button(self.frame, text=\"Submit Assignment\", font=\"helvetica 10 italic bold\", command=self.submit, background=\"pink\")\r\n self.submitButton.grid(row=5, column=1, columnspan=2,padx=5, pady=5)\r\n\r\n self.frame.grid()\r\n\r\n def submit(self):\r\n temp = self.dateEntry.get()\r\n tempList = temp.split(\"/\")\r\n if len(tempList[2]) == 4:\r\n\r\n\r\n self.hwStorage.append(f\" | {self.nameEntry.get()} | {self.o1V.get()} | {self.dateEntry.get()} | {self.timeEntry.get()} | \")\r\n\r\n file = open(\"HWdata.txt\", \"a\")\r\n file.write(f\" | {self.nameEntry.get()} | {self.o1V.get()} | {self.dateEntry.get()} | {self.timeEntry.get()} | \" +\"\\n\")\r\n file.close()\r\n self.close_windows()\r\n\r\n updateListbox(self.hwStorage, self.listbox)\r\n\r\n self.mainRoot.destroy()\r\n main()\r\n\r\n def close_windows(self):\r\n self.master.destroy()\r\n\r\n\r\n\r\n\r\n\r\nclass ModifyClassPage:\r\n\r\n def __init__(self, master, classes):\r\n\r\n self.classes = classes\r\n self.master = 
master\r\n        self.frame = tk.Frame(self.master, background=\"light blue\")\r\n\r\n        self.enterClassText = tk.Label(self.frame, text=\"Enter Class Name\", font=\"helvetica 10 italic bold\", background=\"light blue\")\r\n        self.enterClassText.grid(padx=5, pady=5, row=1, column=1)\r\n\r\n        self.addClassEntry = tk.Entry(self.frame, background=\"pink\")\r\n        self.addClassEntry.grid(padx=5, pady=5, row=1, column=2)\r\n\r\n        self.submitClassesButton = tk.Button(self.frame, text=\"Add Class\", font=\"helvetica 10 italic bold\", command=self.submitClasses, background=\"pink\")\r\n        self.submitClassesButton.grid(padx=5, pady=5, row=1, column=3)\r\n\r\n        self.removeAllClassesText = tk.Label(self.frame, text=\"Check To Enable Button\", font=\"helvetica 10 italic bold\", background=\"light blue\")\r\n        self.removeAllClassesText.grid(padx=5, pady=5, column=1, row=2)\r\n\r\n        self.checkBox = tk.Checkbutton(self.frame, command=self.activate, background=\"light blue\")\r\n        self.checkBox.grid(padx=5, pady=5, row=2, column=2)\r\n\r\n        self.removeAllClasses = tk.Button(self.frame, state=tk.DISABLED, text=\"Remove All Classes\", font=\"helvetica 10 italic bold\", command=self.removeClasses, background=\"pink\")\r\n        self.removeAllClasses.grid(padx=5, pady=5, row=2, column=3)\r\n\r\n        self.frame.grid()\r\n\r\n\r\n    def submitClasses(self):\r\n        # Add the entered class name if it is not empty\r\n        class_name = self.addClassEntry.get()\r\n        if class_name:\r\n            self.classes.append(class_name)\r\n            file = open(\"classData.txt\", \"a\")\r\n            file.write(class_name + \"\\n\")\r\n            file.close()\r\n            self.addClassEntry.delete(0, \"end\")\r\n\r\n\r\n    def activate(self):\r\n        if self.removeAllClasses[\"state\"] == tk.DISABLED:\r\n            self.removeAllClasses[\"state\"] = tk.NORMAL\r\n\r\n\r\n    def removeClasses(self):\r\n        self.classes = []\r\n        file = open(\"classData.txt\", \"r+\")\r\n        file.truncate(0)\r\n        file.close()\r\n\r\n\r\n\r\ndef main():\r\n\r\n\r\n    root = tk.Tk()\r\n    today = date.today()\r\n\r\n\r\n\r\n    hwStorage = []\r\n    if os.stat(\"HWdata.txt\").st_size != 0:\r\n        file = open(\"HWdata.txt\", \"r\")\r\n        for line in file:\r\n            hwStorage.append(line)\r\n        file.close()\r\n\r\n    classes = []\r\n    if os.stat(\"classData.txt\").st_size != 0:\r\n        file = open(\"classData.txt\", \"r\")\r\n        for line in file:\r\n            classes.append(line.rstrip())\r\n        file.close()\r\n\r\n\r\n    app = Planner(root, classes, hwStorage, today)\r\n    root.mainloop()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\n","repo_name":"jfain11/homework-planner","sub_path":"HWplanner/class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":9810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"43048988548","text":"from flask import Flask, request, jsonify\nfrom flasgger import Swagger, swag_from, LazyJSONEncoder\nfrom manager.user_base import UserBase\nfrom manager.team_base import TeamBase\nfrom manager.project_board_base import ProjectBoardBase\nimport json\n\napp = Flask(__name__)\n\napp.config[\"SWAGGER\"] = {\"title\": \"Project Planner API\", \"uiversion\": 2}\n\nswagger_config = {\n    \"headers\": [],\n    \"specs\": [\n        {\n            \"endpoint\": \"apispec_1\",\n            \"route\": \"/apispec_1.json\",\n            \"rule_filter\": lambda rule: True,  # all in\n            \"model_filter\": lambda tag: True,  # all in\n        }\n    ],\n    \"static_url_path\": \"/flasgger_static\",\n    \"swagger_ui\": True,\n    \"specs_route\": \"/swagger/\",\n}\n\napp.json_encoder = LazyJSONEncoder\nswagger = Swagger(app, config=swagger_config)\n\nuser_base = UserBase()\nteam_base = TeamBase()\nproject_board_base = 
ProjectBoardBase()\n\n\n@app.route('/create_user', methods=['POST'])\n@swag_from('swagger_yaml/create_user.yml')\ndef create_user():\n request_data = request.get_json()\n response = user_base.create_user(json.dumps(request_data))\n return response\n\n\n@app.route('/list_users', methods=['GET'])\n@swag_from('swagger_yaml/list_users.yml')\ndef list_users():\n response = user_base.list_users()\n return response\n\n\n@app.route('/describe_user', methods=['POST'])\n@swag_from('swagger_yaml/describe_user.yml')\ndef describe_user():\n request_data = request.get_json()\n response = user_base.describe_user(json.dumps(request_data))\n return response\n\n\n@app.route('/update_user', methods=['POST'])\n@swag_from('swagger_yaml/update_user.yml')\ndef update_user():\n request_data = request.get_json()\n response = user_base.update_user(json.dumps(request_data))\n return response\n\n\n@app.route('/get_user_teams', methods=['POST'])\n@swag_from('swagger_yaml/get_user_teams.yml')\ndef get_user_teams():\n request_data = request.get_json()\n response = user_base.get_user_teams(json.dumps(request_data))\n return response\n\n\n@app.route('/create_team', methods=['POST'])\n@swag_from('swagger_yaml/create_team.yml')\ndef create_team():\n request_data = request.get_json()\n response = team_base.create_team(json.dumps(request_data))\n return response\n\n\n@app.route('/list_teams', methods=['GET'])\n@swag_from('swagger_yaml/list_teams.yml')\ndef list_teams():\n response = team_base.list_teams()\n return response\n\n\n@app.route('/describe_team', methods=['POST'])\n@swag_from('swagger_yaml/describe_team.yml')\ndef describe_team():\n request_data = request.get_json()\n response = team_base.describe_team(json.dumps(request_data))\n return response\n\n\n@app.route('/update_team', methods=['POST'])\n@swag_from('swagger_yaml/update_team.yml')\ndef update_team():\n request_data = request.get_json()\n response = team_base.update_team(json.dumps(request_data))\n return response\n\n\n@app.route('/add_users_to_team', methods=['POST'])\n@swag_from('swagger_yaml/add_users_to_team.yml')\ndef add_users_to_team():\n request_data = request.get_json()\n response = team_base.add_users_to_team(json.dumps(request_data))\n return response\n\n\n@app.route('/remove_users_from_team', methods=['POST'])\n@swag_from('swagger_yaml/remove_users_from_team.yml')\ndef remove_users_from_team():\n request_data = request.get_json()\n response = team_base.remove_users_from_team(json.dumps(request_data))\n return response\n\n\n@app.route('/list_team_users', methods=['POST'])\n@swag_from('swagger_yaml/list_team_users.yml')\ndef list_team_users():\n request_data = request.get_json()\n response = team_base.list_team_users(json.dumps(request_data))\n return response\n\n\n@app.route('/create_board', methods=['POST'])\n@swag_from('swagger_yaml/create_board.yml')\ndef create_board():\n request_data = request.get_json()\n response = project_board_base.create_board(json.dumps(request_data))\n return response\n\n\n@app.route('/close_board', methods=['POST'])\n@swag_from('swagger_yaml/close_board.yml')\ndef close_board():\n request_data = request.get_json()\n response = project_board_base.close_board(json.dumps(request_data))\n return response\n\n\n@app.route('/add_task', methods=['POST'])\n@swag_from('swagger_yaml/add_task.yml')\ndef add_task():\n request_data = request.get_json()\n response = project_board_base.add_task(json.dumps(request_data))\n return response\n\n\n@app.route('/update_task_status', 
methods=['POST'])\n@swag_from('swagger_yaml/update_task_status.yml')\ndef update_task_status():\n request_data = request.get_json()\n response = project_board_base.update_task_status(json.dumps(request_data))\n return response\n\n\n@app.route('/list_boards', methods=['POST'])\n@swag_from('swagger_yaml/list_boards.yml')\ndef list_boards():\n request_data = request.get_json()\n response = project_board_base.list_boards(json.dumps(request_data))\n return response\n\n\n@app.route('/export_board', methods=['POST'])\n@swag_from('swagger_yaml/export_board.yml')\ndef export_board():\n request_data = request.get_json()\n response = project_board_base.export_board(json.dumps(request_data))\n return response\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"NirajPatel07/Project-Planner-API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"2263299928","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.models import Site\nfrom django.core.urlresolvers import reverse\n\nfrom django_webtest import WebTest\nfrom post_office.models import Email, EmailTemplate\n\nfrom rynda.test.factories import UserFactory\nfrom rynda.users.models import UserAuthCode\n\n\nclass TestUserRegistration(WebTest):\n \"\"\" Checks that the user can successfully register and activate account. \"\"\"\n\n def setUp(self):\n self.site = Site.objects.get()\n self.site.domain = \"example.com\"\n self.site.name = \"Example site\"\n self.site.save()\n confirm = EmailTemplate(\n name='registration confirmation',\n subject='Account activation',\n content='http://{{site.domain}}/user/activate/{{user.id}}/{{activation_code}}',\n html_content='http://{{site.domain}}/user/activate/{{user.id}}/{{activation_code}}',\n )\n confirm.save()\n complete = EmailTemplate(\n name='registration complete',\n subject='Welcome to team !',\n )\n complete.save()\n\n def action_registration(self):\n \"\"\" User fills registration form \"\"\"\n page = self.app.get(reverse(\"user-creation\"))\n form = page.forms[\"registration_form\"]\n user = UserFactory.attributes(create=False)\n form[\"first_name\"] = user['first_name']\n form[\"last_name\"] = user['last_name']\n form[\"email\"] = user['email']\n form[\"password1\"] = \"123\"\n form[\"password2\"] = \"123\"\n response = form.submit()\n return response\n\n def test_registration_page(self):\n \"\"\" User create account \"\"\"\n users = User.objects.count()\n page = self.action_registration()\n self.assertEqual(200, page.status_code)\n self.assertTemplateUsed(\"registration_success.html\")\n self.assertEqual(users+1, User.objects.count())\n\n def test_registration_email(self):\n \"\"\" Tests for account activation email \"\"\"\n activation_string = \"http://{}/user\".format(self.site.domain)\n self.action_registration()\n mail = Email.objects.get()\n self.assertEqual(\"Account activation\", mail.subject)\n self.assertIn(\n activation_string, mail.message, mail.message.encode('utf-8'))\n self.assertIn(activation_string, mail.html_message)\n\n def test_activation_link(self):\n \"\"\" Click on auto-activation link \"\"\"\n self.action_registration()\n user = User.objects.all().order_by('-id')[0]\n self.assertFalse(user.is_active)\n activation_code = UserAuthCode(settings.SECRET_KEY).auth_code(user)\n activation_url = \"/user/activate/{0}/{1}/\".format(user.id, 
activation_code)\n page = self.app.get(activation_url).follow()\n self.assertEqual(200, page.status_code)\n self.assertTrue(User.objects.get(id=user.id).is_active)\n self.assertTemplateUsed(\"login.html\")\n email = Email.objects.all().order_by(\"-id\")[0]\n self.assertEqual(user.email, email.to[0])\n self.assertEqual(u\"Welcome to team !\", email.subject)\n\n def test_logged_in(self):\n \"\"\" Logged in user attempts to get registration form \"\"\"\n user = UserFactory.create(is_active=True)\n page = self.app.get(reverse(\"user-creation\"), user=user)\n self.assertRedirects(\n page, reverse(\"user-details\", kwargs={'pk': user.id, }))\n","repo_name":"sarutobi/Rynda","sub_path":"rynda/test/test_user_registration.py","file_name":"test_user_registration.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"50"} +{"seq_id":"43269003645","text":"from typing import List\n\nclass Solution:\n def partition(self, s: str) -> List[List[str]]:\n \"\"\"\n Recursion would also work: the 0th character is itself a palindrome, so add it to the list and recursively process the current result, with the remaining string becoming s[1:]; the rest is omitted here.\n Then for the 1st character: if it can form a palindrome with the 0th character, recursively process the current result, with the remaining string becoming s[2:].\n Then for the 2nd character: if it can form a palindrome with the previous two characters, recursively process the current result, with the remaining string becoming s[3:].\n \"\"\"\n ret = [[s[0]]] # initialization\n for c in s[1:]:\n cur_len = len(ret)\n for i in range(cur_len): # go through the existing partitions and check whether the current char c can form a palindrome with the last string, or the last two strings, of each partition; no other cases are possible\n zcs = ret[i]\n ss = zcs[-1] + c\n if ss == ss[::-1]: # can c form a palindrome with the last string of the partition?\n new_list = zcs[:-1] + [ss] # if so, copy the partition and replace its last string with the new combination\n if new_list not in ret:\n ret.append(new_list)\n if len(zcs) >= 2:\n ss = zcs[-2] + zcs[-1] + c\n if ss == ss[::-1]: # can c form a palindrome with the last two strings of the partition?\n new_list = zcs[:-2] + [ss] # if so, copy the partition and replace its last two strings with the new combination\n if new_list not in ret:\n ret.append(new_list)\n ret[i] += c\n # print(ret)\n return ret\n\n\nif __name__ == \"__main__\":\n ret = Solution().partition(\"cbbbcc\")\n\n for line in ret:\n for p in line:\n print(p, end = \" \")\n print()\n # print(board)","repo_name":"zcsxll/leetcode","sub_path":"0131. Palindrome Partitioning.py","file_name":"0131. 
Palindrome Partitioning.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"13621499516","text":"import unittest\n\nfrom puzzle import partOne, partTwo\n\n\nclass TestPuzzle(unittest.TestCase):\n\n def testPartOne(self):\n firstExpected = 436\n firstData = [0, 3, 6]\n\n secondExpected = 1\n secondData = [1, 3, 2]\n\n thirdExpected = 10\n thirdData = [2, 1, 3]\n\n fourthExpected = 27\n fourthData = [1, 2, 3]\n\n fifthExpected = 78\n fifthData = [2, 3, 1]\n\n sixthExpected = 438\n sixthData = [3, 2, 1]\n\n seventhExpected = 1836\n seventhData = [3, 1, 2]\n\n self.assertEqual(partOne(firstData), firstExpected)\n self.assertEqual(partOne(secondData), secondExpected)\n self.assertEqual(partOne(thirdData), thirdExpected)\n self.assertEqual(partOne(fourthData), fourthExpected)\n self.assertEqual(partOne(fifthData), fifthExpected)\n self.assertEqual(partOne(sixthData), sixthExpected)\n self.assertEqual(partOne(seventhData), seventhExpected)\n \n# def testPartTwo(self):\n# firstExpected = 175594\n# firstData = [0, 3, 6]\n#\n# secondExpected = 2578\n# secondData = [1, 3, 2]\n#\n# thirdExpected = 3544142\n# thirdData = [2, 1, 3]\n#\n# fourthExpected = 261214\n# fourthData = [1, 2, 3]\n#\n# fifthExpected = 6895259\n# fifthData = [2, 3, 1]\n#\n# sixthExpected = 18\n# sixthData = [3, 2, 1]\n#\n# seventhExpected = 362\n# seventhData = [3, 1, 2]\n#\n# self.assertEqual(partTwo(firstData), firstExpected)\n# self.assertEqual(partTwo(secondData), secondExpected)\n# self.assertEqual(partTwo(thirdData), thirdExpected)\n# self.assertEqual(partTwo(fourthData), fourthExpected)\n# self.assertEqual(partTwo(fifthData), fifthExpected)\n# self.assertEqual(partTwo(sixthData), sixthExpected)\n# self.assertEqual(partTwo(seventhData), seventhExpected)\n \n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"noMad1717/aoc_2020","sub_path":"day_15/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"11916553230","text":"import json\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef measure_loss(dir):\n files = [os.path.join(dir, \"train_history_{}.json\".format(i)) for i in range(0, 10)]\n losses = []\n for f in files:\n with open(f) as file:\n cont = json.load(file)[\"losses\"]\n losses.append(cont)\n return np.mean(losses, axis=0)\n\n##############################################\n# Configure script here\n##############################################\n\n# Source directories\nsources = []\n\n# Labels for each directory\nlabels = []\n\n\nPLOT_COLORS = [\"#CC4F1B\", \"#7167FF\", \"#80477B\", \"#14FFAD\"]\n\nlosses_g = []\nfor dir in sources:\n losses_g.append(measure_loss(dir))\n\nxes = [i * 128 for i in range(0, len(losses_g[0]))]\nplt.figure(figsize=(8, 4), dpi=200)\nfor i, l in enumerate(losses_g):\n plt.plot(xes, l, label=labels[i], color=PLOT_COLORS[i % len(PLOT_COLORS)])\n\naxes = plt.gca()\naxes.set_ylim([0.0, 0.0005])\nplt.legend()\nplt.show()\n","repo_name":"qiaowenchuan/rlpricing","sub_path":"measure_loss.py","file_name":"measure_loss.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"29988247609","text":"import os\nimport re\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom git import Repo\nfrom dotenv import 
load_dotenv\nimport time\nfrom selenium.webdriver import Firefox, Chrome\nfrom selenium.webdriver.chrome.options import Options\nimport git\nimport subprocess\nimport sys\n\nload_dotenv()\n# Check if the Chrome binary has execute permission\nif not os.access('/usr/bin/google-chrome', os.X_OK):\n os.chmod('/usr/bin/google-chrome', 0o755)\n\n# Check if required dependencies are installed\ntry:\n subprocess.check_call(['google-chrome', '--version'])\nexcept subprocess.CalledProcessError:\n sys.exit('Google Chrome is not installed')\n\ntry:\n subprocess.check_call(['chromedriver', '--version'])\nexcept subprocess.CalledProcessError:\n sys.exit('ChromeDriver is not installed')\n\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\n\ndriver = Chrome(options=chrome_options)\n\ndriver.get(\"https://www.codewars.com/users/sign_in\")\n\ndriver.implicitly_wait(10)\n\nusername_field = driver.find_element(By.ID, \"user_email\")\n\nusername_field.send_keys(os.getenv(\"USER_EMAIL\"))\n\npassword_field = driver.find_element(By.ID, \"user_password\")\n\npassword_field.send_keys(os.getenv(\"PASS\"))\n\nlogin_button = driver.find_element(By.XPATH,\n \"//button[@class='btn mt-3 w-full text-center inline-flex items-center justify-center px-3 py-2 border border-transparent text-sm leading-4 font-medium rounded-md shadow-sm text-white dark:text-gray-200 is-red focus:outline-none focus:ring-2 focus:ring-blue-400 dark:focus:ring-cgray-600']\")\n\nlogin_button.click()\n\ndriver.implicitly_wait(10)\ndriver.get(\"https://www.codewars.com/users/joshua_abel27/completed_solutions\")\nlast_height = driver.execute_script(\n \"return Math.max( document.body.scrollHeight, document.body.offsetHeight, document.documentElement.clientHeight, document.documentElement.scrollHeight, document.documentElement.offsetHeight );\")\n\nwhile True:\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(3)\n new_height = driver.execute_script(\n \"return Math.max( document.body.scrollHeight, document.body.offsetHeight, document.documentElement.clientHeight, document.documentElement.scrollHeight, document.documentElement.offsetHeight );\")\n if new_height == last_height:\n break\n last_height = new_height\n\n# level = driver.find_element(\n# By.XPATH, \"//div[@class='small-hex is-extra-wide is-inline mr-15px is-white-rank']\")\n# folder_kata = level.text\n\ncode_element = driver.find_element(\n By.XPATH, \"//div[@class='items-list w-full md:w-2/3 md:pl-4 md:border-l md:grow']\")\ncode = code_element.text\n# print(code)\ndriver.quit()\n\nfunctions = re.findall(r\"(\\d+) kyu.*?Python:(.*?)(?=\\n\\d|\\Z)\", code, re.DOTALL)\n\n\ndef build_functions(func_list):\n functions = {}\n repo_dir = '.'\n repo = Repo.init(repo_dir)\n kyu_out = \"\"\n for func in func_list:\n kyu = func[0]\n kyu_out = func[0]\n func_code = func[1].replace(\"\\nlast month\\nRefactor\\nDiscuss\", \"\").split(\"Refactor\")[0]\n func_name = re.search(r'def (\\w+)', func_code).group(1)\n folder_name = \"kyu_\" + kyu\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n file_name = os.path.join(folder_name, func_name + \".py\")\n with open(file_name, \"w\") as f:\n f.write(func_code.replace(\"last month\", \"\"))\n functions[func_name] = file_name\n if os.path.exists(os.path.join(folder_name, file_name)):\n with open(\".gitmodules\", \"a\") as f:\n # Modification: changed \"kyu_6\" to \"kyu_{kyu}\"\n f.write(f\"[submodule \\\"kyu_{kyu}\\\"]\\n\\tpath = kyu_{kyu}\\n\\turl = 
https://github.com/joshuaabel1/Codewars/tree/main/kyu_{kyu}\\n\")\n repo.git.add(os.path.join(folder_name, file_name))\n repo.index.commit(f\"Update {file_name}\")\n else:\n repo.git.add(A=True)\n repo.index.commit(f\"Add kyu_{kyu} files\")\n origin = repo.remote(name='origin')\n origin.push()\n return functions\n\n\n\n\nbuild_functions(functions)\n","repo_name":"joshuaabel1/Codewars","sub_path":"scrapper_code.py","file_name":"scrapper_code.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"27979374522","text":"class BinaryTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n\ndef helper(node: BinaryTree, branch, array):\n if node != None:\n branch += node.value\n if node.left == None and node.right == None:\n array.append(branch)\n return\n helper(node.left, branch, array)\n helper(node.right, branch, array)\n\n\ndef branchSums(root):\n array = []\n helper(root, 0, array)\n return array\n","repo_name":"AyushiGarg13/algoexpert-solutions","sub_path":"Easy/Branch Sums/Branch Sums.py","file_name":"Branch Sums.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"50"} +{"seq_id":"26281564804","text":"from django import forms\nfrom book_store.models import Book\n\n\nclass AddToCartForm(forms.Form):\n item_slug = forms.CharField(widget=forms.HiddenInput)\n rent_days = forms.IntegerField(min_value=1)\n\n def __init__(self, *args, **kwargs):\n super(AddToCartForm, self).__init__(*args, **kwargs)\n\n def clean(self):\n item_slug = self.cleaned_data['item_slug']\n rent_days = self.cleaned_data['rent_days']\n\n if not Book.objects.filter(slug=item_slug).exists():\n raise forms.ValidationError(\"Book not found!\")\n if rent_days < 0:\n raise forms.ValidationError(\"Rent days < 0\")\n","repo_name":"AntonChernov/LoriSystemTestTask","sub_path":"book_store/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"36652873369","text":"print('Enter the details of loan you want?')\np = int(input('principal = '))\nr = int(input('Rate of simple interest(%) = '))\nt = int(input('Time period(yrs) = '))\n\ninterest = p*(r/100)*t\nprint(f'Interest to be returned = {interest}')\namt = interest + p\nnumber_of_m = 12 * t\nemi_month = amt/number_of_m\nprint(f'You can pay Rs.{emi_month} every month')","repo_name":"kaushalfeb/Capstone_Projects","sub_path":"07 Mortgage Calculator/mortgage.py","file_name":"mortgage.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"26221742008","text":"from langchain.chains import RetrievalQA\nfrom langchain.chains.query_constructor.base import AttributeInfo\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.retrievers.self_query.base import SelfQueryRetriever\nfrom pydantic import BaseModel\nfrom build_vector_database import build_and_get_database\n\nfrom src.config.config import (\n OPEN_AI_LLM_MODEL,\n OPENAI_API_KEY,\n)\n\nllm = ChatOpenAI(\n temperature=0.0, model=OPEN_AI_LLM_MODEL, openai_api_key=OPENAI_API_KEY\n)\n\nembeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)\n\nvector_store = build_and_get_database()\n\n\nPERFUME_TOOL_DESCRIPTION = 
\"\"\"Useful for finding a perfume by name, brand, or fragrance notes.\n Can filter perfume by price metadata.\n Information in database is in Russian.\n Action: search for a perfume by name, brand, or fragrance notes.\n Action Input: name, brand, or fragrance notes\n Action Output: perfume data with url, name and price.\n \"\"\"\n\nmetadata_field_info = [\n AttributeInfo(\n name=\"source\",\n description=\"Source URL for the perfume\",\n type=\"string or list[string]\",\n ),\n AttributeInfo(\n name=\"name\",\n description=\"Name of the perfume\",\n type=\"string\",\n ),\n AttributeInfo(\n name=\"price\",\n description=\"The price of the perfume\",\n type=\"number\",\n ),\n]\n\n\nclass PerfumeSearchTool(BaseModel):\n name: str = \"perfume_search\"\n description: str = PERFUME_TOOL_DESCRIPTION\n\n @staticmethod\n def run(input: str):\n retriever = SelfQueryRetriever.from_llm(\n llm,\n vector_store,\n document_contents=\"Description of the perfume\",\n metadata_field_info=metadata_field_info,\n verbose=True,\n )\n\n retrieval_qa = RetrievalQA.from_chain_type(\n llm=llm,\n chain_type=\"stuff\",\n retriever=retriever,\n return_source_documents=True,\n )\n result = retrieval_qa({\"query\": input})\n answer = result[\"result\"]\n docs = result[\"source_documents\"]\n answer = answer + \"\\n---\"\n for doc in docs:\n answer = (\n answer\n + \"\\nName: \"\n + doc.metadata[\"name\"]\n + \", URL: \"\n + doc.metadata[\"source\"]\n + \", Price:\"\n + str(doc.metadata[\"price\"])\n )\n\n return answer\n","repo_name":"kpister/prompt-linter","sub_path":"data/scraping/repos/RaftDigiAI~chat-bot-with-knowledge-base/src~tools~perfume_search_tool.py","file_name":"src~tools~perfume_search_tool.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"37263917630","text":"import numpy as np\nimport scipy.io as sio\nimport os\nimport pandas as pd\nfrom regression_utils import MakeRegression,GetModel\nfrom sklearn.model_selection import StratifiedKFold\n\n######################## Load PSD and csv files ################################\n\ndata_path=os.getcwd()\nsave_path=os.path.join(data_path,'Results')\nif not os.path.exists(save_path):\n os.makedirs(save_path)\nPsd_file=os.path.join(data_path,'PSD_100_ROIs.mat')\nPSD_all_bands=sio.loadmat(Psd_file)['PSD']\nvar_file=os.path.join(data_path,'data_N45_cortico_ITMTX.xls')\ndf=pd.read_excel(var_file)\n\n########################### Test On/off #######################################\n\ntest=True\nif test:\n bandes=['Delta']\nelse:\n bandes=['Delta','Theta','Alpha','Beta','Gamma1','Gamma2','Gamma3','GammaL']\n\n####################### Set regression parameters ##############################\n\ntarget_name='ITMTX'# Set the target name : 'ITMTX', 'Cortico'\ntarget=df[target_name].tolist()\nregressor='Lasso' # Regressor name\ninner_cv=10 # Inner cross validation used to optimise the model's params\nouter_cv=5 # Outer cv used to train and test data\noptimise=True # Turn to True if you want to optimise the model\nFeatSelect=False # Set to True if you want to run feature selection\nstat=True\nnbperms=10\n# [float(i) for i in target]\n# print(type(target[0]))\n########################### Run Regression #####################################\nfor bdi,bd in enumerate(bandes):\n print('Runing regression for bande {}'.format(bd))\n scores,perm_sc,pvals=[],[],[]\n\n X=np.squeeze(PSD_all_bands[:,bdi,:]).T\n model=GetModel(regname=regressor,\n optimisation=optimise,\n cv=inner_cv)\n 
score,permutation_score,pvalue=MakeRegression(model=model,\n X=X,\n y=target,\n inner_cv=inner_cv,\n outer_cv=outer_cv,\n stat=stat,\n nperms=nbperms,\n njobs=-1)\n scores.append(score)\n perm_sc.append(permutation_score)\n pvals.append(pvalue)\n print(score)\nsave_file=os.path.join(save_path,'predict_variables','Res_100ROI_{b}_{reg}'.format(b=bd,\n reg=regressor))\nsio.savemat(save_file,{'scores':scores,'perm_sc':perm_sc,'pvals':pvals})\n","repo_name":"TarekLaj/regression","sub_path":"predict_variables.py","file_name":"predict_variables.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"24069438001","text":"import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom .dataset import Dataset\nfrom .models import EdgeModel, SRModel\nfrom .metrics import PSNR, EdgeAccuracy\nfrom .utils import Progbar, create_dir, stitch_images, imsave\n\n\nclass EdgeMatch():\n def __init__(self, config):\n self.config = config\n\n if config.MODEL == 1:\n model_name = 'edge'\n elif config.MODEL == 2:\n model_name = 'SR'\n elif config.MODEL == 3:\n model_name = 'joint'\n\n self.debug = False\n self.model_name = model_name\n self.edge_model = EdgeModel(config).to(config.DEVICE)\n self.sr_model = SRModel(config).to(config.DEVICE)\n\n self.psnr = PSNR(255.0).to(config.DEVICE)\n self.edgeacc = EdgeAccuracy(config.EDGE_THRESHOLD).to(config.DEVICE)\n\n self.test_dataset = Dataset(config.TEST_FLIST_LR, config.TEST_FLIST_LR, sigma=config.SIGMA, scale=1, hr_size=0, augment=False)\n self.train_dataset = Dataset(config.TRAIN_FLIST_LR, config.TRAIN_FLIST_HR, sigma=config.SIGMA, scale=config.SCALE, hr_size=config.HR_SIZE, augment=True)\n self.val_dataset = Dataset(config.VAL_FLIST_LR, config.VAL_FLIST_HR, sigma=config.SIGMA, scale=config.SCALE, hr_size=config.HR_SIZE, augment=False)\n self.sample_iterator = self.val_dataset.create_iterator(config.SAMPLE_SIZE)\n\n self.samples_path = os.path.join(config.PATH, 'samples')\n self.results_path = os.path.join(config.PATH, 'results')\n\n if config.RESULTS is not None:\n self.results_path = os.path.join(config.RESULTS)\n\n if config.DEBUG is not None and config.DEBUG != 0:\n self.debug = True\n\n self.log_file = os.path.join(config.PATH, 'log_' + model_name + '.dat')\n\n def load(self):\n if self.config.MODEL == 1:\n self.edge_model.load()\n\n elif self.config.MODEL == 2:\n self.sr_model.load()\n\n else:\n self.edge_model.load()\n self.sr_model.load()\n\n def save(self):\n if self.config.MODEL == 1:\n self.edge_model.save()\n\n elif self.config.MODEL == 2:\n self.sr_model.save()\n\n else:\n self.edge_model.save()\n self.sr_model.save()\n\n def train(self):\n train_loader = DataLoader(\n dataset=self.train_dataset,\n batch_size=self.config.BATCH_SIZE,\n num_workers=4,\n drop_last=True,\n shuffle=True\n )\n\n epoch = 0\n keep_training = True\n model = self.config.MODEL\n max_iteration = int(float((self.config.MAX_ITERS)))\n total = len(self.train_dataset)\n\n if total == 0:\n print('No training data was provided! 
Check \\'TRAIN_FLIST\\' value in the configuration file.')\n return\n\n while(keep_training):\n epoch += 1\n print('\\n\\nTraining epoch: %d' % epoch)\n\n progbar = Progbar(total, width=20, stateful_metrics=['epoch', 'iter'])\n\n for items in train_loader:\n self.edge_model.train()\n self.sr_model.train()\n\n lr_images, hr_images, lr_edges, hr_edges = self.cuda(*items)\n\n # edge model\n if model == 1:\n # train\n hr_edges_pred, gen_loss, dis_loss, logs = self.edge_model.process(lr_images, hr_images, lr_edges, hr_edges)\n\n # metrics\n precision, recall = self.edgeacc(hr_edges, hr_edges_pred)\n logs.append(('precision', precision.item()))\n logs.append(('recall', recall.item()))\n\n # backward\n self.edge_model.backward(gen_loss, dis_loss)\n iteration = self.edge_model.iteration\n\n\n # sr model / joint model\n else:\n # train\n hr_edges_pred = self.scale(lr_edges) if model == 2 else self.edge_model(lr_images, lr_edges).detach()\n hr_images_pred, gen_loss, dis_loss, logs = self.sr_model.process(lr_images, hr_images, lr_edges, hr_edges_pred)\n\n # metrics\n psnr = self.psnr(self.postprocess(hr_images), self.postprocess(hr_images_pred))\n mae = (torch.sum(torch.abs(hr_images - hr_images_pred)) / torch.sum(hr_images)).float()\n logs.append(('psnr', psnr.item()))\n logs.append(('mae', mae.item()))\n\n # backward\n self.sr_model.backward(gen_loss, dis_loss)\n iteration = self.sr_model.iteration\n\n if iteration > max_iteration:\n keep_training = False\n print('Maximum number of iterations reached!')\n break\n\n logs = [\n (\"epoch\", epoch),\n (\"iter\", iteration),\n ] + logs\n\n progbar.add(len(hr_images), values=logs if self.config.VERBOSE else [x for x in logs if not x[0].startswith('l_')])\n\n # log model at checkpoints\n if self.config.LOG_INTERVAL and iteration % self.config.LOG_INTERVAL == 0:\n self.log(logs)\n\n # sample model at checkpoints\n if self.config.SAMPLE_INTERVAL and iteration % self.config.SAMPLE_INTERVAL == 0:\n self.sample()\n\n # evaluate model at checkpoints\n if self.config.EVAL_INTERVAL and iteration % self.config.EVAL_INTERVAL == 0:\n print('\\nstart eval...\\n')\n self.eval()\n\n # save model at checkpoints\n if self.config.SAVE_INTERVAL and iteration % self.config.SAVE_INTERVAL == 0:\n self.save()\n\n print('\\nEnd training....')\n\n def eval(self):\n val_loader = DataLoader(\n dataset=self.val_dataset,\n batch_size=self.config.BATCH_SIZE,\n drop_last=True,\n shuffle=True\n )\n\n model = self.config.MODEL\n total = len(self.val_dataset)\n\n self.edge_model.eval()\n self.sr_model.eval()\n\n progbar = Progbar(total, width=20, stateful_metrics=['iter'])\n iteration = 0\n\n for items in val_loader:\n iteration += 1\n lr_images, hr_images, lr_edges, hr_edges = self.cuda(*items)\n\n # edge model\n if model == 1:\n # eval\n hr_edges_pred, gen_loss, dis_loss, logs = self.edge_model.process(lr_images, hr_images, lr_edges, hr_edges)\n\n # metrics\n precision, recall = self.edgeacc(hr_edges, hr_edges_pred)\n logs.append(('precision', precision.item()))\n logs.append(('recall', recall.item()))\n\n\n # sr model / joint model\n else:\n hr_edges_pred = self.scale(lr_edges) if model == 2 else self.edge_model(lr_images, lr_edges).detach()\n hr_images_pred, gen_loss, dis_loss, logs = self.sr_model.process(lr_images, hr_images, lr_edges, hr_edges_pred)\n\n # metrics\n psnr = self.psnr(self.postprocess(hr_images), self.postprocess(hr_images_pred))\n mae = (torch.sum(torch.abs(hr_images - hr_images_pred)) / torch.sum(hr_images)).float()\n logs.append(('psnr', psnr.item()))\n 
logs.append(('mae', mae.item()))\n\n logs = [(\"iter\", iteration), ] + logs\n progbar.add(len(hr_images), values=logs)\n\n def test(self):\n self.edge_model.eval()\n self.sr_model.eval()\n\n model = self.config.MODEL\n create_dir(self.results_path)\n\n test_loader = DataLoader(\n dataset=self.test_dataset,\n batch_size=1,\n )\n\n index = 0\n for items in test_loader:\n name = self.test_dataset.load_name(index)\n lr_images, hr_images, lr_edges, hr_edges = self.cuda(*items)\n index += 1\n\n # edge model\n if model == 1:\n outputs = self.edge_model(lr_images, lr_edges)\n\n # sr model / joint model\n else:\n hr_edges_pred = self.scale(lr_edges) if model == 2 else self.edge_model(lr_images, lr_edges).detach()\n outputs = self.sr_model(lr_images, hr_edges_pred)\n\n output = self.postprocess(outputs)[0]\n path = os.path.join(self.results_path, name)\n print(index, name)\n\n imsave(output, path)\n\n print('\\nEnd test....')\n\n def sample(self):\n if len(self.val_dataset) == 0:\n return\n\n self.edge_model.eval()\n self.sr_model.eval()\n\n model = self.config.MODEL\n items = next(self.sample_iterator)\n lr_images, hr_images, lr_edges, hr_edges = self.cuda(*items)\n\n # edge model\n if model == 1:\n iteration = self.edge_model.iteration\n outputs = self.edge_model(lr_images, lr_edges)\n\n # sr model / joint model\n else:\n iteration = self.sr_model.iteration\n hr_edges = self.scale(lr_edges) if model == 2 else self.edge_model(lr_images, lr_edges).detach()\n outputs = self.sr_model(lr_images, hr_edges)\n\n image_per_row = 2\n if self.config.SAMPLE_SIZE <= 6:\n image_per_row = 1\n\n images = stitch_images(\n self.postprocess(lr_images),\n self.postprocess(hr_images),\n self.postprocess(hr_edges),\n self.postprocess(outputs),\n img_per_row=image_per_row\n )\n\n\n path = os.path.join(self.samples_path, self.model_name)\n name = os.path.join(path, str(iteration).zfill(5) + \".png\")\n create_dir(path)\n print('\\nsaving sample ' + name)\n images.save(name)\n\n def scale(self, tensor):\n return F.interpolate(tensor, scale_factor=self.config.SCALE)\n\n def log(self, logs):\n with open(self.log_file, 'a') as f:\n f.write('%s\\n' % ' '.join([str(item[1]) for item in logs]))\n\n def cuda(self, *args):\n return (item.to(self.config.DEVICE) for item in args)\n\n def postprocess(self, img):\n # [0, 1] => [0, 255]\n img = img * 255.0\n img = img.permute(0, 2, 3, 1)\n return img.int()\n","repo_name":"knazeri/edge-informed-sisr","sub_path":"src/edge_match.py","file_name":"edge_match.py","file_ext":"py","file_size_in_byte":10440,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"50"} +{"seq_id":"14717357022","text":"def seq_search(arr,ele):\n\n\tfound = False\n\tpos = 0\n\n\twhile pos < len(arr) and not found:\n\n\t\tif arr[pos] == ele:\n\n\t\t\tfound = True\n\n\t\telse:\n\n\t\t\tpos += 1\n\n\treturn found\n\nprint(seq_search([1,2,3,4,5],2))","repo_name":"harshivvp/Python-Algorithms","sub_path":"Searching-Algorithms/Unordered_Seq_Search.py","file_name":"Unordered_Seq_Search.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"11965118607","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n dummy = ListNode()\n node = dummy\n array = []\n N = len(lists)\n for i in range(N):\n if 
lists[i]:\n heappush(array, (lists[i].val, i))\n\n while array:\n _, index = heappop(array)\n node.next = lists[index]\n lists[index] = lists[index].next\n node = node.next\n if lists[index]:\n heappush(array, (lists[index].val, index))\n node.next = None\n return dummy.next","repo_name":"duressa-feyissa/A2SV_Programming","sub_path":"merge-k-sorted-lists.py","file_name":"merge-k-sorted-lists.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"50"} +{"seq_id":"40914903947","text":"#1\ndef number_of_food_groups():\n return 5\nprint(number_of_food_groups())\n\n# Prints 5\n\n\n#2\ndef number_of_military_branches():\n return 5\nprint(number_of_days_in_a_week_silicon_or_triangle_sides() + number_of_military_branches())\n\n# Throws an Error\n\n\n#3\ndef number_of_books_on_hold():\n return 5\n return 10\nprint(number_of_books_on_hold())\n\n# Prints 5\n\n\n#4\ndef number_of_fingers():\n return 5\n print(10)\nprint(number_of_fingers())\n\n# Prints 5\n\n\n#5\ndef number_of_great_lakes():\n print(5)\nx = number_of_great_lakes()\nprint(x)\n\n# None (Actual Answer: Prints 5 then None)\n\n\n#6\ndef add(b,c):\n print(b+c)\nprint(add(1,2) + add(2,3))\n\n# Values of None Throw an Error\n\n\n#7\ndef concatenate(b,c):\n return str(b)+str(c)\nprint(concatenate(2,5))\n\n# Prints 25\n\n\n#8\ndef number_of_oceans_or_fingers_or_continents():\n b = 100\n print(b)\n if b < 10:\n return 5\n else:\n return 10\n return 7\nprint(number_of_oceans_or_fingers_or_continents())\n\n# Prints 10 (Actual Answer: Prints 100 then 10)\n\n\n#9\ndef number_of_days_in_a_week_silicon_or_triangle_sides(b,c):\n if b 0 GROUP BY state ORDER BY Total_Appopeners DESC limit 10\")\r\n df=pd.DataFrame(mycursor.fetchall(),columns=['State','Total_Appopeners'])\r\n st.write(df)\r\n fig=px.pie(df,\r\n values='Total_Appopeners',\r\n names='State',\r\n title='Top 10 Phonepe users according to Appopeners',\r\n hole=0.5,\r\n color='Total_Appopeners')\r\n st.plotly_chart(fig,use_container_width=True)\r\n \r\n \r\n#Explore the data\r\nif selected==\"Explore Data\":\r\n st.markdown(\"##:violet[Exploring the Data]\")\r\n Type = st.sidebar.selectbox(\"**Type**\",(\"Analysis of Transactions\",\"Users\")) \r\n if Type==\"Analysis of Transactions\":\r\n col1,col2=st.columns([1,1.5],gap=\"large\")\r\n with col1:\r\n \r\n Year = st.slider(\"**Select the Year**\",min_value=2018,max_value=2022)\r\n Quarter=st.selectbox('**Select the Quarter',('1','2','3','4'),key='qgwe2')\r\n with col2:\r\n st.write(\r\n \"\"\"\r\n In this page we will get the insights of transactions count According to District,Transaction Types vs Total Transactions amount and Geomap visualization to show the state based data according to Transaction count and TRransaction amount.\r\n \"\"\"\r\n )\r\n st.markdown(\"##:violet[**Transaction Count According To District**]\")\r\n selected_state= st.selectbox(\"**please select any State to visualize**\",\r\n ('andaman-&-nicobar-islands','andhra-pradesh','arunachal-pradesh','assam','bihar',\r\n 'chandigarh','chhattisgarh','dadra-&-nagar-haveli-&-daman-&-diu','delhi','goa','gujarat','haryana',\r\n 'himachal-pradesh','jammu-&-kashmir','jharkhand','karnataka','kerala','ladakh','lakshadweep',\r\n 'madhya-pradesh','maharashtra','manipur','meghalaya','mizoram',\r\n 'nagaland','odisha','puducherry','punjab','rajasthan','sikkim',\r\n 'tamil-nadu','telangana','tripura','uttar-pradesh','uttarakhand','west-bengal'),index=22,key=\"state_to selectbox\")\r\n 
mycursor.execute(f\"select State,District,year,quarter,sum(transaction_count) as Total_Transactions_Count from map_trans_sql where year ={Year} and quarter = {Quarter} and State = '{selected_state}' group by State,District,year,quarter order by state,district\")\r\n \r\n df1=pd.DataFrame(mycursor.fetchall(),columns=['State','District','Year','Quarter','Total_Transactions_count'])\r\n \r\n \r\n fig=px.bar(df1,\r\n title='Transaction Count According To District',\r\n x=\"District\",\r\n y=\"Total_Transactions_count\",\r\n orientation='v',\r\n color='Total_Transactions_count',\r\n color_continuous_scale=px.colors.sequential.Magenta)\r\n \r\n st.plotly_chart(fig,use_container_width=True)\r\n st.markdown(\"##:violet[payment type]\")\r\n selected_state=st.selectbox(\"**please select any state to visualize**\",\r\n ('andaman-&-nicobar-islands','andhra-pradesh','arunachal-pradesh','assam','bihar',\r\n 'chandigarh','chhattisgarh','dadra-&-nagar-haveli-&-daman-&-diu','delhi','goa','gujarat','haryana',\r\n 'himachal-pradesh','jammu-&-kashmir','jharkhand','karnataka','kerala','ladakh','lakshadweep',\r\n 'madhya-pradesh','maharashtra','manipur','meghalaya','mizoram',\r\n 'nagaland','odisha','puducherry','punjab','rajasthan','sikkim',\r\n 'tamil-nadu','telangana','tripura','uttar-pradesh','uttarakhand','west-bengal'),key=\"state_selectbox\")\r\n Type = st.selectbox('**Please select the values to visualize**',('Transaction_count','Transaction_amount'))\r\n if Type ==\"Transaction_count\":\r\n mycursor.execute(f\"select Transaction_type,sum(Transaction_count) as Total_Transactions_count from agg_trans_sql where year = {Year} and quarter ={Quarter} group by transaction_type order by Transaction_type\")\r\n df=pd.DataFrame(mycursor.fetchall(),columns=['Transaction_type','Total_Transactions_count'])\r\n\r\n fig = px.bar(df,\r\n title= 'Transaction Types vs Total_Transactions_count',\r\n x='Transaction_type',\r\n y='Total_Transactions_count',\r\n orientation='v',\r\n color='Transaction_type',\r\n color_continuous_scale=px.colors.sequential.Magenta)\r\n st.plotly_chart(fig,use_container_width=False) \r\n \r\n if Type==\"Transaction_amount\":\r\n mycursor.execute(f\"select Transaction_type, sum(Transaction_amount) as Total_Transaction_amount from agg_trans_sql where year={Year} and quarter={Quarter} group by transaction_type order by Transaction_type\")\r\n df=pd.DataFrame(mycursor.fetchall(),columns=['Transaction_type','Total_Transactions_amount'])\r\n fig=px.bar(df,\r\n title='Transaction Types vs Total_Transactions_amount',\r\n x='Transaction_type',\r\n y='Total_Transactions_amount',\r\n orientation='v',\r\n color='Transaction_type',\r\n color_continuous_scale=px.colors.sequential.Magenta)\r\n st.plotly_chart(fig,use_container_width=False)\r\n \r\n #Map\r\n\r\n india= json.load(open(\"C:\Guvi\Data science\Projects\india_state_geo.json\",\"r\"))\r\n\r\n\r\n select1 = st.selectbox(\"Select any one\",[\"Transaction count\",\"Transaction amount\"])\r\n st.markdown(\":violet[This map is used to show the state-based data according to Transaction count and Transaction amount]\")\r\n mycursor.execute(f\"select State, sum(Transaction_count) as Total_Transaction_count,sum(Transaction_amount) as Total_Transaction_amount from map_trans_sql where year={Year} and quarter= {Quarter} group by State order by State\")\r\n df1 = pd.DataFrame(mycursor.fetchall(),columns=['State','Total_Transaction_count','Total_Transaction_amount'])\r\n State_name=\"C:\Guvi\Data science\Projects\India States-UTs.csv\"\r\n 
data=pd.read_csv(State_name)\r\n df1.State=data\r\n if select1==\"Transaction amount\":\r\n fig1 = px.choropleth(df1,geojson=india,\r\n featureidkey='properties.ST_NM',\r\n locations = 'State',\r\n color ='Total_Transaction_amount',\r\n color_continuous_scale='Aggrnyl')\r\n \r\n fig1.update_geos(fitbounds='locations',visible=False)\r\n st.plotly_chart(fig1,use_container_width=True)\r\n \r\n if select1==\"Transaction count\":\r\n fig2=px.choropleth(df1,geojson=india,\r\n featureidkey='properties.ST_NM',\r\n locations='State',\r\n color='Total_Transaction_count',\r\n color_continuous_scale='Aggrnyl')\r\n \r\n fig2.update_geos(fitbounds=\"locations\",visible=False)\r\n st.plotly_chart(fig2,use_container_width=True) \r\n \r\n \r\n if Type==\"Users\":\r\n Data_segmentation=st.sidebar.selectbox(\"**Data segmentation**\",(\"Registered Users\",\"Analysis of country\"),key=\"Data_selectbox\")\r\n col1,col2=st.columns([1,1.5],gap=\"large\")\r\n with col1:\r\n Year = st.slider(\"**Select the Year**\",min_value=2018,max_value=2022)\r\n Quarter=st.selectbox(\"**Select the Quarter**\",('1234'),key='quart')\r\n if Data_segmentation==\"Registered Users\":\r\n st.markdown(\"##:violet[Total Numbers of Registered Users According to Districts\")\r\n selected_state=st.selectbox(\"**Select any state to fetch the data**\",\r\n ('andaman-&-nicobar-islands','andhra-pradesh','arunachal-pradesh','assam','bihar',\r\n 'chandigarh','chhattisgarh','dadra-&-nagar-haveli-&-daman-&-diu','delhi','goa','gujarat','haryana',\r\n 'himachal-pradesh','jammu-&-kashmir','jharkhand','karnataka','kerala','ladakh','lakshadweep',\r\n 'madhya-pradesh','maharashtra','manipur','meghalaya','mizoram',\r\n 'nagaland','odisha','puducherry','punjab','rajasthan','sikkim',\r\n 'tamil-nadu','telangana','tripura','uttar-pradesh','uttarakhand','west-bengal'),index=1)\r\n mycursor.execute(f\"select State,year,quarter,District,sum(Registered_user) as Total_Registered_Users from map_user_sql where year={Year} and quarter ={Quarter} and state = '{selected_state}' group by State,District,year,quarter order by state,district\")\r\n df=pd.DataFrame(mycursor.fetchall(),columns=['State','year','quarter','District','Total_Registered_Users'])\r\n fig = px.bar(df,\r\n x=\"District\",\r\n y=\"Total_Registered_Users\",\r\n orientation='v',\r\n color=\"Total_Registered_Users\",\r\n color_continuous_scale=px.colors.sequential.Magenta)\r\n st.plotly_chart(fig,use_container_width=True)\r\n india1 = json.load(open(\"C:\\Guvi\\Data science\\Projects\\india_state_geo.json\",\"r\"))\r\n if Data_segmentation==\"Analysis of country\":\r\n st.markdown(\":violet[This geomap used to show the state based data according to Registered users and App_Openers]\")\r\n mycursor.execute(f\"select State,sum(Registered_user) as Registered_Users, sum(app_opening) as App_Opens from map_user_sql where year={Year} and quarter ={Quarter} group by state\")\r\n df1=pd.DataFrame(mycursor.fetchall(),columns=[\"State\",\"Registered_Users\",\"App_Opens\"])\r\n State_name=\"C:\\Guvi\\Data science\\Projects\\India States-UTs.csv\"\r\n data=pd.read_csv(State_name)\r\n df1.State=data\r\n fig=px.choropleth(df1,\r\n geojson=india1,\r\n featureidkey=\"properties.ST_NM\",\r\n locations=\"State\",\r\n color=\"Registered_Users\",\r\n hover_data=[\"State\",\"Registered_Users\",\"App_Opens\"],\r\n color_continuous_scale={\r\n })\r\n fig.update_geos(fitbounds=\"locations\",visible=False)\r\n fig.update_layout(height=600,width=800)\r\n st.plotly_chart(fig,use_container_width=False,key='choropleth_chart')\r\n 
\r\n","repo_name":"dineshdilip2/Phonepe_pulse","sub_path":"Phonepe_pulse.py","file_name":"Phonepe_pulse.py","file_ext":"py","file_size_in_byte":18340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"21267901830","text":"from requests import get\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\ndef download_results(url, race):\n \n df_columns = ['Place','Name','City','Bib_No','Age','Gender','Age_Group','Chip_Time','Gun_Time','Chip_Diff','Pace', 'Race']\n \n df_rows = []\n \n page = get(url)\n soup = BeautifulSoup(page.text, 'html.parser')\n \n #grab our results table\n result_table = soup.find('table', class_='racetable')\n rows = result_table.find_all('tr')\n \n #loop through rows\n for row in rows:\n first = row.find('td')\n \n #table has extra garbage only look at rows with a number (place) in the\n #first cell\n if first.text.isnumeric():\n #get all cells for this row\n cells = row.find_all('td')\n #grab the first 11 cells\n result = [cells[0].text,\n cells[1].text,\n cells[2].text,\n cells[3].text,\n cells[4].text,\n cells[5].text,\n cells[6].text,\n cells[7].text,\n cells[8].text,\n cells[9].text,\n cells[10].text,\n race]\n df_rows.append(result)\n df = pd.DataFrame(df_rows, columns=df_columns)\n return df\n\n\n#results urls\nfull_results_url = 'http://competitivetiming.com/results/1850841O'\nhalf_results_url = 'http://competitivetiming.com/results/1850842O'\n\nfull_df = download_results(full_results_url, 'full')\nhalf_df = download_results(half_results_url, 'half')\n\n#save to csv\nfull_df.to_csv('data/2018_missoula_marathon_full.csv', index=False)\nhalf_df.to_csv('data/2018_missoula_marathon_half.csv', index=False)","repo_name":"4one4/2018-Missoula-Marathon","sub_path":"results_scrapper.py","file_name":"results_scrapper.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"14282647025","text":"\r\nfrom Domain.CardClient import CardClient\r\n\r\n\r\nclass CardClientValidator:\r\n def valideaza(self, card: CardClient):\r\n erori = []\r\n if int(card.CNP) < 0:\r\n erori.append(\"The CNP must be strictly positive!\")\r\n if len(card.CNP) != 13:\r\n erori.append(\"The CNP must have 13 digits!\")\r\n if len(erori) > 0:\r\n raise ValueError(erori)\r\n","repo_name":"NastaseNicoleta/PythonLab8910","sub_path":"lab-8910-NastaseNicoleta/Domain/cardClientValidator.py","file_name":"cardClientValidator.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"32302010219","text":"import unittest\nimport os\nfrom src.utils import load_config\n\nload_config()\n\n\nclass IMDBTestCase(unittest.TestCase):\n def setUp(self) -> None:\n self.imdb_root = os.environ['IMDB_ROOT']\n\n def test_build_imdb(self):\n from src.datasets import build_imdb\n df = build_imdb(f\"{self.imdb_root}/imdb.mat\", n=None, save=f\"{self.imdb_root}/imdb.pickle\")\n print(df)\n print(df[:5])\n\n def test_load_pickle(self):\n from src.datasets import unpickle_imdb\n df = unpickle_imdb(f\"{self.imdb_root}/imdb.pickle\")\n print(df)\n\n def test_imdb_dataset(self):\n from src.datasets import ImdbDataset\n from torchvision.transforms import ToTensor\n from src.datasets import unpickle_imdb\n df = unpickle_imdb(f\"{self.imdb_root}/imdb.pickle\")\n ds = 
ImdbDataset(root=self.imdb_root, df=df, transform=ToTensor())\n\n from torchvision.transforms import ToPILImage\n pil = ToPILImage()\n tensor, label = ds[-1]\n #print(f\"Label: {label}\")\n #pil(tensor).show()\n\n\nclass VGGFaceTestCase(unittest.TestCase):\n def setUp(self) -> None:\n from torchvision import transforms\n from src.datasets import VGGFaceDataset\n self.root = os.environ['VGGFACE_ROOT']\n self.trans = transforms.Compose([\n transforms.Resize(64),\n transforms.CenterCrop(64),\n ])\n self.ds = VGGFaceDataset(self.root, self.trans)\n\n def test_init(self):\n self.assertEqual(str(self.root), str(self.ds.root))\n\n def test_nidents(self):\n from src.datasets import VGGFaceDataset\n n = 5\n ds = VGGFaceDataset(self.root, self.trans, nidents=n)\n self.assertEqual(len(ds.identity.keys()), n)\n\n def test_getitem(self):\n item = self.ds[0]\n self.assertIsNotNone(item)\n item[0].show()\n\n def test_len(self):\n l_ds = len(self.ds)\n self.assertGreater(l_ds, 0)\n self.assertGreater(l_ds, 10000)\n\n def test_getiden(self):\n iden = 'n000002'\n expected = [\n '0001_01.jpg',\n '0002_01.jpg',\n '0003_01.jpg'\n ]\n items = self.ds.getiden(iden)\n for exp in expected:\n self.assertIn(exp, items)\n self.assertIsNone(self.ds.getiden('potatosalad'))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"rondeaujared/gender-prediction","sub_path":"tests/test_datasets.py","file_name":"test_datasets.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"29397950865","text":"class Suica:\n def __init__(self):\n self._balance = 500\n\n # Use a getter so the balance can be read but not written\n @property\n def balance(self):\n return self._balance\n\n def deposit(self, num):\n if num < 100:\n raise\n\n self._balance += num\n\nclass VendingMachine:\n def __init__(self):\n self.drink_pepsi = [Drink(\"pepsi\", 150) for _ in range(5)]\n self.drink_monster = [Drink(\"monster\", 150) for _ in range(5)]\n self.drink_irohasu = [Drink(\"irohasu\", 150) for _ in range(5)]\n self.drink_all = [self.drink_pepsi, self.drink_monster, self.drink_irohasu]\n self._sales = 0\n\n @property\n def sales(self):\n return self._sales\n\n def target_drink(self, drink: object):\n for i in self.drink_all:\n if i[0].name == drink.name:\n target_drink = i\n \n return target_drink\n\n def buy(self, drink: object, num, suica: object):\n if len(self.target_drink(drink)) < num:\n raise\n\n for _ in range(num):\n self.target_drink(drink).pop()\n\n self._sales += int(drink.price) * num\n\n suica._balance -= int(drink.price) * num\n if suica._balance < 0:\n raise\n\n def available_list(self, suica: object):\n available_list = []\n for drink in self.drink_all:\n if (suica.balance >= drink[0].price) \\\n and (len(drink) > 0):\n available_list.append(drink[0].name)\n \n return available_list\n \n def stock(self):\n stock_list = []\n for drink in self.drink_all:\n drink_list = f\"({drink[0].name}:{len(drink)})\"\n stock_list.append(drink_list)\n \n return stock_list\n \n def add_stock(self, drink: object, num):\n for _ in range(num):\n self.target_drink(drink).append(drink)\n\nclass Drink:\n def __init__(self, name, price):\n self.name = name\n self.price = price\n\n# Step1\nsuica = Suica()\nsuica.deposit(int(input(\"Amount to charge: \"))) # raises for amounts under 100 yen\nprint(f\"Current charged balance: {suica.balance}\") # 500\n\n# Step2\nvending = VendingMachine()\nprint(f\"Current stock: {vending.stock()}\") # 5, 5, 5\nprint()\n\n# Step3\ntarget = {1: [\"pepsi\", 150], 2: [\"monster\", 150], 3: 
[\"irohasu\", 150]}\nprint(\"1: pepsi, 2: monster, 3: irohasu\")\ninput_no = int(input(\"Purchase number: \"))\ninput_drink = Drink(target[input_no][0], target[input_no][1])\ninput_num = int(input(\"Purchase quantity: \")) # raises when the balance is insufficient or the stock is zero\nvending.buy(input_drink, input_num, suica)\nprint(f\"Current stock: {vending.stock()}\")\nprint(f\"Current sales: {vending.sales}\")\nprint(f\"Current charged balance: {suica.balance}\")\nprint()\n\n# Step4\nprint(\"1: pepsi, 2: monster, 3: irohasu\")\ninput_no = int(input(\"Stock number: \"))\ninput_drink = Drink(target[input_no][0], target[input_no][1])\ninput_num = int(input(\"Quantity to add: \"))\nvending.add_stock(input_drink, input_num)\nprint(f\"Current stock: {vending.stock()}\")\nprint(f\"Available drinks: {vending.available_list(suica)}\")\n\n# writing is not allowed\n# suica.balance = 1000\n# writing is allowed\n# suica._balance = 1000\n","repo_name":"sousou1216/hc_practice","sub_path":"python/tmp_vending_machine.py","file_name":"tmp_vending_machine.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"9611502536","text":"'''\nArticle on testing TF Lite model with\n\nhttps://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py\n'''\n\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\n\nTARGET_IMAGE_SIZE = 224\n\n\n\nlabels = {0: 'left', 1: 'right', 2: 'upright', 3: 'upsidedown'}\nimage_path = '/home/ivo/Downloads/airplane_0010.jpg'\n\nTFLITE_MODEL = \"models/export/image_flip.tflite\"\n\ninterpreter = tf.lite.Interpreter(model_path=TFLITE_MODEL)\ninterpreter.allocate_tensors()\n\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\nprint(input_details)\nprint(output_details)\n\n# check the type of the input tensor\nfloating_model = input_details[0]['dtype'] == np.float32\n\n# NxHxWxC, H:1, W:2\nheight = input_details[0]['shape'][1]\nwidth = input_details[0]['shape'][2]\nimg = Image.open(image_path).resize((width, height))\n\n# add N dim\ninput_data = np.expand_dims(img, axis=0)\ninput_mean = 127.5\ninput_std = 127.5\n\nif floating_model:\n input_data = (np.float32(input_data) - input_mean) / input_std\n\ninterpreter.set_tensor(input_details[0]['index'], input_data)\n\ninterpreter.invoke()\n\noutput_data = interpreter.get_tensor(output_details[0]['index'])\nresults = np.squeeze(output_data)\n\ntop_k = results.argsort()[-5:][::-1]\nfor i in top_k:\n if floating_model:\n print('{:08.6f}: {}'.format(float(results[i]), labels[i]))\n else:\n print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i]))","repo_name":"istefano82/auto_image_rotator","sub_path":"auto_image_flip/model_serve_docker/test_model_tflite.py","file_name":"test_model_tflite.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"30423981260","text":"import os\nimport requests as rq\nimport spotipy as sp\nfrom bs4 import BeautifulSoup\nfrom spotipy.oauth2 import SpotifyOAuth\n\n# date = input(\"Which year do you want to travel to? 
(YYYY-MM-DD): \")\ndate = \"2000-10-01\"\nyear = date.split(\"-\")[0]\nURL = f\"https://www.billboard.com/charts/hot-100/{date}/\"\nCLIENT_ID = os.environ.get(\"CLIENT_ID\")\nCLIENT_SECRET = os.environ.get(\"CLIENT_SECRET\")\nREDIRECT_URL = \"https://example.com/callback\"\n\nspotify = sp.Spotify(auth_manager=SpotifyOAuth(\n client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n redirect_uri=REDIRECT_URL,\n scope=\"playlist-modify-private playlist-read-private\",\n show_dialog=True,\n cache_path=\"token.txt\",))\nuser_id = spotify.current_user()[\"id\"]\n\n\nresponse = rq.get(URL)\nhot_100 = response.text\nsoup = BeautifulSoup(hot_100, \"html.parser\")\ntitles = soup.find_all(name=\"h3\", class_=\"a-no-trucate\", id=\"title-of-a-story\")\nartists = soup.find_all(name=\"span\", class_=\"a-no-trucate\")\n\ntitles_list = [title.getText().replace(\"\\n\", \"\").replace(\"\\t\", \"\") for title in titles]\nartists_list = [artist.getText().replace(\"\\n\", \"\").replace(\"\\t\", \"\") for artist in artists]\nsong_list = dict(zip(artists_list, titles_list))\n\n# for index, title in enumerate(titles_list):\n# print(f\"{index+1}) {title} - {artists_list[index]}\")\n\nuri_list = []\nfor i, title in enumerate(titles_list):\n spotify_result = spotify.search(q=f\"track:{title}\", type=\"track\")\n try:\n uri = spotify_result[\"tracks\"][\"items\"][0][\"uri\"]\n uri_list.append(uri)\n except IndexError:\n print(f\"{title} doesn't exist in Spotify. Skipped.\")\n\n\nmy_playlist = spotify.user_playlist_create(user=f\"{user_id}\",\n name=f\"Top 100 '{year}' Billboard Tracks\",\n public=False,\n description=f\"Top 100 Billboard Tracks from {year}y, created by Python.\")\nspotify.playlist_add_items(playlist_id=my_playlist[\"id\"], items=uri_list)","repo_name":"kaesik/100-Days-of-Code","sub_path":"3_Intermediate+/Day 46/time_maschine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"8716109042","text":"height = int(input())\nx = 1\n\nfor i in range(1,height):\n\n for j in range(1,i+1):\n\n print(x,end=\"# \")\n x += 1\n \n print()\n \n# Sample Input :- 5\n# Output :-\n# 1# \n# 2# 3# \n# 4# 5# 6# \n# 7# 8# 9# 10# \n","repo_name":"Mobasherah12/Python-PatternHouse","sub_path":"Numeric Patterns/numericpattern171.py","file_name":"numericpattern171.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"29"} +{"seq_id":"17809011222","text":"from flask import render_template, request, redirect, Blueprint, url_for, flash, abort\nfrom project.auth.forms import RegisterForm, LoginForm, UpdateProfileForm\nfrom project import login_manager\nfrom project.models import User, Comment\nfrom project import db\nfrom flask_login import login_user, logout_user, login_required, current_user\n\nauth = Blueprint('auth', __name__, template_folder='templates/auth',\n url_prefix='/auth')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\n@auth.route('/register', methods=['GET', 'POST'])\ndef register():\n form = RegisterForm()\n if form.validate_on_submit() and request.method == 'POST':\n if User.query.filter_by(username=form.username.data).first():\n msg = 'This username already exists'\n flash(msg)\n else:\n user = User(username=form.username.data,\n first_name=form.first_name.data,\n last_name=form.last_name.data,\n password=form.password.data, usergroup_id=2)\n db.session.add(user)\n 
db.session.commit()\n return redirect(url_for('auth.login'))\n elif request.method == 'POST':\n flash('Please fill out the form.')\n\n return render_template('register.html', form=form)\n\n\n@auth.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user:\n if user.verify_password(form.password.data):\n login_user(user)\n return redirect(url_for('index'))\n else:\n flash('Incorrect username or password')\n return render_template('login.html', form=form)\n\n\n@auth.route('/me')\n@login_required\ndef profile():\n comments = current_user.comments\n return render_template('profile.html', me=current_user, comments=comments)\n\n\n@auth.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n\n@auth.route('edit/<int:user_id>', methods=['GET', 'POST'])\n@login_required\ndef update_profile(user_id):\n if current_user.id != user_id:\n abort(403)\n else:\n user = load_user(user_id)\n form = UpdateProfileForm()\n if form.validate_on_submit():\n user.first_name = form.first_name.data\n user.last_name = form.last_name.data\n if form.password.data:\n user.password = form.password.data\n db.session.commit()\n return redirect(url_for('auth.profile'))\n elif request.method == 'GET':\n form = UpdateProfileForm()\n form.first_name.data = current_user.first_name\n form.last_name.data = current_user.last_name\n return render_template('update_profile.html', form=form)\n\n@auth.route('/admin')\n@login_required\ndef admin(mode='users'):\n if int(current_user.usergroup_id) != 1:\n abort(403)\n try:\n mode = request.args['mode']\n except:\n mode = 'users'\n page = request.args.get('page', 1, type=int)\n users = User.query.order_by(User.username).paginate(page=page, per_page=10)\n comments = Comment.query.order_by(Comment.created_at.desc()).paginate(page=page, per_page=10)\n return render_template('admin.html', mode=mode, users=users, comments=comments)\n\n\n@auth. 
route('/delete_user/<int:id>')\n@login_required\ndef delete_user(id):\n if int(current_user.usergroup_id) != 1:\n abort(403)\n user = User.query.get(id)\n if user:\n db.session.delete(user)\n db.session.commit()\n return redirect(url_for('auth.admin', mode='users'))\n\n\n@auth.route('/delete_comment/<int:id>')\n@login_required\ndef delete_comment(id):\n if int(current_user.usergroup_id) != 1:\n abort(403)\n comment = Comment.query.get(id)\n if comment:\n db.session.delete(comment)\n db.session.commit()\n return redirect(url_for('auth.admin', mode='comments'))\n","repo_name":"korzhix/Tieworld","sub_path":"project/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"27724579708","text":"def solution(n, k):\n prime = []\n answer = []\n num = make_n_binary(k, n)\n make_numbers(num, prime)\n\n print(prime)\n for p in prime:\n if p == '':\n pass\n elif p == '2':\n answer.append(p)\n else:\n if is_prime_number(int(p)):\n answer.append(p)\n return len(answer)\n\n\ndef make_numbers(num, prime):\n for i in range(len(num)):\n if num[i] == '0':\n if len(prime) < 1:\n prime.append(num[:i])\n num = num[i:]\n break\n\n for i in range(len(num)):\n if num[i] == '0':\n for j in range(i + 1, len(num)):\n if j == len(num) - 1:\n prime.append(num[i+1:])\n break\n\n elif num[j] == '0':\n prime.append(num[i + 1:j])\n break\n\ndef is_prime_number(n):\n if n < 2:\n return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True\n\n\nsolution(437674, 3)\nsolution(110011, 10)","repo_name":"HEUMMAN/codingTest","sub_path":"programmers/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"8010992798","text":"import argparse\nimport traceback\nimport sys\nimport torch\nfrom torch import nn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torchvision.transforms import Compose, CenterCrop, Resize, Normalize, ToTensor\nimport numpy as np\nimport os\nfrom pathlib import Path\n\nfrom models import Generator, Discriminator\nfrom datasets import GrayscaleImageFolder\n\n\nROOT = Path(__file__).parent.resolve()\nIMAGE_SIZE = 128\nIMAGE_CHANNELS = 1\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data-folder\", type=str)\n parser.add_argument(\"--experiment-name\", type=str)\n parser.add_argument(\"--batch-size\", type=int, default=64)\n parser.add_argument(\"--latent-vector-length\", type=int, default=100)\n parser.add_argument(\"--generator-features\", type=int, default=64)\n parser.add_argument(\"--discriminator-features\", type=int, default=64)\n parser.add_argument(\"--filename-filter\", type=str, default='')\n parser.add_argument(\"--image-extension\", type=str, default='png')\n parser.add_argument(\"--epochs\", type=int, default=10)\n parser.add_argument(\"--lr\", type=float, default=0.0002)\n parser.add_argument(\"--beta\", type=float, default=0.7)\n try:\n return parser.parse_args()\n except SystemExit as err:\n traceback.print_exc()\n sys.exit(err.code)\n\n\ndef train(args):\n transforms = Compose([\n Resize(IMAGE_SIZE),\n CenterCrop(IMAGE_SIZE),\n ToTensor(),\n Normalize((0.5,),(0.5,)) \n ])\n\n dataset = 
GrayscaleImageFolder(root=args.data_folder,\n filename_filter=args.filename_filter,\n image_extension=args.image_extension,\n transform=transforms)\n\n dataloader = torch.utils.data.DataLoader(dataset,\n batch_size=args.batch_size,\n shuffle=True)\n\n generator = Generator(in_channels=args.latent_vector_length,\n feature_channels=args.generator_features,\n out_channels=IMAGE_CHANNELS).to(DEVICE)\n\n discriminator = Discriminator(in_channels=IMAGE_CHANNELS,\n feature_channels=args.discriminator_features,\n input_size=IMAGE_SIZE).to(DEVICE)\n\n criterion = nn.BCELoss()\n\n real_label = 1.\n fake_label = 0.\n\n optimizerD = optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.beta, 0.999))\n optimizerG = optim.Adam(generator.parameters(), lr=args.lr, betas=(args.beta, 0.999))\n\n if not os.path.exists(f'{ROOT}/runs/{args.experiment_name}'):\n os.makedirs(f'{ROOT}/runs/{args.experiment_name}')\n\n G_losses = []\n D_losses = []\n\n print(\"Starting Training Loop...\")\n for epoch in range(args.epochs):\n for i, data in enumerate(dataloader, 0):\n # Update the discriminator: first on an all-real batch, then on an all-fake batch\n discriminator.zero_grad()\n real_batch = data.to(DEVICE)\n b_size = real_batch.size(0)\n label = torch.full((b_size,), real_label, dtype=torch.float, device=DEVICE)\n\n output = discriminator(real_batch).view(-1)\n\n errD_real = criterion(output, label)\n errD_real.backward()\n\n noise = torch.randn(b_size, args.latent_vector_length, 1, 1, device=DEVICE)\n fake = generator(noise)\n label.fill_(fake_label)\n\n output = discriminator(fake.detach()).view(-1)\n\n errD_fake = criterion(output, label)\n errD_fake.backward()\n errD = errD_real + errD_fake\n optimizerD.step()\n\n # Update the generator: push the discriminator towards labelling its fakes as real\n generator.zero_grad()\n label.fill_(real_label)\n\n output = discriminator(fake).view(-1)\n\n errG = criterion(output, label)\n errG.backward()\n optimizerG.step()\n \n if i % 50 == 0:\n print('[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f' % (epoch, args.epochs, i, len(dataloader), errD.item(), errG.item()))\n \n G_losses.append(errG.item())\n D_losses.append(errD.item())\n \n torch.jit.save(torch.jit.script(generator), f'{ROOT}/runs/{args.experiment_name}/generator_last.pt')\n torch.jit.save(torch.jit.script(discriminator), f'{ROOT}/runs/{args.experiment_name}/discriminator_last.pt')\n\n np.save(f'{ROOT}/runs/{args.experiment_name}/generator_losses.npy', np.array(G_losses))\n np.save(f'{ROOT}/runs/{args.experiment_name}/discriminator_losses.npy', np.array(D_losses))\n\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n train(args)\n","repo_name":"RobertZsoltSzabo/DL8TQ2_KepalkotoDiagnosztikaBeadando","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70639674318","text":"import pandas\n\ndata = pandas.read_csv(\"nato_phonetic_alphabet.csv\")\n\nword_dict = {row.letter: row.code for (index, row) in data.iterrows()}\n\ndef nato():\n answer = input(\"Give a String: \").upper()\n try:\n word_list = [word_dict[letter] for letter in answer]\n except KeyError:\n print(\"Please choose from the words in the dictionary.\")\n nato()\n else:\n print(word_list)\n\n\nnato()\n","repo_name":"farrukhkhalid1/100Days","sub_path":"Day26/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"31811797993","text":"import os\nfrom logging import DEBUG\nfrom logging.config import dictConfig\nfrom picamera_server.config.config 
import FLASK_INSTANCE_FOLDER, APP_ENV_TESTING, APP_ENV_DEVELOPMENT\n\n\nLOGGING_FILE_PATH = os.path.join(FLASK_INSTANCE_FOLDER, 'logs', 'logfile.log')\nLOGGING_FORMATTER = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\nLOGGING_CONF = {\n 'version': 1,\n 'formatters': {\n 'default': {\n 'format': LOGGING_FORMATTER,\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'default'\n },\n 'file-handler': {\n 'class': 'logging.handlers.RotatingFileHandler',\n 'formatter': 'default',\n 'filename': LOGGING_FILE_PATH,\n 'maxBytes': 10*1024*1024,\n 'backupCount': 50,\n 'level': DEBUG\n }\n },\n 'root': {\n 'level': DEBUG,\n 'handlers': ['console', 'file-handler']\n }\n}\n\n\ndef set_up_logging(app_env: str = APP_ENV_DEVELOPMENT) -> None:\n \"\"\"\n Set up logging using dictConfig\n\n :param app_env: App env type\n :return:\n \"\"\"\n os.makedirs(os.path.dirname(LOGGING_FILE_PATH), exist_ok=True)\n\n logging_conf_dict = LOGGING_CONF.copy()\n\n # Disable file logging when testing\n if app_env == APP_ENV_TESTING:\n logging_conf_dict['root']['handlers'] = ['console']\n\n dictConfig(logging_conf_dict)\n","repo_name":"gaberrini/raspberry-cam","sub_path":"flask_server/picamera_server/config/logging_config.py","file_name":"logging_config.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"70107377998","text":"# Number Served\r\n\r\n# Restaurant\r\nclass Restaurant:\r\n\t\"\"\"Modeling a restaurant\"\"\"\r\n\tdef __init__(self,restaurant_name,cuisine_type):\r\n\t\t\"\"\"Initialize the restaurant name and cuisine type attributes\"\"\"\r\n\t\tself.restaurant = restaurant_name\r\n\t\tself.cuisine = cuisine_type\r\n\t\tself.number_served = 0\r\n\r\n\tdef describe_restaurant(self):\r\n\t\t\"\"\"Print the restaurant name and cuisine type\"\"\"\r\n\t\tprint(f'\\nThe restaurant\\'s name is: {self.restaurant.title()}\\n'\r\n\t\t\t\tf'and they specialise in {self.cuisine.title()} cuisine.')\r\n\r\n\tdef open_restaurant(self):\r\n\t\tprint(f'{self.restaurant.title()} is OPEN for business!')\r\n\r\n\tdef read_served(self):\r\n\t\tprint(f'\\n{self.restaurant.title()} has served {self.number_served} customers. Wow!')\r\n\r\n\tdef set_number_served(self,num_customers):\r\n\t\t\"\"\"Set the number of customers served to the given amount\"\"\"\r\n\t\tself.number_served = num_customers\r\n\r\n\tdef increment_served(self,served):\r\n\t\t\"\"\"Add more customers to number_served every time this method is called\"\"\"\r\n\t\tself.number_served += served\r\n\t\tprint(f'\\n{self.restaurant.title()} has served {self.number_served} customers today. 
Thank you!\\n')\r\n\r\n\r\nrestaurant1 = Restaurant('mugg & bean','american bistro')\r\nrestaurant1.number_served = 154\r\n\r\nrestaurant1.describe_restaurant()\r\nrestaurant1.read_served()\r\n\r\n# Changing the value and printing\r\nrestaurant1.number_served = 205\r\nrestaurant1.read_served()\r\n\r\n# Calling the method and passing the parameters\r\nrestaurant1.set_number_served(335)\r\nrestaurant1.read_served()\r\nrestaurant1.increment_served(300)\r\n\r\n\r\n# Login Attempts\r\n# Users\r\nclass User:\r\n\t\"\"\"Modeling a User Profile\"\"\"\r\n\tdef __init__(self,first_name,last_name,gender,race,m_status,profession):\r\n\t\t\"\"\"Initialize all the relevant attributes\"\"\"\r\n\t\tself.f_name = first_name\r\n\t\tself.l_name = last_name\r\n\t\tself.status = m_status\r\n\t\tself.prof = profession\r\n\t\tself.gender = gender\r\n\t\tself.race = race\r\n\t\tself.login_attempts = 0\r\n\r\n\tdef greet_user(self):\r\n\t\t\"\"\"Print a personalised welcome message\"\"\"\r\n\t\tprint(f'\\nWelcome {self.f_name.title()}, great to have you back!')\r\n\r\n\tdef describe_user(self):\r\n\t\t\"\"\"Print the user's profile\"\"\"\r\n\t\tprint(f'\\nUser Info Summary:\\n'\r\n\t\t\t\tf'\\tFirst Name:\\t\\t{self.f_name.title()}\\n'\r\n\t\t\t\tf'\\tLast Name:\\t\\t{self.l_name.title()}\\n'\r\n\t\t\t\tf'\\tGender:\\t\\t\\t{self.gender.upper()}\\n'\r\n\t\t\t\tf'\\tRace:\\t\\t\\t{self.race.title()}\\n'\r\n\t\t\t\tf'\\tMarital Status:\\t{self.status.title()}\\n'\r\n\t\t\t\tf'\\tProfession:\\t\\t{self.prof.title()}'\r\n\t\t\t\t)\r\n\r\n\tdef read_login_attempts(self):\r\n\t\tprint(f'\\n\\tUnsuccessful login attempts: {self.login_attempts}')\r\n\r\n\tdef increment_login_attempts(self,attempt):\r\n\t\tself.login_attempts += attempt\r\n\r\n\tdef reset_login_attempts(self):\r\n\t\tself.login_attempts = 0\r\n\t\tprint(f'\\n\\tYour login attempts have been reset to: {self.login_attempts}')\r\n\r\nuser1 = User('anna','levine','f','caucasian','single','developer')\r\n\r\nuser1.greet_user()\r\nuser1.describe_user()\r\nuser1.increment_login_attempts(1)\r\nuser1.increment_login_attempts(5)\r\nuser1.read_login_attempts()\r\nuser1.reset_login_attempts()","repo_name":"caylemh/Python","sub_path":"chapter_9/pg234_TIY.py","file_name":"pg234_TIY.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"79915084","text":"from django.urls import path, include\nfrom . 
import views\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework_nested import routers\n\nurlpatterns = [\n path('products/', views.ProductsList.as_view()),\n path('products/search/', views.search),\n path('checkout/', views.checkout),\n path('orders/', views.OrdersList.as_view()),\n path('confirm-payment/', views.payment_response),\n path('products/<str:category>/<int:pk>/', views.ProductDetail.as_view()),\n path('products/<str:category>/', views.CategoryDetail.as_view()),\n path('category/', views.CategoryList.as_view(),)\n]","repo_name":"tobest2/ecommerce","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"73672960078","text":"#!/usr/bin/env python3\n\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport os\nimport string\nimport sys\n\nfrom datetime import datetime\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.ml.stat import Correlation\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.sql import SparkSession\n\ndef correlate(uri1, uri2, conf):\n spark = SparkSession.builder \\\n .config(conf=conf) \\\n .getOrCreate()\n\n df1 = spark.read.format(\"csv\").options(header=True, inferschema=True).load(uri1)\n df2 = spark.read.format(\"csv\").options(header=True, inferschema=True).load(uri2)\n\n df1.printSchema()\n df2.printSchema()\n\n \"\"\"\n For Spearman, a rank correlation, we need to create an RDD[Double] for each column and sort it\n in order to retrieve the ranks and then join the columns back into an RDD[Vector], which is fairly costly.\n Cache the input Dataset before calling corr with method = ‘spearman’ to avoid recomputing the common lineage.\n \"\"\"\n # join 2 datasets and ignore first resolution columns\n joined = df1.join(df2, [\"temp_res\", \"spat_res\"], 'inner')\n\n feature_types = joined.dtypes[2:]\n # print(feature_types)\n\n # drop non numeric features just in case\n num_feature_types = filter(lambda t: t[1] == \"int\" or t[1] == \"double\" or t[1] == \"float\", feature_types)\n features = [f_t[0] for f_t in num_feature_types]\n # print(features)\n\n joined = joined.select(features)\n joined.printSchema()\n\n # assemble the Vectors for Correlation.corr(), np.array is equivalent to dense vectors\n vecAssembler = VectorAssembler(\n inputCols=features,\n outputCol=\"features\"\n )\n joinedVec = vecAssembler.transform(joined)\n spearmanCorr = Correlation.corr(joinedVec, 'features', method='spearman').collect()[0][0]\n\n # turn into pandas dataframe\n spearmanCorr = spearmanCorr.toArray()\n print(spearmanCorr)\n\n # prepare and write correlation result\n out_dir = spark.conf.get(\"output\")\n out_dir = \"correlations/\" + out_dir\n print(\"output directory is: \" + out_dir)\n\n pandasDF = pd.DataFrame(spearmanCorr, index=features, columns=features)\n pandasDF.to_csv(out_dir)\n\n spark.stop()\n\ndef read_args():\n \"\"\"\n Argument parser for datasets and their corresponding headers\n \"\"\"\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\"-input\", dest=\"input_files\", nargs=\"+\", required=True,\n type=str, help=\"2 aggregated datasets\")\n\n arg_parser.add_argument(\"-output\", dest=\"output_dir\", nargs=1, required=True,\n type=str, help=\"correlation output directory\")\n\n args = arg_parser.parse_args()\n return args\n\ndef setup():\n args = read_args()\n uri1, uri2 = args.input_files\n out_dir = args.output_dir[0]\n\n conf = SparkConf()\n 
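# SparkConf accepts arbitrary key/value pairs; anything set here surfaces in the\n # session's runtime config, which is how correlate() above reads the output\n # directory back with spark.conf.get(\"output\"). Same API, shown for clarity:\n # spark = SparkSession.builder.config(conf=conf).getOrCreate()\n # spark.conf.get(\"output\") # -> the value passed on the command line via -output\n 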
conf.setAppName(\"CS6513 project correlation\") \\\n .set(\"output\", out_dir)\n\n return uri1, uri2, conf\n\nif __name__ == \"__main__\":\n uri1, uri2, conf = setup()\n correlate(uri1, uri2, conf)\n","repo_name":"yuzheng38/spark","sub_path":"correlate.py","file_name":"correlate.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"24651542541","text":"\"\"\"Data Anonymizer configurator\n\nDocumentation in generator.py\n\"\"\"\n\n# Imports\nfrom shutil import copyfile, rmtree\nfrom json import dump, load\nfrom os import walk, path, mkdir, getcwd\nfrom random import random\nfrom modules.Number import Number\nfrom modules.Boolean import Boolean\nfrom modules.String import String\nfrom modules.File import File\nfrom modules.CustomJSONEncoder import CustomJSONEncoder\n\nclass Configurator:\n\n def __init__(self):\n self.search_configs()\n\n\n def search_configs(self):\n '''Import available configurations\n '''\n self.__configs = {}\n for root, dirs, files in walk(getcwd() + '/config/', topdown=False):\n for name in dirs:\n if name != '__pycache__':\n with open(getcwd() + '/config/' + name + '/' + name + '.json', 'r') as f:\n self.__configs[name] = self.CustomJSONDecoder(load(f))\n\n\n def print_configs(self):\n '''Print the list of available configurations\n '''\n for key in self.__configs.keys():\n st = key + ' -- '\n for col in self.__configs[key].keys():\n st += col + ', '\n print(st[:-2])\n\n\n def use_config(self, config_name):\n '''Return the selected configuration\n '''\n if config_name in self.__configs.keys():\n return self.__configs[config_name]\n raise Exception('Cannot find the config \"' + config_name + '\"')\n\n\n def export(self):\n pass\n\n \n def CustomJSONDecoder(self, json_obj):\n '''Decode a json configuration \n '''\n columns = {}\n for k, v in json_obj.items():\n if v['type'] == 'File':\n columns[k] = File(v['file_name'], v['file_del'], v['ignore_nl'])\n elif v['type'] == 'Number':\n columns[k] = Number(v['type_n'], v['max_n'], v['min_n'])\n elif v['type'] == 'Boolean':\n columns[k] = Boolean(v['option_1'], v['option_2'])\n elif v['type'] == 'String':\n columns[k] = String(v['max_length'], v['min_length'], v['regex'])\n else:\n raise Exception('Could not import configuration')\n return columns\n\n\n def __edit(self, config_name, config):\n cmd = ''\n while cmd == '' or 'exit' not in cmd.lower():\n print('\\nAvailable columns:')\n for col in config.keys():\n print(' - ' + col + ', type: ' + config[col].type)\n print('''\\nType:\n edit # To edit the selected column\n new # To create a new column\n del # To delete the selected column\n exit # To save the configuration and exit''')\n cmd = input('> ')\n if 'edit' in cmd.lower():\n pass\n elif 'new' in cmd.lower():\n col_name, col = self.__new_column(config_name)\n if col is not None:\n config[col_name] = col\n elif 'del' in cmd.lower():\n if cmd.split(' ')[1] not in config.keys():\n continue\n print('Deleting column...')\n del config[cmd.split(' ')[1]]\n if len(config) == 0:\n print('No more columns in this configuration. 
Deleting...')\n rmtree(getcwd() + '/config/' + config_name)\n return\n elif 'exit' in cmd.lower():\n break\n else:\n cmd = input('> ')\n dump(config, open(getcwd() + '/config/' + config_name + '/' + config_name + '.json', 'w'), cls=CustomJSONEncoder, indent=4)\n print('Configuration successfully saved!')\n return\n\n\n def create_configuration(self, config_name):\n '''Interactive and guided configuration\n '''\n # Check if configuration already exists\n if config_name in self.__configs.keys():\n print('''A configuration called \"''' + config_name + '''\" already exists!\n[1] Edit\n[2] Replace\n[0] Exit''')\n selection = input('Select an option: ')\n while selection == '' or selection not in ['0', '1', '2']:\n selection = input('Select an option: ')\n if selection == '0':\n return\n elif selection == '1':\n self.__edit(config_name, self.__configs[config_name])\n return\n elif selection == '2':\n # Overwrite configuration\n rmtree(getcwd() + '/config/' + config_name)\n\n # Creating new configuration \n creating_columns = {}\n print('\\nCreating new configuration: ' + config_name)\n mkdir(getcwd() + '/config/' + config_name) \n\n while True:\n add_new = input('\\nAdd a new column? [Y/n] ')\n while add_new.upper() not in ['Y', 'N', '']:\n add_new = input('Add a new column? [Y/n] ')\n if add_new.upper() == 'N':\n break\n\n col_name, col = self.__new_column(config_name)\n if col is not None:\n creating_columns[col_name] = col\n\n if len(creating_columns) == 0:\n print('No column has been created. Cannot save the configuration')\n rmtree(getcwd() + '/config/' + config_name)\n return\n \n dump(creating_columns, open(getcwd() + '/config/' + config_name + '/' + config_name + '.json', 'w'), cls=CustomJSONEncoder, indent=4)\n print('Configuration successfully created!')\n return\n\n\n def __new_column(self, config_name):\n # Creating new column\n col_name = input('\\nInsert the name of the column: ')\n while col_name == '':\n col_name = input('Insert the name of the column: ')\n print('''What is the type of the column?\n[1] Import from file\n[2] Number\n[3] Boolean\n[4] String\n[0] Exit''')\n col_type = input('Select an option: ')\n while col_type == '' or col_type not in ['0', '1', '2', '3', '4']:\n col_type = input('Select an option: ')\n try:\n ret = None\n # Column type: import from file\n if col_type == '1':\n ret = File()\n ret.config(config_name) \n # Column type: number\n elif col_type == '2': \n ret = Number()\n ret.config() \n # Column type: boolean\n elif col_type == '3':\n ret = Boolean()\n ret.config() \n # Column type: string\n elif col_type == '4':\n ret = String()\n ret.config()\n except Exception as e:\n print('Error occurred:', e)\n return col_name, ret ","repo_name":"sampozz/csv-generator","sub_path":"config/configurator.py","file_name":"configurator.py","file_ext":"py","file_size_in_byte":6778,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"3415415714","text":"import math\n\nnumbers = [1, 5, 2, 7, 1, 9, 3, 8, 5, 9]\n\nR = max(numbers) - min(numbers)\n\naverage = sum(numbers) / len(numbers)\nD = math.sqrt(sum([(average - n)**2 for n in numbers]) / (len(numbers) - 1))\n\nprint(\"Range:\", R)\nprint(\"Standard deviation:\", round(D, 2))\n\n\nprint(\"Confirmation of dispersion properties:\")\n\nc = 3\n\ntest_one = [ n + c for n in numbers ]\naverage_one = sum(test_one) / len(test_one)\none_D = math.sqrt(sum([(average_one - n)**2 for n in test_one]) / (len(test_one) - 1))\n\ntest_two = [ n * c for n in numbers ]\naverage_two = 
sum(test_two) / len(test_two)\ntwo_D = math.sqrt(sum([(average_two - n)**2 for n in test_two]) / (len(test_two) - 1))\n\nprint(\"first property:\", math.isclose(one_D, D))\nprint(\"second property:\", math.isclose(two_D, D * c))","repo_name":"NeutrinoZh/python-and-statistics","sub_path":"MeasuresOfVariability/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"43346383562","text":"from PyQt5.Qt import QTextEdit,QPushButton,QWidget,QApplication,QThread,pyqtSignal,QComboBox,QLabel,QCheckBox\r\nimport sys\r\nfrom functools import partial\r\nimport importlib\r\nimport sip\r\nimport os\r\nimport re\r\nfrom urllib import parse\r\nimport threading\r\nimport time\r\nimport subprocess\r\nfrom create_dic import create_dic\r\nimport math\r\nimport random\r\n\r\nclass Worker(QThread):\r\n sinOut = pyqtSignal(str)\r\n\r\n def __init__(self, parent=None):\r\n super(Worker, self).__init__(parent)\r\n # Set the working state and the initial num value\r\n self.working = False\r\n self.num = 0\r\n\r\n def __del__(self):\r\n # Change the thread state and terminate the thread\r\n self.working = False\r\n self.wait()\r\n print('destroyed')\r\n\r\n def run(self):\r\n print('run')\r\n while True:\r\n time.sleep(2)\r\n if self.working:\r\n print('??')\r\n # Build the text\r\n file_str = 'File index{0}'.format(self.num)\r\n self.num += 1\r\n # Emit the signal\r\n self.sinOut.emit(file_str)\r\n # Let the thread sleep for 2 seconds\r\n self.working=False\r\n self.__del__()\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass Window(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.setWindowTitle('File inclusion dictionary generator')\r\n self.resize(500, 400)\r\n self.bt_list=[]\r\n self.tmp = os.path.dirname(os.path.realpath(__file__)) + '/tmp'\r\n if not os.path.exists(self.tmp):\r\n os.mkdir(self.tmp)\r\n\r\n self.response=''\r\n self.create_dic=create_dic() # Instantiate the create_dic class\r\n self.s = set()\r\n self.txt = set()\r\n self.result_file=''\r\n self.run_set=set()\r\n self.level='5'\r\n self.base_password='default.txt'\r\n self.setup_ui()\r\n\r\n\r\n\r\n def thread_get_banner(self,keys):\r\n generate_key=''\r\n self.lable.setText('Generating dictionary...')\r\n generate_keys=self.create_dic.generate_lfi_dic(self.level,self.base_password,keys,self.run_set)\r\n # print(generate_keys)\r\n self.lable.setText('Dictionary generated successfully...')\r\n # for key in generate_keys:\r\n # generate_key+=key.strip()+'\\n'\r\n # generate_key=''.join(generate_keys)\r\n # print(generate_key)\r\n\r\n self.response=generate_keys\r\n print(self.response)\r\n if self.response:\r\n name=''\r\n for i in self.usernames.split('\\n'):\r\n name+=i.strip()+','\r\n name=name.rstrip(',')\r\n name=name+'_'+self.base_password.replace('.txt','')\r\n self.lable.setText('Writing to disk...')\r\n try:\r\n with open(self.tmp+'/'+str(time.strftime(\"%m.%d\", time.localtime()))+'+{}.txt'.format(name),'w',encoding='utf-8') as f:\r\n f.writelines(self.response)\r\n except:\r\n with open(self.tmp+'/'+str(time.strftime(\"%m.%d-%H.%M\", time.localtime()))+'.txt','w',encoding='utf-8') as f:\r\n f.writelines(self.response)\r\n self.lable.setText('Refreshing...')\r\n self.flush.working = True\r\n\r\n def flush_ui(self,response):\r\n\r\n self.result_button()\r\n self.lable.setText('Done, click a file to open it')\r\n\r\n def setup_ui(self):\r\n\r\n\r\n self.lable=QLabel(self)\r\n self.lable.resize(150,50)\r\n self.lable.move(5,1)\r\n self.lable.setText('')\r\n\r\n\r\n\r\n ql_b = QTextEdit(self)\r\n ql_b.move(0, 50)\r\n ql_b.resize(200,150)\r\n 
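# Multi-line input box for the target file names, one per line; the placeholder\r\n # text below documents the expected input format for the user.\r\n 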
ql_b.setPlaceholderText(\"Enter file names, separated by newlines\\nFor example:\\nindex.php\\n/etc/passwd\\n\\n[*] index.php will be cycled through the dictionary chosen in the dropdown on the right (common server file names) - the generated lfi dictionary can get very large; default.txt is empty and can be customized in config/static_dic\")\r\n\r\n rg_lab=QLabel(self)\r\n rg_lab.setText('Select bypass methods with the checkboxes (results are unioned):')\r\n rg_lab.move(0,200)\r\n rg_lab.resize(200,20)\r\n\r\n self.check_1 = QCheckBox('http', self)\r\n self.check_1.move(0,220)\r\n self.check_1.stateChanged.connect(self.choose)\r\n self.check_2 = QCheckBox('file', self)\r\n self.check_2.move(0, 240)\r\n self.check_2.stateChanged.connect(self.choose)\r\n self.check_3 = QCheckBox('compress', self)\r\n self.check_3.move(0, 260)\r\n self.check_3.stateChanged.connect(self.choose)\r\n self.check_4 = QCheckBox('php', self)\r\n self.check_4.move(0, 280)\r\n self.check_4.stateChanged.connect(self.choose)\r\n self.check_5 = QCheckBox('data', self)\r\n self.check_5.move(0, 300)\r\n self.check_5.stateChanged.connect(self.choose)\r\n self.check_6 = QCheckBox('去后缀', self) # label doubles as a key consumed by create_dic, so it is kept verbatim\r\n self.check_6.move(70, 220)\r\n self.check_6.stateChanged.connect(self.choose)\r\n self.check_7 = QCheckBox('双写', self)\r\n self.check_7.move(70, 240)\r\n self.check_7.stateChanged.connect(self.choose)\r\n self.check_8 = QCheckBox('大小写', self)\r\n self.check_8.move(70, 260)\r\n self.check_8.stateChanged.connect(self.choose)\r\n self.check_9 = QCheckBox('后缀绕过', self)\r\n self.check_9.move(130, 220)\r\n self.check_9.stateChanged.connect(self.choose)\r\n\r\n\r\n quanxuan_bt=QPushButton(self)\r\n quanxuan_bt.setText('Select all')\r\n quanxuan_bt.resize(50,30)\r\n quanxuan_bt.move(0,300)\r\n quanxuan_bt.clicked.connect(self.quanxuan)\r\n\r\n fanxuan_bt = QPushButton(self)\r\n fanxuan_bt.setText('Clear all')\r\n fanxuan_bt.resize(50, 30)\r\n fanxuan_bt.move(50, 300)\r\n fanxuan_bt.clicked.connect(self.fanxuan)\r\n\r\n level_lab = QLabel(self)\r\n level_lab.setText('Depth:')\r\n level_lab.move(110, 300)\r\n level_lab.resize(50, 30)\r\n self.level_comb=QComboBox(self)\r\n self.level_comb.resize(40,30)\r\n self.level_comb.move(155,300)\r\n self.level_comb.addItems(['0','1','2','3','4','5','6','7','8','9','10'])\r\n self.level_comb.setCurrentIndex(5)\r\n self.level_comb.currentIndexChanged[str].connect(self.level_change)\r\n\r\n\r\n self.ql_c = QTextEdit(self)\r\n # self.ql_c.setText('Still needs manual testing:\\n1. php://input (POST method)\\n2. remote file inclusion: http://\\n3. upload combined with inclusion: zip:// compress.zlib:// compress.bzip2://')\r\n self.ql_c.move(205, 50)\r\n self.ql_c.resize(300, 350)\r\n\r\n\r\n\r\n btn = QPushButton(self)\r\n btn.setText('go')\r\n btn.resize(100,40)\r\n btn.move(150, 10)\r\n\r\n bt2=QComboBox(self)\r\n bt2.move(250,11)\r\n bt2.resize(250,38)\r\n bt2.setSizeAdjustPolicy(QComboBox.AdjustToContentsOnFirstShow)\r\n files=os.listdir(os.path.dirname(os.path.realpath(__file__))+'/config/static_dic')\r\n bt2.addItems(files)\r\n bt2.setCurrentIndex(-1)\r\n\r\n self.flush=Worker()\r\n self.flush.sinOut.connect(self.flush_ui)\r\n self.result_button()\r\n\r\n def go():\r\n self.result_button()\r\n self.usernames = ql_b.toPlainText()\r\n self.flush.start()\r\n self.flush.working = False\r\n t=threading.Thread(target=self.thread_get_banner,args=(self.usernames.split('\\n'),))\r\n t.start()\r\n\r\n btn.clicked.connect(go)\r\n bt2.currentIndexChanged[str].connect(self.print_value)\r\n\r\n def print_value(self,str):\r\n self.base_password=str\r\n\r\n def choose(self):\r\n self.run_set.clear()\r\n choice_1 = self.check_1.text() if self.check_1.isChecked() else ''\r\n choice_2 = self.check_2.text() if self.check_2.isChecked() else ''\r\n choice_3 = self.check_3.text() if self.check_3.isChecked() else ''\r\n choice_4 = self.check_4.text() 
if self.check_4.isChecked() else ''\r\n choice_5 = self.check_5.text() if self.check_5.isChecked() else ''\r\n choice_6 = self.check_6.text() if self.check_6.isChecked() else ''\r\n choice_7 = self.check_7.text() if self.check_7.isChecked() else ''\r\n choice_8 = self.check_8.text() if self.check_8.isChecked() else ''\r\n choice_9 = self.check_9.text() if self.check_9.isChecked() else ''\r\n\r\n self.run_set.add(choice_1)\r\n self.run_set.add(choice_2)\r\n self.run_set.add(choice_3)\r\n self.run_set.add(choice_4)\r\n self.run_set.add(choice_5)\r\n self.run_set.add(choice_6)\r\n self.run_set.add(choice_7)\r\n self.run_set.add(choice_8)\r\n self.run_set.add(choice_9)\r\n\r\n\r\n\r\n def quanxuan(self):\r\n self.check_1.setChecked(True)\r\n self.check_2.setChecked(True)\r\n self.check_3.setChecked(True)\r\n self.check_4.setChecked(True)\r\n self.check_5.setChecked(True)\r\n self.check_6.setChecked(True)\r\n self.check_7.setChecked(True)\r\n self.check_8.setChecked(True)\r\n self.check_9.setChecked(True)\r\n\r\n def fanxuan(self):\r\n self.check_1.setChecked(False)\r\n self.check_2.setChecked(False)\r\n self.check_3.setChecked(False)\r\n self.check_4.setChecked(False)\r\n self.check_5.setChecked(False)\r\n self.check_6.setChecked(False)\r\n self.check_7.setChecked(False)\r\n self.check_8.setChecked(False)\r\n self.check_9.setChecked(False)\r\n\r\n def level_change(self,str):\r\n self.level=str\r\n\r\n def fz(self):\r\n clipboard = QApplication.clipboard()\r\n clipboard.setText(self.result_file)\r\n\r\n def pop_text(self,file):\r\n os.popen(self.tmp+'/'+file)\r\n\r\n def del_bt(self,file):\r\n os.remove(self.tmp+'/'+file)\r\n self.result_button()\r\n\r\n def result_button(self):\r\n files=os.listdir(self.tmp)\r\n\r\n n=0\r\n try:\r\n if self.bt_list:\r\n print(self.bt_list)\r\n for bt in self.bt_list:\r\n print('deleting', bt.text())\r\n sip.delete(bt)\r\n self.bt_list.clear()\r\n except:\r\n self.bt_list.clear()\r\n\r\n for file in files:\r\n size=(os.path.getsize(self.tmp+'/'+file))/1024/1024\r\n size='%.1fM' % size\r\n exec('btt{}=QPushButton(self.ql_c)'.format(n))\r\n exec('btt{}.setText(\"{} {}\")'.format(n,file,size))\r\n exec('btt{}.resize(260,40)'.format(n))\r\n exec('btt{}.move(0,40*{})'.format(n,n))\r\n exec('btt{}.clicked.connect(partial(self.pop_text,\"{}\"))'.format(n,file))\r\n exec('btt{}.show()'.format(n))\r\n exec('self.bt_list.append(btt{})'.format(n))\r\n\r\n exec('btd{}=QPushButton(self.ql_c)'.format(n))\r\n exec('btd{}.setText(\"{}\")'.format(n,'delete'))\r\n exec('btd{}.resize(40,40)'.format(n))\r\n exec('btd{}.move(260,40*{})'.format(n,n))\r\n exec('btd{}.clicked.connect(partial(self.del_bt,\"{}\"))'.format(n,file))\r\n exec('btd{}.show()'.format(n))\r\n exec('self.bt_list.append(btd{})'.format(n))\r\n\r\n\r\n n += 1\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n window = Window()\r\n window.show()\r\n sys.exit(app.exec_())","repo_name":"StormEyePro/lfi-dic-creater","sub_path":"lfi_gui.py","file_name":"lfi_gui.py","file_ext":"py","file_size_in_byte":11144,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"29"} +{"seq_id":"17800255183","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 27 17:25:42 2022\n\n@author: cuiyiyuan\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport glob\n\n#list all csv files only\ncsv_files = glob.glob('*.{}'.format('csv'))\n\ncity_frames = []\n#append all files together\nfor file in csv_files:\n df_temp 
= pd.read_csv(file)\n city_frames.append(df_temp)\n \ndf = pd.concat(city_frames) \n\n#strip columns\ncol = {t:t.strip().title() for t in df.columns}\ndf.rename(columns = col, inplace=True)\n\ndf.rename(columns={'Averagerent': 'Avg_Rent', 'Minrent': 'Min_Rent', 'Maxrent':'Max_Rent', 'Totalrentals':'Total Rentals'}, inplace=True)\ndf['Zip'] = df.Zip.astype(str).str.zfill(5)\ndf['City_Zip'] = df[['City','Zip']].agg('_'.join, axis=1)\ndf = df[df.Bedrooms<=5]\n\ndf.to_csv('MasterFile.csv', index=False)\n","repo_name":"YiyuanCui88/MA705-Dashboard","sub_path":"Data_Wrangling/wrangling.py","file_name":"wrangling.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"70794981518","text":"from tkinter import *\r\nfrom tkinter import filedialog, messagebox\r\nfrom PIL import ImageTk\r\nfrom main import *\r\nimport cv2\r\nfrom pathlib import Path\r\n\r\n\r\nroot = Tk()\r\n\r\n\r\ndef wybierz():\r\n filename = filedialog.askopenfilenames(parent=root, initialdir=\"/\", title=\"Images\",\r\n filetypes=[(\"PNG, JPEG, BMP\", \"*.png; *.jpg; *.jpeg; *.bmp\")])\r\n\r\n try:\r\n clear = 255 * np.ones((300, 300), dtype=np.uint8)\r\n clear = Image.fromarray(clear)\r\n clear = clear.resize((300, 300), Image.ANTIALIAS)\r\n clear_photo = ImageTk.PhotoImage(clear)\r\n clear_label = Label(image=clear_photo)\r\n clear_label.image = clear_photo\r\n clear_label.place(x=320, y=250)\r\n image = Image.open(filename[0])\r\n image = image.resize((300, 300), Image.ANTIALIAS)\r\n render = ImageTk.PhotoImage(image)\r\n img = Label(image=render)\r\n img.image = render\r\n img.place(x=10, y=250)\r\n global file\r\n file = filename[0]\r\n except IndexError:\r\n messagebox.showinfo(\"\", \"No image selected\")\r\n\r\n\r\ndef wektoryzacja():\r\n try:\r\n zmienna1 = int(e1.get())\r\n zmienna2 = float(e2.get())\r\n image = cv2.imread(file)\r\n nazwa = Path(file).resolve().stem\r\n folder = os.path.dirname(file)\r\n run(image, zmienna1, zmienna2, folder, nazwa)\r\n wynik = Image.open(folder + '/wektoryzacja/' + nazwa + '.png')\r\n image = wynik.resize((300, 300), Image.ANTIALIAS)\r\n render = ImageTk.PhotoImage(image)\r\n img = Label(image=render)\r\n img.image = render\r\n img.place(x=320, y=250)\r\n except NameError:\r\n messagebox.showwarning(\"\", \"No image selected\")\r\n\r\n\r\nroot.title('')\r\nroot.geometry(\"635x565\")\r\nroot.resizable(width=False, height=False)\r\nroot.iconbitmap('img/v.ico')\r\nroot.config(background=\"black\")\r\n\r\nA = 255 * np.ones((300, 300), dtype=np.uint8)\r\nim = Image.fromarray(A)\r\nim = im.resize((300, 300), Image.ANTIALIAS)\r\nim2 = ImageTk.PhotoImage(im)\r\nim3 = Label(image=im2)\r\nim3.image = im2\r\nim3.place(x=10, y=250)\r\n\r\nim22 = ImageTk.PhotoImage(im)\r\nim4 = Label(image=im2)\r\nim4.image = im2\r\nim4.place(x=320, y=250)\r\n\r\ntytul = Label(root, text=\"Vectorization algorithm\", width=80, height=2, fg=\"blue\",\r\n background=\"white\", font=(\"Arial\", 10, \"bold\"))\r\nbutton_wybierz = Button(root, text=\"Choose an image\", border=3, command=wybierz)\r\nbutton_wektoryzacja = Button(root, text=\"Vectorization\", border=3, command=wektoryzacja)\r\nl1 = Label(root, text=\"Gaussian blur:\", fg=\"white\", background=\"black\")\r\ne1 = Entry(root, width=6)\r\ne1.insert(END, '3')\r\nl2 = Label(root, text=\"Lower threshold:\", fg=\"white\", background=\"black\")\r\ne2 = Entry(root, width=6)\r\ne2.insert(END, '0.12')\r\ntytul.place(x=0, y=0)\r\nl1.place(x=380, y=80)\r\ne1.place(x=510, 
y=80)\r\nl2.place(x=380, y=105)\r\ne2.place(x=510, y=105)\r\nbutton_wektoryzacja.place(x=430, y=150)\r\nbutton_wybierz.place(x=115, y=125)\r\nroot.mainloop()\r\n","repo_name":"Vadner44/Wektoryzacja-grafiki-rastrowej","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"40535169397","text":"import tkinter as tk\r\nfrom tkinter import *\r\n\r\nclass main():\r\n def __init__(self):\r\n self.root = Tk()\r\n self.root.title(\"BodyMassIndex\")\r\n self.root.resizable(False, False)\r\n self.root.geometry(\"320x210\")\r\n self.BmiTitle=Label(self.root, font=(\"Courier\",20), text=\"Body Mass Index\")\r\n self.BmiTitle.pack(anchor=\"n\")\r\n self.r=IntVar()\r\n self.manBut=Radiobutton(self.root,text=\"Male\", variable=self.r,value=1,font=(\"Courier\",12)).place(x=60,y=30)\r\n self.WomanBut = Radiobutton(self.root, text=\"Female\", font=(\"Courier\",12), variable=self.r, value=2).place(x=180,y=30)\r\n heightlab = Label(self.root,font=(\"Courier\",12), text=\"Height(cm):\" )\r\n heightlab.place(x=35,y=60)\r\n vcmd = self.root.register(self.correct)\r\n self.heightInp = Entry(self.root,width=12,font=(\"Courier\",12), validate=\"key\",validatecommand=(vcmd,'%P'))\r\n self.heightInp.place(x=150, y=60)\r\n weightlab = Label(self.root,font=(\"Courier\",12), text=\"Weight(kg):\")\r\n weightlab.place(x=35,y=90)\r\n self.weightInp = Entry(self.root,width=12,font=(\"Courier\",12), validate=\"key\",validatecommand=(vcmd,'%P'))\r\n self.weightInp.place(x=150,y=90)\r\n calculateBut = Button(self.root,font=(\"Courier\",12), text=\"Calculate\")\r\n calculateBut.bind(\"