diff --git "a/4846.jsonl" "b/4846.jsonl" new file mode 100644--- /dev/null +++ "b/4846.jsonl" @@ -0,0 +1,1793 @@ +{"seq_id":"23593841171","text":"\"\"\"[Quiz] Teen drama with prime numbers\"\"\"\ndef main(num):\n '''main'''\n count = 0\n for i in range(1, num+1):\n if num%i == 0:\n count += 1\n if count == 2:\n print(\"Prime Number\")\n else:\n print(\"Not Prime Number\")\nmain(int(input()))\n","repo_name":"puun555/PrePro65","sub_path":"89 [Quiz] เรื่องวุ่นๆกับวัยรุ่นจำนวนเฉพาะ.py","file_name":"89 [Quiz] เรื่องวุ่นๆกับวัยรุ่นจำนวนเฉพาะ.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"th","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"40380646290","text":"#!/usr/bin/env python3\n\"\"\"Functional Python Programming\n\nChapter 5, Example Set 3\n\"\"\"\n# pylint: disable=wrong-import-position,reimported\n\nimport math\nfrom typing import Callable\nfrom typing import Optional, Any\n\nclass NullAware:\n def __init__(self, some_func: Callable[[Any], Any]) -> None:\n self.some_func = some_func\n def __call__(self, arg: Optional[Any]) -> Optional[Any]:\n return None if arg is None else self.some_func(arg)\n\nnull_log_scale = NullAware(math.log)\nnull_round_4 = NullAware(lambda x: round(x, 4))\n\ntest_NullAware = \"\"\"\n>>> some_data = [ 10, 100, None, 50, 60 ]\n>>> scaled = map( null_log_scale, some_data )\n>>> [null_round_4(v) for v in scaled]\n[2.3026, 4.6052, None, 3.912, 4.0943]\n\"\"\"\n\nfrom typing import Callable, Iterable\nclass Sum_Filter:\n __slots__ = [\"filter\", \"function\"]\n def __init__(self,\n filter_f: Callable[[Any], bool],\n func: Callable[[Any], float]) -> None:\n self.filter = filter_f\n self.function = func\n def __call__(self, iterable: Iterable) -> float:\n return sum(self.function(x) for x in iterable if self.filter(x))\n\ncount_not_none = Sum_Filter(lambda x: x is not None, lambda x: 1)\n\ntest_Sum_Filter = \"\"\"\n>>> some_data = [10, 100, None, 50, 60]\n>>> count_not_none(some_data)\n4\n\"\"\"\n\n\n__test__ = {\n \"test_NullAware\": test_NullAware,\n \"test_Sum_Filter\": test_Sum_Filter,\n}\n\ndef test():\n import doctest\n doctest.testmod(verbose=1)\n\nif __name__ == \"__main__\":\n #performace()\n test()\n","repo_name":"PacktPublishing/Functional-Python-Programming-Second-Edition","sub_path":"Chapter05/ch05_ex3.py","file_name":"ch05_ex3.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"71"} +{"seq_id":"41608268225","text":"from collections import defaultdict, deque\nimport re\n\n\"\"\"\nMy first idea was to do a brute force algorithm, finding all the permutations starting with the lowest amount of\nelements and then filter them out; it was quite a bad idea as I was getting tons of results and filtering them was\ntaking ages (left it for a couple of minutes and it was still checking combinations of 3; going up to combinations of\n1000 or more would have literally taken an era).\nThen I thought about how to apply a graph structure or a tree structure without much success, even though it seemed\nreally similar to a pathfinding algorithm.\nAfter a chat with my gf and some reading online, I got a kind of algorithm (below).\nI needed a quick way to look up songs starting/ending with a letter without doing the search every time, therefore I\nstored them in the class; then I created the Song class, as it was better to polish the song names and use them to avoid\nlowercase/uppercase mismatches. To get the shortest playlist it would probably be 
enough just to get the time from the song\n(so add an attribute to the Song class) and then sort the matches and the start and end sets by song duration, but I ran\nout of time half an hour ago already, as it took me a while to get this done, so that is not implemented\n\n**************************************************UPDATE***************************************************************\nMoved the complexity of the algorithm onto the creation of the graph, therefore I am not going over the recursion limit\n(as I was doing with the previous approach); used a graph structure, and Dijkstra is weighted with the duration of each\nsong.\nThe lookup dictionary is just used to retrieve the song object on the external call\n\"\"\"\n\n\nclass Graph:\n def __init__(self):\n self.nodes = set()\n self.edges = defaultdict(list)\n self.distances = {}\n\n def add_node(self, value):\n self.nodes.add(value)\n\n def add_edge(self, from_node, to_node, distance):\n self.edges[from_node].append(to_node)\n self.edges[to_node].append(from_node)\n self.distances[(from_node, to_node)] = distance\n\n\nclass Song:\n def __init__(self, id, name, duration):\n self.id = id\n self.duration = duration\n self.name = name\n self.clean_name = re.sub('[\\(\\)\\.,;:\"#\\\"\\'=-]', '', name).strip().rstrip().lower()\n\n @property\n def first_letter(self):\n return self.clean_name[0]\n\n @property\n def last_letter(self):\n return self.clean_name[-1]\n\n def __repr__(self):\n return 'Song #{} - {}'.format(self.id, self.name)\n\n\nclass Playlist:\n def __init__(self, songs):\n self.songs = [Song(s['id'], s['song'], s['duration']) for s in songs]\n self.lookup = {}\n self.graph = Graph()\n self._graph()\n\n def _graph(self):\n for song in self.songs:\n self.lookup[song.name] = song\n self.graph.add_node(song)\n for s in self.songs:\n if s.id == song.id:\n continue\n if song.last_letter == s.first_letter:\n self.graph.add_edge(song, s, int(s.duration))\n\n def dijkstra(self, initial):\n visited = {initial: int(initial.duration)}\n path = {}\n\n nodes = set(self.graph.nodes)\n\n while nodes:\n min_node = None\n for node in nodes:\n if node in visited:\n if min_node is None:\n min_node = node\n elif visited[node] < visited[min_node]:\n min_node = node\n if min_node is None:\n break\n\n nodes.remove(min_node)\n current_weight = visited[min_node]\n\n for edge in self.graph.edges[min_node]:\n try:\n weight = current_weight + self.graph.distances[(min_node, edge)]\n except KeyError:\n continue\n if edge not in visited or weight < visited[edge]:\n visited[edge] = weight\n path[edge] = min_node\n\n return visited, path\n\n def shortest_path(self, origin, destination):\n visited, paths = self.dijkstra(origin)\n full_path = deque()\n _destination = paths[destination]\n\n while _destination != origin:\n full_path.appendleft(_destination)\n _destination = paths[_destination]\n\n full_path.appendleft(origin)\n full_path.append(destination)\n\n return visited[destination], list(full_path)\n\n def find_any_graph(self, song_a, song_b):\n # song_a and song_b are song names\n start = self.lookup[song_a] if song_a in self.lookup else None\n end = self.lookup[song_b] if song_b in self.lookup else None\n if not (start and end):\n return None\n time, songs = self.shortest_path(start, end)\n return time, [s.name for s in 
songs]\n","repo_name":"shipperizer/crowdscores","sub_path":"003-chaining-songs/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":4920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11448198051","text":"from django.forms import ModelForm, Textarea\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm\n\n\nfrom .models import User, GroupRequest\n\n\nclass AccountForm(ModelForm):\n \"\"\" Form for displaying and editing a User's account information.\n This is a ModelForm based off the User model in birdspotter.accounts.models\n \"\"\"\n class Meta:\n model = User\n fields = ['username', 'email', 'first_name', 'last_name']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for _, field in self.fields.items():\n if field.widget.attrs.get('class'):\n field.widget.attrs['class'] += ' form-control'\n else:\n field.widget.attrs['class'] = 'form-control'\n\nclass RegisterForm(UserCreationForm):\n \"\"\" Registration form for a user to register for an account in the application\n\n Attributes:\n email (str): the user's desired email address\n first_name (str): the user's first name\n last_name (str): the user's last name\n \"\"\"\n class Meta:\n model = User\n fields = ['username', 'email', 'password1', 'password2']\n \n def clean_username(self):\n username = self.cleaned_data['username']\n try:\n User.objects.get(username=username)\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(f\"Username {username} is already in use.\")\n\n email = forms.EmailField()\n first_name = forms.CharField()\n last_name = forms.CharField()\n\n\nclass GroupRequestForm(forms.ModelForm):\n \"\"\" Form for a user to request permissions within the system\n This form displays a choice field where a user can select the desired permission level\n and provide any information they would like to add to the request.\n \"\"\"\n class Meta:\n model = GroupRequest\n fields = ('group', 'notes')\n widgets = {\n 'notes': Textarea(attrs={'cols': 80, 'rows': 10}),\n }\n labels = {\n 'notes': 'More details pertaining to the request'\n }\n\n\nclass ContactForm(forms.Form):\n \"\"\"\n Form for a user to send an email to the admin team for issues related to\n their account or for issues with the implementation of the application\n \"\"\"\n email = forms.EmailField()\n subject = forms.CharField(max_length=32)\n message = forms.CharField(max_length=2000, \n widget=Textarea(attrs={'cols': 80, 'rows': 10}))","repo_name":"birdspotter-project/COS397","sub_path":"birdspotter/birdspotter/accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11238625438","text":"#!/usr/bin/env python3\n\nfrom setuptools import setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nsetup(\n name='telemetrix-esp32',\n packages=['telemetrix_aio_esp32', 'telemetrix_esp32_common', 'telemetrix_esp32'],\n install_requires=['adafruit-blinka-bleio',\n 'adafruit-circuitpython-ble', 'bleak>=0.14.2'],\n\n version='1.3',\n\n description=\"Remotely Control And Monitor ESP32 Devices\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n\n author='Alan Yorinks',\n author_email='MisterYsLab@gmail.com',\n url='https://github.com/MrYsLab/telemetrix-esp32',\n download_url='https://github.com/MrYsLab/telemetrix-esp32',\n 
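# The keywords and trove classifiers below are standard setuptools metadata, surfaced on the project's PyPI page.\n 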
keywords=['telemetrix', 'ESP32', 'Protocol', 'Python'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n)\n","repo_name":"MrYsLab/telemetrix-esp32","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"38421153626","text":"\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom scipy.optimize import linprog\n\n\n\n# Read in the data\ncounty_education = pd.read_excel('data/Education.xls', skiprows=(0, 1, 2, 3), usecols=(0, 5, 6, 43, 44, 45, 46))\n\n# Column 0 is the FIPS code, 19 is the most recent population estimate\npopulation_estimation = pd.read_excel('data/PopulationEstimates.xls', skiprows=(0, 1), usecols=(0, 19))\n# Rename column so that the merge works correctly\npopulation_estimation = population_estimation.rename({'FIPStxt': 'FIPS Code'}, axis='columns')\n\n# Column 0 is the FIPS code\n# 85 is the most recent unemployment rate, 86 is the percent of median income compared to average\nunemployment = pd.read_excel('data/Unemployment.xls', skiprows=(0, 1, 2, 3), usecols=(0, 85, 87))\n# Rename column so that the merge works correctly\nunemployment = unemployment.rename({'fips_txt': 'FIPS Code'}, axis='columns')\n\n# Column 0 is the FIPS code\n# 10 is percent of people in poverty\npoverty = pd.read_excel('data/PovertyEstimates.xls', skiprows=(0, 1, 2, 3), usecols=(0, 10))\n# Rename column so that the merge works correctly\npoverty = poverty.rename({'FIPStxt': 'FIPS Code'}, axis='columns')\n\n# Get mask use data\nmask_use = pd.read_csv('data/mask-use-by-county.csv')\n# Rename column so that the merge works correctly\nmask_use = mask_use.rename({'FIPS': 'FIPS Code'}, axis='columns')\n\n# Get cases and death information\ncase_info = pd.read_csv('data/us-counties-covid-death-July.csv')\n# Rename column so that the merge works correctly\ncase_info = case_info.rename({'fips': 'FIPS Code'}, axis='columns')\n\n# Now sum to get the total number of cases and deaths for each county\ncase_info_totals = case_info.groupby(['FIPS Code']).sum()\n\n# Merge all data with mask use data by the County FIPS code\ndata = mask_use.merge(county_education, on='FIPS Code', how='inner')\ndata = data.merge(population_estimation, on='FIPS Code', how='inner')\ndata = data.merge(unemployment, on='FIPS Code', how='inner')\ndata = data.merge(case_info_totals, on='FIPS Code', how='inner')\ndata = data.drop('FIPS Code', axis=1)\n\n# Turn Data into a numpy array\n\ndata_array = data.to_numpy()\n\n\n\n#Create an empty list to hold the Data Envelopment Scores.\n\ndea_scores = []\n\n\n\n\n# Class that takes in a data set and has a method\n# that returns the efficiency score of a given row.\nclass DEA:\n def __init__(self,data):\n self.data = data\n\n def get_coef_matrix(self):\n matrix = []\n for i in range(self.data.shape[0]):\n new_row = []\n for j in range(self.data.shape[1]-2):\n x = -self.data[i,j]\n new_row.append(x)\n new_row.append(self.data[i,14])\n new_row.append(self.data[i,15])\n matrix.append(new_row)\n new_matrix = np.array(matrix)\n return new_matrix\n\n\n\n def compute_score(self,row):\n c_vec = []\n 
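# Build the LP objective for this row (DMU): input columns keep a positive sign and the two\n # output columns (14 and 15) are negated, so linprog's minimization maximizes the weighted-output score.\n 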
for i in range(self.data.shape[1]-2):\n c_vec.append(self.data[row,i])\n c_vec.append(-self.data[row,14])\n c_vec.append(-self.data[row,15])\n c_vec = np.array(c_vec)\n\n\n\n A_matrix = self.get_coef_matrix()\n\n\n\n v_matrix = []\n for i in range(self.data.shape[1]-2):\n v_matrix.append(self.data[row,i])\n v_matrix.append(0)\n v_matrix.append(0)\n v_matrix = np.array(v_matrix)\n v_matrix =v_matrix.reshape((1,16))\n\n\n\n zero_vec = [0 for i in range(self.data.shape[0])]\n zero_vec = np.array(zero_vec)\n\n one_vec = [1]\n one_vec = np.array(one_vec)\n\n res = scipy.optimize.linprog(c_vec, A_ub = A_matrix, b_ub = zero_vec, A_eq = v_matrix, b_eq = one_vec )\n weights = res.x\n\n a_1 = self.data[row,-1]\n a_2 = self.data[row,-2]\n u_1 = weights[-1]\n u_2 = weights[-2]\n\n score = a_1*u_1 + a_2*u_2\n\n return score\n\n\n#create an instance of the DEA class using the data set we have above\n\ndea_instance = DEA(data_array)\n\n\n# Loop through the data to get a score for each row.\n\nfor i in range(data.shape[0]):\n score = dea_instance.compute_score(i)\n dea_scores.append(score)\n if i%10 == 0:\n print(score)\n\ndea_scores = np.array(dea_scores)\ndea_scores = dea_scores.transpose()\n\nnp.savetxt('dea_scores.csv',dea_scores)\n\n\n\n\n\n\n\n\n","repo_name":"BigRoss707/CS_6140_Final_Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"814310628","text":"import turtle\n# Color and size\n# turtle.pencolor((0.5,0,1))\n# turtle.pensize(5)\nturtle.speed(0)\n# the power of for loops\nfor n in range(1,320):\n turtle.left(15)\n turtle.forward(n/5)\n\ninput()\n","repo_name":"cp-helsinge/eksempler","sub_path":"l3_2_turtle.py","file_name":"l3_2_turtle.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12636475962","text":"#!/usr/bin/python\n__author__ = \"Pavel Polyakov\"\n__copyright__ = \"Copyright (C) 2016 Pavel Polyakov\"\n__version__ = \"0.6\"\n\nimport sys\nfrom models import connect_db, BlackPort, Port\nfrom mailer import send_mail\nfrom config import MAIL_TO\nfrom functions import for_html_trap_list, for_html_title\n\nif __name__ == \"__main__\":\n s,e = connect_db()\n blackports = s.query(BlackPort).filter(BlackPort.added == 'auto').all()\n ports_raw = [s.query(Port).\\\n filter(Port.host == x.host).\\\n filter(Port.ifIndex == x.ifIndex).\\\n order_by(Port.id.desc()).first() for x in blackports]\n\n if len(ports_raw) > 0:\n hosts = set([x.host for x in ports_raw])\n for host in hosts:\n ports = [x for x in ports_raw if x.host == host]\n whitelist = [x for x in ports if not x.is_flapping_now(s)]\n for p in whitelist:\n p.unblock(s)\n p.additional = 'Stop Flapping'\n for p in [x for x in ports if x not in whitelist]:\n p.additional = 'Still Flapping'\n for p in ports:\n cir = p.getcircuit(s)\n for c in cir:\n c.del_from_queue(s)\n text_main = for_html_trap_list(ports,s)\n text_title = for_html_title(ports,s)\n send_mail(text_title, MAIL_TO, text_main)\n s.close()\n","repo_name":"Pavel-Polyakov/trapharvester","sub_path":"blacklist_cleaner.py","file_name":"blacklist_cleaner.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"71"} +{"seq_id":"4343329792","text":"def remove(G):\n n = len(G)\n visited = [False]*n\n parents = [None]*n\n entry = [0]*n #czas wejscia\n 
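# DFS timestamps: entry[u] (czas wejscia = entry time) marks when u is first visited and process[u] when\n # its whole subtree is finished; vertices are later ordered by this finishing time.\n 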
process = [0]*n # processing (finish) time\n time = 0\n\n def dfs_visit(u):\n nonlocal G, visited,time\n time += 1\n visited[u] = True\n entry[u] = time\n for i in range(n): # if the graph implementation is matrix-based, this loop needs to be changed\n if not visited[i] and G[u][i]:\n parents[i] = u\n dfs_visit(i)\n time += 1\n process[u] = time\n\n for v in range(n):\n if not visited[v]:\n dfs_visit(v)\n\n for i in range(n): # tuple with the processing time and the index\n process[i] = (process[i],i)\n\n process = sorted(process, key = lambda x: x[0]) # sort by processing time; probably not needed - it is enough to go back from the smallest processing time along the parents\n\n for i in range(n):\n process[i] = process[i][1]\n\n for i in range(n): # remove the edges from the vertices, starting from the smallest processing time\n for j in range(n):\n G[i][j] = 0\n\n return process\n\nG = [[0,1,1,0],[1,1,0,0],[1,1,0,1],[0,0,1,0]]\n#G = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 1, 0, 0], [1, 0, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 0, 1, 1, 0, 1], [0, 0, 0, 1, 1, 0]]\n#G = [[0, 1, 0, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1, 1, 0], [0, 1, 0, 1, 0, 1, 1, 0], [0, 0, 1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1, 0, 0], [0, 1, 1, 0, 1, 0, 1, 0], [0, 1, 1, 0, 0, 1, 0, 1], [0, 0, 0, 1, 0, 0, 1, 0]]\nprint(remove(G))","repo_name":"maati01/ASD","sub_path":"ćwiczenia/cwiczenia08/zad1ob.py","file_name":"zad1ob.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34645466846","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('setup', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Class',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('standard', models.CharField(unique=True, max_length=20)),\n ('sequence', models.SmallIntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='ClassTeacher',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n ),\n migrations.CreateModel(\n name='ClassTest',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date_conducted', models.DateField()),\n ('max_marks', models.DecimalField(max_digits=6, decimal_places=2)),\n ('passing_marks', models.DecimalField(max_digits=6, decimal_places=2)),\n ('grade_based', models.BooleanField()),\n ('is_completed', models.BooleanField(default=False)),\n ('test_type', models.CharField(default=b'Terminal', max_length=30)),\n ],\n ),\n migrations.CreateModel(\n name='Exam',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=100)),\n ('start_date', models.DateField()),\n ('end_date', models.DateField()),\n ('start_class', models.CharField(max_length=20, null=True)),\n ('start_class_sequence', models.SmallIntegerField(null=True)),\n ('end_class', models.CharField(max_length=20, null=True)),\n ('end_class_sequence', models.SmallIntegerField(null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Section',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('section', models.CharField(unique=True, max_length=5)),\n ],\n ),\n migrations.CreateModel(\n name='Subject',\n fields=[\n 
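# Auto-created integer primary key, declared explicitly here as in the other models above.\n 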
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('subject_name', models.CharField(max_length=40)),\n ('subject_code', models.CharField(unique=True, max_length=10)),\n ('subject_sequence', models.SmallIntegerField(null=True)),\n ],\n ),\n migrations.CreateModel(\n name='TeacherSubjects',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n ),\n migrations.CreateModel(\n name='TestResults',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('roll_no', models.IntegerField(null=True)),\n ('marks_obtained', models.DecimalField(null=True, max_digits=6, decimal_places=2)),\n ('grade', models.CharField(max_length=15, null=True)),\n ('class_test', models.ForeignKey(to='academics.ClassTest')),\n ('school', models.ForeignKey(to='setup.School')),\n ],\n ),\n migrations.CreateModel(\n name='WorkingDays',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('year', models.SmallIntegerField()),\n ('month', models.SmallIntegerField()),\n ('working_days', models.SmallIntegerField()),\n ('school', models.ForeignKey(to='setup.School')),\n ],\n ),\n ]\n","repo_name":"atulmala/classup2","sub_path":"academics/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7728756478","text":"import os #libreria para amanejar archivos\r\n\r\nCARPETA = 'contactos/' #carpeta de contactos\r\nEXTENSION = '.txt' #extension\r\n\r\nclass Contacto:\r\n def __init__(self, nombre, telefono, categoria):\r\n self.nombre = nombre\r\n self.telefono = telefono\r\n self.categoria = categoria\r\n# funcion principal\r\ndef app():\r\n mostrar_menu() #muestra el menu de opciones)\r\n crear_directorio() #revisa si la carpeta existe o no\r\n \r\n # preguntar\r\n preguntar = True\r\n while preguntar:\r\n opcion = input('Seleccione una opcion: \\r\\n')\r\n opcion = int(opcion)\r\n # ejecutar opciones\r\n if opcion == 1:\r\n agregar_contacto()\r\n preguntar = False #para que o siga preguntando\r\n elif opcion == 2:\r\n editar_contacto()\r\n preguntar = False\r\n elif opcion == 3:\r\n mostrar_contactos()\r\n preguntar = False\r\n elif opcion == 4:\r\n buscar_contacto()\r\n preguntar = False\r\n elif opcion == 5:\r\n eliminar_contacto()\r\n preguntar = False\r\n else:\r\n print('Opcion no valida, intente de nuevo')\r\n\r\ndef agregar_contacto():\r\n print('Escribe los datos del nuevo Contacto')\r\n nombre_contacto = input('Nombre:\\r\\n')\r\n\r\n # validar nombre (isfile revisa si un archivo ya existe)\r\n existe = existe_contacto(nombre_contacto)\r\n if not existe:\r\n \r\n with open(CARPETA + nombre_contacto + EXTENSION, 'w') as archivo: #crea el archivo contactos/'nombre_contacto'\r\n #campos\r\n telefono_contacto = input('Numero de Telefono: \\r\\n')\r\n categoria_contacto = input('Categoria del Contacto: \\r\\n')\r\n\r\n # intstaciamos la clase\r\n contacto = Contacto(nombre_contacto, telefono_contacto, categoria_contacto)\r\n\r\n # escribir el archivo\r\n archivo.write('Nombre: ' + nombre_contacto + '\\r\\n')\r\n archivo.write('Telefono: ' + telefono_contacto + '\\r\\n')\r\n archivo.write('Categoria: ' + categoria_contacto + '\\r\\n')\r\n\r\n # mensaje de exito\r\n print('\\r\\n Contacto creado Correctamente \\r\\n')\r\n else:\r\n print('Ese 
Contacto ya existe')\r\n # reiniciar la app\r\n app()\r\n\r\ndef editar_contacto():\r\n print('Escribe el nombre del contacto a editar')\r\n nombre_anterior = input('Nombre del contacto que desea editar: \\r\\n')\r\n # validar nombre \r\n existe = existe_contacto(nombre_anterior)\r\n \r\n if existe:\r\n # abrimos el archivo\r\n with open(CARPETA + nombre_anterior + EXTENSION, 'w') as archivo:\r\n # editamos los campos\r\n nombre_contacto = input('Nuevo Nombre del Contacto: \\r\\n')\r\n telefono_contacto = input('Nuevo Numero de Telefono: \\r\\n')\r\n categoria_contacto = input('Nueva Categoria de Contacto: \\r\\n')\r\n\r\n # instanciar\r\n contacto = Contacto(nombre_contacto, telefono_contacto, categoria_contacto)\r\n # escribimos en el archivo\r\n archivo.write('Nombre: ' + contacto.nombre + '\\r\\n')\r\n archivo.write('Telefono: ' + contacto.telefono + '\\r\\n')\r\n archivo.write('Categoria: ' + contacto.categoria + '\\r\\n')\r\n\r\n # cambiamos el nombre del archivo (fuera del with porque debe estar cerrado el archivo)\r\n os.rename(CARPETA + nombre_anterior + EXTENSION, CARPETA + nombre_contacto + EXTENSION)\r\n print('Contacto Editado con Exito')\r\n else:\r\n print('Contacto Inexistente')\r\n app()\r\n\r\ndef mostrar_contactos():\r\n # lisdtdir lista los archivos de un ditrectorio\r\n archivos = os.listdir(CARPETA)\r\n # recorremos solo que termina en .txt\r\n archivos_txt = [i for i in archivos if i.endswith(EXTENSION)]\r\n\r\n for archivo in archivos_txt:\r\n with open(CARPETA + archivo) as contacto:\r\n for linea in contacto:\r\n # imprime los contenidos\r\n print(linea.rstrip())\r\n # imprime un separador\r\n print('\\r\\n')\r\n\r\ndef buscar_contacto():\r\n nombre = input('Que Contacto desea Buscar?: \\r\\n')\r\n # no usar mucho el rty porque consume mucha memoria y demas\r\n try: #trata de abrir el archivo:\r\n with open(CARPETA + nombre + EXTENSION) as contacto:\r\n print('\\r\\n Informacion del contacto: \\r\\n')\r\n for linea in contacto:\r\n print(linea.rstrip())\r\n print('\\r\\n')\r\n except IOError: #si ocurre un error o no existe\r\n print('El archivo no existe')\r\n print(IOError)\r\n \r\n # reiniciar la app\r\n app()\r\n\r\ndef eliminar_contacto():\r\n nombre = input('Que contacto desea Eliminar?: \\r\\n')\r\n\r\n try:\r\n os.remove(CARPETA + nombre + EXTENSION)\r\n print('Contacto eliminado exitosamente')\r\n except IOError: #si ocurre un error o no existe\r\n print('El Contacto no existe')\r\n print(IOError)\r\n\r\ndef existe_contacto(nombre):\r\n # (isfile revisa si un archivo ya existe)\r\n return os.path.isfile(CARPETA + nombre + EXTENSION)\r\n\r\ndef mostrar_menu():\r\n print('Seleccione del Menu lo que desea hacer:')\r\n print('1) Agregar Nuevo Contacto')\r\n print('2) Editar Contacto')\r\n print('3) Ver Contacto')\r\n print('4) Buscar Contacto')\r\n print('5) Eliminar Contacto')\r\n\r\ndef crear_directorio(): \r\n if not os.path.exists(CARPETA): #si esta carpeta no existe existe...\r\n os.makedirs(CARPETA) #crea la carpeta\r\n\r\napp()","repo_name":"adriel-i/Agenda-de-Contactos","sub_path":"agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"22732648522","text":"# MARTA ENRICH GARCIA\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport soundfile as sf\nimport sounddevice as sd\nfrom numpy.fft import fft\nimport os\n\n\nT= 2.5 # Durada de T segons\nfm=8000 # Freqüència de mostratge en Hz\nfx=440 # Freqüència de la 
sinusoide\nA=4 # Amplitud de la sinusoide\npi=np.pi # Valor del número pi\nL = int(fm * T) # Nombre de mostres del senyal digital\nTm=1/fm # Període de mostratge\nt=Tm*np.arange(L) # Vector amb els valors de la variable temporal, de 0 a T\nx = A * np.cos(2 * pi * fx * t) # Senyal sinusoidal\nsf.write('meg_part3.wav', x, fm) # Escriptura del senyal a un fitxer en format wav\nTx=1/fx \nLs=int(fm*Tx*5) \n\nplt.figure(0) \nplt.plot(t[0:Ls], x[0:Ls]) \nplt.xlabel('t en segons') \nplt.title('5 períodes') \nplt.show() \nsd.play(x,fm) \n\n\n#FFT\nN=5000 \nX=fft(x[0:Ls],N) \nk=np.arange(N) \nX_dB = 20*np.log10(np.abs(X)/max(np.abs(X)))\nfk = k[0:N//2+1]*fm/N #Calcul de la fk, pels valors de l'eix d'abscisses\nplt.figure(1)\nplt.subplot(211) \nplt.plot(fk,X_dB[0:N//2+1]) # Representació del mòdul de la transformada en dB y de 0 a FK/2\nplt.title(f'Transformada del senyal de Ls={Ls} mostres amb DFT de N={N}') \nplt.ylabel('Mòdul en dB') \nplt.subplot(212) \nplt.plot(fk,np.unwrap(np.angle(X[0:N//2+1])) ) \nplt.xlabel('f en Hz') \nplt.ylabel('$\\phi_x[k]$') \nplt.show() ","repo_name":"m4rt401/T1-APA","sub_path":"PART 3/meg_codi_part3.py","file_name":"meg_codi_part3.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"ca","doc_type":"code","dataset":"github-code","pt":"71"} +{"seq_id":"9726697485","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom .views import MapView\n\nurlpatterns = [\n path('', MapView.as_view()),\n path('admin/', admin.site.urls),\n path('api/v1/', include('dispatch.urls')),\n path('api/v1/auth/', include('rest_auth.urls')),\n path('api/v1/auth/registration/', include('rest_auth.registration.urls'))\n]\n","repo_name":"ochui/dispatch","sub_path":"backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"22308462511","text":"'''\n17-08-03 - Rule based classifier\n'''\n\n\nimport itertools, random\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin\nfrom sklearn.utils.validation import check_X_y, check_array, check_is_fitted\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn.metrics import precision_score, recall_score, f1_score\n\nfrom common import *\n\nclass RulesBasedClassifier(BaseEstimator, ClassifierMixin):\n\n def __init__(self, rules=None, thresh_cnt=1):\n # takes a list of regexes or function, threshold\n self._thresh_cnt = thresh_cnt\n if isinstance(rules, str) and os.path.exists(rules): rules = self._rules_from_file(rules)\n self._rules = rules if rules else self._default_rules()\n return\n\n def __str__(self):\n return 'RulesBasedClassifier(rules=%s, thresh_cnt=%d)' % (str(self._rules), self._thresh_cnt)\n\n def _default_rules(self):\n rules = ['intoxicat\\w+\\b', 'banana bag', '(alcohol|etoh).{1,20}(dependence|withdrawal|abuse)', 'drink.{1,45}(every|per|each|/).*(day|night|daily)', 'drunk', 'alcoholic', 'heavy (etoh|alcohol)', 'CRITICAL_BAC']\n return rules\n\n def _rules_from_file(self, path):\n # get from text or json file\n rules = None\n\n if path.endswith('.json'):\n content = loadJson(path)\n\n if isinstance(content, list):\n rules = content\n\n if path.endswith('.txt'):\n rules = loadText(path).split('\\n')\n return rules\n\n def fit(self, X, y):\n # generate rule and sample stats\n self._X = X\n self._y = y\n re_stats = {}\n self._samp_stats = []\n\n for idx in range(len(X)):\n samp = X[idx]\n samp = 
'\\n'.join(list(set(samp.split('\\n')))) # remove dups\n samp = samp.replace('\\n', ' ')\n s_stat = [samp]\n\n for pat_idx in range(len(self._rules)):\n pat = self._rules[pat_idx]\n if not pat in re_stats: re_stats[pat] = [1] * 4 # rfp, sfp, rfn, sfn\n match_ctr = len(re.findall(pat, samp))\n if match_ctr > 0:\n s_stat.append([pat, match_ctr])\n if y[idx] == 1: re_stats[pat][0] += match_ctr; re_stats[pat][1] += 1\n if y[idx] == 0: re_stats[pat][2] += match_ctr; re_stats[pat][3] += 1\n self._samp_stats.append(s_stat)\n self._re_stats = re_stats\n return self\n\n def predict(self, X):\n preds = []\n try:\n if not isinstance(self._rules, list): return self._rules()\n\n except Exception as e:\n print(repr(e))\n pdb.post_mortem()\n\n for samp in X:\n result = [1] * 4\n\n for pat in self._re_stats:\n match_ctr = len(re.findall(pat, samp))\n if match_ctr > 0:\n stats = self._re_stats[pat]\n result[0] += stats[0] / stats[1] # rf_pos/sf_pos\n result[1] += stats[2] / stats[3] # rf_neg/sf_neg\n result[2] = result[0] + result[1]\n result[3] = result[0] - result[1]\n #pdb.set_trace()\n\n if result[0] / result[1] > 1:\n preds.append(1)\n\n else:\n preds.append(0)\n return preds\n\n def predict_1(self, X):\n preds = []\n try:\n if not isinstance(self._rules, list): return self._rules()\n\n except Exception as e:\n print(repr(e))\n pdb.post_mortem()\n\n for samp in X:\n match_ctr = 0\n\n for pat in self._rules:\n if re.search(r'%s' % pat, samp): match_ctr += 1\n if match_ctr >= self._thresh_cnt:\n preds.append(1)\n\n else:\n preds.append(0)\n return preds\n\nclass OptimizedRulesSeeker(BaseEstimator, ClassifierMixin):\n\n def __init__(self, search=1, rand_state=0, optimum='f1'):\n self._search = search # %age of samples to get rules from\n self._rand_state = rand_state # seed to randomize search\n self._optimum = optimum # score to optimize for\n\n def __str__(self):\n return 'OptimizedRulesSeeker(search=%d, rand_state=%d, optimum=%s)' % (self._search, self._rand_state, self._optimum)\n\n def fit(self, X, y):\n try:\n self._X = X\n self._y = y\n regexes = self._generate_regexes()\n self._clf = RulesBasedClassifier(regexes).fit(X, y)\n\n except Exception as e:\n print(repr(e))\n pdb.post_mortem()\n return self\n\n def _generate_regexes(self):\n word_tok = r'((\\b[a-zA-Z0-9_/\\-&]+\\b)|(\\b\\(d*,?d+)+(\\.?\\d+)?%?\\w*\\*?\\b))'\n word_sep = re.compile(r'[\\s;.]')\n sent_sep = re.compile(r\"(? 
last_diff: continue\n last_diff = curr_diff\n last_class = self._class_prints[bs]\n if type(last_class) == type(None): pdb.set_trace()\n return last_class\n\n def count_set_bits(self, val):\n return bin(val).count('1')\n\nclass BitKNNClassifier(BaseEstimator, ClassifierMixin):\n\n def __init__(self, n_neighbors=5, rand_state=None, samp_size=100, max_diff=None, min_same=None):\n self._n_nei = n_neighbors\n self._rand_state = rand_state\n self._classes = {}\n self._samp_size = samp_size\n self._max_diff = max_diff\n self._min_same = min_same\n\n def fit(self, X, y):\n\n for doc, lbl in zip(X, y):\n lbl = str(lbl)\n if not lbl in self._classes: self._classes[lbl] = []\n self._classes[lbl].append(doc)\n samps = {}\n if isinstance(self._rand_state, int): random.seed(self._rand_state)\n\n for lbl in self._classes:\n samps[lbl] = random.sample(self._classes[lbl], self._samp_size)\n self._samps = samps\n return self\n\n def predict(self, X):\n preds = []\n\n for doc in X:\n pred = self.do_knn(doc)\n preds.append(int(pred))\n return preds\n\n def do_knn(self, doc):\n # bit diff KNN\n scores = {}\n totals = {}\n max_nearest = None\n\n for lbl in self._samps:\n # get all diffs together\n if not lbl in scores:\n scores[lbl] = []\n totals[lbl] = 0\n\n for samp in self._samps[lbl]:\n diff = bin(doc & samp).count('1') # WARN: breaks DRY\n scores[lbl].append(diff)\n scores[lbl].sort()\n\n for idx in range(self._n_nei):\n # find class with most smallest diffs\n curr_max = None\n curr_score = None\n\n for lbl in scores:\n # find current smallest diff\n if curr_max == None or scores[lbl][idx] < curr_score: curr_max = lbl; curr_score = scores[lbl][idx]\n totals[curr_max] += 1\n best_score = None\n best_class = None\n\n for lbl in totals:\n # get the class with the most nearest\n if not best_class or best_score < totals[lbl]: best_class = lbl; best_score = totals[lbl]\n return best_class\n\nDEBUG = True\n\nif __name__ == '__main__':\n print('This module contains importable classifiers')\n#pdb.set_trace()\ncommit_me(dataDir + 'tracking.json', 'custom_clfs.py')\n","repo_name":"skeledrew/medical-nlp-research","sub_path":"custom_clfs.py","file_name":"custom_clfs.py","file_ext":"py","file_size_in_byte":16258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"37003050190","text":"import os\n\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.utils.decorators import apply_defaults\n\n\nNAM_BASE_DIR = '/data/weatherdata/nam'\n\n\nclass WeatherFileSensor(BaseSensorOperator):\n \"\"\"\n This checks for the existence of source weather files. 
It continually\n runs poke() at the poke_interval set in the DAG until it returns True,\n at which point the dependent tasks can continue processing.\n \"\"\"\n\n template_fields = ('file_path', )\n\n @apply_defaults\n def __init__(self, file_path, *args, **kwargs):\n super(WeatherFileSensor, self).__init__(*args, **kwargs)\n self.file_path = file_path\n\n def poke(self, context):\n\n print('Looking for file_path: ' + self.file_path)\n\n return os.path.exists(self.file_path)\n","repo_name":"Saildrone/Airflow-DAGs","sub_path":"plugins/weathermaker/sensors/weather_file_sensor.py","file_name":"weather_file_sensor.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"42853337509","text":"#!/usr/bin/python3\n\nimport common\n\nHELP_INFO = [\n \"Script is solving task 1 for day 1 of advent of code 2021\",\n \"Arguments:\",\n common.TAB + \"input file\"\n]\narguments_keywords = [\"inputFile\"]\n\nscript_arguments = common.parse_arguments(arguments_keywords, HELP_INFO)\nif script_arguments is None:\n exit(1)\n\ninputFileName = script_arguments[\"inputFile\"]\nprint(\"solving file: \" + inputFileName)\ninputLines = common.read_lines_from_file(inputFileName)\n\ncomputedEntries = 0\nincreasing = 0\nfor i in range(1, len(inputLines)):\n currentLine = inputLines[i]\n previousLine = inputLines[i-1]\n if int(currentLine) > int(previousLine):\n increasing += 1\n computedEntries += 1\n\nprint(\"computed entries: \" + str(computedEntries))\nprint(\"result: \" + str(increasing))\n","repo_name":"s17kf/aoc-2021-python","sub_path":"day1/solve1.1.py","file_name":"solve1.1.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33605405252","text":"import json\nimport logging\nimport os\nimport pprint\nimport tempfile\nimport time\nfrom typing import Dict\nfrom typing import List\n\nimport pytest\n\nimport apis.db.exp_packages\nimport apis.models.common\nimport apis.models.errors\nimport apis.models.virtual_experiment\n\nlog = logging.getLogger('tdb')\n\n\ndef test_insert(ve_sum_numbers: apis.models.virtual_experiment.ParameterisedPackage):\n with tempfile.NamedTemporaryFile(suffix=\".json\", prefix=\"experiments\", delete=True) as f:\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n ve_sum_numbers.metadata.package.tags = ['latest']\n db.push_new_entry(ve_sum_numbers)\n\n old_digest = ve_sum_numbers.metadata.registry.digest\n # VV: Simulate changing the source location to something new\n base = ve_sum_numbers.base.packages[0]\n\n print(\"Base dict:\")\n print(base.dict())\n source = base.source\n\n assert source.git is not None\n assert source.dataset is None\n\n source.git.location = apis.models.virtual_experiment.SourceGitLocation(\n url='new-url',\n branch='new-branch',\n tag=None,\n commit=None)\n\n ve_sum_numbers.update_digest()\n new_digest = ve_sum_numbers.metadata.registry.digest\n\n assert new_digest != old_digest\n\n # VV: Ask the database to update the entry\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n db.push_new_entry(ve_sum_numbers)\n\n # VV: Make sure that there are 2 entries in the db, and that the old one does not have a registry tag\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n many = [apis.models.virtual_experiment.ParameterisedPackage.parse_obj(x) for x in db.query()]\n\n many = sorted(many, key=lambda x: x.registry_created_on)\n assert 
len(many) == 2\n\n old = many[0]\n new = many[1]\n\n assert old.metadata.registry.digest == old_digest\n assert new.metadata.registry.digest == new_digest\n\n print(old.metadata.registry.dict())\n\n assert old.metadata.registry.tags == []\n assert old.metadata.package.tags == ['latest']\n\n assert new.metadata.registry.tags == ['latest']\n assert new.metadata.package.tags == ['latest']\n\n\ndef test_insert_many_same(ve_sum_numbers: apis.models.virtual_experiment.ParameterisedPackage):\n with tempfile.NamedTemporaryFile(suffix=\".json\", prefix=\"experiments\", delete=True) as f:\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n for _ in range(10):\n db.push_new_entry(ve_sum_numbers)\n\n # VV: Make sure that there are 2 entries in the db, and that the old one does not have a registry tag\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n many = [apis.models.virtual_experiment.ParameterisedPackage.parse_obj(x) for x in db.query()]\n\n assert len(many) == 1\n\n\ndef test_record_timesExecuted(ve_sum_numbers: apis.models.virtual_experiment.ParameterisedPackage):\n with tempfile.NamedTemporaryFile(suffix=\".json\", prefix=\"experiments\", delete=True) as f:\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n db.push_new_entry(ve_sum_numbers)\n\n for idx in range(10):\n ve_sum_numbers.metadata.registry.timesExecuted = idx\n db.upsert(ve_sum_numbers.dict(exclude_none=False), ql=db.construct_query(\n package_name=ve_sum_numbers.metadata.package.name,\n registry_digest=ve_sum_numbers.metadata.registry.digest))\n\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n many = [apis.models.virtual_experiment.ParameterisedPackage.parse_obj(x) for x in db.query()]\n\n pprint.pprint(many)\n\n assert len(many) == 1\n assert many[0].metadata.registry.timesExecuted == 9\n\n\ndef test_generate_history(ve_sum_numbers: apis.models.virtual_experiment.ParameterisedPackage):\n base = ve_sum_numbers.base.packages[0]\n source: apis.models.virtual_experiment.BasePackageSourceGit = base.source\n\n # VV: format is {tag: head_digest}\n heads: Dict[str, str] = {}\n\n def push(db: apis.db.exp_packages.DatabaseExperiments, ve: apis.models.virtual_experiment.ParameterisedPackage):\n # VV: Push then wait for some time so that next push has a different createdOn timestamp\n ve.metadata.registry.createdOn = ve.metadata.registry.get_time_now_as_str()\n ve.update_digest()\n db.push_new_entry(ve)\n time.sleep(0.1)\n\n def simulate_changes(db: apis.db.exp_packages.DatabaseExperiments, tag: str, times: int,\n untagged_digests: List[str]) -> str:\n ve_sum_numbers.metadata.package.tags = [tag]\n ve_sum_numbers.metadata.registry.tags = []\n\n for i in range(times):\n # VV: Simulate changing the source location to something new\n source.git.location = apis.models.virtual_experiment.SourceGitLocation(url=f'new-url-{tag}', branch=f'{i}')\n push(db, ve_sum_numbers)\n\n # VV: This will get untagged\n untagged_digests.append(ve_sum_numbers.metadata.registry.digest)\n\n # VV: This will now be the head\n source.git.location = apis.models.virtual_experiment.SourceGitLocation(url=f'new-url-{tag}', branch=\"head\")\n push(db, ve_sum_numbers)\n\n return ve_sum_numbers.metadata.registry.digest\n\n untagged_hello = []\n untagged_unique = []\n with tempfile.NamedTemporaryFile(suffix=\".json\", prefix=\"experiments\", delete=True) as f:\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n # VV: When hello2 pushed, it removed the `latest` and `hello` tag from `hello1`\n # VV: When hello1 
pushed, it removed the `latest` and `hello` tag from `hello0`\n # VV: When hello0 pushed, it did not remove a tag from anything\n heads['hello'] = simulate_changes(db, 'hello', 2, untagged_hello)\n\n # VV: When `unique` pushed, it removed the `latest` tag from `hello2`\n heads['unique'] = simulate_changes(db, 'unique', 0, untagged_unique)\n heads['latest'] = heads['unique']\n\n history = db.trace_history(ve_sum_numbers.metadata.package.name)\n\n docs = db.query()\n for i, d in enumerate(docs):\n log.info(f\"Doc[{i}] = {json.dumps(d, indent=2)}\")\n\n trace = history.to_dict()\n\n log.info(f\"All history trace: {json.dumps(trace, indent=2)}\")\n\n untagged_digests = trace['untagged']\n\n untagged_grouped_by_tag = {}\n for x in untagged_digests:\n ot = x['originalTag']\n if ot not in untagged_grouped_by_tag:\n untagged_grouped_by_tag[ot] = []\n untagged_grouped_by_tag[ot].append(x)\n\n # VV: untagged digests should have 2 entries for hello0, hello1\n # (once with `hello` tag and another with `latest` tag)\n assert len(untagged_grouped_by_tag['hello']) == 2\n assert len(untagged_grouped_by_tag['latest']) == 2\n\n # VV: hello2 is still tagged with `hello (1 tagged entry)\n # VV: unique0 is tagged with `unique` and `latest` (2 tagged entries)\n assert len(trace['tags']) == 3\n\n for tag_info in trace['tags']:\n assert tag_info['head'] == heads[tag_info['tag']]\n\n assert heads['hello'] != heads['unique']\n assert heads['unique'] == heads['latest']\n\n\ndef test_update_tags_with_single_parameterised_package(\n ve_sum_numbers: apis.models.virtual_experiment.ParameterisedPackage):\n orig = list(ve_sum_numbers.metadata.package.tags)\n\n with tempfile.NamedTemporaryFile(suffix=\".json\", prefix=\"experiments\", delete=True) as f:\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n db.push_new_entry(ve_sum_numbers)\n\n for idx in range(10):\n db.tag_update(ve_sum_numbers.metadata.package.name, [\"latest\", f\"lbl-{idx}\"])\n\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n many = [apis.models.virtual_experiment.ParameterisedPackage.parse_obj(x) for x in db.query()]\n\n pprint.pprint(many)\n\n assert len(many) == 1\n assert sorted(many[0].metadata.registry.tags) == sorted([\"latest\", f\"lbl-9\"])\n\n\ndef test_query_tag(\n ve_sum_numbers: apis.models.virtual_experiment.ParameterisedPackage):\n with tempfile.NamedTemporaryFile(suffix=\".json\", prefix=\"experiments\", delete=True) as f:\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n ve_sum_numbers.metadata.package.tags = [\"test\", \"foo\"]\n ve_sum_numbers.metadata.registry.tags = [\"test\", \"foo\"]\n db.insert_many([ve_sum_numbers.dict()])\n\n docs = db.query_identifier(apis.models.common.PackageIdentifier.from_parts(\n package_name=ve_sum_numbers.metadata.package.name,\n tag=\"test\",\n digest=None\n ).identifier)\n\n assert len(docs) == 1\n\n docs = db.query_identifier(apis.models.common.PackageIdentifier.from_parts(\n package_name=ve_sum_numbers.metadata.package.name,\n tag=\"latest\",\n digest=None\n ).identifier)\n\n assert len(docs) == 0\n\n\ndef test_update_tags_with_many_parameterised_packages(\n ve_sum_numbers: apis.models.virtual_experiment.ParameterisedPackage):\n orig = list(ve_sum_numbers.metadata.package.tags)\n\n with tempfile.NamedTemporaryFile(suffix=\".json\", prefix=\"experiments\", delete=True) as f:\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n db.push_new_entry(ve_sum_numbers)\n\n # VV: Change something that would reuslt in a different digest\n 
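# VV: update_digest() recomputes metadata.registry.digest from the package contents, so pushing the\n # mutated copy stores a second, distinct entry alongside the original.\n 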
ve_sum_numbers.parameterisation.presets.platform = \"this-is-definitely-new\"\n log.info(f\"Orig digest {ve_sum_numbers.metadata.registry.digest}\")\n ve_sum_numbers.update_digest()\n log.info(f\"New digest {ve_sum_numbers.metadata.registry.digest}\")\n\n db.push_new_entry(ve_sum_numbers)\n\n for idx in range(10):\n db.tag_update(ve_sum_numbers.metadata.package.name, [\"latest\", f\"lbl-{idx}\"])\n\n ql = db.construct_query(package_name=ve_sum_numbers.metadata.package.name)\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n many = [apis.models.virtual_experiment.ParameterisedPackage.parse_obj(x)\n for x in db.query(ql)]\n\n log.info(pprint.pformat(many))\n\n assert len(many) == 2\n\n new_identifier = apis.models.common.PackageIdentifier.from_parts(\n package_name=ve_sum_numbers.metadata.package.name,\n tag=\"lbl-9\",\n digest=None).identifier\n\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n single = [apis.models.virtual_experiment.ParameterisedPackage.parse_obj(x)\n for x in db.query_identifier(new_identifier)]\n\n log.info(pprint.pformat(single))\n\n assert len(single) == 1\n assert sorted(single[0].metadata.registry.tags) == sorted([\"latest\", f\"lbl-9\"])\n\n\ndef test_cannot_remove_latest_tag(ve_sum_numbers: apis.models.virtual_experiment.ParameterisedPackage):\n orig = list(ve_sum_numbers.metadata.package.tags)\n\n with tempfile.NamedTemporaryFile(suffix=\".json\", prefix=\"experiments\", delete=True) as f:\n with apis.db.exp_packages.DatabaseExperiments(f.name) as db:\n db.push_new_entry(ve_sum_numbers)\n\n with pytest.raises(apis.models.errors.CannotRemoveLatestTagError) as e:\n db.tag_update(ve_sum_numbers.metadata.package.name, [\"hello\"])\n\n\ndef test_query_relationship_identifier(output_dir: str):\n import apis.kernel.relationships\n import apis.models.query_relationship\n import apis.models.relationships\n\n db = apis.db.relationships.DatabaseRelationships(os.path.join(output_dir, 'db'))\n\n with db:\n db.insert_many([\n apis.models.relationships.Relationship.parse_obj({'identifier': 'hello-world'}).dict(),\n apis.models.relationships.Relationship.parse_obj({'identifier': 'not-hello-world'}).dict(),\n ])\n\n q = apis.models.query_relationship.QueryRelationship.parse_obj({'identifier': 'hello.*'})\n\n x = apis.kernel.relationships.api_list_queries(q, db)\n\n assert len(x) == 1\n assert x[0]['identifier'] == 'hello-world'\n\n\ndef test_query_relationship_inputgraph_identifier(output_dir: str):\n import apis.models.query_relationship\n import apis.models.relationships\n\n db = apis.db.relationships.DatabaseRelationships(os.path.join(output_dir, 'db'))\n\n with db:\n db.insert_many([\n apis.models.relationships.Relationship.parse_obj({\n 'identifier': 'hello-world',\n 'transform': {\n 'outputGraph': {'identifier': 'dummy'},\n 'inputGraph': {'identifier': 'hello-world'}\n }\n }).dict(),\n apis.models.relationships.Relationship.parse_obj({\n 'identifier': 'not-hello-world',\n 'transform': {\n 'outputGraph': {'identifier': 'hello-world'},\n 'inputGraph': {'identifier': 'dummy'}\n }\n }).dict(),\n ])\n\n q = apis.models.query_relationship.QueryRelationship.parse_obj({\n 'transform': {\n 'inputGraph': {'identifier': 'hello-world:.*$'}\n }})\n\n x = apis.kernel.relationships.api_list_queries(q, db)\n\n assert len(x) == 1\n assert x[0]['transform']['inputGraph']['identifier'] == 
'hello-world:latest'\n","repo_name":"st4sd/st4sd-runtime-service","sub_path":"tests/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":13713,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"35587529057","text":"\"\"\"The main of Gender and Age Detection with OpenCV\n-----------------------------\n\nAbout this Module\n------------------\nThe goal of this module is to initiate the run of the Gender and Age Detection\nand define core components.\n\"\"\"\n\n__author__ = \"Benoit Lapointe\"\n__date__ = \"2021-05-28\"\n__copyright__ = \"Copyright 2021, labesoft\"\n__version__ = \"1.0.0\"\n\nimport argparse\nimport os.path\nfrom pathlib import Path\n\nimport cv2\n\n# Paths\nBASE_PATH = Path(os.path.dirname(__file__))\nFACE_PROTO = str(BASE_PATH.joinpath('models', \"opencv_face_detector.pbtxt\"))\nFACE_MODEL = str(BASE_PATH.joinpath('models', \"opencv_face_detector_uint8.pb\"))\nAGE_PROTO = str(BASE_PATH.joinpath('models', \"age_deploy.prototxt\"))\nAGE_MODEL = str(BASE_PATH.joinpath('models', \"age_net.caffemodel\"))\nGENDER_PROTO = str(BASE_PATH.joinpath('models', \"gender_deploy.prototxt\"))\nGENDER_MODEL = str(BASE_PATH.joinpath('models', \"gender_net.caffemodel\"))\n\n# Model constants\nMODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)\nAGE_LIST = [\n '(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)',\n '(48-53)', '(60-100)'\n]\nGENDER_LIST = ['Male', 'Female']\n\n\ndef highlight_face(net, a_frame, conf_threshold=0.7):\n \"\"\"Highlight faces on an image\n\n It uses a colored frame printing gender and age\n\n :param net:\n :param a_frame:\n :param conf_threshold:\n :return:\n \"\"\"\n\n frame_opencv_dnn = a_frame.copy()\n frame_height = frame_opencv_dnn.shape[0]\n frame_width = frame_opencv_dnn.shape[1]\n a_blob = cv2.dnn.blobFromImage(frame_opencv_dnn, 1.0, (300, 300),\n [104, 117, 123], True, False)\n\n net.setInput(a_blob)\n detections = net.forward()\n face_box_list = []\n for i in range(detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if confidence > conf_threshold:\n x1 = int(detections[0, 0, i, 3] * frame_width)\n y1 = int(detections[0, 0, i, 4] * frame_height)\n x2 = int(detections[0, 0, i, 5] * frame_width)\n y2 = int(detections[0, 0, i, 6] * frame_height)\n face_box_list.append([x1, y1, x2, y2])\n cv2.rectangle(frame_opencv_dnn, (x1, y1), (x2, y2), (0, 255, 0),\n int(round(frame_height / 150)), 8)\n return frame_opencv_dnn, face_box_list\n\n\nif __name__ == '__main__':\n \"\"\"Main entry point of the genderage package\"\"\"\n # Parsing arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--image')\n args = parser.parse_args()\n\n # Loading the models\n face_net = cv2.dnn.readNet(FACE_MODEL, FACE_PROTO)\n age_net = cv2.dnn.readNet(AGE_MODEL, AGE_PROTO)\n gender_net = cv2.dnn.readNet(GENDER_MODEL, GENDER_PROTO)\n\n #\n video = cv2.VideoCapture(args.image if args.image else 0)\n padding = 20\n while cv2.waitKey(1) < 0:\n has_frame, frame = video.read()\n if not has_frame:\n cv2.waitKey()\n break\n\n result_img, face_boxes = highlight_face(face_net, frame)\n if not face_boxes:\n print(\"No face detected\")\n\n for face_box in face_boxes:\n face = frame[\n max(0, face_box[1] - padding): min(face_box[3] + padding,\n frame.shape[0] - 1),\n max(0, face_box[0] - padding): min(face_box[2] + padding,\n frame.shape[1] - 1)\n ]\n\n blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227),\n MODEL_MEAN_VALUES, swapRB=False)\n gender_net.setInput(blob)\n 
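# forward() returns one confidence score per label in GENDER_LIST; argmax selects the prediction.\n 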
gender_preds = gender_net.forward()\n gender = GENDER_LIST[gender_preds[0].argmax()]\n print(f'Gender: {gender}')\n\n age_net.setInput(blob)\n age_preds = age_net.forward()\n age = AGE_LIST[age_preds[0].argmax()]\n print(f'Age: {age[1:-1]} years')\n\n cv2.putText(result_img, f'{gender}, {age}',\n (face_box[0], face_box[1] - 10),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.8, (0, 255, 255), 2, cv2.LINE_AA)\n cv2.imshow(\"Detecting age and gender\", result_img)\n","repo_name":"labesoft/ml-projects","sub_path":"genderage/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"17868084892","text":"import socket\nimport sys\n\nimport RPNCalculator as rpn\n \n\n\n\nHOST = ''\nPORT = 7000\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n print('# Socket created')\n s.bind((HOST, PORT))\n print('# Socket now listening')\n s.listen(10)\n conn, addr =s.accept()\n with conn:\n print('# Connected to ' + addr[0] + ':' + str(addr[1]))\n while True:\n data = conn.recv(1024)\n data = data.decode(\"UTF-8\")\n if data.upper() == \"END\":\n break\n conn.send(bytes(str(rpn.RPNCalulator(data)), \"utf-8\"))\n s.close()","repo_name":"MatthiasMarczyszyn/Systemy_Wbudowane","sub_path":"Lab7/rpn_server.py","file_name":"rpn_server.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"20680374979","text":"# Import dependencies.\nimport csv\nimport os\n# Assign a variable to load a file from a path.\nfile_to_load = os.path.join(\"Resources/election_results.csv\")\n# Assign a variable to save the file to a path.\nfile_to_save = os.path.join(\"analysis\", \"election_analysis.txt\")\n\n#Initialize a total vote counter.\ntotal_votes=0\n\n#Initialize a list of candidates\ncandidate_options=[]\n\n#Initialize a dictionary of candidates' votes.\ncandidate_votes={}\n\n#Initialize an empty string variable for the winning candidate.\nwinning_candidate=\"\"\n#Set winning count to zero\nwinning_count=0\n#Set winning percentage to zero\nwinning_percentage=0\n\n#Initialize a list of counties.\ncounty_options=[]\n#Initialize a dictionary of county votes.\ncounty_votes={}\n#Initialize an empty string variable for the county with the largest turnout.\nlargest_county_turnout=\"\"\n#Set largest turnout to zero\nturnout_count=0\n\n\n# Open the election results and read the file.\nwith open(file_to_load) as election_data:\n # To do: read and analyze the data here.\n file_reader = csv.reader(election_data)\n # Skip the header row.\n headers = next(file_reader)\n \n # Print each row in the CSV file.\n for row in file_reader: \n #increment the vote counter by 1.\n total_votes=total_votes + 1\n\n #get the candidate's name from the row.\n candidate_name=row[2]\n #get the county name from the row.\n county_name=row[1]\n # If the candidate does not match any existing candidate...\n if candidate_name not in candidate_options:\n #then add them to the list of candidates.\n candidate_options.append(candidate_name)\n\n #start tracking the candidate's votes.\n candidate_votes[candidate_name]= 0\n\n # Add a vote to that candidate's count.\n candidate_votes[candidate_name] += 1\n\n # If the county does not match any existing county...\n if county_name not in county_options:\n #then add it to the list of counties.\n county_options.append(county_name)\n\n #start tracking county votes.\n county_votes[county_name]= 0\n\n # Add a vote to that 
counties's count.\n county_votes[county_name]+=1\n\n#save the results to our text file.\nwith open (file_to_save,'w') as txt_file:\n #print the final vote count to the terminal.\n election_results=(\n f\"\\nElection Results\\n\"\n f\"-------------------------\\n\"\n f\"Total Votes :{total_votes:,}\\n\"\n f\"-------------------------\\n\"\n f\" \\n\"\n f\"County Votes: \\n\")\n print(election_results, end=\"\")\n #save the final vote count to the text file.\n txt_file.write(election_results)\n\n #Determin the percentage of each county votes by looping through the counts.\n #Iterate through county list.\n for county_name in county_options:\n #retrieve vote count for each county.\n countyvotes=county_votes[county_name]\n #calculate the percentage of votes\n county_votes_percentage=float(countyvotes)/float(total_votes)*100\n county_results=(\n f\"{county_name}: {county_votes_percentage:.1f}% ({countyvotes:,})\\n\")\n #print each counties name, vote count and percentage votes.\n print(county_results)\n #Save the counties results to our text file\n txt_file.write(county_results)\n\n #Determine Largest County Turnout.\n if(countyvotes>turnout_count) :\n turnout_count=countyvotes\n largest_county_turnout=county_name\n\n #print largest county turnout result to the terminal.\n turnout_count_result=(\n f\"\\n-------------------------\\n\"\n f\"Largest County Turnout: {largest_county_turnout}\\n\"\n f\"-------------------------\\n\")\n print(turnout_count_result)\n #save largest county turnout on text file.\n txt_file.write(turnout_count_result) \n\n #Determin the percentage of each candidates votes by looping through the counts.\n #Iterate through candidate list.\n for candidate_name in candidate_votes:\n #retrieve vote count for each candidate.\n votes=candidate_votes[candidate_name]\n #calculate the percentage of votes\n votes_percentage=float(votes)/float(total_votes)*100\n candidate_results = (\n f\"{candidate_name}: {votes_percentage:.1f}% ({votes:,})\\n\")\n #print each candidates name, vote count and percentage votes.\n print(candidate_results)\n #Save the candidates results to our text file\n txt_file.write(candidate_results)\n #Determine winning vote count, winning percentage and candidate.\n if(votes>winning_count) and(votes_percentage>winning_percentage):\n winning_count=votes\n winning_percentage=votes_percentage\n winning_candidate=candidate_name\n\n #print winning candidates' results to the terminal.\n winning_candidate_summary= (\n f\"-------------------------\\n\"\n f\"Winner: {winning_candidate}\\n\"\n f\"Winning Vote Count:{winning_count:,}\\n\"\n f\"Winning Percentage: {winning_percentage:.1f}%\\n\"\n f\"--------------------------\\n\")\n\n print(winning_candidate_summary)\n #save the wininng candidates summary to the text file.\n txt_file.write(winning_candidate_summary)","repo_name":"Muzznah/Election_Analysis","sub_path":"PyPoll_Challenge.py","file_name":"PyPoll_Challenge.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"24601910598","text":"\nPIPELINE_NAME = \"IngestToBronze\"\nSINK_CONTAINER = \"datalake\"\nFILE_PATH = \"bronze/\"\nFILE_NAME = \"On-street_Parking_Bay_Sensors.csv\"\nFILE_ROWS_COUNT = 3123\n\ndef test_pipeline_succeeded(adf_pipeline_run, blob_service_client):\n \"\"\"Test that pipeline has copied target CSV file to container of sink storage account\"\"\"\n this_run = adf_pipeline_run(PIPELINE_NAME, run_inputs={\"fileName\": FILE_NAME})\n \n # Assert pipeline 
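# A minimal client sketch for the RPN socket server shown above. Host, port and
# the one-reply-per-send framing are assumptions taken from the server code; the
# helper name is hypothetical.
import socket

def rpn_session(expressions, host="127.0.0.1", port=7000):
    """Send each postfix expression, collect the replies, then end the session."""
    results = []
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((host, port))
        for expr in expressions:
            sock.send(expr.encode("utf-8"))
            results.append(sock.recv(1024).decode("utf-8"))
        sock.send(b"end")  # any casing works once the server compares data.upper()
    return results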
execution status\n assert this_run.status == \"Succeeded\"\n\n # Assert sinked file in target storage account\n blob_container_client = blob_service_client.get_container_client(SINK_CONTAINER)\n blobs_list = list(blob_container_client.list_blobs(FILE_PATH, None))\n assert len(blobs_list) == 1\n \n blob = blobs_list[0]\n assert blob.name == FILE_PATH + FILE_NAME\n \n # Assert file row count\n storage_stream_downloader = blob_container_client.download_blob(blob.name, None, None)\n blob_bytes = storage_stream_downloader.readall()\n \n contents_array = blob_bytes.decode(\"UTF-8\").split(\"\\n\")\n assert len(contents_array) == FILE_ROWS_COUNT\n","repo_name":"Azure-Samples/modern-data-warehouse-dataops","sub_path":"single_tech_samples/datafactory/sample1_cicd/tests/integrationtests/tests/test_pipeline_ingest_to_bronze.py","file_name":"test_pipeline_ingest_to_bronze.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":512,"dataset":"github-code","pt":"71"} +{"seq_id":"30961802372","text":"from Tools.Sentence import Sentence\nfrom Core.Database import Database\nfrom apyori import apriori # We use apriori to extract frequent feature from feature list\nfrom Tools.Feature import Feature\n\nclass Review:\n\t# First config for read\n\tcolumn = ['sentences'] # column\n\ttable_name = 'sentences' # table name\n\n\t'''\n\t\tThis class provide interface to extract the feature and get the opinion text. This is our main process\n\t'''\n\tdef __init__(self, file_name, id_anime, feature = []) :\n\t\tself.feature_list = set(feature) # Our feature list\n\t\tself.noun_list = [] # Our noun list use for transaction and mining the frequent feature\n\t\tself.id_anime = id_anime\n\n\t\tself.db = Database(file_name) # Our databse interface\n\t\tdata = self.db.read(self.table_name, self.column, ' WHERE anime_id='+id_anime)\n\t\tself.sentences = []\n\n\t\t# The review is consist a sentence class\n\t\t# You can look at the sentence class to see what can a sentence do include post tagging, tokenizing and so on\n\t\tfor row in data:\n\t\t\tself.sentences.append(Sentence(row[0].lower(), 5114))\n\t\tself.feature_result = []\n\n\t'''\n\t\tThis method provide an interface to extract the feature list. 
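# A hedged alternative to counting rows with a raw split("\n") as the test above
# does: csv.reader copes with quoted fields that embed newlines and with a
# trailing newline at end of file. `blob_bytes` stands for the downloaded payload.
import csv
import io

def count_csv_rows(blob_bytes: bytes) -> int:
    return sum(1 for _ in csv.reader(io.StringIO(blob_bytes.decode("utf-8"))))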
The process is like this\n\n\t\t- Use the post tagging to find the noun or a feature candidate.\n\t\t- Save to the noun list\n\t\t- Use assosiation mining to find the frequent feature\n\t\t- Save to the feature list\n\t'''\n\tdef extract_feature_list(self):\n\n\t\tif len(self.feature_list) == 0:\n\t\t\t# Get the noun for each sentences\n\t\t\tfor sentence in self.sentences:\n\n\t\t\t\tfor word in sentence.tagged :\n\t\t\t\t\tif word[1] == 'NN' :\n\t\t\t\t\t\tsentence.noun.append(word[0])\n\n\t\t\t\t# If the sentences didn't countain noun, it's just a dump sentences\n\t\t\t\tif len(sentence.noun) > 0 :\n\t\t\t\t\tsentence.opinion = True\n\t\t\t\t\tself.noun_list.append(sentence.noun)\n\t\t\t\telse :\n\t\t\t\t\tsentence.opinion = False\n\n\n\t\t\t# Use apriori method to mine the feature\n\t\t\tassociation_rules = apriori(self.noun_list, min_support=0.001, min_confidence=0, min_lift=3, min_length=2)\n\t\t\tassociation_results = list(association_rules)\n\n\t\t\tfeatures = []\n\n\t\t\tfor item in association_results:\n\t\t\t\tpair = item[0] \n\t\t\t\titems = [x for x in pair]\n\t\t\t\tself.feature_list.add(items[0])\n\n\t'''\n\t\tAfter mine the feature list we mine the opinion about that feature.\n\n\t\t- First, we get the adjective that most nearby to the noun\n\t\t- We categorize them with positive or negative with wordnet\n\t'''\n\tdef extract_feature_opinion(self):\n\n\t\t# List all the feature and make them the key to the dictionary\n\t\tfeature_count = {}\n\t\tfor feature in self.feature_list :\n\t\t\tfeature_count[feature] = []\n\n\t\t# Find all feature and opinion in all sentences\n\t\tfor sentence in self.sentences:\n\n\t\t\tif sentence.opinion == True:\n\t\t\t\tall_words = len(sentence.tagged)\n\t\t\t\tfor i,j in enumerate(sentence.tagged) :\n\n\t\t\t\t\t# Find the nearby adjective forward or backward\n\t\t\t\t\tif j[1] == 'NN' and j[0] in self.feature_list :\n\t\t\t\t\t\tcount_forward = i\n\t\t\t\t\t\tcount_backward = i\n\t\t\t\t\t\twhile count_forward < all_words and sentence.tagged[count_forward][1] != 'JJ' :\n\t\t\t\t\t\t\tcount_forward += 1\n\t\t\t\t\t\twhile count_backward >= 0 and sentence.tagged[count_backward][1] != 'JJ' :\n\t\t\t\t\t\t\tcount_backward -= 1\n\n\t\t\t\t\t\tcount = -1\n\t\t\t\t\t\tif count_forward < all_words and count_backward >= 0 :\n\t\t\t\t\t\t\tif(count_forward - i > i - count_backward) :\n\t\t\t\t\t\t\t\tcount = count_backward\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tcount = count_forward\n\n\t\t\t\t\t\tif count_backward < 0 :\n\t\t\t\t\t\t\tcount = count_forward\n\t\t\t\t\t\tif count_forward >= all_words :\n\t\t\t\t\t\t\tcount = count_backward\n\t\t\t\t\t\tif count_forward >= all_words and count_backward < 0 :\n\t\t\t\t\t\t\tcount = -1\n\t\t\t\t\t\tif count != -1 :\n\t\t\t\t\t\t\tfeature_count[j[0]].append(sentence.tagged[count][0])\n\n\t\t# Make the feature class that contain the feature count of positive opinion and negative opinion\n\t\tfor i in feature_count.keys():\n\t\t\tself.feature_result.append(Feature(self.db, i, feature_count[i], self.id_anime))\n\n\t\t# Use wordnet to mine the opinion orientation\n\t\tFeature.OrientationPrediction(Feature.all_adj, Feature.seed)\n\t\t# print(Feature.seed)\n\n\tdef get_summary(self):\n\t\tself.extract_feature_list()\n\t\tself.extract_feature_opinion()\n\t\tres = {}\n\t\tfor i in self.feature_result:\n\t\t\ti.save()\n\t\t\tres[i.feature] = (i.good, i.bad)\n\t\treturn res\n\n\t# Save method\n\tdef save(self) :\n\t\tfor feature in self.feature_result:\n\t\t\tfeature.save()\n\n\t# Close database\n\tdef 
close_db(self):\n\t\tself.db.close()","repo_name":"mesti-sukses/summerly","sub_path":"Tools/Review.py","file_name":"Review.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"22120878048","text":"import xml.etree.ElementTree as ET\nimport streamlit as st\nimport json\nfrom deep_translator import GoogleTranslator\n\n\nfile = st.file_uploader(\"Upload your .resx file\")\ntargetLang = None\nwith open(\"langCodes.json\") as json_file:\n json_data = json.loads(json_file.read())\n countryDisplayNames = [obj['language'] for obj in json_data]\n targetLang = st.selectbox(\"Select the language you want to translate to\", countryDisplayNames)\n\ndef create_resx():\n currentBatchValues = \"\"\n batch_count = 30\n values = []\n batchIndex = 0\n translatedText = \"\"\n\n if file is not None:\n tree = ET.parse(file)\n root = tree.getroot()\n\n for data_node in root.findall('data'):\n batchIndex += 1\n value_node = data_node.find('value')\n value = value_node.text if value_node is not None else None\n currentBatchValues += f\"{value}\\n\"\n if(batchIndex > batch_count):\n batchIndex = 0\n values.append(f\"{currentBatchValues}\")\n currentBatchValues = \"\"\n values.append(f\"{currentBatchValues}\")\n\n selectedLang = None\n if(targetLang is not None):\n with open(\"langCodes.json\") as json_file:\n langCodes = json.loads(json_file.read())\n for lang in langCodes:\n if(lang['language'] == targetLang):\n selectedLang = lang\n for batch in values:\n translatedText += GoogleTranslator(source='auto', target=selectedLang['countryCode']).translate(batch) + \"\\n\"\n translatedValues = translatedText.splitlines()\n\n i = 0\n\n print(f\"length input: {len(root.findall('data'))}\")\n print(f\"length output: {len(translatedValues)}\")\n\n for node in root.findall('data'):\n value_node = node.find('value')\n try:\n value_node.text = translatedValues[i]\n except:\n break\n i += 1\n\n tree.write(f\"App.{selectedLang['code']}.resx\", encoding=\"utf-8\", xml_declaration=True)\n print(f\"'App.{selectedLang['code']}.resx' created successfully\")\n st.text(f\"'App.{selectedLang['code']}.resx' created successfully!\")\n\nif st.button(\"Translate\"):\n create_resx()","repo_name":"zanmat0o/resx-translator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"25735151106","text":"# API reference :\n# https://westus.dev.cognitive.microsoft.com/docs/services/5adf991815e1060e6355ad44/operations/56f91f2e778daf14a499e1fa\n# 参考 : https://ledge.ai/microsoft-computer-vision-api/\n# 機能概要 : img フォルダ中の画像をAI解析し、ファイルのリネームを行います。\n# 使い方 : python3 cv_demo.py\n# 注意 : サブスクリプションキーは変更してください\n\nimport requests\nimport glob\nimport os\nimport time\n\nsubscription_key = \"\"\nassert subscription_key\n\nvision_base_url = \"https://japaneast.api.cognitive.microsoft.com/vision/v2.0/\"\nanalyze_url = vision_base_url + \"analyze\"\n\n\n# ファイル名を変更\ndef file_rename(list_1, list_2):\n for i in range(len(list_1)):\n os.rename(list_1[i], './img/' + list_2[i] + '.jpg')\n\n\ndef ms_computer_vision_api(filepath):\n headers = {'Ocp-Apim-Subscription-Key': subscription_key,'Content-Type': 'application/octet-stream'}\n params = {'visualFeatures': 'Categories,Description,Color'}\n\n img = open(filepath, 'rb')\n img_byte = img.read()\n\n response = requests.post(analyze_url, data=img_byte, headers=headers, params=params)\n 
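# A small sketch of the batching idea the .resx translator above relies on:
# grouping strings cuts round trips to the translation service. GoogleTranslator
# is the deep_translator class that script already imports; the batch size and
# newline joining are assumptions, and (as in the script itself) this only works
# if the service happens to preserve line breaks in its output.
from deep_translator import GoogleTranslator

def translate_in_batches(values, target, batch_size=30):
    translator = GoogleTranslator(source="auto", target=target)
    translated = []
    for i in range(0, len(values), batch_size):
        chunk = "\n".join(values[i:i + batch_size])
        translated.extend(translator.translate(chunk).split("\n"))
    return translated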
response.raise_for_status()\n\n return response.json()\n\n\nif __name__ == \"__main__\":\n # 画像ファイルを配列に格納\n image_file = glob.glob('./img/*')\n\n vision_file_name = []\n\n start = time.time()\n\n # Computer Vision APIにリクエストして結果を取得\n for i in range(len(image_file)):\n json_data = ms_computer_vision_api(image_file[i])\n\n # 生成された文章を取得\n file_name = json_data['description']['captions'][0]['text']\n vision_file_name.append(file_name)\n\n # 文章の空白をファイル名用にアンダーバーに修正\n for i in range(len(vision_file_name)):\n vision_file_name[i] = vision_file_name[i].replace(' ', '_')\n\n file_rename(image_file,vision_file_name)\n\n # 経過時間を出力\n #print(\"elapsed_time:{0}\".format(time.time() - start) + \"[sec]\")\n","repo_name":"YukoKono/Experiment_for_gazouninsiki","sub_path":"AzureとPythonで画像認識.py","file_name":"AzureとPythonで画像認識.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12085348758","text":"print (\"use star expressions to solve too many values to unpack problem\")\n\n\ndef avg(values):\n if(len(values) < 1):\n return None\n else:\n return sum(values) / len(values)\n\ndef drop_first_last(grades):\n first, *middle, last = grades\n return avg(middle)\n\ngrades = [1,2,3,4,5];\n\nprint(drop_first_last(grades))\n\nrecord = ['Dave','dace@develop.com','773-555-112','888-222-111']\nname,email,*phone_numbers = record\nprint(name)\nprint(email)\nprint(phone_numbers)\n\nrecords = [\n ('foo',1,2),\n ('bar','hello'),\n ('foo',3,4)\n]\n\ndef do_foo(x,y):\n print('foo',x,y)\n\ndef do_bar(s):\n print('bar',s)\n\nfor tag,*args in records:\n if tag == 'foo':\n do_foo(*args)\n elif tag == 'bar':\n do_bar(*args)\n\n","repo_name":"zhangxiaoya/python","sub_path":"chapter1/unpackingelements.py","file_name":"unpackingelements.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34775272888","text":"# -를 기준으로 분리를 한 후 계산 \n# ans : -를 기준으로 계산값이 나누어져 있는 배열\n# plus : 연속된 + 배열을 계산한 값\n# sum : 결과\n\na = list(input().split('-'))\nans = []\nfor i in a:\n if '+' in i:\n tmp = i.split('+')\n plus = 0\n for j in tmp:\n plus += int(j)\n ans.append(plus)\n else:\n ans.append(int(i))\n\nsum = ans[0]\nfor i in ans[1:]:\n sum -= i\nprint(sum)\n","repo_name":"SW1026/Algorithm","sub_path":"그리디/1541 잃어버린 괄호.py","file_name":"1541 잃어버린 괄호.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"12427726489","text":"#!/usr/bin/env python3\n\nimport sys\n\nx = sys.stdin.readline()\ninteger = int(x)\n\nwhile integer > 9:\n\n product = 1\n for digit in str(integer):\n if digit != \"0\":\n product *= int(digit)\n # continuously changes\n integer = product\nprint(integer)\n","repo_name":"aydenjahola/DCU","sub_path":"year-1/ca117/week-05/product_051.py","file_name":"product_051.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"42976587747","text":"import re\nimport os\nimport ast\nimport math\nimport json\nimport wandb\nimport base64\nimport random\nimport asyncio\nimport template\nimport requests\nimport traceback\nimport bittensor as bt\nfrom . 
import client\nfrom collections import deque\n\nlist_update_lock = asyncio.Lock()\n\ndef load_state_from_file(filename=\"validators/state.json\"):\n if os.path.exists(filename):\n with open(filename, \"r\") as file:\n bt.logging.info(\"loaded previous state\")\n return json.load(file)\n else:\n bt.logging.info(\"initialized new global state\")\n return {\n \"text\": {\"themes\": None, \"questions\": None, \"theme_counter\": 0, \"question_counter\": 0},\n \"images\": {\"themes\": None, \"questions\": None, \"theme_counter\": 0, \"question_counter\": 0}\n }\n\nstate = load_state_from_file()\n\n\ndef get_state():\n global state\n if state is None:\n load_state_from_file()\n return state\n\n\ndef save_state_to_file(state, filename=\"state.json\"):\n with open(filename, \"w\") as file:\n bt.logging.success(f\"saved global state to {filename}\")\n json.dump(state, file)\n\n\ndef get_validators_with_runs_in_all_projects():\n api = wandb.Api()\n validators_runs = {project: set() for project in projects}\n\n # Retrieve runs for each project and store validator UIDs\n for project in template.PROJECT_NAMES:\n runs = api.runs(f\"cortex-t/{project}\")\n for run in runs:\n if run.config['type'] == 'validator':\n validators_runs[project].add(run.config['uid'])\n\n # Find common validators across all projects\n common_validators = set.intersection(*validators_runs.values())\n return common_validators\n \n\nasync def get_list(list_type, num_questions_needed, theme=None):\n prompts_in_question = {'text_questions': 10, 'images_questions': 20}\n list_type_mapping = {\n \"text_questions\": {\n \"default\": template.INSTRUCT_DEFAULT_QUESTIONS,\n \"prompt\": \"placeholder\"\n },\n \"images_questions\": {\n \"default\": template.IMAGE_DEFAULT_QUESTIONS,\n \"prompt\": f\"Provide a python-formatted list of {prompts_in_question[list_type]} creative and detailed scenarios for image generation, each inspired by the theme '{theme}'. The scenarios should be diverse, thoughtful, and possibly out-of-the-box interpretations related to '{theme}'. Each element in the list should be a concise, but a vividly descriptive situation designed to inspire visually rich stories. Format these elements as comma-separated, quote-encapsulated strings in a single Python list.\"\n }\n }\n \n selected_prompts = []\n if list_type == \"text_questions\":\n question_pool = []\n for complexity_level in range(1, 21): \n for relevance_level in range(1, 21):\n prompt = f\"Generate a python-formatted list of {prompts_in_question[list_type]} questions or instruct tasks related to the theme '{theme}', each with a complexity level of {complexity_level} out of 20 and a relevance level to the theme of {relevance_level} out of 20. These tasks should varyingly explore {theme} in a manner that is consistent with their assigned complexity and relevance levels to the theme, allowing for a diverse and insightful engagement about {theme}. 
Format the questions as comma-separated, quote-encapsulated strings in a single Python list.\"\n question_pool.append(prompt)\n \n random.shuffle(question_pool)\n num_questions_to_select = min(math.ceil(num_questions_needed / prompts_in_question[list_type]), len(question_pool))\n selected_prompts = random.sample(question_pool, num_questions_to_select)\n else:\n num_questions_to_select = math.ceil(num_questions_needed / prompts_in_question[list_type])\n selected_prompts = [list_type_mapping[list_type][\"prompt\"]] * num_questions_to_select\n\n bt.logging.debug(f\"num_questions_needed: {num_questions_needed}, list_type: {list_type}, selected_prompts: {selected_prompts}\")\n\n tasks = [\n call_openai([{'role': \"user\", 'content': prompt}], 0.65, \"gpt-3.5-turbo\", random.randint(1, 10000))\n for prompt in selected_prompts\n ]\n\n responses = await asyncio.gather(*tasks)\n extracted_lists = []\n max_retries = 5\n for i, answer in enumerate(responses):\n try:\n answer = answer.replace(\"\\n\", \" \") if answer else \"\"\n extracted_list = extract_python_list(answer)\n if extracted_list:\n extracted_lists += extracted_list\n else:\n # Retry logic for each prompt if needed\n for retry in range(max_retries):\n try:\n random_seed = random.randint(1, 10000)\n messages = [{'role': \"user\", 'content': selected_prompts[i]}]\n new_answer = await call_openai(messages, 0.85, \"gpt-4-1106-preview\", random_seed)\n new_answer = new_answer.replace(\"\\n\", \" \") if new_answer else \"\"\n new_extracted_list = extract_python_list(new_answer)\n if new_extracted_list:\n extracted_lists += new_extracted_list\n break\n else: bt.logging.error(f\"no list found in {new_answer}\")\n except Exception as e:\n bt.logging.error(f\"Exception on retry {retry + 1} for prompt '{selected_prompts[i]}': {e}\\n{traceback.format_exc()}\")\n except Exception as e:\n bt.logging.error(f\"Exception in processing initial response for prompt '{selected_prompts[i]}': {e}\\n{traceback.format_exc()}\")\n\n if not extracted_lists:\n bt.logging.error(f\"No valid lists found after processing and retries, returning None\")\n return None\n\n return extracted_lists\n\n\nasync def update_counters_and_get_new_list(category, item_type, num_questions_needed, theme=None):\n\n async def get_items(category, item_type, theme=None):\n if item_type == \"themes\":\n if category == \"images\":\n return template.IMAGE_THEMES\n else:\n return template.INSTRUCT_DEFAULT_THEMES\n else:\n # Never fail here, retry until valid list is found\n while True:\n theme = await get_random_theme(category)\n if theme is not None:\n return await get_list(f\"{category}_questions\", num_questions_needed, theme)\n\n async def get_random_theme(category):\n themes = state[category][\"themes\"]\n if not themes:\n themes = await get_items(category, \"themes\")\n state[category][\"themes\"] = themes\n return random.choice(themes)\n\n list_type = f\"{category}_{item_type}\"\n\n async with list_update_lock:\n items = state[category][item_type]\n\n # Logging the current state before fetching new items\n bt.logging.debug(f\"Queue for {list_type}: {len(items) if items else 0} items\")\n\n # Fetch new items if the list is empty\n if not items:\n items = await get_items(category, item_type, theme)\n state[category][item_type] = items\n bt.logging.debug(f\"Fetched new list for {list_type}, containing {len(items)} items\")\n\n item = items.pop() if items else None\n if not items:\n state[category][item_type] = None\n\n return item\n\n\nasync def get_question(category, 
num_questions_needed):\n if category not in [\"text\", \"images\"]:\n raise ValueError(\"Invalid category. Must be 'text' or 'images'.\")\n\n question = await update_counters_and_get_new_list(category, \"questions\", num_questions_needed)\n return question\n\n\ndef preprocess_string(text):\n processed_text = text.replace(\"\\t\", \"\")\n placeholder = \"___SINGLE_QUOTE___\"\n processed_text = re.sub(r\"(?<=\\w)'(?=\\w)\", placeholder, processed_text)\n processed_text = processed_text.replace(\"'\", '\"').replace(placeholder, \"'\")\n\n # First, remove all comments, ending at the next quote\n no_comments_text = \"\"\n i = 0\n in_comment = False\n while i < len(processed_text):\n if processed_text[i] == '#':\n in_comment = True\n elif processed_text[i] == '\"' and in_comment:\n in_comment = False\n no_comments_text += processed_text[i] # Keep the quote that ends the comment\n i += 1\n continue\n if not in_comment:\n no_comments_text += processed_text[i]\n i += 1\n\n # Now process the text without comments for quotes\n cleaned_text = []\n inside_quotes = False\n found_first_bracket = False\n\n i = 0\n while i < len(no_comments_text):\n char = no_comments_text[i]\n\n if not found_first_bracket:\n if char == '[':\n found_first_bracket = True\n cleaned_text.append(char)\n i += 1\n continue\n\n if char == '\"':\n # Look for preceding comma or bracket, skipping spaces\n preceding_char_index = i - 1\n found_comma_or_bracket = False\n\n while preceding_char_index >= 0:\n if no_comments_text[preceding_char_index] in '[,': # Check for comma or opening bracket\n found_comma_or_bracket = True\n break\n elif no_comments_text[preceding_char_index] not in ' \\n': # Ignore spaces and new lines\n break\n preceding_char_index -= 1\n\n following_char_index = i + 1\n while following_char_index < len(no_comments_text) and no_comments_text[following_char_index] in ' \\n':\n following_char_index += 1\n\n if found_comma_or_bracket or \\\n (following_char_index < len(no_comments_text) and no_comments_text[following_char_index] in '],'):\n inside_quotes = not inside_quotes\n else:\n i += 1\n continue # Skip this quote\n\n cleaned_text.append(char)\n i += 1\n continue\n\n if char == ' ':\n # Skip spaces if not inside quotes and if the space is not between words\n if not inside_quotes and (i == 0 or no_comments_text[i - 1] in ' ,[' or no_comments_text[i + 1] in ' ,]'):\n i += 1\n continue\n\n cleaned_text.append(char)\n i += 1\n\n cleaned_str = ''.join(cleaned_text)\n cleaned_str = re.sub(r\"\\[\\s+\", \"[\", cleaned_str)\n cleaned_str = re.sub(r\"\\s+\\]\", \"]\", cleaned_str)\n cleaned_str = re.sub(r\"\\s*,\\s*\", \", \", cleaned_str) # Ensure single space after commas\n\n start, end = cleaned_str.find('['), cleaned_str.rfind(']')\n if start != -1 and end != -1 and end > start:\n cleaned_str = cleaned_str[start:end + 1]\n\n return cleaned_str\n\ndef convert_to_list(text):\n pattern = r'\\d+\\.\\s'\n items = [item.strip() for item in re.split(pattern, text) if item]\n return items\n\ndef extract_python_list(text: str):\n try:\n if re.match(r'\\d+\\.\\s', text):\n return convert_to_list(text)\n \n bt.logging.debug(f\"Preprocessed text = {text}\")\n text = preprocess_string(text)\n bt.logging.debug(f\"Postprocessed text = {text}\")\n\n # Extracting list enclosed in square brackets\n match = re.search(r'\\[((?:[^][]|\"(?:\\\\.|[^\"\\\\])*\")*)\\]', text, re.DOTALL)\n if match:\n list_str = match.group(1)\n\n # Using ast.literal_eval to safely evaluate the string as a list\n evaluated = ast.literal_eval('[' + list_str + 
']')\n if isinstance(evaluated, list):\n return evaluated\n\n except Exception as e:\n bt.logging.error(f\"Unexpected error when extracting list: {e}\\n{traceback.format_exc()}\")\n\n return None\n\n\nasync def call_openai(messages, temperature, model, seed=1234):\n for attempt in range(2):\n bt.logging.debug(f\"Calling Openai. Temperature = {temperature}, Model = {model}, Seed = {seed}, Messages = {messages}\")\n try:\n response = await client.chat.completions.create(\n model=model,\n messages=messages,\n temperature=temperature,\n seed=seed,\n )\n response = response.choices[0].message.content\n bt.logging.debug(f\"validator response is {response}\")\n return response\n\n except Exception as e:\n bt.logging.error(f\"Error when calling OpenAI: {traceback.format_exc()}\")\n await asyncio.sleep(0.5) \n \n return None\n\n\n\n# Github unauthorized rate limit of requests per hour is 60. Authorized is 5000.\ndef get_version(line_number = 22):\n url = f\"https://api.github.com/repos/corcel-api/cortex.t/contents/template/__init__.py\"\n response = requests.get(url)\n if response.status_code == 200:\n content = response.json()['content']\n decoded_content = base64.b64decode(content).decode('utf-8')\n lines = decoded_content.split('\\n')\n if line_number <= len(lines):\n version_line = lines[line_number - 1]\n version_match = re.search(r'__version__ = \"(.*?)\"', version_line)\n if version_match:\n return version_match.group(1)\n else:\n raise Exception(\"Version information not found in the specified line\")\n else:\n raise Exception(\"Line number exceeds file length\")\n else:\n bt.logging.error(\"github api call failed\")\n return None\n\n\ndef send_discord_alert(message, webhook_url):\n data = {\n \"content\": f\"@everyone {message}\",\n \"username\": \"Subnet18 Updates\"\n }\n try:\n response = requests.post(webhook_url, json=data)\n if response.status_code == 204:\n print(\"Discord alert sent successfully!\")\n else:\n print(f\"Failed to send Discord alert. 
Status code: {response.status_code}\")\n except Exception as e:\n print(f\"Failed to send Discord alert: {e}\", exc_info=True)","repo_name":"BitAPAI/cortex.t","sub_path":"template/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14039,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"} +{"seq_id":"30096472409","text":"import paho.mqtt.client as mqtt\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\ncred = credentials.Certificate(\"serviceAccountKey.json\")\nfirebase_admin.initialize_app(cred)\n\ndb=firestore.client()\n\nname = \"\"\ntempFinish = \"\"\ndpUploadFinsh = \"\"\ntemp =\"\"\ndef on_connect(client, userdata, flag, rc):\n print(\"connect\")\n client.subscribe(\"hansung/arduino/temp\", qos = 0)\n client.subscribe(\"hansung/pc/webCamCapture\", qos = 0)\n\n\ndef on_message(client, userdata, msg) :\n \n global temp \n global tempFinish\n global dpUploadFinsh\n global name\n if msg.topic == \"hansung/pc/webCamCapture\":\n name = str(msg.payload.decode(\"utf-8\"))\n print(name)\n\n dpUploadFinsh = True\n \n if msg.topic == \"hansung/arduino/temp\":\n temp = str(msg.payload.decode(\"utf-8\"))\n print(temp)\n if name != \"\": \n print(\"사람이름\"+name)\n db.collection('studentlist').document(name).update({\"temp\":temp})\n print(temp)\n tempFinish = True\n\n \n if tempFinish and dpUploadFinsh == True:\n client.publish(\"hansung/mobile/reset\",\"ok\")\n tempFinish = False\n dpUploadFinsh =False\n\n print(\"done\")\n \n \n \n\nbroker_ip = \"113.198.84.40\" \n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(broker_ip, 80)\n\nclient.loop_forever()\n\n","repo_name":"GeumBi-Hong/Attendance-application-using-face-recognition","sub_path":"Face_Recognition/temperature_upload_db.py","file_name":"temperature_upload_db.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"30038228533","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:\n temp= None\n res= head\n \n while head:\n if not temp:\n temp= head\n head= head.next\n elif temp.val== head.val:\n head= head.next\n temp.next= head\n else:\n temp=temp.next\n head= head.next\n \n return res","repo_name":"Bereket234/competitive-programming","sub_path":"week5/remove_duplicate.py","file_name":"remove_duplicate.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"73123103268","text":"# prob_037.py\n\n# Problem 37\n#\n# The number 3797 has an interesting property.\n# Being prime itself, it is possible to continuously remove digits from left to right,\n# and remain prime at each stage: 3797, 797, 97, and 7.\n# Similarly we can work from right to left: 3797, 379, 37, and 3.\n# \n# Find the sum of the only eleven primes that are both truncatable from left to right and right to left.\n# \n# NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.\n#\n\nfrom Primes import Primes\nfrom Solver import Solver\n\n# check for left to right truncatable\ndef isLRT(number, primes):\n num_s = str(number)\n num_len = len(num_s) \n if num_len < 2:\n return False\n for i in range(num_len):\n s = num_s[i:]\n 
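# A compact, self-contained restatement of the truncatable-prime test that the
# Problem 37 script above builds from a precomputed prime set; plain trial
# division keeps this sketch standalone at the cost of speed.
def is_prime(n: int) -> bool:
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

def is_truncatable(n: int) -> bool:
    s = str(n)
    if len(s) < 2:
        return False  # single digits are excluded, as in the problem statement
    left_to_right = all(is_prime(int(s[i:])) for i in range(len(s)))
    right_to_left = all(is_prime(int(s[:len(s) - i])) for i in range(len(s)))
    return left_to_right and right_to_left

# is_truncatable(3797) -> True; summing the eleven such primes answers Problem 37.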
n = int(s)\n if n not in primes:\n return False\n return True\n\n# check for right to left truncatable\ndef isRLT(number, primes):\n num_s = str(number)\n num_len = len(num_s) \n if num_len < 2:\n return False\n if number not in primes: \n return False\n for i in range(1, num_len):\n s = num_s[:-i]\n n = int(s)\n if n not in primes:\n return False\n return True\n\ndef solve(max_val):\n result = 0\n count = 0\n P = Primes()\n P.calcPrimesAdvanced(max_val)\n primes = P.getPrimes()\n for prime in primes:\n if isLRT(prime, primes) and isRLT(prime, primes):\n result += prime\n count += 1\n print(prime)\n print(\"count: {0}\".format(count))\n return result\n\ndef main():\n solver = Solver(solve, 10**6)\n solver.solve()\n\nmain()\n\n","repo_name":"caleb-james-smith/ProjectEuler","sub_path":"python/prob_037.py","file_name":"prob_037.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"6461855429","text":"def total(*parameters):\n\tresult=0\n\tfor value in parameters:\n\t\tresult+=value\n\n\treturn result\nprint(total(10,20,30))\nprint(total(1,2))\nprint(total(10000,5000,500,800))\nprint(total())\n\n\n\n\ndef func(**Kwords):\n\tfor key in Kwords.keys():\n\t\tprint(key,\":\",Kwords[key])\n\t\nfunc(a=10,b=\"20\",name=\"sathu\",sat=9, sat2=\"@#~!+$#$%\")","repo_name":"pranaysathu/pythonTraining","sub_path":"dynamicParams.py","file_name":"dynamicParams.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"16173398250","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndatosalbum = np.loadtxt(\"resalbum.txt\")\n\nnlaminas= datosalbum[:,0]\nnrepetidas = datosalbum[:,1]\nntiempo = np.linspace(0, 159, 160) \n\n\nplt.plot(ntiempo, nlaminas, \"c\")\nplt.plot(ntiempo, nrepetidas, \"b\")\nplt.savefig(\"graficamona.pdf\")\n","repo_name":"clesmes10/ejercicio16","sub_path":"graficamonas.py","file_name":"graficamonas.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5271734867","text":"# -----------------------------\n# Project Euler Problem 4: Largest Palindrome Product\n# \t\tFind the largest palindrom made from the product of two 3-digit numbers\n# -----------------------------\n\ndef largestPalindromeProd():\n\tlargestPalindrome = 0\n\tfor firstNumber in range(999,100,-1):\n\t\tfor secondNumber in range(999,100,-1): \n\t\t\tresult = firstNumber * secondNumber\n\t\t\tresultReversed = reverseString(str(result))\n\n\t\t\tif str(result) == str(resultReversed):\n\t\t\t\tlargestPalindrome = keepLargestPalindrome(largestPalindrome, result)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t\n\tprint(\"Largest palindrome: {0} from the product of {1} and {2}\".format(largestPalindrome, firstNumber, secondNumber))\n\ndef reverseString(inputString):\n\treturn inputString[::-1]\n\ndef keepLargestPalindrome(currentResult, newResult):\n\tif currentResult >= newResult:\n\t\treturn currentResult\n\telse:\n\t\treturn newResult\n\n\ndef main():\n\tlargestPalindromeProd()\n\nif __name__ == '__main__':\n\tmain()","repo_name":"hhoov/euler","sub_path":"largestPalindromeProduct.py","file_name":"largestPalindromeProduct.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12363710049","text":"import os\nimport pandas 
as pd\nfrom matplotlib import pyplot as plt\n\nprint(*[item.split(\".\").pop(0) for item in os.listdir(\"reviews\")], sep=\"\\n\")\nproduct_id = input(\"Podaj kod produktu: \")\n\nreviews = pd.read_json(f\"./reviews/{product_id}.json\")\n\nproduct_rating = reviews.stars.mean()\nreviews_count = reviews.shape[0]\npros_count = reviews.pros.map(bool).sum()\ncons_count = reviews.cons.map(bool).sum()\nprint(f\"\"\" dla produktu o identyfikatorze {product_id} dostępnych jest {reviews_count} opinii\nDla {pros_count} opinii autorzy podali listę zalet, a dla {cons_count} listę wad.\nŚrednia ocena produktu to {product_rating:.1f}.\"\"\")\n\nrecommendation = reviews.recommendation.value_counts(dropna=False)\nrecommendation.plot.pie()\nplt.show()\n\n# W ramach zadania domowego - pobawienie się argumentami wykresu.\n# w ramach plt. rózne argumenty, albo wewnątrz funkcji\n\n\n\n\n","repo_name":"Dawid1424/CeneoScraper2","sub_path":"analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19046649210","text":"import AnyToJson\nimport re\nimport os\n\nclass Fail2Ban(AnyToJson.AnyToJson):\n def __init__(self):\n super().__init__()\n\n self.i = 0\n self.regs = {} \n self.prepared_regex = {} \n\n self.files_readed = []\n\n self.source = self.conf.get(\"detection\", \"source\")\n self.prepared_regex[\"HOST\"] = self.conf.get(\"config\", \"host_regex\")\n self.prepared_regex[\"__prefix_line\"] = self.conf.get(\"config\", \"prefix_regex\")\n\n for fl in os.listdir(self.conf.get(\"config\", \"fail2ban_dir\")+\"/filter.d/\"):\n if fl.endswith(\".conf\"):\n self.readfile(fl)\n\n self.files_readed = map(lambda each:each.strip(\".conf\"), self.files_readed)\n\n for key, value in self.prepared_regex.items():\n if \"failregex\" in key:\n self.regs[key] = re.compile(value)\n\n\n def readfile(self, fl):\n with open(\"/etc/fail2ban/filter.d/\"+fl, errors='replace') as f:\n lines = f.readlines()\n\n goto = \"\"\n\n for line in lines:\n if line.startswith(\"before = \"):\n goto = (line.split(\"before = \")[1]).rstrip()\n\n if goto and goto not in self.files_readed:\n self.readfile(goto) \n\n for line in lines:\n self.get_regex(line)\n\n if fl not in self.files_readed:\n self.files_readed.append(fl)\n\n\n def get_regex(self, line):\n if line.startswith(\"#\") or line.startswith(\"[\") or line.startswith(\"\\n\") or line.startswith(\"ignoreregex\"):\n return None\n\n line = line.rstrip() \n match = re.findall(\"%\\(([^\\)]*)\\)s\", line)\n\n for name in match: \n if \"_\" in name or \"?P<\"+name+\">\":\n line = re.sub(\"%\\(\"+name+\"\\)s\", self.prepared_regex[name], line)\n continue\n line = re.sub(\"%\\(\"+name+\"\\)s\", \"(?P<\"+name+\">\"+self.prepared_regex[name]+\")\", line)\n\n line = re.sub(\"\", \"(?P\"+self.prepared_regex[\"HOST\"]+\")\", line) # replace special fail2ban variable\n line = re.sub(\"\\?P\\(\", \"(\", line) # fix broken regex\n\n data = line.split(\"=\", 1)\n\n if len(data) == 2:\n if \"__prefix_line\" != data[0].rstrip():\n self.prepared_regex[data[0].rstrip()] = data[1].lstrip()\n self.rkey = data[0].rstrip()\n else:\n self.prepared_regex[self.rkey+str(self.i)] = data[0].lstrip()\n self.i += 1\n\n","repo_name":"lukasbalazik123/jharm","sub_path":"JHarm/Parse/Fail2Ban.py","file_name":"Fail2Ban.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} 
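# A minimal sketch of the %(name)s interpolation that the Fail2Ban parser above
# implements with chained re.sub calls: resolve named fragments against a dict
# of already-known patterns. The sample filter fragment is made up for
# illustration.
import re

def resolve(template: str, known: dict) -> str:
    return re.sub(r"%\((\w+)\)s", lambda m: known[m.group(1)], template)

# Example (hypothetical filter fragment):
# resolve("%(__prefix_line)sFailed login from <HOST>",
#         {"__prefix_line": r"\S+ sshd\[\d+\]: "})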
+{"seq_id":"4022122271","text":"################################################\n# #\n# author: raghav_0901 (Raghav Dalmia) #\n# updated on: 16-10-2020, 3:38:46 pm IST #\n# #\n################################################\n\nfrom math import *\n\n\ndef _in():\n return [int(x) for x in input().split(' ')]\n\n\ndef getSum(ftree, ind):\n if(ind < 0):\n return 0\n ind = ind + 1\n ans = 0\n while(ind > 0):\n ans = ans + ftree[ind]\n ind = ind - (ind & (-ind))\n return ans\n\n\ndef constructFT(n, arr):\n ftree = [0] * (n + 1)\n\n for i in range(0, n):\n ind = i + 1\n while(ind <= n):\n ftree[ind] += arr[i]\n ind = ind + (ind & (-ind))\n return ftree\n\n\nif __name__ == \"__main__\":\n n, q = _in()\n arr = _in()\n\n ftree = constructFT(n, arr)\n\n for i in range(q):\n l, r = _in()\n ans = getSum(ftree, r - 1) - getSum(ftree, l - 2)\n print(ans)","repo_name":"raghav-dalmia/CSES-Solution-Book","sub_path":"Range_Queries/Static_Range_Sum_Queries/sol2.py","file_name":"sol2.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"} +{"seq_id":"43106135454","text":"# from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import Article\nfrom .serializers import ArticleSerializer\n\n\n# class ArticleListView(ListAPIView):\n# queryset = Article.objects.all()\n# serializer_class = ArticleSerializer\n\n# class ArticleDetailView(RetrieveAPIView):\n# queryset = Article.objects.all()\n# serializer_class = ArticleSerializer\n\n\nclass HomePage(APIView):\n def get(self,request):\n \n helper_links = {\n 'view all articles':'/view_all',\n 'add new article':'/add_new',\n 'delete article':'/delete/',\n 'view specified':'/view/',\n 'delete':'/delete/',\n 'update':'/update/'\n }\n\n return Response(helper_links)\n\n\nclass ViewAll(APIView):\n def get(self,request):\n articles_list = Article.objects.all()\n article_data = ArticleSerializer(articles_list,many = True)\n return Response(article_data.data)\n\n\nclass AddNew(APIView):\n def post(self,request):\n \n \n new_article = ArticleSerializer(data = request.data)\n if new_article.is_valid():\n new_article.save()\n print(type(request.data))\n return Response('Article added')\n\n\nclass DeleteArticle(APIView):\n def get(self,request,key):\n article = Article.objects.get(id = key)\n article.delete()\n\n return Response('Article deleted successfully')\n\n\nclass ViewOne(APIView):\n def get(self,request,key):\n article = Article.objects.get(id = key)\n serializer = ArticleSerializer(article,many = False)\n \n return Response(serializer.data)\n\n\nclass UpdateArticle(APIView):\n def post(self,request,key):\n article = Article.objects.get(id = key)\n serialized_data = ArticleSerializer(instance = article,data = request.data)\n \n if serialized_data.is_valid():\n serialized_data.save()\n \n return Response(\"Article updated\")\n","repo_name":"Chinmay487/djreact","sub_path":"backend/articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33334887976","text":"class Solution(object):\n def searchInsert(self, nums, target):\n if len(nums) == 1 and nums[0] < target:\n return 1\n elif len(nums) == 1:\n return 0\n\n left = 0\n right = len(nums) - 1\n\n while left < right:\n mid = left + (right - left + 1)//2\n if nums[mid] < target:\n left = mid\n elif nums[mid] == target:\n 
return mid\n else:\n right = mid\n return left\n\n\nif __name__ == '__main__':\n nums = [1, 3]\n target = 2\n\n sol = Solution()\n ans = sol.searchInsert(nums, target)\n print(ans)\n","repo_name":"Amyoyoyo/LeetCode","sub_path":"35. Search Insert Position/BinarySearch_wrong.py","file_name":"BinarySearch_wrong.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"72996125980","text":"# GLOBALS VARS\nS_WIDTH = 800\nS_HEIGHT = 700\nPLAY_WIDTH = 300 # meaning 300 // 10 = 30 width per block\nPLAY_HEIGHT = 600 # meaning 600 // 20 = 20 height per blo ck\nBLOCK_SIZE = 30\n\nTOP_LEFT_X = (S_WIDTH - PLAY_WIDTH) // 2\nTOP_LEFT_Y = S_HEIGHT - PLAY_HEIGHT\n\nSTATE_MENU = 1\nSTATE_PLAY = 2\nSTATE_END = 3","repo_name":"cmurphy580/TMGE","sub_path":"src/common/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"12692981985","text":"from itertools import combinations\n\nimport numpy as np\n\nimport nemo\n\n\ndef circle_circonscrit(T):\n (x1, y1, z1), (x2, y2, z2), (x3, y3, z3), (x4, y4, z4) = T\n A = np.array(\n [\n [x4 - x1, y4 - y1, z4 - z1],\n [x4 - x2, y4 - y2, z4 - z2],\n [x4 - x3, y4 - y3, z4 - z3],\n ]\n )\n Y = np.array(\n [\n (x4 ** 2 + y4 ** 2 + z4 ** 2 - x1 ** 2 - y1 ** 2 - z1 ** 2),\n (x4 ** 2 + y4 ** 2 + z4 ** 2 - x2 ** 2 - y2 ** 2 - z2 ** 2),\n (x4 ** 2 + y4 ** 2 + z4 ** 2 - x3 ** 2 - y3 ** 2 - z3 ** 2),\n ]\n )\n if np.linalg.det(A) == 0:\n return None, 0\n Ainv = np.linalg.inv(A)\n X = 0.5 * np.dot(Ainv, Y)\n x, y, z = X[0], X[1], X[2]\n r = ((x - x1) ** 2 + (y - y1) ** 2 + (z - z1) ** 2) ** 0.5\n return (x, y, z), r\n\n\ndef l2norm(x, axis=1):\n return x / np.sum(x ** 2, axis=axis, keepdims=True) ** 0.5\n\n\ndef ransac_one(target, points, non_linear_foo=lambda x: x > 0.01):\n # non_linear_foo = lambda x: x\n non_linear_foo = lambda x: np.exp(x)\n all_combinations = np.array(list(combinations(range(points.shape[0]), 3)))\n\n distances = np.ones(all_combinations.shape[0]) * 100\n centers = np.zeros((all_combinations.shape[0], 3))\n radius = np.zeros(all_combinations.shape[0])\n for i, selection in enumerate(all_combinations):\n selected_points = points[selection]\n center, r = circle_circonscrit(\n np.concatenate((selected_points, np.expand_dims(target, axis=0)), axis=0)\n )\n if center is None:\n continue\n dis_caled = np.sum(\n non_linear_foo(\n np.abs(np.sum((points - np.array([center])) ** 2, axis=1) ** 0.5 - r)\n )\n )\n\n centers[i] = np.array(center)\n radius[i] = r\n distances[i] = dis_caled\n min_idx = np.argmin(distances)\n center_ = centers[min_idx]\n return l2norm(center_ - target, axis=0)\n\n\ndef direction_calculator(verts, faces):\n out_dict = {i: set() for i in range(verts.shape[0])}\n\n for t in faces:\n for k in t:\n out_dict[k] = out_dict[k].union(set(t) - {k})\n\n direct_dict = {}\n for k in out_dict.keys():\n if len(list(out_dict[k])) <= 2:\n direct_dict[k] = np.array([1, 0, 0])\n continue\n # direct_dict[k] = l2norm(np.mean(l2norm(verts[np.array(list(out_dict[k]))] - np.expand_dims(verts[k], axis=0)), axis=0), axis=0)\n direct_dict[k] = ransac_one(verts[k], verts[np.array(list(out_dict[k]))])\n\n return direct_dict\n\n\ndef cal_point_weight(direct_dict, vert, anno):\n cam_3d = nemo.utils.CameraTransformer(anno).get_camera_position()\n vec_ = cam_3d.reshape((1, -1)) - vert\n vec_ = vec_ / (np.sum(vec_ ** 2, axis=1, keepdims=True) ** 
0.5)\n matrix_dict = np.array([direct_dict[k] for k in direct_dict.keys()])\n return np.sum(vec_ * matrix_dict, axis=1)\n","repo_name":"wufeim/NeMo","sub_path":"nemo/utils/calculate_point_direction.py","file_name":"calculate_point_direction.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"} +{"seq_id":"44347470836","text":"class Solution:\n def backspaceCompare(self, S, T):\n l1 = self.stack(S)\n l2 = self.stack(T)\n return l1 == l2\n \n \n def stack(self, string):\n stack = []\n for char in string:\n if char == '#':\n if stack:\n stack.pop()\n else:\n stack.append(char)\n return stack\n","repo_name":"nasingfaund/leetcode","sub_path":"844. Backspace String Compare.py","file_name":"844. Backspace String Compare.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"10278475703","text":"import pygame\n\npygame.init()\n\nwindow = pygame.display.set_mode((380, 380))\npygame.display.set_caption(\"Guilherme\")\n\n\nbackground = pygame.image.load('foto2.jpg').convert()\nbackground = pygame.transform.scale(background, (380, 380))\n\nwindow.blit(background, (0, 0))\npygame.display .flip()\n\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\ngreen = (0, 255, 0)\n\nfont = pygame.font.Font(None, 36)\n\npygame.mixer.music.load('Vestron Vulture - Judas Effect.mp3')\n\npause_button = pygame.Rect(50, 300, 100, 40) \n\nplay_button = pygame.Rect(180, 300, 100, 40)\n\npygame.mixer.music.play()\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n if pause_button.collidepoint(event.pos):\n pygame.mixer.music.pause()\n\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n if play_button.collidepoint(event.pos):\n pygame.mixer.music.play()\n\n \n \n\n\n pygame.draw.rect(window, white, pause_button)\n text = font.render(\"Pause\", True, black)\n window.blit(text, (pause_button.x +15, pause_button.y + 10))\n pygame.display.update()\n\n pygame.draw.rect(window, white, play_button)\n text = font.render(\"Play\", True, black)\n window.blit(text, (play_button.x +15, pause_button.y + 10))\n pygame.display.update()","repo_name":"alexguigomes/player-de-musica-em-python","sub_path":"player-de-musica.py","file_name":"player-de-musica.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"21247918982","text":"# encoding:utf-8\r\n\"\"\"\r\nxz331@cam.ac.uk\r\nabnerzxzhao@tencent.com\r\n\"\"\"\r\nfrom __future__ import division\r\nimport tensorflow as tf\r\nfrom basemodel import basemodel\r\n\r\n\r\nclass TEMN(basemodel):\r\n \"\"\"\r\n @num_users:number of users\r\n @num_items:number of POIs\r\n @lamb_m:the margin of memory network\r\n @lamb_d:the margin of dis_model\r\n @ratio1:the influence of topic_model\r\n @ratio2:the influence of dis_model\r\n \"\"\"\r\n\r\n def __init__(self, num_users, num_items, args):\r\n print('creating my TEMN!')\r\n self.num_users = num_users\r\n self.num_items = num_items\r\n self.graph = tf.Graph()\r\n self.args = args\r\n self.stddev = self.args.stddev\r\n self.learn_rate = self.args.learn_rate\r\n self.lamb_m = args.lamb_m\r\n self.lamb_d = args.lamb_d\r\n self.ratio1 = args.ratio1\r\n self.ratio2 = args.ratio2\r\n self.attention = None\r\n self.selected_memory = None\r\n 
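# An O(1)-extra-space alternative to the stack-based backspaceCompare shown a
# few records above: scan both strings from the end, skipping characters
# consumed by '#'. Standard two-pointer technique, offered as an illustrative
# sketch rather than the original author's approach.
def backspace_compare(s: str, t: str) -> bool:
    def next_valid(string: str, i: int) -> int:
        # Walk left until we find a character that survives the backspaces.
        skip = 0
        while i >= 0:
            if string[i] == '#':
                skip += 1
            elif skip > 0:
                skip -= 1
            else:
                return i
            i -= 1
        return -1

    i, j = len(s) - 1, len(t) - 1
    while i >= 0 or j >= 0:
        i, j = next_valid(s, i), next_valid(t, j)
        if i >= 0 and j >= 0:
            if s[i] != t[j]:
                return False
        elif i >= 0 or j >= 0:  # one string ran out before the other
            return False
        i, j = i - 1, j - 1
    return True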
self.num_mem = self.args.num_mem\r\n\r\n self.initializer = self._get_initializer()\r\n self._set_opt()\r\n self._creat_model_inputs()\r\n self._build_list_network()\r\n\r\n def get_list_feed_dict(self, batch, mode='training'):\r\n def process_all_items(x_all_item):\r\n cur_all_ii = [0 for j in range(self.args.max_p_num)]\r\n for j in range(len(x_all_item)):\r\n if j < self.args.max_p_num:\r\n cur_all_ii[j] = x_all_item[j]\r\n return cur_all_ii\r\n\r\n if mode == 'training':\r\n user_input = [x[0] for x in batch]\r\n item_input = [x[1] for x in batch]\r\n uindist = [x[2] for x in batch]\r\n all_items_data = [process_all_items(x[3]) for x in batch]\r\n ll = [len(x[3]) for x in batch]\r\n item_input_neg = [x[4] for x in batch]\r\n topic_input = [x[5] for x in batch]\r\n uindistneg = [x[6] for x in batch]\r\n feed_dict = {\r\n self.user_input: user_input, # user id\r\n self.item_input: item_input, # item id\r\n self.item_input_neg: item_input_neg,\r\n self.L: ll, # number of POIs a user visited\r\n self.all_items: all_items_data, # POI list a user has visited\r\n self.label: topic_input, # user-topic from TLDA\r\n self.DIST: uindist, # distance from user to positive POI\r\n self.DIST_neg: uindistneg, # distance from user to negative POI\r\n self.dropout: self.args.dropout\r\n }\r\n else:\r\n user_input = [x[0] for x in batch]\r\n item_input = [x[1] for x in batch]\r\n uindist = [x[2] for x in batch]\r\n all_items_data = [process_all_items(x[3]) for x in batch]\r\n ll = [len(x[3]) for x in batch]\r\n feed_dict = {\r\n self.user_input: user_input,\r\n self.item_input: item_input,\r\n self.L: ll,\r\n self.DIST: uindist,\r\n self.all_items: all_items_data,\r\n self.dropout: 1\r\n }\r\n feed_dict[self.learn_rate] = self.args.learn_rate\r\n return feed_dict\r\n\r\n def _creat_model_inputs(self):\r\n self.user_input = tf.placeholder(tf.int32, shape=[None], name='user')\r\n self.item_input = tf.placeholder(tf.int32, shape=[None], name='item')\r\n self.item_input_neg = tf.placeholder(tf.int32, shape=[None], name='item_neg')\r\n self.input_type = tf.placeholder(tf.int32, shape=[None], name='type')\r\n self.dropout = tf.placeholder(tf.float32, name='dropout')\r\n self.label = tf.placeholder(tf.float32, shape=[None, self.args.topic_num], name='labels')\r\n\r\n self.learn_rate = tf.placeholder(tf.float32, name='learn_rate')\r\n self.L = tf.placeholder(tf.float32, shape=[None], name='L')\r\n self.DIST = tf.placeholder(tf.float32, shape=[None], name='DIST')\r\n self.DIST_neg = tf.placeholder(tf.float32, shape=[None], name='DIST_neg')\r\n self.all_items = tf.placeholder(tf.int32, shape=[None, self.args.max_p_num], name=\"HISTORY\")\r\n self.batch_size = tf.shape(self.item_input)[0]\r\n\r\n def _composition_layer(self, user_emb, item_emb, dist='L2', selected_memory=None):\r\n energy = item_emb - (user_emb + selected_memory)\r\n if 'L2' in dist:\r\n final_layer = -tf.sqrt(tf.reduce_sum(tf.square(energy), 1) + 1E-3)\r\n elif 'L1' in dist:\r\n final_layer = -tf.reduce_sum(tf.abs(energy), 1)\r\n else:\r\n raise Exception('Please specify distance metric')\r\n final_layer = tf.reshape(final_layer, [-1, 1])\r\n return final_layer\r\n\r\n def _get_prediction(self, user_emb, item_emb, memory_key):\r\n _key = tf.multiply(self.user_emb, self.item_emb)\r\n _key = tf.expand_dims(_key, 1)\r\n key_attention = tf.squeeze(tf.matmul(_key, memory_key))\r\n key_attention = tf.nn.softmax(key_attention)\r\n selected_memory = tf.matmul(key_attention, self.memory_value)\r\n final_layer = self._composition_layer(user_emb, item_emb, 
selected_memory=selected_memory)\r\n return final_layer\r\n\r\n def _build_list_network(self):\r\n stddev = self.stddev\r\n with tf.variable_scope('embedding_layer', initializer=self.initializer):\r\n with tf.device('/cpu:0'):\r\n self.user_item_key = tf.Variable(\r\n tf.random_normal(\r\n [self.args.embedding_size, self.num_mem],\r\n stddev=stddev))\r\n self.memories = tf.Variable(\r\n tf.random_normal(\r\n [self.num_mem, self.args.embedding_size],\r\n stddev=stddev))\r\n\r\n self.item_embeddings = tf.get_variable('item_emb', [self.num_items + 1, self.args.embedding_size],\r\n initializer=self.initializer)\r\n self.all_items_emb = tf.nn.embedding_lookup(self.item_embeddings, self.all_items)\r\n self.item_emb = tf.nn.embedding_lookup(self.item_embeddings, self.item_input)\r\n self.item_emb_neg = tf.nn.embedding_lookup(self.item_embeddings, self.item_input_neg)\r\n self.dis_W = tf.get_variable(\"W\", [self.num_users + 1, 1], initializer=self.initializer)\r\n self.dis_b = tf.get_variable(\"b\", [self.num_users + 1, 1], initializer=self.initializer)\r\n self.dis_W_item = tf.get_variable(\"W_item\", [self.num_items + 1, 1], initializer=self.initializer)\r\n\r\n if self.args.constraint:\r\n self.all_items_emb = tf.clip_by_norm(self.all_items_emb, 1.0, axes=1)\r\n self.item_emb = tf.clip_by_norm(self.item_emb, 1.0, axes=1)\r\n self.item_emb_neg = tf.clip_by_norm(self.item_emb_neg, 1.0, axes=1)\r\n\r\n self.all_items_emb = tf.transpose(self.all_items_emb, perm=[0, 2, 1])\r\n self.cur_mask = tf.sequence_mask(self.L, self.args.max_p_num)\r\n self.cur_mask = tf.expand_dims(self.cur_mask, -1)\r\n self.cur_mask = tf.transpose(self.cur_mask, perm=[0, 2, 1])\r\n kept_indices = tf.cast(self.cur_mask, dtype=tf.float32)\r\n self.all_items_emb = self.all_items_emb * kept_indices\r\n self.user_emb_sum = tf.reduce_sum(self.all_items_emb, 2)\r\n self.LL = tf.expand_dims(self.L, -1)\r\n self.user_emb = self.user_emb_sum / self.LL\r\n\r\n self.item_emb = tf.nn.embedding_lookup(self.item_embeddings, self.item_input)\r\n\r\n self.user_topic_W = tf.Variable(\r\n tf.random_normal([self.args.embedding_size, self.args.topic_num], stddev=stddev))\r\n self.user_topic_b = tf.Variable(tf.random_normal([self.args.topic_num], stddev=stddev))\r\n self.topic_out = tf.matmul(self.user_emb, self.user_topic_W) + self.user_topic_b\r\n self.predict_topic = tf.nn.softmax(self.topic_out)\r\n self.topic_cost = tf.reduce_sum(\r\n tf.nn.softmax_cross_entropy_with_logits(logits=self.topic_out, labels=self.label))\r\n\r\n self._key = tf.multiply(self.user_emb, self.item_emb)\r\n self.key_attention = tf.matmul(self._key, self.user_item_key)\r\n self.key_attention = tf.nn.softmax(self.key_attention)\r\n self.selected_memory = tf.matmul(self.key_attention, self.memories)\r\n final_layer = self._composition_layer(self.user_emb, self.item_emb,\r\n selected_memory=self.selected_memory)\r\n final_layer_neg = self._composition_layer(self.user_emb, self.item_emb_neg,\r\n selected_memory=self.selected_memory)\r\n self.predict_op = tf.squeeze(final_layer)\r\n self.mem_cost = tf.reduce_sum(tf.nn.relu((tf.squeeze(final_layer_neg - final_layer) + self.lamb_m)))\r\n\r\n self.dis_W_emb = tf.squeeze(tf.nn.embedding_lookup(self.dis_W, self.user_input))\r\n self.dis_b_emb = tf.squeeze(tf.nn.embedding_lookup(self.dis_b, self.user_input))\r\n self.dist_W_item_emb = tf.squeeze(tf.nn.embedding_lookup(self.dis_W_item, self.item_input))\r\n self.dist_W_item_emb_neg = tf.squeeze(tf.nn.embedding_lookup(self.dis_W_item, self.item_input_neg))\r\n self.Wis = 
(self.dis_W_emb * self.DIST + self.dis_b_emb + self.dist_W_item_emb * self.DIST)\r\n self.Wis_neg = (\r\n self.dis_W_emb * self.DIST_neg + self.dis_b_emb + self.dist_W_item_emb_neg * self.DIST_neg)\r\n self.dist_cost = tf.reduce_sum(tf.nn.relu((self.lamb_d - self.Wis + self.Wis_neg)))\r\n\r\n self.cost = self.mem_cost + self.topic_cost * self.ratio1 + self.dist_cost * self.ratio2\r\n if self.args.l2_reg > 0:\r\n vars = tf.trainable_variables()\r\n lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars if 'bias' not in v.name]) * self.args.l2_reg\r\n self.cost += lossL2\r\n if self.args.opt == 'SGD':\r\n self.opt = tf.train.GradientDescentOptimizer(learning_rate=self.learn_rate)\r\n elif self.args.opt == 'Adam':\r\n self.opt = tf.train.AdamOptimizer(learning_rate=self.learn_rate)\r\n elif self.args.opt == 'Adadelta':\r\n self.opt = tf.train.AdadeltaOptimizer(learning_rate=self.learn_rate)\r\n elif self.args.opt == 'Adagrad':\r\n self.opt = tf.train.AdagradOptimizer(learning_rate=self.learn_rate, initial_accumulator_value=0.9)\r\n elif self.args.opt == 'RMS':\r\n self.opt = tf.train.RMSPropOptimizer(learning_rate=self.learn_rate, decay=0.9, epsilon=1e-6)\r\n elif self.args.opt == 'Moment':\r\n self.opt = tf.train.MomentumOptimizer(self.args.learn_rate, 0.9)\r\n # grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), 1)\r\n gradients = self.opt.compute_gradients(self.cost)\r\n self.gradients = gradients\r\n\r\n def ClipIfNotNone(grad):\r\n if grad is None:\r\n return grad\r\n grad = tf.clip_by_value(grad, -10, 10, name=None)\r\n return tf.clip_by_norm(grad, self.args.clip_norm)\r\n\r\n if self.args.clip_norm > 0:\r\n clipped_gradients = [(ClipIfNotNone(grad), var) for grad, var in gradients]\r\n else:\r\n clipped_gradients = [(grad, var) for grad, var in gradients]\r\n\r\n # grads, _ = tf.clip_by_value(tf.gradients(self.cost, tvars),-10,10)\r\n self.optimizer = self.opt.apply_gradients(clipped_gradients)\r\n self.train_op = self.optimizer\r\n","repo_name":"XiaoZHOUCAM/Topic-Enhanced-Memory-Networks-for-Personalised-Point-of-Interest-Recommendation","sub_path":"-Topic-Enhanced-Memory-Networks--master/TEMN.py","file_name":"TEMN.py","file_ext":"py","file_size_in_byte":12007,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"69"} +{"seq_id":"10813833999","text":"# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a list of lists of integers\n def levelOrderBottom(self, root):\n ret = []\n if root is None:\n return ret\n\n level = 0\n curlevel = 0\n nodelist = []\n q = []\n q.append((level, root))\n while len(q) > 0:\n curlevel, node = q.pop(0)\n if curlevel != level:\n level = curlevel\n ret.append(nodelist)\n nodelist = []\n \n nodelist.append(node.val)\n if node.left != None:\n q.append((curlevel+1, node.left))\n if node.right != None:\n q.append((curlevel+1, node.right))\n\n ret.append(nodelist)\n ret.reverse()\n return ret\n\n","repo_name":"septem776/LeetCode","sub_path":"python/Binary Tree Level Order Traversal II.py","file_name":"Binary Tree Level Order Traversal II.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7224015617","text":"'''\nCreated on Apr 08, 2017\n\nOperator Overloading Python 
Tutorial\nhttps://www.youtube.com/watch?v=Qhj21B7kTkM&list=PLQVvvaa0QuDfju7ADVp5W1GF9jVhjbX-_&index=19\n\n@author: ubuntu\n'''\nimport random\nimport pygame\nfrom blob import Blob\n\nSTARTING_BLUE_BLOBS = 10\nSTARTING_RED_BLOBS = 3\nSTARTING_GREEN_BLOBS = 5\n\nWIDTH = 800\nHEIGHT = 600\n\nWHITE = (255, 255, 255)\nBLUE = (0, 0, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\n\ngame_display = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Blob World\")\nclock = pygame.time.Clock()\n\n\nclass BlueBlob(Blob):\n \n def __init__(self, x_boundary, y_boundary):\n Blob.__init__(self, BLUE, x_boundary, y_boundary)\n\n def __add__(self, other_blob):\n if other_blob.color == RED:\n self.size -= other_blob.size\n other_blob.size -= self.size\n \n elif other_blob.color == GREEN:\n self.size += other_blob.size\n other_blob.size = 0\n \n elif other_blob.color == BLUE:\n # for now, nothing. Maybe later it does something more. \n pass\n else:\n raise Exception('Tried to combine one or multiple blobs of unsupported colors!')\n \nclass RedBlob(Blob):\n \n def __init__(self, x_boundary, y_boundary):\n Blob.__init__(self, RED, x_boundary, y_boundary)\n \n \nclass GreenBlob(Blob):\n \n def __init__(self, x_boundary, y_boundary):\n Blob.__init__(self, GREEN, x_boundary, y_boundary)\n\n\n\ndef draw_environment(blob_list):\n game_display.fill(WHITE)\n\n for blob_dict in blob_list:\n for blob_id in blob_dict:\n blob = blob_dict[blob_id]\n pygame.draw.circle(game_display, blob.color, [blob.x, blob.y], blob.size)\n blob.move()\n blob.check_bounds()\n\n pygame.display.update()\n\ndef main():\n blue_blobs = dict(enumerate([BlueBlob(WIDTH,HEIGHT) for i in range(STARTING_BLUE_BLOBS)]))\n red_blobs = dict(enumerate([RedBlob(WIDTH,HEIGHT) for i in range(STARTING_RED_BLOBS)]))\n green_blobs = dict(enumerate([GreenBlob(WIDTH,HEIGHT) for i in range(STARTING_GREEN_BLOBS)]))\n \n print('Current blue size: {}. Current red size: {}'.format(str(blue_blobs[0].size), str(red_blobs[0].size)))\n\n blue_blobs[0] + red_blobs[0]\n print('Current blue size: {}. 
Current red size: {}'.format(str(blue_blobs[0].size), str(red_blobs[0].size)))\n \n# while True:\n# for event in pygame.event.get():\n# if event.type == pygame.QUIT:\n# pygame.quit()\n# quit()\n# draw_environment([blue_blobs, red_blobs, green_blobs])\n# clock.tick(60)\n\nif __name__ == '__main__':\n main()","repo_name":"wind86/learning","sub_path":"python3-intermediate-tutorial/src/lesson18.py","file_name":"lesson18.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"10871375862","text":"from dataclasses import dataclass\nfrom functools import cached_property\nfrom typing import TYPE_CHECKING, Optional\nfrom abc import ABC, abstractmethod\n\nfrom quantities import Quantity\n\nfrom structure_scripts.aisc.criteria import NOMINAL_STRENGTH\nfrom structure_scripts.aisc.helpers import (\n flexural_major_axis_yield_strength,\n flexural_minor_axis_yield_strength,\n effective_radius_of_gyration,\n limiting_length_lateral_torsional_buckling,\n limiting_length_yield,\n flexural_lateral_torsional_buckling_strength,\n flexural_lateral_torsional_buckling_strength_compact_doubly_symmetric_case_c,\n flexural_lateral_torsional_buckling_critical_stress_compact_doubly_symmetric,\n flexural_lateral_torsional_buckling_strength_compact_doubly_symmetric_case_b,\n kc_coefficient,\n)\nfrom structure_scripts.helpers import Axis\n\n\nif TYPE_CHECKING:\n from structure_scripts.aisc.sections import (\n SectionType,\n Profile,\n ProfileFlangeWeb,\n )\n\n\n@dataclass(frozen=True)\nclass BeamFlexure:\n length: Quantity\n lateral_torsional_buckling_modification_factor: float = 1\n\n\n@dataclass(frozen=True)\nclass MajorAxisFlexurePlasticYielding:\n \"\"\"F2.1 see page 103\"\"\"\n\n profile: \"Profile\"\n\n @cached_property\n def nominal_strength(self):\n return flexural_major_axis_yield_strength(\n yield_stress=self.profile.material.yield_stress,\n section_modulus=self.profile.section.Zx,\n )\n\n @cached_property\n def detailed_results(self):\n return {NOMINAL_STRENGTH: self.nominal_strength.rescale(\"N*mm\")}\n\n\n@dataclass\nclass MinorAxisFlexurePlasticYielding:\n \"\"\"F6.1 see page 111\"\"\"\n\n profile: \"Profile\"\n\n @cached_property\n def nominal_strength(self):\n return flexural_minor_axis_yield_strength(\n yield_stress=self.profile.material.yield_stress,\n plastic_section_modulus=self.profile.section.Zy,\n elastic_section_modulus=self.profile.section.Sy,\n )\n\n @cached_property\n def detailed_results(self):\n return {NOMINAL_STRENGTH: self.nominal_strength.rescale(\"N*mm\")}\n\n@dataclass(frozen=True)\nclass MajorAxisFlexureElasticYielding:\n \"\"\"F2.1 see page 103\"\"\"\n\n profile: \"Profile\"\n\n @cached_property\n def nominal_strength(self):\n return flexural_major_axis_yield_strength(\n yield_stress=self.profile.material.yield_stress,\n section_modulus=self.profile.section.Sx,\n )\n\n @cached_property\n def detailed_results(self):\n return {NOMINAL_STRENGTH: self.nominal_strength.rescale(\"N*mm\")}\n\n\n@dataclass\nclass MinorAxisFlexurePlasticYielding:\n \"\"\"F6.1 see page 111\"\"\"\n\n profile: \"Profile\"\n\n @cached_property\n def nominal_strength(self):\n return flexural_minor_axis_yield_strength(\n yield_stress=self.profile.material.yield_stress,\n plastic_section_modulus=self.profile.section.Zy,\n elastic_section_modulus=self.profile.section.Sy,\n )\n\n @cached_property\n def detailed_results(self):\n return {NOMINAL_STRENGTH: 
self.nominal_strength.rescale(\"N*mm\")}\n\n\n\n\n@dataclass(frozen=True)\nclass LateralTorsionalBuckling(ABC):\n \"\"\"F2 page 103\"\"\"\n\n profile: \"ProfileFlangeWeb\"\n modification_factor: float\n length: Quantity\n\n @property\n @abstractmethod\n def coefficient_c(self) -> float:\n pass\n\n @cached_property\n def effective_radius_of_gyration(self):\n \"\"\"F2-7 page 104\"\"\"\n return effective_radius_of_gyration(\n major_section_modulus=self.profile.section.Sx,\n minor_inertia=self.profile.section.Iy,\n warping_constant=self.profile.section.Cw,\n )\n\n @cached_property\n def limit_length_torsional_buckling(self):\n return limiting_length_lateral_torsional_buckling(\n modulus=self.profile.material.modulus_linear,\n yield_stress=self.profile.material.yield_stress,\n section_modulus=self.profile.section.Sx,\n torsional_constant=self.profile.section.J,\n effective_radius_of_gyration=self.profile.section.rts,\n distance_between_centroids=self.profile.section.ho,\n coefficient_c=self.coefficient_c,\n )\n\n @cached_property\n def limit_length_yield(self):\n return limiting_length_yield(\n modulus=self.profile.material.modulus_linear,\n radius_of_gyration=self.profile.section.ry,\n yield_stress=self.profile.material.yield_stress,\n )\n\n @cached_property\n def strength_lateral_torsion_compact_case_b(self) -> Quantity:\n \"\"\"F2-1 page 103\"\"\"\n return flexural_lateral_torsional_buckling_strength_compact_doubly_symmetric_case_b(\n length_between_braces=self.length,\n limiting_length_torsional_buckling=self.limit_length_torsional_buckling,\n limiting_length_yield=self.limit_length_yield,\n mod_factor=self.modification_factor,\n plastic_moment=self.profile.section.Zx\n * self.profile.material.yield_stress,\n section_modulus=self.profile.section.Sx,\n yield_stress=self.profile.material.yield_stress,\n )\n\n @cached_property\n def critical_stress_lateral_torsional_buckling(self) -> Quantity:\n return flexural_lateral_torsional_buckling_critical_stress_compact_doubly_symmetric(\n mod_factor=self.modification_factor,\n length_between_braces=self.length,\n modulus=self.profile.material.modulus_linear,\n coefficient_c=self.coefficient_c,\n distance_between_flange_centroids=self.profile.section.ho,\n effective_radius_of_gyration=self.effective_radius_of_gyration,\n section_modulus=self.profile.section.Sx,\n torsional_constant=self.profile.section.J,\n )\n\n @cached_property\n def strength_lateral_torsion_compact_case_c(self) -> Quantity:\n return flexural_lateral_torsional_buckling_strength_compact_doubly_symmetric_case_c(\n plastic_moment=self.profile.section.Zx,\n section_modulus=self.profile.section.Sx,\n critical_stress=self.critical_stress_lateral_torsional_buckling,\n )\n\n @cached_property\n def nominal_strength(self) -> Optional[Quantity]:\n return flexural_lateral_torsional_buckling_strength(\n case_b=self.strength_lateral_torsion_compact_case_b,\n case_c=self.strength_lateral_torsion_compact_case_c,\n length_between_braces=self.length,\n limiting_length_yield=self.limit_length_yield,\n limiting_length_torsional_buckling=self.limit_length_torsional_buckling,\n )\n\n @cached_property\n def detailed_results(self):\n return {\n \"limit_length_yield\": self.limit_length_yield,\n \"limit_length_torsional_buckling\": self.limit_length_torsional_buckling,\n NOMINAL_STRENGTH: self.nominal_strength.rescale(\"N*mm\"),\n }\n\n\n@dataclass(frozen=True)\nclass NonCompactFlangeLocalBuckling:\n \"\"\"\n lambda_p: limiting slenderness for a compact flange\n lambda_r: limiting slenderness for a 
noncompact flange\n lambda: flange slenderness ratio bf_2tf\n \"\"\"\n\n profile: \"ProfileFlangeWeb\"\n axis: Axis\n\n @cached_property\n def lambda_p(self):\n table = {\n Axis.MAJOR: self.profile.slenderness_calc_memory.flexure_major_axis.flange.compact_non_compact_limit,\n Axis.MINOR: self.profile.slenderness_calc_memory.flexure_minor_axis.flange.compact_non_compact_limit,\n }\n return table[self.axis]\n\n @cached_property\n def lambda_r(self):\n table = {\n Axis.MAJOR: self.profile.slenderness_calc_memory.flexure_major_axis.flange.non_compact_slender_limit,\n Axis.MINOR: self.profile.slenderness_calc_memory.flexure_minor_axis.flange.non_compact_slender_limit,\n }\n return table[self.axis]\n\n @cached_property\n def nominal_strength(self) -> Quantity:\n table = {\n Axis.MAJOR: (\n self.profile.flex_yield_major_axis.nominal_strength,\n self.profile.section.Sx,\n ),\n Axis.MINOR: (\n self.profile.flex_yield_minor_axis.nominal_strength,\n self.profile.section.Sy,\n ),\n }\n mp, S = table[self.axis]\n factor1 = mp - 0.7 * self.profile.material.yield_stress * S\n factor2 = (self.profile.section.bf_2tf - self.lambda_p) / (\n self.lambda_r - self.lambda_p\n )\n res = mp - factor1 * factor2\n return mp - factor1 * factor2\n\n @cached_property\n def detailed_results(self):\n return {NOMINAL_STRENGTH: self.nominal_strength.rescale(\"N*mm\")}\n\n\n@dataclass(frozen=True)\nclass SlenderFlangeLocalBuckingMajorAxis:\n profile: \"ProfileFlangeWeb\"\n\n @cached_property\n def nominal_strength(self) -> Quantity:\n return (\n 0.9\n * self.profile.material.modulus_linear\n * self.kc\n * self.profile.section\n / self.profile.section.bf_2tf**2\n )\n\n @cached_property\n def kc(self):\n return kc_coefficient(h_tw=self.profile.section.h_tw)\n\n @cached_property\n def detailed_results(self):\n return {\n \"kc_coefficient\": self.kc,\n NOMINAL_STRENGTH: self.nominal_strength.rescale(\"N*mm\"),\n }\n\n\n@dataclass(frozen=True)\nclass SlenderFlangeLocalBuckingMinorAxis:\n profile: \"ProfileFlangeWeb\"\n\n @cached_property\n def critical_stress(self) -> Quantity:\n table = {\n SectionType.C: self.profile.section.b_t,\n SectionType.W: self.profile.section.bf_2tf,\n SectionType.M: self.profile.section.bf_2tf,\n SectionType.HP: self.profile.section.bf_2tf,\n }\n return (\n 0.69\n * self.profile.material.modulus_linear\n / table[self.profile.section.type]\n )\n\n @cached_property\n def nominal_strength(self) -> Quantity:\n return self.critical_stress * self.profile.section.Sy\n\n @cached_property\n def detailed_results(self):\n return {\n \"critical_stress\": self.critical_stress,\n NOMINAL_STRENGTH: self.nominal_strength.rescale(\"N*mm\"),\n }\n","repo_name":"ruy-sevalho/structure_scripts","sub_path":"structure_scripts/aisc/flexure.py","file_name":"flexure.py","file_ext":"py","file_size_in_byte":10281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"6566708790","text":"# Correlation and visualization\n# Matej\n# 2018 May 8th\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport statsmodels.formula.api as smf\n\nif __name__ == '__main__':\n\td_core = 'E:\\\\Docs\\\\Data\\\\'\n\td_viz = d_core + 'Visualizations\\\\'\n\td_data = d_core + 'USDA ERS\\\\'\n\n\tf_marg = d_data + 'fats.xls'\n\tf_divorce = d_core + 'national_marriage_divorce_rates_00-16.xlsx'\n\n\td_marg = pd.read_excel(f_marg, sheet_name = 6, skiprows = 7, skipfooter = 12, header = None)\n\td_marg = d_marg.loc[:,[0,10]]\n\td_marg.columns = 
['Year','Marg_lbs_per_capita']\n\n\tprint(d_marg.tail(10))\n","repo_name":"mavricek/just-some-stuff","sub_path":"SpuriousCorrelation/correlation_and_visualization.py","file_name":"correlation_and_visualization.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"22488530835","text":"#!/usr/bin/python3\n\nimport numpy as np\nimport jnius_config\nimport sys\nimport math\nimport os\n\nEARTH_RADIUS = 6371000\n#EARTH_RADIUS = 6378137.0\n\nutrecht_x = 3899275.0\nutrecht_y = 348997.0\nutrecht_z = 5026376.0\n\njnius_config.set_classpath('terra121_classes')\nfrom jnius import autoclass\n\nA = 6378137.0\nB = 6356752.3142\n\nModifiedAirocean = autoclass(\"io.github.terra121.projection.ModifiedAirocean\")\nScaleProjection = autoclass(\"io.github.terra121.projection.ScaleProjection\")\nGeographicProjection = autoclass(\"io.github.terra121.projection.GeographicProjection\")\nOrientation = autoclass(\"io.github.terra121.projection.GeographicProjection$Orientation\")\n\nbase_projection = GeographicProjection.orientProjection(ModifiedAirocean(), Orientation.none)\nprojection = ScaleProjection(base_projection, 7318261.522857145, -7318261.522857145)\n\nif __name__ == \"__main__\":\n \n if len(sys.argv)==1:\n print ('converting from pipe')\n COUNTER = 0\n for line in sys.stdin:\n lines = np.array([line], dtype=str)\n v = lines\n v = np.char.split(v, \".\")\n v = np.array(list(v))[:, 0:5]\n for i in range(len(v)):\n #print( v[i, 0] + '.' + v[i, 1] + '.' + v[i, 2] )\n\n if v[i,0] == 'FILE':\n file_name = v[i, 1] + '.TXT'\n print('LOADING FILE: ' + file_name, end = '')\n with open(file_name) as fd:\n file_lines = fd.read().splitlines()\n print(' LOADED!')\n\n file_lines = np.array(file_lines, dtype=str)\n file_idx = np.where(np.char.startswith(file_lines, \"r.\"))\n file_v = file_lines[file_idx]\n file_v = np.char.split(file_v, \".\")\n file_v = np.array(list(file_v))[:, 0:5]\n\n fd_out = open(\"result.txt\", \"a\")\n for j in range(len(file_v)):\n length=len(file_v[j])\n COUNTER= COUNTER + 1\n REGIONX=float(file_v[j, 1].astype(float))\n REGIONZ=float(file_v[j, 2].astype(float))\n INT_REGIONX=int(REGIONX)\n INT_REGIONZ=int(REGIONZ)\n for z in range(0, 2):\n for x in range(0, 2):\n\n COORDS=projection.toGeo(*np.array([(REGIONX+x*2-0.5)*512, (REGIONZ+z*2-0.5)*512]))\n\n #print( 'r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + ' ' + str(COORDS[0]) + ' ' + str(COORDS[1]) + ' ' + line, end = '')\n if (length==5):\n print(\n 'r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + '.' + file_v[i, 3] + ' ' +\n '{:17.13f}'.format(COORDS[0]) + ' ' + '{:17.13f}'.format(COORDS[1]) +\n ' [' + '{:.2f}'.format( (REGIONX+x*2-0.5) ) + ',' + '{:.2f}'.format( (REGIONZ+z*2-0.5) ) + ']' +\n ' ' + file_v[j, 4] + '/' + str(COUNTER)\n )\n fd_out.write( \n 'r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + '.' + file_v[i, 3] + ' ' +\n '{:17.13f}'.format(COORDS[0]) + ' ' + '{:17.13f}'.format(COORDS[1]) +\n ' [' + '{:.2f}'.format( (REGIONX+x*2-0.5) ) + ',' + '{:.2f}'.format( (REGIONZ+z*2-0.5) ) + ']' +\n ' ' + file_v[j, 4] + '/' + str(COUNTER)\n + '\\n'\n )\n else:\n print(\n 'r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + '.' + file_v[i, 3] + ' ' +\n '{:17.13f}'.format(COORDS[0]) + ' ' + '{:17.13f}'.format(COORDS[1]) + \n ' [' + '{:.2f}'.format( (REGIONX+x*2-0.5) ) + ',' + '{:.2f}'.format( (REGIONZ+z*2-0.5) ) + ']' +\n ' ' + str(COUNTER)\n )\n fd_out.write( \n 'r' + '.' + str(INT_REGIONX) + '.' 
+ str(INT_REGIONZ) + '.' + file_v[i, 3] + ' ' +\n '{:17.13f}'.format(COORDS[0]) + ' ' + '{:17.13f}'.format(COORDS[1]) +\n ' [' + '{:.2f}'.format( (REGIONX+x*2-0.5) ) + ',' + '{:.2f}'.format( (REGIONZ+z*2-0.5) ) + ']' +\n ' ' + str(COUNTER)\n + '\\n'\n )\n\n #fd_out.write( 'r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + ' ' + str(COORDS[0]) + ' ' + str(COORDS[1]) )\n #fd_out.write(' ' + v[i, 2] + ' ' + file_v[j, 4] + '\\n')\n fd_out.close()\n os.remove(file_name)\n \n elif v[i,0] == 'r':\n length=len(v[i])\n COUNTER=COUNTER + 1\n REGIONX=float(v[i, 1].astype(float))\n REGIONZ=float(v[i, 2].astype(float))\n INT_REGIONX=int(REGIONX)\n INT_REGIONZ=int(REGIONZ)\n fd_out = open(\"result.txt\", \"a\")\n for z in range(0, 2):\n for x in range(0, 2):\n COORDS=projection.toGeo(*np.array([(REGIONX+x*2-0.5)*512, (REGIONZ+z*2-0.5)*512]))\n if (length==5):\n print( \n 'r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + '.' + v[i, 3] + ' ' +\n '{:17.13f}'.format(COORDS[0]) + ' ' + '{:17.13f}'.format(COORDS[1]) +\n ' [' + '{:.2f}'.format( (REGIONX+x*2-0.5) ) + ',' + '{:.2f}'.format( (REGIONZ+z*2-0.5) ) + ']' +\n ' ' + v[i, 4] + '/' + str(COUNTER)\n )\n fd_out.write( \n 'r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + '.' + v[i, 3] + ' ' +\n '{:17.13f}'.format(COORDS[0]) + ' ' + '{:17.13f}'.format(COORDS[1]) +\n ' [' + '{:.2f}'.format( (REGIONX+x*2-0.5) ) + ',' + '{:.2f}'.format( (REGIONZ+z*2-0.5) ) + ']' +\n ' ' + v[i, 4] + '/' + str(COUNTER)\n + '\\n'\n )\n else:\n print( \n 'r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + '.' + v[i, 3] + ' ' +\n '{:17.13f}'.format(COORDS[0]) + ' ' + '{:17.13f}'.format(COORDS[1]) +\n ' [' + '{:.2f}'.format( (REGIONX+x*2-0.5) ) + ',' + '{:.2f}'.format( (REGIONZ+z*2-0.5) ) + ']' +\n ' ' + str(COUNTER)\n )\n fd_out.write( \n 'r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + '.' + v[i, 3] + ' ' +\n '{:17.13f}'.format(COORDS[0]) + ' ' + '{:17.13f}'.format(COORDS[1]) +\n ' [' + '{:.2f}'.format( (REGIONX+x*2-0.5) ) + ',' + '{:.2f}'.format( (REGIONZ+z*2-0.5) ) + ']' +\n ' ' + str(COUNTER)\n + '\\n'\n )\n \n \n fd_out.close()\n else:\n print( 'UNKNOWN: ' + line, end = '')\n \n \n\n# for i in range(len(v)):\n# INT_REGIONX=int(v[i, 1].astype(float))\n# INT_REGIONZ=int(v[i, 2].astype(float))\n# REGIONX=float(v[i, 1].astype(float))\n# REGIONZ=float(v[i, 2].astype(float))\n# COORDS=projection.toGeo(*np.array([REGIONX*512, REGIONZ*512]))\n# print( 'r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + ' ' + str(COORDS[0]) + ' ' + str(COORDS[1]) + ' ' + line, end = '')\n# fd = open(\"result.txt\", \"a\")\n# fd.write('r' + '.' + str(INT_REGIONX) + '.' + str(INT_REGIONZ) + ' ' + str(COORDS[0]) + ' ' + str(COORDS[1]) )\n# fd.write(' ' + line)\n# fd.close()\n\n if len(sys.argv)==2:\n # print (str(len(sys.argv)))\n print ('converting ' + sys.argv[1])\n with open(sys.argv[1]) as fd:\n lines = fd.read().splitlines()\n print('Read ' + sys.argv[1])\n\n lines = np.array(lines, dtype=str)\n idx = np.where(np.char.startswith(lines, \"r.\"))\n v = lines[idx]\n v = np.char.split(v, \".\")\n v = np.array(list(v))[:, 1:3].astype(float)\n print( 'r' + '.' + str(v[:, 0]) + '.' + str(v[:, 1]) + ' - ' + str(v[:, 1]) )\n #print( 'r' + '.' + v[:, 0] + '.' + v[:, 1] )\n print( v )\n\n for i in range(len(v)):\n REGIONX=float(v[i, 0])\n REGIONZ=float(v[i, 1])\n COORDS=projection.toGeo(*np.array([REGIONX*512, REGIONZ*512]))\n print( 'r' + '.' + str(v[i, 0]) + '.' 
+ str(v[i, 1]) + ' ' + str(COORDS[0]) + ' ' + str(COORDS[1]) )\n\n #o_out = []\n \n \n #o_out.append(\"{}\".format('r' + '.' + str(v[i, 0]) + '.' + str(v[i, 1]) + ' ' + str(COORDS[0]) + ' ' + str(COORDS[1])))\n\n #o_out.append( 'r' + '.' + str(v[i, 0]) + '.' + str(v[i, 1]) + ' ' + str(COORDS[0]) + ' ' + str(COORDS[1]) )\n #o_out = np.array(o_out, dtype=str)\n #lines[idx] = o_out\n fd = open(\"result.txt\", \"a\")\n fd.write('r' + '.' + str(v[i, 0]) + '.' + str(v[i, 1]) + ' ' + str(COORDS[0]) + ' ' + str(COORDS[1]) )\n fd.write(\"\\n\")\n fd.close()\n\n\n\n\n #print( str(COORDS[0]) + ' ' + str(COORDS[1]) )\n \n\n\n #v_out.append(\"v {} {} {}\".format(v[i, 0], v[i, 1], v[i, 2]))\n\n### lines = np.array(lines, dtype=str)\n### v = lines.char.split(v, \".\")\n### print(v[:, 0] + '.' + v[:, 1] + '.' + v[:, 2])\n\n### #lines = np.array(lines, dtype=np.str)\n### lines = np.array(lines, dtype=str)\n### #lines = lines[np.logical_not(np.char.startswith(lines, \"vn \"))] # delete vertex normals\n### \n### #Utrecht\n### offset_x=3899275.0\n### offset_y=348997.0\n### offset_z=5026376.0\n### \n### \n### #v 3268023.6848134077 27.84330720361322 -5319793.639094348\n### \n### \n### # extract vertices\n### idx = np.where(np.char.startswith(lines, \"v \"))\n### v = lines[idx]\n### v = np.char.split(v, \" \")\n### v = np.array(list(v))[:, 1:].astype(float)\n### \n### \n### o = v\n### \n### \n### v[:, 0]+=offset_x\n### v[:, 1]+=offset_y\n### v[:, 2]+=offset_z\n### \n### \n### \n### # convert to lat/lon/ele\n### rad = np.linalg.norm(v, axis=1)[None, :]\n### lat = np.arcsin(v[:, 2]/rad)*180/np.pi\n### lon = (np.arctan2(v[:, 1], v[:, 0])*180/np.pi)[None, :]\n### rad -= EARTH_RADIUS # TODO: find the correct way to get elevation (this is bad but ellipsoid was worse)\n### v = np.array([lat, lon, rad]).transpose()[:, 0]\n### \n### # pick the first point, and use it as the origin to find the local transformation matrix\n### old_origin = v[0, :2]\n### new_origin = np.array(projection.fromGeo(old_origin[1], old_origin[0]))\n### i = np.array(projection.fromGeo(old_origin[1], old_origin[0] + 0.01)) - new_origin\n### j = np.array(projection.fromGeo(old_origin[1] + 0.01, old_origin[0])) - new_origin\n### basis = 100*np.array((i, j))\n### \n### # apply the transformation to every lat,lon in the array\n### v[:, :2] -= old_origin\n### v[:, :2] = np.einsum(\"ij,ni->nj\", basis, v[:, :2])\n### v[:, :2] += new_origin\n### \n### # swap y and z because minecraft is sideways\n### v[:, 2], v[:, 1] = v[:, 1].copy(), v[:, 2].copy()\n### \n### o[:, 2]=v[:, 1]\n### \n### o_out = []\n### for i in range(len(o)):\n### o_out.append(\"v {} {} {}\".format(o[i, 0]-offset_x, o[i, 1]-offset_y, o[i, 2]))\n### o_out = np.array(o_out, dtype=str)\n### \n### lines[idx] = o_out\n### \n### if len(sys.argv)==3:\n### outfile=sys.argv[2]\n### elif len(sys.argv)==1:\n### outfile='out.obj'\n### \n### #with open(\"out2.obj\", \"w\") as fd:\n### with open(outfile, \"w\") as fd:\n### fd.write(\"\\n\".join(lines))\n### \n### \n### \n### # convert to string\n### v_out = []\n### for i in range(len(v)): \n### v_out.append(\"v {} {} {}\".format(v[i, 0], v[i, 1], v[i, 2]))\n### v_out = np.array(v_out, dtype=str)\n### \n### lines[idx] = v_out\n### with open(\"out2.obj\", \"w\") as fd:\n### 
fd.write(\"\\n\".join(lines))\n","repo_name":"HakkaTjakka/MinecraftWorldEditor","sub_path":"project-obj/list_to_geo.py","file_name":"list_to_geo.py","file_ext":"py","file_size_in_byte":13128,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"} +{"seq_id":"36076509010","text":"n, m = map(int, input().split())\ndata = [] # 초기 맵 리스트\ntemp = [[0] * m for _ in range(n)] # 벽을 설치한 뒤의 맵 리스트\n\nfor _ in range(n):\n    data.append(list(map(int, input().split())))\n\n\n# 4가지 이동방향\ndx = [-1,0,1,0]\ndy = [0,-1,0,1]\n\nresult = 0\n\n# 깊이 우선 탐색(DFS)를 이용해 각 바이러스가 사방으로 퍼지도록 하기\ndef virus(x, y):\n    for i in range(4):\n        nx = x + dx[i]\n        ny = y + dy[i]\n        # 상하좌우 중에서 바이러스가 퍼질 수 있는 경우 \n        if nx >= 0 and nx < n and ny >= 0 and ny < m:\n            if temp[nx][ny] == 0:\n                # 해당 위치에 바이러스 배치, 다시 재귀적으로 수행\n                temp[nx][ny] = 2\n                virus(nx,ny)\n    # 재귀때는, input값을 같게 하자.\n\n# 현재 맵에서 안전 영역의 크기 계산하는 메서드\ndef get_score():\n    score = 0\n    for i in range(n):\n        for j in range(m):\n            if temp[i][j] == 0:\n                score += 1\n    return score\n\n# 깊이 우선 탐색(DFS)를 이용하여 울타리를 설치하면서, 매번 안전 영역의 크기 계산\ndef dfs(count):\n    global result\n    if count == 3:\n        for i in range(n):\n            for j in range(m):\n                temp[i][j] = data[i][j]\n\n        # 각 바이러스의 위치에서 전파 진행\n        for i in range(n):\n            for j in range(m):\n                if temp[i][j] == 2:\n                    virus(i, j)\n\n        # 안전영역의 최댓값 계산\n        result = max(result, get_score())\n        return\n\n    # 빈 공간에 울타리 설치\n    for i in range(n):\n        for j in range(m):\n            if data[i][j] == 0:\n                data[i][j] = 1\n                count += 1\n                dfs(count)\n                data[i][j] = 0 # dfs 이후에 울타리 뺌\n                count -= 1\n\ndfs(0)\nprint(result)\n","repo_name":"hoeen/coding_test_training","sub_path":"notes/취코테_dfsbfs_16_연구소_책풀이.py","file_name":"취코테_dfsbfs_16_연구소_책풀이.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"ko","doc_type":"code","stars":10,"dataset":"github-code","pt":"69"} +{"seq_id":"6726785190","text":"weight = float(input(\"Peso(kg) medido: \"))\n\nLIMIT = 50.0\nexcess = 0.0\nassessment = 0.0\n\nif weight > LIMIT:\n    excess = weight-LIMIT\n    assessment = excess * 4.00\n\nprint(\"Peso medido: {:.2f}kg || Excedido: {:.2f}kg || Multa: R${:.2f}\".format(weight,excess,assessment))","repo_name":"carlos-r-goularte/PYTHON","sub_path":"exercíciosPythonWiki/EstruturaSequencial/ex14-weightLimit.py","file_name":"ex14-weightLimit.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"32353974096","text":"from itertools import cycle\n\nfrom bokeh.core.enums import LineDash, MarkerType, NamedColor\nfrom bokeh.models import Column, LegendItem, Panel\nfrom bokeh.models.widgets import Button, ColorPicker, Select, Slider, TextInput\nfrom bokeh.palettes import Category10_10  # pylint: disable=no-name-in-module\n\nCOLORS = cycle(Category10_10)\n\nprettify = lambda p: p.replace(\"_\", \" \").capitalize()\n\n\nclass Series:\n    labels = []\n    id = 0\n\n    def __init__(self, label, plot, renderer):\n        self.id = Series.id\n        Series.id += 1\n        while label in self.labels:\n            try:\n                last_number = int(label.split(\" \")[-1])\n                last_number += 1\n                label = label[: -len(str(last_number))] + str(last_number)\n            except:\n                label = label + \" 1\"\n        self.labels.append(label)\n        self.plot = plot\n        self.glyph = renderer.glyph\n        legend_item = LegendItem(label=label, renderers=[renderer])\n        self.plot.legend.items.append(legend_item)\n        self.legend_label = TextInput(title=\"Legend label\", value=label)\n        self.legend_label.on_change(\"value\", self.update_legend_label)\n        self.panel = 
Panel(child=Column(self.legend_label, width=360), title=label)\n panel_children = self.panel.child.children # pylint:disable=no-member\n panel_children.append(get_widgets(self.glyph))\n self.delete_button = Button(label=\"Delete glyph\", button_type=\"danger\")\n panel_children.append(self.delete_button)\n\n def update_legend_label(self, attr, old, new):\n legend_items = self.plot.legend.items\n for item in legend_items:\n if self.glyph in [r.glyph for r in item.renderers]:\n item.label[\"value\"] = new\n legend_items = list(legend_items)\n self.plot.legend.items = []\n self.plot.legend.items = legend_items\n\n\ndef line_series(plot, **kw):\n \"\"\"Adidiona um glyph do tipo line ao gráfico.\n\n Args:\n plot (bokeh.plotting.Figure): gráfico.\n kw: keyword-argument's para o glyph.\n\n Return:\n Series: objeto associado com o glyph no gráfico.\n \"\"\"\n renderer = plot.line(line_color=next(COLORS), line_width=2, **kw)\n return Series(\"Line 1\", plot, renderer)\n\n\ndef scatter_series(plot, **kw):\n \"\"\"Adidiona um glyph do tipo scatter ao gráfico.\n\n Args:\n plot (bokeh.plotting.Figure): gráfico.\n kw: keyword-argument's para o glyph.\n\n Return:\n Series: objeto associado com o glyph no gráfico.\n \"\"\"\n renderer = plot.scatter(color=next(COLORS), line_width=1, **kw)\n return Series(\"Scatter 1\", plot, renderer)\n\n\ndef get_widgets(model):\n widgets_list = []\n for p, v in model.properties_with_values().items():\n if isinstance(v, dict):\n if \"value\" in v:\n v = v.get(\"value\")\n else:\n continue\n if v is None:\n continue\n\n kw = dict(title=prettify(p), name=prettify(p), value=v, width=360)\n if \"alpha\" in p:\n w = Slider(start=0, step=0.05, end=1, **kw)\n elif \"color\" in p:\n if v in list(NamedColor):\n w = Select(options=list(NamedColor), **kw)\n else:\n kw.pop(\"value\")\n w = ColorPicker(color=v, **kw)\n elif p.endswith(\"width\"):\n w = Slider(start=0, step=0.2, end=5, **kw)\n elif \"marker\" in p:\n w = Select(options=list(MarkerType), **kw)\n elif p == \"size\":\n w = Slider(start=0, step=1, end=20, **kw)\n elif p.endswith(\"text\") or p.endswith(\"label\"):\n w = TextInput(**kw)\n else:\n continue\n if isinstance(w, ColorPicker):\n w.js_link(\"color\", model, p)\n else:\n w.js_link(\"value\", model, p)\n widgets_list.append(w)\n if widgets_list:\n return Column(*sorted(widgets_list, key=lambda w: w.name), width=380)\n return None\n","repo_name":"wevertonms/bokeh_chart_maker","sub_path":"bokeh_chart_maker/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"16815667917","text":"import click\n\nfrom unittest import TestCase\nfrom unittest.mock import Mock, patch, call\nfrom samcli.commands.build.click_container import ContainerOptions\n\n\n@patch(\"samcli.commands.build.click_container.ContainerOptions\")\nclass TestContainerOptionsSucceeds(TestCase):\n ctx_mock = Mock()\n opts = {\"container_env_var\": [\"hi=in\"], \"use_container\": True, \"resource_logical_id\": None}\n ContainerOptionsMock = Mock()\n ContainerOptionsMock.handle_parse_result.return_value = \"value\"\n\n def test_container_options(self, ContainerOptionsMock):\n self.assertEqual(self.ContainerOptionsMock.handle_parse_result(self.ctx_mock, self.opts, []), \"value\")\n\n\nclass TestContainerOptionsFails(TestCase):\n ctx_mock = Mock()\n opts = {\"container_env_var\": [\"hi=in\"], \"resource_logical_id\": None}\n args = [\"--container-env-var\"]\n container_opt = 
ContainerOptions(args)\n\n    def test_container_options_failure(self):\n        with self.assertRaises(click.UsageError) as err:\n            self.container_opt.handle_parse_result(self.ctx_mock, self.opts, [])\n        self.assertEqual(\n            str(err.exception),\n            \"Missing required parameter, need the --use-container flag in order to use --container-env-var flag.\",\n        )\n","repo_name":"aws/aws-sam-cli","sub_path":"tests/unit/commands/buildcmd/test_container_options.py","file_name":"test_container_options.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":6381,"dataset":"github-code","pt":"69"} +{"seq_id":"20059512015","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/8/27 21:34\n# @Author : yimi\n# @File : class1_0827.py\nfrom selenium import webdriver\nimport time\n# -------------------窗口相关操作--------------------\n# 通过webdriver打开Chrome浏览器\ndriver = webdriver.Chrome()\n# 全屏操作\ndriver.maximize_window()\n\ndriver.implicitly_wait(30)\n\n# 设置窗口大小\n# driver.set_window_size(800, 600)\n# 访问一个网页\ndriver.get(\"http://www.baidu.com\")\ntime.sleep(10)\ndriver.get(\"http://www.taobao.com\")\ntime.sleep(10)\n\n# 获取页面标题,url,窗口句柄\nprint(driver.title)\nprint(driver.current_url)\nprint(driver.current_window_handle)\n\n# 在Chrome历史记录中回到上一个页面\ndriver.back()\ntime.sleep(5)\n# 刷新\ndriver.refresh()\ntime.sleep(2)\n# 在Chrome历史记录中回到下一个页面\n# driver.forward()\n# time.sleep(5)\n\n#关闭当前窗口\n# time.sleep(10)\n# driver.close()\n\n# 关闭chrome及Chromedriver\ntime.sleep(10)\ndriver.quit()\n\n# --------------------查找页面元素---------------------\n# F12 --> Elements\n\ndriver = webdriver.Chrome()\ndriver.maximize_window()\ndriver.get(\"http://www.baidu.com\")\n\n# 1根据id\n# driver.find_element_by_id(\"id\")\nele_id= driver.find_element_by_id(\"kw\")\n\n# 2 根据name属性\n# ele = driver.find_element_by_name(\"name\")\nele_name = driver.find_element_by_name('wd')\n# 找多个元素---列表\n# ele_name = driver.find_elements_by_name('wd')\nprint(ele_name.tag_name)\nprint(ele_name.text)\n\n# 3 根据class 属性 - 参数只能是一个参数值\nele_class = driver.find_element_by_class_name('wd')\n\n# 4 根据标签名\nele_mark = driver.find_element_by_tag_name('wd')\n\n# 5 根据链接 -a\nele_link_all = driver.find_element_by_link_text(\"新闻\")\nele_link_part = driver.find_element_by_partial_link_text(\"产品\")\n\nprint(ele_link_part.tag_name)\nprint(ele_link_part.text)\n\n# 标签名为input的输入操作\nele_id.send_keys(\"柠檬班\")\n\ntime.sleep(10)\ndriver.quit()\n\n","repo_name":"yujingmu/test_web","sub_path":"Web_AutoTest_Basic/class1_0827_findelement.py","file_name":"class1_0827_findelement.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"75054553820","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('statement', '0002_balance'),\n    ]\n\n    operations = [\n        migrations.AlterModelOptions(\n            name='balance',\n            options={'verbose_name': 'Balance', 'verbose_name_plural': 'Balance', 'ordering': ['date']},\n        ),\n    ]\n","repo_name":"delete/estofadora","sub_path":"estofadora/statement/migrations/0003_auto_20160115_1619.py","file_name":"0003_auto_20160115_1619.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"} +{"seq_id":"30962111538","text":"\n#\n# 스택\n# LIFO Last In First Out\n# 마지막으로 들어간 요소가 처음으로 나옴\n# 프링글스\n# 리스트 pop()은 O(k) 시간복잡도를 가짐\nstack = 
[]\nstack.append(1)\nstack.append(3)\nstack.append(5)\nstack.append(7)\nstack.pop()\n\n# 큐\n# FIFO First In First Out\n# 처음으로 들어간 요소가 처음 나옴\n# 줄서기\n\nfrom collections import deque\n\nqueue = deque()\n\nqueue.append(1)\nqueue.append(3)\nqueue.append(5)\nqueue.append(7)\nqueue.popleft()\n# 가장 처음 들어온게 맨 왼쪽이므로 popleft()\n# 현재 형태는 왼쪽부터 들어가므로\n# queue.reverse()를 사용하여 반대로 오른쪽 형태로 바꿀수 있음\n\n\n\n# 재귀함수(Recursive Function)\n# 자기 자신을 다시 호출하는 함수\n# 복잡한 알고리즘을 간결하게 작성할 수 있다\n# 컴퓨터가 함수를 연속적으로 호출하면 컴퓨터 메모리 내부의 스택 프레임에 쌓인다\n# 예시 유클리드 호제법(최대공약수)\n\ndef gcd(a,b):\n if a%b==0:\n return b\n else:\n return gcd(b, a % b)\n\n\n\n\n# DFS (Depth-Fisrt Search)\n# DFS는 깊이 우선 탐색이라고도 부르며 그래프에서 깊은 부분을 우선적으로 탐색하는 알고리즘\n# DFS는 스택 자료구조(혹은 재귀함수)를 이용한다\n# 1. 탐색 노드를 스택에 삽입하고 방문처리\n# 2. 스택의 최상단 노드에 방문하지 않은 인접한 노드가 하나라도 있으면 그 노드를 스택에 넣고 방문처리.\n# 방문하지 않은 인접 노드가 없으면 스택에서 최상단 노드를 꺼냄\n# 3. 더이상 2번의 과정을 수행할 수 없을 때까지 반복\n\n\n\n## BFS (Breath-First Search)\n# BFS는 너비 우선 탐색이라고도 부르며, 그래프에서 가장 가까운 노드부터 우선적으로 탐색하는 알고리즘\n# BFS는 큐 자료구조를 이용한다\n# 1. 탐색 시작 노드를 큐에 삽입하고 방문처리\n# 2. 큐에서 노드를 꺼낸 뒤에 해당 노드의 인접 노드 중에서 방문하지 않은 노드를 모두 큐에 삽입하고 방문처리\n# 3. 더이상 2번의 과정을 수행할 수 없을 때까지 반복\n\n\n\n## 음료수 얼려먹기\n# N*M크기의 얼음 틀\n# 구멍이 뚫려 있는 부분은 0, 칸막이가 존재하는 부분은 1\n# 구멍이 뚫려있는 부분끼리 상,하,좌,우로 붙어 있는 경우 서로 연결되어 있는 것으로 건주\n# 이때 얼음 틀 모양이 주어졌을 때 생성되는 총 아이스크림의 개수를 구하는 프로그램\n#\n# 입력조건\n# 첫 번째 줄에 얼음 틀의 세로 길이 N과 가로 길이 M이 주어진다 (1<=N, M<=1000)\n# 두번째 줄 부터 N+1번째 줄까지 얼음 틀의 형태가 주어진다\n# 이때 구멍이 뚫려있는 부분은 0, 그렇지 않은 부분은 1\n# 풀이시간 30초, 시간제한 1초, 메모리 제한 128MB\n# 입력 예시\n# 4 5\n# 00110\n# 00011\n# 11111\n# 00000\n# 출력 예시\n# 3\n\n# n, m = map(int, input().split())\n# ice = []\n# for _ in range(n):\n# ice.append(list(map(int, input())))\n#\n# result = 0\n# def dfs(x,y):\n# if x<=-1 or x>=n or y<=-1 or y>=m:\n# return False\n# if ice[x][y] == 0:\n# ice[x][y] = 1\n# dfs(x - 1,y)\n# dfs(x, y - 1)\n# dfs(x + 1, y)\n# dfs(x, y + 1)\n# return True\n# return False\n#\n# for i in range(n):\n# for j in range(m):\n# if dfs(i,j) == True:\n# result += 1\n# print(result)\n\n# 그러니까 n,m입력 받은만큼 2중 포문으로 전부 돌리고\n# 0,0을 들어가보고 방문처리(1로 변환)\n# 왼, 오, 위, 아래, 다 보고\n# 방문 안한 지점이고, 숫자가 0이면 계속(재귀함수)\n# 만약 1이면 취소\n# 0을 다 찾았으면(더 이상 없으면) (0,0)부터 시작했으니 (0,1)부터 계속하기\n\n\n\n\n# N*M 크기의 직사각형 형태의 미로에 갇혔습니다\n# 위치는 (1,1)이며 출구는 (n,m)의 위치에 존재하며, 한 번에 한 칸씩 이동할 수 있다\n# 이때 괴물이 있는 부분은 0, 없는 부분은 1로 표시\n# 탈출하기 위헤 움직여야 하는 최소 칸의 개수를 구하라(깊이 우선방식인듯)\n# 칸을 셀 때는 시작칸과 마지막 칸을 모두 포함해서 계산\n#\n# 첫째줄에 두 정수 N,M(4<=n, m<=200)이 주어짐\n# 다음 n개의 줄에는 각각 M개의 정수(0 혹은 1)로 미로의 정보가 주어진다\n# 시작칸과 마지막칸은 항상 1\n# 시간 제한 30분\n# 입력 예시\n# 5 6\n# 101010\n# 111111\n# 000001\n# 111111\n# 111111\n# 출력 예시\n# 10\n\nfrom collections import deque\n\ndef maze_runner(x,y):\n queue = deque()\n queue.append((x,y))\n\n while queue:\n x, y = queue.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n if nx<0 or nx>=n or ny<0 or ny>=m:\n continue\n\n if maze[nx][ny] == 0:\n continue\n\n if maze[nx][ny] == 1:\n maze[nx][ny] = maze[x][y] + 1\n queue.append((nx,ny))\n return maze[n-1][m-1]\n\nn, m = map(int, input().split())\nmaze = []\nfor i in range(n):\n maze.append(list(map(int, input())))\n\n\ndx = [-1,1,0,0]\ndy = [0,0,-1,1]\n\nprint(maze_runner(0, 0))\n\n\n","repo_name":"NGG-kang/DjangoReview","sub_path":"DataStructure_Algorithm/DFS_BFS.py","file_name":"DFS_BFS.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"17583991027","text":"from . 
import *\nfrom .sd_maillage import sd_maillage\nfrom .sd_titre import sd_titre\n\n\nclass sd_gfibre(sd_titre):\n#-------------------------------------\n nomj = SDNom(fin=8)\n POINTEUR = AsVI()\n CARFI = AsVR()\n NOMS_GROUPES = AsPn(ltyp=24)\n NB_FIBRE_GROUPE = AsVI()\n TYPE_GROUPE = AsVI()\n GFMA = Facultatif(AsVK8(lonmax=1))\n CARACSD = AsVI(lonmax=3, )\n\n def u_caracsd(self):\n caracsd = self.CARACSD.get()\n nbgfsd = caracsd[0]\n nbcarasd = caracsd[1:3]\n return nbgfsd,nbcarasd\n\n def check_dimension(self,checker):\n nbgfsd, nbcarasd = self.u_caracsd()\n nbgf = self.NOMS_GROUPES.nommax\n # Vérif des dimensions des objets\n assert nbgf == nbgfsd, (nbgf, nbgfsd)\n assert self.NB_FIBRE_GROUPE.lonmax == nbgf\n assert self.TYPE_GROUPE.lonmax == nbgf\n assert self.POINTEUR.lonmax == nbgf\n\n def check_CARFI(self,checker) :\n nbgfsd, nbcarasd = self.u_caracsd()\n nbgf = self.NOMS_GROUPES.nommax\n #\n assert nbgf == nbgfsd , (nbgf, nbgfsd)\n #\n pointeur = self.POINTEUR.get()\n nb_fibre = self.NB_FIBRE_GROUPE.get()\n ty_groupe = self.TYPE_GROUPE.get()\n nbfib_tot = 0\n for igf in range(nbgf) :\n assert ty_groupe[igf] in [1,2], (ty_groupe[igf])\n assert pointeur[igf] == nbfib_tot+1 , (nbcarasd[ty_groupe[igf]-1], igf, nbfib_tot, pointeur[igf])\n nbfib_tot += nb_fibre[igf]*nbcarasd[ty_groupe[igf]-1]\n assert self.CARFI.lonmax == nbfib_tot , (nbfib_tot, self.CARFI.lonmax)\n\n def check_GFMA(self,checker):\n if not self.GFMA.exists: return\n gfma = self.GFMA.get_stripped()\n sd2=sd_maillage(gfma[0])\n sd2.check(checker)\n","repo_name":"ehmoussi/code_aster","sub_path":"code_aster/SD/sd_gfibre.py","file_name":"sd_gfibre.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"36576572006","text":"#from collections import defaultdict\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the checkMagazine function below.\ndef checkMagazine(magazine, note):\n #nls=note.split()\n #mls=magazine.split()\n f=0\n for i in note:\n if i not in magazine:f=1;break\n else: magazine.pop(magazine.index(i))\n if(f==0): print(\"Yes\") \n else:print(\"No\")\n #dic=defaultdict(int)\n #for i in note:\n # dic[i]+=1\n #for i in magazine:\n # if(dic[i]==0): print(\"No\");f=1\n # else: dic[i]-=1\n #if(f==0):print(\"Yes\")\nif __name__ == '__main__':\n mn = input().split()\n\n m = int(mn[0])\n\n n = int(mn[1])\n\n magazine = input().rstrip().split()\n\n note = input().rstrip().split()\n\n checkMagazine(magazine, note)\n#this logic is getting timed out for two test cases\n#the code in the commented part is not mine it is a external code the code in the non-commented part is mine\n","repo_name":"Binay28/Binay-s_Code","sub_path":"Interview_prep_code/Hash_Tables_Ransom_Notes.py","file_name":"Hash_Tables_Ransom_Notes.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"229709014","text":"import os.path\nimport re\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import NamedTuple\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import InvalidManifestError\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.clientlib import load_manifest\nfrom pre_commit.clientlib import LOCAL\nfrom pre_commit.clientlib import 
META\nfrom pre_commit.commands.migrate_config import migrate_config\nfrom pre_commit.store import Store\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import tmpdir\nfrom pre_commit.util import yaml_dump\nfrom pre_commit.util import yaml_load\n\n\nclass RevInfo(NamedTuple):\n repo: str\n rev: str\n frozen: Optional[str]\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> 'RevInfo':\n return cls(config['repo'], config['rev'], None)\n\n def update(self, tags_only: bool, freeze: bool) -> 'RevInfo':\n git_cmd = ('git', *git.NO_FS_MONITOR)\n\n if tags_only:\n tag_cmd = (\n *git_cmd, 'describe',\n 'FETCH_HEAD', '--tags', '--abbrev=0',\n )\n else:\n tag_cmd = (\n *git_cmd, 'describe',\n 'FETCH_HEAD', '--tags', '--exact',\n )\n\n with tmpdir() as tmp:\n git.init_repo(tmp, self.repo)\n cmd_output_b(\n *git_cmd, 'fetch', 'origin', 'HEAD', '--tags',\n cwd=tmp,\n )\n\n try:\n rev = cmd_output(*tag_cmd, cwd=tmp)[1].strip()\n except CalledProcessError:\n cmd = (*git_cmd, 'rev-parse', 'FETCH_HEAD')\n rev = cmd_output(*cmd, cwd=tmp)[1].strip()\n\n frozen = None\n if freeze:\n exact_rev_cmd = (*git_cmd, 'rev-parse', rev)\n exact = cmd_output(*exact_rev_cmd, cwd=tmp)[1].strip()\n if exact != rev:\n rev, frozen = exact, rev\n return self._replace(rev=rev, frozen=frozen)\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _check_hooks_still_exist_at_rev(\n repo_config: Dict[str, Any],\n info: RevInfo,\n store: Store,\n) -> None:\n try:\n path = store.clone(repo_config['repo'], info.rev)\n manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))\n except InvalidManifestError as e:\n raise RepositoryCannotBeUpdatedError(str(e))\n\n # See if any of our hooks were deleted with the new commits\n hooks = {hook['id'] for hook in repo_config['hooks']}\n hooks_missing = hooks - {hook['id'] for hook in manifest}\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n f'Cannot update because the update target is missing these '\n f'hooks:\\n{\", \".join(sorted(hooks_missing))}',\n )\n\n\nREV_LINE_RE = re.compile(r'^(\\s+)rev:(\\s*)([\\'\"]?)([^\\s#]+)(.*)(\\r?\\n)$')\n\n\ndef _original_lines(\n path: str,\n rev_infos: List[Optional[RevInfo]],\n retry: bool = False,\n) -> Tuple[List[str], List[int]]:\n \"\"\"detect `rev:` lines or reformat the file\"\"\"\n with open(path, newline='') as f:\n original = f.read()\n\n lines = original.splitlines(True)\n idxs = [i for i, line in enumerate(lines) if REV_LINE_RE.match(line)]\n if len(idxs) == len(rev_infos):\n return lines, idxs\n elif retry:\n raise AssertionError('could not find rev lines')\n else:\n with open(path, 'w') as f:\n f.write(yaml_dump(yaml_load(original)))\n return _original_lines(path, rev_infos, retry=True)\n\n\ndef _write_new_config(path: str, rev_infos: List[Optional[RevInfo]]) -> None:\n lines, idxs = _original_lines(path, rev_infos)\n\n for idx, rev_info in zip(idxs, rev_infos):\n if rev_info is None:\n continue\n match = REV_LINE_RE.match(lines[idx])\n assert match is not None\n new_rev_s = yaml_dump({'rev': rev_info.rev}, default_style=match[3])\n new_rev = new_rev_s.split(':', 1)[1].strip()\n if rev_info.frozen is not None:\n comment = f' # frozen: {rev_info.frozen}'\n elif match[5].strip().startswith('# frozen:'):\n comment = ''\n else:\n comment = match[5]\n lines[idx] = f'{match[1]}rev:{match[2]}{new_rev}{comment}{match[6]}'\n\n with open(path, 'w', newline='') as f:\n f.write(''.join(lines))\n\n\ndef 
autoupdate(\n config_file: str,\n store: Store,\n tags_only: bool,\n freeze: bool,\n repos: Sequence[str] = (),\n) -> int:\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n migrate_config(config_file, quiet=True)\n retv = 0\n rev_infos: List[Optional[RevInfo]] = []\n changed = False\n\n config = load_config(config_file)\n for repo_config in config['repos']:\n if repo_config['repo'] in {LOCAL, META}:\n continue\n\n info = RevInfo.from_config(repo_config)\n if repos and info.repo not in repos:\n rev_infos.append(None)\n continue\n\n output.write(f'Updating {info.repo} ... ')\n new_info = info.update(tags_only=tags_only, freeze=freeze)\n try:\n _check_hooks_still_exist_at_rev(repo_config, new_info, store)\n except RepositoryCannotBeUpdatedError as error:\n output.write_line(error.args[0])\n rev_infos.append(None)\n retv = 1\n continue\n\n if new_info.rev != info.rev:\n changed = True\n if new_info.frozen:\n updated_to = f'{new_info.frozen} (frozen)'\n else:\n updated_to = new_info.rev\n msg = f'updating {info.rev} -> {updated_to}.'\n output.write_line(msg)\n rev_infos.append(new_info)\n else:\n output.write_line('already up to date.')\n rev_infos.append(None)\n\n if changed:\n _write_new_config(config_file, rev_infos)\n\n return retv\n","repo_name":"vinodiOS/two-factor-auth-FastAPI","sub_path":"env/lib/python3.9/site-packages/pre_commit/commands/autoupdate.py","file_name":"autoupdate.py","file_ext":"py","file_size_in_byte":6185,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"69"} +{"seq_id":"23266874335","text":"from asyncio import Task\nimport os\nimport speech_recognition as sr\nimport webbrowser\nimport pyttsx3\nfrom gtts import gTTS\nimport playsound\nimport webbrowser\nimport pywhatkit\nfrom playsound import playsound\nimport wikipedia\nimport pyautogui\nimport pyttsx3\nimport keyboard\nimport datetime\nfrom playsound import playsound\nimport pyjokes\nfrom PyDictionary import PyDictionary as Diction\nfrom tkinter import *\n\nfrom tkinter import Tk\nfrom PIL import Image, ImageTk\ngui = Tk()\n\n\ndef speak(text):\n print(\" \")\n tts = gTTS(text=text, lang='en')\n tts.save(\"output.mp3\")\n print(f\": {text}\")\n print(\" \")\n playsound(\"output.mp3\")\n os.remove(\"output.mp3\")\n\n # Assistant.say(audio)\n # print(\" \")\n # print(f\":{audio}\")\n # Assistant.runAndWait()\n # Adjust the speech volume, default is 1.0\n\ndef takeCommand():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\" \")\n print(\"Listening.....\")\n r.pause_threshold = 1\n audio = r.listen(source)\n try:\n print(\"Recognizing.....\")\n query = r.recognize_google(audio, language=\"en-in\")\n print(\" \")\n print(f\"User said: {query}\")\n return query\n except Exception as Error:\n return \"None\"\n\n\ndef TaskExe():\n speak(\"Hello sir, I am Jarvis!\")\n\n def Watsapp():\n speak(\"Tell me the name of the person!\")\n name = takeCommand()\n\n if 'sugam' in name or 'Sugam' in name:\n speak(\"Tell me the message!\")\n msg = takeCommand()\n speak(\"Tell me the time, sir!\")\n speak(\"Time in hour!\")\n hour = int(takeCommand())\n speak(\"Time in minutes\")\n min = int(takeCommand())\n pywhatkit.sendwhatmsg(\"+9779816060212\", msg, hour, min, 20)\n speak(\"OK Sir, sending WhatsApp message\")\n elif 'Nishal' in name or 'nishal' in name:\n speak(\"Tell me the message!\")\n msg = takeCommand()\n speak(\"Tell me the time, sir!\")\n speak(\"Time in hour!\")\n hour = int(takeCommand())\n speak(\"Time in minutes\")\n min = 
int(takeCommand())\n pywhatkit.sendwhatmsg(\"+9779801927183\", msg, hour, min, 20)\n speak(\"OK Sir, sending WhatsApp message\")\n elif 'Mum' in name or 'mum' in name:\n speak(\"Tell me the message!\")\n msg = takeCommand()\n speak(\"Tell me the time, sir!\")\n speak(\"Time in hour!\")\n hour = int(takeCommand())\n speak(\"Time in minutes\")\n min = int(takeCommand())\n pywhatkit.sendwhatmsg(\"+9779807936028\", msg, hour, min, 20)\n speak(\"OK Sir, sending WhatsApp message\")\n elif 'Tenzin' in name or 'tenzin' in name:\n speak(\"Tell me the message!\")\n msg = takeCommand()\n speak(\"Tell me the time, sir!\")\n speak(\"Time in hour!\")\n hour = int(takeCommand())\n speak(\"Time in minutes\")\n min = int(takeCommand())\n pywhatkit.sendwhatmsg(\"+9779762285689\", msg, hour, min, 20)\n speak(\"OK Sir, sending WhatsApp message\")\n elif 'Babu' in name or 'Baba' in name or 'Dad' in name:\n speak(\"Tell me the message!\")\n msg = takeCommand()\n speak(\"Tell me the time, sir!\")\n speak(\"Time in hour!\")\n hour = int(takeCommand())\n speak(\"Time in minutes\")\n min = int(takeCommand())\n pywhatkit.sendwhatmsg(\"+9779851177355\", msg, hour, min, 20)\n speak(\"OK Sir, sending WhatsApp message\")\n\n def Vajan():\n speak(\"Tell me the name of the bhaajan!\")\n musicName = takeCommand()\n\n if 'bhakti' in musicName or 'bhakti dance' in musicName or 'bhakti dan' in musicName:\n playsound(\"/home/dassahil/Desktop/jagatguru-rampal-ji/vajan/05 Bhakti dan guru dijiyo.mp3\")\n elif 'upar' in musicName or 'uper' in musicName or 'apa' in musicName or 'upper' in musicName:\n playsound(\"/home/dassahil/Desktop/jagatguru-rampal-ji/vajan/01 The uper ne pair.mp3\")\n elif 'Main To pavega'in musicName or 'man to pawaga' in musicName or 'main to pawagea' in musicName or 'man' in musicName or 'pawaga' in musicName:\n playsound(\"/home/dassahil/Desktop/jagatguru-rampal-ji/02 Man tu pawega apna kiya re.mp3\")\n else:\n pywhatkit.playonyt(musicName)\n\n speak(\"Your search has been started from the Youtube Enjoy!\")\n \n def YoutubeAuto():\n speak(\"Whats Your Command ?\")\n comm = takeCommand()\n\n if 'pause' in comm:\n keyboard.press('space bar')\n elif 'restart' in comm:\n keyboard.press('0')\n \n elif 'mute' in comm:\n keyboard.press('m')\n elif 'skip' in comm:\n keyboard.press('l')\n elif 'back' in comm:\n keyboard.press('j')\n elif 'full screen' in comm:\n keyboard.press('f')\n elif 'film mode' in comm:\n keyboard.press('t')\n speak(\"Done Sir!\")\n \n def braveAuto():\n speak(\"Chrome Automation started!\")\n command = takeCommand()\n\n if 'close this tab' in command:\n keyboard.press_and_release('ctrl + shift + w')\n elif 'open new tab' in query:\n keyboard.press_and_release('ctrl + t')\n elif 'open new window' in command:\n keyboard.press_and_release('ctrl + n')\n elif 'history' in command:\n keyboard.press_and_real ('ctrl + h')\n elif 'downloads' in command:\n keyboard.press_and_real ('ctrl + j')\n elif 'bookmark this page' in command:\n keyboard.press_and_release('ctrl + d')\n elif 'turn to full screen' in command:\n keyboard.press_and_release('f11')\n \n def Dict():\n speak(\"Tell me the problem!\")\n speak(\"Activated Dictionary!\")\n probl = takeCommand()\n\n if 'meaning' in probl:\n probl = probl.replace(\"what is the\", \"\")\n probl = probl.replace(\"meaning of\", \"\")\n probl = probl.replace(\"jarvis\", \"\")\n probl = probl.replace(\"of\", \"\")\n probl = probl.strip() # Remove leading/trailing spaces\n dictionary = Diction()\n result = dictionary.meaning(probl)\n speak(f\"The meaning of {probl} 
is {result}\")\n elif 'synonym' in probl:\n probl = probl.replace(\"what is the\", \"\")\n probl = probl.replace(\"synonyms of\", \"\")\n probl = probl.replace(\"jarvis\", \"\")\n probl = probl.replace(\"of\", \"\")\n probl = probl.strip() # Remove leading/trailing spaces\n dictionary = Diction()\n result = dictionary.synonym(probl)\n speak(f\"The synonym of {probl} is {result}\")\n elif 'antonym' in probl:\n probl = probl.replace(\"what is the\", \"\")\n probl = probl.replace(\"antonym of\", \"\")\n probl = probl.replace(\"jarvis\", \"\")\n probl = probl.replace(\"of\", \"\")\n probl = probl.strip() # Remove leading/trailing spaces\n dictionary = Diction()\n result = dictionary.antonym(probl)\n speak(f\"The antonym of {probl} is {result}\")\n\n speak(\"Exited Dictionary!\")\n \n def TakeHindi():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\" \")\n print(\"Listening.....\")\n r.pause_threshold = 1\n audio = r.listen(source)\n try:\n print(\"Recognizing.....\")\n query = r.recognize_google(audio, language=\"hi\")\n print(\" \")\n print(f\"User said: {query}\")\n return query\n except Exception as Error:\n return \"None\"\n def Tran():\n speak(\"Tell Me the Line!\")\n line = TakeHindi()\n \n while True:\n query = takeCommand()\n\n if 'hello' in query:\n speak(\"Hello Sir, I am Mimi.\")\n speak(\"Your personal AI Assistant!\")\n speak(\"How may I help you?\")\n elif 'how are you' in query:\n speak(\"I am fine, thank you.\")\n speak(\"What about you?\")\n elif 'you need a break' in query:\n speak(\"OK Sir, you can call me anytime!\")\n break\n elif 'bye' in query or 'By' in query or 'by' in query:\n speak(\"OK Sir, Bye!\")\n break\n elif 'youtube search' in query.lower():\n speak(\"OK Sir, this is what I found for your search\")\n query = query.replace(\"youtube search\", \"\")\n query = query.replace(\"jarvis \", \"\")\n web = 'https://www.youtube.com/results?search_query=' + query\n webbrowser.open(web)\n speak(\"Done Sir!\")\n elif 'google search' in query.lower():\n speak(\"This is what I found for your search!\")\n query = query.replace(\"google search\", \"\")\n query = query.replace(\"jarvis\", \"\")\n pywhatkit.search(query)\n speak(\"Done Sir!\")\n elif 'website' in query:\n speak(\"OK Sir, launching.....\")\n web1 = query.replace(\"open\", \"\")\n web1 = web1.replace(\"jarvis\", \"\")\n web1 = web1.replace(\"website\", \"\")\n web1 = web1.replace(\" \",\"\")\n web2 = 'https://www.' + web1 + '.com'\n webbrowser.open(web2)\n\n elif 'launch' in query:\n speak(\"Tell me the name of the website!\")\n name = takeCommand()\n web = 'https://www.' + name + '.com'\n webbrowser.open(web)\n speak(\"Done Sir!\")\n elif 'vajan' in query or 'bhajan' in query:\n Vajan()\n elif 'wikipedia' in query or 'Wikipedia' in query:\n speak(\"Searching Wikipedia.....\")\n query = query.replace(\"Wikipedia\", \"\")\n query = query.replace(\"wikipedia\", \"\")\n try:\n wiki = wikipedia.summary(query, 2)\n speak(f\"According to Wikipedia: {wiki}\")\n except wikipedia.exceptions.PageError:\n speak(\"Sorry, I couldn't find any relevant information on Wikipedia.\")\n except wikipedia.exceptions.DisambiguationError:\n speak(\"There are multiple options available. 
Please provide a more specific query.\")\n elif 'watsapp message' in query or 'Watsapp message' in query or 'WhatsApp message' in query:\n Watsapp()\n elif 'screenshot' in query:\n kk = pyautogui.screenshot()\n kk.save('/home/dassahil/Pictures/screenshot.png')\n\n\n\n # application open\n elif 'open brave' in query or 'open Brave' in query:\n os.system(\"/home/dassahil/Desktop/brave-browser.desktop\")\n\n\n # website opening\n elif \"open YouTube\" in query or \"open youtube\" in query:\n speak(\"OK Sir, wait a second\")\n webbrowser.open(\"https://www.youtube.com\")\n speak(\"Your command has been completed, Sir!\")\n\n elif \"open leetcode\" in query or \"open Leetcode\" in query or \"lit code\" in query or \"Leet code\" in query:\n speak(\"OK Sir, wait a second\")\n webbrowser.open(\"https://leetcode.com\")\n speak(\"Your command has been completed, Sir!\")\n\n elif \"open wikipedia\" in query or \"open Wikipedia\" in query:\n speak(\"OK Sir, wait a second\")\n webbrowser.open(\"https://www.wikipedia.org\")\n speak(\"Your command has been completed, Sir!\")\n\n elif \"open gmail\" in query or \"open Gmail\" in query:\n speak(\"OK Sir, wait a second\")\n webbrowser.open(\"https://mail.google.com\")\n speak(\"Your command has been completed, Sir!\")\n\n elif \"open keybr\" in query or \"open KBR\" in query or \"open KVR\" in query or \"open Keybr\" in query:\n\n speak(\"OK Sir, wait a second\")\n webbrowser.open(\"https://www.keybr.com\")\n speak(\"Your command has been completed, Sir!\")\n elif \"open linkedin\" in query or \"open Linkedin\" in query or \"open LinkedIn\" in query:\n\n speak(\"OK Sir, wait a second\")\n webbrowser.open(\"https://www.linkedin.com\")\n speak(\"Your command has been completed, Sir!\")\n elif 'pause' in query:\n keyboard.press('space bar')\n elif 'restart' in query:\n keyboard.press('0')\n \n elif 'mute' in query:\n keyboard.press('m')\n elif 'skip' in query:\n keyboard.press('l')\n elif 'back' in query:\n keyboard.press('j')\n elif 'full screen' in query:\n keyboard.press('f')\n elif 'film mode' in query:\n keyboard.press('t')\n speak(\"Done Sir!\")\n elif 'youtube tool' in query or 'YouTube Tool' in query or 'YouTube tool' in query:\n YoutubeAuto() \n\n elif 'close this tab' in query:\n keyboard.press_and_release('ctrl + shift + w')\n elif 'open new tab' in query:\n keyboard.press_and_release('ctrl + t')\n elif 'open new window' in query:\n keyboard.press_and_release('ctrl + n')\n elif 'history' in query:\n keyboard.press_and_real ('ctrl + h')\n elif 'downloads' in query:\n keyboard.press_and_real ('ctrl + j')\n elif 'bookmark this page' in query:\n keyboard.press_and_release('ctrl + d')\n elif 'turn to full screen' in query:\n keyboard.press_and_release('f11')\n elif 'brave automation' in query:\n braveAuto()\n elif 'joke' in query:\n get = pyjokes.get_joke()\n speak(get)\n elif 'repeat my words' in query:\n speak(\"Speak Sir!\")\n jj = takeCommand()\n speak(f\"You said :{jj}\")\n elif 'my location' in query:\n speak(\"Ok Sir , Wait A Second!\")\n webbrowser.open('https://www.google.com/maps/@27.7419997,85.2694121,13.21z?entry=ttu')\n elif 'dictionary' in query:\n Dict()\n elif 'alarm' in query or 'Alarm' in query or 'alaarm' in query:\n speak(\"Enter The time !\")\n time = input(\": Enter The Time :\")\n\n while True:\n Time_Ac = datetime.datetime.now()\n print(Time_Ac)\n now = Time_Ac.strftime(\"%H:%M:%S\")\n print(Time_Ac.strftime(\"%H:%M:%S\"))\n\n if now == time:\n speak(\"Time To Wake Up Sir!\")\n playsound('alarm.mp3')\n speak(\"Alarm Closed!\")\n 
\n elif now>time:\n break\n\n \n\n elif not query:\n continue\n \n else:\n speak(\"Sorry, I couldn't understand the query.\")\n\ndef start():\n TaskExe()\n\n\ngui.minsize(height=800, width=1270)\ngui.maxsize(height=800,width=1270)\ngui.title('Jarvis')\nimg =Image.open('jarvisbg.png') \nbg = ImageTk.PhotoImage(img)\nlabel = Label(gui, image=bg)\nlabel.place(x = 0,y = 0)\n\n\n# Try setting the window icon with a PNG or GIF image file (icon.png)\n# image_path = 'i.ico'\n# image = Image.open(image_path)\n# Adjust the desired size of the image\nnew_width = 200\nnew_height = 200\n# image = image.resize((new_width, new_height), Image.BILINEAR)\n# image = ImageTk.PhotoImage(image)\n# image_label = Label(gui, image=image)\n# image_label.pack(pady=10)\nresults_label = Label(gui, text=\"\",height=12, justify=LEFT, font=('Arial', 12))\nresults_label.pack(pady=10)\ngui.attributes(\"-alpha\", 0.0)\n\n# Create the button with transparent background\nshow_passwords_button = Button(gui, text=\"Run Jarvis\", command=start, height=12, width=25, bd=0, highlightthickness=0)\n\n# Pack the button\nshow_passwords_button.pack()\n\n\n\ngui.mainloop()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Acrsahil/Python_Programming","sub_path":"PythonProjects/Jarvis-Assistant-AI/jarvis1.py","file_name":"jarvis1.py","file_ext":"py","file_size_in_byte":15770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"33326250383","text":"import numpy as np\nimport glob\nfrom baseline import RealCentra\nfrom adpsolver import AdaptSolver\nimport sys\nsys.path.append(\"..\")\nfrom makedata import graphfile_loader\nfrom fmcl import FMCL, nDCG, resutls_latex\n\n\n\ndef real_topk_test(file, frac, measure, k, weighted, trial=1):\n\tname = file[file.rindex('/') + 1: file.rindex('.')]\n\tG = graphfile_loader(file)\n\tsize = (len(G.nodes()), len(G.edges()))\n\tave_apsp_time, ave_apsp_nDCG = 0, 0\n\tave_dh_time, ave_dh_nDCG = 0, 0\n\tave_fm_time, ave_fm_nDCG = 0, 0\n\tave_adp_time, ave_adp_nDCG = 0, 0\n\tave_fmcl_time, ave_fmcl_nDCG = 0, 0\n\tave_fmcl2_time, ave_fmcl2_nDCG = 0, 0\n\tfor t in range(trial):\n\t\treal_V = np.sort(np.random.choice(G.nodes, int(size[0] * frac), False))\n\t\tapsp, dh, fm = RealCentra(G, real_V, 'APSP'), RealCentra(G, real_V, 'DH'), RealCentra(G, real_V, 'FM')\n\t\tadp, fmcl = AdaptSolver(G, real_V, weighted), FMCL(measure, G)\n\t\tapsp_topkV, apsp_time, apsp_scores = apsp.topk_vertices(measure, k)\n\t\tdh_topkV, dh_time, _ = dh.topk_vertices(measure, k)\n\t\tfm_topkV, fm_time, _ = fm.topk_vertices(measure, k)\n\t\tadp_topkV, adp_time = adp.topk_vertices(measure, k)\n\t\tfmcl_topkV, fmcl_time = fmcl.real_topk_vertices(real_V, k, 'adp')\n\t\tfmcl2_topkV, fmcl2_time = fmcl.real_topk_vertices(real_V, k, 'sub')\n\t\tave_apsp_time += apsp_time\n\t\tave_dh_time, ave_dh_nDCG = ave_dh_time + dh_time, ave_dh_nDCG + nDCG(dh_topkV, apsp_topkV, apsp_scores)\n\t\tave_fm_time, ave_fm_nDCG = ave_fm_time + fm_time, ave_fm_nDCG + nDCG(fm_topkV, apsp_topkV, apsp_scores)\n\t\tave_adp_time, ave_adp_nDCG = ave_adp_time + adp_time, ave_adp_nDCG + nDCG(adp_topkV, apsp_topkV, apsp_scores)\n\t\tave_fmcl_time, ave_fmcl_nDCG = ave_fmcl_time + fmcl_time, ave_fmcl_nDCG + nDCG(fmcl_topkV, apsp_topkV, apsp_scores)\n\t\tave_fmcl2_time, ave_fmcl2_nDCG = ave_fmcl2_time + fmcl2_time, ave_fmcl2_nDCG + nDCG(fmcl2_topkV, apsp_topkV, apsp_scores)\n\tave_apsp_time = round(ave_apsp_time / trial, 2)\n\tave_dh_time, ave_dh_nDCG = round(ave_dh_time / trial, 2), round(ave_dh_nDCG / trial, 4)\n\tave_fm_time, 
ave_fm_nDCG = round(ave_fm_time / trial, 2), round(ave_fm_nDCG / trial, 4)\n\tave_adp_time, ave_adp_nDCG = round(ave_adp_time / trial, 2), round(ave_adp_nDCG / trial, 4)\n\tave_fmcl_time, ave_fmcl_nDCG = round(ave_fmcl_time / trial, 2), round(ave_fmcl_nDCG / trial, 4)\n\tave_fmcl2_time, ave_fmcl2_nDCG = round(ave_fmcl2_time / trial, 2), round(ave_fmcl2_nDCG / trial, 4)\n\treturn ([name, size, ave_apsp_time, ave_dh_time, ave_fm_time, ave_adp_time, ave_fmcl_time, ave_fmcl2_time,\n\t\tave_dh_nDCG, ave_fm_nDCG, ave_adp_nDCG, ave_fmcl_nDCG, ave_fmcl2_nDCG])\n\n\ndef real_topk_tests(path, frac, measure, k, weighted=True, trial=1, outfile='table.tex'):\n\tfiles = glob.glob(path + '*.col')\n\tfiles.sort()\n\tresults = []\n\tfor file in files:\n\t\ttry:\n\t\t\tresult = real_topk_test(file, frac, measure, k, weighted, trial)\n\t\t\tresults.append(result)\n\t\t\tprint(result)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\theader = (\n\t\t['Instance', 'Size', 'APSP', 'DH', 'FM', 'ADP', 'FMCL', 'FMCL2', 'DH nDCG', 'FM nDCG', 'ADP nDCG', 'FMCL nDCG', 'FMCL2 nDCG'])\n\tresutls_latex(results, header, outfile)\n\n\n\nif __name__ == '__main__':\n\tfrac, k, trial = 0.5, 10, 1\n\tmeasures = ['har']\n\tweighteds = [False, True, True, True, False, False, False]\n\tdatasets = ['DIMACS', 'wDIMACS', 'TSP', 'Tree', 'SmallWorld', 'movingAI', 'movingAI2']\n\n\t#datasets = ['movingAI', 'movingAI2']\n\n\tfor measure in measures:\n\t\tfor weighted, dataset in zip(weighteds, datasets):\n\t\t\tpath, table_file = '../datasets/' + dataset + '/', 'adptables/' + dataset + '_' + measure +'.tex'\n\t\t\treal_topk_tests(path, frac, measure, k, weighted, trial, table_file)","repo_name":"leon-angli/FMCL","sub_path":"real_centrality/adp_experiments.py","file_name":"adp_experiments.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"503943698","text":"import json\nfrom inv_sys.inventory import Inventory\nfrom user.account import Account\n# database\nfrom firebase_admin import db\n# debug\nimport traceback\nimport logging\n\nclass Cart:\n # returns Cart json obj\n def getCurrent():\n ref = db.reference(\"/\")\n file = open(\"./user/login_token.txt\", \"r\")\n key = file.read()\n file.close()\n return ref.get()[\"Users\"][key][\"Cart\"]\n\n # returns db reference to user cart\n def getRef():\n file = open(\"./user/login_token.txt\", \"r\")\n key = file.read()\n file.close()\n ref = db.reference(\"/Users\").child(key).child(\"Cart\")\n return ref\n\n # checkout items in cart\n def checkout():\n exist = False\n # if cart is not empty\n if not Cart.isEmpty():\n # for each item in cart\n for item_id in Cart.getCurrent()[\"Items\"]:\n # loop thru inventory to find matching item\n for category in Inventory.getCurrent():\n for itemID in Inventory.getCurrent()[category]:\n if itemID == item_id:\n exist = True\n invObj = Inventory.getCurrent()[category][itemID]\n invRef = Inventory.getRef().child(category).child(itemID)\n cartObj = Cart.getCurrent()[\"Items\"][item_id]\n # if last in stock, remove from db\n if invObj[\"stockNumber\"] == 1:\n invRef.delete()\n # else, decrement stock number\n else:\n dec = invObj[\"stockNumber\"] - cartObj[\"stockNumber\"]\n invRef.child(\"stockNumber\").set(dec)\n else:\n print(\"\\nCart empty, nothing to checkout!\\n\")\n if exist:\n key = Account.getKey()\n accRef = Account.getRef().child(key)\n accObj = Account.getCurrent()[key]\n cartRef = Cart.getRef()\n order = {\n \"order\": Cart.getCurrent(),\n 
\"paymentInfo\": accObj[\"creditCard\"],\n \"shippingInfo\": accObj[\"address\"]\n }\n # add order to history\n accRef.child(\"orderHistory\").push().set(order)\n # clear cart\n cartRef.child(\"Items\").delete()\n # reset cart total values\n cartRef.child(\"totalItems\").set(0)\n cartRef.child(\"totalPrice\").set(0)\n cartRef.child(\"totalWeight\").set(0)\n print(\"\\nCheckout successful! View order history for details...\\n\")\n\n # add num items to cart\n def add(num, itemID):\n try:\n i = 0\n print()\n while i < num:\n itemObj = None\n exist = False\n max = False\n for category in Inventory.getCurrent():\n for item_id in Inventory.getCurrent()[category]:\n if itemID == item_id:\n itemObj = Inventory.getCurrent()[category][itemID]\n itemRef = Inventory.getRef().child(category).child(itemID)\n cartRef = Cart.getRef().child(\"Items\")\n # if cart is not empty\n if not Cart.isEmpty():\n # if item already in cart, increment cart number\n for cart_item_id in Cart.getCurrent()[\"Items\"]:\n if itemID == cart_item_id:\n exist = True\n cartObj = Cart.getCurrent()[\"Items\"][itemID]\n # if cart number exceeds inventory number, prompt user\n if cartObj[\"stockNumber\"] == itemObj[\"stockNumber\"]:\n max = True\n print(\"Max stock reached for item!\")\n else:\n inc = cartObj[\"stockNumber\"] + 1\n cartRef.child(itemID).child(\"stockNumber\").set(inc)\n # if item does not already exist in cart, push to cart\n if not exist:\n exist = True\n # send obj to cart with key being itemID\n cartRef.child(itemID).set(itemObj)\n # set stock number to 1 for new item\n cartRef.child(itemID).child(\"stockNumber\").set(1)\n if exist:\n if not max:\n # increment total items\n inc = Cart.getCurrent()[\"totalItems\"] + 1\n Cart.getRef().child(\"totalItems\").set(inc)\n # calculate price\n newTotal = Cart.getCurrent()[\"totalPrice\"] + itemObj[\"price\"]\n Cart.getRef().child(\"totalPrice\").set(round(newTotal, 2))\n # calculate weight\n newWeight = Cart.getCurrent()[\"totalWeight\"] + itemObj[\"weight\"]\n Cart.getRef().child(\"totalWeight\").set(round(newWeight, 2))\n # if max stock hasnt been reached on an item\n if not max and i == 0:\n print(itemObj)\n print(\"Item added to cart!\")\n else:\n print(itemID+\" -- item not found in inventory!\")\n return\n i += 1\n print()\n except Exception as e:\n print(\"\\nError adding item to cart:\")\n logging.error(traceback.format_exc())\n\n # remove num items from cart\n def remove(num, itemID):\n try:\n i = 0\n print()\n while i < num:\n itemObj = None\n exist = False\n # if cart is not empty\n if not Cart.isEmpty():\n for item_id in Cart.getCurrent()[\"Items\"]:\n if itemID == item_id:\n exist = True\n itemObj = Cart.getCurrent()[\"Items\"][itemID]\n itemRef = Cart.getRef().child(\"Items\").child(itemID)\n # get category from first name in itemID\n invRef = Inventory.getRef().child(itemID.split(\"_\")[0].capitalize())\n # if last in cart, remove from db\n if itemObj[\"stockNumber\"] == 1:\n itemRef.delete()\n # else, decrement cart number\n else:\n dec = itemObj[\"stockNumber\"] - 1\n itemRef.child(\"stockNumber\").set(dec)\n if exist:\n # decrement total items\n dec = Cart.getCurrent()[\"totalItems\"] - 1\n Cart.getRef().child(\"totalItems\").set(dec)\n # calculate price\n if Cart.getCurrent()[\"totalItems\"] == 0:\n Cart.getRef().child(\"totalPrice\").set(0)\n else:\n newTotal = Cart.getCurrent()[\"totalPrice\"] - itemObj[\"price\"]\n Cart.getRef().child(\"totalPrice\").set(round(newTotal, 2))\n # calculate weight\n newWeight = Cart.getCurrent()[\"totalWeight\"] - 
itemObj[\"weight\"]\n Cart.getRef().child(\"totalWeight\").set(round(newWeight, 2))\n if i == 0:\n print(itemObj)\n print(\"Item removed from cart!\")\n else:\n print(itemID+\" -- item not found in cart!\")\n return\n else:\n print(\"Cart is empty!\")\n return\n i += 1\n print()\n except Exception as e:\n print(\"\\nError removing item from cart:\")\n logging.error(traceback.format_exc())\n\n # checks if cart is empty\n def isEmpty():\n try:\n Cart.getCurrent()[\"Items\"]\n return False\n except:\n return True\n\n # view all items in cart\n def viewCart():\n try:\n for item in Cart.getCurrent()[\"Items\"]:\n i = Cart.getCurrent()[\"Items\"][item]\n print(\"\\nItem ID: \"+item)\n print(\"Description: \"+i[\"description\"])\n print(\"Price: $\"+str(i[\"price\"]))\n print(\"Color: \"+i[\"color\"])\n print(\"Size: \"+i[\"size\"])\n print(\"In Cart: \"+str(i[\"stockNumber\"]))\n\n print(\"\\nTotal Price: $\"+str(Cart.getCurrent()[\"totalPrice\"]))\n print(\"Total Items: \"+str(Cart.getCurrent()[\"totalItems\"]))\n print(\"Total Weight: \"+str(Cart.getCurrent()[\"totalWeight\"])+\"lbs\\n\")\n except:\n print(\"\\nNo items in your cart!\\n\")\n","repo_name":"lchristopher99/bully-py","sub_path":"draft_1/user/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":9389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"1405992161","text":"import graphene\nfrom graphene_file_upload.scalars import Upload\nfrom common.methods.send_notification import send_notification\nfrom order.models import EasyOrder, EasyOrderFile\nfrom authentication.models import User\n\n\nclass CreateEasyOrder(graphene.Mutation):\n class Arguments:\n contents = graphene.String()\n drafts = graphene.List(Upload)\n files = graphene.List(Upload)\n user_id = graphene.Int()\n is_visit = graphene.Boolean(default_value=False)\n is_order_more = graphene.Boolean(default_value=False)\n\n success = graphene.Boolean()\n\n @classmethod\n def mutate(cls, _, info, contents,is_visit, is_order_more, user_id=None, files=None, ):\n if files is None:\n files = []\n if user_id:\n user = User.objects.get(pk=user_id)\n else:\n user = info.context.user\n easy_order = EasyOrder.objects.create(user=user, contents=contents, is_visit=is_visit, is_order_more=is_order_more)\n for file in files:\n EasyOrderFile.objects.create(easy_order=easy_order, file=file)\n #send_notification(user=user, type=\"간편주문요청\", product_names=\"\")\n return CreateEasyOrder(success=True)\n","repo_name":"potatojoayo/smarter_server","sub_path":"order/mutations/create_easy_order.py","file_name":"create_easy_order.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"31549274050","text":"#! 
/usr/bin/env python\n\n# fasta_subsetter.py\n# February 2015 by C Tepolt, last updated April 2018 by C Tepolt [small edit by Z Tobias December 2019]\n# \n# Reads in a list of loci and a master fasta file,\n# and creates a new fasta file containing\n# only loci in the subset file.\n\nUsage = \"USAGE: fasta_subsetter.py MASTER_FASTA_FILE LIST_TO_SELECT [SELECT/REMOVE]\"\n\nimport sys\n\nif (len(sys.argv) < 4) or (len(sys.argv) > 4):\n\tprint(\"Please provide two input files and SELECT or REMOVE after the script name.\")\n\tprint(Usage)\n\texit()\nelse:\n\tMasterFileName = sys.argv[1]\n\tSubsetListName = sys.argv[2]\n\tselect_type = sys.argv[3]\n\nOutFileName = SubsetListName.rstrip(\".txt\") + \"_\" + select_type + \".fasta\" #changed to .rstrip(), was removing leading tx in path\n\nSubsetFile = open(SubsetListName, 'r')\nMasterFile = open(MasterFileName, 'r')\nOutFile = open(OutFileName, 'w')\n\nSubsetCounter = 0\nSubsetLoci = []\n\n# Read in the desired loci, and print a count.\n\nfor Line in SubsetFile:\n\tSubsetLoci.append(Line.strip())\n\tSubsetCounter += 1\n\nSubsetFile.close()\t# Clean up after your damn self.\n\nprint(\"Read in %d lucky loci to %s from a subset file.\" % (SubsetCounter, select_type))\n\nSwitch = 0\n\nfor Line in MasterFile:\n\tif \">\" in Line:\n\t\tSwitch = 0\n\t\tContig = Line.strip().split(\" \")\n\n\t\tif select_type == \"REMOVE\":\n\t\t\tif Contig[0] in SubsetLoci:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tOutFile.write(Line)\n\t\t\t\tSwitch = 1\n\t\tif select_type == \"SELECT\":\n\t\t\tif Contig[0] in SubsetLoci:\n\t\t\t\tOutFile.write(Line)\n\t\t\t\tSwitch = 1\n\t\t\telse:\n\t\t\t\tcontinue\n\n\telse:\n\t\tif Switch == 1:\n\t\t\tOutFile.write(Line)\n\nMasterFile.close()\t# Clean up after your damn self.\nOutFile.close()\n\nprint(\"Subset file is complete! Share and enjoy.\")\nprint(OutFileName)\n","repo_name":"tepoltlab/RhithroLoxo_DE","sub_path":"scripts/fasta_subsetter.py","file_name":"fasta_subsetter.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"41814284389","text":"def ceasar():\r\n\tkey = int(input(\"Enter a key: \"))\r\n\talphavite = 'abcdefghijklmnopqrstuvwxyz'\r\n\topen_text = str(input(\"Enter a text: \"))\r\n\tclosed_text = \"\"\r\n\t\r\n\tfor char in open_text:\t\r\n\t\tif char in alphavite:\r\n\t\t\toffset = (alphavite.index(char) + key) % 26\r\n\t\t\tfor charr in alphavite:\r\n\t\t\t\tif alphavite.index(charr) == offset:\r\n\t\t\t\t\tclosed_text += charr\r\n\t\t\t\t\t\r\n\t\telse:\r\n\t\t\tif char == char.upper():\r\n\t\t\t\tchar = char.lower()\r\n\t\t\t\tif char in alphavite:\r\n\t\t\t\t\toffset = (alphavite.index(char) + key) % 26\r\n\t\t\t\t\tfor charr in alphavite:\r\n\t\t\t\t\t\tif alphavite.index(charr) == offset:\r\n\t\t\t\t\t\t\tclosed_text += charr.upper()\r\n\t\t\t\t\t\t\t\r\n\t\t\tif char not in alphavite:\r\n\t\t\t\tclosed_text += char\r\n\t\t\t\r\n\t\r\n\tprint(open_text)\r\n\tprint(closed_text)\r\n\t\r\nceasar()\r\n","repo_name":"Arno98/CS50_Task_On_Python","sub_path":"cesear.py","file_name":"cesear.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"29793645549","text":"from .. 
import mysql_execute_query\n\nclass DulangModel():\n\n def GetSenaraiDulang(self):\n query = \"SELECT \"\n query += \"dlg_name as kod_dulang, \"\n query += \"dlg_desc as nama_dulang, \"\n query += \"tenant_id as tag \"\n query += \"FROM tbl_dulang \"\n query += \"WHERE status = 1 \"\n query += \"ORDER BY tenant_id \"\n response = mysql_execute_query(query)\n return response","repo_name":"onewoorks/spe_flask","sub_path":"app/main/models/mysql/dulang.py","file_name":"dulang.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"2726744501","text":"#!/usr/bin/env python3\n##\n## EPITECH PROJECT, 2022\n## Zappy\n## File description:\n## main\n##\n\n\nimport ctypes\nimport pathlib\nimport getopt\nfrom queue import Queue\nfrom sys import *\n\nressources = [\"linemate\",\"deraumere\", \"sibur\", \"mendiane\", \"phiras\", \"thystame\"]\n\n# ---------------------- NEEDED ----------------------\n\nupLvl = [\n {\n \"player\": 1,\n \"linemate\": 1,\n \"deraumere\": 0,\n \"sibur\": 0,\n \"mendiane\": 0,\n \"phiras\": 0,\n \"thystame\": 0\n },\n {\n \"player\": 2,\n \"linemate\": 1,\n \"deraumere\": 1,\n \"sibur\": 1,\n \"mendiane\": 0,\n \"phiras\": 0,\n \"thystame\": 0\n },\n {\n \"player\": 2,\n \"linemate\": 2,\n \"sibur\": 1,\n \"phiras\": 2,\n \"deraumere\": 0,\n \"mendiane\": 0,\n \"thystame\": 0\n },\n {\n \"player\": 4,\n \"linemate\": 1,\n \"deraumere\": 1,\n \"sibur\": 2,\n \"phiras\": 1,\n \"mendiane\": 0,\n \"thystame\": 0\n },\n {\n \"player\": 4,\n \"linemate\": 1,\n \"deraumere\": 2,\n \"sibur\": 1,\n \"mendiane\": 3,\n \"phiras\": 0,\n \"thystame\": 0\n },\n {\n \"player\": 6,\n \"linemate\": 1,\n \"deraumere\": 2,\n \"sibur\": 3,\n \"phiras\": 1,\n \"mendiane\": 0,\n \"thystame\": 0\n },\n {\n \"player\": 6,\n \"linemate\": 2,\n \"deraumere\": 2,\n \"sibur\": 2,\n \"mendiane\": 2,\n \"phiras\": 2,\n \"thystame\": 1\n }\n]\n\ndirections = {\n 1 : \"Forward\",\n 2 : \"Forward\",\n 3 : \"Left\",\n 4 : \"Left\",\n 5 : \"Left\",\n 6 : \"Right\",\n 7 : \"Right\",\n 8 : \"Forward\",\n}\n\n\n\n# ---------------------- AI ----------------------\n\nclass clientIA:\n def __init__(self):\n self.myId = -2\n self.nbClients = -1\n self.team = None\n self.height = -1\n self.width = -1\n self.alive = True\n self.nbMeeting = 1\n self.nbArrived = 1 # arrivés\n self.isCalled = False # rejoins le point d'incantation\n self.hasArrived = False # est arrivé\n self.isCalling = False # attend suffisement d'allié pour incanter\n self.elevation = False\n self.uturn = 0\n self.fork = False\n self.following = -1\n self.lvl = 1\n self.N = None\n self.M = None\n self.dir = 0\n self.drop = 0\n self.incantation = False\n self.toSend = Queue(maxsize=0)\n self.cmds = Queue(maxsize=9)\n self.currentCmd = \"Nothing\"\n self.commonInventory = {\n \"linemate\": 0,\n \"deraumere\": 0,\n \"sibur\": 0,\n \"mendiane\": 0,\n \"phiras\": 0,\n \"thystame\": 0\n }\n self.ressources = {\n \"food\": 10,\n \"linemate\": 0,\n \"deraumere\": 0,\n \"sibur\": 0,\n \"mendiane\": 0,\n \"phiras\": 0,\n \"thystame\": 0\n }\n\n def handleMessages(self, servMsg):\n if self.incantation or self.lvl == 1:\n return 0\n try:\n msg = servMsg.split(\",\")\n content = msg[1].split(\":\")\n target = content[1].split(\".\")\n sendTeam = target[0]\n sendId = int(target[1])\n sendLvl = int(target[2])\n direction = int(msg[0].split()[1])\n if (sendLvl != self.lvl) or (sendTeam != self.team):\n return 0\n\n if content[0].find(\"here\") >= 0:\n if self.isCalling 
and sendId > self.myId:\n return 0\n if self.isCalling and sendId < self.myId:\n self.nbMeeting = 1\n self.nbArrived = 1\n self.following = sendId\n self.isCalling = False\n self.isCalled = True\n self.ejected()\n self.toSend.put(\"Broadcast coming:\" + self.team + \".\" + str(self.myId) + \".\" + str(self.lvl))\n self.toSend.put(\"Broadcast cancel:\" + self.team + \".\" + str(self.myId) + \".\" + str(self.lvl))\n if self.isCalled and not self.hasArrived:\n if self.following > sendId:\n self.following = sendId\n self.toSend.put(\"Broadcast coming:\" + self.team + \".\" + str(self.myId) + \".\" + str(self.lvl))\n if direction == 0:\n self.ejected()\n self.toSend.put(\"Broadcast arrived:\" + self.team + \".\" + str(self.myId) + \".\" + str(self.lvl))\n self.hasArrived = True\n return 0\n if direction == 5:\n self.uturn += 1\n if direction == 1 and self.uturn == 3:\n self.toSend.put(\"Right\")\n self.uturn = 0\n self.toSend.put(directions[direction])\n\n if not self.isCalled:\n if self.ressources[\"food\"] >= 30:\n self.ejected()\n self.toSend.put(\"Broadcast coming:\" + self.team + \".\" + str(self.myId) + \".\" + str(self.lvl))\n self.isCalling = False\n self.isCalled = True\n self.following = sendId\n\n elif content[0].find(\"cancel\") >= 0 and self.isCalled and self.following == sendId:\n self.isCalled = False\n self.hasArrived = False\n self.following = -1\n\n elif content[0].find(\"coming\") >= 0 and self.isCalling:\n self.nbMeeting += 1\n\n elif content[0].find(\"arrived\") >= 0 and self.isCalling:\n self.nbArrived += 1\n\n elif content[0].find(\"feed\") >= 0 and self.isCalling:\n self.nbArrived -= 1\n self.nbMeeting -= 1\n return 0\n except:\n return 0\n\n\n def checkRessourceForLevel(self):\n for lvl in range(0, 7):\n if (self.lvl == lvl+1):\n for r in self.ressources:\n if (r == \"food\"):\n continue\n if (upLvl[lvl][r] > self.ressources[r]):\n return r\n return (None)\n\n\n def findPathToTile(self, tile_needed):\n if tile_needed == 0:\n return (1)\n left_tile = 1\n middle_tile = 2\n right_tile = 3\n reset = 0\n for levels in range(1, self.lvl + 1):\n if (tile_needed < middle_tile and tile_needed >= left_tile):\n self.toSend.put(\"Forward\")\n self.toSend.put(\"Left\")\n for t in range(0, middle_tile-tile_needed):\n self.toSend.put(\"Forward\")\n return (1)\n elif (tile_needed > middle_tile and tile_needed <= right_tile):\n self.toSend.put(\"Forward\")\n self.toSend.put(\"Right\")\n for t in range(0, tile_needed-middle_tile):\n self.toSend.put(\"Forward\")\n return (1)\n else:\n self.toSend.put(\"Forward\")\n if (tile_needed == middle_tile):\n return (1)\n left_tile += 2*levels+1\n middle_tile += 2*levels+2\n right_tile += 2*levels+3\n reset +=1\n for i in range (0, reset):\n print(\"remove =\", self.toSend.get())\n\n return (0)\n\n def inventory(self, srvMsg):\n food = 30\n if self.lvl >= 6:\n food = 40\n srvMsg = srvMsg[1:-1]\n inv = srvMsg.split(\",\")\n for rsc in inv:\n a = rsc.split()\n self.ressources[a[0]] = int(a[1])\n if self.ressources[\"food\"] >= 10 and self.fork:\n self.toSend.put(\"Fork\")\n self.fork = False\n if self.lvl != 8 and self.checkRessourceForLevel() == None and self.ressources[\"food\"] >= food:\n self.isCalling = True\n if self.isCalling and self.ressources[\"food\"] <= 5:\n self.isCalling = False\n self.nbArrived = 1\n self.nbMeeting = 1\n self.fork = True\n self.toSend.put(\"Broadcast cancel:\" + self.team + \".\" + str(self.myId) + \".\" + str(self.lvl))\n if self.hasArrived and self.ressources[\"food\"] <= 5:\n self.hasArrived = False\n self.isCalled 
= False\n self.toSend.put(\"Broadcast feed:\" + self.team + \".\" + str(self.myId) + \".\" + str(self.lvl))\n\n # def setRessourcesFromTile(self, look_list, t):\n # tile = {\n # \"linemate\": 0,\n # \"deraumere\": 0,\n # \"sibur\": 0,\n # \"mendiane\": 0,\n # \"phiras\": 0,\n # \"thystame\": 0\n # }\n # ress = look_list[t].split()\n # for i in ress:\n # if i == \"food\" or i == \"player\":\n # continue\n # tile[i] += 1\n # for i in ressources:\n # if tile[i] < upLvl[self.lvl - 1][i]:\n # self.toSend.put(\"Set \" + i)\n\n def setRessources(self):\n for i in ressources:\n if upLvl[self.lvl - 1][i] > 0:\n j = upLvl[self.lvl - 1][i]\n while j > 0:\n self.toSend.put(\"Set \" + i)\n j -= 1\n\n def checkTile(self, ressource):\n self.toSend.put(\"Take \" + ressource)\n self.ressources[ressource] += 1\n if self.checkRessourceForLevel() == None:\n if self.lvl == 1:\n self.toSend.put(\"Set linemate\")\n self.toSend.put(\"Incantation\")\n self.ressources[ressource] -= 1\n\n def rmRedundantChar(self, srvMsg):\n x = 'x'\n res = \"\"\n for i in srvMsg:\n if not (i == ',' and x == ','):\n res += i\n else:\n res += \"empty,\"\n x = i\n return res\n\n def look(self, srvMsg):\n ressource = None\n if self.isCalling or self.isCalled:\n return 0\n i = 0\n srvMsg = srvMsg[1:-1]\n srvMsg = self.rmRedundantChar(srvMsg)\n look_list = srvMsg.split(\",\")\n if self.lvl != 8:\n ressource = self.checkRessourceForLevel()\n if (self.ressources[\"food\"] < 40):\n ressource = \"food\"\n if ressource == None:\n self.toSend.put(\"Forward\")\n return 0\n for x in look_list:\n if x.find(ressource) >= 0:\n if self.findPathToTile(i) and self.lvl != 8:\n # self.toSend.put(\"Take \" + ressource)\n self.checkTile(ressource)\n return 1\n break\n i += 1\n self.toSend.put(\"Forward\")\n return 0\n\n\n def ejected(self):\n while (not self.toSend.empty()):\n self.toSend.get()\n return 1\n\n\n def serverResponse(self, srvMsg):\n if srvMsg == None:\n return 1\n if srvMsg == \"dead\":\n return -1\n if srvMsg.find(\"eject\") >= 0:\n return self.ejected()\n if srvMsg.find(\"message\") >= 0:\n if self.myId != -2:\n return self.handleMessages(srvMsg)\n if self.myId == -2:\n if self.currentCmd == \"Connect_nbr\":\n self.myId = int(srvMsg)\n else:\n return 0\n curr = self.currentCmd.split()[0]\n if self.currentCmd == \"Take Food\":\n if srvMsg == \"ko\":\n self.toSend.put(\"Right\")\n if srvMsg == \"Elevation underway\":\n self.nbArrived = 1\n self.nbMeeting = 1\n self.isCalling = False\n self.incantation = True\n self.isCalled = False\n self.hasArrived = False\n return 1\n if srvMsg.find(\"Current level\") >= 0:\n self.incantation = False\n self.lvl = int(srvMsg.split()[2])\n print(\"Level up \" + str(self.lvl))\n if self.currentCmd != \"Incantation\":\n return 1\n if self.incantation and srvMsg == \"ko\":\n self.incantation = False\n if self.currentCmd != \"Incantation\":\n return 1\n if curr == \"Look\":\n self.look(srvMsg)\n elif curr == \"Inventory\":\n self.inventory(srvMsg)\n elif curr == \"Connect_nbr\":\n self.nbPlayers = int(srvMsg)\n\n\n if self.cmds.empty():\n self.currentCmd = \"Nothing\"\n else:\n self.currentCmd = self.cmds.get()\n return 1\n\n def checkAction(self):\n if self.incantation:\n return \"wait\"\n if (self.nbArrived >= self.nbMeeting) and self.isCalling and (self.nbMeeting >= upLvl[self.lvl - 1][\"player\"]):\n self.ejected()\n self.setRessources()\n return (\"Incantation\")\n if self.isCalling:\n self.toSend.put(\"Inventory\")\n self.toSend.put(\"Right\")\n self.toSend.put(\"Right\")\n self.toSend.put(\"Right\")\n 
self.toSend.put(\"Right\")\n action = (\"Broadcast here:\" + self.team + \".\" + str(self.myId) + \".\" + str(self.lvl))\n return action\n if self.hasArrived:\n self.toSend.put(\"Inventory\")\n if self.isCalled:\n return \"wait\"\n self.toSend.put(\"Inventory\")\n return \"Look\"\n\n def actionAi(self):\n if self.currentCmd == \"Connect_nbr\":\n return \"wait\"\n if self.myId < 0:\n self.currentCmd = \"Connect_nbr\"\n return \"Connect_nbr\"\n action = \"wait\"\n if self.toSend.empty():\n if self.cmds.empty() and self.currentCmd == \"Nothing\":\n action = self.checkAction()\n # action = input(\"> \")\n if (action == \"wait\"): #temp\n return action #temp\n self.toSend.put(action)\n else:\n return \"wait\"\n if not self.cmds.full():\n action = self.toSend.get()\n self.cmds.put(action)\n if self.currentCmd == \"Nothing\":\n self.currentCmd = self.cmds.get()\n return action\n\n\n\n\n\n# ---------------------- NETWORK ----------------------\n\nclass clientInfo:\n def __init__(self, mySocket):\n self.socket = mySocket\n self.connected = 0\n self.readBuff = None\n self.writeBuff = None\n self.ai = clientIA()\n\n\n def serverCommunication(self, selectRes):\n if (selectRes == 1):\n res = clientLib.read_server(self.socket)\n result = ctypes.cast(res, ctypes.c_char_p)\n self.readBuff = result.value.decode('utf-8')\n if self.readBuff == \"end\":\n print(\"dead\")\n return -1\n self.readBuff = self.readBuff[:-1]\n for x in self.readBuff.split(\"\\n\"):\n if self.ai.serverResponse(x) < 0:\n print(\"error\")\n return -1\n return 0\n\n def mainLoop(self):\n run = 1\n while (run > -1):\n self.writeBuff = self.ai.actionAi()\n self.readBuff = None\n if (self.writeBuff != \"wait\"):\n self.writeBuff += '\\n'\n clientLib.test(self.writeBuff.encode('utf-8'))\n run = clientLib.client_select()\n if self.serverCommunication(run) < 0:\n return -1\n return 0\n\n\n def getPosnTeam(self):\n servMsg = self.readBuff.split(\"\\n\")\n if self.readBuff.find(\"ko\") != -1 and self.readBuff.find(\"unknown team\") != -1:\n exit(84)\n for i in servMsg:\n splited = i.split()\n if ((len(splited) == 1) and (splited[0].isdigit())):\n self.ai.nbClients = int(splited[0])\n if ((len(splited) == 2) and (splited[0].isdigit()) and (splited[1].isdigit())):\n self.ai.width = int(splited[0])\n self.ai.height = int(splited[1])\n if (self.ai.nbClients >= 0 and self.ai.width >= 0 and self.ai.height >= 0):\n self.connected = 1\n\n def connection(self, team):\n clientLib.test(team.encode('utf-8'))\n while (not self.connected):\n res = clientLib.client_select()\n self.serverCommunication(res)\n if (self.readBuff != None):\n self.getPosnTeam()\n\n\n# ---------------------- BEGIN PROGRAM ----------------------\n\ndef manageFlags(port, teamName):\n if (port == None or teamName == None):\n print(\"wrong flags\")\n exit(84)\n if (not port.isdigit()):\n print(\"wrong port\")\n exit(84)\n\ndef displayHelp():\n print(\"USAGE: ./zappy_ai -p port -n name -h machine\")\n print(\"\\tport is the port number\")\n print(\"\\tname is the name of the team\")\n print(\"\\tmachine is the name of the machine; localhost by default\")\n exit(0)\n\ndef main():\n port = None\n myIp = None\n teamName = None\n av = argv[1:]\n\n if (len(av) == 1 and av[0] == \"-help\"):\n displayHelp()\n try:\n opts, args = getopt.getopt(av, '-p:-n:-h:')\n except:\n print(\"Wrong flag\")\n return 84\n for opt, arg in opts:\n if opt in (\"-p\"):\n port = arg\n elif opt in (\"-n\"):\n teamName = arg + '\\n'\n elif opt in (\"-h\"):\n myIp = arg\n manageFlags(port, teamName)\n if (myIp == 
None):\n        myIp = \"127.0.0.1\"\n    mySocket = clientLib.create_client(ctypes.c_char_p(myIp.encode('utf-8')), ctypes.c_int(int(port)))\n    if ((mySocket < 0) or (clientLib.init_info(ctypes.c_int(mySocket)) < 0)):\n        print(\"failed connection\")\n        return 84\n    myClient = clientInfo(mySocket)\n    myClient.connection(teamName)\n    myClient.ai.team = teamName[:-1]\n    myClient.mainLoop()\n    return 0\n\nif __name__ == '__main__':\n    cLib = pathlib.Path().absolute() / \"src_client/client/clientLib.so\"\n    clientLib = ctypes.CDLL(cLib)\n    clientLib.read_server.argtypes = [ctypes.c_int]\n    clientLib.read_server.restype = ctypes.POINTER(ctypes.c_char)\n    main()\n","repo_name":"Nilex-x/Zappy","sub_path":"src_client/ia/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17782,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"27061730020","text":"import flask\nimport logging\nimport serial\nfrom termcolor import colored\nfrom Responder import Responder\n\napp = flask.Flask(__name__)\n\nhttp_logging = False\n\nresponder = None\n\nser = None\n\n\n@app.route(\"/\")\ndef default() -> str:\n    return '<html>' \\\n        '<body>' \\\n        \"<p>This is not the page you are looking for</p>\" \\\n        \"<p>Try:</p>\" \\\n        \"<p>/characteristic/read/&lt;uuid&gt;</p>\" \\\n        \"<p>/characteristic/write/&lt;uuid&gt;/&lt;value&gt;</p>\" \\\n        \"<p>/connect</p>\" \\\n        \"<p>/disconnect</p>\" \\\n        \"</body>\" \\\n        '</html>' \\\n        \"\"\n\n\n@app.errorhandler(404)\ndef page_not_found(e) -> str:\n    return \"404\"\n\n\n@app.route(\"/connect\")\ndef connect() -> str:\n    response = responder.connect_client()\n    ser.write(response.encode())\n    print(response, end=\"\")\n    return response\n\n\n@app.route(\"/disconnect\")\ndef disconnect() -> str:\n    response = responder.disconnect_client()\n    ser.write(response.encode())\n    print(response, end=\"\")\n    return response\n\n\n@app.route(\"/characteristic/read/<uuid>\")\ndef characteristics_read(uuid: str) -> str:\n    return responder.read(uuid)\n\n\n@app.route(\"/characteristic/write/<uuid>/<value>\")\ndef characteristics_write(uuid: str, value: str) -> str:\n    response = responder.write(uuid, value)\n    response_command = responder.generate_write_command(uuid, value)\n    ser.write(response_command.encode())\n    return response\n\n\ndef resp(response: str) -> str:\n    \"\"\"Encodes the response as html file.\n    Content is placed between <pre>
 tags\n    :param response: \"\"\"\n    return flask.Response(response, mimetype=\"text/plain\")\n\n\ndef run_server(responder_instance: Responder, ser_instance: serial):\n    \"\"\"Starts the webservice for client app HTTP requests\n    :param responder_instance:\n    :param ser_instance:\n    \"\"\"\n    global responder\n    responder = responder_instance\n\n    global ser\n    ser = ser_instance\n\n    if not http_logging:\n        log = logging.getLogger(\"werkzeug\")\n        log.setLevel(logging.ERROR)\n\n    print(colored(\"http server starting\", \"red\"))\n    app.run(host=\"0.0.0.0\", debug=False, port=6666)\n","repo_name":"SnelleJelle/rn4020-mock","sub_path":"Mock/httpserver.py","file_name":"httpserver.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
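A quick way to exercise the mock server in the record above is to hit its routes from a second process. The sketch below assumes the requests package (not imported by the record) and uses made-up UUID/value strings; the port matches the app.run(..., port=6666) call in the file.

import requests  # assumption: not part of the record above

BASE = "http://127.0.0.1:6666"  # run_server() binds port 6666

print(requests.get(BASE + "/connect").text)
print(requests.get(BASE + "/characteristic/read/2A00").text)              # placeholder UUID
print(requests.get(BASE + "/characteristic/write/2A00/48656C6C6F").text)  # placeholder value
print(requests.get(BASE + "/disconnect").text)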
+{"seq_id":"18704588663","text":"\"\"\"\nmodule containing meta setup.\nused for: Metacarpal (Fingers), Metatarsal (Toes)\n\nNOTE: inherit set_base and set_prefix from Module class\n\"\"\"\nimport pymel.core as pm\nimport maya.OpenMaya as om\nimport journey.lib.control as ctrl\nimport journey.lib.utils.tools as tools\nimport journey.lib.utils.kinematics as kine\nimport journey.lib.space_switcher as space\nimport journey.lib.layout as lo\nreload(ctrl)\nreload(tools)\nreload(kine)\nreload(lo)\nreload(space)\nimport journey.lib.layout as lo\n\n\nclass Meta(lo.Module):\n    def __init__(self,\n                 driven,\n                 splay_up_pos='',\n                 parent='',\n                 prefix='new',\n                 scale=1.0,\n                 base_rig=None\n                 ):\n        self.CLASS_NAME = self.__class__.__name__\n\n        self.driven = driven\n        self.splay_up_pos = splay_up_pos\n        self.parent = parent\n        self.prefix = prefix\n        self.scale = scale\n        self.base_rig = base_rig\n\n    def create(self, *args):\n        # create module from parent class\n        super(Meta, self).create_structure()\n\n        splay_mid_ctrl = ctrl.Control(prefix=self.prefix + 'SplayMidA',\n                                      scale=self.scale * 1.2,\n                                      trans_to=self.splay_up_pos,\n                                      rot_to=self.splay_up_pos,\n                                      parent=self.controls_grp,\n                                      shape='diamond')\n\n        splay_mid_ctrl.create()\n\n        splay_ctrl = ctrl.Control(prefix=self.prefix + 'SplayA',\n                                  scale=self.scale * 1.2,\n                                  trans_to=self.splay_up_pos,\n                                  rot_to=self.splay_up_pos,\n                                  parent=self.controls_grp,\n                                  shape='diamond')\n\n        splay_ctrl.create()\n\n        # try:\n        #     pm.parent(self.splay_up_pos, self.parts_grp)\n        # except:\n        #     pass\n\n        # splay mid and splay end controllers\n        pm.delete(pm.parentConstraint(self.driven[0], self.driven[-1], splay_mid_ctrl.get_offset(), st=['x', 'y']))\n        pm.delete(pm.parentConstraint(self.driven[-1], splay_ctrl.get_offset(), st=['y']))\n\n        #splay_mid_ctrl.freeze_transforms()\n        #splay_ctrl.freeze_transforms()\n\n        pm.addAttr(splay_mid_ctrl.get_ctrl(), shortName='splaymid', longName='SplayMid', nn='SPLAY Mid',\n                   at=\"enum\", keyable=False, en=\"=======\")\n        pm.addAttr(splay_ctrl.get_ctrl(), shortName='splay', longName='Splay', nn='SPLAY',\n                   at=\"enum\", keyable=False, en=\"=======\")\n\n        self.meta_ctrls_offset = []\n        self.meta_ctrls = []\n        for i, driven in enumerate(self.driven):\n            prefix = tools.split_at(driven, '_', 2)\n            letter = tools.int_to_letter(i).capitalize()\n            buffer = pm.createNode('transform', n=prefix + '_buffer_grp')\n            buffer_offset = pm.createNode('transform', n=prefix + '_buffer_offset_grp')\n            pm.parent(buffer, buffer_offset)\n            pm.parent(buffer_offset, self.controls_grp)\n            pm.delete(pm.parentConstraint(driven, buffer_offset))\n            meta_ctrl = ctrl.Control(prefix=self.prefix + letter,\n                                     scale=self.scale,\n                                     trans_to=driven,\n                                
     rot_to=driven,\n                                     parent=buffer,\n                                     shape='circle')\n            meta_ctrl.create()\n            meta_ctrl.freeze_transforms()\n            meta_ctrl.set_constraint(driven)\n            #pm.parent(meta_ctrl.get_offset(), self.controls_grp)\n\n            self.meta_ctrls_offset.append(meta_ctrl.get_offset())\n            self.meta_ctrls.append(meta_ctrl.get_ctrl())\n\n        tools.setup_splay(splay_mid_ctrl.get_ctrl(), splay_ctrl.get_ctrl(), self.driven,\n                          meta_ctrls_offset=self.meta_ctrls_offset, prefix=self.prefix, scale=self.scale)\n\n        if self.parent:\n            if pm.objExists(self.parent):\n                meta_ss = space.SpaceSwitcherLogic(self.parent, self.controls_grp, split=False, base_rig=self.base_rig)\n                meta_ss.setup_switcher()\n                meta_ss.set_space(self.parent)\n","repo_name":"nilusss/journey_framework","sub_path":"journey/lib/modules/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
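For context, the Meta module above is meant to be driven from inside a Maya session. A hypothetical call might look like the sketch below; every scene node name is a placeholder, and pymel plus the journey package must be available.

from journey.lib.modules.meta import Meta

meta = Meta(driven=['l_index_meta_jnt', 'l_middle_meta_jnt', 'l_pinky_meta_jnt'],  # placeholder joints
            splay_up_pos='l_splay_up_loc',   # placeholder locator
            parent='l_hand_ctrl',            # placeholder parent control
            prefix='lMeta',
            scale=1.0)
meta.create()  # builds the splay controls and one control per driven joint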
+{"seq_id":"5736163101","text":"import logging\nimport os\nimport glob\nimport motmetrics as mm\nfrom pathlib import Path\nfrom collections import OrderedDict\nimport torch\nimport torch.backends.cudnn as cudnn\n\nfrom detectron2.utils import comm\nfrom detectron2.config import CfgNode\nfrom detectron2.utils.file_io import PathManager\nfrom detectron2.utils.logger import setup_logger\nfrom detectron2.config import LazyConfig, instantiate\nfrom detectron2.engine.defaults import create_ddp_model, _try_get_key, _highlight\nfrom detectron2.utils.collect_env import collect_env_info\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.engine import (\n    launch,\n    default_argument_parser,\n    default_setup\n)\n\nfrom utils import ema \nfrom utils.model_utils import fuse_model\nfrom tracker.eval.evaluators import MOTEvaluator\nfrom register_data import *\n\nlogger = logging.getLogger(\"detectron2\")\n\ndef compare_dataframes(gts, ts):\n    accs = []\n    names = []\n    for k, tsacc in ts.items():\n        if k in gts:            \n            logger.info('Comparing {}...'.format(k))\n            accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5))\n            names.append(k)\n        else:\n            logger.warning('No ground truth for {}, skipping.'.format(k))\n\n    return accs, names\n\n\ndef do_track(cfg, model):\n    logger = logging.getLogger(\"detectron2\")\n    if cfg.train.model_ema.enabled and cfg.train.model_ema.use_ema_weights_for_eval_only:\n        logger.info(\"Run evaluation with EMA.\")\n    else:\n        logger.info(\"Run evaluation without EMA.\")\n        \n    cudnn.benchmark = True\n        \n    # set environment variables for distributed inference    \n    file_name = os.path.join(cfg.train.output_dir, cfg.track.experiment_name)\n    if comm.is_main_process():\n        os.makedirs(file_name, exist_ok=True)\n    results_folder = os.path.join(file_name, \"track_results\")    \n    os.makedirs(results_folder, exist_ok=True)    \n\n    # build evaluator\n    evaluator = MOTEvaluator(\n        args=cfg,\n        dataloader=instantiate(cfg.dataloader.test),\n    )\n\n    model.eval()\n    if cfg.track.fuse:\n        logger.info(\"\\tFusing model...\")\n        model = fuse_model(model)#\n\n    # start evaluate\n    evaluator.evaluate(\n        model,  cfg.track.fp16, results_folder\n    )\n\n    # evaluate MOTA\n    mm.lap.default_solver = 'lap'\n\n    if cfg.track.val_ann == 'val_half.json':\n        gt_type = '_val_half'\n    else:\n        gt_type = ''\n    print('gt_type', gt_type)\n    if cfg.track.mot20:\n        gtfiles = glob.glob(os.path.join('../MOT20/train', '*/gt/gt{}.txt'.format(gt_type)))\n    else:\n        gtfiles = glob.glob(os.path.join('../MOT17/train', '*/gt/gt{}.txt'.format(gt_type)))\n    print('gt_files', gtfiles)\n    tsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if not os.path.basename(f).startswith('eval')]\n\n    logger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))\n    logger.info('Available LAP solvers {}'.format(mm.lap.available_solvers))\n    logger.info('Default LAP solver \\'{}\\''.format(mm.lap.default_solver))\n    logger.info('Loading files.')\n    \n    gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles])\n    ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=-1)) for f in tsfiles])    \n    \n    mh = 
mm.metrics.create()    \n    accs, names = compare_dataframes(gt, ts)\n    \n    logger.info('Running metrics')\n    metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked',\n               'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses',\n               'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects']\n    summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)\n\n    div_dict = {\n        'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'],\n        'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']}\n    for divisor in div_dict:\n        for divided in div_dict[divisor]:\n            summary[divided] = (summary[divided] / summary[divisor])\n    fmt = mh.formatters\n    change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked',\n                       'partially_tracked', 'mostly_lost']\n    for k in change_fmt_list:\n        fmt[k] = fmt['mota']\n    print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names))\n\n    metrics = mm.metrics.motchallenge_metrics + ['num_objects']\n    summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)\n    logger.info('\\n'+mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))\n    logger.info('Completed')\n\n\ndef main(args):\n    cfg = LazyConfig.load(args.config_file)\n    cfg = LazyConfig.apply_overrides(cfg, args.opts)\n    default_setup(cfg, args)\n    \n    model = instantiate(cfg.model)\n    model.to(cfg.train.device)\n    model.device = torch.device(cfg.train.device)\n    model = create_ddp_model(model)\n    \n    # using ema for evaluation\n    ema.may_build_model_ema(cfg, model)\n    DetectionCheckpointer(model, **ema.may_get_ema_checkpointer(cfg, model)).load(cfg.train.init_checkpoint)\n    # Apply ema state for evaluation\n    if cfg.train.model_ema.enabled and cfg.train.model_ema.use_ema_weights_for_eval_only:\n        ema.apply_model_ema(model)\n    do_track(cfg, model)\n\nif __name__ == \"__main__\":\n    args = default_argument_parser(epilog = \"SparseTrack Eval\").parse_args()\n    launch(\n        main,\n        args.num_gpus,\n        num_machines=args.num_machines,\n        machine_rank=args.machine_rank,\n        dist_url=args.dist_url,\n        args=(args,),\n    )\n'''\nCUDA_VISIBLE_DEVICES=0 python3 track.py  --num-gpus 1  --config-file mot17_track_cfg.py \n\nCUDA_VISIBLE_DEVICES=0 python3 track.py  --num-gpus 1  --config-file mot20_track_cfg.py \n\nCUDA_VISIBLE_DEVICES=0 python3 track.py  --num-gpus 1  --config-file mot17_ab_track_cfg.py \n\nCUDA_VISIBLE_DEVICES=0 python3 track.py  --num-gpus 1  --config-file mot20_ab_track_cfg.py  \n\nCUDA_VISIBLE_DEVICES=0 python3 track.py  --num-gpus 1  --config-file dancetrack_bot_cfg.py  \n\nCUDA_VISIBLE_DEVICES=0 python3 track.py  --num-gpus 1  --config-file dancetrack_sparse_cfg.py  \n'''\n","repo_name":"hustvl/SparseTrack","sub_path":"track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":6379,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"69"}
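The script above leans on motmetrics accumulators produced by compare_to_groundtruth; the same pipeline can be seen end to end on toy data. The sketch below is self-contained and uses made-up distances, not MOT17 ground truth.

import numpy as np
import motmetrics as mm

acc = mm.MOTAccumulator(auto_id=True)
acc.update([1, 2], [1, 2], [[0.1, np.nan], [np.nan, 0.3]])  # frame 0: both ids matched
acc.update([1, 2], [1], [[0.2], [np.nan]])                  # frame 1: id 2 is missed

mh = mm.metrics.create()
summary = mh.compute(acc, metrics=['num_frames', 'mota', 'motp'], name='toy')
print(mm.io.render_summary(summary))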
+{"seq_id":"8772939191","text":"\nimport requests\nimport pandas as pd\nfrom plyer import notification\nimport sqlite3\n\n\ndef verificar_api(url, nome_api, lista):\n    \"\"\"\n    Verifica APIs indisponíveis\n\n    Parâmetros:\n    url: url da API\n    nome_api: nome da API\n    lista: lista de APIs indisponíveis\n\n    \"\"\"\n    resp = requests.get(url)\n    if resp.status_code != 200:\n        lista.append(nome_api)\n\n\ndef api_indisponivel(lista):\n    \"\"\"\n    Notifica APIs indisponíveis\n\n    Parâmetros:\n    lista: lista de apis indisponíveis\n\n    \"\"\"\n    if lista:\n        message = f'Não foi possível acessar as seguintes APIs: {\", \".join(lista)}'\n    notification.notify(\n        title='APIs Indisponíveis',\n        message=message,\n        timeout=15\n    )\n\n\ndef retorna_municipios(siglaUF):\n    \"\"\"\n    Retorna os dados brutos dos municípios por unidade federativa\n\n    Parâmetros:\n    siglaUF: Sigla da unidade federativa (Exemplo: SP, BA, RJ)\n\n    Retorna: \n    Lista de municípios por unidade federativa. \n    Caso a api estiver indisponível, uma mensagem de erro irá aparecer. \n\n    \"\"\"\n    url = f'https://brasilapi.com.br/api/ibge/municipios/v1/{siglaUF}?providers=dados-abertos-br,gov,wikipedia'\n    resp = requests.get(url)\n    if resp.status_code == 200:\n        data_municipios = resp.json()\n        return data_municipios\n    else:\n        print(f\"Falha na solicitação da API para {siglaUF}.\")\n\n\ndef trata_municipio(muni):\n    \"\"\"\n    Trata os dados recebidos pela API de municípios, desaninhando e renomeando colunas.\n\n    Parâmetros:\n    muni: lista com os dados dos municípios\n\n    Retorna: \n    Lista de municípios tratados\n\n    \"\"\"\n    lista_municipios = []\n    # para cada municipio por uf presente na lista de municipios brutos aninhados\n    for lista_municipios_uf in muni:\n        # para cada municipio puro presente na lista de municipio por uf\n        for municipio in lista_municipios_uf:\n            # atribuindo colunas\n            nome = municipio['nome']\n            codigo_ibge = municipio['codigo_ibge']\n            uf = municipio['uf']\n            # adicionando dados tratados a lista de municipios\n            lista_municipios.append(\n                {'uf': uf, 'nome': nome, 'codigo_ibge': codigo_ibge})\n    return lista_municipios\n\n\ndef texto_em_maiusculo(data, col):\n    \"\"\"\n    Transforma um conjunto de strings em maiúsculo\n\n    Parâmetros:\n    data: dataframe a ser trabalhado\n    col: coluna na qual os caracteres serão alterados\n\n    \"\"\"\n    data[col] = data[col].str.upper()\n\n\ndef string_data(data):\n    \"\"\"\n    Transforma dados de data que estão no formato string para datetime\n\n    Parâmetros:\n    data: dataframe trabalhado\n\n    \"\"\"\n    for column in data.columns:\n        if 'data' in column:\n            data[column] = pd.to_datetime(data[column])\n\n\ndef string_vazia(data, coluna):\n    \"\"\"\n    Identifica e quantifica as colunas que possuem valores vazios que não são reconhecidos pelo método .isna()\n\n    Parâmetros:\n    data: dataframe trabalhado\n    coluna: coluna a ser trabalhada\n\n    Retorna: \n    A quantidade de campos vazios de uma determinada coluna.\n\n    \"\"\"\n    resultado = (data[coluna] == '').sum()\n    vazio = f\"A coluna {coluna} do Dataframe possui {resultado} campos em branco\"\n    return vazio\n\n\ndef transforma_int(data, coluna):\n    \"\"\"\n    Transforma campos string '' em zeros \n\n    Parâmetros:\n    data: dataframe 
trabalhado\n    coluna: coluna a ser trabalhada\n\n    \"\"\"\n    data[coluna] = data[coluna].replace('', 0)\n    data[coluna] = data[coluna].astype(int)\n\n\ndef elimina_vazios_int(data, coluna):\n    \"\"\"\n    Elimina os campos que possuem o valor 0 de uma determinada coluna\n\n    Parâmetros:\n    data: dataframe trabalhado\n    coluna: coluna a ser trabalhada\n\n    Retorna:\n    Dataframe filtrado sem os zeros na coluna indicada\n\n    \"\"\"\n    data = data[data[coluna] != 0]\n    data.reset_index(drop=True, inplace=True)\n    return data\n\n\ndef elimina_tel(data, coluna):\n    \"\"\"\n    Elimina campos que possuem telefones com menos de 7 dígitos\n\n    Parâmetros:\n    data: dataframe trabalhado\n    coluna: coluna a ser trabalhada\n\n    Retorna:\n    Dataframe filtrado apenas com telefones com 7 ou 8 dígitos \n\n    \"\"\"\n    data[coluna] = data[coluna].astype(str)\n    data = data[data[coluna].str.len() >= 7]\n    data.loc[:, coluna] = data[coluna].astype(int)\n    data.reset_index(drop=True, inplace=True)\n    return data\n\n\ndef adiciona_3_telefone(data, coluna):\n    \"\"\"\n    Adiciona o número 3 na frente dos telefones que possuem 7 dígitos\n\n    Parâmetros:\n    data: dataframe trabalhado\n    coluna: coluna a ser trabalhada\n\n    Retorna:\n    Dataframe filtrado com todos os telefones com 8 dígitos\n\n    \"\"\"\n    filtro = data[coluna].astype(str).str.len() == 7\n    data.loc[filtro, coluna] = '3' + data.loc[filtro, coluna].astype(str)\n    data[coluna] = data[coluna].astype(int)\n    return data\n\n\ndef transforma_float(data, coluna):\n    \"\"\"\n    Transforma campos em float de uma determinada coluna\n\n    Parâmetros:\n    data: dataframe trabalhado\n    coluna: coluna a ser trabalhada\n\n    \"\"\"\n    data[coluna] = data[coluna].astype(float)\n    data[coluna] = data[coluna].round(2)\n\n\ndef remove_caractere_especial(data, col):\n    \"\"\"\n    Remove os caracteres especiais de uma coluna\n\n    Parâmetros:\n    data: dataframe trabalhado\n    col: coluna a ser trabalhada\n\n    \"\"\"\n    data[col] = data[col].str.replace('Ç', 'C')\n    data[col] = data[col].str.replace('Á', 'A')\n    data[col] = data[col].str.replace('Ã', 'A')\n    data[col] = data[col].str.replace('À', 'A')\n    data[col] = data[col].str.replace('Â', 'A')\n    data[col] = data[col].str.replace('É', 'E')\n    data[col] = data[col].str.replace('È', 'E')\n    data[col] = data[col].str.replace('Ê', 'E')\n    data[col] = data[col].str.replace('Í', 'I')\n    data[col] = data[col].str.replace('Ì', 'I')\n    data[col] = data[col].str.replace('Ó', 'O')\n    data[col] = data[col].str.replace('Ò', 'O')\n    data[col] = data[col].str.replace('Ô', 'O')\n    data[col] = data[col].str.replace('Ú', 'U')\n    data[col] = data[col].str.replace('Ù', 'U')\n\n\ndef tabelas_bd():\n    \"\"\"\n    Estabelece uma conexão com o banco de dados e mostra todas as tabelas existentes nele.\n\n    \"\"\"\n    conn = sqlite3.connect('00_db/corretoras-brasil.db')\n    query = \"SELECT name FROM sqlite_master WHERE type='table'\"\n    schema = pd.read_sql(query, conn)\n    print(schema)\n    conn.close()\n\n\ndef salva_bd(df, nome_tabela):\n    \"\"\"\n    Salva uma tabela nova no banco de dados\n\n    Parâmetros:\n    df: dataframe trabalhado\n    nome_tabela: nome dado a tabela dentro do banco de dados\n\n    \"\"\"\n    conn = sqlite3.connect('00_db/corretoras-brasil.db')\n    df.to_sql(nome_tabela, conn, if_exists='replace', index=False)\n    conn.close()\n\n\ndef carrega_bd(nome_tabela):\n    \"\"\"\n    
Consulta uma tabela em um banco de dados\n\n    Parâmetros:\n    nome_tabela: nome da tabela no banco de dados\n\n    Retorna:\n    Dados da tabela obtidos durante a consulta\n\n    \"\"\"\n    conn = sqlite3.connect('00_db/corretoras-brasil.db')\n    query = f\"SELECT * FROM {nome_tabela}\"\n    consulta = pd.read_sql(query, conn)\n    conn.close()\n    return consulta\n\n\ndef unstacked_count_agg(df, index, coluna, agg):\n    \"\"\"\n    Realiza unstack (desempilhamento) de um dataframe\n\n    Parâmetros:\n    df: dataframe trabalhado\n    index: coluna a ser desempilhada\n    coluna: coluna para agregação\n    agg: o tipo de agregação\n\n    Retorna:\n    Dataframe desempilhado e agregado com base em outra coluna\n\n    \"\"\"\n    # agrupando o df e adicionando uma função de agregação\n    grouped = df.groupby([f'{index}', f'{coluna}'])[f'{coluna}'].agg(agg)\n    # unstack na segunda coluna\n    unstacked = grouped.unstack(level=1)\n    unstacked = unstacked.fillna(0)\n    return unstacked\n\n\ndef stacked_tabela(df, index, colunas):\n    \"\"\"\n    Realiza stack (empilhamento) de um dataframe\n\n    Parâmetros:\n    df: dataframe trabalhado\n    index: coluna para ser utilizada como index \n    colunas: colunas a serem empuilhadas\n\n    Retorna:\n    Dataframe empilhado com base em determinadas colunas\n\n    \"\"\"\n    # df com as colunas a serem empilhadas\n    stacked_df = df.set_index([index])[colunas].stack().reset_index()\n    stacked_df.columns = [index, 'colunas_empilhadas', 'valores']\n    return stacked_df\n\n\ndef alerta_etapa_concluida(base, nome, etapa):\n    \"\"\"\n    Alerta de etapa concluida\n\n    Parâmetros:\n    base: dataframe trabalhado\n    nome: nome do dataframe\n    etapa: etapa concluida\n\n    \"\"\"\n    if base:\n        message = f'A base {nome} foi {etapa} com sucesso'\n        notification.notify(\n            title='Concluído',\n            message=message,\n            timeout=15)\n","repo_name":"zirtaebea/corretoras-brasil","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":8647,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
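A minimal way to wire the helpers in the record above together: check one BrasilAPI endpoint and notify only when something is down. Note that api_indisponivel only builds its message when the list is non-empty, so the call is guarded here. This assumes it runs from the same module, so the names are in scope; the URL follows retorna_municipios.

indisponiveis = []
verificar_api('https://brasilapi.com.br/api/ibge/municipios/v1/SP'
              '?providers=dados-abertos-br,gov,wikipedia',
              'BrasilAPI municipios', indisponiveis)
if indisponiveis:
    api_indisponivel(indisponiveis)  # desktop notification via plyer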
+{"seq_id":"71995071900","text":"import sys, datetime, os\n\nfrom os.path import exists\n\nif not exists(\"book_list.txt\"):\n        open(\"book_list.txt\", \"x\")\n\ncommand = sys.argv\n\nclass Book:\n\n    def set_id(self):\n        with open(\"book_list.txt\", \"r+\") as f:\n            obj = f.readlines()\n            id=1\n            if obj:\n                id=int(obj[-5].split(\":\")[1]) + 1\n            element=(f'Book ID: {id}')\n            f.write(f\"{element}\\n\")\n        return True\n        \n    def add_book(self):\n        title = input(\"Enter book name:\\n\")\n        author = input(\"\\nEnter writer name:\\n\")\n        with open(\"book_list.txt\", \"a+\") as f:\n            element=(f'Book name: {title}\\nWriter: {author}')\n            f.write(f\"{element}\\n\")\n            print(\"\\nAdded succesfully!\")\n        return True\n        \n    def set_date(self):\n        with open(\"book_list.txt\", \"a+\") as f:\n            obj = f.readlines()\n            element = (f'Added in: {datetime.datetime.today().strftime(\"%d %B %Y\")}')\n            f.write(f\"{element}\\n{'*' * 50}\\n\")\n\n    def show_book(self):\n        book_id=input(\"Enter book ID:\\n\")\n        print(f\"\\n{'*'*50}\")\n        with open(\"book_list.txt\", \"r+\") as f:\n            obj = f.readlines()\n            for i in range(0, len(obj), 5):\n                search = obj[i].split(\":\")[1].strip()\n                if book_id == search:\n                    index=[i, i+1, i+2, i+3, i+4]\n                    for i in range(len(obj)):\n                        if i in index:\n                            print(obj[i])\n                    break\n    \n    def show_all(self):\n        with open(\"book_list.txt\", \"r+\") as f:\n            obj = f.readlines()\n            for i in range(0, len(obj), 5):\n                search = obj[-5].split(\":\")[1].strip()\n                count=0\n                if count < int(search):\n                    count=search\n        print(f\"There are {count} books!\")\n        print(f\"{'*'*50}\")\n        print(*obj)\n\n    def remove_book(self):\n        book_id = input(\"Enter book ID:\\n\")\n        with open(\"book_list.txt\", \"r+\") as f:\n                obj = f.readlines()\n                f.seek(0)\n                for i in range(0, len(obj), 5):\n                    remove_element = obj[i].split(\":\")[1].strip()\n                    if book_id == remove_element:\n                        index = i\n                        f.truncate()\n                        for i in range(len(obj)):\n                            if i not in  range(index, index+5):\n                                f.write(obj[i])\n                        print('\\nSuccesfully deleted!\\n')\n                        break\n                else:\n                    print('\\nID not found\\n')\n\nbook = Book()\nif len(command) == 2 and command[1] == 'add':\n    book.set_id()\n    book.add_book()\n    book.set_date()\nelif len(command) == 3 and command[1] == 'show' and command[2] == 'all':\n    book.show_all()\nelif len(command) == 3 and command[1] == 'show' and command[2] == 'book':\n    book. show_book()\nelif len(command) == 2 and command[1] == 'remove':\n    book.remove_book()\nelse:\n    print('Please, enter right input')","repo_name":"shabnamadil/BookShop","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"2322219541","text":"#!/usr/bin/env python3\n\n\"\"\"A simple example which times reading of all traces in a SEG Y file.\n\nUsage:\n\n    timed_reader.py \n\n\"\"\"\nfrom __future__ import print_function\n\nimport datetime\n\nimport os\n\nimport sys\nimport traceback\n\nfrom segpy.reader import create_reader\n\n\ndef read_traces(in_filename):\n    with open(in_filename, 'rb') as in_file:\n\n        t0 = datetime.datetime.now()\n\n        segy_reader = create_reader(in_file)\n\n        t1 = datetime.datetime.now()\n\n        for trace_index in segy_reader.trace_indexes():\n            trace = segy_reader.trace_samples(trace_index)\n\n        t2 = datetime.datetime.now()\n\n    time_to_read_header = (t1 - t0).total_seconds()\n    time_to_read_traces = (t2 - t1).total_seconds()\n    time_to_read_both = (t2 - t0).total_seconds()\n\n    print(\"Time to read headers : {} seconds\".format(time_to_read_header))\n    print(\"Time to read traces  : {} seconds\".format(time_to_read_traces))\n    print(\"Total time           : {} seconds\".format(time_to_read_both))\n\n\ndef main(argv=None):\n    if argv is None:\n        argv = sys.argv[1:]\n\n    try:\n        in_filename = argv[0]\n    except IndexError:\n        print(globals()['__doc__'], file=sys.stderr)\n        return os.EX_USAGE\n\n    try:\n        read_traces(in_filename)\n    except (FileNotFoundError, IsADirectoryError) as e:\n        print(e, file=sys.stderr)\n        return os.EX_NOINPUT\n    except PermissionError as e:\n        print(e, file=sys.stderr)\n        return os.EX_NOPERM\n    except Exception as e:\n        traceback.print_exception(type(e), e, e.__traceback__, file=sys.stderr)\n        return os.EX_SOFTWARE\n    return os.EX_OK\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","repo_name":"sixty-north/segpy","sub_path":"examples/timed_reader.py","file_name":"timed_reader.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"69"}
+{"seq_id":"44392614416","text":"import pygame\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Ship(Sprite):\r\n    def __init__(self, ai_game):\r\n        super().__init__()\r\n        self.screen = ai_game.screen\r\n        self.screen_rect = ai_game.screen.get_rect()\r\n        # load picture and get the rectangle of ship\r\n        self.image = pygame.image.load('images/senpai_71x71.jpg')\r\n        self.rect = self.image.get_rect()\r\n        # location placement\r\n        self.rect.midbottom = self.screen_rect.midtop \r\n        # moving sign\r\n        self.moving_right = False\r\n        self.moving_left = False\r\n\r\n        self.settings = ai_game.settings\r\n        # attribute\r\n        self.x = float(self.rect.x)\r\n\r\n    def update(self):\r\n        if self.moving_right and self.rect.right < self.screen_rect.right:\r\n            self.x += self.settings.ship_speed\r\n        if self.moving_left and self.rect.left > 0:\r\n            self.x -= self.settings.ship_speed\r\n        self.rect.x = self.x\r\n\r\n    def blitme(self):\r\n        self.screen.blit(self.image, self.rect)\r\n\r\n    def center_ship(self):\r\n        self.rect.midbottom = self.screen_rect.midbottom\r\n        self.x = float(self.rect.x)\r\n","repo_name":"Njasoo/coding","sub_path":"python/python project/game/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42761680940","text":"import boto3\n\ns3_client = boto3.client('s3')\ns3 = boto3.resource('s3')\n\n\ndef set_bucket_public_to_private():\n    \"\"\"\n    Set bucket acl to public-read\n    bucket = s3.Bucket('mysite599.com')\n    bucket.Acl().put(ACL='public-read')\n\n    Example of public-read bucket Acl:\n    [{'Grantee': {'ID': 'af152757608fdf5aee625cdf1d190570b44f601073e0e1a598db5595a3dc9dcc',\n                  'Type': 'CanonicalUser'}, 'Permission': 'FULL_CONTROL'},\n     {'Grantee': {'Type': 'Group', 'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'},\n      'Permission': 'READ'}]\n\n    \"\"\"\n    all_users = 'http://acs.amazonaws.com/groups/global/AllUsers'\n    bucket = s3.Bucket('mysite599.com')\n    bucketacl = s3_client.get_bucket_acl(Bucket='mysite599.com')\n\n    for item in bucketacl['Grants']:\n        if item['Grantee']['Type'] == 'Group':\n            if item['Grantee']['URI'] == all_users:\n                print(\"public access found for {}\".format('mysite599.com'))\n                print(\"set back to private\")\n                bucket.Acl().put(ACL='private')\n\n        print(\"current bucket '{}' acl setting is {}\".format('mysite599.com', bucketacl['Grants']))\n\nset_bucket_public_to_private()\n","repo_name":"wcx599/python_in_action","sub_path":"aws/s3/s3acl.py","file_name":"s3acl.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"41682452938","text":"#! /usr/bin/python\n\n# Voice Activity Detection (VAD) tool.\n# use the vad_help() function for instructions.\n# Navid Shokouhi December 2012.\n# https://github.com/idnavid/py_vad_tool\n# Updated: May 2017 for Speaker Recognition collaboration.\n\nfrom audio_tools import enframe, deframe\nimport numpy as np\n\n\n##Function definitions:\ndef vad_help():\n    \"\"\"Voice Activity Detection (VAD) tool.\n\t\n\tNavid Shokouhi May 2017.\n    \"\"\"\n    print(\"Usage:\")\n    print(\"python unsupervised_vad.py\")\n\n\n#### Energy tools\ndef zero_mean(xframes):\n    \"\"\"\n        remove mean of framed signal\n        return zero-mean frames.\n        \"\"\"\n    m = np.mean(xframes,axis=1)\n    xframes = xframes - np.tile(m,(xframes.shape[1],1)).T\n    return xframes\n\ndef compute_nrg(xframes):\n    # calculate per frame energy\n    n_frames = xframes.shape[1]\n    return np.diagonal(np.dot(xframes,xframes.T))/float(n_frames)\n\ndef compute_log_nrg(xframes):\n    # calculate per frame energy in log\n    n_frames = xframes.shape[1]\n    raw_nrgs = np.log(compute_nrg(xframes+1e-5))/float(n_frames)\n    return (raw_nrgs - np.mean(raw_nrgs))/(np.sqrt(np.var(raw_nrgs)))\n\ndef power_spectrum(xframes):\n    \"\"\"\n        x: input signal, each row is one frame\n        \"\"\"\n    X = np.fft.fft(xframes,axis=1)\n    X = np.abs(X[:,:X.shape[1]/2])**2\n    return np.sqrt(X)\n\ndef nrg_vad(xframes,percent_thr,nrg_thr=0.,context=5):\n    \"\"\"\n        Picks frames with high energy as determined by a \n        user defined threshold.\n        \n        This function also uses a 'context' parameter to\n        resolve the fluctuative nature of thresholding. \n        context is an integer value determining the number\n        of neighboring frames that should be used to decide\n        if a frame is voiced.\n        \n        The log-energy values are subject to mean and var\n        normalization to simplify the picking the right threshold. \n        In this framework, the default threshold is 0.0\n        \"\"\"\n    xframes = zero_mean(xframes)\n    n_frames = xframes.shape[0]\n    \n    # Compute per frame energies:\n    xnrgs = compute_log_nrg(xframes)\n    xvad = np.zeros((n_frames,1))\n    for i in range(n_frames):\n        start = max(i-context,0)\n        end = min(i+context,n_frames-1)\n        n_above_thr = np.sum(xnrgs[start:end]>nrg_thr)\n        n_total = end-start+1\n        xvad[i] = 1.*((float(n_above_thr)/n_total) > percent_thr)\n    return xvad\n\n\ndef process_vad(s, fs, win_len, hop_len, frame_len_s, percent_high_nrg):\n        \n    sframes = enframe(s,win_len,hop_len) # rows: frame index, cols: each frame\n    \n    vad = nrg_vad(sframes,percent_high_nrg)\n    vad_signal = deframe(vad,win_len,hop_len)\n        \n    # Windowing of 1 sec\n    windowing_len = int(frame_len_s*fs);\n    \n    ini = 0\n    fin = windowing_len\n    n_frames = int(np.floor(len(s)/windowing_len))\n    vad_vector = np.zeros(n_frames)\n    for i in range(0,n_frames-1):\n        frame = vad_signal[ini:fin]\n        value = np.median(frame)\n        vad_vector[i] = value\n        \n        ini = fin+1\n        fin = fin+windowing_len\n    \n    return vad_vector\n\n\n","repo_name":"BINDI-UC3M/wemac_dataset_signal_processing","sub_path":"speech_processing/unsupervised_vad.py","file_name":"unsupervised_vad.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"7402131331","text":"#!/usr/bin/env python\n# vim:fileencoding=utf-8\n#\n# KSCrawlerIRCBot.py (c) 2014 by Lucio Andrés Illanes Albornoz \n# Originally based on [1].\n#\n# References:\n# Fri Oct  3 19:41:45 CEST 2014 [1] MA3STR0/kimsufi-crawler · GitHub \n#\n\nimport daemon\nimport json\nimport logging\nimport tornado.web\nfrom socket import AF_INET, SOCK_STREAM, socket\nfrom tornado.gen import coroutine\nfrom tornado.httpclient import AsyncHTTPClient\nfrom tornado.ioloop import IOLoop, PeriodicCallback\nfrom tornado.iostream import IOStream\n\nclass IRCBot(object):\n# {{{ class IRCBot\n\tdef __init__(self, config, logger, *args, **kwargs):\n\t\tself.config = config; self.logger = logger; self._connect();\n\n\tdef _connect(self):\n\t\tself._socket = socket(AF_INET, SOCK_STREAM, 0)\n\t\tself._stream = IOStream(self._socket)\n\t\tself._stream.connect((self.config[\"irc_server_hname\"], self.config[\"irc_server_port\"]), self._onConnect)\n\n\tdef _onConnect(self):\n\t\tself._stream.write(str(\"NICK %s\\r\\n\" % self.config[\"irc_nick\"]))\n\t\tself._stream.write(str(\"USER %s 0 0 :%s\\r\\n\" % (self.config[\"irc_user\"], self.config[\"irc_gecos\"])))\n\t\tself._next()\n\n\tdef _next(self):\n\t\tself._stream.read_until(\"\\r\\n\", self._onIncoming)\n\n\tdef _onIncoming(self, line):\n\t\ttokens = line.split(\" \")\n\t\tif not tokens:\n\t\t\treturn\n\t\tif tokens[0].startswith(\":\"):\n\t\t\tfrom_nick = tokens[0][1:].split(\"!\")[0]\n\t\t\ttry:\n\t\t\t\tfrom_user = tokens[0][1:].split(\"!\")[1].split(\"@\")[0]\n\t\t\t\tfrom_hname = tokens[0][1:].split(\"@\")[1]\n\t\t\texcept IndexError:\n\t\t\t\tfrom_user = \"\"; from_hname = \"\"; pass;\n\t\t\ttokens.remove(tokens[0])\n\t\telse:\n\t\t\tfrom_nick = \"\"; from_user = \"\"; from_hname = \"\";\n\t\tlast_token = [token for token in tokens if token.startswith(\":\")]\n\t\tif last_token:\n\t\t\tlast_token_idx = tokens.index(last_token[0])\n\t\t\ttokens = tokens[0:last_token_idx] + [\" \".join(tokens[last_token_idx:])[1:]]\n\t\t\tif tokens[-1][-2:] == \"\\r\\n\":\n\t\t\t\ttokens[-1] = tokens[-1][:-2]\n\t\ttokens[0] = tokens[0].upper()\n\t\tfor handler in self.__class__.__dict__:\n\t\t\tif handler == (\"_handle_\" + tokens[0]):\n\t\t\t\tself.__class__.__dict__[handler](self, from_nick, from_user, from_hname, tokens[1:])\n\t\tself._next()\n# }}}\n\nclass KSCrawler(object):\n# {{{ class KSCrawler\n\tdef __init__(self, config, logger, *args, **kwargs):\n\t\tself.config = config; self._STATES = {};\n\t\tPeriodicCallback(self._run_crawler, self.config[\"crawler_frequency\"] * 1000).start()\n\n\t@coroutine\n\tdef _run_crawler(self):\n\t\thttp_client = AsyncHTTPClient()\n\t\tresponse = yield http_client.fetch(self.config[\"ks_url\"])\n\t\tresponse_json = json.loads(response.body.decode(\"utf-8\"))\n\t\tavailability = response_json[\"answer\"][\"availability\"]\n\t\tfor item in availability:\n\t\t\tif self.config[\"ks_server_types\"].get(item[\"reference\"]) in self.config[\"crawler_servers\"]:\n\t\t\t\tzones = [e[\"zone\"] for e in item[\"zones\"]\n\t\t\t\t\t if e[\"availability\"] not in [\"unknown\", \"unavailable\"]]\n\t\t\t\tif [z for z in self.config[\"crawler_zones\"] if z in zones]:\n\t\t\t\t\tserver = self.config[\"ks_server_types\"][item[\"reference\"]]\n\t\t\t\t\ttext = \"Server %s is available in %s\" \\\n\t\t\t\t\t\t% (server, \", \".join([self.config[\"ks_datacenters\"][zone] for zone in zones]))\n\t\t\t\t\tmessage = {\n\t\t\t\t\t\t\"Text\": text,\n\t\t\t\t\t\t\"Title\": \"Server %s available\" % server,\n\t\t\t\t\t\t\"URL\": 
\"http://www.kimsufi.com/fr/index.xml\"\n\t\t\t\t\t}\n\t\t\t\t\tstate_id = \"%s_available_in_%s\" % (server, \"+\".join(zones))\n\t\t\t\t\tself._update_state(state_id, True, message)\n\t\t\t\telse:\n\t\t\t\t\tstate_id = \"\"; self._update_state(state_id, False)\n\n\tdef _update_state(self, state, value, message=False):\n\t\tif state not in self._STATES:\n\t\t\tself._STATES[state] = False\n\t\tif value is not self._STATES[state]:\n\t\t\tself.logger.info(\"State change - %s:%s\", state, value)\n\t\tif value and not self._STATES[state]:\n\t\t\tself._handle_stateChange(message)\n\t\tself._STATES[state] = value\n\n\tdef _handle_stateChange(self, message):\n\t\tpass\n# }}}\n\nclass KSCrawlerIRCBot(IRCBot, KSCrawler):\n# {{{ class KSCrawlerIRCBot\n\tdef __init__(self, *args, **kwargs):\n\t\tIRCBot.__init__(self, *args, **kwargs)\n\t\tKSCrawler.__init__(self, *args, **kwargs)\n\n\tdef _handle_001(self, from_nick, from_user, from_hname, tokens):\n\t\tself.logger.info(\"Received RPL_WELCOME from \" + self.config[\"irc_server_hname\"])\n\t\tfor channel in self.config[\"irc_channels\"]:\n\t\t\tself.logger.info(\"Joining channel \" + channel)\n\t\t\tself._stream.write(str(\"JOIN %s\\r\\n\" % channel))\n\t\tpass\n\n\tdef _handle_PING(self, from_nick, from_user, from_hname, tokens):\n\t\tself.logger.info(\"Received PING from %s, sending PONG.\" % self.config[\"irc_server_hname\"])\n\t\tself._stream.write(str(\"PONG :%s\\r\\n\" % tokens[0]))\n\t\tpass\n\n\tdef _handle_stateChange(self, message):\n\t\tfor channel in self.config[\"irc_channels\"]:\n\t\t\tfor key in message:\n\t\t\t\tself._stream.write(str(\"PRIVMSG %s :%c%s: %s\\r\\n\" % (channel, key[0].upper(), key[1:], message[key])))\n# }}}\n\nif __name__ == \"__main__\":\n\twith open(\"KSCrawlerIRCBot.json\", \"r\") as configfile:\n\t\tconfig = json.loads(configfile.read())\n\tlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s %(message)s\")\n\tlogger = logging.getLogger(__name__)\n\twith daemon.DaemonContext():\n\t\tkcib = KSCrawlerIRCBot(config, logger)\n\t\tIOLoop.instance().start()\n\n# vim:ts=8 sw=8 tw=120 noexpandtab foldmethod=marker fileencoding=utf-8\n","repo_name":"lalbornoz/misc_cage","sub_path":"Python/KSCrawlerIRCBot.py","file_name":"KSCrawlerIRCBot.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"15405618274","text":"import requests\n\n\nclass Message:\n    def __init__(self, access_token: str, phone_number_id: str):\n        self.url = f\"https://graph.facebook.com/v15.0/{phone_number_id}/messages\"\n        self.headers = {\n            \"Authorization\": f\"Bearer {access_token}\",\n            \"Content-Type\": \"application/json\",\n        }\n\n    def req(self, method: str = \"POST\", **kwargs):\n        res = requests.request(method, self.url, headers=self.headers, **kwargs)\n        res.raise_for_status()\n        return res.json()\n\n    def send_message(self, phone_number: str, message: str):\n        res = self.req(\n            json={\n                \"messaging_product\": \"whatsapp\",\n                \"to\": phone_number,\n                \"type\": \"text\",\n                \"text\": {\"preview_url\": False, \"body\": message},\n            },\n        )\n\n    def send_image_url(self, phone_number: str, image_url: str):\n        res = self.req(\n            json={\n                \"messaging_product\": \"whatsapp\",\n                \"to\": phone_number,\n                \"type\": \"image\",\n                \"image\": {\"link\": image_url},\n            },\n        )\n\n    def send_message_to_all(self, message):\n        res = self.req(\n            json={\n                \"messaging_product\": \"whatsapp\",\n                \"to\": \"all\",\n                \"type\": \"text\",\n                \"text\": {\"preview_url\": False, \"body\": message},\n            },\n        )\n","repo_name":"rabilrbl/Chatbot-WhatsApp","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"19065836707","text":"# Question 1:\n# Write a function to print \"hello_USERNAME!\" USERNAME Is the input of the function. The first line of cide has been defined as below.\n\ndef hello_name(user_name):\n    \"\"\"Display a simple greeting.\"\"\"\n    print(\"Hello_\" + user_name.upper() + \"!\")\n\nhello_name(\"USERNAME\")\n\n# Question 2:\n# Write a python function, first_odds that prints the odd numbers from 1-100 and returns nothing\ndef first_odds():\n    first_odds = list(range(1,100))\n    print(first_odds)\n\n# Question 3:\n# Please write a Python function, max_num_in_list to return the max number of a given list. The first line of code has been defined as below.\ndef max_num_in_list(a_list):\n    highest = a_list[0]\n    for number in a_list:\n        if number > highest:\n            highest = number\n    return highest\n    \na_list = [2, 5, -5, -21, 23, 10]\nhighest_num = max_num_in_list(a_list)\nprint(highest_num)\n\n# Question 4:\n# Write a function to return if the given year is a leap year. A leap year is divisible by 4, but not divisible by 100, unless it is also divisible by 400. The return should be boolean Type (true/false).\ndef is_leap_year(a_year):\n    leap = False\n    if a_year % 4 == 0:\n        leap == False\n    elif a_year % 4 == 0:\n        if a_year % 100 == 0 and a_year %400 == 0:\n            leap = False\n    else:\n        leap == False\n    return leap\nprint(bool(is_leap_year))\n\n# Question 5:\n# Write a function to check to see if all numbers in list are consecutive numbers. For example, [2,3,4,5,6,7] are consecutive numbers, but [1,2,4,5] are not consecutive numbers. The return should be boolean Type.\nd_list = [25,16,18,17,24]\nsorted_list = sorted(a_list)\n#sorted(l) ==\nrange_list=list(range(min(a_list), max(a_list)+1))\nif sorted_list == range_list:\n   print(\"True\")\nelse:\n   print(\"False\")\n\n","repo_name":"soniaacr/python-prework-project","sub_path":"one.py","file_name":"one.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"3980308712","text":"import os\nimport sys\nimport locale\nimport gettext\n\n\ndef is_package():\n    return __file__.find('src') < 0\n\n######################################\n\n\nAPP = 'cpu-g'\nAPPNAME = 'CPU-G'\n\n# check if running from source\nif is_package():\n    ROOTDIR = '/opt/extras.ubuntu.com/cpu-g/share'\n    LANGDIR = os.path.join(ROOTDIR, 'locale-langpack')\n    APPDIR = os.path.join(ROOTDIR, APP)\n    CHANGELOG = os.path.join(APPDIR, 'changelog')\n    ICONDIR = os.path.join(ROOTDIR, 'icons')\n    ICON = os.path.join(ICONDIR, 'cpu-g.png')\n    LOGOSDIR = os.path.join(APPDIR, 'logos')\n    DISTROSDIR = os.path.join(APPDIR, 'distros')\n    GRAPHICCARDDIR = os.path.join(APPDIR, 'graphic_card')\n    BATTERY_MONITOR = os.path.join(ROOTDIR, 'monitor_battery.py')\nelse:\n    ROOTDIR = os.path.dirname(__file__)\n    LANGDIR = os.path.normpath(os.path.join(ROOTDIR, '../template1'))\n    APPDIR = ROOTDIR\n    DEBIANDIR = os.path.normpath(os.path.join(ROOTDIR, '../debian'))\n    CHANGELOG = os.path.join(DEBIANDIR, 'changelog')\n    ICON = os.path.normpath(os.path.join(ROOTDIR, '../data/icons/cpu-g.png'))\n    LOGOSDIR = os.path.normpath(os.path.join(ROOTDIR, '../data/logos'))\n    DISTROSDIR = os.path.normpath(os.path.join(ROOTDIR, '../data/distros'))\n    GRAPHICCARDDIR = os.path.normpath(os.path.join(ROOTDIR,\n                                                   '../data/graphic_card'))\n    BATTERY_MONITOR = os.path.join(ROOTDIR, 'monitor_battery.py')\n\nf = open(CHANGELOG, 'r')\nline = f.readline()\nf.close()\npos = line.find('(')\nposf = line.find(')', pos)\nVERSION = line[pos + 1:posf].strip()\nif not is_package():\n    VERSION = VERSION + '-src'\n\n####\ntry:\n    current_locale, encoding = locale.getdefaultlocale()\n    language = gettext.translation(APP, LANGDIR, [current_locale])\n    language.install()\n    print(language)\n    if sys.version_info[0] == 3:\n        _ = language.gettext\n    else:\n        _ = language.ugettext\nexcept Exception as e:\n    print(e)\n    _ = str\nAPPNAME = _(APPNAME)\n","repo_name":"atareao/cpu-g","sub_path":"src/comun.py","file_name":"comun.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"69"}
+{"seq_id":"26355543097","text":"'''\ni   []\n0   [] [1]\n1   [] [1] [3] [1,3]\n'''\ndef find_subsets(nums):\n  subsets = []\n  subsets.append([])\n  for ele in  nums:\n    # ls = 2 * ls        \n    level_width = len(subsets)\n    for ni in range(level_width):\n      new_node = list( subsets[ni] )\n      new_node.append(ele)\n      subsets.append( new_node )\n\n  print(subsets)\n      \n\n\n  \n\ndef main():\n\n  print(\"Here is the list of subsets: \" + str(find_subsets([1, 3])))\n  # print(\"Here is the list of subsets: \" + str(find_subsets([1, 5, 3])))\n\n\nmain()\n\n#%%\ns = \"a\"\ns.upper()\n\n# %%\ns.isupper()\n\n# %%\na = \"1\"\na.isupper()\n\n# %%\n","repo_name":"Xiaoyang-Rebecca/GrokkingCodeInterview","sub_path":"Patterns/Pattern-Subsets/Subset.py","file_name":"Subset.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"69"}
+{"seq_id":"7381157283","text":"import numpy as np\nimport torch\nimport data as d\n\ndef test_get_samplers():\n    _,_,_,i_idx,u_idx = d._get_samplers(100,4,4,random_seed=112)\n    np.random.seed(112)\n    my_idx = list(range(100))\n    val_idx = np.random.choice(my_idx,size=4, replace=False)\n    new_idx = list(set(my_idx)-set(val_idx))\n    i_comp = np.random.choice(new_idx, size=4, replace=False)\n    u_comp = list(set(new_idx)-set(i_comp))\n    assert(np.allclose(i_idx,i_comp))\n    assert(np.allclose(u_idx,u_comp))\n\ndef test_get_dataloader():\n    x = torch.Tensor(np.array([[1,2,3],[7,7,7],[4,5,6]]))\n    y = torch.Tensor(np.array([1,2,5]))\n    dset = torch.utils.data.dataset.TensorDataset(x,y)\n    data_load,_ = d.get_dataloader([0],[2],dset,batch_size=1)\n    truths = []\n    for x_t,y_t in data_load:\n        if x_t.equal(torch.Tensor([1,2,3]).view(1,-1)) and y_t.numpy()[0]==1:\n            truths.append(True)\n            continue\n        if x_t.equal(torch.Tensor([4,5,6]).view(1,-1)) and y_t.numpy()[0]==5:\n            truths.append(True)\n            continue\n        truths.append(False)\n    assert(all(truths))\n","repo_name":"pblankley/pavlos_research","sub_path":"transfer/src/tests/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16052313166","text":"import numpy as np\nimport cv2\n\nfrom matplotlib import pyplot as plt\n\n#Lecture image en niveau de gris et conversion en float64\nimg=np.float64(cv2.imread('../Image_Pairs/Graffiti0.png',cv2.IMREAD_GRAYSCALE))\n(h,w) = img.shape\nprint(\"Dimension de l'image :\",h,\"lignes x\",w,\"colonnes\")\nprint(\"Type de l'image :\",img.dtype)\n\n#Début du calcul\nt1 = cv2.getTickCount()\nTheta = cv2.copyMakeBorder(img,0,0,0,0,cv2.BORDER_REPLICATE)\n# Mettre ici le calcul de la fonction d'intérêt de Harris\nkernel_x = np.array([[-1,1]])\nIx = cv2.filter2D(img,-1,kernel_x)\nkernel_y = np.array([[-1],[1]])\nIy = cv2.filter2D(img,-1,kernel_y)\nIxIx = np.square(Ix)\nIyIy = np.square(Iy)\nIxIy = Ix*Iy\n\nkernel_1 = np.ones((3,3))\nimg1 = cv2.filter2D(IxIx,-1,kernel_1)\nimg2 = cv2.filter2D(IyIy,-1,kernel_1)\nimg3 = cv2.filter2D(IxIy,-1,kernel_1)\nalpha = 0.04\nTheta = img1*img2 - np.square(img3) - alpha*(img1 + img2)*(img1 + img2)\ncv2.imshow(\"theta\",Theta.astype(np.uint8))\ncv2.waitKey(0)\n\n\"\"\"\"\"\n# detect the corners with appropriate values as input parameters\ngray_img = cv2.cvtColor(Theta, cv2.COLOR_BGR2GRAY)\n\n# modify the data type setting to 32-bit floating point\ngray_img = np.float32(gray_img)\ncorners_img = cv2.cornerHarris(gray_img, 3, 3, alpha)\ncv2.imshow(\"corners_img\",corners_img.astype(np.uint8))\ncv2.waitKey(0)\n\"\"\"\n#\n# Calcul des maxima locaux et seuillage\nTheta_maxloc = cv2.copyMakeBorder(Theta,0,0,0,0,cv2.BORDER_REPLICATE)\nd_maxloc = 3\nseuil_relatif = 0.01\nse = np.ones((d_maxloc,d_maxloc),np.uint8)\nTheta_dil = cv2.dilate(Theta,se)\n#Suppression des non-maxima-locaux\nTheta_maxloc[Theta < Theta_dil] = 0.0\n#On néglige également les valeurs trop faibles\nTheta_maxloc[Theta < seuil_relatif*Theta.max()] = 0.0\nt2 = cv2.getTickCount()\ntime = (t2 - t1)/ cv2.getTickFrequency()\nprint(\"Mon calcul des points de Harris :\",time,\"s\")\nprint(\"Nombre de cycles par pixel :\",(t2 - t1)/(h*w),\"cpp\")\n\nplt.subplot(131)\nplt.imshow(img,cmap = 'gray')\nplt.title('Image originale')\n\nplt.subplot(132)\nplt.imshow(Theta,cmap = 'gray')\nplt.title('Fonction de Harris')\n\nse_croix = np.uint8([[1, 0, 0, 0, 1],\n[0, 1, 0, 1, 0],[0, 0, 1, 0, 0],\n[0, 1, 0, 1, 0],[1, 0, 0, 0, 1]])\nTheta_ml_dil = cv2.dilate(Theta_maxloc,se_croix)\n#Relecture image pour affichage couleur\nImg_pts=cv2.imread('../Image_Pairs/Graffiti0.png',cv2.IMREAD_COLOR)\n(h,w,c) = Img_pts.shape\nprint(\"Dimension de l'image :\",h,\"lignes x\",w,\"colonnes x\",c,\"canaux\")\nprint(\"Type de l'image :\",Img_pts.dtype)\n#On affiche les points (croix) en rouge\nImg_pts[Theta_ml_dil > 0] = [255,0,0]\nplt.subplot(133)\nplt.imshow(Img_pts)\nplt.title('Points de Harris')\n\nplt.show()\n","repo_name":"Magi329/IN101","sub_path":"Desktop/mi204/TP1_Features/Harris.py","file_name":"Harris.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38870068696","text":"di = dict()\r\nprint(di)\r\nhello = ['wi', 'wo', 'hi', 'wi', 'wo', 'no']\r\nfor hey in hello:\r\n    if hey not in di:\r\n        di[hey] = 1\r\n    else:\r\n        di[hey] = di[hey] + 1\r\nprint(di)\r\n\r\n# another, shorter way, .get() method for dictionaries, get a single number of words\r\ny = di.get('wi')\r\nprint(y)\r\n\r\n# count every key in a dictionary\r\ndi = dict()\r\nhello = ['wi', 'wo', 'hi', 'wi', 'wo','no']\r\nfor hey in hello:\r\n    di[hey] = di.get(hey, 0) + 1\r\nprint(di)","repo_name":"KatTiel/fcc_python_practice_scientific_computing","sub_path":"ex_8_searchandcountdictionary.py","file_name":"ex_8_searchandcountdictionary.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"34200683365","text":"#!/usr/bin/env python\nfrom elasticsearch import Elasticsearch\nfrom prettytable import PrettyTable\nfrom debe import *\nimport sys\n\nindex_name = 'congestion'\ndoc_type = 'congestion_doc'\n\nes = Elasticsearch([{\n        'host': '127.0.0.1',\n        'port': 9200\n    }])\n\nif int(sys.argv[1]) != 0:\n    place_id_to_update = int(sys.argv[1])\n    places = sessionPostgresTraffic.query(ProcessSpot).\\\n        filter(ProcessSpot.place_id == place_id_to_update).\\\n        all()\nelse:\n    places = sessionPostgresTraffic.query(ProcessSpot).\\\n        all()\n\nnumber = 1\ntable = PrettyTable([\"No\", \"Name\", \"Lat\", \"Lng\", \"Regency\"])\n#table.align[\"Name\"] = \"l\"\ntable.align = \"l\"\nfor p in places:\n    table.add_row([number, p.place_name, p.place_lat, p.place_lng, p.regency_name])\n    body_insert = {\n        'id': p.place_id,\n        'name': p.place_name,\n        'lat': p.place_lat,\n        'lng': p.place_lng,\n        'regency': {\n            'id': p.regency_id,\n            'name': p.regency_name\n        }\n    }\n    body_update = {\n        'doc': {\n            'id': p.place_id,\n                'name': p.place_name,\n                'lat': p.place_lat,\n                'lng': p.place_lng,\n                'regency': {\n                    'id': p.regency_id,\n                    'name': p.regency_name\n                }\n            }\n    }\n    if es.exists(index=index_name, doc_type=doc_type, id=p.place_id):\n        es.update(index=index_name, id=p.place_id, doc_type=doc_type, body=body_update)\n    else:\n        es.create(index=index_name, id=p.place_id, doc_type=doc_type, body=body_insert)\n    number += 1\n\nmappings = {\n    'mappings': {\n        'congestion_doc': {\n            'properties': {\n                'name': {\n                    'type': 'string',\n                    'similarity': 'BM25'\n                }\n            }\n        }\n    }\n}\nes.indices.create(index=index_name, body=mappings, ignore=400)\nprint(table)\n","repo_name":"aansubarkah/congestion_python","sub_path":"utilities/update_place_to_elasticsearch.py","file_name":"update_place_to_elasticsearch.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35189226032","text":"#Part 1 - remove the more recent scan (only want the acute image)\nimport os\nimport shutil\nfrom datetime import datetime\n\ndef remove_recent_scan(dir_path): \n    \"\"\"\n    Remove the more recent scan directory from the subject directory if there are 2 directories.\n    \"\"\"\n    sub_dirs = [os.path.join(dir_path, d) for d in os.listdir(dir_path) if os.path.isdir(os.path.join(dir_path, d))]\n    scan_dirs = [d for d in sub_dirs if len(os.listdir(d)) == 1]  # Scans have only one subdirectory\n    if len(scan_dirs) != 2:\n        return  # There aren't two scans in this subject directory\n    dates = [datetime.strptime(os.path.basename(d), '%Y-%m-%d') for d in scan_dirs]\n    newer_dir = scan_dirs[0] if dates[0] > dates[1] else scan_dirs[1]\n    shutil.rmtree(newer_dir)\n\nfor dirpath, dirnames, filenames in os.walk('.'):\n    for dirname in dirnames:\n        remove_recent_scan(os.path.join(dirpath, dirname))\n\n\n#Part 2 - relabels the Diffusion folder to the format SubjectID where ID is the number in the directory \n# Path to the base directory\nbase_path = '/home/drevesz/Desktop/segmentation_may23/Images'\n\n# Loop through each directory in the base directory\nfor dirpath, dirnames, filenames in os.walk(base_path):\n    # Check if the current directory is a 'Diffusion' directory\n    if os.path.basename(dirpath) == 'Diffusion':\n        # Get the parent directory name (the subject ID)\n        parent_dirname = os.path.basename(os.path.dirname(os.path.dirname(dirpath)))\n        # Rename the 'Diffusion' directory to the format 'SUBJECTID_FOLDER'\n        new_dirname = f'Subject{parent_dirname}'\n        try:\n            os.rename(dirpath, os.path.join(os.path.dirname(dirpath), new_dirname))\n            # Print the old and new directory names\n            print(f'Renamed {os.path.basename(dirpath)} to {new_dirname}')\n        except PermissionError:\n            print(f'Could not rename {os.path.basename(dirpath)} due to a permission error')\n\n\n#Part 4 - Simplify directory structure - Cuts out the subjectID directory (containing b0 and DWI folders and images) into the desired main directory location  \nimport shutil\n\n# Set the source directory to the home directory\nsrc_dir = '/home/drevesz/Desktop/segmentation_may23/Images'\n\n# Set the destination directory\ndst_dir = '/home/drevesz/Desktop/segmentation_may23/Images'\n\n# Loop through all directories in the source directory\nfor root, dirs, files in os.walk(src_dir):\n    for dir in dirs:\n        # Check if the directory name contains 'Subject'\n        if 'Subject' in dir:\n            # Create the destination directory with the same name as the source directory\n            dst_subdir = os.path.join(dst_dir, dir)\n            os.makedirs(dst_subdir, exist_ok=True)\n            # Loop through all files in the source directory and move them to the destination directory\n            for file in os.listdir(os.path.join(root, dir)):\n                shutil.move(os.path.join(root, dir, file), os.path.join(dst_subdir, file))\n            # Remove the original directory\n            shutil.rmtree(os.path.join(root, dir))\n\n# Remove the original source directory\nshutil.rmtree(src_dir)\n\n","repo_name":"drevesz11/HNI-automatedpipeline","sub_path":"directory_restructure_masterscript1.py","file_name":"directory_restructure_masterscript1.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"33624582150","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n        if not list1 or not list2:\n            return list1 or list2\n\n        src, dest = (list1, list2)\n\n        if list1.val < list2.val:\n            src = list2\n            dest = list1\n\n        head = dest\n\n        while src:\n            while dest.next and dest.next.val <= src.val:\n                dest = dest.next\n            \n            dest.next = ListNode(src.val, dest.next)\n            src = src.next\n        \n        return head","repo_name":"lazymaplekoi/100-days-of-code","sub_path":"day-3/merge-two-lists/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"24506415636","text":"import pandas as pd\r\nfrom scipy import stats, math\r\n\r\nurl = \"http://wps.aw.com/wps/media/objects/14/15269/projects/ch6_iq/brain.txt\"\r\n\r\ndf = pd.read_table(url, sep =\"\\t\")\r\nfemale = df[df[\"Gender\"] == \"Female\"]\r\nmale = df[df[\"Gender\"] == \"Male\"]\r\na = female.loc[:, \"Height\"]\r\nb = male.loc[:, \"Height\"]\r\n\r\n\r\ndef find_info(temp):\r\n    something = []\r\n    for x in temp:\r\n        if not math.isnan(x):\r\n            something.append(x)\r\n    return something\r\n\r\n\r\ntest_male = stats.ttest_1samp(a=find_info(b), popmean=170)\r\ntest_female = stats.ttest_1samp(a=find_info(a), popmean=163)\r\ntest_compare = stats.ttest_ind(a=find_info(a), b=find_info(b))\r\n\r\nprint(test_male)\r\nprint(test_female)\r\nprint(test_compare)","repo_name":"erlingnikolai/python_Worksheets","sub_path":"s2 ws4/s2ws4.py","file_name":"s2ws4.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15362360471","text":"\"\"\"Add published pronouns to proposal\n\nRevision ID: 822ce04a81a2\nRevises: 1012d60b8c68\nCreate Date: 2022-05-10 20:02:47.453592\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '822ce04a81a2'\ndown_revision = '1012d60b8c68'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('proposal', sa.Column('published_pronouns', sa.String(), nullable=True))\n    op.add_column('proposal_version', sa.Column('published_pronouns', sa.String(), autoincrement=False, nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('proposal_version', 'published_pronouns')\n    op.drop_column('proposal', 'published_pronouns')\n    # ### end Alembic commands ###\n","repo_name":"emfcamp/Website","sub_path":"migrations/versions/822ce04a81a2_add_published_pronouns_to_proposal.py","file_name":"822ce04a81a2_add_published_pronouns_to_proposal.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"71"}
+{"seq_id":"14132132007","text":"from emergency_response import EmergencyResponseService\nfrom inbound import InboundPatientController\nfrom vendor.pager_system import PagerSystem\nfrom patient import Priority\n\n\nclass AlertScanner:\n    ADMIN_ON_CALL_DEVICE = \"111-111-1111\"\n\n    manager: EmergencyResponseService\n    controller: InboundPatientController\n    critical_patient_notifications_sent = list()\n\n    def __init__(self, staff_assignment_manager, inbound_patient_controller):\n        self.manager = staff_assignment_manager\n        self.controller = inbound_patient_controller\n\n    def scan(self):\n        print(\"Scanning for situations requiring alerting...\")\n        inbound = self.controller.current_inbound_patients()\n        for patient in inbound:\n            if patient.get_priority() == Priority.RED:\n                if patient.get_transport_id() not in self.critical_patient_notifications_sent:\n                    self.alert_for_new_critical_patient(patient)\n\n    def alert_for_new_critical_patient(self, patient):\n        try:\n            transport = PagerSystem.get_transport()\n            transport.initialize()\n            transport.transmit_requiring_acknowledgement(self.ADMIN_ON_CALL_DEVICE,\n                                                         \"New inbound critical patient: \" +\n                                                         str(patient.get_transport_id()))\n            self.critical_patient_notifications_sent.append(patient.get_transport_id())\n        except RuntimeError:\n            print(\"Failed attempt to use pager system to device \" + self.ADMIN_ON_CALL_DEVICE)\n","repo_name":"AgiliXRU/erservice-python","sub_path":"scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"3923732833","text":"import sys\nimport functools\nfrom dataclasses import dataclass, field\n\n\n@dataclass\nclass Graph:\n    edges: dict = field(default_factory=dict)\n\n    def add_edge(self, src: str, dst: str, weight: int):\n        if src not in self.edges:\n            self.edges[src] = dict()\n        self.edges[src][dst] = int(weight)\n\n\ndef path_comparator(a: list, b: list) -> int:\n    result = a[-1] - b[-1]\n\n    if result == 0:\n        result = len(a) - len(b)\n        if result == 0:\n            a_str = \" \".join(a[:-1])\n            b_str = \" \".join(b[:-1])\n            if a_str < b_str:\n                return -1\n            elif a_str > b_str:\n                return 1\n            else:\n                return 0\n\n    return result\n\n\ndef bfs(src: str, dst: str, network: Graph):\n    queue = [[src]]\n    visited = set()\n    all_paths = list()\n    while queue:\n        path = queue.pop()\n        node = path[-1]\n        if node == dst:\n            all_paths.append(path)\n        elif node not in visited:\n            for current_neighbour in network.edges[node] if node in network.edges else {}:\n                new_path = list(path)\n                new_path.append(current_neighbour)\n                queue.append(new_path)\n            visited.add(node)\n    return all_paths\n\n\ndef find_connections(src: str, dst: str, limit: int, network: Graph) -> list:\n    paths = bfs(src, dst, network)\n    if paths:\n        for path in paths:\n            cost = 0\n            i = 0\n            for i in range(len(path) - 1):\n                cost += network.edges[path[i]][path[i+1]]\n            path.append(cost)\n        paths.sort(key=functools.cmp_to_key(path_comparator))\n        return paths[:limit]\n    else:\n        return [[\"\"]]\n\n\ndef main():\n    network = Graph()\n    number_of_flights = int(sys.stdin.readline())\n\n    for _ in range(number_of_flights):\n        network.add_edge(*sys.stdin.readline().strip().split(\" \"))\n\n    # print(network)\n    limit = int(sys.stdin.readline())\n    src, dst = sys.stdin.readline().strip().split(\" \")\n    connections = find_connections(src, dst, limit, network)\n\n    for connection in connections:\n        print(*connection)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"cwdesautels/python-learning","sub_path":"tech-evaluate/week-2/2_2_flights.py","file_name":"2_2_flights.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"23630643054","text":"from datetime import date\n\n\nclass Booking:\n    def __init__(\n        self,\n        id: int,\n        hotels_name: str,\n        start_date: date,\n        end_date: date,\n        price: int,\n        comments: str | None = None\n    ):   \n        self.id = id\n        self.hotels_name = hotels_name\n        self.start_date = start_date\n        self.end_date = end_date\n        self.price = price\n        self.comments = comments\n\n    def __eq__(self, __o: object):\n        if not isinstance(__o, Booking):\n            return False\n        \n        booking: Booking = __o\n        return self.id == booking.id\n","repo_name":"polupanovaanna/python-web-fall-2022","sub_path":"app/grpc_booking_service/common_serv.py","file_name":"common_serv.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"19647503663","text":"import torch\r\nfrom torch.nn import CrossEntropyLoss\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nfrom transformers import GPT2Config, GPT2LMHeadModel, BertModel, BertConfig\r\nfrom transformers import AdamW, get_linear_schedule_with_warmup\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom step1 import getdataset   # 数据集\r\n\r\n# 数据集\r\ntraindataset, traindataloader, valdataset, valdataloader, simtraindataset, simtraindataloader = getdataset()\r\n\r\nN_EPOCHS = 100\r\nLR = 5e-4\r\nWARMUP_PROPORTION = 0.1\r\nMAX_GRAD_NORM = 1.0\r\nSUMMARY_ID = 2\r\ndevice = 'cuda'\r\n\r\n\r\ndef calculate_loss(outputs, labels, token_type_ids, summary_id):\r\n    \"\"\"\r\n    只计算summary部分的loss\r\n    \"\"\"\r\n    logits = outputs[0]  # 维度:[batch_size, sequence_length, config.vocab_size]\r\n\r\n    # 获取mask值,token_type_ids中等于summary_id的部分需要计算loss,标记为1;否则为0。\r\n    # size:[batch_size, sequence_length]\r\n    mask = (token_type_ids == summary_id).long()\r\n    # 获取新的标签,size:[batch_size, sequence_length]\r\n    labels = labels * mask\r\n    # 对预测结果和标签进行偏移操作\r\n    # GPT2的生成机制为通过前面的token,预测下一个token;并且labels与input_ids相同,\r\n    # 因此input_ids中的第一个token的预测结果,实际上是标签中的第二个token,以此类推��最终仅计算sequence_length-1个token的loss\r\n    shift_logits = logits[..., :-1, :].contiguous()\r\n    shift_labels = labels[..., 1:].contiguous()\r\n    # 定义损失函数CrossEntropyLoss,并且设置忽略计算loss的索引,以及返回loss的形式\r\n    # 忽略shift_labels中为0的loss,也就是仅计算summary部分的损失值\r\n    loss_fct = CrossEntropyLoss(ignore_index=0)\r\n\r\n    loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\r\n\r\n    return loss\r\n\r\n\r\ndef simcse_unsup_loss(y_pred):\r\n    \"\"\"无监督的损失函数\r\n    y_pred (tensor): bert的输出, [batch_size, 768]\r\n    \"\"\"\r\n    # 得到y_pred对应的label, [1, 0, 3, 2, ..., batch_size-1, batch_size-2]\r\n    y_true = torch.arange(y_pred.shape[0], device=device)\r\n    y_true = (y_true - y_true % 2 * 2) + 1\r\n    # batch内两两计算相似度, 得到相似度矩阵(对角矩阵)\r\n    sim = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=-1)\r\n\r\n    # 将相似度矩阵对角线置为很小的值, 消除自身的影响\r\n    sim = sim - torch.eye(y_pred.shape[0], device=device) * 1e12\r\n    # 相似度矩阵除以温度系数\r\n    sim = sim / 0.05\r\n    # 计算相似度矩阵与y_true的交叉熵损失\r\n    loss = F.cross_entropy(sim, y_true)\r\n\r\n    return loss\r\n\r\n\r\nclass SimcseModel(nn.Module):\r\n    \"\"\"Simcse有监督模型定义\"\"\"\r\n\r\n    def __init__(self):\r\n        super(SimcseModel, self).__init__()\r\n        self.model_config = GPT2Config.from_json_file('config/config.json')\r\n        self.gpt = GPT2LMHeadModel(config=self.model_config)\r\n        self.gpt.resize_token_embeddings(1300)\r\n\r\n    #         self.attention = nn.MultiheadAttention(embed_dim=768, num_heads=12)\r\n\r\n    def forward(self, input_ids, attention_mask, book=None):\r\n\r\n        if book == \"sim\":\r\n            output = self.gpt(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True)\r\n            last_hidden_state = output.hidden_states[-1]  # [batch_size, seq_len, 768]\r\n            #             last_hidden_state = self.attention(last_hidden_state, last_hidden_state, last_hidden_state)[0]\r\n            out = last_hidden_state.permute(0, 2, 1)\r\n            out = nn.AvgPool1d(out.size(2))(out).squeeze(2)\r\n\r\n            return out\r\n\r\n        else:\r\n            output = self.gpt(input_ids=input_ids, attention_mask=attention_mask)\r\n\r\n            return output\r\n\r\n\r\ndef 
run(kk):\r\n    best_valid_loss = float('inf')\r\n    model = SimcseModel().to(device)\r\n\r\n    total_steps = len(traindataloader) * N_EPOCHS\r\n    optimizer = AdamW(model.parameters(), lr=LR, eps=1e-8)\r\n    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(WARMUP_PROPORTION * total_steps),\r\n                                                num_training_steps=total_steps)\r\n\r\n    loss_vals = []\r\n    loss_vals_eval = []\r\n    for epoch in range(N_EPOCHS):\r\n        model.train()\r\n        epoch_loss = []\r\n        pbar = tqdm(traindataloader)\r\n        pbar.set_description(\"[Train Epoch {}]\".format(epoch))\r\n\r\n        for batch_idx, batch_data in enumerate(pbar):\r\n\r\n            for pos, batch_data_ in enumerate(simtraindataloader):\r\n\r\n                if batch_idx == pos:\r\n                    input_ids = batch_data[\"input_ids\"].to(device)\r\n                    token_type_ids = batch_data[\"token_type_ids\"].to(device)\r\n                    attention_mask = batch_data[\"attention_mask\"].to(device)\r\n\r\n                    input_ids_1 = batch_data_[\"input_ids_1\"].to(device)\r\n                    attention_mask_1 = batch_data_[\"attention_mask_1\"].to(device)\r\n\r\n                    model.zero_grad()\r\n\r\n                    outputs = model.forward(input_ids=input_ids, attention_mask=attention_mask)  # 正常\r\n                    loss_mle = calculate_loss(outputs, input_ids, token_type_ids, SUMMARY_ID)  # 正常\r\n\r\n                    outputs_ = model.forward(input_ids=input_ids_1, attention_mask=attention_mask_1, book=\"sim\")\r\n                    loss_sim = simcse_unsup_loss(outputs_)\r\n\r\n                    loss = (1-kk)*loss_mle + kk*loss_sim\r\n                    loss.backward()\r\n\r\n                    torch.nn.utils.clip_grad_norm_(model.parameters(), MAX_GRAD_NORM)\r\n                    epoch_loss.append(loss.item())\r\n\r\n                    optimizer.step()\r\n                    scheduler.step()\r\n\r\n        loss_vals.append(np.mean(epoch_loss))\r\n\r\n        model.eval()\r\n        epoch_loss_eval = []\r\n        pbar = tqdm(valdataloader)\r\n        pbar.set_description(\"[Eval Epoch {}]\".format(epoch))\r\n\r\n        with torch.no_grad():\r\n            for batch_idx, batch_data in enumerate(pbar):\r\n                input_ids = batch_data[\"input_ids\"].to(device)\r\n                token_type_ids = batch_data[\"token_type_ids\"].to(device)\r\n                attention_mask = batch_data[\"attention_mask\"].to(device)\r\n                outputs = model.forward(input_ids=input_ids, attention_mask=attention_mask)\r\n                loss = calculate_loss(outputs, input_ids, token_type_ids, SUMMARY_ID)\r\n                epoch_loss_eval.append(loss.item())\r\n\r\n        valid_loss = np.mean(epoch_loss_eval)\r\n        loss_vals_eval.append(valid_loss)\r\n\r\n        torch.cuda.empty_cache()\r\n\r\n    l1, = plt.plot(np.linspace(1, N_EPOCHS, N_EPOCHS).astype(int), loss_vals)\r\n    l2, = plt.plot(np.linspace(1, N_EPOCHS, N_EPOCHS).astype(int), loss_vals_eval)\r\n    plt.legend(handles=[l1, l2], labels=['Train loss', 'Eval loss'], loc='best')\r\n\r\n    print(\"模型训练完毕!\")\r\n    return model\r\n","repo_name":"NicoYuCN/nlpMIDRG","sub_path":"CLpCE+DCS/step2.py","file_name":"step2.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"6594800431","text":"import os\nimport time\n\nfrom PIL import Image\nimport cv2\n\ndef small_pic(path, name):\n    small_path = path + 'small/'\n    if not os.path.isdir(small_path):\n        os.makedirs(small_path)\n    try:\n        img = Image.open(path + name)\n        img = img.resize((450, 300), Image.ANTIALIAS)\n        img.save(small_path + name)\n    except:\n        print('failed to read img file,' + small_path + name)\n\n\ndef img_zip(path, name):\n    small_name = 'small/' + name\n    if not os.path.isdir(path + 'small/'):\n        os.makedirs(path + 'small/')\n\n    try:\n        imageBig = cv2.imread(path + name)\n        res = cv2.resize(imageBig, (450, 300), interpolation=cv2.INTER_AREA)\n        cv2.imwrite(path + small_name, res)\n    except:\n        print('failed to read img file,' + path + small_name)\n\n\n\ndef TimeStampToTime(timestamp):\n\n    timeStruct = time.localtime(timestamp)\n    return time.strftime('%Y-%m-%d %H:%M:%S', timeStruct)","repo_name":"er-niu/yywallpaper-py","sub_path":"service/picture_util.py","file_name":"picture_util.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"11243074878","text":"# -*- coding: UTF-8 -*-\n# https://leetcode-cn.com/problems/decode-string/\n\ndef decodeString( s):\n    \"\"\"\n    :type s: str\n    :rtype: str\n    \"\"\"\n    stack = [] #[]嵌套关系适合用 stack 来描述\n    res = ''\n    multi = 0\n    for c in s:\n        if '0'<= c <= '9':\n            multi = multi*10+int(c)\n        elif c=='[':\n            stack.append((multi,res))\n            multi = 0\n            res = ''\n        elif c==']':\n            cur_multi,cur_res = stack.pop()\n            res = cur_res + cur_multi*res\n        else:\n            res += c\n\n    return res\n\ns = \"3[a2[c]]\"\nprint(decodeString(s))\n","repo_name":"lichengchengchloe/leetcodePracticePy","sub_path":"DecodeString.py","file_name":"DecodeString.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"13038754603","text":"#!/usr/bin/env python3\n\nimport sys\n\ndata = sys.argv[1]\n\nPREFIX = \"100000000\"\nVAL1 = \"10000\"\nVAL0 = \"100\"\nmsg = PREFIX\n\nfor i in data:\n    if i == \"1\":\n        msg += VAL1\n    elif i == \"0\":\n        msg += VAL0\n    else:\n        exit(1)\n\n#Dodaj 1 zeby oznaczyc koniec transmisji\nmsg += \"1\"    \n\nprint(msg)\nexit(0)","repo_name":"justcatthefish/justctf-2022","sub_path":"challenges/misc_radio_ga_ga/private/private/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"it","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"40404143525","text":"soma = cont = 0\nfor c in range(1, 7):\n    num = int(input(f'Digite o {c}° número: '))\n    if num % 2 == 0:\n        soma += num\n        cont += 1\nif cont == 0:\n    print('Você não informou números pares.')\nelse:\n    print(f'A soma dos {cont} números pares é {soma}.')\n","repo_name":"MoraesMS/Exercicios-de-Python-3","sub_path":"050_soma_dos_pares.py","file_name":"050_soma_dos_pares.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"30316691852","text":"# coding: utf-8\n# created by jlshix on 2021-02-01\n\n\"\"\"https://fastapi.tiangolo.com/zh/tutorial/query-params/\n\n1. 路径参数和查询参数无需按顺序进行声明, FastAPI 可自动识别.\n\n2. `q: Optional[str] = None` 与 `q: str = None` 等价, 建议使用前者\n\n3. 查询参数若要声明为必选, 可不为其指定默认值. 或使用下一节的 `Query(...)`\n\n\"\"\"\nfrom typing import Optional\n\nimport pytest\nfrom fastapi import FastAPI\nfrom fastapi.testclient import TestClient\n\napp = FastAPI()\n\nfake_items_db = [{\"item_name\": \"Foo\"}, {\"item_name\": \"Bar\"}, {\"item_name\": \"Baz\"}]\n\n\n@app.get('/items/')\nasync def read_item(skip: int = 0, limit: int = 10):\n    \"\"\"以下四者等价:\n    1. GET /items/\n    2. GET /items/?skip=0&limit=10\n    3. GET /items/?skip=0\n    4. GET /items/?limit=10\n\n    以下二者等价:\n    1. GET /items/skip=10\n    2. GET /items/skip=10&limit=10\n    \"\"\"\n    return fake_items_db[skip: skip + limit]\n\n\n@app.get('/items/{item_id}')\nasync def read_item_by_id(item_id: str, q: Optional[str] = None, short: bool = None):\n    item = {'item_id': item_id}\n    if q:\n        item['q'] = q\n    if not short:\n        item['description'] = 'This is an amazing item that has a long description'\n    return item\n\n\nclient = TestClient(app)\n\n\ndef test_read_item_by_id_optional():\n    \"\"\"`q` 使用 Optional 标记, 作为可选参数\"\"\"\n    resp = client.get('/items/5')\n    assert 'q' not in resp.json()\n\n    resp = client.get('/items/42?q=query')\n    assert resp.json()['q'] == 'query'\n\n\n@pytest.mark.parametrize('short', ['1', 'yes', 'on', 'true', 'True'])\ndef test_read_item_by_id_bool_true(short):\n    \"\"\"short 作为布尔型, 以上值作为 True\"\"\"\n    resp = client.get(f'/items/42?short={short}')\n    assert 'description' not in resp.json()\n\n\n@pytest.mark.parametrize('short', ['0', 'False', 'off', 'no'])\ndef test_read_item_by_id_bool_no(short):\n    \"\"\"short 作为布尔型, 以上值作为 False\"\"\"\n    resp = client.get(f'/items/42?short={short}')\n    assert 'description' in resp.json()\n","repo_name":"jlshix/aio_examples","sub_path":"fastapi_/c03_query_params.py","file_name":"c03_query_params.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"13132027664","text":"# 리스트는 [과 ]으로 둘러싸지만 튜플은 (과 )으로 둘러싼다.\n# 리스트는 그 값의 생성, 삭제, 수정이 가능하지만 튜플은 그 값을 바꿀 수 없다.\n\nt1 = ()\nt2 = (1,)\nt3 = (1, 2, 3)\nt4 = 1, 2, 3\nt5 = ('a', 'b', ('ab', 'cd'))\n\n\ntup = (1, 2, 'a', 'b')\nprint(tup[0], tup[2])\nprint(tup[1:])\nprint(t2+t3)\nprint(t3*2)","repo_name":"hg-pyun/hello-python","sub_path":"02-4 튜플 자료형.py","file_name":"02-4 튜플 자료형.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"34621948270","text":"#!/usr/bin/env python3\n\n# ^ a shabang, tells where to find python3\n\nimport rclpy # ros python library\nfrom rclpy.node import Node # this is a ros node\nfrom std_msgs.msg import Float32MultiArray # ROS standard message type Float32MultiArray\n\n\n# subscribers are handled as a class, inherited from Node.\nclass Subscriber(Node):\n    # initialisation\n    def __init__(self):\n        super().__init__(\"simple_subscriber\") # ROS' super initialisation function from Node.\n        self.subscription = self.create_subscription(Float32MultiArray, \"number_topic\", self.callback_func, 10) # create self.publisher, for data type, data channel (rostopic), callback, and queue size\n\n    # function that will be run whenever a new message is published to the channel\n    def callback_func(self, msg):\n        sum = 0.0\n        for num in msg.data:\n            sum += num\n        self.get_logger().info(\"The sum is: %s\" % sum) # prints info to terminal in a standardised ROS format\n\ndef main():\n    rclpy.init(args=None) # initialise ROS network\n    listener = Subscriber() # create subscriber\n    rclpy.spin(listener) # run subscriber indefinitely\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Alex427427427/ROS2_tutorial_2021","sub_path":"src/calculator/calculator/sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"20821379304","text":"from django.db import models\n\n# Create your models here.\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Address(models.Model) :\n    addressLine1 = models.CharField('addressLine1', max_length=250)\n    addressLine2 = models.CharField('addressLine2', max_length=250, blank=True, null=True)\n    city = models.CharField('city', max_length=250)\n    state = models.CharField('state', max_length=30)\n    postalCode = models.CharField('postalCode', max_length=10)\n    country = models.CharField('country', max_length=250, default='United States')\n    \nclass Customer(models.Model):\n    user = models.OneToOneField(User, on_delete=models.CASCADE)\n    customerAddress = models.ManyToManyField(Address)\n    isAdmin = models.BooleanField('isAdmin', default=False)\n    isVendor = models.BooleanField('isVendor', default=False)\n    isFarmer = models.BooleanField('isFarmer', default=False)\n\nclass Collection(models.Model):\n    collectionName = models.CharField('collectionName', max_length=250, unique=True)\n    collectionDescription = models.CharField('collectionDescription', max_length=500)\n    \nclass Product(models.Model):\n    productName = models.CharField('productName', max_length = 250, unique = True)\n    productDescription = models.TextField('productDescription')\n    productPrice = models.DecimalField('productPrice', max_digits=5, decimal_places=2)\n    productCollection = models.ForeignKey('collection', Collection)\n    productSrc = models.CharField\n\nclass Order(models.Model):\n    statusChoices = (\n        ('UNFULFILLED', 'Unfulfilled'),\n        ('PENDING_PAYMENT', 'Pending Payment'),\n        ('CANCELLED', 'Cancelled'),\n        ('REFUNDED', 'Refunded'),\n        ('FULFILLED', 'Fulfilled')\n    )\n    customer = models.ForeignKey('customer', Customer)\n    orderDate = models.DateTimeField('orderDate', auto_now_add = True)\n    status = models.TextField('status', max_length = 250, default = 'UNFULFILLED')\n    totalPrice = models.DecimalField('totalPrice', max_digits=5, decimal_places=2)\n    product = models.ForeignKey('product', Product)\n\nclass Product_Image(models.Model) :\n    product = models.ForeignKey('product', Product)\n    img = models.ImageField('img', upload_to='static/staticfiles/img/', blank=True, null=True)\n    uploadedAt = models.DateTimeField(auto_now_add=True)","repo_name":"dougdoenges/HaitiCoffeeSS","sub_path":"haiticoffee/main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"16886008834","text":"from typing import Dict, List\nfrom discord.ext import commands, tasks\nfrom typing import Callable, TypeVar\nimport json\nimport os\nimport api.user as user_adapter\nfrom discord.ext.forms import Form, Validator\nimport discord\n\nT = TypeVar(\"T\")\n\n\ndef is_owner() -> Callable[[T], T]:\n    \"\"\"\n    This is a custom check to see if the user executing the command is an owner of the bot.\n    \"\"\"\n\n    async def predicate(context: commands.Context) -> bool:\n        with open(\n                f\"{os.path.realpath(os.path.dirname(__file__))}/../config.json\"\n        ) as file:\n            data = json.load(file)\n        if context.author.id not in data[\"owners\"]:\n            await context.send(\"You are not the owner of the bot!\")\n        return True\n\n    return commands.check(predicate)\n\n\nclass Owner(commands.Cog, name=\"owner\", description=\"Owner commands.\"):\n\n    def __init__(self, bot) -> None:\n        self.bot = bot\n\n    @commands.hybrid_group(\n        name=\"owner\",\n        description=\"Owner commands.\",\n    )\n    async def owner(self, context: commands.Context) -> None:\n        ...\n\n    @owner.command(\n        name=\"broadcast\",\n        description=\"Broadcast a message to all users\",\n    )\n    @is_owner()\n    async def broadcast(self, context: commands.Context) -> None:\n        \"\"\"Broadcast a message to all users\"\"\"\n\n        form = Form(context, 'Type your message below')\n        form.add_question('請輸入標題', 'title')\n        form.add_question('請輸入想發送的資訊', 'message')\n        form.add_question('確認要發送嗎? (y/n)', 'check')\n        form.set_timeout(60)\n        result = await form.start()\n\n        if result.check != 'y':\n            await context.send('已取消發送')\n            return\n\n        users_info = user_adapter.get_all_users()\n        embed = discord.Embed(title='📢 系統公告', color=discord.Color.yellow())\n        embed.add_field(name=result.title, value=result.message, inline=False)\n\n        for info in users_info:\n            user_id = info['id']\n            user = await self.bot.fetch_user(int(user_id))\n            await user.send(embed=embed)\n\n\nasync def setup(bot):\n    await bot.add_cog(Owner(bot))\n","repo_name":"Nana2929/discord-daily-task-bot","sub_path":"cogs/owner.py","file_name":"owner.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"8609772006","text":"import moviepy\nfrom moviepy.editor import *\nfrom error import *\nfrom suffix import *\n\n\n# 写入文件的绝对路径\ndef create_audio(file_name=\"\"):\n    if file_name == \"\":\n        return Error(NO_FILE)\n    suff = file_name[-4:]\n    if suff not in VIDEO and suff not in AUDIO:\n        return Error(FILE_TYPE_WRONG)\n    if suff == \".mp3\":\n        return\n    else:\n        try:\n            audio = AudioFileClip(file_name)\n        except:\n            return Error(NO_FILE)\n        try:\n            audio.write_audiofile(file_name[:-3] + \"mp3\")\n            return\n        except:\n            return Error(AUDIO_USE_WRONG)\n\n\ndef slip(file_name):\n    try:\n        size = os.path.getsize(file_name)\n    except:\n        return Error(NO_FILE)\n        # 要小于100M的音频才能转换\n    if size < (1000) ** 3:\n        return [file_name]\n    # 切片音频, 按一小时切片\n    else:\n        try:\n            clip_name = 0\n            audio = AudioFileClip(file_name)\n            audio_long = audio.end\n            clip_list = []\n            for i in range(audio.end // 60 * 60 + 1):\n                clip_start = 60 * 60 * i\n                audio_clip = audio.subclip(clip_start, min(audio_long, clip_start + 60 * 60))\n                audio_new = CompositeAudioClip([audio_clip])\n                # 隐藏进度条\n                name = file_name + str(clip_name) + \".mp3\"\n                clip_list.append(name)\n                audio_new.write_audiofile(name, fps=44100, logger=None, verbose=False)\n                clip_name += 1\n            return clip_list\n        except:\n            return Error(CLIP_ERROR)\n","repo_name":"status-code-404/VideoTransArticle","sub_path":"getAudioFromVideo.py","file_name":"getAudioFromVideo.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"10625816717","text":"from rest_framework import generics\nfrom django.urls import path\nfrom main.followers.serializers import ListUsersSerializer, FollowUserSerializer\nfrom auth.backend.decorators import view_authenticate\nfrom _common.helpers import serializer_to_json\nfrom _common.mixins import APIViewMixin\n\n\n@view_authenticate()\nclass FollowUserView(APIViewMixin, generics.CreateAPIView):\n    serializer_class = FollowUserSerializer\n\n    def create(self, request, *args, **kwargs):\n        serializer = self.get_serializer(data=request.data, context={'request': self.request})\n        serializer.is_valid(raise_exception=True)\n        result = serializer.validated_data\n\n        return self.get_response(message='Successfully Followed User', result={'followed': result.user_name, 'me': self.request.current_user.user_name})\n\n\n@view_authenticate()\nclass ListUserFollowedView(APIViewMixin, generics.ListAPIView):\n\n    def list(self, request, *args, **kwargs):\n        user = self.request.current_user\n        interests_list = serializer_to_json(ListUsersSerializer, user.get_followed)\n        message = 'Successfully Retrieved Followed users for {}'.format(user.user_name)\n\n        return self.get_response(message=message, result=interests_list)\n\n\nurlpatterns = [\n    path('user/', FollowUserView.as_view(), name='follow_user'),\n    path('all/', ListUserFollowedView.as_view(), name='all_people_followed'),\n\n]\n","repo_name":"jameelhamdan/Entrfy","sub_path":"main/followers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"}
+{"seq_id":"27607638583","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport random\nimport math\n\nchar_map = [\n        ['A', 'a', '4', '@'],\n        ['B', 'b', '8'],\n        ['C', 'c'],\n        ['D', 'd'],\n        ['E', 'e', '3'],\n\t['F', 'f'],\n\t['G', 'g', '6', '9'],\n\t['H', 'h'],\n\t['I', 'i', '1'],\n\t['J', 'j'],\n\t['K', 'k'],\n\t['L', 'l', '1'],\n\t['M', 'm'],\n\t['N', 'n'],\n\t['O', 'o', '0'],\n\t['P', 'p'],\n\t['Q', 'q'],\n\t['R', 'r'],\n\t['S', 's', '5', '$'],\n\t['T', 't', '7'],\n\t['U', 'u'],\n\t['V', 'v'],\n\t['W', 'w'],\n\t['X', 'x'],\n\t['Y', 'y'],\n\t['Z', 'z', '2'],\n        ]\n\ndef change(c):\n    if c.isalpha():\n        c = c.upper()\n        char_set = char_map[ord(c) - ord('A')]\n        new_c = char_set[random.randint(0, len(char_set) - 1)]\n        return new_c, (c == new_c) * math.log2(len(char_set))\n    else:\n        return c, 0\n\ndef main():\n    global args\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"string\", help = \"origin flag string\")\n    args = parser.parse_args()\n    entropy = 0\n    for c in args.string:\n        new_c, e = change(c)\n        print(new_c, end = '')\n        entropy += e\n    print()\n    print(f\"Added entropy: {entropy:.2f} bits\")\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"ss8651twtw/CTF-flag-generator","sub_path":"genflag.py","file_name":"genflag.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"71"}
+{"seq_id":"73490712549","text":"import json\nimport logging\nimport os\nlogging.basicConfig(filename='analysis.log', \n        format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',\n        datefmt='%Y-%m-%d %H:%M:%S',\n        level=logging.DEBUG)\n# install_mp_handler()\nlogger = logging.getLogger(__name__)\nlogger.info(f'Logger start: {os.uname()[1]}')\n\ndataset = json.load(open('extract_wiki/dataset_0.50_undir2dir_0.75.json'))\nwith open('pairs.txt', 'w') as f:\n\tfor i, datapoint in enumerate(dataset):\n\t\t# dict_keys(['pair', 'sim', 'entity', 'target', 'source', 'triple'])\n\t\tk = 'pair'\n\t\tf.write(f'{datapoint[\"pair\"][0].encode(\"utf-8\"), datapoint[\"pair\"][1].encode(\"utf-8\")}\\n')\n\t\tf.write(f'{datapoint[\"source\"][0].encode(\"utf-8\")}\\n')\n\t\tif i > 200: break\n\nlogger.info(f'length {len(dataset)}')","repo_name":"YerongLi/ekar_english","sub_path":"DEER-main/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"8233906200","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fileencoding=utf-8\nfrom telegram.ext import Updater\nimport logging\nimport json\nimport requests\nimport threading\nimport time\nimport datetime\nfrom telegram.ext import CommandHandler\nfrom telegram.ext import MessageHandler\nfrom telegram.ext import Filters\n\nteleKey = \"YOUR TELEGRAM API KEY HERE\"\napiKey = \"YOU OPENWEATHER API KEY HERE\"\n\nupdater = Updater(token=teleKey, use_context=True)\n\ndispatcher = updater.dispatcher\ncityList = {}\nsubList = {}\ndoExit = False\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\ndef getClothes(temp, wind, id):\n    if id >= 200 and id <= 232:\n        return \"Оденьте непромокаемую одежду покрепче или дождевик и не берите с собой зонтик.\"\n    if id >= 300 and id <= 531:\n        if wind > 10:\n            return \"Оденьте непромокаемую одежду покрепче или дождевик и не берите с собой зонтик.\"\n        else:\n            if temp > 15:\n                return \"Можете лекго одеться, но возьмите с собой зонтик.\"\n            else:\n                return \"Оденьтесь потеплее и возьмите с собой зонтик.\"\n    if id >= 600 and id <= 622:\n        if temp >= -10 and wind < 10:\n            return \"Можно идти без шарфа, но желательно надеть ботинки повыше.\"\n        else:\n            return \"Завернитесь в шарф, и оденьте ботинки повыше.\"\n    if id >= 701 and id < 781 and id != 741:\n        return \"Обязательно наденьте маску перед выходом на улицу.\"\n    if id == 781:\n        return \"На стоит выходить на улицу.\"\n    if id == 803 or id == 804:\n        if wind < 10:\n            return \"Стоит взять с собой зонтик.\"\n        else:\n            return \"Зонтик лучше с собой не брать.\"\n    if temp < -10 or temp < 0 and wind > 10:\n        return \"Завернитесь в шарф и оденьтесь потеплее.\"\n    return \"Никаких особых рекомендаций нет.\"\n\n\ndef start(update, context):\n    context.bot.send_message(chat_id=update.effective_chat.id, text=\"Привет! Я умный погодный бот разработанный, как тестовое задание для 'Школы будущих СТО. 
\\nДля получения помощи напиши /help\")\n\ndef setCity(update, context):\n    if len(context.args) == 0:\n        context.bot.send_message(chat_id=update.effective_chat.id, text=\"Введите свой город!\")\n    else:\n        oldtime = 0\n        if str(update.effective_chat.id) in cityList:\n            res1 = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q=\" + cityList[str(update.effective_chat.id)] + \"&appid=\" + apiKey + \"&lang=ru\")\n            dat1 = json.loads(res1.text)\n            oldtime = int(dat1[\"timezone\"])\n        res = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q=\" + ' '.join(context.args) + \"&appid=\" + apiKey + \"&lang=ru\")\n        dat = json.loads(res.text)\n        if dat[\"cod\"] == \"404\":\n            context.bot.send_message(chat_id=update.effective_chat.id, text=\"Ваш город не найден!\")\n        else:\n            cityList[str(update.effective_chat.id)] = ' '.join(context.args)\n            if str(update.effective_chat.id) in subList:\n                timeUTC = subList[str(update.effective_chat.id)] - int(dat[\"timezone\"]) + oldtime\n                if timeUTC < 0:\n                    subList[str(update.effective_chat.id)] = 24*3600 + timeUTC\n                elif timeUTC > 23*3600 + 59*60:\n                    subList[str(update.effective_chat.id)] = timeUTC - 24*3600\n                else:\n                    subList[str(update.effective_chat.id)] = timeUTC\n            context.bot.send_message(chat_id=update.effective_chat.id, text=\"Город установлен!\")\n\ndef weather(update, context):\n    if not str(update.effective_chat.id) in cityList and len(context.args) == 0:\n        context.bot.send_message(chat_id=update.effective_chat.id, text=\"Установите свой город с помощью команды /setcity или введите /weather <город>, чтобы получить погоду\")\n    elif len(context.args) > 0:\n        res = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q=\" + ' '.join(context.args) + \"&appid=\" + apiKey + \"&lang=ru\")\n        dat = json.loads(res.text)\n        if dat[\"cod\"] == \"404\":\n            context.bot.send_message(chat_id=update.effective_chat.id, text=\"Ваш город не найден!\")\n        else:\n            context.bot.send_message(chat_id=update.effective_chat.id, text=\"В городе \" + dat[\"name\"] + \" на улице \" + dat['weather'][0]['description'] + \".\\nТемпература: \" + str(float('{:.2f}'.format(float(dat['main']['temp']) - 273))) + \" градусов цельсия.\\nСкорость ветра: \" + str(dat[\"wind\"][\"speed\"]) + \" метров в секунду.\\nДавление: \" + str(dat[\"main\"][\"pressure\"]) + \" гПа.\\nВлажность: \" + str(dat[\"main\"][\"humidity\"]) + \"%\\nРекомендации: \" + getClothes(float('{:.2f}'.format(float(dat['main']['temp']) - 273)),dat[\"wind\"][\"speed\"],dat[\"weather\"][0][\"id\"]))\n    else:\n        res = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q=\" + cityList[str(update.effective_chat.id)] + \"&appid=\" + apiKey + \"&lang=ru\")\n        dat = json.loads(res.text)\n        context.bot.send_message(chat_id=update.effective_chat.id, text=\"В городе \" + dat[\"name\"] + \" на улице \" + dat['weather'][0]['description'] + \".\\nТемпература: \" + str(float('{:.2f}'.format(float(dat['main']['temp']) - 273))) + \" градусов цельсия.\\nСкорость ветра: \" + str(dat[\"wind\"][\"speed\"]) + \" метров в секунду.\\nДавление: \" + str(dat[\"main\"][\"pressure\"]) + \" гПа.\\nВлажность: \" + str(dat[\"main\"][\"humidity\"]) + \"%\\nРекомендации: \" + 
getClothes(float('{:.2f}'.format(float(dat['main']['temp']) - 273)),dat[\"wind\"][\"speed\"],dat[\"weather\"][0][\"id\"]))\n\ndef unknown(update, context):\n    context.bot.send_message(chat_id=update.effective_chat.id, text=\"Неизвестная команда!\\nИспользуйте /help для получения списка команд\")\n\ndef help(update, context):\n    context.bot.send_message(chat_id=update.effective_chat.id, text=\"Для получения погоды для вашего города используйте /weather\\nДля получения погоды для любого города используйте /weather <город>\\nДля установки своего города используйте /setcity <город>\\nДля получения помощи напишите /help\\nЧтобы получать информацию о погоде в рассылке напишите /subscribe <часы> <минуты>, где часы минуты обозначают время, в которое вам необходимо получать погоду.\\nЧтобы отписаться от рассылки используйте /unsubscribe.\")\n\ndef subscribe(update, context):\n    if not str(update.effective_chat.id) in cityList:\n        context.bot.send_message(chat_id=update.effective_chat.id, text=\"Вы не выбрали свой город!\\nИспользуйте /setcity <город>, чтобы выбрать свой город\")\n    elif len(context.args) < 2:\n        context.bot.send_message(chat_id=update.effective_chat.id, text=\"Введите /subscribe <часы> <минуты>\")\n    else:\n        res = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q=\" + cityList[str(update.effective_chat.id)] + \"&appid=\" + apiKey + \"&lang=ru\")\n        dat = json.loads(res.text)\n        try:\n            timeUTC = 3600 * int(context.args[0]) + 60 * int(context.args[1]) - int(dat[\"timezone\"])\n            if timeUTC < 0:\n                subList[str(update.effective_chat.id)] = 24*3600 + timeUTC\n            elif timeUTC > 23*3600 + 59*60:\n                subList[str(update.effective_chat.id)] = timeUTC - 24*3600\n            else:\n                subList[str(update.effective_chat.id)] = timeUTC\n            context.bot.send_message(chat_id=update.effective_chat.id, text=\"Вы успешно добавлены в список для рассылки!\")\n        except ValueError:\n            context.bot.send_message(chat_id=update.effective_chat.id, text=\"Произошла ошибка при попытке добавить вас в список для рассылки. 
Проверьте правильность введенного вермени!\")\n\ndef unsubscribe(update, context):\n    if not str(update.effective_chat.id) in subList:\n        context.bot.send_message(chat_id=update.effective_chat.id, text=\"Вы не подписаны на рассылку.\")\n    else:\n        del subList[str(update.effective_chat.id)]\n        context.bot.send_message(chat_id=update.effective_chat.id, text=\"Вы успешно отписались от рассылки!\")\n\ndef timeCounter():\n    while not doExit:\n        curtime = datetime.datetime.now().hour * 3600 + datetime.datetime.now().minute * 60 + datetime.datetime.now().second\n        for k in subList:\n            if curtime == subList[k]:\n                res = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q=\" + cityList[k] + \"&appid=\" + apiKey + \"&lang=ru\")\n                dat1 = json.loads(res.text)\n                res = requests.get(\"https://api.openweathermap.org/data/2.5/onecall?lon=\" + str(dat1[\"coord\"][\"lon\"]) + \"&lat=\" + str(dat1[\"coord\"][\"lat\"]) + \"&appid=\" + apiKey + \"&lang=ru\")\n                dat = json.loads(res.text)\n                updater.bot.send_message(chat_id=int(k), text=\"В городе \" + cityList[k] + \" сегодня на улице \" + dat[\"daily\"][0][\"weather\"][0][\"description\"] + \".\\nТемпература от \" + str(float('{:.2f}'.format(float(dat['daily'][0]['temp'][\"min\"]) - 273))) + \" до \" + str(float('{:.2f}'.format(float(dat['daily'][0]['temp'][\"max\"]) - 273))) + \" градусов цельсия.\\nСкорость ветра \" + str(dat[\"daily\"][0][\"wind_speed\"]) + \" метров в секунду\\nДавление: \" + str(dat[\"daily\"][0][\"pressure\"]) + \" гПа\\nВлажность: \" + str(dat[\"daily\"][0][\"humidity\"]) + \"%\\nРекомендации: \" + getClothes(float('{:.2f}'.format(float(dat[\"daily\"][0]['temp']['day']) - 273)),dat[\"daily\"][0][\"wind_speed\"],dat[\"daily\"][0][\"weather\"][0][\"id\"]))\n        time.sleep(1)\n\nstart_handler = CommandHandler('start', start)\nweather_handler = CommandHandler('weather', weather)\nsetcity_handler = CommandHandler('setcity', setCity)\nhelp_handler = CommandHandler('help',help)\nsub_handler = CommandHandler('subscribe',subscribe)\nunsub_handler = CommandHandler('unsubscribe',unsubscribe)\necho_handler = MessageHandler(Filters.text, unknown)\nunknown_handler = MessageHandler(Filters.command, unknown)\ndispatcher.add_handler(start_handler)\ndispatcher.add_handler(weather_handler)\ndispatcher.add_handler(setcity_handler)\ndispatcher.add_handler(help_handler)\ndispatcher.add_handler(sub_handler)\ndispatcher.add_handler(unsub_handler)\ndispatcher.add_handler(unknown_handler)\ndispatcher.add_handler(echo_handler)\ntry:\n    with open(\"conf.json\",\"r\") as f:\n        cityList = json.load(f)\nexcept IOError:\n    print(\"Log is not accesible\")\ntry:\n    with open(\"subconf.json\",\"r\") as f:\n        subList = json.load(f)\nexcept IOError:\n    print(\"SubLog is not accesible\")\nx = threading.Thread(target=timeCounter)\nx.start()\nupdater.start_polling()\nupdater.idle()\nwith open(\"conf.json\",\"w\") as f:\n    json.dump(cityList, f)\nwith open(\"subconf.json\",\"w\") as f:\n    json.dump(subList, f)\ndoExit = True","repo_name":"EzicMan/STOTask","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":12140,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
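Both the /setcity and /subscribe branches in the record above clamp a seconds-of-day value back into [0, 86400) with if/elif chains; the same normalization collapses to a single modulo, which is easy to verify in isolation (sample offsets invented):

```python
def wrap_day(seconds: int) -> int:
    # fold any offset back into one 24-hour day, like the if/elif chains above
    return seconds % (24 * 3600)

for t in (-3600, 0, 5 * 3600, 25 * 3600):
    print(t, wrap_day(t))
# -3600 -> 82800, 0 -> 0, 18000 -> 18000, 90000 -> 3600
```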
+{"seq_id":"70456242150","text":"import sqlite3\n\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\n\n\ndef main():   \n    \n    data_path = Path('data-extra')\n    base_649_site = 'https://www.playnow.com/lottery/lotto-649-winning-numbers/?date='\n    \n    con = sqlite3.connect(\"649_lottery.db\")\n    cur = con.cursor()\n\n    res = cur.execute(\"\"\"\n    SELECT day, month, year\n    FROM winnings     \n    WHERE num1 is null\n    ORDER BY year,month,day\n    \"\"\")\n\n    for d in res:\n\n        draw_site = base_649_site + d[0] +'/' + d[1] + '/' + d[2]\n        \n        driver = webdriver.Chrome()\n        driver.get(draw_site)\n        driver.implicitly_wait(15)\n\n        draw_soup = BeautifulSoup(driver.page_source,'html.parser')\n\n        draw_fn_path = Path(data_path,\"lotto-649-winning-numbers-\"+ d[2] + '-' + d[1] + '-' + d[0]+\".html\")\n        \n        with open(draw_fn_path,'w',encoding='utf-8') as f:\n            f.write(draw_soup.prettify())\n\n        driver.quit()\n        \n    con.close()\n\nif __name__ == \"__main__\":\n    main()\n\n\n","repo_name":"jrmacdoug/649_scrap","sub_path":"scripts/etl_download_missing-html.py","file_name":"etl_download_missing-html.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"72455636070","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom numpy.linalg import inv\n\ndef driver():\n    xeval = np.linspace(0,10,1000)\n    xint = np.linspace(0,10,11)\n\n    intervals = findx(xeval, xint)\n    \n\ndef findx(xeval, xint):\n    temp = [[],[],[],[],[],[],[],[],[],[]]\n    for i in range(1, len(xint)):\n        temp2 = np.where((xeval= xint[i-1]))\n        temp[i-1] = xeval[temp2]\n    temp[9] = np.append(temp[9],xeval[-1])\n    return temp\n    \n\ndef createLine(x, yeval, f):\n   y = np.zeros((len(x), len(x[0])))\n   for i in range(0,len(x)):\n        for j in range(1, len(x)):\n             slope = (yeval[i][j] - yeval[i][j-1])/(x[i][j] - x[i][j-1])\n             y[i][j] = slope*(x[i][j] - x[i][j-1]) + yeval[i][j-1]\n        return y\n   return y\n\ndriver()","repo_name":"tonySamour/Samour_APPM4600","sub_path":"Labs/Lab8/PreLab.py","file_name":"PreLab.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"6710201612","text":"import pandas as pd\nimport numpy as np\nimport sys\nsys.path.append(\"..\")#为了import引用上一级包\nfrom Tools.sql_db import *\n\nPF_all=PF_all_GlobalX\n\ndef 更新配方表(data='E:/PythonStudy_Git/调用资料/更新配方表.xlsx'):\n\tdf=pd.read_excel(data,sheet_name = None)#读取excel表格\n\tPF_content=()\n\t#更新配方表,删旧表\n\tfor PF_df in df:#读取表名i,并创建\n\t\tif PF_df in PF_all:\n\t\t\t# cursor.execute(f\"DROP TABLE {PF_df}\")\n\t\t\tprint(f\"DROP TABLE {PF_df}\")\n\t\texcel创建sql表(df,PF_df)\n\t\tPF_content+=excel写入sql表(df,PF_df)\n\tsql更新销售表excel('销售表')\n\n\t#材料库更新\n\tupdate_content=[]\t\n\tfor i in PF_content:\n\t\tCL_name=i[0]\n\t\tif CL_name not in sql表的材料list('材料库'):\n\t\t\tupdate_content.append(CL_name)\n\t\t\t# 执行sql('insert','材料库',值=str((CL_name,0)))\n\t\t\t执行sql('insert_set','材料库变动记录表',值=f\"材料='{CL_name}',用量=0,时间='{localtime_GlobalX}',操作内容='更新'\")\n\tsql材料库更新excel('进货表')\n\tsql材料库更新excel('校准表')\n\treturn update_content#返回材料更新内容 \n\t#,进货,校准,销售3张excel表\n\nprint(更新配方表())\n\n\n\n\nconnection.close()\n","repo_name":"ycallenchina/PythonStudy_Git","sub_path":"其他学习_少量/进销存学习/demo更新表.py","file_name":"demo更新表.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"29547150106","text":"from statistics import mean, median, median_low, median_high\nimport pandas as pd\nfrom random import sample\nimport numpy as nu\t\n\n\nfrom playlistGraphBuilder import PlaylistGraph\nfrom graphEnrichment import getEnrichment, getArtistGenre\nfrom ExtractAllPaths import extractAllPath\nfrom cosineSimilarity import getPlaylistCosineSim\n\n\n\ndef get_sample():\n\ttry:\n\t\tprint(\"sample found\")\n\t\tsample = pd.read_csv('data/filteredSample.csv')\n\texcept:\n\t\tprint(\"making sample\")\n\t\tsample = filterplaylist(makeSample(pd.read_json('data/playlists.json', lines=True))) \n\n\treturn(sample)\n\ndef datasetStatistics():\n\tplaylists = pd.read_json('data/playlists.json', lines=True)\n\tplaylistLength = []\n\tplaylistNum = []\n\towners = playlists['owner']\n\tfor p,playlist in playlists.iterrows():\n\t\tplaylistLength.append(len(playlist['tracks']))\n\tquantiles = nu.quantile(playlistLength,(0,.25,.5,.75,1))\n\t\n\tfor p,playlist in playlists.iterrows():\n\t\towners.append(playlist['owner'])\n\t\tif len(playlist['tracks']) >= quantiles[1] and len(playlist['tracks']) <= quantiles[3]:\n\t\t\tplaylist2use.append(playlist['id']) \n\t\n\tfilteredPlaylists = (playlists.loc[playlists['id'].isin(playlist2use)])\n\t\ndef makeSample(playlists):\n\tusers = list(set(playlists['owner']))\n\tsampleUsers = sample(users, 300)\n\tplaylistSample = (playlists.loc[playlists['owner'].isin(sampleUsers)])\n\tplaylistSample.to_csv('data/sample300users.csv')\n\treturn(playlistSample)\n\t\ndef filterplaylist(playlists):\n\tplaylist2use = []\n\towners = []\n\tplaylists['id'] = playlists['_id'].map(lambda x: x['$oid'])\n\tfor p,playlist in playlists.iterrows():\n\t\towners.append(playlist['owner'])\n\t\tif len(playlist['tracks']) >= 11 and len(playlist['tracks']) <= 41:\n\t\t\tplaylist2use.append(playlist['id']) \n\t\n\tfilteredPlaylists = (playlists.loc[playlists['id'].isin(playlist2use)])\n\tfilteredPlaylists.to_csv('data/filteredSample.csv')\n\treturn(filteredPlaylists)\n\n\n\n#def main(playlists):\nprint(\"getting playlists...\")\nplaylists = get_sample()\n# print(\"building playlists graphs\")\n# PlaylistGraph(playlists)\nprint(\"enrichment\")\n# getEnrichment(playlists)\ngetArtistGenre(playlists)\n# print(\"patterns extraction\")\n# extractAllPath()\n#print(\"cosine similarity calculation\")\n#getPlaylistCosineSim()\n\n","repo_name":"maccatrozzov/serendipityByChoice","sub_path":"code/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"6127092820","text":"import hashlib\nimport operator\nimport os\nimport pathlib\nimport struct\nimport typing as tp\nfrom typing import List\n\nfrom pyvcs.objects import hash_object\n\n\nclass GitIndexEntry(tp.NamedTuple):\n    # @see: https://github.com/git/git/blob/master/Documentation/technical/index-format.txt\n    ctime_s: int\n    ctime_n: int\n    mtime_s: int\n    mtime_n: int\n    dev: int\n    ino: int\n    mode: int\n    uid: int\n    gid: int\n    size: int\n    sha1: bytes\n    flags: int\n    name: str\n\n    def pack(self) -> bytes:\n        values = (\n            self.ctime_s,\n            self.ctime_n,\n            self.mtime_s,\n            self.mtime_n,\n            self.dev,\n            self.ino & 0xFFFFFFFF,\n            self.mode,\n            self.uid,\n            self.gid,\n            self.size,\n            self.sha1,\n            self.flags,\n        )\n        packed = struct.pack(\"!LLLLLLLLLL20sH\", *values)\n        name = self.name.encode()\n        N = 8 - (62 + len(name)) % 8\n        return packed + name + b\"\\x00\" * N\n\n    @staticmethod\n    def unpack(data: bytes) -> \"GitIndexEntry\":\n        data_wo_name = data[:62]\n        unpacked = struct.unpack(\"!LLLLLLLLLL20sH\", data_wo_name)\n        counter = 0\n        new_data = data[62:]\n        for symbol in new_data:\n            if symbol != 0:\n                counter += 1\n            else:\n                break\n        name = new_data[:counter].decode()\n        return GitIndexEntry(\n            ctime_s=unpacked[0],\n            ctime_n=unpacked[1],\n            mtime_s=unpacked[2],\n            mtime_n=unpacked[3],\n            dev=unpacked[4],\n            ino=unpacked[5],\n            mode=unpacked[6],\n            uid=unpacked[7],\n            gid=unpacked[8],\n            size=unpacked[9],\n            sha1=unpacked[10],\n            flags=unpacked[11],\n            name=name,\n        )\n\n\ndef read_index(gitdir: pathlib.Path) -> tp.List[GitIndexEntry]:\n    entries: List[GitIndexEntry] = []\n    indexdir = gitdir / \"index\"\n    if indexdir.exists():\n        with indexdir.open(\"rb\") as f:\n            data = f.read()\n    else:\n        return entries\n    signature = b\"DIRC\"\n    version = 2\n    needed_range = struct.unpack(\"!L\", data[8:12])[0]\n    header = struct.pack(\"!4sLL\", signature, version, needed_range)\n    new_data = data[len(header) :]\n    for i in range(needed_range):\n        counter = 0\n        zero_counter = 0\n        for symbol in new_data[62:]:\n            if symbol != 0:\n                if zero_counter > 0:\n                    break\n                counter += 1\n            else:\n                zero_counter += 1\n        entry = GitIndexEntry.unpack(new_data)\n        entries.append(entry)\n        new_data = new_data[62 + zero_counter + counter :]\n        if len(new_data) < 64:\n            break\n    return entries\n\n\ndef write_index(gitdir: pathlib.Path, entries: tp.List[GitIndexEntry]) -> None:\n    index = gitdir / \"index\"\n    packed_entries = \"\"\n    for entry in entries:\n        packed_entries += bytes.hex(entry.pack())\n    signature = b\"DIRC\"\n    version = 2\n    header = struct.pack(\"!4sLL\", signature, version, len(entries))\n    data = header + bytes.fromhex(packed_entries)\n    with index.open(\"wb\") as f:\n        f.write(data + hashlib.sha1(data).digest())\n\n\ndef ls_files(gitdir: pathlib.Path, details: bool = False) -> None:\n    entries = read_index(gitdir)\n    for entry in entries:\n        if 
details == False:\n            print(entry.name)\n        else:\n            stage = (entry.flags >> 12) & 3\n            print(f\"{entry.mode:o} {bytes.hex(entry.sha1)} {stage}\\t{entry.name}\")\n\n\ndef update_index(gitdir: pathlib.Path, paths: tp.List[pathlib.Path], write: bool = True) -> None:\n    entries = []\n    paths = sorted(paths)\n    for path in paths:\n        with path.open(\"rb\") as f:\n            data = f.read()\n        sha = hash_object(data, fmt=\"blob\", write=True)\n        entry = GitIndexEntry(\n            ctime_s=int(os.stat(path).st_ctime),\n            ctime_n=0,\n            mtime_s=int(os.stat(path).st_mtime),\n            mtime_n=0,\n            dev=os.stat(path).st_dev,\n            ino=os.stat(path).st_ino,\n            mode=os.stat(path).st_mode,\n            uid=os.stat(path).st_uid,\n            gid=os.stat(path).st_gid,\n            size=os.stat(path).st_size,\n            sha1=bytes.fromhex(sha),\n            flags=len(path.name),\n            name=str(path),\n        )\n        entries.append(entry)\n    if write:\n        if not (gitdir / \"index\").exists():\n            write_index(gitdir, entries)\n        else:\n            index = read_index(gitdir)\n            index += entries\n            write_index(gitdir, index)\n","repo_name":"Ilyafalkon/cs102","sub_path":"homework04/pyvcs/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
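For the index format in the record above, the fixed header of each entry is 62 bytes (ten 4-byte fields, a 20-byte SHA-1 and a 2-byte flags word), and `pack`/`unpack` should round-trip; a minimal check of that layout with made-up stat values:

```python
import struct

ENTRY_FMT = "!LLLLLLLLLL20sH"
assert struct.calcsize(ENTRY_FMT) == 62

values = (1, 0, 1, 0, 64, 12345, 0o100644, 1000, 1000, 11, b"\xab" * 20, 9)
packed = struct.pack(ENTRY_FMT, *values)
assert struct.unpack(ENTRY_FMT, packed) == values

name = b"hello.txt"                 # 9 bytes, matching the flags value above
padding = 8 - (62 + len(name)) % 8  # 1..8 NUL bytes, as in GitIndexEntry.pack
assert (62 + len(name) + padding) % 8 == 0
```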
+{"seq_id":"14573394413","text":"import os\nimport pulumi\nimport pulumi_aws as aws\n\n# Setting variables\nbucket_name = \"our-cool-bucket-name-1231232\"\ntable_name = \"TerraformStateLock\"\nAWS_REGION = os.environ.get(\"REGION\", \"us-east-1\")\n\n\n# Create an S3 bucket\nbucket = aws.s3.Bucket(bucket_name)\n\n# Create a DynamoDB table\ntable = aws.dynamodb.Table(\n    table_name,\n    attributes=[{\"name\": \"LockID\", \"type\": \"S\"}],\n    hash_key=\"LockID\",\n    read_capacity=1,\n    write_capacity=1,\n)\n\n# Export the bucket name and table ARN\npulumi.export(\"bucket_name\", bucket.id)\npulumi.export(\"table_arn\", table.arn)\n","repo_name":"pluralsight-cloud/aws-certified-solutions-architect-associate","sub_path":"12-ha-and-scaling/scaling-non-relational-databases/infrastructure_scripts/pulumi/create_backend_resources.py","file_name":"create_backend_resources.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"}
+{"seq_id":"13060809914","text":"\"\"\"Nasser's default plot style.\"\"\"\nimport matplotlib.pyplot as plt\n\n\ndef default():\n    \"\"\"Make it matplotlib's default.\"\"\"\n    plt.style.use('default')\n    return None\n\n\ndef style(serif='Computer Modern'):\n    \"\"\"Set custom config.\"\"\"\n    plt.style.use('seaborn-deep')\n    config = {\n        'text.usetex': True,    # use LaTeX to render text\n        'font.family': 'serif',\n        'font.serif': [serif],\n        # 'figure.constrained_layout.use': True,\n        'figure.autolayout': True, # auto adjust subplots\n        'legend.frameon': True,\n        'legend.edgecolor': 'none',\n        'legend.fontsize': 8,\n        'font.size': 10.0,\n        'axes.grid': True,\n        'axes.linewidth': 0.5,    # seaborn changed to a ticker, better 1 or 0.5\n        'grid.linewidth': 0.5,\n        'xtick.major.width': 0.5,\n        'ytick.major.width': 0.5,\n        'axes.edgecolor': '0.0',\n        'grid.color': '0.8',\n        'text.color': '0.15',\n        'xtick.color': '0.15',\n        'ytick.color': '0.15',\n        'lines.linewidth': 1,   # default is 1.5\n        'lines.markerfacecolor': \"none\",\n        'lines.markeredgewidth': 1,\n        'text.latex.preamble': r\"\\usepackage{amsmath}\", # allows \\text in math env\n    }\n    plt.rcParams.update(config)\n    return None\n\n","repo_name":"nasseralkmim/figtex","sub_path":"figtex.py","file_name":"figtex.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"9479147727","text":"import numpy as np\nimport cv2\nimport sys\nimport math\n\n\n# wyszukuje wszystkie dlugie linie w obrazie img\ndef wyszukaj_pieciolinie(img, obetnij = False):\n    #K = np.array([[ 1, 2, 1], [ 0, 0, 0],[ -1,-2,-1]])\n    #K = K / 4\n    theta = np.pi / 180\n    min_szerokosc = int(img.shape[1] / 4)\n    if obetnij == True:\n        img = img[:, 0:int(img.shape[1] / 3)]#4\n        theta = np.pi / 4 #4\n        #cv2.imshow('obciete do pieciolinii', img)\n        #cv2.waitKey(0)\n        min_szerokosc = int(img.shape[1] / 3)\n    #min_szerokosc = int(img.shape[1] / 4)\n    #figure(figsize=(8,8))\n    #io.imshow(res)\n    ############## SZUKANIE LINII\n    kernel_size = 1\n    blur_gray = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n    #blur_gray = abs(convolve(blur_gray, K))\n    low_threshold = 50\n    high_threshold = 200\n    edges = cv2.Canny(blur_gray, low_threshold, high_threshold)\n    rho = 1  # distance resolution in pixels of the Hough grid\n    #theta = np.pi / 180# angular resolution in radians of the Hough grid\n    threshold = int(min_szerokosc / 4 ) #4 # minimum number of votes (intersections in Hough grid cell)\n    min_line_length = min_szerokosc  # minimum number of pixels making up a line\n    max_line_gap = min_szerokosc / 2  # maximum gap in pixels between connectable line segments\n    line_image = np.copy(img) * 0  # creating a blank to draw lines on\n    #print(\"shape0: {}\".format(img.shape[0]))\n    # Run Hough on edge detected image\n    # Output \"lines\" is an array containing endpoints of detected line segments\n    lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),\n                            min_line_length, max_line_gap)\n    #lines2 = np.array([[[0, 0, 0, 0],],], int)\n    lines2 = []\n    for line in lines:\n        #print(np.append(lines2, [line,]))\n        for x1, y1, x2, y2 in line:\n            if obetnij == True:\n                if abs(y1 - y2) <= (img.shape[0] * 0.2) and y1 < 190 and y2 < 190:\n                    #(y1 + 20 ) < img.shape[0] and (y2 + 20 ) < img.shape[0]:\n                    lines2.append(line)\n                    cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 5)\n            else:\n                lines2.append(line)\n                cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 5)\n\n    lines_edges = cv2.addWeighted(img, 0.8, line_image, 1, 0)\n    #print(\"lines2: {} \\nlines: {} type: {}\".format(lines2, lines, type(lines)))\n    #cv2.imshow('wyszukaj', edges)\n    #cv2.imwrite(\"linia.jpg\", lines_edges)\n    #cv2.imshow('wyszukaj2', lines_edges)\n\n    #cv2.waitKey(0)\n\n    return lines\n\n\n# zwraca kat obrotu w stopniach na podstawie zakrzywienia linii (linie powinny byc rownolegle)\ndef kat_obrotu(lines):\n    srednia1=[]\n    for line in lines:\n        for x1, y1, x2, y2 in line:\n            #if (x1 > x2):\n               # bufx=x2\n               # bufy=y2\n                #x2 =x1\n                #y2=y1\n                #x1=bufx\n                #y1=bufy\n            if(x1!=x2):\n                 srednia1.append(math.atan(-(y1 - y2) / (x1-x2))*180/np.pi)\n            #print(\"{} {} {} {}\".format(x1, y1, x2, y2))\n            #cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 5)\n    srednia= np.mean(srednia1)\n    return -srednia\n\n# zwraca wspolzedne poczatku i konca pola zawierajacego pieciolinie\ndef pole_pieciolinii(lines):\n    x1min = min(min(lines[:, :, 0]), min(lines[:, :, 2]))\n    x1min = x1min[0]\n    x2max = max(max(lines[:, 
:, 0]), max(lines[:, :, 2]))\n    x2max = x2max[0]\n\n\n    y1min = min(min(lines[:, :, 1]), min(lines[:, :, 3]))\n    y1min = y1min[0]\n    y2max = max(max(lines[:, :, 1]), max(lines[:, :, 3]))\n    y2max = y2max[0]\n    #print(x1min, y2max)\n    return x1min, x2max, y1min, y2max\n\ndef polozenie_poczatkowe(lines):\n    x1min, x2max, y1min, y2max = pole_pieciolinii(lines)\n    delta_x = x1min + x2max * 0.2\n    return y1min, y2max\n# zwraca wspolrzedne x 5 linii w pieciolinii na podstawie linii\ndef wspolrzedne_pieciolinii(lines):\n    x1, x2, y1, y5 = pole_pieciolinii(lines)\n    y3 = (y1 + y5) / 2\n    y2 = (y1 + y3) / 2\n    y4 = (y3 + y5) / 2\n    return int(y1), int(y2), int(y3), int(y4), int(y5)\n\n# srednia roznica pomiedzy 5 liniami w px\ndef srednia_delta_linii(yn):\n    return int((yn[4] - yn[0]) / 4)\n\n# ustala rzeczywiste linie pieciolinii na podstawie wyznaczonych wspolrzednych i pikseli w obrazie\ndef znajdz_rzeczywista_pieciolinie(img, wspolrzedne, x):\n    out_y = [0, 0, 0, 0, 0]\n    delta_y_skan = int(srednia_delta_linii(wspolrzedne) / 4)\n    min_y = 0\n    max_y  = 0\n    size_y = img.shape[0] - 1\n    for i, y_center in enumerate(wspolrzedne):\n        out_y[i] = wspolrzedne[i]\n        if img[y_center, x] == 0:\n            continue\n        for j in range(delta_y_skan):\n\n            if img[min(y_center + j, size_y), x] == 0:\n                min_y = y_center + j\n                max_y = min_y\n\n                for k in range(int(delta_y_skan / 2)):\n                    if img[min(min_y + k, size_y), x] == 255:\n                        max_y = min_y + k\n                        break\n                out_y[i] = int((min_y + max_y) / 2)\n                break\n            if img[min(y_center - j, size_y), x] == 0:\n                max_y = y_center - j\n                min_y = max_y\n\n                for k in range(int(delta_y_skan / 2)):\n                    if img[min(max_y - k, size_y), x] == 255:\n                        min_y = max_y - k\n                        break\n                out_y[i] = int((min_y + max_y) / 2)\n                break\n\n            img[min(y_center - j,size_y), x] = 128\n            img[min(y_center + j,size_y), x] = 128\n    #cv2.imshow('po progowaniu', img)\n    #cv2.waitKey(0)\n    return out_y\n\n# po wykrytych liniach pionowych identyfikuje dzwiek\ndef nazwa_dzwieku(wykryte_linie):\n    wysokosc = ['na 1', 'pomiedzy 1 i 2', 'na 2', 'pomiedzy 2 i 3', 'na 3', 'pomiedzy 3 i 4', ' na 4', 'pomiedzy 4 i 5', 'na 5']\n    ostatni = len(wykryte_linie) - 1\n    for i in range(len(wykryte_linie)):\n        if wykryte_linie[ostatni - i] == 1:\n            return wysokosc[ostatni - i - 1]\n    return ''\n\n# generuje tablice obrazow z piecioliniami na podstawie obrazu zrodlowego\ndef wydziel_pieciolinie(img, maska):\n    x_center = int(img.shape[1] / 2)\n    x_min = 100\n    delta = []\n    delta_x_pos = []\n    #wyszukiwanie ilosci czarnych pikseli dla pionowych linii\n    for i, x in enumerate(range(x_center - x_min, x_center + x_min, 20)):\n        delta.append(0)\n        delta_x_pos.append(x)\n        for y in range(img.shape[0]):\n            if img[y, x] == 0:\n                delta[i] += 1\n\n    #print(delta, delta_x_pos)\n\n    min_delta = min(delta)\n    for i in range(len(delta)):\n        if delta[i] == min_delta:\n            min_delta_x_pos = delta_x_pos[i]\n            break;\n   # print(min_delta_x_pos)\n\n    yn = []\n    # budowanielisty linii\n    for y in range(img.shape[0] - 1):\n        if img[y, min_delta_x_pos] > 150 
and img[y + 1, min_delta_x_pos] < 100:\n            yn.append(y)\n            #img[y-2:y+2, min_delta_x_pos-2:min_delta_x_pos+2] = 128\n\n    #print(yn)\n    y_pieciolinii = []\n    img_out = []\n    maska_out = []\n    for i in range(int(len(yn) / 5)):\n        y_pieciolinii.append(yn[i*5:(i*5) + 5])\n        img_out.append(img[max(int(y_pieciolinii[i][0]- 50), 0):int(y_pieciolinii[i][4]+50), :])\n        maska_out.append(maska[max(int(y_pieciolinii[i][0] - 50), 0):int(y_pieciolinii[i][4] + 50), :])\n        #cv2.imshow('image', img_out[i])\n        #cv2.waitKey(0)\n    #cv2.imshow('image', img)\n    #cv2.waitKey(0)\n    return img_out, maska_out","repo_name":"zurczi/KCKRozpoznawanieNut","sub_path":"kck/pieciolinia.py","file_name":"pieciolinia.py","file_ext":"py","file_size_in_byte":7517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"38545617237","text":"import json\nimport redis as py_redis\n\nfrom app.helpers.logger import logger\nfrom config.secret import redis_config, RedisKey\nfrom config.settings import SUPPORT_OJ\n\nredis = py_redis.StrictRedis(**redis_config)\n\n\ndef setup_redis():\n    if not redis.exists(RedisKey.switch):\n        ret = redis.hmset(RedisKey.switch, {oj: 1 for oj in SUPPORT_OJ})\n        if ret:\n            logger.info('[redis] setup switch key success')\n    log_spider_status()\n\n\ndef log_spider_status():\n    logger.info('[OPEN Spider] {0}'.format(\n        get_all_open_spider()\n    ))\n\n\n###################################\n# redis 控制Spider开关\n###################################\n\ndef get_all_open_spider():\n    all_status = redis.hgetall(RedisKey.switch)\n    if all_status:\n        all_status = [k.decode() for k, v in all_status.items() if int(v) == 1]\n    return all_status or []\n\n\ndef is_spider_open(oj_name):\n    status = int(redis.hget(RedisKey.switch, oj_name))\n    return status == 1\n\n\ndef turn_on_spider(oj_name):\n    if redis.exists(RedisKey.switch):\n        redis.hset(RedisKey.switch, oj_name, 1)\n    log_spider_status()\n\n\ndef turn_off_spider(oj_name):\n    if redis.exists(RedisKey.switch):\n        redis.hset(RedisKey.switch, oj_name, 0)\n    log_spider_status()\n\n\n###################################\n# redis 成就队列\n###################################\n\ndef push_submit_to_queue(submit_id):\n    logger.info('[redis] push submit #{} to queue'.format(submit_id))\n    redis.lpush(RedisKey.achieve_mq, json.dumps({'type': 'submit', 'id': submit_id}))\n","repo_name":"Raynxxx/ACM-Spider","sub_path":"app/helpers/redis_utils.py","file_name":"redis_utils.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"3990754176","text":"def rep_pow(x, n):\n    ans, MOD = 1, 998244353\n    while n:\n        if n % 2:\n            ans *= x\n            ans %= MOD\n        x *= x\n        x %= MOD\n        n >>= 1\n    return ans\n\n\nN = int(input())\nD = list(map(int, input().split()))\nMOD = 998244353\nfrom collections import Counter\ncnt = Counter(D)\nif(cnt[0] != 1 or D[0] != 0):\n  print(0)\n  exit(0)\nans = 1\nfor i in range(1, len(cnt)):\n    ans *= rep_pow(cnt[i - 1],cnt[i])\n    ans %= MOD\n    if(ans == 0): break\nprint(ans)\n","repo_name":"cod4i3/MyAtcoder","sub_path":"ARC-Like/NIKKEI2019-2-QUAL/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"36966571006","text":"import os\nimport glob\n\n# import all functions in the current directory\n\n# get all .py files in this directory\npythonFiles = glob.glob(os.path.dirname(__file__)+\"/*.py\")\n\n__all__ = []\n\n# getattr(__import__('Mass', fromlist=['.Mass']),'Mass')\n\nfuncNames = [ os.path.basename(pythonFile)[:-3] for pythonFile in pythonFiles ]\n\nfor funcName in funcNames:\n    if funcName == '__init__':\n        continue\n\n    # getattr(__import__(funcName, fromlist=[\".\" + funcName]), funcName)\n    __all__.append(funcName)\n\n    # __import__(funcName, locals(), globals())\n\n    # locals()[funcName] = getattr(locals()[funcName],funcName)\n\n    locals()[funcName] = getattr(__import__(funcName, locals(), globals()), funcName)\n\n#----------\n# cleanup\n#----------\ndel funcName\ndel funcNames\ndel pythonFiles\ndel pythonFile\n\ndel os\ndel glob","repo_name":"kinvarbuilder/kinvarbuilder","sub_path":"kinvarbuilder/functions/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"33857213456","text":"\"\"\"\n+++++++++++++++++++++++++++++++++++++\n\nHackerRank \nInterview prep kit\nWarm up challenge\n\nOptimal solution\n\n+++++++++++++++++++++++++++++++++++++\n\nJumping on the clouds\n\nEmma is playing a new mobile game that starts with consecutively numbered clouds. Some of the clouds are thunderheads and others are cumulus. She can jump on any cumulus cloud having a number that is equal to the number of the current cloud plus  or . She must avoid the thunderheads. Determine the minimum number of jumps it will take Emma to jump from her starting postion to the last cloud. It is always possible to win the game.\n\nFor each game, Emma will get an array of clouds numbered  if they are safe or  if they must be avoided. For example,  indexed from . The number on each cloud is its index in the list so she must avoid the clouds at indexes  and . She could follow the following two paths:  or . The first path takes  jumps while the second takes .\n\nFunction Description\n\nComplete the jumpingOnClouds function in the editor below. It should return the minimum number of jumps required, as an integer.\n\njumpingOnClouds has the following parameter(s):\n\nc: an array of binary integers\nInput Format\n\nThe first line contains an integer , the total number of clouds. The second line contains  space-separated binary integers describing clouds  where .\n\nConstraints\n\nOutput Format\n\nPrint the minimum number of jumps needed to win the game.\n\nSample Input 0\n\"\"\"\n\n\n\"\"\"\nReturn:\n- int, min number of jumps required\n\nInput parameters:\n- array comprised of binary integers, e.g. c = [0, 1 , 0, 0, 0, 1, 0]\n\nProcess:\n- Can move from starting index to any other index that is +1 or +2\n- Can move to indexs labeled '0'\n- Must avoid indexs labeled '1'\n\nTest case:\nInput: c = [0, 1 , 0, 0, 0, 1, 0]\nReturn: 3\nComments: Take the path on indexes [0, 2, 4, 6], the starting index is not counted \nin the solution\n\n\"\"\"\n\ndef jumpingOnClouds(c):\n    \"\"\"Return the minimum number of jumps possible.\"\"\"\n\n    # create list to track positions visited\n    # visited = []\n    # create variable to track number of visits\n    visits = 0\n\n    # create pointers for the list\n    p = 0 # current index position\n    p_n = p + 1 # nearest next index position available to visit\n    p_n_n = p + 2 # furthest index position available to visit\n\n    while p_n_n < len(c):\n        if c[p_n_n] == 0:\n            p = p_n_n\n            \n            # visited.append(p)\n            visits += 1\n\n        else:\n            p = p_n\n            \n            # visited.append(p)\n            visits += 1\n\n        p_n = p + 1\n        p_n_n = p + 2\n\n    if p < len(c) - 1:\n        p = p_n\n        # visited.append(p)\n        visits += 1\n\n    return visits\n\n\n\nif __name__ == '__main__':\n\n    #jumpingOnClouds([])\n    print(jumpingOnClouds([0])) # expect (1, [0])\n    print(jumpingOnClouds([0, 1 , 0, 0, 0, 1, 0])) # expect (3, [2, 4, 6])\n\n\"\"\"\nTime complexity: O(n)\nSpace complexity: O(1) = constant space\n    fixed not extra space\n\n\"\"\"\n","repo_name":"JKinsler/lists_trees_graphs","sub_path":"arrays/jumping_on_the_clouds.py","file_name":"jumping_on_the_clouds.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"37119478582","text":"# Stationery Shop inventory\r\nitems = {\r\n    \"pen\": {\"quantity\":10, \"price\": 2.0},\r\n    \"pencil\": {\"quantity\":12, \"price\":1.5},\r\n    \"eraser\": {\"quantity\":5, \"price\":0.5},\r\n    \"ruler\": {\"quantity\":7, \"price\":1.2},\r\n    \"notebook\": {\"quantity\":20, \"price\":3.5},\r\n    \"folder\": {\"quantity\":15, \"price\":2.0},\r\n    \"stapler\": {\"quantity\":8, \"price\":3.5},\r\n    \"scissors\": {\"quantity\":10, \"price\":4.0},\r\n    \"tape\": {\"quantity\":6, \"price\":1.5},\r\n    \"glue\": {\"quantity\":4, \"price\":2.0}\r\n}\r\n\r\n# Print all items and their quantities\r\nfor item, details in items.items():\r\n    print(f\"{item} - Quantity: {details['quantity']} Price: {details['price']}\")\r\n\r\n# Allow user to select an item\r\nitem_name = input(\"Enter the name of the item you want to purchase: \")\r\nitem_quantity = int(input(\"Enter the Quantity of the item you want to purchase: \"))\r\n\r\n# Check if item is in inventory\r\nif item_name in items:\r\n    if item_quantity <= items[item_name]['quantity']:\r\n        item_price = item_quantity * items[item_name]['price']\r\n        print(f\"The cost of {item_quantity} {item_name} is {item_price}\")\r\n        print(\"Thank you for buying!\")\r\n    else:\r\n        print(f\"We have only {items[item_name]['quantity']} {item_name} in stock\")\r\nelse:\r\n    print(f\"Item {item_name} is not available in our inventory\")\r\n    print(\"Thank you for buying!\")\r\n","repo_name":"sabarishmettu/Simple-python-project","sub_path":"Stationery Shop.py","file_name":"Stationery Shop.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"28257015437","text":"import obspy\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport subprocess\n\n\ndef rm_instr_resp(st, respfile, resp_type, pre_filt, ytype):\n    \"\"\"\n    remove instrument response from seismic data\n    :param st: seismic data, obspy.stream\n    :param respfile: response file\n    :param resp_type: type of response file, now support 'pz'\n    :param pre_filt: pre filter [f1, f2, f3, f4]\n    :param ytype: type of y value. 'none':displacement, 'vel':velocity\n    :return:\n    \"\"\"\n    if resp_type == 'pz':\n        pz = read_pole_zero(respfile)\n        st.simulate(paz_remove=pz, pre_filt=pre_filt)\n        if ytype == 'none':\n            pass\n        elif ytype == 'vel':\n            st[0].data = np.diff(st[0].data) / st[0].stats.delta\n        else:\n            raise ValueError('unknown ytype %s' % ytype)\n    else:\n        raise ValueError('unknown response type %s' % resp_type)\n\n\ndef read_pole_zero(pzfile):\n    \"\"\"\n    read pole zero file\n    :param pzfile: pz filename\n    :return: dict, 'zeros', 'poles', 'constant'\n    \"\"\"\n    pz = {}\n    f = open(pzfile, 'r')\n    line = f.readline()\n    while line[0] == '*':\n        line = f.readline()\n    # read zeros\n    num_zeros = int(line.split()[1])\n    zeros = []\n    for _ in range(num_zeros):\n        line = f.readline()\n        temp = list(map(float, line.split()))\n        zeros.append(complex(temp[0], temp[1]))\n    pz['zeros'] = zeros\n    # read poles\n    line = f.readline()\n    num_poles = int(line.split()[1])\n    poles = []\n    for _ in range(num_poles):\n        line = f.readline()\n        temp = list(map(float, line.split()))\n        poles.append(complex(temp[0], temp[1]))\n    pz['poles'] = poles\n    # read constant\n    constant = float(f.readline().split()[1])\n    pz['sensitivity'] = constant\n    pz['gain'] = 1.0\n    f.close()\n    return pz\n\n\ndef func_pz(pz):\n    \"\"\"\n    get response function H(f) from pz info\n    :param pz: output of read_pole_zero\n    :return: function H(f)\n    \"\"\"\n    def H(f):\n        s = f * complex(0, 1) * 2 * np.pi\n        result = pz['sensitivity']\n        for z in pz['zeros']:\n            result *= (s - z)\n        for p in pz['poles']:\n            result /= (s - p)\n        return result\n    return H\n\n\ndef plot_resp(resp_func, freqs):\n    \"\"\"\n    plot response from response function\n    :param resp_func: H(f)\n    :param freqs: frequency points\n    :return: None\n    \"\"\"\n    resp = resp_func(freqs)\n    amps = np.abs(resp)\n    phas = np.angle(resp, deg=True)\n    fig, ax = plt.subplots(nrows=2, sharex=True)\n    ax[0].plot(freqs, amps)\n    ax[0].set_title(\"Magnitude Response\")\n    ax[1].plot(freqs, phas)\n    ax[1].set_title(\"Phase Response\")\n    ax[1].set_xlabel(\"Frequency (Hz)\")\n    ax[0].grid()\n    ax[1].grid()\n    plt.show()\n\n\ndef benchmark_pz(sac, pz, pre_filt, xtype='none'):\n    \"\"\"\n    使用solo数据时,对比结果主要取决于pre_filt参数,当设置为0.1-8时,吻合很好,\n    平均误差小于0.1%,当带通滤波器f2设为更小值时,误差会更大,猜测是因为二者实现的滤波器不同导致的,\n    去除仪器响应本身是基本可靠的。\n    \"\"\"\n    st0 = obspy.read(sac)\n    st0.detrend(\"demean\")\n    st0.detrend(\"linear\")\n    st0.taper(max_percentage=0.05)\n    rm_instr_resp(st0, pz, 'pz', pre_filt, xtype)\n    st0[0].write('temp0.sac', format='SAC')\n    p = subprocess.Popen(['sac'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n    s = \"\"\n    s += \"r %s\\n\" % sac\n    s += \"rmean;rtrend\\n\"\n    s += \"taper\\n\"\n    s += \"transfer from polezero s %s freq %f 
%f %f %f\\n\" % ((pz, ) + tuple(pre_filt))\n    if xtype == 'vel':\n        s += 'dif\\n'\n    s += \"w temp1.sac\\n\"\n    s += \"q\\n\"\n    print(s)\n    p.communicate(s.encode())\n    st1 = obspy.read('temp1.sac')\n    d0 = st0[0].data\n    d1 = st1[0].data\n    print('max value\\npython %f, sac %f' % (np.max(d0), np.max(d1)))\n    d0 = d0 / np.max(d0)\n    d1 = d1 / np.max(d1)\n    dmean = np.mean(abs(d0 - d1))\n    dmax = np.max(abs(d0 - d1))\n    print(\"mean diff %f\\nmax diff %f\" % (dmean, dmax))\n    # subprocess.run('rm temp1.sac temp0.sac', shell=True)\n\n\nif __name__ == '__main__':\n    H = func_pz(read_pole_zero('./respfile'))\n    plot_resp(H, np.arange(0, 100, 0.1))","repo_name":"HouseJaay/gvis","sub_path":"response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
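`read_pole_zero` in the record above implies a concrete file layout: optional `*` comment lines, a `ZEROS n` header followed by n real/imag pairs, a `POLES n` block, then a `CONSTANT` line. A round-trip sketch with an invented instrument, assuming the file is importable as `response`:

```python
import os
import tempfile

from response import read_pole_zero, func_pz  # the module above

pz_text = (
    "* sample instrument (made-up values)\n"
    "ZEROS 2\n"
    "0.0 0.0\n"
    "0.0 0.0\n"
    "POLES 2\n"
    "-0.037 0.037\n"
    "-0.037 -0.037\n"
    "CONSTANT 1.2e9\n"
)

with tempfile.NamedTemporaryFile("w", suffix=".pz", delete=False) as f:
    f.write(pz_text)
    path = f.name

pz = read_pole_zero(path)
os.unlink(path)
print(pz["zeros"], pz["poles"], pz["sensitivity"])
print(abs(func_pz(pz)(1.0)))  # |H(f)| at 1 Hz
```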
+{"seq_id":"34097538152","text":"import py5\nimport cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\npy5_img = None\n\ndef setup():\n    py5.size(640, 480)\n\n\ndef draw():\n    global py5_img\n    py5.background(200, 0, 0)\n    if py5.frame_count % 30 == 0:\n        py5.window_title(f'FR: {py5.get_frame_rate()}')\n    ret, frame = cap.read()  # frame is a numpy array\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    edges = cv2.Canny(gray, 100, 80)\n    # create a binary mask for pixels with brightness > 250\n    mask = np.zeros_like(gray)\n    mask[gray < 128] = 255\n    # add transparency to the blended image based on the mask\n    blended_rgb_npa = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)\n    blended_rgb_npa[:, :, 3] = mask\n    # display image\n    py5_img = py5.create_image_from_numpy(blended_rgb_npa, 'BGRA', dst=py5_img)\n    py5.image(py5_img, 0, 0)\n\n\ndef exiting():\n    print('over and out')\n    cap.release()\n\n\npy5.run_sketch()\n","repo_name":"villares/sketch-a-day","sub_path":"2023/sketch_2023_04_13/sketch_2023_04_13.py","file_name":"sketch_2023_04_13.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"71"}
+{"seq_id":"7895369789","text":"#9. Write a program which display first 10 even numbers on screen.\r\n#Output : 2 4 6 8 10 12 14 16 18 20\r\n\r\ndef even(iNo):\r\n     icnt1=icnt2=1\r\n     while icnt2<=iNo:\r\n       if icnt1%2==0:\r\n          print(icnt1)  \r\n       icnt2=icnt2+1\r\n       icnt1=icnt1+1\r\n\t \r\n\r\ndef main():\r\n     print(\"enter num\")\r\n     iValue=int(input())\r\n     even(iValue)\r\n  \r\nif __name__==\"__main__\":\r\n  main()  \r\n","repo_name":"kirtibhole/practice-programs","sub_path":"Python/Assignment_1/Assign1_9.py","file_name":"Assign1_9.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"9164000690","text":"import urllib.parse\n\n\ndef convert_to_seconds(value: str) -> float:\n\t\"\"\"\n\tParse time duration string (e.g. \"3h\", \"20m\" or \"1y\") and convert it into seconds.\n\t\"\"\"\n\tvalue = value.replace(\" \", \"\")\n\n\ttry:\n\t\t# Second condition in each IF is for backward compatibility\n\t\tif value.endswith(\"ms\"):\n\t\t\tvalue = float(value[:-2]) / 1000.0\n\t\telif value.endswith(\"y\") or value.endswith(\"Y\"):\n\t\t\tvalue = float(value[:-1]) * 86400 * 365\n\t\telif value.endswith(\"M\"):\n\t\t\tvalue = float(value[:-1]) * 86400 * 31\n\t\telif value.endswith(\"w\") or value.endswith(\"W\"):\n\t\t\tvalue = float(value[:-1]) * 86400 * 7\n\t\telif value.endswith(\"d\") or value.endswith(\"D\"):\n\t\t\tvalue = float(value[:-1]) * 86400\n\t\telif value.endswith(\"h\"):\n\t\t\tvalue = float(value[:-1]) * 3600\n\t\telif value.endswith(\"m\"):\n\t\t\tvalue = float(value[:-1]) * 60\n\t\telif value.endswith(\"s\"):\n\t\t\tvalue = float(value[:-1])\n\t\telse:\n\t\t\tvalue = float(value)\n\texcept ValueError as e:\n\t\traise ValueError(\"'{}' is not a valid time specification: {}.\".format(value, e))\n\n\treturn value\n\n\ndef validate_url(input_url: str, scheme):\n\t# Remove leading and trailing whitespaces before parsing\n\turl = urllib.parse.urlparse(input_url.strip())\n\n\tif url.path.endswith(\"/\"):\n\t\turl = url._replace(path=url.path[:-1])\n\n\tif scheme is None:  # Scheme doesn't get checked\n\t\treturn url.geturl()\n\telif isinstance(scheme, tuple):  # Supports tuple\n\t\tif url.scheme in scheme:\n\t\t\treturn url.geturl()\n\telif scheme == url.scheme:\n\t\treturn url.geturl()\n\telse:\n\t\tif url.scheme:\n\t\t\traise ValueError(\"'{}' has an invalid scheme: '{}'\".format(url.geturl(), url.scheme))\n\t\telif not url.scheme:\n\t\t\traise ValueError(\"'{}' does not have a scheme\".format(url.geturl()))\n\t\telse:\n\t\t\traise ValueError(\"'{}' has an invalid scheme\".format(url.geturl()))\n\treturn url.geturl()\n","repo_name":"AliPMcG/asab","sub_path":"asab/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"}
+{"seq_id":"10541732612","text":"from subprocess import *\nfrom nltk.corpus import wordnet as wn\nfrom nltk.stem.porter import *\nfrom pprint import pprint\nfrom bs4 import BeautifulSoup\nimport nltk\n\n# Method that takes a sentence, tokenizes and returns a list of tokens, posTags for the tokens and the morphedTokens\ndef tokenize(sentence):\n    tokens = nltk.word_tokenize(sentence)\n    processedTokens = []\n    for token in tokens:\n        if \"-\" in token:\n            processedTokens.extend(token.split(\"-\"))\n        elif \")\" in token or \"(\" in token:\n            token = token.replace(\"(\",\"\").replace(\")\",\"\")\n            processedTokens.append(token)\n        else:\n            processedTokens.append(token)\n    tokens = filter(None, processedTokens) #amos added\n    posTags = nltk.pos_tag(tokens)\n    morphTokens = [wn.morphy(token) for token in tokens]\n    #If morphed tokens are None, we use the original token\n    morphTokens = [token.decode('utf-8').encode('ascii', 'ignore').lower() if token is not None else tokens[i].lower() for (i,token) in enumerate(morphTokens)]\n    return tokens, posTags, morphTokens\n\n# Method to chunk the NER output\n# Author: Suruchi Shah\n# Example (u'Thomas', u'PERSON'), (u'Stamford', u'PERSON'), (u'Raffles', u'PERSON') becomes (u'Thomas Stamford Raffles', u'PERSON')\ndef chunkNEROutput(ner_output):\n    chunked, pos = [], \"\"\n    prev_tag=''\n    prev_tag = \"\"\n    for i, word_pos in enumerate(ner_output):\n        word, pos = word_pos\n        if pos in ['PERSON', 'ORGANIZATION', 'LOCATION'] and pos == prev_tag:\n            chunked[-1]+=word_pos\n        else:\n            chunked.append(word_pos)\n\n        prev_tag = pos\n    \n    clean_chunked = [tuple([\" \".join(wordpos[::2]), wordpos[-1]]) if len(wordpos)!=2 else wordpos for wordpos in chunked]\n    return clean_chunked\n\n# Method that converts HTML text into Raw text using beautiful soup\ndef convertHTMLtoRawText(fileLocation):\n    markup = open(fileLocation)\n    soup = BeautifulSoup(markup, \"html.parser\")\n    content = re.sub(r'[^\\x00-\\x7F]+',' ', soup.get_text())\n    return str(content.encode('utf-8').decode('ascii', 'replace'))\n\n# Method that takes in a word, original word and outputs the antonyms set for the word\n# Author: Suruchi Shah\ndef antonyms_as_set(input_word, original_word):\n    antonyms = get_antonyms_as_set(input_word)\n    if (original_word is not None) and (antonyms is None or len(antonyms)==0):\n        return get_antonyms_as_set(original_word)\n\n    return antonyms\n\n# Method that takes in a word and outputs the antonyms set for the word\n# Author: Suruchi Shah\ndef get_antonyms_as_set(input_word):\n    if input_word is None:\n        return set()\n\n    antonyms = set()\n    synonyms = wn.synsets(input_word)\n    \n    for syn in synonyms:\n        lemmas = syn.lemmas()\n        \n        for lem in lemmas:\n            for ant in lem.antonyms():\n                if wn.morphy(ant.name()) is not None:\n                    antonyms.add(str(wn.morphy(ant.name()).encode('utf-8').decode('ascii', 'ignore')))\n    return antonyms\n\n# Method that takes in a word, original word and outputs the synonyms set for the word\n# Author: Suruchi Shah\ndef synonyms_as_set(input_word, original_word):\n    synonyms = get_synonyms_as_set(input_word)\n    if (original_word is not None) and (synonyms is None or len(synonyms)==0):\n        return get_synonyms_as_set(original_word)\n\n    return synonyms\n\n# Method that takes in a word, original word and 
outputs the synonyms set for the word\n# Author: Suruchi Shah\ndef get_synonyms_as_set(input_word):\n    if input_word is None:\n        return set()\n\n    synonyms = set()\n    synSets = wn.synsets(input_word)\n    for syn in synSets:\n        for lemma_name in syn.lemma_names():\n            if wn.morphy(lemma_name) is not None:\n                synonyms.add(str(wn.morphy(lemma_name).encode('utf-8').decode('ascii','ignore')))\n    return synonyms\n\n# Calculates Levenshtein Distance between two strings\n# Source: Wikipedia\n# Modified By: Suruchi Shah\ndef levenshtein(s1, s2):\n    if len(s1) < len(s2):\n        return levenshtein(s2, s1)\n\n    # len(s1) >= len(s2)\n    if len(s2) == 0:\n        return len(s1)\n    \n    previous_row = range(len(s2) + 1)\n    for i, c1 in enumerate(s1):\n        current_row = [i + 1]\n        for j, c2 in enumerate(s2):\n            insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n            deletions = current_row[j] + 1     # than s2\n            substitutions = previous_row[j] + (c1 != c2)\n            current_row.append(min(insertions, deletions, substitutions))\n        previous_row = current_row\n    return previous_row[-1]\n\n# Gets the dictionary containing campus locations from the campusLocations.txt file\n# The .txt file should be in the same folder as this script\ndef getCampusLocation(campusLocationsFile):\n    locations = set()\n    with open(campusLocationsFile) as f:\n        for line in f:\n            parts = line.split(\"\\t\")\n            for i in (0, len(parts)-1):\n                if len(parts[i].strip()) > 0:\n                    locations.add(parts[i].strip().lower())\n    return locations\n\n# This method calls Kenton Lee's UWTime jar and extracts time from the resulting XML\n# Author: Suruchi Shah\ndef getTimeStamp(inputString):\n\n    process = Popen(['java','-jar', 'uwtime-1.0.0.jar', inputString], stdout=PIPE, stderr=PIPE)\n    stdout, stderr = process.communicate()\n\n    # STDOUT is in XML Format. The following code extracts the time.\n    soup = BeautifulSoup(stdout, \"html.parser\")\n    for line in soup.find_all('text'):\n        for time in line.find_all('timex3'):\n            return str(time.get_text().encode('utf-8').decode('ascii', 'ignore'))\n    return None\n","repo_name":"azariaa/ENC","sub_path":"PythonScripts/helperFunctions.py","file_name":"helperFunctions.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73933515430","text":"from elasticsearch import Elasticsearch\r\nimport pandas as pd\r\n\r\n#metric\r\ndef metric(x):\r\n    return x[0]*0.5 + x[1]*0.5\r\n\r\ndef search():\r\n    es = Elasticsearch([{'host': 'localhost', 'port': 9200}])\r\n\r\n    books= pd.DataFrame() #create an empty dataframe\r\n    \r\n    while books.empty:\r\n        user_title=input(\"Enter Title: \")\r\n    \r\n        resp= es.search(index='books',body={\"query\":{ \"match\": {\"book_title\": user_title  } },\"sort\": [ {\"_score\" : { \"order\":\"desc\" }} ]  }    , size=10000)\r\n\r\n        books = pd.json_normalize(resp['hits']['hits']) #to take results into flat tables\r\n\r\n    scores = books[\"_score\"]\r\n    isbn = books[\"_source.isbn\"]\r\n    books = books.set_index(\"_source.isbn\")  #to kanoyme euretirio grammhs\r\n\r\n    results= pd.DataFrame()\r\n\r\n    #-----User Rating-----\r\n    search_user_rating = [0]*len(scores)\r\n    while results.empty:\r\n        userId=int(input(\"Enter Id: \"))\r\n        res= es.search(index='ratings',body={\"query\":{ \"match\": {\"uid\": userId  } }})\r\n        results = pd.json_normalize(res['hits']['hits'])\r\n\r\n    for i in range(len(isbn)):\r\n        for j in range(len(results[\"_source.isbn\"])):\r\n            if isbn[i] == results[\"_source.isbn\"][j]:\r\n                search_user_rating[i] = results[\"_source.rating\"][j]\r\n    \r\n    for i in range(len(books)):\r\n        books[\"_score\"][i] = metric([scores[i], search_user_rating[i]])\r\n\r\n    n = 10\r\n    books = books.sort_values(by=['_score'], ascending=False)\r\n    print(\"Best Match with user with ID:\" ,userId)\r\n    dfFreq = pd.DataFrame(books[[\"_score\", \"_source.book_title\"]])\r\n    print(dfFreq.head(int(len(dfFreq)*(n/100))))\r\n    #print(dfFreq)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    search()","repo_name":"Dionusia/Information-Retrival","sub_path":"quest1.py","file_name":"quest1.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"21358343750","text":"import numpy as np\n\n# Trick to convert data to useful numpy ndarray\nwith open('day_5_data.txt') as f:\n    data = np.loadtxt((x.replace(',',' -> ') for x in f), delimiter=' -> ', dtype=np.int16)\n    \n    amount_of_values = data.shape[0]\n    \n    data = data.reshape(amount_of_values, 2, 2)\n\n# Points split in two arrays\nvalue_1 = data[:, 0]\nvalue_2 = data[:, 1]\n\n# Extract all lines that are horizontal, vertical or diagonal\nis_hor = value_1[:, 0] == value_2[:, 0]\nis_ver = value_1[:, 1] == value_2[:, 1]\nis_diag = np.absolute(value_1[:, 0] - value_2[:, 0]) == np.absolute(value_1[:, 1] - value_2[:, 1])\n\ndata = data[is_hor | is_ver | is_diag]\n\n# x and y values in points\nx_values = data[:, :, 0]\ny_values = data[:, :, 1]\n\n# Create drawing board\nboard = np.zeros([x_values.max() + 1, y_values.max() + 1], dtype=np.int16)\n\n# For each line, add +1 to board at interpolation points between endpoints\nfor i, (x_value, y_value) in enumerate(zip(x_values, y_values)):\n    \n    # If this line is diagonal:\n    if is_diag[i]:\n        x_diff = x_value[1] - x_value[0]\n        y_diff = y_value[1] - y_value[0]\n\n        x_dir = np.int8(x_diff / abs(x_diff))\n        y_dir = np.int8(y_diff / abs(y_diff))        \n\n        # board[#] where # the range of x1 to x2 and y1 to y2\n        board[np.arange(y_value[0], y_value[1] + y_dir, y_dir), np.arange(x_value[0], x_value[1] + x_dir, x_dir)] += 1\n    # Else the line is horizontal or vertical:\n    else:\n        x1 = x_value.min()\n        x2 = x_value.max()\n        y1 = y_value.min()\n        y2 = y_value.max()\n\n        board[y1:y2 + 1, x1:x2 + 1] += 1\n\n# Calculate amount of values above 1\nresult = board[board > 1].size\n\nprint(board)\nprint(result)","repo_name":"JobdeVogel/Advent-of-Code-2021","sub_path":"day_5_p2.py","file_name":"day_5_p2.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73630812070","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 28 18:53:43 2019\r\n\r\n@author: annameng\r\nGiven a non-negative integer num represented as a string, \r\nremove k digits from the number so that the new number is the smallest possible.\r\n\r\nNote:\r\nThe length of num is less than 10002 and will be ≥ k.\r\nThe given num does not contain any leading zero.\r\nExample 1:\r\n\r\nInput: num = \"1432219\", k = 3\r\nOutput: \"1219\"\r\nExplanation: Remove the three digits 4, 3, and 2 to form the new number 1219 which is the smallest.\r\nExample 2:\r\n\r\nInput: num = \"10200\", k = 1\r\nOutput: \"200\"\r\nExplanation: Remove the leading 1 and the number is 200. Note that the output must not contain leading zeroes.\r\nExample 3:\r\n\r\nInput: num = \"10\", k = 2\r\nOutput: \"0\"\r\nExplanation: Remove all the digits from the number and it is left with nothing which is 0.\r\n\"\"\"\r\n\r\nclass Solution:\r\n    # use stack, first in last out\r\n    # beat 62%\r\n    def removeKdigits(self, num: str, k: int) -> str:\r\n        if k >= len(num):\r\n            return '0'\r\n        \r\n        stack = []\r\n        removedcount = 0\r\n        \r\n        for ch in num:\r\n            while stack and ch < stack[-1] and removedcount < k:\r\n                stack.pop()\r\n                removedcount += 1\r\n            stack.append(ch)\r\n            \r\n        for _ in range(k-removedcount):\r\n            stack.pop()\r\n            \r\n        return str(int(''.join(stack)))","repo_name":"annaymj/LeetCode","sub_path":"RemoveKDigits.py","file_name":"RemoveKDigits.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"13211355857","text":"'''\n-Initially the program will pick 6 random numbers as the 'winner'.\n-Then try playing pick6 100,000 times, with the ticket cost and payoff below\n-A ticket contains 6 numbers, 1 to 99, and the number of matches between the ticket and the winning numbers determines the payoff\n\n\nCalculate your net winnings (the sum of all expenses and earnings):\n-a ticket costs $2\n-if 1 number matches, you win $4\n-if 2 numbers match, you win $7\n-if 3 numbers match, you win $100\n-if 4 numbers match, you win $50,000\n-if 5 numbers match, you win $1,000,000\n-if 6 numbers match, you win $25,000,000\n\nSTEPS:\n1. Generate a list of 6 random numbers representing the winning tickets\n2. Start your balance at 0\n3. Loop 100,000 times, for each loop:\n4. Generate a list of 6 random numbers representing the ticket\n5. Subtract 2 from your balance (you bought a ticket)\n6. Find how many numbers match\n7. Add to your balance the winnings from your matches\n8. After the loop, print the final balance\n\n\n-One function you might write is pick6() which will generate a list of 6 random numbers, which can then be used for both the winning numbers and tickets.\n-Another function could be num_matches(winning, ticket) which returns the number of matches between the winning numbers and the ticket.\n'''\n\nimport random\n\nwinning_ticket = []\n\nfor i in range(6):\n    winning_ticket.append(random.randint(1,100))\nprint(f\"The winning ticket is {winning_ticket}\")\n\n\n# Calculate Net Winnings 2. Start your balance at 0\nearnings = [0, 4, 7, 100, 50000, 1000000, 25000000]\nmatches = 0\nbalance = 0\n# loop thru lottery_ticket and match against winning_ticket AND loop thru matches and balance 100,100 times\ntimes_played = 0\n\nwhile times_played < 100000:  # TO TEST CHANGE TO < 100 or COMMENT OUT PRINT STATEMENT\n    lottery_ticket = []\n    times_played += 1\n    balance -= 2\n    for i in range(6):\n        lottery_ticket.append(random.randint(1,100))\n    print(lottery_ticket)\n\n\n    if lottery_ticket[0] == winning_ticket[0]:\n        matches += 1\n        print(f\"Yay! You have {matches} matches\")\n    elif lottery_ticket[1] == winning_ticket[1]:\n        matches += 1\n        print(f\"Yay! You have {matches} matches\")\n    elif lottery_ticket[2] == winning_ticket[2]:\n        matches += 1\n        print(f\"Yay! You have {matches} matches\")\n    elif lottery_ticket[3] == winning_ticket[3]:\n        matches += 1\n        print(f\"Yay! You have {matches} matches\")\n    elif lottery_ticket[4] == winning_ticket[4]:\n        matches += 1\n        print(f\"Yay! You have {matches} matches\")\n    elif lottery_ticket[5] == winning_ticket[5]:\n        matches += 1\n        print(f\"Yay! You have {matches} matches\")\n    # DO I NEED TO LOOP THROUGH THIS 100,000 TIMES OR PUT IT IN THE WHILE OR FOR LOOP???\n    else:\n        print(f\"You have {matches} matches\")  # this should read \"0 times\" until there's at least 1 match in this loop\nprint(f\"Alright! 
You have a total of {matches} matches\")\n\n\nif matches == 0:\n    winnings = earnings[0]\n    print(f\"You earned ${winnings}\")\n    #you win $0\nelif matches == 1:\n    winnings = earnings[1]\n    print(f\"You earned ${winnings}\")\n    #you win $4\nelif matches == 2:\n    winnings = earnings[2]\n    print(f\"You earned ${winnings}\")\n    #you win $7\nelif matches == 3:\n    winnings = earnings[3]\n    print(f\"You earned ${winnings}\")\n    #you win $100\nelif matches == 4:\n    winnings = earnings[4]\n    print(f\"You earned ${winnings}\")\n    #you win $50,000\nelif matches == 5:\n    winnings = earnings[5]\n    print(f\"You earned ${winnings}\")\n    #you win $1,000,000\nelif matches == 6:\n    winnings = earnings[6]\n    print(f\"You earned ${winnings}\")\n    #you win $25,000,000\nelse:\n    winnings = earnings[6]\n    print(f\"You have more than 6 matches! To be exact you have {matches} matches. You earned ${winnings}\") #you win $25,000,000\nprint(f\"You have won a total of ${winnings}\")\n\n\n\n#after 100,000 purchased tix at $2 each:\nbalance = winnings - 200000\nprint(\"Your expenses were $200,000 due to spending $2 per 100,000 lottery tickets.\")\nprint(f\"After 100,000 purchased lottery tickets, your balance/net winnings are ${balance}\")\n#ROI = (earnings - expenses)/expenses. Calculate your ROI, print it out along with your earnings and expenses.\nROI = (balance / 200000)\nprint(f\"Your ROI is ${ROI}\")","repo_name":"johannah-joy/lottery-pick6","sub_path":"lottery-pick6.py","file_name":"lottery-pick6.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"22742206176","text":"import sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\n\nwb = []\nfor i in range(4):\n    wb.append(list(\"WBWBWBWB\"))\n    wb.append(list(\"BWBWBWBW\"))\n\nbw = []\nfor i in range(4):\n    bw.append(list(\"BWBWBWBW\"))\n    bw.append(list(\"WBWBWBWB\"))\n\na = []\ncountwb = 0\ncountbw = 0\nanswer = []\nstep1 = 0\nstep2 = 0\n\nfor i in range(n):\n    a.append(list(input()))\n\nfor i in range(n-7):\n    for j in range(m-7):\n        for k in range(i,i+8):\n            for l in range(j, j+8):\n                if a[k][l] != wb[step1][step2]:\n                    countwb += 1\n                elif a[k][l] != bw[step1][step2]:\n                    countbw += 1\n                step2 += 1\n            step1 += 1\n            step2 = 0\n        step1 = 0\n\n        answer.append(countwb)\n        answer.append(countbw)\n        countwb = 0\n        countbw = 0\n\nprint(min(answer))","repo_name":"2Ho0/BOJ","sub_path":"BOJ/1018.py","file_name":"1018.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"41723626131","text":"\"\"\"\nServer for API \n\"\"\"\nimport contextlib\nimport threading\nimport time\n\nimport uvicorn\n\n\nclass ServerConfig(uvicorn.Config):\n    \"\"\"\n    Server configuration\n    \"\"\"\n\n    def __init__(\n        self,\n        app,\n        host: str = \"0.0.0.0\",\n        port: int = 8190,\n        log_level: str = \"info\",\n    ):\n        super().__init__(app, host=host, port=port, log_level=log_level)\n\n\nclass Server(uvicorn.Server):\n    \"\"\"\n    Uvicorn server object\n    \"\"\"\n\n    @contextlib.contextmanager\n    def run_in_thread(self):\n        \"\"\"\n        Override the uvicorn method to allow running server in a thread, for example in a notebook.\n        \"\"\"\n        thread = threading.Thread(target=self.run)\n        thread.start()\n        try:\n            while not self.started:\n                time.sleep(1e-3)\n            yield\n        finally:\n            self.should_exit = True\n            thread.join()\n\n    def start_in_thread(self):\n        thread = threading.Thread(target=self.run)\n        thread.start()\n        while not self.started:\n            time.sleep(1e-3)\n        return thread\n","repo_name":"ml4ai/funman","sub_path":"src/funman/api/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"42087470031","text":"# 599. Minimum Index Sum of Two Lists\n\nclass Solution(object):\n    def findRestaurant(self, list1, list2):\n        \"\"\"\n        :type list1: List[str]\n        :type list2: List[str]\n        :rtype: List[str]\n        \"\"\"\n        index=len(list1)+len(list2)\n        re=[]\n        for i in range(len(list1)):\n            if list1[i] in list2:\n                cur=i+list2.index(list1[i])\n                if cur \")\n    if repeatvalue.isdigit() and int(repeatvalue) >= 1:\n        repeatvalue = int(repeatvalue)\n        break\n    else:\n        print(\"Invalid answer. Please type any of the natural numbers except for zero.\")\n        continue\n\n#Sample play loop\nfor repeatvalue in range (0, repeatvalue):\n    for sample in samples:\n        #random choice of sleep time\n        rndtimechoice = random.randint(1, 3)\n        if rndtimechoice == 1:\n            rndtime = 0.25\n        else:\n            if rndtimechoice == 2:\n                rndtime = 0.5\n            else:\n                rndtime = 1\n\n        print(sample)\n        sample.play()\n        time.sleep(rndtime) #sleep for rndtime seconds\n","repo_name":"GeertRoks/CSD2","sub_path":"CSD2a/AntwoordenExamples/02_timedPlaybackantw.py","file_name":"02_timedPlaybackantw.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"6112434990","text":"# 1192ms\n\ntarget  = input()\nm = int(input())\nfail = list(input()) if m else []\n\nBUTTON = ['0', '1', '2', '3', '4' ,'5', '6', '7', '8', '9']\n\ndef dfs(channel, count):\n    if channel == target: return count\n    if len(channel) > len(target): return float('inf')\n\n    ret = float('inf')\n    for btn in BUTTON:\n        if btn in fail: continue\n        nxt = channel + btn\n        ret = min(ret, count + abs(int(target) - int(nxt)) + 1)\n        ret = min(ret, dfs(nxt, count + 1))\n    \n    return ret\n\nprint(min(dfs(\"\", 0), abs(int(target)-100)))","repo_name":"eello/solve-algorithm","sub_path":"baekjoon/브루트포스_알고리즘/1107-리모컨_v1.py","file_name":"1107-리모컨_v1.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4613669209","text":"from pygeo import *\nv=display(scale=50,axis=False,width=800,height=600,panel=False,view_drag=False)\n\n\n\n#the double points of the co-basal projectivity and their line\nX=FreePoint(-29,-18,label=\"X\")\ny=Point(30,-17)\nxy=Line(X,y)\nY=Slider(xy,ratio=1.1,label=\"Y\")\n\n#two points on the line determining, with the double points, a crossratio for the\n#projectivity\nA=Slider(xy,ratio=.2,label=\"A\")\nAp=Slider(xy,ratio=.225,label=\"A'\")\n\n\n# the array of points determined by applying the crossratio progressively\ngm=GrowthMeasure(X,Y,A,Ap,density=60,color=BLUE,level=1)\n\n#two random points and their line\na=FreePoint(-11,29,color=CYAN)\nb=FreePoint(27,7,color=CYAN)\nab=Line(a,b,color=WHITE,linewidth=.05)\n\n# the line through double point \"Y\" parrallel to the random line\nYp=Line(Y,ab,color=WHITE,linewidth=.05)\n\n\n\n# a point on the line through double point \"Y\"\no=Slider(Yp)\n\n# the array of lines connecting point \"o\" to the points\n# of the co-basal projectivity \"gm\"\n\nla=LineArray(o,gm,color=WHITE)\n\n# an array of points on random line \"ab\" determined by the\n# projection of the points of the growth measure through\n# point \"o\", i.e. the points of intersection on the line with\n# the array of lines through \"o\" and the points of the growth measure.\n\npa1=PointArray(la,ab,extend=True,color=RED)\n\n\n# the line from double point \"X\", and projected through \"o\"\n# to line \"ab\"\nXo=Line(X,o,color=BLACK)\nxa=Intersect(Xo,ab,color=RED)\n\n\n# for visual emphasis, we repeat the projection through \"o\" to\n# an additional line on the plane parallel with line \"ab\".\n\nc=FreePoint(-30,-4,color=CYAN)\ncp=Line(c,ab,color=WHITE,linewidth=.05)\npa2=PointArray(la,cp,extend=True,color=RED)\nxc=Intersect(Xo,cp,color=RED)\n\n# the crossratio \"multiplier\" inplicit in the postion of the\n# double points and points \"A\" and \"A'\"\n\nmultiplier =gm.multiplier()\n#print \"multiplier is %f\" %multiplier\n\n\n# we calculate the ratio of the distance from double point \"X\"\n# of the successive points of the point array projected to line \"ab\"\n# and should find them equal to the multiplier, since double point \"Y\",\n# by construction, has been projected to the point at infinity on\n# line \"ab\".\n\nfor i in range(len(pa1.points)-1):\n    d1 =  xa.distance(pa1.points[i+1])\n    d2 =  xa.distance(pa1.points[i])\n    if multiplier < 0:\n       d1=-d1\n\n\n# the purely synthetic construction of the same growth measure\n# determined by algorythm in the PyGeo code for the GrowthMeasure\n# primitive\n\ndef Synthetic_Growth_Measure(line,X,Y,a,b,steps=20,level=1):\n    O=Point(-24,24,label=\"O\",level=level)\n    OY=Line(O,Y,level=level,color=MAGENTA)\n    Op=Slider(OY,ratio=.15,label=\"O'\",level=level)\n    Oa=Line(O,a,level=level,color=MAGENTA)\n    Ob=Line(O,b,level=level,color=MAGENTA)\n    Opb=Line(Op,b,level=level,color=MAGENTA)\n    p1=Intersect(Oa,Opb,level=level)\n    Xp1=Line(X,p1,level=level,color=MAGENTA)\n    ab=Line(a,b,level=6).length()\n    aY=Line(a,Y,level=6).length()\n    bX=Line(b,X,level=6).length()\n    YX=Line(Y,X,level=6).length()\n    points=[]\n    def step(seed,steps,level=level):\n\n       if steps:\n          l=Line(O,seed,level=level,color=BLACK,linewidth=.08)\n          i=Intersect(l,Xp1,level=level)\n          lp=Line(Op,i,level=level,color=BLACK,linewidth=.08)\n          steps-=1\n          p=Intersect(line,lp,level=level)\n          points.append(p)\n          step(p,steps)\n    step(b,steps,level=level)\n    
return points\n\n# the sythentic construction defintion is invoked, with the line,\n# the double points, and the additional points, as arguments\n# check \"level 2\" on the control panel to view.\n\nSynthetic_Growth_Measure(xy,X,Y,A,Ap,steps=20,level=1)\n\nv.pickloop()","repo_name":"eeue56/PyGeo2","sub_path":"pygeo/examples/lawrence/growthmeasure.py","file_name":"growthmeasure.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"971664760","text":"import sys\nfrom collections import deque\ninput=sys.stdin.readline\n\nm,n=map(int,input().split())\ngraph=[list(map(int,input().split())) for _ in range(n)]\n\ndx=[1,-1,0,0]\ndy=[0,0,1,-1]\n\ndef bfs(lst):\n    q=deque()\n    for i in lst:\n        q.append(i)\n    while q:\n        x,y=q.popleft()\n        for i in range(4):\n            nx=x+dx[i]\n            ny=y+dy[i]\n            if not(0<=nx= clockStartTime) & (telemetriData.Time <= clockEndTime)]\n\n    #' Extracting data:\n    Time                        = np.array([pd.to_numeric(DataOnlyRelevant['Time']).to_numpy()])\n\n    RPM                         = (DataOnlyRelevant[['ROV.26','ROV.27','ROV.28','ROV.29','ROV.30','ROV.31']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n    EtaMeasured4                = (DataOnlyRelevant[['Pos4DOFMux','Pos4DOFMux.1','Pos4DOFMux.2','Pos4DOFMux.3']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n    NuMeasured4                 = (DataOnlyRelevant[['Vel4DOFMux','Vel4DOFMux.1','Vel4DOFMux.2','Vel4DOFMux.3']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n    EtaMeasured6                = (DataOnlyRelevant[['EstPos6DOFMux','EstPos6DOFMux.1','EstPos6DOFMux.2','EstPos6DOFMux.3','EstPos6DOFMux.4','EstPos6DOFMux.5']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n    NuMeasured6                 = (DataOnlyRelevant[['EstVel6DOFMux','EstVel6DOFMux.1','EstVel6DOFMux.2','EstVel6DOFMux.3','EstVel6DOFMux.4','EstVel6DOFMux.5']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n\n    USBLAvail                   = np.array([pd.to_numeric(DataOnlyRelevant['USBL.17']).to_numpy()])\n    DVLAvail                    = np.array([pd.to_numeric(DataOnlyRelevant['DVL.14']).to_numpy()])\n\n    EtaHat                      = (DataOnlyRelevant[['KalmanFilter.4','KalmanFilter.5','KalmanFilter.6','KalmanFilter.7']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()  \n    NuHat                       = (DataOnlyRelevant[['KalmanFilter.8','KalmanFilter.9','KalmanFilter.10','KalmanFilter.11']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy() \n    BiasHat                     = (DataOnlyRelevant[['KalmanFilter','KalmanFilter.1','KalmanFilter.2','KalmanFilter.3']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n\n    USBL_raw                    = (DataOnlyRelevant[['USBL','USBL.1','USBL.2']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n\n    EtaMeasured4[:,0:2]          = EtaMeasured4[:,0:2]*USBLAvail.T     \n    NuMeasured4[:,0:3]           = NuMeasured4[:,0:3]*DVLAvail.T\n    EtaMeasured6[:,0:2]          = EtaMeasured6[:,0:2]*USBLAvail.T     \n    NuMeasured6[:,0:3]           = NuMeasured6[:,0:3]*DVLAvail.T\n\n    #AbsoluteMeasurements include depth, roll, pitch and yaw\n    AbsoluteMeasurements = (DataOnlyRelevant[['ROV.8','ROV.9','ROV.10','ROV.11']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n    for i in range(0,len(EtaMeasured4)):\n        EtaMeasured4[i,3]        = pi_2_pi(EtaMeasured4[i,3])\n        EtaHat[i,3]              = pi_2_pi(EtaHat[i,3]) \n        EtaMeasured6[i,3]        = pi_2_pi(EtaMeasured6[i,3]) \n        EtaMeasured6[i,4]        = pi_2_pi(EtaMeasured6[i,4])  \n        EtaMeasured6[i,5]        = pi_2_pi(EtaMeasured6[i,5])  \n        AbsoluteMeasurements[i,1]= pi_2_pi(AbsoluteMeasurements[i,1])\n        AbsoluteMeasurements[i,2]= pi_2_pi(AbsoluteMeasurements[i,2])\n        AbsoluteMeasurements[i,3]= pi_2_pi(AbsoluteMeasurements[i,3])\n\n\n    NetFollowingMode          
= np.array([pd.to_numeric(DataOnlyRelevant['Aqueous.23']).to_numpy()])\n    DVL_beams                   = (DataOnlyRelevant[['DVL.6','DVL.7','DVL.8','DVL.9']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n    DVL_beams_flags             = (DataOnlyRelevant[['DVL.10','DVL.11','DVL.12','DVL.13']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n\n    RPM2Tau                     = (DataOnlyRelevant[['RPM2Tau','RPM2Tau.1','RPM2Tau.2','RPM2Tau.3','RPM2Tau.4','RPM2Tau.5']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n    CmdTau                      = (DataOnlyRelevant[['ModeSwitcher','ModeSwitcher.5','ModeSwitcher.2','ModeSwitcher.3','ModeSwitcher.4','ModeSwitcher.2']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n    \n    \n\n    return Time, RPM, EtaMeasured4, NuMeasured4, USBLAvail, DVLAvail, EtaHat, NuHat, BiasHat, DVL_beams, DVL_beams_flags, RPM2Tau, CmdTau, NetFollowingMode, EtaMeasured6, NuMeasured6,AbsoluteMeasurements, USBL_raw\n\ndef extractClockTime(fhSimData,telemetriData,videoStartTimeLocal,videoLength):\n    \"\"\"[Returns the clock start time of when video and telemetri started recording, and also returns clock end time of video]\n\n    Args:\n        fhSimData ([mp4]): [Video Data]\n        telemetriData ([csv]): [Telemetri Data]\n        videoStartTimeLocal ([str]): [Video start time]\n        videoLength ([float]): [Video length in seconds]\n\n    Returns:\n        clockStartTime ([float]): [Start time when both video and telemetri are active]\n        clockEndTime ([float]): [End time when both video and telemetri are deactivated]\n        nrCentiSecondVideoRoundedUp ([float]): [centiseconds when the video starts if the telemetri starts before video]\n        TELEMETRI_STARTS_RECORDING_LAST ([boolean]): [describing if telemetri starts recording after the video]\n    \"\"\"    \n    # ? NOTE : There are several cases that need to be handled to synchronize the videostream and the telemetri data. \n    # ? The cases are as follows:\n    # ? Case I   - Video starts to record AFTER telemetri data logging && Video stream ends BEFORE telemetri logging\n    # ? Case II  - Video starts to record AFTER telemetri data logging && Video stream ends AFTER telemetri logging\n    # ? Case III - Video starts to record BEFORE telemetri data logging && Video stream ends BEFORE telemetri logging\n    # ? Case IV  - Video starts to record BEFORE telemetri data logging && Video stream ends AFTER telemetri logging\n\n    #boolean: if 1 then telemetri starts recording after video\n    TELEMETRI_STARTS_RECORDING_LAST = 0\n\n\n    # 'nr desci seconds in both datasets before telemetri data is sampled\n    clockStartTimeTelemetri = findClockStartTimeOfTelemetri(telemetriData)\n    telemetriStartDesciSecond = int(clockStartTimeTelemetri*10)\n    print('TELEMETRISTART in desciseconds:',clockStartTimeTelemetri)\n\n    nrCentiSecondVideoRoundedUp = 0\n    \n    # ' Extract clock start and end time\n    DataWhenStart = fhSimData.loc[fhSimData['LocalTime'].str.contains(videoStartTimeLocal, case=False)]\n    no_startVideoData = DataWhenStart.empty\n\n    if (no_startVideoData):\n        # ? Handles case III and IV: Video starts to record BEFORE telemetri data logging, .....\n        # ? 
: if video start before data recording, use Telemetri clockStartTime\n\n        clockStartTime = clockStartTimeTelemetri \n        TELEMETRI_STARTS_RECORDING_LAST = 1\n        clockStartTimeVideo = 0 \n    else:\n        # ? Handles case I and II: Video starts to record AFTER telemetri data logging, .....\n        clockStartTimeVideo = DataWhenStart.iloc[0,0]\n\n        if (clockStartTimeVideo<=clockStartTimeTelemetri):\n            clockStartTime = clockStartTimeTelemetri\n            TELEMETRI_STARTS_RECORDING_LAST = 1\n        else:\n            nrCentiSecondVideoRoundedUp = countCentisecond(clockStartTimeVideo) \n            clockStartTime = round_decimals_up(clockStartTimeVideo, 1)\n\n    #Testing if the end time actually exists in the dataset:\n    expectedEndTime = clockStartTimeVideo + videoLength\n    \n    endTimeExists = fhSimData['Timestep'] == expectedEndTime\n    endTimeData = fhSimData[endTimeExists]\n\n    # ' This part handles case 1 and 2\n    no_endData = endTimeData.empty\n    #case if telemetri data is not as long as video - Meaning not getting expected output\n    if (no_endData):\n        # ? Case II and IV: ...., Video stream ends AFTER telemetri logging\n        clockEndTimeTelemetri = fhSimData.iloc[-1:,0]\n        clockEndTimeTelemetri = clockEndTimeTelemetri.iloc[0]\n\n        clockEndTimeVideo = clockEndTimeTelemetri \n\n        #Rounding down to fit with telemetri dataset\n        clockEndTimeVideo = round_decimals_down(clockEndTimeVideo,1)\n        print('Using last recorded end time: ',clockEndTimeVideo)\n\n    else:\n        # ? Case I and III: ...., Video stream ends BEFORE telemetri logging\n        clockEndTimeVideo = expectedEndTime\n        #Rounding down to fit with telemetri data \n        clockEndTimeVideo = round_decimals_down(clockEndTimeVideo,1) \n        print('Using expected end time: ', clockEndTimeVideo)\n    \n    clockEndTime = clockEndTimeVideo\n    \n    return clockStartTime, clockEndTime, nrCentiSecondVideoRoundedUp, TELEMETRI_STARTS_RECORDING_LAST \n\n\ndef findClockStartTimeOfTelemetri(telemetriData):\n    Time                        = np.array([pd.to_numeric(telemetriData['Time']).to_numpy()])\n\n    USBLAvail                   = np.array([pd.to_numeric(telemetriData['USBL.17']).to_numpy()])\n    print(USBLAvail.shape)\n    DVLAvail                    = np.array([pd.to_numeric(telemetriData['DVL.14']).to_numpy()])\n    print(DVLAvail.shape)\n    AbsMeas                     = (telemetriData[['ROV.8','ROV.9','ROV.10','ROV.11']].apply(pd.to_numeric, errors='coerce', axis=1)).to_numpy()\n\n    for index, row in telemetriData.iterrows():\n        # if all telemetri data of interrest is available at a certain clocktime\n        AbsMeasIdx = list(AbsMeas[index,:])\n\n        if(USBLAvail[0,index]!=0 and DVLAvail[0,index]!=0 and AbsMeasIdx[0]!=0 and AbsMeasIdx[1]!=0 and AbsMeasIdx[2]!=0 and AbsMeasIdx[3]!=0):\n            clockStartTimeTelemetri = Time[0,index]\n            break\n\n    return clockStartTimeTelemetri\n\ndef findStartFrameOfVideo(FhSimData,clockStartTime,videoStartTimeLocal,nrCentiSecondVideoRoundedUp,TELEMETRI_STARTS_RECORDING_LAST,frameRate):\n    if TELEMETRI_STARTS_RECORDING_LAST:\n        telemetriStartTimeLocal, telemetriStartTimeCentiseconds = findTelemetriStartTimeLocal(clockStartTime,FhSimData)\n\n        # calculate time difference [s] between video and telemetri starting recording\n        timeDiff = calculateTimeDifference(videoStartTimeLocal,telemetriStartTimeLocal)+(telemetriStartTimeCentiseconds)\n        
print('timediff',timeDiff)\n\n        # calculate how many frames this time difference is\n        startFrame = math.ceil(frameRate*(timeDiff))\n\n    else:\n        startFrame = math.ceil(nrCentiSecondVideoRoundedUp*frameRate)\n    return startFrame\n\ndef findTelemetriStartTimeLocal(clockStartTime,FhSimData):\n    telemetriStartTimeLocal = FhSimData[FhSimData.Timestep==clockStartTime].iloc[0,2]\n\n    DataContainingTelemetriLocalTime = FhSimData.loc[FhSimData['LocalTime']==telemetriStartTimeLocal]\n    firstRecordedClockTimeAtTelemetriLocalTime = DataContainingTelemetriLocalTime.iloc[0,0]\n    \n    #The time when telemetriLocalTime is recorded \n    telemetriStartTimeCentiseconds = clockStartTime - firstRecordedClockTimeAtTelemetriLocalTime \n    print('telemetriStartTimeCentiseconds',telemetriStartTimeCentiseconds)\n\n    return telemetriStartTimeLocal, telemetriStartTimeCentiseconds\n\ndef calculateTimeDifference(startTime,endTime):\n    endTimeSplit = endTime.split(\", \")\n    date_format = \"%H:%M:%S\"\n\n    \n    time_start = startTime \n    time_end = endTimeSplit[1]   \n\n    # Then get the difference here.    \n    diff = datetime.strptime(time_end, date_format) - datetime.strptime(time_start, date_format)\n\n    return diff.seconds\n\ndef countCentisecond(x):\n    x_round_down = round_decimals_down(x,1)\n    x_centiSeconds = x- x_round_down\n    if(x_centiSeconds<5):\n        centisecond = (10-x_centiSeconds)/100\n    else:\n        centisecond = x_centiSeconds/100\n    return centisecond\n\n","repo_name":"KyrreHaugland/Master-Thesis-Underwater-Pose-Graph-SLAM-in-Aquaculture","sub_path":"DataExtractionFunctions.py","file_name":"DataExtractionFunctions.py","file_ext":"py","file_size_in_byte":13710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
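`round_decimals_up` and `round_decimals_down` are called throughout this record but defined outside the quoted excerpt. A plausible sketch of what they do, assuming ceiling/floor at a given decimal place (an assumption, not the repo's verified code):

```python
import math

def round_decimals_up(value, decimals=1):
    # hypothetical reconstruction: ceiling at `decimals` places
    factor = 10 ** decimals
    return math.ceil(value * factor) / factor

def round_decimals_down(value, decimals=1):
    # hypothetical reconstruction: floor at `decimals` places
    factor = 10 ** decimals
    return math.floor(value * factor) / factor

print(round_decimals_up(12.31), round_decimals_down(12.39))  # 12.4 12.3
```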
+{"seq_id":"73934067738","text":"\"\"\"\nGiven an array of integers where 1 ≤ a[i] ≤ n (n = size of array), some elements appear twice and others appear once.\n\nFind all the elements of [1, n] inclusive that do not appear in this array.\n\nCould you do it without extra space and in O(n) runtime? You may assume the returned list does not count as extra space.\n\nExample:\n\nInput:\n[4,3,2,7,8,2,3,1]\n\nOutput:\n[5,6]\n\nSOLUTION:\n\nIterate over the array.\nFor every index i, mark arr[arr[i]-1] as -ve.\nAt the end, again iterate through the array; all the indices where arr[i] is +ve are the missing numbers.\nFor example:\nIn case of [4,3,2,7,8,2,3,1], after first round it will become:\n[-4, -3, -2, -7, 8, 2, -3, -1]\nSince arr[4] and arr[5] are still +ve, means that numbers 5 and 6 were absent.\n\"\"\"\n\n\ndef find_all_missing(arr):\n    arr = arr\n    if not arr:\n        return []\n    for i in range(0, len(arr)):\n        val = abs(arr[i])\n        arr[val - 1] = -abs(arr[val - 1])\n    ans = []\n    print(arr)\n    for i in range(0, len(arr)):\n        if arr[i] > 0:\n            ans.append(i + 1)\n    return ans\n\n\ndef main():\n    arr = [4, 3, 2, 7, 8, 2, 3, 1]\n    ans = find_all_missing(arr)\n    print(ans)\n\n\nmain()\n","repo_name":"Anmol-Singh-Jaggi/interview-notes","sub_path":"notes/algo-ds-practice/problems/array/find_all_missing.py","file_name":"find_all_missing.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"}
+{"seq_id":"72554526940","text":"import math\nfrom KalmanNet_data import DataGen, DecimateData, Decimate_and_perturbate_Data\nimport KalmanNet_sysmdl\nimport threading\nimport torch\n\nimport os\n\nfrom filing_paths import path_model\n\nimport sys\nsys.path.insert(1, path_model)\n\nfrom parameters import Q_design, R_design, Q_mod, R_mod, T_test, J, J_mod, delta_t_gen, ratio\nfrom parameters import m1x_0_design, m2x_0_design, m1x_0_mod, m2x_0_mod, T, lambda_q, lambda_r\nfrom parameters import m1x_0_design_test, m1x_0_mod_test, T_mod, T_test_mod, roll_deg, delta_t_mod, m, n\nfrom model import f_gen, f, h, h_rotated, h_nonlinear\n\n# obs model can be either identity, rotated or nonlinear\n# noise is 1/r^2 [dB]\ndef load_data(obs_model, noise, process_noise=None, discrete=False, randomize_init_conditions=True):\n\n    # defining paths to save data\n    if process_noise is None:\n        pn_string = \"none\"\n    else:\n        pn_string = str(process_noise)\n\n    if discrete and randomize_init_conditions:\n        path_base = f\"Simulations/Lorenz_Atractor/data/discrete_randomized/q_{pn_string}/\"\n    elif discrete and (not randomize_init_conditions):\n        path_base = f\"Simulations/Lorenz_Atractor/data/discrete_non_randomized/q_{pn_string}/\"\n    elif (not discrete) and randomize_init_conditions:\n        path_base = f\"Simulations/Lorenz_Atractor/data/continuous_randomized/q_{pn_string}/\"\n    elif (not discrete) and (not randomize_init_conditions):\n        path_base = f\"Simulations/Lorenz_Atractor/data/continuous_non_randomized/q_{pn_string}/\"\n\n    os.makedirs(path_base, exist_ok=True)\n\n    IC_path = path_base + \"IC.pt\"\n    obs_path = path_base + f\"obs_{obs_model}_{str(noise)}.pt\"\n    \n    if discrete:\n        GT_path = path_base + \"GT.pt\"\n    else:\n        GT_test_path = path_base + \"GT_test.pt\"\n        GT_test_long_path = path_base + \"GT_test_long.pt\"\n        GT_test_short_path = path_base + \"GT_test_short.pt\"\n        GT_CV_path = path_base + \"GT_CV.pt\"\n        GT_train_path = path_base + \"GT_train.pt\"\n        GT_undecimated_path = path_base + \"GT_undecimated.pt\"\n\n\n    # defining lengths and numbers of trajectores\n    # T * N needs to be an integer multiple of T_trajectory\n    T_trajectory = int(T*ratio)\n    #T_trajectory = 1000\n    #T = int(T_trajectory / ratio)\n\n    T_train = 100\n    N_train = 360\n\n    T_CV = T_train\n    N_CV = 30\n\n    T_test_short = 30\n    T_test = 100\n    T_test_long = 3000\n\n    N_test_short = 100\n    N_test = 30\n    N_test_long = 1\n\n    N_trajectories = math.ceil((T_train * N_train + T_CV * N_CV + \n            T_test_short * N_test_short + T_test * N_test + \n            T_test_long * N_test_long) / T_trajectory)\n\n    # function to generate trajectories in the discrete case\n    def gen_discrete(N, T):\n        if process_noise is None:\n            Q = Q_design\n        else:\n            Q = (10 ** (process_noise / 10)) * torch.eye(m)\n        GT = torch.empty(N, m, T)\n        sys_model = KalmanNet_sysmdl.SystemModel(f, Q, h, R_design, T)\n        IC = torch.empty(N, m)\n        for k in range(N):\n            if randomize_init_conditions:\n                m1x_0 = 5 * (2 * torch.rand(m1x_0_design.size()) - torch.ones(m1x_0_design.size()))\n            else:\n                m1x_0 = m1x_0_design\n                \n            IC[k, :] = torch.squeeze(m1x_0)\n            sys_model.InitSequence(m1x_0, m2x_0_design)\n            _,data = DataGen(sys_model, only_1_sequence=True)\n            GT[k, :, 
:] = data\n        return [GT, IC]\n\n    if discrete:\n        try:\n            [GT_test, GT_test_long, GT_test_short, GT_train, GT_CV, IC_test, IC_test_long, IC_test_short, IC_train, IC_CV] = torch.load(GT_path)\n        except:\n            print(\"no GT found. generating new one\")\n            [GT_test, IC_test] = gen_discrete(N_test, T_test)\n            [GT_test_long, IC_test_long] = gen_discrete(N_test_long, T_test_long)\n            [GT_test_short, IC_test_short] = gen_discrete(N_test_short, T_test_short)\n            [GT_CV, IC_CV] = gen_discrete(N_CV, T_CV)\n            [GT_train, IC_train] = gen_discrete(N_train, T_train)\n            GT_data = [GT_test, GT_test_long, GT_test_short, GT_train, GT_CV, IC_test, IC_test_long, IC_test_short, IC_train, IC_CV]\n            torch.save(GT_data, GT_path)\n    else:\n        try:\n            GT_test = torch.load(GT_test_path)\n            GT_test_long = torch.load(GT_test_long_path)\n            GT_test_short = torch.load(GT_test_short_path)\n            GT_CV = torch.load(GT_CV_path)\n            GT_train = torch.load(GT_train_path)\n            [IC_test, IC_test_long, IC_test_short, IC_train, IC_CV] = torch.load(IC_path)\n        except:\n            try: \n                [GT_test_undecimated, GT_test_long_undecimated, GT_test_short_undecimated, GT_train_undecimated, GT_CV_undecimated] = torch.load(GT_undecimated_path)\n                [IC_test, IC_test_long, IC_test_short, IC_train, IC_CV] = torch.load(IC_path)\n            except:\n                print(\"no GT found. generating new one\")\n                GT_CV_undecimated = torch.empty(N_CV, m, int(T_CV/ratio))\n                GT_train_undecimated = torch.empty(N_train, m, int(T_train/ratio))\n                GT_test_undecimated = torch.empty(N_test, m, int(T_test/ratio))\n                GT_test_long_undecimated = torch.empty(N_test_long, m, int(T_test_long/ratio))\n                GT_test_short_undecimated = torch.empty(N_test_short, m, int(T_test_short/ratio))\n\n                # generating initial conditions\n                def get_IC(N, randomize):\n                    IC = torch.empty(N, m)\n                    for k in range(N):\n                        if randomize:\n                            m1x_0 = 5 * (2 * torch.rand(m1x_0_design.size()) - torch.ones(m1x_0_design.size()))\n                        else:\n                            m1x_0 = m1x_0_design\n                        IC[k, :] = torch.squeeze(m1x_0)\n                    return IC\n\n                IC_CV = get_IC(N_CV, randomize_init_conditions)\n                IC_train = get_IC(N_train, randomize_init_conditions)\n                IC_test = get_IC(N_test, randomize_init_conditions)\n                IC_test_long = get_IC(N_test_long, False)\n                IC_test_short = get_IC(N_test_short, randomize_init_conditions)\n\n                if process_noise is None:\n                    Q = Q_design\n                else:\n                    Q = ratio * (10 ** (process_noise / 10)) * torch.eye(m)\n\n                # initializeing all the system models\n                sys_model_CV = KalmanNet_sysmdl.SystemModel(f_gen, Q, h, R_design, int(T_CV/ratio))\n                sys_model_train = KalmanNet_sysmdl.SystemModel(f_gen, Q, h, R_design, int(T_train/ratio))\n                sys_model_test = KalmanNet_sysmdl.SystemModel(f_gen, Q, h, R_design, int(T_test/ratio))\n                sys_model_test_long = KalmanNet_sysmdl.SystemModel(f_gen, Q, h, R_design, int(T_test_long/ratio))\n                
sys_model_test_short = KalmanNet_sysmdl.SystemModel(f_gen, Q, h, R_design, int(T_test_short/ratio))\n\n                threads = 4*[None]\n\n                #starting threads for the ground truth trajectories\n                threads[0] = threading.Thread(target=gen_decimated, args=(0, N_test_long, 1, sys_model_test_long, IC_test_long, GT_test_long_undecimated,))\n                threads[1] = threading.Thread(target=gen_decimated, args=(1, N_test_short, 1, sys_model_test_short, IC_test_short, GT_test_short_undecimated,))\n                threads[2] = threading.Thread(target=gen_decimated, args=(2, N_test, 1, sys_model_test, IC_test, GT_test_undecimated,))\n                threads[3] = threading.Thread(target=gen_decimated, args=(3, N_CV, 1, sys_model_CV, IC_CV, GT_CV_undecimated,))\n\n                for t, thread in enumerate(threads):\n                    print(f\"starting thread {t}\")\n                    thread.start()\n                for thread in threads:  # join every worker before saving\n                    thread.join()\n\n                GT_data_undecimated = [GT_test_undecimated, GT_test_long_undecimated, GT_test_short_undecimated, GT_train_undecimated, GT_CV_undecimated, IC_test, IC_test_long, IC_test_short, IC_train, IC_CV]\n                torch.save(GT_data_undecimated, GT_undecimated_path)\n\n                for t in range(4):\n                    threads[t] = threading.Thread(target=gen_decimated, args=(t, N_train, 4, sys_model_train, IC_train, GT_train_undecimated,))\n\n                for thread in threads:\n                    thread.start()\n                for thread in threads:\n                    thread.join()\n\n                GT_data_undecimated = [GT_test_undecimated, GT_test_long_undecimated, GT_test_short_undecimated, GT_train_undecimated, GT_CV_undecimated]\n                torch.save(GT_data_undecimated, GT_undecimated_path)\n                IC_data = [IC_test, IC_test_long, IC_test_short, IC_train, IC_CV]\n                torch.save(IC_data, IC_path)\n\n            # decimating the long trajectories\n            def decimate(GT):\n                dec = GT[:, :, 0::int(1/ratio)]\n                return dec\n\n            GT_test = decimate(GT_test_undecimated)\n            GT_test_long = decimate(GT_test_long_undecimated)\n            GT_test_short = decimate(GT_test_short_undecimated)\n            GT_CV = decimate(GT_CV_undecimated)\n            GT_train = decimate(GT_train_undecimated)\n\n            torch.save(GT_test, GT_test_path)\n            torch.save(GT_test_long, GT_test_long_path)\n            torch.save(GT_test_short, GT_test_short_path)\n            torch.save(GT_CV, GT_CV_path)\n            torch.save(GT_train, GT_train_path)\n\n\n\n    # generating noisy observations\n    def gen_obs(N, T, GT):\n        # choosing appropriate observation model\n        if obs_model == \"identity\":\n            obs_noisefree = GT\n        elif obs_model == \"rotated\":\n            h_gen = h_rotated\n        elif obs_model == \"nonlinear\":\n            h_gen = h_nonlinear\n        else:\n            print(f\"{obs_model} is not a valid argument for the observation model.\\nPlease choose either identity, rotated or nonlinear\")\n        \n        if obs_model == \"rotated\" or obs_model == \"nonlinear\":\n            obs_noisefree = torch.empty(N_trajectories, n, T_trajectory)\n            for k in range(N):\n                for t in range(T):\n                    obs_noisefree[k, :, t] = torch.squeeze(h_gen(GT[k, :, t]))\n\n        r = 10 ** (-noise / 10)\n        obs = obs_noisefree + r * 
torch.randn_like(obs_noisefree)\n        return obs\n     \n    try:\n        [obs_test, obs_test_long, obs_test_short, obs_train, obs_CV] = torch.load(obs_path)\n    except:\n        obs_test = gen_obs(N_test, T_test, GT_test)\n        obs_test_long = gen_obs(N_test_long, T_test_long, GT_test_long)\n        obs_test_short = gen_obs(N_test_short, T_test_short, GT_test_short)\n        obs_train = gen_obs(N_train, T_train, GT_train)\n        obs_CV = gen_obs(N_CV, T_CV, GT_CV)\n        obs_data = [obs_test, obs_test_long, obs_test_short, obs_train, obs_CV]\n        torch.save(obs_data, obs_path)\n\n    return [GT_test, obs_test, IC_test,\n            GT_test_long, obs_test_long, IC_test_long,\n            GT_test_short, obs_test_short, IC_test_short,\n            GT_train, obs_train, IC_train,\n            GT_CV, obs_CV, IC_CV]\n\n# function for generating trajectories with multiple threads\ndef gen_decimated(t, N_trajectories, N_threads, sys_model, IC, GT):\n    if N_threads > 1:\n        N_gen = round(math.floor(N_trajectories/N_threads))\n        if N_trajectories % N_threads < t:\n            N_gen += 1\n    else:\n        N_gen = N_trajectories\n    for k in range(N_gen):\n        if N_threads > 1:\n            idx = t + N_threads * k\n        else:\n            idx = k\n        if idx < N_trajectories:\n            print(f\"starting index {idx}!\")\n            m1x_0 = torch.reshape(IC[idx, :], m1x_0_design.shape)\n            sys_model.InitSequence(m1x_0, m2x_0_design)\n            _,data = DataGen(sys_model, only_1_sequence=True)\n            GT[idx, :, :] = data[:, :, :]\n            print(f\"index {idx} done!\")\n\n\n# old data generator for generating chopped trajectories\ndef load_data_old(obs_model, noise, process_noise=None, discrete=False, randomize_init_conditions=True):\n\n    if randomize_init_conditions == True:\n        if not discrete:\n            if process_noise is None:\n                GT_path = \"data/GT.pt\"\n                GT_undecimated_path = \"data/GT_undecimated.pt\"\n                obs_path = \"data/\" + obs_model + \"/\" + obs_model + str(noise) + \".pt\"\n                obs_noisefree_path = \"data/\" + obs_model + \"/\" + obs_model + \"_noisefree.pt\"  \n            else:\n                GT_path = f\"data/GT_{str(process_noise)}dB.pt\"\n                GT_undecimated_path = f\"data/GT_undecimated_{str(process_noise)}dB.pt\"\n                obs_path = \"data/\" + obs_model + \"/\" + obs_model + str(noise) + \"_\" + str(process_noise) + \".pt\"\n                obs_noisefree_path = \"data/\" + obs_model + \"/\" + obs_model + \"_noisefree_\" + str(process_noise) + \".pt\"\n        else:\n            if process_noise is None:\n                GT_path = \"data/GT_discrete.pt\"\n                GT_undecimated_path = \"data/GT_undecimated_discrete.pt\"\n                obs_path = \"data/\" + obs_model + \"/\" + obs_model + str(noise) + \"_discrete.pt\"\n                obs_noisefree_path = \"data/\" + obs_model + \"/\" + obs_model + \"_noisefree_discrete.pt\"  \n            else:\n                GT_path = f\"data/GT_{str(process_noise)}dB_discrete.pt\"\n                GT_undecimated_path = f\"data/GT_undecimated_{str(process_noise)}dB_discrete.pt\"\n                obs_path = \"data/\" + obs_model + \"/\" + obs_model + str(noise) + \"_\" + str(process_noise) + \"_discrete.pt\"\n                obs_noisefree_path = \"data/\" + obs_model + \"/\" + obs_model + \"_noisefree_\" + str(process_noise) + \"_discrete.pt\"\n    else:\n        if not discrete:\n       
     if process_noise is None:\n                GT_path = \"data/unrandomized/GT.pt\"\n                GT_undecimated_path = \"data/unrandomized/GT_undecimated.pt\"\n                obs_path = \"data/unrandomized/\" + obs_model + \"/\" + obs_model + str(noise) + \".pt\"\n                obs_noisefree_path = \"data/unrandomized/\" + obs_model + \"/\" + obs_model + \"_noisefree.pt\"  \n            else:\n                GT_path = f\"data/unrandomized/GT_{str(process_noise)}dB.pt\"\n                GT_undecimated_path = f\"data/unrandomized/GT_undecimated_{str(process_noise)}dB.pt\"\n                obs_path = \"data/unrandomized/\" + obs_model + \"/\" + obs_model + str(noise) + \"_\" + str(process_noise) + \".pt\"\n                obs_noisefree_path = \"data/\" + obs_model + \"/\" + obs_model + \"_noisefree_\" + str(process_noise) + \".pt\"\n        else:\n            if process_noise is None:\n                GT_path = \"data/unrandomized/GT_discrete.pt\"\n                GT_undecimated_path = \"data/unrandomized/GT_undecimated_discrete.pt\"\n                obs_path = \"data/unrandomized/\" + obs_model + \"/\" + obs_model + str(noise) + \"_discrete.pt\"\n                obs_noisefree_path = \"data/\" + obs_model + \"/\" + obs_model + \"_noisefree_discrete.pt\"  \n            else:\n                GT_path = f\"data/unrandomized/dt_02/GT_{str(process_noise)}dB_discrete.pt\"\n                GT_undecimated_path = f\"data/unrandomized/dt_02/GT_undecimated_{str(process_noise)}dB_discrete.pt\"\n                obs_path = \"data/unrandomized/dt_02/\" + obs_model + \"/\" + obs_model + str(noise) + \"_\" + str(process_noise) + \"_discrete.pt\"\n                obs_noisefree_path = \"data/unrandomized/dt_02/\" + obs_model + \"/\" + obs_model + \"_noisefree_\" + str(process_noise) + \"_discrete.pt\"\n\n    if not discrete:\n        # load or generate ground truth\n        try:\n            GT = torch.load(GT_path)\n        except:\n            print(\"No existing ground truth file found. Starting data generation\")\n            if process_noise is None:\n                Q = Q_design\n            else:\n                Q = ratio * (10 ** (process_noise / 10)) * torch.eye(m)\n\n            if randomize_init_conditions is False and process_noise is None:\n                GT = torch.empty(1, m, T_trajectory)\n                GT_undecimated = torch.empty(1, m, T)\n                N_threads = 1\n\n            else:\n                GT = torch.empty(N_trajectories, m, T_trajectory)\n                GT_undecimated = torch.empty(N_trajectories, m, T)\n                N_threads = 4\n\n            threads = N_threads * [None]\n\n            for t in range(N_threads):\n                threads[t] = threading.Thread(target=generate_trajectory, args=(t, N_trajectories, N_threads, GT, GT_undecimated, Q, randomize_init_conditions,))\n                threads[t].start()\n            for thread in threads:\n                thread.join()\n            if not randomize_init_conditions:\n                GT = GT.repeat(N_trajectories, 1, 1)\n            torch.save(GT, GT_path)\n            torch.save(GT_undecimated, GT_undecimated_path)\n            print(\"done!\")\n    else:\n        try:\n            GT = torch.load(GT_path)\n        except:\n            print(\"No existing ground truth file found. 
Starting data generation for discrete case\")\n            if process_noise is None:\n                Q = Q_design\n            else:\n                Q = (10 ** (process_noise / 10)) * torch.eye(m)\n            GT = torch.empty(N_trajectories, m, T_trajectory)\n            sys_model = KalmanNet_sysmdl.SystemModel(f, Q, h, R_design, T_trajectory)\n            for k in range(N_trajectories):\n                print(f\"generating trajectory no {k}\")\n                if randomize_init_conditions:\n                    m1x = 2 * torch.rand(m1x_0_design.size()) - torch.ones(m1x_0_design.size())\n                    sys_model.InitSequence(m1x, m2x_0_design)\n                else:\n                    sys_model.InitSequence(m1x_0_design, m2x_0_design)\n                _, data = DataGen(sys_model, only_1_sequence=True)\n                GT[k, :, :] = data\n            torch.save(GT, GT_path)\n    \n\n    # choosing appropriate observation model\n    if obs_model == \"identity\":\n        h_gen = h\n    elif obs_model == \"rotated\":\n        h_gen = h_rotated\n    elif obs_model == \"nonlinear\":\n        h_gen = h_nonlinear\n    else:\n        print(f\"{obs_model} is not a valid argument for the observation model.\\nPlease choose either identity, rotated or nonlinear\")\n    \n    # load noise free observations, generate from scratch if not available\n    try:\n        obs_noisefree = torch.load(obs_noisefree_path)\n    except:\n        print(\"No noise free observation found. Generating new observation\")\n        obs_noisefree = torch.empty(N_trajectories, n, T_trajectory)\n        for k in range(N_trajectories):\n            for t in range(T_trajectory):\n                obs_noisefree[k, :, t] = torch.squeeze(h_gen(GT[k, :, t]))\n        torch.save(obs_noisefree, obs_noisefree_path)\n    \n\n    # load noisy version of observation, generate noise if not available\n    try:\n        obs = torch.load(obs_path)\n    except:\n        print(\"No noisy observation found. 
Generating new observation\")\n        r = 10 ** (-noise / 10)\n        obs = obs_noisefree + r * torch.randn_like(obs_noisefree)\n        torch.save(obs, obs_path)\n\n\n    # cut trajectories into length T\n    def shorten_trajectories(data, T):\n        data = torch.split(data, T, dim=2)\n        return torch.cat(data, dim=0)\n\n\n    # split data into training, validation and test sets\n    def split(data):\n        sizes = [int(N_train * T_train / T_trajectory),\n                int(N_CV * T_CV / T_trajectory),\n                int(N_test_short * T_test_short / T_trajectory),\n                int(N_test * T_test / T_trajectory),\n                int(N_test_long * T_test_long / T_trajectory)]\n        train, CV, test_short, test, test_long = torch.split(data, sizes, dim=0)\n        train = shorten_trajectories(train, T_train)\n        CV = shorten_trajectories(CV, T_CV)\n        test_short = shorten_trajectories(test_short, T_test_short)\n        test = shorten_trajectories(test, T_test)\n        test_long = shorten_trajectories(test_long, T_test_long)\n        return [train, CV, test_short, test, test_long]\n\n    [train_target, CV_target, test_target_short, test_target, test_target_long] = split(GT)\n    [train_input, CV_input, test_input_short, test_input, test_input_long] = split(obs)\n\n    return [train_target, train_input, CV_target, CV_input, \n            test_target_short, test_input_short, test_target, test_input,\n            test_target_long, test_input_long]\n\n\n                  \ndef generate_trajectory(t, N_trajectories, N_threads, GT, GT_undecimated, Q, randomize_init_conditions):\n    sys_model = KalmanNet_sysmdl.SystemModel(f_gen, Q, h, R_design, T)\n    N = round(math.floor(N_trajectories/N_threads))\n    if N_trajectories % N_threads < t:\n        N += 1\n    for i in range(N):\n        idx = t + math.ceil(N_trajectories/N_threads) * i\n        if idx < N_trajectories:\n            print(f\"starting thread {t} generating trajectory with index {idx}\")\n            # the trajectories are initialized uniformly at random with x_0 in [-1, 1]^m \n            if randomize_init_conditions:\n               m1x = 2 * torch.rand(m1x_0_design.size()) - torch.ones(m1x_0_design.size())\n               sys_model.InitSequence(m1x, m2x_0_design)\n            else:\n                sys_model.InitSequence(m1x_0_design, m2x_0_design)\n            _, data = DataGen(sys_model, only_1_sequence=True)\n            GT[idx, :, :] = data[:, : ,0::int(1/ratio)]\n            GT_undecimated[idx, :, :] = data[:, :, :]\n            print(f\"index {idx} done!\")\n\n\n\n\n\n","repo_name":"KalmanNet/ERRCOV_ICASSP22","sub_path":"src/DataGenerator.py","file_name":"DataGenerator.py","file_ext":"py","file_size_in_byte":21411,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"69"}
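The shorten_trajectories/split helpers in the record above implement a simple windowing idiom: torch.split cuts each long trajectory into fixed-length pieces along the time axis, and torch.cat restacks the pieces along the batch axis. A minimal self-contained sketch of that round trip (the shapes are illustrative, not the dataset's actual dimensions, and the long length is assumed to be a multiple of the window length):

import torch

def shorten_trajectories(data: torch.Tensor, T: int) -> torch.Tensor:
    # (N, m, T_long) -> tuple of (N, m, T) windows -> (N * T_long // T, m, T)
    chunks = torch.split(data, T, dim=2)
    return torch.cat(chunks, dim=0)

data = torch.randn(4, 2, 100)   # 4 trajectories, state dim 2, 100 time steps
short = shorten_trajectories(data, 20)
assert short.shape == (20, 2, 20)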
+{"seq_id":"4327480105","text":"# -*- coding: utf-8 -*-\n\"\"\"\nRabbit MQ Client\n================\nModified: 2021-10\n\"\"\"\n\nfrom pika import BlockingConnection, ConnectionParameters, PlainCredentials\nimport logging\n\nfrom monitor.logs.formatter import pformat\nfrom monitor.amqp.conf import AMQPConf\n\n\nclass AMQPClient:\n    def __init__(self, host: str, port: int) -> None:\n        self._logger = logging.getLogger(__name__)\n        self._logger.debug(\"Creating rabbitmq connection to host: %s port: %s\", host, port)\n        credentials = PlainCredentials(\n            username=\"microservice\",\n            password=\"microservice\",\n            erase_on_connect=True\n        )\n        connection = BlockingConnection(\n            ConnectionParameters(\n                host=host,\n                port=port,\n                virtual_host='/',\n                credentials=credentials\n            )\n        )\n        self.channel = connection.channel()\n        # self.channel.queue_declare(queue='telemetry')\n        self.channel.basic_consume(\n            queue=AMQPConf.Routes.TELEMETRY,\n            auto_ack=True,\n            on_message_callback=self.callback\n        )\n        self._logger.info(\"Instantiation successful.\")\n\n    def callback(self, ch, method, properties, body):\n        self._logger.info(\"ch: %s:%s | method: %s:%s | properties: %s:%s | body: %s:%s\",\n                          ch, type(ch), method, type(method), properties, type(properties), body, type(body))\n        self._logger.info(\"Message payload: %s\", pformat(body))\n","repo_name":"Incuvers/iris","sub_path":"monitor/amqp/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"4868197059","text":"import firebase\nimport time \nurl = 'https://sizzling-torch-109.firebaseio.com/'\ntoken = '4tWC7ZSixm6Xp0HNVzyEWg3urMtxKlTnDLUwZXUq'\nfirebase = firebase.FirebaseApplication(url, token)\n\n#firebase.put('/','py_lab',{'Work_Station_1': {'status': 'Occupied','timer':0}, 'Work_Station_2': {'status': 'Unoccupied','timer':0}, 'Work_Station_3': {'status': 'Occupied','timer':0}})\n    \ndef timeformatter(seconds):\n    h=seconds//3600.0\n    m=(seconds%3600)//60\n    s= (seconds%3600)%60\n    rethms = (\"%1.0fh%1.0fm%1.0fs\"%(h,m,s))\n    retms = (\"%1.0fm%1.0fs\"%(m,s))\n    rets = (\"%1.0fs\"%s)\n    if h ==0 and m ==0: return rets\n    elif h == 0 and m!=0: return retms\n    else: return rethms\n\n\nclass StopWatch:     \n    def __init__(self,endTime=None,startTime = None,currentTime = time.time(),status = False):\n        self.endTime = endTime\n        self.startTime = startTime\n        self.currentTime = currentTime\n        self.status = status\n    def start(self):\n        self.startTime = time.time()\n        self.status = True\n    def stop(self):\n        self.endTime = time.time()\n        self.status = False\n    def reset(self):\n        self.endTime = None\n        self.startTime = None\n        self.status = False    \n    def getStartTime(self): return self.startTime\n    def getEndTime(self): return self.endTime\n    def getElapsedTime(self):\n        if self.endTime == None and self.startTime == None: return 0\n        if self.endTime == None and self.startTime != None: return float(round((time.time() - self.startTime),0))\n        else: return float(round((self.endTime - self.startTime),0))   \nsw1 = StopWatch()\nsw2 = StopWatch()\nsw3 = StopWatch()\nwatchlist = [sw1,sw2,sw3]\n\ndef changestatus(workstationnumber,value):\n    labstatus = firebase.get('/py_lab')\n    if labstatus['Work_Station_%s'%workstationnumber]['status'] == value: return firebase.put('/','py_lab/Work_Station_%s'%workstationnumber,labstatus['Work_Station_%s'%workstationnumber])\n    else:\n        firebase.put('/','py_lab/Work_Station_%s/status'%workstationnumber,'Checking Vacancy\\nTimer: ')\n        s = watchlist[workstationnumber-1] #workstation1 is sw1, workstation2 is sw2, etc\n        if s.status == True: pass\n        if s.status == False: s.start()\n        while s.getElapsedTime() <= 60.0:\n            firebase.put('/','py_lab/Work_Station_%s/timer'%workstationnumber,str(timeformatter(float(s.getElapsedTime()))))\n            if s.getElapsedTime() > 60.0:\n                s.stop()\n                s.reset()\n                firebase.put('/','py_lab/Work_Station_%s',{'status':value, 'timer':0})\n                break\n        pass\n\n    ","repo_name":"imny94/pyLab","sub_path":"timermodule.py","file_name":"timermodule.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19977655320","text":"from django.shortcuts import render, render_to_response, get_object_or_404, get_list_or_404\nfrom django.http import JsonResponse, HttpResponseNotAllowed, HttpResponseBadRequest\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nfrom django.views.decorators.http import require_http_methods\n\nfrom .models import Chat\nfrom django.template import RequestContext\nimport datetime\nfrom django import forms\nfrom dialog.models import Member\nfrom message.models import Message\nfrom user_profile.models import User\nfrom .forms import MessageForm, MembersForm, NewChatForm, ChooseChat, NewChat\n\n\n# from messenger.user_profile.models import User\n\n@csrf_exempt\ndef chat_list(request, pk=None):\n    if pk is None:\n        return JsonResponse({'msg': 'enter chat_id'})\n    if request.method == 'GET':\n        return JsonResponse({'chat_id': pk, 'members': 2})\n    # return JsonResponse({'test': 'Wrong method {}'.format(request.method)}, status=405)\n    return HttpResponseNotAllowed(permitted_methods=['GET'], status=405)\n\n\n@csrf_exempt\n@require_http_methods(['GET', 'POST'])\ndef create_chat(request):\n    if request.method == 'POST':\n        chat_form = NewChatForm(request.POST)\n        member_form = MembersForm(request.POST)\n        if chat_form.is_valid() and member_form.is_valid():\n            current_chat = chat_form.save()\n            # full = member_form.save(commit=False)\n            # full.chat = current_chat\n            # full.save()\n            Member.objects.create(user=current_chat.creator, chat=current_chat)  # JsonResponse обязательно\n            return JsonResponse({'smth': 'OK'})\n    else:\n        chat_form = NewChatForm()\n        member_form = MembersForm()\n    return render_to_response('new_chat.html', {'chat_form': chat_form, 'member_form': member_form}, )\n\n\n@csrf_exempt\ndef list_chats(request, pk=None):\n    \"\"\"\n    Поиск чатов по нику. Пример http://127.0.0.1:8000/chats/user_chats/frostics/\n    :param request:\n    :param pk:\n    :return:\n    \"\"\"\n    if request.method == 'GET':\n        print(pk)\n        if pk is None:\n            # return JsonResponse({'message': 'specify user id'}) #error 400\n            return HttpResponseBadRequest('Specify user!')\n        # user = User.objects.all().filter(username=pk)[0] #try  except; можно проверить .first; get_objects_or_404\n        user = get_object_or_404(User.objects.filter(username=pk))\n        membership = Member.objects.all().filter(user=user.id)\n        chats = list()\n        for member in membership:\n            chats.append(member.chat)\n        response = dict()\n        for chat in chats:\n            response['chat {}'.format(chat.id)] = {\n                'user': user.first_name + ' ' + user.last_name,\n                'is_group_chat': chat.is_group_chat,\n                'topic': chat.topic or 'absent',\n                'creator': chat.creator.first_name + ' ' + chat.creator.last_name,\n            }\n        return JsonResponse(response, json_dumps_params={'ensure_ascii': False})\n    return HttpResponseNotAllowed(permitted_methods=['GET'], status=405)\n\n\n@csrf_exempt\ndef messages_list(request, pk=None):\n    \"\"\"\n    Список сообщений пользователя. 
Пример: http://127.0.0.1:8000/chats/messages/3\n    :param request:\n    :param pk: id пользователя\n    :return:\n    \"\"\"\n    if request.method == 'GET':\n        if pk is None:\n            return HttpResponseBadRequest('enter chat id')\n        current_chat = get_object_or_404(Chat, id=pk)\n        chat_messages = Message.objects.filter(chat=current_chat) # related\n        response = dict()\n        for message in chat_messages:\n            response['message {}'.format(message.id)] = {\n                'user': message.user.to_json(),\n                'chat': message.chat.to_json(),\n                'content': message.content,\n                'added_at': message.added_at\n            }\n        return JsonResponse(response)\n    return HttpResponseNotAllowed(permitted_methods=['GET'], status=405)\n\n\n@csrf_exempt\ndef send_message(request):\n    \"\"\"\n    Отправка сообщения\n    :param request:\n    :return:\n    \"\"\"\n    if request.method == 'POST':\n        message_form = MessageForm(request.POST)\n        # print(message_form)\n        if message_form.is_valid():\n            message = message_form.save()\n            Chat.objects.all().filter(id=message.chat_id).update(last_message=message.content)\n            # print('Before')\n            # print(str(message.user) + ' ' + str(message.chat) + ' ' + str(message.content))\n            # Message.objects.create(user=message.user, chat=message.chat, content=message.content)\n            # print('After')\n            # Chat.objects.all().filter(id=message.chat)[0].update(last_message=message.content)\n            # Message.objects.select_related().filter(chat=message.chat).update(last_message=message.content)\n\n    else:\n        message_form = MessageForm()\n    return render_to_response('send_message.html', {'message_form': message_form})\n\n\n# def read_message(request, pk1=None, pk2=None):\n#     if request.method == 'GET':\n#         chat = Chat.objects.all().filter(id=pk1)\n#         print(chat[0])\n#         if chat.exists():\n#             user = User.objects.all().filter(username=pk2)\n#             print(user[0])\n#             if user.exists():\n#                 member = Member.objects.all().filter(user=user[0]) and Member.objects.all().filter(chat=chat[0])\n#                 # member = Member.objects.all().filter(chat=chat[0])\n#                 print(member[0])\n#                 if member.exists():\n#                     last_message = Message.objects.all().filter(user=user[0]) and Message.objects.all().filter(chat=chat[0])\n#                     # last_message = Message.objects.all().filter(chat=chat[0])\n#                     if last_message.exists():\n#                          Member.objects.all().filter(user=user[0]) and Member.objects.all().filter(chat=chat[0]).update(last_read_message=last_message[0])\n#                         # Member.objects.all().filter(chat=chat[0]).update(last_read_message=last_message[0])\n#     return JsonResponse({'dzfse': 'zzzzzfsef'})\n\n\ndef read_message(request, pk1=None, pk2=None):\n    \"\"\"\n    Читаем все сообщения по заходу в чат. 
Пример: http://127.0.0.1:8000/chats/read/3/frostics\n    :param request:\n    :param pk1: чат id\n    :param pk2: ник\n    :return:\n    \"\"\"\n    if request.method == 'GET':\n        chat = get_object_or_404(Chat, id=pk1)\n        user = get_object_or_404(User, username=pk2)\n        member = Member.objects.filter(user=user, chat=chat)\n        # member = list(set(get_list_or_404(Member.objects.filter(user=user))) & set(get_list_or_404(\n        #     Member.objects.filter(chat=chat))))[0]\n        last_message = Message.objects.filter(user=user).first()\n        member.update(last_read_message=last_message)\n        return JsonResponse({})\n    else:\n        return HttpResponseNotAllowed(permitted_methods=['GET'])\n","repo_name":"ArtemCoolAc/2019-2-Atom-Backend-A-Kutuzov","sub_path":"messenger/chats/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
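render_to_response, used throughout the views record above, was deprecated in Django 2.0 and removed in Django 3.0; the replacement is render, which takes the request explicitly so that middleware and context processors see it. A hedged sketch of the migration for one of the views (the forms are omitted here for brevity, so the context is a placeholder):

from django.shortcuts import render

def create_chat(request):
    # before: return render_to_response('new_chat.html', context)
    context = {}  # chat_form / member_form would go here
    return render(request, 'new_chat.html', context)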
+{"seq_id":"17824189264","text":"import isuelogit as isl\nimport pandas as pd\nimport tensorflow as tf\nfrom typing import Dict, List, Tuple\nfrom nesuelogit.models import NESUELOGIT\nfrom nesuelogit.metrics import zscore, nmse\n\nfrom isuelogit.printer import block_output\n\ndef simulate_features(n_days, time_variation = False, **kwargs) -> pd.DataFrame:\n    \"\"\"\n    \n    :param n_days: \n    :param time_variation: when True, the values of the exogenous features change between different timepoints \n    :param kwargs: \n    :return: \n    \"\"\"\n\n    linkdata_generator = isl.factory.LinkDataGenerator()\n\n    df_list = []\n\n    for i in range(1, n_days + 1):\n\n        if i == 1 or time_variation:\n            df_day = linkdata_generator.simulate_features(**kwargs)\n            df_day.insert(0, 'timepoint', i)\n        else:\n            df_day = df_day.assign(timepoint = i)\n\n        df_list.append(df_day)\n\n    df = pd.concat(df_list)\n\n    return df\n\n\ndef simulate_nesuelogit_data(model: NESUELOGIT,\n                             X,\n                             optimizer,\n                             threshold_relative_gap = 1e-5,\n                             max_epochs = 100,\n                             loss_metric = None,\n                             batch_size = None,\n                             # coverage = 1,\n                             sd_x: float = 0,\n                             sd_t: float = 0):\n\n    \"\"\"\n\n    :param model:\n    :param X: The last column has the period id. The remaining columns correspond to the exogenous features\n    :param learning_rate:\n    :param threshold_relative_gap:\n    :param max_epochs:\n    :param coverage:\n    :param sd_x:  the standard deviation relative to the true mean of traffic counts\n    :param sd_t:\n    :return:\n    \"\"\"\n    if loss_metric is None:\n        loss_metric = zscore\n\n    model.compute_equilibrium(tf.cast(X, dtype = model.dtype),\n                              loss_metric= loss_metric,\n                              optimizer= optimizer,\n                              batch_size=batch_size,\n                              loss_weights={'equilibrium': 1},\n                              threshold_relative_gap= threshold_relative_gap,\n                              # epochs_print_interval= _EPOCHS_PRINT_INTERVAL,\n                              epochs=max_epochs)\n\n    for var in optimizer.variables():\n        var.assign(tf.zeros_like(var))\n\n    # This should end in a single epoch as the relative threshold is the same than for computing equlibrium\n    with block_output(show_stdout=False, show_stderr=False):\n        Y_pred = model.predict(tf.cast(X, dtype = model.dtype),\n                               period_dict={k: v for k, v in model.period_dict.items()},\n                               pretrain_link_flows=False,\n                               loss_metric= loss_metric,\n                               optimizer= optimizer,\n                               batch_size=batch_size,\n                               loss_weights={'equilibrium': 1},\n                               threshold_relative_gap=threshold_relative_gap,\n                               epochs=1)\n\n    traveltimes, link_flows = tf.unstack(Y_pred, axis = -1)\n\n    linkdata_generator = isl.factory.LinkDataGenerator()\n\n    noisy_flow = linkdata_generator.add_error_counts(original_counts=link_flows.numpy(), sd_x=sd_x)\n    noisy_traveltime = linkdata_generator.add_error_counts(original_counts=traveltimes.numpy(), 
sd_x=sd_t)\n\n    return tf.stack([noisy_traveltime,noisy_flow], axis = 2)\n\n# def simulate_suelogit_data(days: List,\n#                            features_data: pd.DataFrame,\n#                            network: TransportationNetwork,\n#                            equilibrator: Equilibrator,\n#                            coverage = 1,\n#                            sd_x: float = 0,\n#                            sd_t: float = 0,\n#                            daytoday_variation = False,\n#                            **kwargs):\n#     linkdata_generator = isl.factory.LinkDataGenerator()\n#\n#     df_list = []\n#\n#     for i, period in enumerate(days):\n#         printIterationBar(i + 1, len(days), prefix='days:', length=20)\n#\n#         # linkdata_generator.simulate_features(**kwargs)\n#         df_period = features_data[features_data.period == period]\n#\n#         network.load_features_data(linkdata=df_period)\n#\n#         if i == 0 or daytoday_variation:\n#\n#             with block_output(show_stdout=False, show_stderr=False):\n#                 counts, _ = linkdata_generator.simulate_counts(\n#                     network=network,\n#                     equilibrator=equilibrator, #{'mu_x': 0, 'sd_x': 0},\n#                     coverage=1)\n#\n            # masked_counts, _ = linkdata_generator.mask_counts_by_coverage(\n            #     original_counts=np.array(list(counts.values()))[:, np.newaxis], coverage=coverage)\n#\n#         counts_day_true = np.array(list(counts.values()))[:, np.newaxis]\n#         counts_day_noisy = linkdata_generator.add_error_counts(\n#             original_counts=masked_counts, sd_x=sd_x)\n#\n#         df_period['counts'] = counts_day_noisy\n#         df_period['true_counts'] = counts_day_true\n#\n#         # Generate true travel times from true counts\n#         network.load_traffic_counts(counts=dict(zip(counts.keys(),counts_day_true.flatten())))\n#         df_period['true_traveltime'] =  [link.true_traveltime for link in network.links]\n#\n#         # Put nan in links where no traffic count data is available\n#         df_period['traveltime'] = [link.true_traveltime if ~np.isnan(count) else float('nan')\n#                                         for link,count in zip(network.links, masked_counts)]\n#\n#         df_period['traveltime'] = linkdata_generator.add_error_counts(\n#             original_counts=np.array(df_period['traveltime'])[:, np.newaxis], sd_x=sd_t)\n#\n#         df_list.append(df_period)\n#\n#     df = pd.concat(df_list)\n#\n#     # df.groupby('link_key').agg('mean')\n#\n#     return df","repo_name":"pabloguarda/nesuelogit","sub_path":"src/nesuelogit/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":5934,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
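The simulate_nesuelogit_data function above packs travel times and link flows into a single tensor along a trailing axis and pulls them apart with tf.unstack; tf.stack is the exact inverse. A small sketch of that round trip (the shapes are illustrative, not the model's):

import tensorflow as tf

y = tf.random.normal((5, 3, 2))              # (links, periods, [traveltime, flow])
traveltimes, flows = tf.unstack(y, axis=-1)  # two (5, 3) tensors
y_again = tf.stack([traveltimes, flows], axis=2)
assert y_again.shape == y.shape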
+{"seq_id":"26132499057","text":"import argparse\n\nfrom actions.TfidfLearningAction import TfidfLearningAction\nfrom actions.FrequencyAction import FrequencyAction\nfrom actions.TfidfClassificationAction import TfidfClassificationAction\nfrom actions.TfidfAction import TfidfAction\nfrom actions.PreprocessAction import PreprocessAction\n\ncommands = dict(frequency=FrequencyAction,\n                # preprocess=PreprocessAction,\n                tfidf=TfidfAction,\n                tfidfLearning=TfidfLearningAction,\n                tfidfClassification=TfidfClassificationAction)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='DDI NLP program')\n    parser.add_argument('command', action=\"store\", help='command name')\n    parser.add_argument('-log', '-l', action=\"store\", help='turn on log', default=False)\n    parser.add_argument('-input', action=\"store\", help='input file')\n    parser.add_argument('-output', action=\"store\", help='output file')\n    args = parser.parse_args()\n    command = commands[args.command](args.input, args.output)\n    command.make()\n    input(\"Press Enter to continue...\")\n\n\n# sequency for tf-idf:\n# don't need it: frequency -input \"data\\DDICorpus\\Train\\DrugBank\" -output \"data\\frequencies\"\n# 1. tfidf -input \"data\\DDICorpus\\Train\\DrugBank\" -output \"data\\tfidf\\tfidf_results.xml\"\n# 2. tfidfLearning -input \"data\\tfidf\\tfidf_results.xml\" -output \"\"\n# 3. tfidfClassification -input \"data\\tfidf\\tfidf_results.xml\" -output \"\"\n\n\n#\tпараметры debug configuration для разных задач:\n#\n#\tпосчитать значения tfIdf\n#\t-l -c tfidf -output data/tfIdfResults.xml\n#   tfidf -output data/tfIdfResults.xml\n#\n#\tобучение по tfIdf\n#\t-l -c tfidfLearning -input data/tfIdfResults.xml\n#\n#\tклассификация по tfIdf\n#\t-l -c tfidfClassification -input data/tfIdfResults.xml\n\n","repo_name":"mpMelnikov/ddi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"3996833381","text":"# https://atcoder.jp/contests/past201912/tasks/past201912_d\r\n\r\nN = int(input())\r\nA = [int(input()) for _ in range(N)]\r\n\r\ncnt = [0] * N\r\nfor i in range(N):\r\n    cnt[A[i]-1] += 1\r\n\r\nx, y = 0, 0\r\nfor i in range(N):\r\n    if cnt[i] == 0:\r\n        x = i+1\r\n    elif cnt[i] == 2:\r\n        y = i+1\r\n\r\nif x == 0:\r\n    print(\"Correct\")\r\nelse:\r\n    print(y, x)\r\n","repo_name":"Hironobu-Kawaguchi/atcoder","sub_path":"atcoder/past201912_d.py","file_name":"past201912_d.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37293048186","text":"from django import forms #django的forms 表單 \nfrom .models import Photo #使用到models的Photo\n\n\n#建立表單---驗證的地方\nclass UploadModelForm(forms.ModelForm): #準備上傳\n\n\tclass Meta:\n\t\tmodel= Photo\n\t\tfields = ('image',)\n\t\twidgets = {'image':forms.FileInput(attrs={'class':'form-control-file'})\n\t\t}\n\t\t\t\n\t\t\t\n","repo_name":"royfancy/web_test","sub_path":"photos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74536794138","text":"import bisect\nimport sys\n\n\ndef get_max_budget(budget_request: list, country_budget: int):\n    if sum(budget_request) <= country_budget:\n        return budget_request[-1]\n\n    lo = 1\n    hi = budget_request[-1]\n    max_budget = 0\n\n    while lo <= hi:\n        mid = (lo + hi) // 2\n        new_budget = get_new_budget(budget_request, mid)\n\n        if new_budget == country_budget:\n            return mid\n        elif new_budget < country_budget:\n            max_budget = mid\n            lo = mid + 1\n        else:\n            hi = mid - 1\n\n    return max_budget\n\n\ndef get_new_budget(budget: list, limit: int):\n    idx = bisect.bisect_left(budget, limit)\n    return (len(budget) - idx) * limit + sum(budget[:idx])\n\n\nif __name__ == \"__main__\":\n    read = sys.stdin.readline\n    n = int(read())\n    input_budget_request = sorted((map(int, read().split())))\n    input_country_budget = int(read())\n    print(get_max_budget(input_budget_request, input_country_budget))\n","repo_name":"hoduulmu/ps","sub_path":"파이썬_알고리즘_인터뷰/Part_18_이진탐색/baekjoon2512_예산/my_code.py","file_name":"my_code.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"41068812757","text":"import datetime\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nimport common\nimport scoring\nfrom dataclasses import dataclass\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\npd.options.mode.chained_assignment = None \n\npd.set_option('display.max_colwidth',200)\nrecipe_folder_path = 'recipes'\nnutrition_spreadsheet_path = \"Release 2 - Nutrient file.xlsx\"\n\nrename_column_dict = { \n    'Available carbohydrate, without sugar alcohols (g)': 'carbs',\n    # 'Energy with dietary fibre, equated (kJ)': 'kilojoules'\n    }\n\n\n@dataclass\nclass Recipe:\n    ingredients: pd.DataFrame\n    score: int = 0\n\n\ndef create_recipe_folder():\n    if not os.path.exists(recipe_folder_path):\n        os.makedirs(recipe_folder_path)\n\ndef get_nutrition_data():\n    df = pd.read_excel(nutrition_spreadsheet_path, sheet_name=1)\n    df.columns = [x.replace(\"\\n\", \"\") for x in df.columns.to_list()]\n    return df\n\n\ndef filter_ingredient_list(df, include_ingredients, exclude_ingredients):\n    df = df[df['Food Name'].str.contains(\"|\".join(include_ingredients), case=False)]\n    df = df[df[\"Food Name\"].str.contains(\"|\".join(exclude_ingredients), case=False)==False]\n    return df\n\n\ndef multiply_columns(column, weights):\n    return column * weights\n\n\n\ndef generate_random_recipes(list_of_ingredients):\n    print(\"Generating Recipes...\")\n    recipe_generation_attempts = config['recipe_generation_attempts']\n    for i in range(recipe_generation_attempts):\n        number_of_ingredients_min = config['number_of_ingredients'][\"min\"]\n        number_of_ingredients_max = config['number_of_ingredients'][\"max\"]\n        number_of_ingredients = random.randint(number_of_ingredients_min, number_of_ingredients_max)\n        ingredients_df = list_of_ingredients.sample(n = number_of_ingredients)\n        ingredients_df.insert(1,'Amount (g)', 0)\n        ingredients_df.insert(2,'Calories', 0)\n        ingredients_df['Amount (g)'] = ingredients_df['Amount (g)'].apply(lambda x: np.random.randint(config['ingredient_max_grams']))\n        ingredients_df[config['included_micronutrients']] = ingredients_df[config['included_micronutrients']].apply(lambda x: multiply_columns(x, (ingredients_df['Amount (g)'] / 100) ))\n        ingredients_df['Calories'] = round(ingredients_df['Energy with dietary fibre, equated (kJ)'] / 4.184, 0)\n        ingredients_df = ingredients_df.append(ingredients_df.sum(numeric_only=True), ignore_index=True)\n        ingredients_df.index += 1 \n        recipe_score = scoring.score_recipe(ingredients_df)\n\n\ndef rename_columns(df):\n    renamed_columns = df.rename(\n        columns={\n            'Available carbohydrate, without sugar alcohols (g)': 'carbs' \n            }, inplace=True)\n    return renamed_columns\n\nif __name__ == \"__main__\":\n    print(\"Starting Random Recipe Generator!\")\n    config = common.config()\n    create_recipe_folder()\n    ingredient_list = get_nutrition_data()\n    print(f\"Number of rows in Nutrition Database : {len(ingredient_list.index)}\")\n    filtered_ingredient_list = filter_ingredient_list(ingredient_list, config['include_ingredients'], config['exclude_ingredients'])\n    print(f\"Number of rows after filtering : {len(filtered_ingredient_list.index)}\")\n    print(f\"Minimum Recipe Score : {config['minimum_recipe_score']}\")\n    print(f\"Number of Recipe Generations : {config['recipe_generation_attempts']}\")\n    
filtered_ingredient_list.rename(columns=rename_column_dict, inplace=True)\n    filtered_ingredient_list = filtered_ingredient_list[[\"Food Name\"] + config['included_micronutrients']]\n    generate_random_recipes(filtered_ingredient_list)","repo_name":"alexlaverty/random-recipe-generator","sub_path":"recipe-generator.py","file_name":"recipe-generator.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
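DataFrame.append, used in the recipe generator above to attach the totals row, was deprecated in pandas 1.4 and removed in pandas 2.0. A small sketch of the pd.concat replacement (the column names and values are illustrative, not taken from the nutrient file):

import pandas as pd

df = pd.DataFrame({"Food Name": ["oats", "milk"], "Amount (g)": [40, 250], "carbs": [24.0, 12.0]})
totals = df.sum(numeric_only=True).to_frame().T  # one-row frame holding the column sums
totals["Food Name"] = "TOTAL"
df = pd.concat([df, totals], ignore_index=True)
print(df)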
+{"seq_id":"72169605340","text":"#!/usr/bin/env python\n\"\"\"A class fo ensuring existance of nailgun Product entities\n\"\"\"\nimport nailgun.entities\n\nfrom type_handler import type_handler\nfrom nailgun_hacks import (\n    satellite_get_response,\n    satellite_json_to_entity,\n)\n\nfrom entity_ensurer import EntityEnsurer\nfrom org_context_entity_ensurer import OrgContextEntityEnsurer\n\n\n# The code in the class below is mostly ment as a workaround for:\n# - https://bugzilla.redhat.com/show_bug.cgi?id=1256645\n# - https://bugzilla.redhat.com/show_bug.cgi?id=1256717\n# But the workaround does not work yet, therefor the ensurer cannot be\n# properly used with non-custom products (E.g. the 'subscription' propery\n# should never be specified for an ensured product)\n@type_handler(fortype=nailgun.entities.Product, incls=EntityEnsurer)\nclass ProductEnsurer(OrgContextEntityEnsurer):\n    \"\"\"Ensurer class for Product entities\n\n    Product entities can either be custom products that always belong to\n    organizations and can be handled just like any other entity that requires\n    organization_id as search context, or products that are provided by\n    subscriptions which can not be searched or created.\n\n    The way this ensurer behaves is that is subscription is provided as a\n    product propery, the product will simply be searched by name on the given\n    subscription and never created.\n    \"\"\"\n    def ensure(self, entity_cls, **attrs):\n        \"\"\"Verify that a product with the given properties can be found under\n        the specified subscription or exists as a custom product with the given\n        attributes if subscription unspecified\n\n        :param type entity_cls: The (nailgun) class of the Satellte entity to\n                                be managed\n\n        :returns: A pointer to the entity that was created that can be assigned\n                  to other entities` link attributes (Please do no assume that\n                  is an entity object - this is subject to change)\n        \"\"\"\n        if 'subscription' in attrs:\n            return self.ensure_in_context(self, **attrs)\n        else:\n            return super(type(self), self).ensure(entity_cls, **attrs)\n\n    def ensure_in_context(self, subscription, name):\n        \"\"\"Ensure that a Product with the given name exists in the given\n        subscription\n\n        :param str name: The product name\n        :param nailgun.entities.Subscription subscription: The subscription for\n                                                           the product\n        :rturns: An entity representing the product (an exception is raised\n                 if no matching product is found)\n        :rtype: nailgun.entities.Product\n        \"\"\"\n        try:\n            self._products_in_subscriptions\n        except AttributeError:\n            self._products_in_subscriptions = {}\n        try:\n            return self._products_in_subscriptions.setdefault(\n                subscription.id,\n                self._get_products_in_subscription(subscription)\n            )[name]\n        except KeyError:\n            raise KeyError(\n                'Product in: {} with name: {} not found'\n                .format(self.format_entity(subscription, name))\n            )\n\n    def _get_products_in_subscription(self, subscription):\n        \"\"\"Returns a dictionariy mapping product names to Product entities for\n        the products in the given subscription\n\n        :param nailgun.entities.Subscription 
subscription: The subscription\n        :rtype: dict\n        \"\"\"\n        path = 'katello/api/v2/subscriptions/{}'.format(subscription.id)\n        subscription_json = satellite_get_response(path)\n        name_dict = dict(\n            (\n                prod_json['name'],\n                satellite_json_to_entity(prod_json, nailgun.entities.Product)\n            )\n            for prod_json in subscription_json['provided_products']\n        )\n        return name_dict\n","repo_name":"ifireball/python-satellite-dsl","sub_path":"satellite_dsl/product_ensurer.py","file_name":"product_ensurer.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
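One subtlety in ensure_in_context above: dict.setdefault(key, expensive()) evaluates the expensive call on every lookup, because Python evaluates arguments before the call is made; when the key is already cached, only the result is discarded, so the per-subscription cache never actually avoids the API round trip. A minimal demonstration and the usual guard:

calls = 0

def fetch(key):
    global calls
    calls += 1
    return {"name": key}

cache = {}
for _ in range(3):
    cache.setdefault("sub1", fetch("sub1"))  # fetch runs all three times
assert calls == 3

cache.clear()
calls = 0
for _ in range(3):
    if "sub1" not in cache:                  # guard: fetch only on a cache miss
        cache["sub1"] = fetch("sub1")
assert calls == 1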
+{"seq_id":"769446754","text":"import math\nclass Category:\n    def __init__(self, name):\n        self.ledger = []\n        self.name = name\n\n    def __str__(self):\n        text_list = []\n        text = \"\"\n        for i in self.ledger:\n            if len(i[\"description\"]) < 23:    \n                text_list.append(i[\"description\"].ljust(23, \" \") + (\"%.2f\" % i[\"amount\"]).rjust(7, \" \"))\n            else:    \n                text_list.append(i[\"description\"][:23] + (\"%.2f\" % i[\"amount\"]).rjust(7, \" \"))\n        text += self.name.center(30, '*')\n        for i in text_list:\n            text += f\"\\n{i}\"\n        text += f\"\\nTotal: {self.get_balance()}\"\n        return text\n    \n    def deposit(self, amt, des=\"\"):\n        self.ledger.append({\"amount\": amt, \"description\": des})\n    \n    def withdraw(self, amt, des=\"\"):\n        if self.check_funds(amt):\n            self.ledger.append({\"amount\": -amt, \"description\": des})\n            return True\n        else:\n            return False\n\n    def get_balance(self):\n        bal = 0\n        for i in self.ledger:\n            if float(i[\"amount\"]) > 0:\n                bal += float(i[\"amount\"])\n            elif float(i[\"amount\"]) < 0:\n                bal += float(i[\"amount\"])\n        return bal\n    \n    def transfer(self, amt, name):\n        if self.check_funds(amt):\n            self.ledger.append({\"amount\": -amt, \"description\": f'Transfer to {name.name}'})\n            name.ledger.append({\"amount\": amt, \"description\": f'Transfer from {self.name}'})\n            return True\n        else:\n            return False\n    \n    def check_funds(self, amt):\n        bal = self.get_balance()\n        if amt > bal: \n            return False\n        else:\n            return True\n\n\n\ndef create_spend_chart(categories):\n  s = \"Percentage spent by category\\n\"\n\n  total = 0\n  cats = {}\n  for cat in categories:\n    cat_total = 0\n    for item in cat.ledger:\n      amount = item[\"amount\"]\n      if amount < 0:\n        total += abs(amount)\n        cat_total += abs(amount)\n\n    cats[cat.name] = cat_total\n\n  cats = {\n    k: (v / total) * 100\n    for k, v in cats.items()\n  }\n\n  dash_width = len(cats) * 3 + 1\n  spaces = dash_width - 1\n  for n in range(100, -1, -10):\n    s += f\"{n:>3}| \"\n    bar_row = []\n    for val in cats.values():\n      row_val = [' '] * 3\n      if val >= n:\n        row_val[0] = \"o\"\n      bar_row += row_val\n    s += f\"{''.join(bar_row)}{' ' * (spaces - len(bar_row))}\\n\"\n    \n  s += f\"{' ' * 4}{'-' * dash_width}\\n\"\n\n  cat_names = [list(name) for name in cats]\n  while any(cat_names):\n    s += f\"{' ' * 4}\"\n    for name in cat_names:\n      s += f\" {' ' if not name else name.pop(0)} \"\n    s += \" \\n\"\n  # Need to add strip to remove the newline character for last line and then add back the spaces. If anyone has a better solution, let me know :)\n  s = s.strip() + '  '\n\n  # print(s)\n  return s\n","repo_name":"Meheer17/Budget-App-Py","sub_path":"budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11271914220","text":"import setuptools\n# from setuptools import setup, find_packages\n\nVERSION = '0.0.4'\nDESCRIPTION = 'MP3 tag information reader and writer.'\nLONG_DESCRIPTION = 'MP3 contains many information about the music and stored in the tag. This package support read and write id3v2 tag to the mp3 file.'\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n    LONG_DESCRIPTION = fh.read()\n\nsetuptools.setup(\n    name='mp3tag',\n    version=VERSION,\n    author='Wang Shiqiang',\n    author_email='cocowool@qq.com',\n    description=DESCRIPTION,\n    long_description=LONG_DESCRIPTION,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/cocowool/mp3tag\",\n    project_urls={\n        \"Bug Tracker\":\"https://github.com/cocowool/mp3tag/issues\"\n    },\n    package_dir={\"\":\"src\"},\n    packages=setuptools.find_packages(where=\"src\"),\n    install_requires=[],\n\n    keywords=['mp3','id3','id3v2'],\n    classifiers=[],\n    python_requires=\">=3.6\"\n)","repo_name":"cocowool/mp3tag","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"16588311859","text":"def BBlanckspace(L):\n    l = []\n    s = 0\n    for i in L:\n        if i == 0:\n            s += 1\n        elif i == 1:\n            l.append(s)\n            s = 0\n    if s > 0:\n        l.append(s)\n        s = 0\n    if len(l) == 0:\n        return 0\n    else:\n        return max(l)\n\nt = int(input())\nfor i in range(t):\n    n = int(input())\n    L = list(map(int, input().split()))\n    print(BBlanckspace(L))","repo_name":"RottenDoom/Competitive-Prog-Codes","sub_path":"BBlankspace.py","file_name":"BBlankspace.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"37628259723","text":"from .remote import Remote\nfrom .agent import Agent, AgentType, AgentState\nfrom .sensor import GpsData\nfrom .geometry import Vector, Transform, Spawn, Quaternion\nfrom .utils import accepts, ObjectState\nfrom .controllable import Controllable\n\nfrom enum import Enum\nfrom collections import namedtuple\nfrom environs import Env\nfrom datetime import datetime\nimport re\n\nRaycastHit = namedtuple(\"RaycastHit\", \"distance point normal\")\n\nWeatherState = namedtuple(\"WeatherState\", \"rain fog wetness cloudiness damage\")\nWeatherState.__new__.__defaults__ = (0,) * len(WeatherState._fields)\n\nenv = Env()\n\nfrom loguru import logger\n\nclass Simulator:\n\n    class SimulatorCameraState(Enum):\n        FREE = 0\n        FOLLOW = 1\n        CINEMATIC = 2\n        DRIVER = 3\n\n    @accepts(str, int)\n    def __init__(self, address=env.str(\"LGSVL__SIMULATOR_HOST\", \"localhost\"), port=env.int(\"LGSVL__SIMULATOR_PORT\", 8181)):\n        if port <= 0 or port > 65535:\n            raise ValueError(\"port value is out of range\")\n        self.remote = Remote(address, port)\n        self.agents = {}\n        self.callbacks = {}\n        self.stopped = False\n        self.stop_flag = False\n\n    def close(self):\n        self.remote.close()\n\n    @accepts(str, int)\n    def load(self, scene, seed=None):\n        self.remote.command(\"simulator/load_scene\", {\"scene\": scene, \"seed\": seed})\n        self.agents.clear()\n        self.callbacks.clear()\n\n    @property\n    def version(self):\n        return self.remote.command(\"simulator/version\")\n\n    @property\n    def layers(self):\n        return self.remote.command(\"simulator/layers/get\")\n\n    @property\n    def current_scene(self):\n        return self.remote.command(\"simulator/current_scene\")\n\n    @property\n    def current_scene_id(self):\n        return self.remote.command(\"simulator/current_scene_id\")\n\n    @property\n    def current_frame(self):\n        return self.remote.command(\"simulator/current_frame\")\n\n    @property\n    def current_time(self):\n        return self.remote.command(\"simulator/current_time\")\n\n    @property\n    def available_agents(self):\n        return self.remote.command(\"simulator/available_agents\")\n\n    @property\n    def available_npc_behaviours(self):\n        return self.remote.command(\"simulator/npc/available_behaviours\")\n\n    @accepts(Transform)\n    def set_sim_camera(self, transform):\n        self.remote.command(\"simulator/camera/set\", {\"transform\": transform.to_json()})\n\n    @accepts(SimulatorCameraState)\n    def set_sim_camera_state(self, state):\n        self.remote.command(\"simulator/camera/state/set\", {\"state\": state.value})\n\n    def agents_traversed_waypoints(self, fn):\n        self._add_callback(None, \"agents_traversed_waypoints\", fn)\n\n    def reset(self):\n        self.remote.command(\"simulator/reset\")\n        self.agents.clear()\n        self.callbacks.clear()\n\n    def stop(self):\n        self.stopped = True\n\n    @accepts((int, float), (int, float))\n    def run(self, time_limit=0.0, time_scale=None):\n        self._process(\"simulator/run\", {\"time_limit\": time_limit, \"time_scale\": time_scale})\n\n    @accepts((int, float), (int, float))\n    def run_custom(self, time_limit=0.0, time_scale=None):\n        self.stop_flag = False\n        logger.info('[PythonAPI] simulator.run_custom')\n        cmd = \"simulator/run\"\n        args = {\"time_limit\": time_limit, \"time_scale\": time_scale}\n    
    j = self.remote.command_run(cmd, args)\n\n    def check_status(self):\n        j = self.remote.status_monitor()\n        if j is None:\n            return False\n        if \"events\" in j:\n            stop = self._process_events(j[\"events\"])\n            logger.warning('events: ' + str(j[\"events\"]))\n            logger.warning('stop: ' + str(stop))\n            return stop\n        return False\n\n    def _add_callback(self, agent, name, fn):\n        if agent not in self.callbacks:\n            self.callbacks[agent] = {}\n        if name not in self.callbacks[agent]:\n            self.callbacks[agent][name] = set()\n        self.callbacks[agent][name].add(fn)\n\n    def _process_events(self, events):\n        self.stopped = False\n        stop = False\n        for ev in events:\n            if \"agent\" in ev:\n                agent = self.agents[ev[\"agent\"]]\n                if agent in self.callbacks:\n                    callbacks = self.callbacks[agent]\n                    event_type = ev[\"type\"]\n                    if event_type in callbacks:\n                        for fn in callbacks[event_type]:\n                            if event_type == \"collision\":\n                                stop = True\n                                fn(agent, self.agents.get(ev[\"other\"]), Vector.from_json(ev[\"contact\"]) if ev[\"contact\"] is not None else None)\n                            elif event_type == \"waypoint_reached\":\n                                fn(agent, ev[\"index\"])\n                            elif event_type == \"stop_line\":\n                                fn(agent)\n                            elif event_type == \"lane_change\":\n                                fn(agent)\n                            elif event_type == \"destination_reached\":\n                                stop = True\n                                fn(agent)\n                            elif event_type == \"custom\":\n                                fn(agent, ev[\"kind\"], ev[\"context\"])\n                            if self.stopped:\n                                return\n            elif None in self.callbacks:\n                callbacks = self.callbacks[None]\n                event_type = ev[\"type\"]\n                if event_type in callbacks:\n                    for fn in callbacks[event_type]:\n                        if event_type == \"agents_traversed_waypoints\":\n                            fn()\n        return stop\n\n    def _process(self, cmd, args):\n        # logger.warning('[PythonAPI] _process: ' + str(cmd) + ' ' + str(args))\n        j = self.remote.command(cmd, args)\n        while True:\n            # logger.error('[PythonAPI] j value: ' + str(j))\n            if j is None:\n                return\n            if \"events\" in j:\n                _ = self._process_events(j[\"events\"])\n                if self.stopped:\n                    break\n            j = self.remote.command(\"simulator/continue\")\n            # logger.error('_process continue j: ' + str(j))\n\n    @accepts(str, AgentType, (AgentState, type(None)), (Vector, type(None)))\n    def add_agent(self, name, agent_type, state=None, color=None):\n        if state is None: state = AgentState()\n        if color is None: color = Vector(-1, -1, -1)\n        args = {\"name\": name, \"type\": agent_type.value, \"state\": state.to_json(), \"color\": color.to_json()}\n        uid = self.remote.command(\"simulator/add_agent\", args)\n        agent = Agent.create(self, uid, agent_type)\n        
agent.name = name\n        self.agents[uid] = agent\n        return agent\n\n    @accepts(Agent)\n    def remove_agent(self, agent):\n        self.remote.command(\"simulator/agent/remove\", {\"uid\": agent.uid})\n        del self.agents[agent.uid]\n        if agent in self.callbacks:\n            del self.callbacks[agent]\n\n    @accepts(AgentType)\n    def add_random_agents(self, agent_type):\n        args = {\"type\": agent_type.value}\n        self.remote.command(\"simulator/add_random_agents\", args)\n\n    def get_agents(self):\n        return list(self.agents.values())\n\n    @property\n    def weather(self):\n        j = self.remote.command(\"environment/weather/get\")\n        return WeatherState(j.get(\"rain\", 0), j.get(\"fog\", 0), j.get(\"wetness\", 0), j.get(\"cloudiness\", 0), j.get(\"damage\", 0))\n\n    @weather.setter\n    @accepts(WeatherState)\n    def weather(self, state):\n        self.remote.command(\"environment/weather/set\", {\"rain\": state.rain, \"fog\": state.fog, \"wetness\": state.wetness, \"cloudiness\": state.cloudiness, \"damage\": state.damage})\n\n    @property\n    def time_of_day(self):\n        return self.remote.command(\"environment/time/get\")\n\n    @property\n    def current_datetime(self):\n        date_time_str = self.remote.command(\"simulator/datetime/get\")\n        date_time_arr = list(map(int, re.split('[. :]', date_time_str)))\n        date_time = datetime(\n            date_time_arr[2],\n            date_time_arr[1],\n            date_time_arr[0],\n            date_time_arr[3],\n            date_time_arr[4],\n            date_time_arr[5]\n        )\n        return date_time\n\n    @accepts((int, float), bool)\n    def set_time_of_day(self, time, fixed=True):\n        self.remote.command(\"environment/time/set\", {\"time\": time, \"fixed\": fixed})\n\n    @accepts(datetime, bool)\n    def set_date_time(self, date_time, fixed=True):\n        date_time = date_time.__str__()\n        self.remote.command(\"environment/datetime/set\", {\"datetime\": date_time, \"fixed\": fixed})\n\n    def get_spawn(self):\n        spawns = self.remote.command(\"map/spawn/get\")\n        return [Spawn.from_json(spawn) for spawn in spawns]\n\n    @accepts((Transform, Spawn))\n    def map_to_gps(self, transform):\n        j = self.remote.command(\"map/to_gps\", {\"transform\": transform.to_json()})\n        return GpsData(j[\"latitude\"], j[\"longitude\"], j[\"northing\"], j[\"easting\"], j[\"altitude\"], j[\"orientation\"])\n\n    def map_from_gps(self, latitude=None, longitude=None, northing=None, easting=None, altitude=None, orientation=None):\n        c = []\n        coord = {\n            \"latitude\": latitude,\n            \"longitude\": longitude,\n            \"northing\": northing,\n            \"easting\": easting,\n            \"altitude\": altitude,\n            \"orientation\": orientation\n        }\n        c.append(coord)\n        return self.map_from_gps_batch(c)[0]\n\n    def map_from_gps_batch(self, coords):\n        # coords dictionary\n        jarr = []\n\n        for c in coords:\n            j = {}\n            numtype = (int, float)\n            if (\"latitude\" in c and c[\"latitude\"] is not None) and (\"longitude\" in c and c[\"longitude\"] is not None):\n                if not isinstance(c[\"latitude\"], numtype): raise TypeError(\"Argument 'latitude' should have '{}' type\".format(numtype))\n                if not isinstance(c[\"longitude\"], numtype): raise TypeError(\"Argument 'longitude' should have '{}' type\".format(numtype))\n  
              if c[\"latitude\"] < -90 or c[\"latitude\"] > 90: raise ValueError(\"Latitude is out of range\")\n                if c[\"longitude\"] < -180 or c[\"longitude\"] > 180: raise ValueError(\"Longitude is out of range\")\n                j[\"latitude\"] = c[\"latitude\"]\n                j[\"longitude\"] = c[\"longitude\"]\n            elif (\"northing\" in c and c[\"northing\"] is not None) and (\"easting\" in c and c[\"easting\"] is not None):\n                if not isinstance(c[\"northing\"], numtype): raise TypeError(\"Argument 'northing' should have '{}' type\".format(numtype))\n                if not isinstance(c[\"easting\"], numtype): raise TypeError(\"Argument 'easting' should have '{}' type\".format(numtype))\n                if c[\"northing\"] < 0 or c[\"northing\"] > 10000000: raise ValueError(\"Northing is out of range\")\n                if c[\"easting\"] < 160000 or c[\"easting\"] > 834000: raise ValueError(\"Easting is out of range\")\n                j[\"northing\"] = c[\"northing\"]\n                j[\"easting\"] = c[\"easting\"]\n            else:\n                raise Exception(\"Either latitude and longitude or northing and easting should be specified\")\n            if \"altitude\" in c and c[\"altitude\"] is not None:\n                if not isinstance(c[\"altitude\"], numtype): raise TypeError(\"Argument 'altitude' should have '{}' type\".format(numtype))\n                j[\"altitude\"] = c[\"altitude\"]\n            if \"orientation\" in c and c[\"orientation\"] is not None:\n                if not isinstance(c[\"orientation\"], numtype): raise TypeError(\"Argument 'orientation' should have '{}' type\".format(numtype))\n                j[\"orientation\"] = c[\"orientation\"]\n            jarr.append(j)\n\n        jarr = self.remote.command(\"map/from_gps\", jarr)\n        transforms = []\n        for j in jarr:\n            transforms.append(Transform.from_json(j))\n        return transforms\n\n    @accepts(Vector)\n    def map_point_on_lane(self, point):\n        j = self.remote.command(\"map/point_on_lane\", {\"point\": point.to_json()})\n        return Transform.from_json(j)\n\n    @accepts(Vector, Quaternion)\n    def map_from_nav(self, position, orientation):\n        res = self.remote.command(\n            \"map/from_nav\",\n            {\n                \"position\": position.to_json(),\n                \"orientation\": orientation.to_json()\n            }\n        )\n        return Transform.from_json(res)\n\n    @accepts(Transform, Vector)\n    def set_nav_origin(self, transform, offset=Vector()):\n        self.remote.command(\n            \"navigation/set_origin\",\n            {\n                \"transform\": transform.to_json(),\n                \"offset\": offset.to_json(),\n            }\n        )\n\n    def get_nav_origin(self):\n        res = self.remote.command(\"navigation/get_origin\")\n        nav_origin = None\n        if res:\n            nav_origin = {\n                \"transform\": Transform.from_json(res),\n                \"offset\": res[\"offset\"]\n            }\n        return nav_origin\n\n    @accepts(Vector, Vector, int, float)\n    def raycast(self, origin, direction, layer_mask=-1, max_distance=float(\"inf\")):\n        hit = self.remote.command(\"simulator/raycast\", [{\n            \"origin\": origin.to_json(),\n            \"direction\": direction.to_json(),\n            \"layer_mask\": layer_mask,\n            \"max_distance\": max_distance\n        }])\n        if hit[0] is None:\n            return None\n     
   return RaycastHit(hit[0][\"distance\"], Vector.from_json(hit[0][\"point\"]), Vector.from_json(hit[0][\"normal\"]))\n\n    def raycast_batch(self, args):\n        jarr = []\n        for arg in args:\n            jarr.append({\n                \"origin\": arg[\"origin\"].to_json(),\n                \"direction\": arg[\"direction\"].to_json(),\n                \"layer_mask\": arg[\"layer_mask\"],\n                \"max_distance\": arg[\"max_distance\"]\n            })\n\n        hits = self.remote.command(\"simulator/raycast\", jarr)\n        results = []\n        for hit in hits:\n            if hit is None:\n                results.append(None)\n            else:\n                results.append(RaycastHit(hit[\"distance\"], Vector.from_json(hit[\"point\"]), Vector.from_json(hit[\"normal\"])))\n\n        return results\n\n    @accepts(str, (ObjectState, type(None)))\n    def controllable_add(self, name, object_state=None):\n        if object_state is None: object_state = ObjectState()\n        args = {\"name\": name, \"state\": object_state.to_json()}\n        j = self.remote.command(\"simulator/controllable_add\", args)\n        controllable = Controllable(self.remote, j)\n        controllable.name = name\n        return controllable\n\n    @accepts(Controllable)\n    def controllable_remove(self, controllable):\n        self.remote.command(\"simulator/controllable_remove\", {\"uid\": controllable.uid})\n        del self.controllables[controllable.uid]\n\n    @accepts(str)\n    def get_controllables(self, control_type=None):\n        j = self.remote.command(\"controllable/get/all\", {\n            \"type\": control_type,\n        })\n        return [Controllable(self.remote, controllable) for controllable in j]\n\n    @accepts(str)\n    def get_controllable_by_uid(self, uid):\n        j = self.remote.command(\"controllable/get\", {\n            \"uid\": uid,\n        })\n        return Controllable(self.remote, j)\n\n    @accepts(Vector, str)\n    def get_controllable(self, position, control_type=None):\n        j = self.remote.command(\"controllable/get\", {\n            \"position\": position.to_json(),\n            \"type\": control_type,\n        })\n        return Controllable(self.remote, j)\n","repo_name":"MingfeiCheng/BehAVExplor","sub_path":"PythonAPI-Apollo-async/lgsvl/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":15629,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"69"}
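The @accepts(...) decorator that this simulator client leans on is imported from lgsvl's utils module and not shown in the record. Purely as an illustration of the technique (this is an assumption-laden sketch, not the library's actual implementation), a positional type-checking decorator might look like:

import functools

def accepts(*types):
    # check positional arguments (self excluded) against the declared types;
    # a tuple such as (int, float) is accepted by isinstance as "either"
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            for arg, expected in zip(args, types):
                if arg is not None and not isinstance(arg, expected):
                    raise TypeError(f"{fn.__name__}: expected {expected}, got {type(arg)}")
            return fn(self, *args, **kwargs)
        return wrapper
    return decorator

class Demo:
    @accepts(str, int)
    def load(self, scene, seed=0):
        return scene, seed

assert Demo().load("SanFrancisco", 42) == ("SanFrancisco", 42)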
+{"seq_id":"42394132845","text":"from selenium.webdriver.common.by import By\n\nfrom bdd.controllers.base_controller import BaseController\n\n\nclass Frame(BaseController):\n\n    BASE_LOC = \".//frame\"\n\n    def __init__(self, driver, css_locator=None, xpath_locator=None, locator=None, parent=None):\n        super().__init__(driver, parent)\n        self._driver = driver\n\n        if not (css_locator or locator or xpath_locator):\n            raise ValueError(f\"Element label name or locator is missing in controller class {__name__}\")\n\n        if locator:\n            self._locator = locator\n        elif css_locator:\n            self._locator = (By.CSS_SELECTOR, css_locator)\n        elif xpath_locator:\n            self._locator = (By.XPATH, xpath_locator)\n\n    def switch_to_iframe(self):\n        try:\n            self.wait_till_element_is_not_displayed()\n            ele = self.get_element()\n            self._driver.switch_to_frame(ele)\n        except Exception as err:\n            raise Exception(f\"Error {err} at {__name__}\")\n\n    def switch_to_default(self):\n        try:\n            self._driver.switch_to_default()\n        except Exception as err:\n            raise Exception(f\"Error {err} at {__name__}\")\n","repo_name":"bhattacharjeedebashis/python_behave_framework","sub_path":"bdd/controllers/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7996126369","text":"import pickle\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nclass NN(object):\n    def __init__(self,\n                 hidden_dims=(512, 256),\n                 datapath='cifar10.pkl',\n                 n_classes=10,\n                 epsilon=1e-6,\n                 lr=7e-4,\n                 batch_size=1000,\n                 seed=None,\n                 activation=\"relu\",\n                 ):\n\n        self.hidden_dims = hidden_dims\n        self.n_hidden = len(hidden_dims)\n        self.datapath = datapath\n        self.n_classes = n_classes\n        self.lr = lr\n        self.batch_size = batch_size\n        self.seed = seed\n        self.activation_str = activation\n        self.epsilon = epsilon\n\n        self.train_logs = {'train_accuracy': [], 'validation_accuracy': [], 'train_loss': [], 'validation_loss': []}\n\n        if datapath is not None:\n            u = pickle._Unpickler(open(datapath, 'rb'))\n            u.encoding = 'latin1'\n            self.train, self.valid, self.test = u.load()\n        else:\n            self.train, self.valid, self.test = None, None, None\n\n    def initialize_weights(self, dims): # dims is of size 2 containing the input dimension and the number of classes\n        if self.seed is not None:\n            np.random.seed(self.seed)\n\n        self.weights = {}\n        # self.weights is a dictionary with keys W1, b1, W2, b2, ..., Wm, Bm where m - 1 is the number of hidden layers\n        all_dims = [dims[0]] + list(self.hidden_dims) + [dims[1]]\n        for layer_n in range(1, self.n_hidden + 2):\n            low = -1.0/np.sqrt(all_dims[layer_n - 1])\n            high = 1.0/np.sqrt(all_dims[layer_n - 1])\n            self.weights[f\"W{layer_n}\"] = np.random.uniform(low, high, (all_dims[layer_n - 1], all_dims[layer_n]))\n            self.weights[f\"b{layer_n}\"] = np.zeros((1, all_dims[layer_n])) # no biases on input dimension\n\n    def relu(self, x, grad=False):\n        if grad:\n            return (self.relu(x) > 0).astype(int)\n        return np.maximum(0, x)\n\n# source for numerically stable sigmoid: https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/\n    def sigmoid(self, x, grad=False):\n        if grad:\n            return self.sigmoid(x) * (1 - self.sigmoid(x))\n        \"Numerically stable sigmoid function.\"\n        if x.all() >= 0:\n            z = np.exp(-x)\n            return 1 / (1 + z)\n        else:\n            # if x is less than zero then z will be small, denom can't be zero because it's 1+z.\n            z = np.exp(x)\n            return z / (1 + z)\n\n    def tanh(self, x, grad=False):\n        if grad:\n            return 1 - self.tanh(x) ** 2\n        return (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))\n\n    def activation(self, x, grad=False):\n        if self.activation_str == \"relu\":\n            return self.relu(x, grad)\n        elif self.activation_str == \"sigmoid\":\n            return self.sigmoid(x, grad)\n        elif self.activation_str == \"tanh\":\n            return self.tanh(x, grad)\n        else:\n            raise Exception(\"invalid\")\n        return 0\n\n    def softmax(self, x):\n        if x.ndim > 1:\n            e_x = np.exp(x - np.max(x, keepdims=True))\n            return e_x / np.sum(e_x, axis=1, keepdims=True)\n        else:\n            e_x = np.exp(x - np.max(x))\n            return e_x / np.sum(e_x, axis=0)\n\n\n    def forward(self, x):\n        cache = {\"Z0\": x}\n        # cache is a dictionary with keys Z0, 
A0, ..., Zm, Am where m - 1 is the number of hidden layers\n        # Ai corresponds to the preactivation at layer i, Zi corresponds to the activation at layer i\n        num_layers = self.n_hidden + 2\n        Z = x\n        for layer_n in range(1, num_layers):  # iterating through number of layers\n            weights = self.weights[f\"W{layer_n}\"]\n            biases = self.weights[f\"b{layer_n}\"]\n            A = np.dot(Z, weights) + biases\n            cache[f\"A{layer_n}\"] = A\n            if layer_n == num_layers - 1:\n                Z = self.softmax(A)\n                cache[f\"Z{layer_n}\"] = Z\n            else:\n                Z = self.activation(A)\n                cache[f\"Z{layer_n}\"] = Z\n        return cache\n\n    def backward(self, cache, labels):  # cache is from the forward function on a mini-batch\n        output = cache[f\"Z{self.n_hidden + 1}\"]\n        grad_a = - (labels - output)\n        grads = {}\n        num_layers = self.n_hidden + 2\n        for i in range(num_layers - 1, 0, -1):\n            grad_W = np.dot(grad_a.T, cache[f\"Z{i - 1}\"]).T\n            grad_b = np.sum(grad_a, axis=0)[None, :]\n            grads[f\"dA{i}\"] = grad_a\n            grads[f\"dW{i}\"] = grad_W / float(self.batch_size)\n            grads[f\"db{i}\"] = grad_b / float(self.batch_size)\n            if i > 1:\n                grad_h = np.dot(grad_a, self.weights[f\"W{i}\"].T)\n                grads[f\"dZ{i}\"] = grad_h\n                grad_a = np.multiply(grad_h, self.activation(cache[f\"A{i - 1}\"], grad=True))\n        return grads\n\n    def update(self, grads):\n        for layer in range(1, self.n_hidden + 2):\n            self.weights[f\"W{layer}\"] = self.weights[f\"W{layer}\"] - (self.lr *  grads[f\"dW{layer}\"])\n            self.weights[f\"b{layer}\"] = self.weights[f\"b{layer}\"] - (self.lr * grads[f\"db{layer}\"])\n\n    def one_hot(self, y):\n        b = np.zeros((y.size, self.n_classes))\n        b[np.arange(y.size), y] = 1\n        return b\n\n    def loss(self, prediction, labels):\n        prediction[np.where(prediction < self.epsilon)] = self.epsilon\n        prediction[np.where(prediction > 1 - self.epsilon)] = 1 - self.epsilon\n        N = prediction.shape[0]\n        ce = -np.sum(labels * np.log(prediction + 1e-9)) / N\n        return ce\n\n    def compute_loss_and_accuracy(self, X, y):\n        one_y = self.one_hot(y)\n        cache = self.forward(X)\n        predictions = np.argmax(cache[f\"Z{self.n_hidden + 1}\"], axis=1)\n        accuracy = np.mean(y == predictions)\n        loss = self.loss(cache[f\"Z{self.n_hidden + 1}\"], one_y)\n        return loss, accuracy, predictions\n\n    def train_loop(self, n_epochs):\n        X_train, y_train = self.train\n        y_onehot = self.one_hot(y_train)\n        dims = [X_train.shape[1], y_onehot.shape[1]]\n        self.initialize_weights(dims)\n\n        n_batches = int(np.ceil(X_train.shape[0] / self.batch_size))\n\n        for epoch in range(n_epochs):\n            for batch in range(n_batches):\n                minibatchX = X_train[self.batch_size * batch:self.batch_size * (batch + 1), :]\n                minibatchY = y_onehot[self.batch_size * batch:self.batch_size * (batch + 1), :]\n                cache = self.forward(minibatchX)\n                grads = self.backward(cache, minibatchY)\n                self.update(grads)\n\n            X_train, y_train = self.train\n            train_loss, train_accuracy, _ = self.compute_loss_and_accuracy(X_train, y_train)\n            X_valid, y_valid = self.valid\n\n   
         valid_loss, valid_accuracy, _ = self.compute_loss_and_accuracy(X_valid, y_valid)\n\n            self.train_logs['train_accuracy'].append(train_accuracy)\n            self.train_logs['validation_accuracy'].append(valid_accuracy)\n            self.train_logs['train_loss'].append(train_loss)\n            self.train_logs['validation_loss'].append(valid_loss)\n\n        return self.train_logs\n\n    def evaluate(self):\n        X_test, y_test = self.test\n        test_loss, test_accuracy, _ = self.compute_loss_and_accuracy(X_test, y_test)\n        return test_loss, test_accuracy\n\n\ndef plot_curves(train, valid, epochs, metric_name, model_kwargs):\n    t = np.arange(len(train))\n    plt.ylabel(f'Average {metric_name}')\n    plt.xlabel('Epoch')\n    plt.grid(True)\n    plt.xlim(0, epochs)\n    plt.plot(t, train)\n    plt.plot(t, valid)\n    plt.title(f\"Train and Valid {metric_name} on {epochs} epochs\\n with {model_kwargs}\")\n    plt.legend([\"train\", \"valid\"], loc='upper right')\n    plt.savefig(f\"{metric_name}-{epochs}-{model_kwargs}.png\", bbox_inches='tight')\n    plt.clf()\n\ndef main(seed, hidden_dims):\n    n_epochs = 50\n    kwargs = {\n        \"seed\": seed,\n        \"hidden_dims\": hidden_dims,\n        \"lr\": 0.003,\n        \"batch_size\": 100,\n    }\n    model = NN(**kwargs)\n    train_logs = model.train_loop(n_epochs=n_epochs)\n    test_loss, test_accuracy = model.evaluate()\n\n    print(kwargs)\n    print(test_loss, test_accuracy)\n    for metric_name in [\"loss\", \"accuracy\"]:\n        plot_curves(train_logs[f'train_{metric_name}'],\n                    train_logs[f'validation_{metric_name}'],\n                    n_epochs,\n                    metric_name,\n                    kwargs)\n\n\nif __name__ == '__main__':\n    for seed in [0, 1, 2]:\n        for hidden_dims in [(512, 256), (512, 120, 120, 120, 120, 120, 120, 120)]:\n            main(hidden_dims=hidden_dims, seed=seed)\n","repo_name":"k1c/IFT6390_HW3","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":8713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
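A quick shape-and-loss sanity check for the forward/backward passes above, run on random data so cifar10.pkl is not needed (datapath=None skips loading):

import numpy as np

net = NN(hidden_dims=(16, 8), datapath=None, n_classes=4, batch_size=5, seed=0)
net.initialize_weights(dims=[10, 4])      # 10 input features, 4 classes

X = np.random.randn(5, 10)
labels = net.one_hot(np.random.randint(0, 4, size=5))

cache = net.forward(X)
grads = net.backward(cache, labels)
assert grads["dW1"].shape == net.weights["W1"].shape
assert grads["db1"].shape == net.weights["b1"].shape
print(net.loss(cache[f"Z{net.n_hidden + 1}"], labels))  # a small positive float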
+{"seq_id":"19612541550","text":"import json\nfrom src.error import InputError, AccessError\nfrom src.admin import getData, findUser\nfrom src.notifications import new_tagged_notification\nfrom src.wrapper import Authorisation\nimport time\nimport datetime\nimport threading\nfrom src.user import append_user_stats, append_dream_stats\n\nfile_address = \"src/export.json\"\n\n@Authorisation\ndef standup_start_v1(user_token, channel_id, length):\n    \"\"\"The function start a standup and will last for some seconds. All the message sent in\n    that time period will be buffered and send together afer that.\n\n    Args:\n        user_token (string): a token string used to authorise and get the user id\n        channel_id (int): the channel id where the standup starts\n        length (int): the number of seconds\n\n    Raises:\n        InputError: channel id invalid\n        InputError: standup already starts\n        AccessError: user not in the\n\n    Returns:\n        dictionary: {'time_finish': time_finish}\n    \"\"\"\n\n    database = getData()\n\n    auth_user_id = findUser(user_token)\n\n    if is_channel_valid(database, channel_id) == False:\n        raise InputError(description=\"Channel id is not valid\")\n    \n    index = get_channel_index(database, channel_id)\n    channel = database['channels'][index]\n\n    if channel['standup']['is_active'] == True:\n        raise InputError(description=\"Standup is already active\")\n\n    if is_user_in_channel(auth_user_id, channel) == False:\n        raise AccessError(description=\"You are no in the channel\")\n\n    time_finish = (datetime.datetime.now()+datetime.timedelta(seconds=length)).strftime(\"%Y-%m-%d %H:%M:%S\")\n    time_finish = time.strptime(time_finish, \"%Y-%m-%d %H:%M:%S\")\n    time_finish = time.mktime(time_finish)\n\n    standup_length = length\n\n    database['channels'][index]['standup']['is_active'] = True\n    database['channels'][index]['standup']['standup_length'] = standup_length\n    database['channels'][index]['standup']['time_finish'] = time_finish\n    database['channels'][index]['standup']['messages'] = \"\"\n\n    with open(file_address, \"w\") as f:\n        json.dump(database, f)\n\n    new_thread = threading.Timer(length, standup_package, args=[index, user_token, channel_id, time_finish])\n    new_thread.start()\n\n    return {'time_finish': time_finish}\n\n\n@Authorisation\ndef standup_active_v1(user_token, channel_id):\n    \"\"\"check if the standup has started\n\n    Args:\n        user_token (string): a token string used to authorise and get the user id\n        channel_id (int): the channel id where the standup starts\n\n    Raises:\n        InputError: channel id invalid\n\n    Returns:\n        dic: {'is_active': is_active,\n             'time_finish': time_finish,\n            }, bool for is_active, time_finish for the time when standup finishes\n    \"\"\"\n\n    database = getData()\n\n    #auth_user_id = findUser(user_token)\n\n    if is_channel_valid(database, channel_id) == False:\n        raise InputError(\"Channel id is not valid\")\n    \n    index = get_channel_index(database, channel_id)\n    channel = database['channels'][index]\n\n    is_active = channel['standup']['is_active']\n\n    if is_active == True:\n        time_finish = channel['standup']['time_finish']\n    else:\n        time_finish = None\n\n    return {'is_active': is_active,\n            'time_finish': time_finish,\n            }\n\n@Authorisation\ndef standup_send_v1(user_token, channel_id, message):\n    \"\"\"get the 
messages that will be buffered\n\n    Args:\n        user_token (string): a token string used to authorise and get the user id\n        channel_id (int): the channel id where the standup starts\n        message (string): the message\n\n    Raises:\n        InputError: channel id invalid\n        InputError: message too long\n        InputError: standup not started yet\n        AccessError: user not in the channel\n    \n    Returns:\n        dic: {}\n    \"\"\"\n\n\n    database = getData()\n    auth_user_id = findUser(user_token)\n\n    if is_channel_valid(database, channel_id) == False:\n        raise InputError(description=\"Channel id is not valid\")\n\n    if len(message) > 1000:\n        raise InputError(description=\"Too much charecters in message\")\n         \n    index = get_channel_index(database, channel_id)\n    channel = database['channels'][index]\n\n    if channel['standup']['is_active'] == False:\n        raise InputError(description=\"The channel does not have an active standup\")\n\n    if is_user_in_channel(auth_user_id, channel) == False:\n        raise AccessError(description=\"You are no in the channel\")\n\n    user = use_id_to_find_user(auth_user_id)\n\n    original_msg = database['channels'][index]['standup']['messages']\n    if original_msg == \"\":\n        original_msg = user['handle_str'] + \": \" + message\n    else:\n        original_msg = original_msg + \"\\n\" + user['handle_str'] + \": \" + message \n    \n    database['channels'][index]['standup']['messages'] = original_msg\n\n    with open(file_address, \"w\") as f:\n        json.dump(database, f)\n\n    return {}\n\n\ndef standup_package(index, user_token, channel_id, time_finish):\n    \"\"\"a helper function to store the messages sent in the standup period\n    \"\"\"\n\n    database = getData()\n    new_message = database['channels'][index]['standup']['messages']\n    \n    auth_user_id = findUser(user_token)\n\n    if database['messages'] == []:\n        new_message_id = 1\n    else:\n        new_message_id = database['messages'][-1]['message_id'] + 1\n    \n    database['messages'].append({\n        'message_id' : new_message_id,\n        'u_id' : auth_user_id,\n        'channel_id': channel_id,\n        'dm_id' : -1,\n        'message' : new_message,         \n        'time_created' : time_finish,\n        'reacts' : [],\n        'is_pinned' : False,           \n    })\n\n    database['channels'][index]['messages'].append(new_message_id)\n\n    database['channels'][index]['standup']['is_active'] = False\n    database['channels'][index]['standup']['standup_length'] = 0\n    database['channels'][index]['standup']['time_finish'] = 0\n    database['channels'][index]['standup']['messages'] = \"\"\n\n    append_user_stats(auth_user_id, database)\n    append_dream_stats(auth_user_id, database)\n    \n    words = new_message.split()\n    handle_list = []\n    for word in words:\n        if word[0] == '@':\n            word = word.replace(',','')\n            handle_list.append(word[1:])\n    \n    u_id = -1\n    handle_status = False\n    member_status = False   \n    for handle in handle_list:\n        u_id = -1\n        handle_status = False\n        member_status = False        \n        for user in database['users']:\n            if handle == user['handle_str']:\n                handle_status = True\n                u_id = user['u_id']\n                \n        channel_ids = {'channel_id' : channel_id}\n        channel = get_channel_details(channel_ids, database)\n        for members in 
channel['all_members']:\n            if u_id == members['u_id']:\n                member_status = True\n\n        with open(file_address, \"w\") as f:\n            json.dump(database, f)\n\n        if handle_status == True and member_status == True:\n            user_notification = new_tagged_notification(user_token, channel_id, -1, u_id, new_message)\n            database['user_notifications'].append(user_notification)\n\n\n    with open(file_address, \"w\") as f:\n        json.dump(database, f)\n    \n\n\n\n\n    \n    \n\n\n\ndef is_channel_valid(database, channel_id):\n    for channel in database['channels']:\n        if channel['channel_id'] == channel_id:\n            return True\n    \n    return False\n\ndef get_channel_index(database, channel_id):\n    l = 0\n    for channel in database['channels']:\n        if channel['channel_id'] == channel_id:\n            return l\n        l = l + 1\n\ndef is_user_in_channel(u_id, channel):\n    for user in channel['all_members']:\n        if user['u_id'] == u_id:\n            return True\n\n    return False\n\ndef use_id_to_find_user(u_id):\n    data = getData()\n    for user in data['users']:\n        if user['u_id'] == u_id:\n            return user\n\ndef get_channel_details(msg, data):\n    for chan in data['channels']:\n        if msg['channel_id'] == chan['channel_id']:\n            return chan","repo_name":"hyhyjjyjy/CS1531_project_backend","sub_path":"src/standups.py","file_name":"standups.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
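A minimal standalone illustration of the deferred-send pattern used by standup_start_v1 above: threading.Timer runs the flush callback once, after the standup length elapses, and the buffered lines go out as one "handle: message" summary.

import threading
import time

buffer = []

def flush():
    print("\n".join(buffer))  # one combined message, like standup_package builds

buffer.append("alice: first update")
buffer.append("bob: second update")
threading.Timer(2, flush).start()  # fires once, ~2 seconds later
time.sleep(3)                      # keep the main thread alive for the demo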
+{"seq_id":"37535500573","text":"# Nested statements and Scope\n\n\"\"\"Important to understand how Python deals with the variable names you assign when you create a variable name in Python.\nThat name is stored in what's called the namespace, and variable names also have a scope.\nThe scope determines the visibility of that variable name to other parts of your code.\"\"\"\n\nx = 25\ndef printer():\n    x = 50 \n    return x\nprint (x) #Return 25\nprint (printer()) #Return 50 \n    \n# The idea of scope allows Python to understand and have a set of rules to decide which variables you're referencing in your code.\n# The rules are L.E.G.B Rule:\n    # L : Local - Names assigned in any way within a function (def or lambda), and not declared global in that function.\n    # E : Enclosing function locals - Names in the local scope of any and all enclosing function locals (def of lambda), from inner to outer.\n    # G : Global (Module) - Names assigned at the top-level of a module file, or declared global in a def within the file.\n    # B : Build-in (Python) - Names preassigned in the build-in names module: open,range, syntaxError,...\n\n# lambda num: num**2\n    # first num is local dor this function\n\n\nname = 'THIS IS A GLOBAL STRING' # Global\ndef greet():\n    name = 'Sammy' # Endclosing\n    def hello():\n        name = 'I AM LOCAL' # Local\n        print('Hello '+ name) \n    hello()\ngreet()\n\n# local variables as well as the global keyword.\nx=50\ndef func():\n    global x \n    print (f'x is {x}')\n    \n    # LOCAL REASSIGNMENT ON A GLOBAL VARIABLE!\n    x = 'NEW VALUE' \n    print (f'I JUST CHANGED GLOBAL X TO {x}')\n\nprint(x) \nfunc()\nprint(x)","repo_name":"Marvell2963/Python-From-Zero---Hero","sub_path":"Unit 20-29/Unit29 s6 global, local assig.py","file_name":"Unit29 s6 global, local assig.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"3741794405","text":"\"\"\"Utilities used by the fabfile.\"\"\"\n\nimport datetime\nimport re\nimport os\nfrom contextlib import contextmanager\nfrom fabric.api import hide, local, puts\n\n\n@contextmanager\ndef msg(txt):\n    puts(txt + '...', end='', flush=True)\n    with hide('everything'):\n        yield\n    puts('done.', show_prefix=False, flush=True)\n\n\n# Tagging/git routines based off of https://gist.github.com/663181.\n\n\ndef get_last_tag_match(str):\n    tags = local('git tag -l \"%s\"' % str, capture=True)\n\n    if len(tags) == 0:\n        return None\n\n    tags = tags.split()\n    tags.sort()\n    return tags[-1]\n\n\ndef get_tag_names(prefix):\n    \"\"\"Return the names of the last and next tag.\"\"\"\n    num = 1\n    today = datetime.date.today()\n    next_tag_name = '%s-%i-%.2i-%.2i' % (prefix, today.year, today.month, today.day)\n    last_tag_name = get_last_tag_match(next_tag_name + '.*')\n\n    if last_tag_name is None:\n        num = 1\n    else:\n        match = re.search('%s-[0-9]{4}-[0-9]{2}-[0-9]{2}\\.([0-9]*)' % prefix, last_tag_name)\n        num = int(match.group(1)) + 1\n\n    next_tag_name = '%s.%.3i' % (next_tag_name, num)\n    return (last_tag_name, next_tag_name)\n\n\ndef need_to_tag(version1, version2):\n    sha_version1 = local('git log --pretty=format:%%H %s -1' % version1, capture=True)\n    if version2:\n        sha_version2 = local('git log --pretty=format:%%H %s -1' % version2, capture=True)\n        if sha_version1 == sha_version2:\n            return False\n    return True\n\n\ndef is_working_directory_clean():\n    status = local('git status', capture=True)\n    if status.find('working directory clean') > -1:\n        return True\n    return False\n\n\ndef lines_in_file(filename, skip_prefixes=None):\n    if not os.path.exists(filename):\n        return\n    if skip_prefixes is None:\n        skip_prefixes = ()\n    for line in open(filename, 'r').readlines():\n        line = line.strip()\n        if line and not line.startswith(skip_prefixes):\n            yield line\n","repo_name":"PrecisionMojo/django-heroku-fabfile","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"74997188698","text":"# from pycaret.regression import load_model, predict_model\nfrom PIL import Image\nimport streamlit as st\nimport pandas as pd\n# import numpy as np\nfrom joblib import load\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.ensemble import RandomForestClassifier\n\n# strealit update: binary to text buffer\nimport io\n\nst.set_option('deprecation.showfileUploaderEncoding', False)\n\nmodel = load('./model-building/pet_model.joblib')\n\n####################\n# Selection Options\n####################\npet_options = {1: \"Dog\", 2: \"Cat\"}\ngender_options = {1: \"Male\", 2: \"Female\", 3: \"Mixed\"}\nmaturity_size_options = {1: 'Small', 2: 'Medium',\n                         3: 'Large', 4: 'Extra Large'}\ncolor_options = {1: 'Black', 2: 'Brown', 3: 'Golden',\n                 4: 'Yellow', 5: 'Cream', 6: 'Gray', 7: 'White'}\nfur_length_options = {1: 'Short', 2: 'Medium', 3: 'Long'}\nvaccinated_options = {1: 'Yes', 2: 'No', 3: 'Not Sure'}\ndewormed_options = {1: 'Yes', 2: 'No', 3: 'Not Sure'}\nsterilized_options = {1: 'Yes', 2: 'No', 3: 'Not Sure'}\nhealth_options = {1: 'Healthy', 2: 'Minor Injury',\n                  3: 'Serious Injury'}\n# Breed options\nbreed = pd.read_csv('./data/breed_labels.csv')\ndog_breed = breed[(breed[\"Type\"] == 1)]\ncat_breed = breed[(breed[\"Type\"] == 2)]\ndog_breed_options = dict(zip(dog_breed['BreedID'], dog_breed['BreedName']))\ncat_breed_options = dict(zip(cat_breed['BreedID'], cat_breed['BreedName']))\n# State options\nsta = pd.read_csv('./data/state_labels.csv')\nstate_options = dict(zip(sta['StateID'], sta['StateName']))\n\npet_type = 1\n\n\ndef user_input_features():\n\n    # Type\n    c_pt, c_q = st.beta_columns(2)\n    pet_type = c_pt.selectbox('Pet Type', options=list(\n        pet_options.keys()), format_func=lambda x: pet_options[x])\n\n    load_img(pet_type)\n    # Quantity\n    quantity = c_q.selectbox(\n        'Quantity', [x for x in range(1, 21)])\n    ####################################\n\n    # Age\n    age = st.slider('Age (months)', 0, 255, 3)\n    ####################################\n\n    c_co, c_b, c_g = st.beta_columns(3)\n    # Breed : show options based on type selection\n    if(pet_type == 1):\n        breed1 = c_b.selectbox('Breed', options=list(\n            dog_breed_options.keys()), format_func=lambda x: dog_breed_options[x])\n    else:\n        breed1 = c_b.selectbox('Breed', options=list(\n            cat_breed_options.keys()), format_func=lambda x: cat_breed_options[x])\n    # Gender\n    gender = c_g.selectbox('Gender', options=list(\n        gender_options.keys()), format_func=lambda x: gender_options[x])\n    # Color\n    color1 = c_co.selectbox('Color', options=list(\n        color_options.keys()), format_func=lambda x: color_options[x])\n    ####################################\n    c_ms, c_fl = st.beta_columns(2)\n    # Maturity Size\n    maturity_size = c_ms.selectbox('Maturity Size', options=list(\n        maturity_size_options.keys()), format_func=lambda x: maturity_size_options[x])\n    # Fur Length\n    fur_length = c_fl.selectbox('Fur length', options=list(\n        fur_length_options.keys()), format_func=lambda x: fur_length_options[x])\n    ####################################\n\n    c1, c2, c3 = st.beta_columns(3)\n    # Vaccinated\n    vaccinated = 
c1.selectbox('Vaccinated', options=list(\n        vaccinated_options.keys()), format_func=lambda x: vaccinated_options[x])\n    # Dewormed\n    dewormed = c2.selectbox('Dewormed', options=list(\n        dewormed_options.keys()), format_func=lambda x: dewormed_options[x])\n    # Sterilized\n    sterilized = c3.selectbox('Sterilized', options=list(\n        dewormed_options.keys()), format_func=lambda x: dewormed_options[x])\n    # Health\n    health = st.selectbox('Pet Type', options=list(\n        health_options.keys()), format_func=lambda x: health_options[x])\n    ####################################\n    c_s, c_p, c_v = st.beta_columns(3)\n    # State\n    state = c_s.selectbox('State', options=list(\n        state_options.keys()), format_func=lambda x: state_options[x])\n    # Photo Amount\n    photo_amt = c_p.selectbox('Photo Amount', [x for x in range(0, 31)])\n    # Video Amount\n    video_amt = c_v.selectbox('Video Amount', [x for x in range(0, 9)])\n    ####################################\n    # Fee\n    fee = st.slider('Fee', 0, 400, 0)\n\n    # Generate dict data\n    data = {'Type': pet_type,\n            'Age': age,\n            'Breed1': breed1,\n            'Gender': gender,\n            'Color1': color1,\n            'MaturitySize': maturity_size,\n            'FurLength': fur_length,\n            'Vaccinated': vaccinated,\n            'Dewormed': dewormed,\n            'Sterilized': sterilized,\n            'Health': health,\n            'Quantity': quantity,\n            'Fee': fee,\n            'State': state,\n            'VideoAmt': video_amt,\n            'PhotoAmt': photo_amt\n            }\n    features = pd.DataFrame(data, index=[0])\n    return features\n\n\ndef predict(model, input_df):\n    predictions = model.predict(input_df)\n    return predictions\n\n\ndef load_img(pet_type):\n    print(pet_type)\n    if (pet_type == 1):\n        return st.sidebar.image(Image.open('./img/dog.png'))\n    else:\n        return st.sidebar.image(Image.open('./img/cat.png'))\n\n\ndef run():\n\n    # image = Image.open('l/imgogo.png')\n    img = \"\"\n    # st.image(image,use_column_width=False)\n    add_selectbox = st.sidebar.selectbox(\n        \"How would you like to predict?\",\n        (\"Online\", \"Batch\"))\n\n    # st.sidebar.success('https://github.com/memoatwit/dsexample/')\n\n    st.image(Image.open('./img/logo.png'))\n    st.title(\"Pet Adoption Prediction App\")\n    st.info(\"In this application, we will be developing algorithms to predict the **adoptability of pets** - specifically, how quickly is a pet adopted? 
The AI tools that will guide shelters and rescuers around the world on improving their pet profiles' appeal, reducing animal suffering and euthanization.\")\n\n####################\n# Online\n####################\n    if add_selectbox == 'Online':\n\n        # age = st.number_input('Age', min_value=1, max_value=100, value=25)\n        # sex = st.selectbox('Sex', ['male', 'female'])\n        # bmi = st.number_input('BMI', min_value=10, max_value=50, value=10)\n        # children = st.selectbox('Children', [0,1,2,3,4,5,6,7,8,9,10])\n        # if st.checkbox('Smoker'):\n        #     smoker = 'yes'\n        # else:\n        #     smoker = 'no'\n        # region = st.selectbox('Region', ['southwest', 'northwest', 'northeast', 'southeast'])\n\n        output = \"\"\n\n        # input_dict = {'age' : age, 'sex' : sex, 'bmi' : bmi, 'children' : children, 'smoker' : smoker, 'region' : region}\n        input_df = user_input_features()\n        print(input_df.columns)\n        adoption_speed = {0: 'Pet should be adopted on the same day as it was listed.',\n                          1: 'Pet should be adopted between 1 and 7 days (1st week) after being listed.',\n                          2: 'Pet should be adopted between 8 and 30 days (1st month) after being listed.',\n                          3: 'Pet should be adopted between 31 and 90 days (2nd & 3rd month) after being listed.',\n                          4: 'Possibly, no adoption after 100 days of being listed.'\n                          }\n        if st.button(\"Predict\"):\n            output = predict(model=model, input_df=input_df)[0]\n            output = adoption_speed[output]\n            st.success('{}'.format(output))\n\n\n####################\n# BATCH\n####################\n\n    if add_selectbox == 'Batch':\n        try:\n            file_buffer = st.file_uploader(\n                \"Upload CSV file with 16 features\", type=[\"csv\"])\n            st.markdown(\"\"\"\n                    [Example CSV input file](https://raw.githubusercontent.com/yennle/PetFinder.my-Adoption-Prediction/main/pet_example.csv)\n                    \"\"\")\n            # st.image(Image.open('./img/example.png'), use_column_width=True)\n            # bytes_data = file_buffer.read()\n            # s = str(bytes_data)\n            # file_upload = io.StringIO(s)\n            data = pd.read_csv(file_buffer)\n            print(data.columns)\n            predictions = predict(model=model, input_df=data)\n            adoption_speed = {0: 'the same day as it was listed.',\n                              1: '1 and 7 days (1st week) after being listed.',\n                              2: '8 and 30 days (1st month) after being listed.',\n                              3: '31 and 90 days (2nd & 3rd month) after being listed.',\n                              4: 'no adoption after 100 days of being listed.'\n                              }\n\n            result = map(lambda x: adoption_speed[x], predictions)\n            result_df = pd.DataFrame(list(result), columns=['Meaning'])\n            prediction_df = pd.DataFrame(\n                predictions, columns=['Prediction Value'])\n            df = pd.concat([prediction_df, result_df], axis=1)\n            st.write(df)\n            # st.write(predictions)\n        except:\n            st.write(\"Please upload a valid CSV file.\")\n\n\nif __name__ == '__main__':\n    
run()\n","repo_name":"yennle/PetFinder.my-Adoption-Prediction","sub_path":"pet-app.py","file_name":"pet-app.py","file_ext":"py","file_size_in_byte":9206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
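All the selectboxes above rely on the same pattern: keep the model's numeric codes as the option values and use format_func only for display. A minimal standalone sketch:

import streamlit as st

gender_options = {1: 'Male', 2: 'Female', 3: 'Mixed'}
gender = st.selectbox(
    'Gender',
    options=list(gender_options.keys()),      # the model sees these ints
    format_func=lambda x: gender_options[x],  # the user sees these labels
)
st.write(f'model input: {gender}')            # an int, not the label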
+{"seq_id":"30312576140","text":"from constants import *\nfrom game.casting.sound import Sound\nfrom game.scripting.action import Action\n\n\nclass CollidePaddlesAction(Action):\n\n    def __init__(self, physics_service, audio_service):\n        self._physics_service = physics_service\n        self._audio_service = audio_service\n        \n    def execute(self, cast, script, callback):\n        ball = cast.get_first_actor(BALL_GROUP)\n        paddles = cast.get_actors(PADDLE_GROUP)\n        \n        ball_body = ball.get_body()\n        paddles_body2 = paddles[1].get_body()\n        paddles_body1 = paddles[0].get_body()\n\n\n        if self._physics_service.is_left_of(ball_body, paddles_body2):\n            ball.bounce_x()\n            sound = Sound(BOUNCE_SOUND)\n            self._audio_service.play_sound(sound) \n\n        if self._physics_service.is_right_of(paddles_body1, ball_body):\n            ball.bounce_x()\n            sound = Sound(BOUNCE_SOUND)\n            self._audio_service.play_sound(sound)   ","repo_name":"BYUI-CSE210/final-project-team-03-pong","sub_path":"pong/game/scripting/collide_paddles_action.py","file_name":"collide_paddles_action.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32232135862","text":"\r\n\r\nimport os, datetime, subprocess, time, json\r\nfrom pathlib import Path\r\nfrom httplib2 import Response\r\nfrom prettyprinter import pprint\r\nfrom snappy import jpy, ProgressMonitor, ProductIO\r\n# from read_all_folders import read_all_folders_in\r\nfrom easydict import EasyDict as edict\r\nfrom datetime import datetime\r\n\r\nimport ee\r\nee.Initialize()\r\n\r\n\r\nFileReader = jpy.get_type('java.io.FileReader')\r\nGraphIO = jpy.get_type('org.esa.snap.core.gpf.graph.GraphIO')\r\nGraph = jpy.get_type('org.esa.snap.core.gpf.graph.Graph')\r\nGraphProcessor = jpy.get_type('org.esa.snap.core.gpf.graph.GraphProcessor')\r\nPrintPM = jpy.get_type('com.bc.ceres.core.PrintWriterProgressMonitor')\r\n\r\n\"\"\" S1_GRD_Preprocessing \"\"\"\r\ndef S1_GRD_Preprocessing(graphFile, input_url, output_url):\r\n    ### Load Graph\r\n    graph = GraphIO.read(graphFile)\r\n\r\n    input_url = str(input_url)\r\n    output_url = str(output_url)\r\n\r\n    graph.getNode(\"read\").getConfiguration().getChild(0).setValue(input_url)\r\n    graph.getNode(\"write\").getConfiguration().getChild(0).setValue(output_url)\r\n\r\n\r\n    ### Execute Graph\r\n    GraphProc = GraphProcessor()\r\n\r\n    ### or a more concise implementation\r\n    # ConcisePM = jpy.get_type('com.bc.ceres.core.PrintWriterConciseProgressMonitor')\r\n    System = jpy.get_type('java.lang.System')\r\n    pm = PrintPM(System.out)\r\n    # ProductIO.writeProduct(resultProduct, outPath, \"NetCDF-CF\", pm)\r\n\r\n    # GraphProcessor.executeGraph(graph, ProgressMonitor.NULL)\r\n    GraphProc.executeGraph(graph, pm)\r\n    # GraphProcessor.executeGraph(graph)\r\n\r\n\"\"\" batch_S1_GRD_processing \"\"\"\r\ndef batch_S1_GRD_processing(input_folder, output_folder, fileList):\r\n    if fileList is None: fileList = os.listdir(str(input_folder))\r\n    for filename in fileList:\r\n\r\n        # if filename[:-4] == \".zip\":\r\n        print(\"\\n\\n\\n\")    \r\n        print(filename)\r\n        print(\"-------------------------------------------------------\\n\")\r\n\r\n        input_url = input_folder / filename.replace(\".tif\", \".zip\")\r\n        output_url = output_folder / (filename.split(\".\")[0] + \".tif\")\r\n\r\n        if not os.path.exists(str(output_url)):\r\n            S1_GRD_Preprocessing(graphFile, input_url, output_url)\r\n    \r\n\r\n\r\ndef load_json(url) -> edict:\r\n    with open(url, 'r') as fp:\r\n        data = edict(json.load(fp))\r\n    return data\r\n\r\n\r\ndef set_image_property(asset_id, query_info):\r\n    # json_folder = Path(\"G:/PyProjects/sentinelhub-auto-query/outputs/BC_ROI_2\")\r\n    # latest_json = sorted(os.listdir(json_folder))[-1]\r\n    # json_url = json_folder / latest_json\r\n\r\n    product_id = os.path.split(asset_id)[-1]\r\n    product_info = query_info['products'][product_id]\r\n\r\n    time_start = datetime.strptime(product_id.split(\"_\")[4], \"%Y%m%dT%H%M%S\").strftime(\"%Y-%m-%dT%H:%M:%S\")\r\n    time_end = datetime.strptime(product_id.split(\"_\")[5], \"%Y%m%dT%H%M%S\").strftime(\"%Y-%m-%dT%H:%M:%S\")\r\n    # footprint = product_info['footprint']\r\n\r\n    print()\r\n    pprint(product_id)\r\n    print(\"-----------------------------------------------------------------\")\r\n    print(time_start)\r\n    # print(footprint)\r\n\r\n    os.system(f\"earthengine asset set --time_start {time_start} {asset_id}\")\r\n    os.system(f\"earthengine asset set --time_end {time_end} {asset_id}\")\r\n\r\n    property_dict = {\r\n        'relativeorbitnumber': 
'relativeOrbitNumber_start',\r\n        'orbitdirection': 'orbitProperties_pass',\r\n    }\r\n\r\n    for property in product_info.keys():\r\n        value = product_info[property]\r\n\r\n        if property in property_dict.keys(): property = property_dict[property]\r\n\r\n        print(property, value)    \r\n        os.system(f\"earthengine asset set -p {property}={value} {asset_id}\")\r\n\r\n    os.system(f\"earthengine asset set -p {'gee'}={'false'} {asset_id}\")\r\n    # os.system(f\"earthengine asset set -p {'transmitterReceiverPolarisation'}={'[VH, VV]'} {asset_id}\")\r\n\r\n\r\n\"\"\" upload_cog_as_eeImgCol \"\"\"\r\ndef upload_cog_as_eeImgCol(dataPath, gs_dir, json_url, fileList=None, upload_flag=True, eeUser=\"omegazhangpzh\"):\r\n    cogPath = dataPath / \"COG\"\r\n\r\n    # eeUser = \"omegazhangpzh\"\r\n    eeImgCol_name = os.path.split(gs_dir)[-1]\r\n    # print(os.path.split(gs_dir))\r\n    eeImgCol = f\"users/{eeUser}/{eeImgCol_name}\"\r\n    print(f\"eeImgCol: {eeImgCol}\")\r\n\r\n    if not os.path.exists(cogPath): os.makedirs(cogPath)\r\n\r\n    S1 = ee.ImageCollection(\"COPERNICUS/S1_GRD\")\r\n    S2 = ee.ImageCollection(\"COPERNICUS/S2\")\r\n\r\n    if fileList is None: fileList = [filename[:-4] for filename in os.listdir(dataPath) if (\".tif\" in filename)]\r\n    fileList = [filename for filename in fileList\r\n                    # if (\".tif\" in filename) # this product doesn't exist in GEE\r\n                    if (S1.filter(ee.Filter.eq(\"system:index\", filename)).size().getInfo() == 0 # if not exist in S1 of GEE\r\n                        and S2.filter(ee.Filter.eq(\"PRODUCT_ID\", filename)).size().getInfo() == 0 # if not exist in S2 of GEE\r\n                    )\r\n                ]\r\n\r\n    pprint(fileList)\r\n\r\n    \"\"\" To COG GeoTiff \"\"\"\r\n    if upload_flag:\r\n\r\n        for filename in fileList:\r\n            print()\r\n            print(filename)\r\n            print(\"---------------------------------------------------------\")\r\n\r\n            src_url = dataPath / f\"{filename}.tif\"\r\n            dst_url = cogPath / f\"{filename}.tif\"\r\n            os.system(f\"gdal_translate {src_url} {dst_url} -co TILED=YES -co COPY_SRC_OVERVIEWS=YES -co COMPRESS=LZW\")\r\n\r\n        \"\"\" Upload COG into GCS \"\"\"\r\n        os.system(f\"gsutil -m cp -r {cogPath}/* {gs_dir}\")\r\n        os.rmdir(cogPath) # delete cog folder after uploading.\r\n\r\n        \"\"\" Upload to earth engine asset \"\"\"\r\n        task_dict = {}\r\n        for filename in fileList:\r\n            print(f\"\\n{filename}\")\r\n            print(\"--------------------------------------------------------------------\")\r\n\r\n            asset_id = f\"{eeImgCol}/{filename}\"\r\n            ee_upload_image = f\"earthengine upload image --asset_id={asset_id} {gs_dir}/{filename}.tif\"\r\n\r\n            ee_upload_response = subprocess.getstatusoutput(ee_upload_image)[1]\r\n            task_id = ee_upload_response.split(\"ID: \")[-1]\r\n            task_dict.update({filename: {'task_id': task_id, 'asset_id': asset_id}})\r\n\r\n            print(f\"{asset_id}\")\r\n            pprint(f\"task id: {task_id}\")\r\n            print()\r\n\r\n\r\n        # \"\"\" get property json \"\"\"\r\n        # json_folder = Path(\"G:/PyProjects/sentinelhub-auto-query/outputs/BC_ROIs\")\r\n        # latest_json = sorted(os.listdir(json_folder))[-1]\r\n        # json_url = json_folder / latest_json\r\n\r\n        query_info = load_json(json_url)\r\n\r\n        \"\"\" check upload status 
\"\"\"\r\n        print(\"=============> check upload status <===============\")\r\n        upload_finish_flag = False\r\n        while(not upload_finish_flag):\r\n            print(\"-------------------------------------------------------\")\r\n            time.sleep(60) # delay 60s\r\n            \r\n            upload_finish_flag = True\r\n            for filename in task_dict.keys():\r\n\r\n                asset_id = task_dict[filename]['asset_id'] #f\"users/omegazhangpzh/Sentinel1/{filename}\"\r\n                task_id = task_dict[filename]['task_id']\r\n\r\n                check_upload_status = f\"earthengine task info {task_id}\"\r\n                response = subprocess.getstatusoutput(check_upload_status)[1]\r\n                state = response.split(\"\\n\")[1].split(\": \")[-1]\r\n                # state = edict(json.loads(response))['state']\r\n\r\n                task_dict[filename].update({'state': state})\r\n\r\n                if state == \"COMPLETED\":\r\n                    os.system(f\"earthengine acl set public {asset_id}\")\r\n\r\n                    # \"\"\" Set Properties \"\"\"\r\n                    set_image_property(asset_id, query_info)\r\n                else:\r\n                    upload_finish_flag = False\r\n\r\n                # check_asset_permission(asset_id)\r\n                print(f\"\\n{asset_id}: {state}\")\r\n\r\n            print()\r\n            # pprint(task_dict)\r\n\r\n\r\n        \"\"\" set image property \"\"\"\r\n        # eeUser = \"omegazhangpzh\"\r\n        # gs_dir = \"gs://wildfire-nrt/Sentinel1\"\r\n\r\n        time.sleep(10) # wait?\r\n        imgCol_name = os.path.split(gs_dir)[-1]\r\n        response = subprocess.getstatusoutput(f\"earthengine ls users/{eeUser}/{imgCol_name}\")\r\n        asset_list = response[1].replace(\"projects/earthengine-legacy/assets/\", \"\").split(\"\\n\")\r\n\r\n        for filename in task_dict.keys():\r\n            asset_id = task_dict[filename]['asset_id']\r\n            if asset_id in asset_list:\r\n                set_image_property(asset_id, query_info)\r\n            else:\r\n                print(f\"{asset_id} [Not Ready in GEE!]\")\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    eeUser = \"omegazhangpzh\"\r\n\r\n    ### update input and output url\r\n    graphFile = FileReader(\"G:\\PyProjects\\sentinelhub-auto-query\\graphs\\S1_GRD_preprocessing_GEE.xml\")\r\n\r\n    folder = \"S1_GRD\"\r\n    input_folder = Path(\"G:/PyProjects/sentinelhub-auto-query/data\") / folder\r\n    output_folder = Path(\"G:/PyProjects/sentinelhub-auto-query/outputs\") / folder\r\n    gs_dir = \"gs://wildfire-nrt/Sentinel1\"\r\n\r\n    \"\"\" get property json \"\"\"\r\n    json_folder = Path(\"G:/PyProjects/sentinelhub-auto-query/outputs/BC_ROIs\")\r\n    latest_json = sorted(os.listdir(json_folder))[-1]\r\n    json_url = json_folder / latest_json\r\n\r\n    import glob\r\n    json_url = sorted(glob.glob(str(json_folder / f\"{folder}*.json\")))[-1]\r\n    print(\"\\njson: \" + os.path.split(json_url)[-1])\r\n\r\n    query_info = load_json(json_url)\r\n    fileList = query_info['results']['products_list']\r\n    pprint(fileList)\r\n\r\n    batch_S1_GRD_processing(input_folder, output_folder, fileList)\r\n    # upload_cog_as_eeImgCol(output_folder, gs_dir, json_url, upload_flag=True, eeUser=eeUser)\r\n\r\n\r\n\r\n","repo_name":"puzhao8/sentinelhub-auto-query","sub_path":"utils/S1S2_auto_query_upload (not product-wise).py","file_name":"S1S2_auto_query_upload (not 
product-wise).py","file_ext":"py","file_size_in_byte":9675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"9183581144","text":"import cv2\nimport numpy as np\nimport sys\n\n# args = sys.argv\n# imgInput = cv2.imread(args[1])\n\ndef doBackgroundSubtraction(imgInput, optAlgo=3):\n    if (optAlgo==1):\n        algo = cv2.createBackgroundSubtractorMOG2() # Using MOG2\n    elif (optAlgo==2):\n        algo =cv2. createBackgroundSubtractorMOG() # Using GMG\n    elif (optAlgo==3):\n        # Using our own code for thresholding\n        h,w = imgInput.shape[:2]\n        tempImage = cv2.cvtColor(imgInput, cv2.COLOR_HSV2BGR)  # Convert image from HSV to BGR format\n        bw_image = cv2.cvtColor(tempImage, cv2.COLOR_BGR2GRAY)  # Convert image from BGR to gray format\n        bw_image = cv2.GaussianBlur(bw_image,(5,5),0)  # Highlight the main object\n        threshold = 1\n        for i in range(h):\n            for j in range(w):\n                if bw_image[i][j] > threshold:\n                    bw_image[i][j] = 0\n                else:\n                    bw_image[i][j] = 255\n        return bw_image\n    return algo.apply(imgInput)\n\ndef doSkinMasking(imgInput):\n    # print(str(imgInput))\n    imgInput = cv2.cvtColor(imgInput, cv2.COLOR_BGR2HSV)\n    bwInput = cv2.cvtColor(imgInput, cv2.COLOR_BGR2GRAY)\n    \n    lowerBoundary = np.array([0,40,30],dtype=\"uint8\")\n    upperBoundary = np.array([43,255,254],dtype=\"uint8\")\n    skinMask = cv2.inRange(imgInput, lowerBoundary, upperBoundary)\n    skinMask = cv2.addWeighted(skinMask,0.5,skinMask,0.5,0.0)\n    #cv2.imshow(\"masked\",skinMask)\n    \n    skinMask = cv2.medianBlur(skinMask, 5)\n    \n    skin = cv2.bitwise_and(bwInput, bwInput, mask = skinMask)\n\n    return skin\n\ndef doNoiseRemoval(imgInput, skinMask):\n    skinMask = cv2.medianBlur(skinMask, 5)\n    skin = cv2.bitwise_and(imgInput, imgInput, mask = skinMask)\n    imgInput = cv2.addWeighted(imgInput,1.5,skin,-0.5,0)\n    skin = cv2.bitwise_and(imgInput, imgInput, mask = skinMask)\n\n    return skin\n\ndef doEdgeDetection(imgInput, optAlgo=1):\n    if (optAlgo==1):\n        return cv2.Canny(imgInput,60,60) #Canny Edge\n    elif (optAlgo==2):\n        return cv2.Sobel(imgInput, -1, 1, 1, 3) #Solbel Edge\n\ndef doDisplay(title, imgInput):\n    #Function to display outputs\n    cv2.imshow(title, imgInput)\n    k = cv2.waitKey(0)\n\n# print(\"\\nStarting image outputs. \\nKeep pressing a key in order to show new window.\")\n# doDisplay(\"Input Image\", imgInput)\n\n# img1 = doSkinMasking(imgInput)\n# doDisplay(\"Skin Masked\", img1)\n\n# img2 = doNoiseRemoval(imgInput, img1)\n# doDisplay(\"Noise Removed\", img2)\n\n# img3 = doBackgroundSubtraction(img2, 1)\n# doDisplay(\"Background Subtraction\", img3)\n\n# img4 = doEdgeDetection(img3, 1)\n# doDisplay(\"Edges detected: Sobel\", img4)","repo_name":"nrishabh/AARC-Simple-Sign-Language-Interpreter","sub_path":"ImgProcessing.py","file_name":"ImgProcessing.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"38681661097","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import transforms\nfrom torchvision import datasets\nfrom torch.utils.data import Dataset\nfrom itertools import accumulate\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nclass NNet(nn.Module):\n    def __init__(self,image_size):\n        super(NNet, self).__init__()\n        self.image_size = image_size\n        self.fc0 = nn.Linear(image_size, 100)\n        self.fc1 = nn.Linear(100, 50)\n        self.fc2 = nn.Linear(50, 10)\n        #self.bn1 = nn.BatchNorm1d(100)\n        #self.bn2 = nn.BatchNorm1d(50)\n    def forward(self, x):\n        x = x.view(-1, self.image_size)\n        x = F.relu(self.fc0(x))\n        #x = F.dropout(x, training=self.training)\n        #x = F.relu(self.bn1(self.fc0(x)))\n        x = F.relu(self.fc1(x))\n        #x = F.dropout(x, training=self.training)\n        #x = F.relu(self.bn2(self.fc1(x)))\n        x = F.relu(self.fc2(x))\n        return F.log_softmax(x)\n\n\ndef train(optimizer, model, train_loader):\n    model.train()\n    for batch_idx, (data, labels) in enumerate(train_loader):\n        optimizer.zero_grad()\n        output = model(data)\n        loss = F.nll_loss(output, labels)\n        loss.backward()\n        optimizer.step()\n\n\ndef test(model,test_loader,file,ifprint=False):\n    model.eval()\n    test_loss = 0\n    correct = 0\n    for data, target in test_loader:\n        output = model(data)\n        test_loss += F.nll_loss(output, target, size_average=False).data[0] # sum up batch loss\n        pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n        if (ifprint):\n            file.write(str(pred.item()))\n            file.write('\\n')\n        correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n    test_loss /= len(test_loader.dataset)\n    print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(test_loss, correct, len(test_loader.dataset),100. * correct / len(test_loader.dataset)))\n    return test_loss, correct,100. 
* correct / len(test_loader.dataset)\n\n\nclass Subset(torch.utils.data.Dataset):\n    def __init__(self, dataset, indices):\n        self.dataset = dataset\n        self.indices = indices\n    def __getitem__(self, idx):\n        return self.dataset[self.indices[idx]]\n    def __len__(self):\n        return len(self.indices)\n\n\ndef random_split(dataset, lengths):\n    if sum(lengths) != len(dataset):\n        raise ValueError(\"Sum of input lengths does not equal the length of the input dataset!\")\n    indices = torch.randperm(sum(lengths))\n    splits = []\n    for i in range(len(lengths)):\n        offset = sum(lengths[:i + 1])\n        splits.append(Subset(dataset, indices[offset - lengths[i]:offset]))\n    return splits\n\n\n'''\ndef _make_dataloaders(train_set, train_size, valid_size, batch_size):\n    # Split training into train and validation\n    indices = torch.randperm(len(train_set))\n    train_indices = indices[:len(indices)-valid_size]\n    valid_indices = indices[len(indices)-valid_size:] if valid_size else None\n\n    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,\n                                               sampler=SubsetRandomSampler(train_indices)) \n    if valid_size:\n        valid_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,\n                                                   sampler=SubsetRandomSampler(valid_indices))\n    else:\n        valid_loader = None\n\n    return train_loader, valid_loader\n'''\n\n\ndef main():\n    file = open(\"results.txt\", 'w+')\n    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n    data_loader = datasets.MNIST('./data', train=True, download=True, transform=transform)\n    test_loader = torch.utils.data.DataLoader(datasets.MNIST('./data', train=False, transform=transform),batch_size=1, shuffle=False)\n\n    '''\n    num_train = len(train_loader.dataset)\n    train_loader, valid_loader = _make_dataloaders(train_loader.dataset, int(num_train*0.8), int(num_train*0.2), batch_size=1)\n    \n   \n    #train_data_set = train_loader.dataset\n    num_train = len(train_data_set)\n    indices = list(range(num_train))\n    split = int(num_train*0.2)\n\n    # Random, non-contiguous split\n    validation_idx = np.random.choice(indices, size=split, replace=False)\n    train_idx = list(set(indices) - set(validation_idx))\n    print(str(len(train_idx)))\n    # Contiguous split\n    # train_idx, validation_idx = indices[split:], indices[:split]\n\n    ## define our samplers -- we use a SubsetRandomSampler because it will return\n    ## a random subset of the split defined by the given indices without replaf\n    train_sampler = SubsetRandomSampler(train_idx)\n    validation_sampler = SubsetRandomSampler(validation_idx)\n\n    print(str(len(train_sampler)))\n\n    train_loader = torch.utils.data.DataLoader(train_data_set,\n                                               batch_size=4, sampler=train_sampler)\n\n    validation_loader = torch.utils.data.DataLoader(train_data_set,\n                                                    batch_size=2, sampler=validation_sampler)\n    '''\n\n    num_train = len(data_loader)\n    loader = random_split(data_loader, [int(num_train * 0.8), int(num_train * 0.2)])\n    train_loader = torch.utils.data.DataLoader(loader[0], batch_size=1, shuffle=True)\n    valid_loader = torch.utils.data.DataLoader(loader[1], batch_size=1, shuffle=True)\n    print(str(len(train_loader)))\n\n    model = NNet(image_size=28 * 
28)\n    epochs = 5\n    lr = 0.005\n\n    optimizer = optim.SGD(model.parameters(), lr=lr)\n    '''\n    optimizer = optim.Adam(model.parameters(), lr=lr)\n    optimizer = optim.AdaDelta(model.parameters(), lr=lr)\n    optimizer = optim.RMSprop(model.parameters(), lr=lr)\n    '''\n\n    results_train_loss = []\n    results_train_correct = []\n    results_train_perc = []\n    results_valid_loss = []\n    results_valid_correct = []\n    results_valid_perc = []\n\n    print('train:')\n    for epoch in range(1, epochs):\n        train(optimizer, model,train_loader)\n        loss, correct, percentage = test(model,train_loader,file)\n        results_train_loss.append(loss)\n        results_train_correct.append(correct)\n        results_train_perc.append(percentage)\n\n    '''\n    optimizer2 = optim.RMSprop(model.parameters(), lr=0.1)    \n    '''\n    print('valid:')\n    for epoch in range(1, epochs):\n        train(optimizer, model,valid_loader)\n        loss, correct, percentage = test(model,valid_loader,file)\n        results_valid_loss.append(loss)\n        results_valid_correct.append(correct)\n        results_valid_perc.append(percentage)\n\n\n    print('test:')\n    loss, correct, percentage = test(model, test_loader,file,True)\n    print(loss, correct, percentage)\n    file.close()\n\n    t = range(1,epochs)\n    plt.interactive(False)\n    plt.plot(t, results_train_loss, 'r') # plotting t, a - normal dist\n    plt.plot(t,  results_valid_loss, 'b')  # plotting t, b - softmax prob\n    red_patch = mpatches.Patch(color='red', label='Train')\n    blue_patch = mpatches.Patch(color='blue', label='Validation')\n    plt.legend(handles=[red_patch,blue_patch])\n    plt.title('Loss for ' + str(epochs) + ' epochs')\n    plt.ylabel('Average Loss')\n    plt.xlabel('epochs')\n    plt.show(block=True)\n    plt.interactive(False)\n    plt.plot(t, results_train_perc, 'r') # plotting t, a - normal dist\n    plt.plot(t,  results_valid_perc, 'b')  # plotting t, b - softmax prob\n    red_patch = mpatches.Patch(color='red', label='Train')\n    blue_patch = mpatches.Patch(color='blue', label='Validation')\n    plt.legend(handles=[red_patch,blue_patch])\n    plt.title('Percentage for ' + str(epochs) + ' epochs')\n    plt.ylabel('Percentage')\n    plt.xlabel('epochs')\n    plt.show(block=True)\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"BoazArdel/ML","sub_path":"ML-EX4/ex_4_code.py","file_name":"ex_4_code.py","file_ext":"py","file_size_in_byte":7881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
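A tiny standalone check of the Subset/random_split helpers defined above, using a TensorDataset so MNIST is not required:

import torch
from torch.utils.data import TensorDataset

data = TensorDataset(torch.arange(10).float().unsqueeze(1), torch.arange(10))
train_part, valid_part = random_split(data, [8, 2])
print(len(train_part), len(valid_part))  # 8 2
print(train_part[0])                     # one (feature, label) pair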
+{"seq_id":"6151434610","text":"# Python standard libraries\nimport json\nimport os\nimport sqlite3\nfrom pathlib import Path\n\n# Third-party libraries\nfrom flask import Flask, flash, redirect, request, url_for, jsonify, render_template\nfrom flask_login import (\n    LoginManager,\n    current_user,\n    login_required,\n    login_user,\n    logout_user,\n)\nfrom oauthlib.oauth2 import WebApplicationClient\nimport requests\n\n# Internal imports\n# DB\nfrom db.db import init_db_command\nfrom models.user import User\nfrom models.organization import Organization\n# Security\nimport security.auth as security\n# Objects\nfrom lib.contribution_organizer import ContributionOrganizer\n\n# Configuration\nautInstance = security.auth()\ncredentials = autInstance.get_credentials()\nGOOGLE_CLIENT_ID = autInstance.get_google_client_id()\nGOOGLE_CLIENT_SECRET = autInstance.get_google_client_secret()\nGOOGLE_DISCOVERY_URL = autInstance.get_discovery_url()\nALLOWED_EXTENSIONS = {'doc', 'docx', 'pdf'}\n\norganizer = ContributionOrganizer(credentials)\n\n\n# Flask app setup\napp = Flask(__name__,\n            static_url_path='',\n            static_folder='web/static',\n            template_folder='web/templates')\n\napp.secret_key = os.environ.get(\"SECRET_KEY\") or os.urandom(24)\napp.config['UPLOAD_FOLDER'] = Path('./public')\n# User session management setup\n# https://flask-login.readthedocs.io/en/latest\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n# Naive database setup\ntry:\n    init_db_command()\nexcept sqlite3.OperationalError:\n    # Assume it's already been created\n    pass\n# OAuth 2 client setup\nclient = WebApplicationClient(GOOGLE_CLIENT_ID)\n\n# Flask-Login helper to retrieve a user from our db\n@login_manager.user_loader\ndef load_user(user_id):\n    return User.get(user_id)\n\n\ndef get_google_provider_cfg():\n    return requests.get(GOOGLE_DISCOVERY_URL).json()\n\n\n@app.route(\"/\")\ndef index():\n    if current_user.is_authenticated:\n        return render_template('index.html', name=current_user.name.title(),\n                               email=current_user.email, profile_pic=current_user.profile_pic)\n    else:\n        return render_template('login.html')\n\n\n@app.route(\"/login\")\ndef login():\n    # Find out what URL to hit for Google login\n    google_provider_cfg = get_google_provider_cfg()\n    authorization_endpoint = google_provider_cfg[\"authorization_endpoint\"]\n\n    # Use library to construct the request for Google login and provide\n    # scopes that let you retrieve user's profile from Google\n    request_uri = client.prepare_request_uri(\n        authorization_endpoint,\n        redirect_uri=request.base_url + \"/callback\",\n        scope=[\"openid\", \"email\", \"profile\"],\n    )\n    return redirect(request_uri)\n\n\n@app.route(\"/login/callback\")\ndef callback():\n    # Get authorization code Google sent back to you\n    code = request.args.get(\"code\")\n    # Find out what URL to hit to get tokens that allow you to ask for\n    # things on behalf of a user\n    google_provider_cfg = get_google_provider_cfg()\n    token_endpoint = google_provider_cfg[\"token_endpoint\"]\n\n    # Prepare and send a request to get tokens! 
Yay tokens!\n    token_url, headers, body = client.prepare_token_request(\n        token_endpoint,\n        authorization_response=request.url,\n        redirect_url=request.base_url,\n        code=code\n    )\n    token_response = requests.post(\n        token_url,\n        headers=headers,\n        data=body,\n        auth=(GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET),\n    )\n\n    # Parse the tokens!\n    client.parse_request_body_response(json.dumps(token_response.json()))\n\n    userinfo_endpoint = google_provider_cfg[\"userinfo_endpoint\"]\n    uri, headers, body = client.add_token(userinfo_endpoint)\n    userinfo_response = requests.get(uri, headers=headers, data=body)\n\n    if userinfo_response.json().get(\"email_verified\"):\n        unique_id = userinfo_response.json()[\"sub\"]\n        users_email = userinfo_response.json()[\"email\"]\n        picture = userinfo_response.json()[\"picture\"]\n        users_name = userinfo_response.json()[\"given_name\"]\n    else:\n        return \"User email not available or not verified by Google.\", 400\n\n    # Create a user in your db with the information provided\n    # by Google\n    user = User(\n        id_=unique_id, name=users_name, email=users_email, profile_pic=picture\n    )\n\n    # Doesn't exist? Add it to the database.\n    if not User.get(unique_id):\n        User.create(unique_id, users_name, users_email, picture)\n\n    # Begin user session by logging the user in\n    login_user(user)\n\n    # Send user back to homepage\n    return redirect(url_for(\"index\"))\n\n\n@app.route(\"/logout\")\n@login_required\ndef logout():\n    logout_user()\n    return redirect(url_for(\"index\"))\n\n\n@app.route(\"/api/v1.0.0/org\", methods=['POST'])\ndef create_group():\n    if not request.json:\n        abort(400)\n\n    Organization.create(request.json['name'], request.json['description'])\n\n    return 'Created', 201\n\n\n@app.route(\"/api/v1.0.0/org/\")\ndef get_group(org_id):\n    org = Organization.get(org_id)\n    return jsonify({'org': org.toJSON()})\n\n\n@app.route(\"/api/v1.0.0/topic\", methods=['POST'])\ndef add_topic():\n\n    if not current_user:\n        abort(401)\n\n    if not request.json:\n        abort(400)\n\n    folder = organizer.getTopics().createTopic(\n        request.json['name'], current_user.email)\n\n    return jsonify({'topic': folder})\n\n\n@app.route(\"/api/v1.0.0/topics\")\ndef get_topics():\n\n    if not current_user:\n        abort(401)\n\n    topics = organizer.getTopics().getTopicsForUser(current_user.email)\n\n    return jsonify({'topics': topics})\n\n\n@app.route(\"/api/v1.0.0/contribution/\", methods=['POST'])\ndef add_contribution(topic_id):\n    if 'contribution' not in request.files:\n        flash('Please upload a file')\n        return 'No file uploaded', 400\n\n    contributionFile = request.files['contribution']\n\n    if not allowed_file(contributionFile.filename):\n        return 'File Type not supported', 415\n\n    return organizer.getTopics().addContributionToTopic(current_user.email, topic_id, app.config['UPLOAD_FOLDER'], contributionFile)\n\n\n@app.route(\"/api/v1.0.0/contribution/\")\ndef get_contribution(topic_id):\n    if not current_user:\n        abort(401)\n\n    if not topic_id:\n        abort(400)\n\n    contributions = organizer.getTopics(\n    ).getContributions().getContributionsForTopic(topic_id)\n\n    return jsonify({'contributions': contributions})\n\n\ndef allowed_file(filename):\n    return '.' 
in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\nif __name__ == \"__main__\":\n    # app.run(ssl_context=\"adhoc\", host=\"0.0.0.0\", port=\"443\") # Docker variation\n\n    app.run(ssl_context=\"adhoc\")\n","repo_name":"miracle09110/bahagian","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
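Two hazards in the Flask app above: `abort` is called in several API routes but never imported from `flask`, and the `if not current_user:` guards never fire, because Flask-Login substitutes a truthy anonymous-user object when nobody is logged in. The documented check is `current_user.is_authenticated`, or simply the `@login_required` decorator the file already imports. A minimal sketch of a corrected guard, reusing the record's `app` and `organizer`; the route path and function name here are hypothetical, chosen to avoid clashing with the original route:

from flask import abort, jsonify   # `abort` is absent from the record's flask imports
from flask_login import current_user, login_required

@app.route("/api/v1.0.0/topics-secure")  # hypothetical path for illustration
@login_required                          # rejects anonymous callers before the body runs
def get_topics_secure():
    # Equivalent explicit form: if not current_user.is_authenticated: abort(401)
    topics = organizer.getTopics().getTopicsForUser(current_user.email)
    return jsonify({'topics': topics})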
+{"seq_id":"37349718286","text":"# %%\n'''\nSTEP 2:  Prepare network geometries etc. for three scenarios utilised in step 3.\n\nselect SUM(ST_Length(links.geom))\n\tFROM (SELECT geom FROM analysis.links_20) as links,\n\t\t(SELECT geom FROM analysis.city_boundaries_150 where id = 28705) as bound\n\tWHERE ST_Contains(bound.geom, links.geom);\ngives: 20174.44130493104\n\nonce the footpaths have been added the total lengths come to around 24km\n\n# case 2: 24km of 80m grid\n\n# case 3: 24km of fractal\n'''\nimport asyncio\n\nimport asyncpg\nimport networkx as nx\nimport numpy as np\nfrom cityseer.tools import graphs\nfrom src.process.loaders import postGIS_to_networkX\nfrom shapely import wkt, geometry\n\n\nasync def inner_york_burb(db_config):\n    # load york graph\n    york_burb = await postGIS_to_networkX(db_config,\n                                          'analysis.nodes_20',\n                                          'analysis.links_20',\n                                          41)\n    print('NX graph summary before pruning')\n    print(nx.info(york_burb))\n    # manually load inner city boundary\n    db_con = await asyncpg.connect(**db_config)\n    inner_boundary = await db_con.fetchval(f'''\n                        select ST_AsText(ST_buffer(geom, 10))\n                        from analysis.city_boundaries_150\n                        where id = 28705\n                    ''')\n    inner_boundary = wkt.loads(inner_boundary)\n    # filter out graph nodes that are not within the inner boundary\n    drop_nodes = []\n    for nd_id, nd_data in york_burb.nodes(data=True):\n        nd_geom = geometry.Point(nd_data['x'], nd_data['y'])\n        if not inner_boundary.contains(nd_geom):\n            drop_nodes.append(nd_id)\n    york_burb.remove_nodes_from(drop_nodes)\n    print('NX graph summary after pruning')\n    print(nx.info(york_burb))\n    #\n    return york_burb\n\n\ndef york_burb():\n    '''\n    NX graph summary before pruning\n    Name:\n    Type: Graph\n    Number of nodes: 42590\n    Number of edges: 43728\n    Average degree:   2.0534\n    NX graph summary after pruning\n    Name:\n    Type: Graph\n    Number of nodes: 1298\n    Number of edges: 1361\n    Average degree:   2.0971\n    Yorkburb summed lengths: 23704.85033655231\n    '''\n    db_config = {\n        'host': 'localhost',\n        'port': 5433,\n        'user': 'gareth',\n        'database': 'gareth',\n        'password': ''\n    }\n    # use process loaders to load graph\n    york_burb = asyncio.run(inner_york_burb(db_config))\n    # sum lengths for reference\n    sum_lengths = 0\n    for s, e, d in york_burb.edges(data=True):\n        sum_lengths += geometry.LineString(d['geom']).length\n    print(f'Yorkburb summed lengths: {sum_lengths}')\n    # adjust x / y values to smaller coordinate system\n    # first pass - find minimums\n    min_x = min_y = np.inf\n    for n, d in york_burb.nodes(data=True):\n        x, y = (d['x'], d['y'])\n        if x < min_x:\n            min_x = x\n        if y < min_y:\n            min_y = y\n    # second pass - adjust coordinates\n    for n in york_burb.nodes():\n        old_x = york_burb.nodes[n]['x']\n        york_burb.nodes[n]['x'] = old_x - min_x\n        old_y = york_burb.nodes[n]['y']\n        york_burb.nodes[n]['y'] = old_y - min_y\n    # likewise adjust and check geoms\n    for s, e, d in york_burb.edges(data=True):\n        old_geom = d['geom']\n        new_geom = []\n        for x, y in old_geom.coords:\n            new_geom.append((x - min_x, y - min_y))\n        
new_geom = geometry.LineString(new_geom)\n        d['geom'] = new_geom\n        assert old_geom.length == new_geom.length\n    # check that total lengths haven't changed\n    post_sum_lengths = 0\n    for s, e, d in york_burb.edges(data=True):\n        post_sum_lengths += geometry.LineString(d['geom']).length\n    assert post_sum_lengths == sum_lengths\n    # relabel nodes\n    rl = {}\n    rl_counter = 0\n    for n in york_burb.nodes():\n        rl[n] = rl_counter\n        rl_counter += 1\n    york_burb = nx.relabel_nodes(york_burb, rl, copy=True)\n    # remove link (shorten dead-end to simplify adding new routes)\n    york_burb.remove_edge(1283, 1074)\n    # remove node (make way for adjacent edge)\n    york_burb.remove_nodes_from([157, 1089, 1144, 1163, 998, 503])\n    # add nodes where necessary\n    for x, y in [\n        (460379.79, 451844.15),\n        (460402.13, 451866.82),\n        (460429.81, 451876.83),\n        (460462.79, 451626.64),\n        (460160.19, 451843.77),\n        (460147.19, 451864.79),\n        (460140.00, 451826.62),\n        (460107.08, 451863.13),\n        (460160.19, 451797.30),\n        (460104.04, 451788.73),\n        (460188.12, 451423.61),\n        (459840.70, 451434.38),\n        (459913.19, 451389.30)]:\n        adj_x = x - min_x\n        adj_y = y - min_y\n        york_burb.add_node(rl_counter, x=adj_x, y=adj_y)\n        rl_counter += 1\n    # add missing footpaths\n    for start_nd, end_nd in [\n        (238, 865),\n        (1117, 43),\n        (797, 67),\n        (918, 797),\n        (795, 653),\n        (365, 797),\n        (705, 673),\n        (230, 362),\n        (1068, 1085),\n        (666, 1041),\n        (869, 426),\n        (116, 991),\n        (1097, 991),\n        (99, 312),\n        (771, 1113),\n        (1069, 1218),\n        (223, 447),\n        (1167, 1186),\n        (643, 1049),\n        (1034, 185),\n        (1189, 886),\n        (4, 671),\n        (60, 78),\n        (359, 1188),\n        (540, 1283),\n        (1283, 770),\n        (770, 817),\n        (82, 889),\n        (223, 306),\n        (874, 304),\n        (969, 478),\n        (159, 1298),\n        (1298, 1299),\n        (1299, 1300),\n        (1300, 1246),\n        (659, 1300),\n        (1028, 1299),\n        (624, 1298),\n        (1298, 616),\n        (628, 1303),\n        (1303, 1302),\n        (1302, 160),\n        (1302, 1304),\n        (1305, 1304),\n        (1304, 1306),\n        (945, 1307),\n        (1307, 620),\n        (1307, 1275),\n        (478, 1301),\n        (1301, 493),\n        (1301, 492),\n        (1180, 1308),\n        (1308, 856),\n        (871, 1308),\n        (870, 1309),\n        (1309, 1310),\n        (1310, 1172),\n        (1310, 429),\n        (1017, 778)]:\n        x_start = (york_burb.nodes[start_nd]['x'])\n        y_start = (york_burb.nodes[start_nd]['y'])\n        x_end = (york_burb.nodes[end_nd]['x'])\n        y_end = (york_burb.nodes[end_nd]['y'])\n        geom = geometry.LineString([(x_start, y_start), (x_end, y_end)])\n        york_burb.add_edge(start_nd, end_nd, geom=geom)\n    #  decompose new edges\n    york_burb = graphs.nX_decompose(york_burb, 20)\n    # ready\n    return york_burb\n\n\ndef grid_ville():\n    '''\n    Type: Graph\n    Number of nodes: 1200\n    Number of edges: 1320\n    Average degree:   2.2000\n    Gridville summed lengths: 24200.000000000062\n    '''\n    grid_ville = nx.Graph()\n    # divisor and extents\n    div = 12\n    ext = 1100\n    # add nodes\n    for x_id in range(div):\n        for y_id in range(div):\n      
      grid_ville.add_node(f'{x_id}_{y_id}', x=ext / div * x_id, y=ext / div * y_id)\n    # add edges\n    sum_lengths = 0\n    for x_id in range(div):\n        for y_id in range(div):\n            node_set = []\n            # last row and column do not have a next row / column\n            # add edge in the x direction\n            if y_id < div - 1:\n                a_nd_start = f'{x_id}_{y_id}'\n                a_nd_end = f'{x_id}_{y_id + 1}'\n                node_set.append((a_nd_start, a_nd_end))\n            # add edge in the y direction\n            if x_id < div - 1:\n                b_nd_start = f'{x_id}_{y_id}'\n                b_nd_end = f'{x_id + 1}_{y_id}'\n                node_set.append((b_nd_start, b_nd_end))\n            # for x direction and y direction node sets, add edges and edge geoms\n            for start, end in node_set:\n                start_x = grid_ville.nodes[start]['x']\n                start_y = grid_ville.nodes[start]['y']\n                end_x = grid_ville.nodes[end]['x']\n                end_y = grid_ville.nodes[end]['y']\n                geom = geometry.LineString([(start_x, start_y), (end_x, end_y)])\n                grid_ville.add_edge(start, end, geom=geom)\n                sum_lengths += geom.length\n    #  decompose new edges\n    grid_ville = graphs.nX_decompose(grid_ville, 20)\n    # print info\n    print(nx.info(grid_ville))\n    # report sum\n    print(f'Gridville summed lengths: {sum_lengths}')\n    # ready\n    return grid_ville\n\n\ndef suburb():\n    '''\n    Number of nodes: 1349\n    Number of edges: 1348\n    Average degree:   1.9985\n    Gridville summed lengths: 23550.0. Last length 14.0625\n    '''\n    suburb = nx.Graph()\n    # set params\n    recursions = 7\n    distance = 1200\n    # set the seed centroid\n    node_id = 1\n    suburb.add_node(node_id, x=distance / 2, y=distance / 2)\n    centroids = [node_id]\n    node_id += 1\n    # sum geom lengths\n    sum_lengths = 0\n    last_length = np.inf\n    # recursively add centroids and edges\n    for i in range(recursions):\n        # alternate directions\n        x_direction = True\n        if i % 2 == 0:\n            x_direction = False\n            # distance only updated every second cycle\n            distance = distance / 2 - 25\n        # new centroids - keep separate and then replace at end of loop\n        new_centroids = []\n        # for each centroid\n        for start_id in centroids:\n            x_start = suburb.nodes[start_id]['x']\n            y_start = suburb.nodes[start_id]['y']\n            # add the new nodes and geoms in either direction\n            for dist in [distance, -distance]:\n                # create the end coordinates\n                if x_direction:\n                    x_centroid = x_start + dist / 2\n                    y_centroid = y_start\n                    x_end = x_start + dist\n                    y_end = y_start\n                else:\n                    x_centroid = x_start\n                    y_centroid = y_start + dist / 2\n                    x_end = x_start\n                    y_end = y_start + dist\n                # calculate the new centroids and end nodes\n                centroid_id = node_id\n                node_id += 1\n                new_centroids.append(centroid_id)  # add to new centroids\n                suburb.add_node(centroid_id, x=x_centroid, y=y_centroid)\n                end_id = node_id\n                node_id += 1\n                suburb.add_node(end_id, x=x_end, y=y_end)\n                # create the new 
geoms and edges\n                geom_a = geometry.LineString([(x_start, y_start), (x_centroid, y_centroid)])\n                suburb.add_edge(start_id, centroid_id, geom=geom_a)\n                sum_lengths += geom_a.length\n                geom_b = geometry.LineString([(x_centroid, y_centroid), (x_end, y_end)])\n                suburb.add_edge(centroid_id, end_id, geom=geom_b)\n                sum_lengths += geom_b.length\n                # keep track of least length\n                last_length = geom_a.length\n        centroids = new_centroids\n\n    #  decompose new edges\n    suburb = graphs.nX_decompose(suburb, 20)\n    # print info\n    print(nx.info(suburb))\n    # report sum\n    print(f'Suburb summed lengths: {sum_lengths}. Last length {last_length}')\n\n    return suburb\n\n\n# %%\nif __name__ == '__main__':\n    york_burb_graph = york_burb()\n    # plot.plot_nX(york_burb_graph, figsize=(20, 20), dpi=150, labels=False, path='./temp_images/temp_york_burb.pdf')\n\n    grid_ville_graph = grid_ville()\n    # plot.plot_nX(grid_ville_graph, figsize=(20, 20), dpi=150, labels=False, path='./temp_images/temp_grid_ville.pdf')\n\n    suburb_graph = suburb()\n    # plot.plot_nX(suburb_graph, figsize=(20, 20), dpi=150, labels=False, path='./temp_images/temp_suburb.pdf')\n","repo_name":"songololo/phd","sub_path":"src/explore/toy_models/step_C1_graphs.py","file_name":"step_C1_graphs.py","file_ext":"py","file_size_in_byte":11553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
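All three graph generators above keep a running `sum_lengths` by hand and compare exact floats (`assert post_sum_lengths == sum_lengths`), which is fragile once coordinates are transformed. A small helper, assuming edges carry shapely `LineString` objects under the `geom` attribute as in this script, plus a tolerance-based form of the invariant check:

import math
import networkx as nx

def network_length(graph: nx.Graph) -> float:
    # Sum the lengths of all edge geometries ('geom' holds a shapely LineString).
    return sum(data['geom'].length for _, _, data in graph.edges(data=True))

# Tolerance-based invariant check after a coordinate shift, e.g.:
# assert math.isclose(network_length(york_burb), sum_lengths)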
+{"seq_id":"13043994635","text":"import multiprocessing as mp\nfrom time import sleep, time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\nfrom typing import Type\n\n# Run IPython magic commands\nfrom IPython.core.getipython import get_ipython\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm, trange\n\nipython = get_ipython()\nif ipython is not None:\n    # Only works in interactive mode\n    ipython.run_line_magic(\"reload_ext\", \"autoreload\")\n    ipython.run_line_magic(\"autoreload\", \"2\")\n\n\ndef makeLoaders(\n    train_data: datasets.VisionDataset,\n    test_data: datasets.VisionDataset,\n    batch_size: int,\n) -> tuple[DataLoader, DataLoader]:\n\n    # Loaders\n    # The loading can be a large bottleneck for the training speed. Increase number of workers and pin memory.\n    train_loader = DataLoader(\n        dataset=train_data,\n        batch_size=batch_size,\n        shuffle=True,\n        num_workers=mp.cpu_count(),\n        pin_memory=True,\n    )\n    test_loader = DataLoader(\n        dataset=test_data,\n        batch_size=batch_size,\n        shuffle=False,\n        num_workers=mp.cpu_count(),\n        pin_memory=True,\n    )\n\n    return train_loader, test_loader\n\n\ndef mnist(batch_size: int) -> tuple[DataLoader, DataLoader]:\n    \"\"\"Create loaders for MNIST dataset\"\"\"\n\n    # Load and transform to tensor\n    train_data = datasets.MNIST(\n        root=\"./data\", train=True, download=True, transform=transforms.ToTensor()\n    )\n    test_data = datasets.MNIST(\n        root=\"./data\", train=False, download=True, transform=transforms.ToTensor()\n    )\n\n    return makeLoaders(\n        train_data=train_data, test_data=test_data, batch_size=batch_size\n    )\n\ndef fashion_mnist(batch_size: int) -> tuple[DataLoader, DataLoader]:\n    \"\"\"Create loaders for MNIST dataset\"\"\"\n\n    # Load and transform to tensor\n    train_data = datasets.FashionMNIST(\n        root=\"./data\", train=True, download=True, transform=transforms.ToTensor()\n    )\n    test_data = datasets.FashionMNIST(\n        root=\"./data\", train=False, download=True, transform=transforms.ToTensor()\n    )\n\n    return makeLoaders(\n        train_data=train_data, test_data=test_data, batch_size=batch_size\n    )\n\ndef emnist(batch_size: int, split: str = \"byclass\") -> tuple[DataLoader, DataLoader]:\n    \"\"\"Create loaders for MNIST dataset\"\"\"\n\n    # Load and transform to tensor\n    train_data = datasets.EMNIST(\n        root=\"./data\", split=split,train=True, download=True, transform=transforms.ToTensor()\n    )\n    test_data = datasets.EMNIST(\n        root=\"./data\", split=split, train=False, download=True, transform=transforms.ToTensor()\n    )\n\n    return makeLoaders(\n        train_data=train_data, test_data=test_data, batch_size=batch_size\n    )\n","repo_name":"Haydeni0/vae-test","sub_path":"my_loaders.py","file_name":"my_loaders.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32180826801","text":"def solution(matrix):\n\n    # create a new matrix to hold the result\n    result = [[0 for j in range(len(matrix[0]))] for i in range(len(matrix))]\n\n    # iterate over each cell in the matrix\n    for i in range(len(matrix)):\n        for j in range(len(matrix[0])):\n            # initialize a counter for the number of mines in the neighboring cells\n            count = 0\n\n            # check the adjacent cells (up, down, left, right, and the four diagonal cells)\n            # and increment the counter if the cell contains a mine\n            if i > 0 and matrix[i-1][j]:\n                count += 1  # up\n            if i < len(matrix)-1 and matrix[i+1][j]:\n                count += 1  # down\n            if j > 0 and matrix[i][j-1]:\n                count += 1  # left\n            if j < len(matrix[0])-1 and matrix[i][j+1]:\n                count += 1  # right\n            if i > 0 and j > 0 and matrix[i-1][j-1]:\n                count += 1  # top-left\n            if i > 0 and j < len(matrix[0])-1 and matrix[i-1][j+1]:\n                count += 1  # top-right\n            if i < len(matrix)-1 and j > 0 and matrix[i+1][j-1]:\n                count += 1  # bottom-left\n            if i < len(matrix)-1 and j < len(matrix[0])-1 and matrix[i+1][j+1]:\n                count += 1  # bottom-right\n\n            # set the result matrix cell to the count of mines in the neighboring cells\n            result[i][j] = count\n\n    return result\n","repo_name":"camiloricoe/python-camilo","sub_path":"CodeSignal/Arcade/Intro/24-Minesweeper.py","file_name":"24-Minesweeper.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"41937139444","text":"def is_prime(i):\n    if i<2:\n        return False\n    for j in range(2,i):\n        if(i%j==0):\n            return False\n    return True\ndef palin(i):\n    temp=i\n    r=0\n    while(i):\n        rem=i%10\n        r=r*10+rem\n        i//=10\n    if(r==temp):\n        return True\nn=int(input())\nif(n>=10 and n<=1000):\n    i=n+1\n    while True:\n        if palin(i)and is_prime(i):\n            print(i)\n            break\n        i+=1\n    \n","repo_name":"samruddhiparate-13/codemind-python","sub_path":"Program_to_find_out_the_next_prime_palindrome_number.py","file_name":"Program_to_find_out_the_next_prime_palindrome_number.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"73702457501","text":"from django.shortcuts import render, redirect, get_object_or_404\r\nfrom .models import EnrollStudenttosubect, Mark, subject, term, Grading\r\nfrom student.models import Student, Klass, Stream, Attendance\r\nfrom .forms import subjectForm, TermForm, GradeForm, EnrollForm, UpdateMarksForm\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom datetime import date\r\nfrom io import BytesIO\r\nfrom django.template.loader import get_template\r\nfrom xhtml2pdf import pisa\r\nfrom django.http import HttpResponse\r\nimport xlwt\r\nfrom student.views import (\r\n    database_operation,\r\n    delete_database_operation,\r\n)\r\nfrom student.views import get_class, get_stream\r\nfrom twilio.rest import Client\r\nfrom django.conf import settings\r\n\r\n\r\nyear = str(date.today().year)\r\n\r\n\r\ndef get_student(Student, id):\r\n    return get_object_or_404(Student, id=id)\r\n\r\n\r\ndef all_terms():\r\n    return term.objects.all()\r\n\r\n\r\ndef all_subjects():\r\n    return subject.objects.all()\r\n\r\n\r\ndef generate_excel(name, term, subjectname, sorttotalfinal, stream=None):\r\n    name = f\"{name} {term} result\"\r\n    response = HttpResponse(content_type=\"application/ms-excel\")\r\n    response[\"Content-Disposition\"] = \"attachment; filename=results.xls \"\r\n\r\n    wb = xlwt.Workbook(encoding=\"utf-8\")\r\n    ws = wb.add_sheet(\"Users\")\r\n\r\n    # Sheet header, first row\r\n    row_num = 0\r\n\r\n    font_style = xlwt.XFStyle()\r\n    font_style.font.bold = True\r\n    columns = [\"Rank\", \"Student\"]\r\n    for xubject in subjectname:\r\n        columns.append(xubject.name)\r\n    columns.append(\"Total\")\r\n    columns.append(\"Grade\")\r\n    columns.append(\"Points\")\r\n    for col_num in range(len(columns)):\r\n        ws.write(row_num, col_num, columns[col_num], font_style)\r\n    # Sheet body, remaining rows\r\n    font_style = xlwt.XFStyle()\r\n    rows = sorttotalfinal\r\n    for row in rows:\r\n        row_num += 1\r\n        for col_num in range(len(row)):\r\n            ws.write(row_num, col_num, row[col_num], font_style)\r\n    wb.save(response)\r\n    return response\r\n\r\n\r\ndef generate_pdf(template_name, context):\r\n    template_path = template_name\r\n    response = HttpResponse(content_type=\"application/pdf\")\r\n    response[\"Content-Disposition\"] = 'filename=\"report.pdf\"'\r\n    template = get_template(template_path)\r\n    html = template.render(context)\r\n    pisa_status = pisa.CreatePDF(html, dest=response)\r\n    if pisa_status.err:\r\n        return HttpResponse(\"We had some errors 
\" + html + \"
\")\r\n return response\r\n\r\n\r\ndef class_stream_count(name, stream=None):\r\n if stream:\r\n return Student.student.class_or_stream_count(name, stream)\r\n return Student.student.class_or_stream_count(name)\r\n\r\n\r\ndef student_stream_class(name, stream=None):\r\n if stream:\r\n return Student.student.get_student_list_class_or_stream(name, stream)\r\n return Student.student.get_student_list_class_or_stream(name)\r\n\r\n\r\ndef student_subject_count(student):\r\n return EnrollStudenttosubect.enroll.get_subjects_for_student_count(student=student)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef student_view(request, id, name, format=None, template_name=None):\r\n student = get_object_or_404(Student, id=id)\r\n totalclass = class_stream_count(name=name)\r\n subjectname = all_subjects()\r\n terms = all_terms()\r\n streamtotal = class_stream_count(name=name, stream=student.stream)\r\n student_stream = student_stream_class(\r\n name=student.class_name, stream=student.stream\r\n )\r\n Getgrading = getgrade()\r\n student_class = student_stream_class(name=student.class_name)\r\n getterm = {}\r\n getavg = {}\r\n totalmarks = {}\r\n getclassrankid = []\r\n getstreamrankid = []\r\n getsubjectcount = student_subject_count(student)\r\n outsubject = getsubjectcount * 100\r\n for getterms in terms:\r\n termresults = []\r\n for sub in subjectname:\r\n studentmarks = list(\r\n Mark.objects.filter(\r\n Term__name=getterms, name__name=sub, student=student, year=year\r\n ).values_list(\"marks\", flat=True)\r\n )\r\n if not studentmarks:\r\n studentmarks = [\"\"]\r\n termresults.extend(studentmarks)\r\n totalmarks[getterms.name] = sum([i for i in termresults if i != \"\"])\r\n studenttotalmarks = totalmarks[getterms.name]\r\n if totalmarks[getterms.name]:\r\n termresults.append(studenttotalmarks)\r\n if sum([i for i in termresults if i != \"\"]):\r\n getterm[getterms.name] = termresults\r\n\r\n (\r\n getclassnumber,\r\n getstreamnumber,\r\n ) = get_all_student_result_for_class_and_stream(\r\n student_stream, student_class, getterms\r\n )\r\n _, getclassrankid = calculate_class_rank(\r\n getclassnumber, student, totalclass, getclassrankid\r\n )\r\n\r\n _, getstreamrankid = calculate_stream_rank(\r\n getstreamnumber, student, getstreamrankid\r\n )\r\n getterm = update_term_results(\r\n getterm,\r\n getterms,\r\n getclassrankid,\r\n getstreamrankid,\r\n studenttotalmarks,\r\n getsubjectcount,\r\n subjectname,\r\n Getgrading,\r\n )\r\n context = {\r\n \"classname\": name,\r\n \"getterm\": getterm,\r\n \"title\": \"student details\",\r\n \"subject\": subjectname,\r\n \"student\": student,\r\n }\r\n\r\n return render(request, \"result/student.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef enteresult(request, name, Term, Subject, stream=None):\r\n exam = EnrollStudenttosubect.enroll.get_students_subject(\r\n name=name, stream=stream, Subject=Subject\r\n )\r\n result = [[] for _ in range(4)]\r\n if request.method == \"POST\":\r\n getmarks = request.POST.getlist(\"subjectname\")\r\n result[0] = [i.student.id for i in exam]\r\n result[1] = [subject.objects.get(name=Subject).id for i in exam]\r\n result[2] = [term.objects.get(name=Term).id for i in exam]\r\n result[3] = getmarks\r\n for j in range(len(result[0])):\r\n Marks = Mark.objects.create(\r\n student_id=result[0][j],\r\n name_id=result[1][j],\r\n Term_id=result[2][j],\r\n marks=int(result[3][j]),\r\n )\r\n Marks.save()\r\n return redirect(\"student:home\")\r\n context = {\r\n \"exam\": exam,\r\n \"name\": 
name,\r\n \"stream\": stream,\r\n \"term\": Term,\r\n \"subject\": Subject,\r\n }\r\n return render(request, \"result/enterresult.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef streamexamanalysis(\r\n request, name, term, stream=None, template_name=None, format=None\r\n):\r\n subjects = subject.objects.all()\r\n avg_subject = get_average_subject_marks(name, term, stream, subjects, term)\r\n best_students_data = get_best_students_data(name, term, stream, subjects)\r\n grades_count = get_grades_count(name, term, stream)\r\n\r\n context = {\r\n \"z\": best_students_data,\r\n \"name\": name,\r\n \"term\": term,\r\n \"avgsubject\": avg_subject,\r\n \"subject\": subjects,\r\n \"Count\": grades_count,\r\n }\r\n\r\n if stream:\r\n context[\"stream\"] = stream\r\n\r\n if format == \"pdf\":\r\n return generate_pdf(template_name, context)\r\n\r\n return render(request, template_name, context)\r\n\r\n\r\ndef get_marks_for_class_or_stream(name, term, stream, subject):\r\n marks = Mark.mark.get_subject_marks_for_class_or_stream(\r\n student_class_name=name, Term=term, subject_name=subject.name, stream=stream\r\n )\r\n return marks\r\n\r\n\r\ndef get_average_subject_marks(name, term, stream, subjects, getterms):\r\n avg_subject = {}\r\n for subject in subjects:\r\n marks = get_marks_for_class_or_stream(name, term, stream, subject)\r\n student_count = EnrollStudenttosubect.enroll.student_per_subject_count(\r\n subject=subject, class_name=name, stream=stream\r\n )\r\n subject_marks = list(marks)\r\n if subject_marks:\r\n avg_subject[subject.name] = calculate_average(\r\n sum(subject_marks),\r\n student_count,\r\n )\r\n\r\n return avg_subject\r\n\r\n\r\ndef get_best_students_data(name, term, stream, subjects):\r\n best_students_data = []\r\n for subject in subjects:\r\n marks = get_marks_for_class_or_stream(name, term, stream, subject)\r\n if not marks:\r\n continue\r\n\r\n max_mark = max(marks)\r\n best_student = Mark.objects.filter(\r\n student__class_name__name=name,\r\n Term__name=term,\r\n name__name=subject.name,\r\n marks=max_mark,\r\n year=year,\r\n )\r\n for i in best_student:\r\n best_students_data.append(\r\n (i.student.get_student_name(), subject.name, max_mark)\r\n )\r\n\r\n return best_students_data\r\n\r\n\r\ndef get_grades_count(name, term, stream):\r\n Getgrading = Grading.objects.all()\r\n students = get_students_by_class_and_stream(name, stream)\r\n grades = []\r\n grades_count = {}\r\n for student in students:\r\n marks = list(\r\n Mark.objects.filter(Term__name=term, student=student).values_list(\r\n \"marks\", flat=True\r\n )\r\n )\r\n avg_mark = round(calculate_average(sum(marks), len(marks)), 1)\r\n grades.append(get_grade(Getgrading, avg_mark).name)\r\n for grading in Getgrading:\r\n grades_count[grading.name] = grades.count(grading.name)\r\n return grades_count\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef addsubject(request):\r\n return database_operation(request, subjectForm)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef AddTerm(request):\r\n return database_operation(request, TermForm)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef addGrade(request):\r\n return database_operation(request, GradeForm)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef allGrade(request):\r\n return render(\r\n request,\r\n \"result/allgrading.html\",\r\n context={\"allgrade\": Grading.objects.all().order_by(\"name\")},\r\n )\r\n\r\n\r\n# remove this is a 
duplicate\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef allsubject(request):\r\n return render(\r\n request,\r\n \"result/subjectall.html\",\r\n context={\"allsubjects\": subject.objects.all()},\r\n )\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef allterm(request):\r\n return render(\r\n request,\r\n \"result/allterm.html\",\r\n context={\"allterm\": term.objects.all()},\r\n )\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef deleteterm(request, id):\r\n return delete_database_operation(request, term, id)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef enrollStudenttosubectall(request):\r\n allenroll = EnrollStudenttosubect.enroll.get_all_students_subject()\r\n context = {\r\n \"title\": \"allenrollsubject\",\r\n \"allenroll\": allenroll,\r\n }\r\n return render(request, \"result/allenroll.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef deletegrade(request):\r\n return delete_database_operation(request, Grading, id)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef updatesubject(request):\r\n return database_operation(request, subjectForm, id)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef subjectdelete(request):\r\n return delete_database_operation(request, subject, id)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef Enrollupdate(request, id):\r\n return database_operation(request, EnrollForm, id)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef enrolldelete(request, id):\r\n return delete_database_operation(request, EnrollStudenttosubect, id)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login\")\r\ndef enrollstudentstosubject(request, name, stream):\r\n getstudents = get_students_by_class_and_stream(name, stream)\r\n getsubjects = all_subjects()\r\n if request.method == \"POST\":\r\n getsubjectid = request.POST.getlist(\"subjectid\")\r\n getstudentsub = []\r\n for i in getstudents:\r\n getstudentsub.append(i.id)\r\n for i in range(len(getstudents)):\r\n if getsubjectid[i]:\r\n enrolltosubject_data = {\r\n \"student_id\": getstudentsub[i],\r\n \"subject_id\": subject.objects.get(name=getsubjectid[i]).id,\r\n \"class_name_id\": Klass.objects.get(name=name).id,\r\n }\r\n if stream:\r\n enrolltosubject_data[\"stream_id\"] = Stream.objects.get(\r\n name=stream\r\n ).id\r\n enrolltosubject = EnrollStudenttosubect.objects.create(\r\n **enrolltosubject_data\r\n )\r\n enrolltosubject.save()\r\n context = {\"getstudents\": getstudents, \"getsubjects\": getsubjects}\r\n return render(request, \"result/studentenroll.html\", context)\r\n\r\n\r\n# reuse function from line 304\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef subjectperrank(\r\n request, name, term, subject, stream=None, template_name=None, format=None\r\n):\r\n rankings_data = Mark.mark.student_subject_ranking_per_class_or_stream(\r\n name, term, subject, stream\r\n )\r\n context = {\r\n \"name\": name,\r\n \"term\": term,\r\n \"stream\": stream,\r\n \"subject\": subject,\r\n \"rankings_data\": rankings_data,\r\n }\r\n\r\n if format == \"pdf\":\r\n return generate_pdf(template_name, context)\r\n else:\r\n return render(request, template_name, context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef getresultstreamterm(\r\n request, name, term, stream=None, template_name=None, format=None\r\n):\r\n subjects = all_subjects()\r\n students = get_students_by_class_and_stream(name, stream)\r\n results = collect_student_marks(students, subjects, 
term)\r\n sorted_results = sort_results_by_total_marks(results)\r\n indexed_results = add_index_to_results(sorted_results)\r\n\r\n avg_marks = calculate_average_marks_and_grading(indexed_results, term)\r\n\r\n context = {\r\n \"page_obj\": indexed_results,\r\n \"subject_list\": subjects,\r\n \"class_name\": name,\r\n \"term\": term,\r\n }\r\n if stream:\r\n context[\"stream\"] = stream\r\n\r\n if format == \"ms-excel\":\r\n return generate_excel(name, term, subjects, indexed_results, stream=None)\r\n\r\n return render(request, template_name, context)\r\n\r\n\r\ndef get_students_by_class_and_stream(name, stream):\r\n query_params = {\r\n \"class_name__name\": name,\r\n \"year\": year,\r\n }\r\n if stream:\r\n query_params[\"stream__name\"] = stream\r\n return Student.objects.filter(**query_params)\r\n\r\n\r\ndef collect_student_marks(students, subjects, term):\r\n results = []\r\n for student in students:\r\n marks = []\r\n for subjectname in subjects:\r\n marks_list = list(\r\n Mark.objects.filter(\r\n student=student, name=subjectname, Term__name=term\r\n ).values_list(\"marks\", flat=True)\r\n )\r\n if not marks_list:\r\n marks_list = [\"\"]\r\n marks += marks_list\r\n marks_sum = sum([int(mark) for mark in marks if mark != \"\"])\r\n marks.append(marks_sum)\r\n marks.insert(0, str(student))\r\n results.append(marks)\r\n return results\r\n\r\n\r\ndef sort_results_by_total_marks(results):\r\n return sorted(results, key=lambda x: x[-1], reverse=True)\r\n\r\n\r\ndef add_index_to_results(results):\r\n return [[index] + result for index, result in enumerate(results, start=1)]\r\n\r\n\r\ndef calculate_average_marks_and_grading(indexed_results, getterms):\r\n avg_marks = []\r\n grading_system = getgrade()\r\n for result in indexed_results:\r\n subject_marks = result[2:-1]\r\n subject_marks_with_value = [int(mark) for mark in subject_marks if mark != \"\"]\r\n total_marks = sum(subject_marks_with_value)\r\n num_subjects = len(subject_marks_with_value)\r\n avg_mark = calculate_average(total_marks, num_subjects)\r\n if avg_mark:\r\n result.append(get_grade(grading_system, avg_mark).name)\r\n result.append(get_grade(grading_system, avg_mark).points)\r\n return avg_marks\r\n\r\n\r\ndef error_404(request, exception):\r\n return render(request, \"student/404.html\")\r\n\r\n\r\ndef calculate_average_and_get_grades(Getgrading, totalmarks, getsubjectcount):\r\n getavg = calculate_average(totalmarks, getsubjectcount)\r\n Gradeterm = None\r\n if getavg:\r\n Gradeterm = get_grade(Getgrading, getavg).name\r\n return getavg, Gradeterm\r\n\r\n\r\ndef get_all_student_result_for_class_and_stream(student_stream, student_class, getterm):\r\n getclassnumber, getstreamnumber = {}, {}\r\n for idstudent in student_class:\r\n marks = list(\r\n Mark.objects.filter(student=idstudent, Term__name=getterm).values_list(\r\n \"marks\", flat=True\r\n )\r\n )\r\n if marks:\r\n if sum(marks):\r\n getclassnumber[idstudent.id] = sum(marks)\r\n else:\r\n getclassnumber[idstudent.id] = 0\r\n\r\n for idstudent in student_stream:\r\n marks = list(\r\n Mark.objects.filter(student=idstudent, Term__name=getterm).values_list(\r\n \"marks\", flat=True\r\n )\r\n )\r\n if marks:\r\n if sum(marks):\r\n getstreamnumber[idstudent.id] = sum(marks)\r\n else:\r\n getstreamnumber[idstudent.id] = 0\r\n\r\n return getclassnumber, getstreamnumber\r\n\r\n\r\ndef calculate_class_rank(getclassnumber, student, totalclass, getclassrankid):\r\n if getclassnumber:\r\n sortedid = dict(sorted(getclassnumber.items(), key=lambda item: item[1])[::-1])\r\n 
getclassrank = list(sortedid.keys())\r\n classnumber = getclassrank.index(student.id) + 1\r\n getnumbers = f\"{classnumber}/{totalclass}\"\r\n getclassrankid.append(getnumbers)\r\n return classnumber, getclassrankid\r\n else:\r\n return\r\n\r\n\r\ndef calculate_stream_rank(getstreamnumber, student, getstreamrankid):\r\n if getstreamnumber:\r\n sorted_id = dict(\r\n sorted(getstreamnumber.items(), key=lambda item: item[1], reverse=True)\r\n )\r\n getstreamrank = list(sorted_id.keys())\r\n streamnumber = getstreamrank.index(student.id) + 1\r\n getnumbers = f\"{streamnumber}/{len(getstreamrank)}\"\r\n getstreamrankid.append(getnumbers)\r\n return streamnumber, getstreamrankid\r\n\r\n\r\ndef update_term_results(\r\n getterm,\r\n getterms,\r\n getclassrankid,\r\n getstreamrankid,\r\n totalmarks,\r\n getsubjectcount,\r\n subjectname,\r\n Getgrading,\r\n):\r\n a = 0\r\n if getterm:\r\n try:\r\n getterm[getterms.name] = (\r\n [getstreamrankid[a]] + [getclassrankid[a]] + getterm[getterms.name]\r\n )\r\n a += 1\r\n\r\n calcavg = round(calculate_average(totalmarks, getsubjectcount))\r\n if calcavg:\r\n getterm[getterms.name].append(get_grade(Getgrading, calcavg).name)\r\n getterm[getterms.name].append(get_grade(Getgrading, calcavg).points)\r\n else:\r\n getterm[getterms.name].append(\"\")\r\n getterm[getterms.name].append(\"\")\r\n except IndexError:\r\n pass\r\n return getterm\r\n\r\n\r\ndef subject_ranking_per_class_and_stream(\r\n student, subjectname, Getgrading, totalclass, q, getterms\r\n):\r\n subjectrankdetail = []\r\n for eachsubject in subjectname:\r\n eachsubjectrank = []\r\n eachsubjectrank.append(eachsubject.name)\r\n getmark = list(\r\n Mark.objects.filter(\r\n student=student,\r\n name__name=eachsubject.name,\r\n Term__name=getterms,\r\n year=student.year,\r\n ).values_list(\"marks\", flat=True)\r\n )\r\n eachsubjectrank += getmark\r\n if len(eachsubjectrank) >= 2:\r\n eachsubjectrank.append(get_grade(Getgrading, eachsubjectrank[1]).name)\r\n subjectrankclass = list(\r\n Mark.objects.filter(\r\n student__class_name__name=student.class_name,\r\n name__name=eachsubject,\r\n Term__name=getterms.name,\r\n year=student.year,\r\n )\r\n .values_list(\"student\", flat=True)\r\n .order_by(\"-marks\")\r\n )\r\n\r\n if student.id in subjectrankclass:\r\n getnu = f\"{subjectrankclass.index(student.id)+1}/{totalclass}\"\r\n eachsubjectrank.append(getnu)\r\n subjectrankdetail.append(eachsubjectrank)\r\n q[getterms.name] = subjectrankdetail\r\n return q\r\n\r\n\r\ndef getgrade():\r\n return Grading.objects.all()\r\n\r\n\r\ndef get_grade(grades, percent_or_marks):\r\n grade_name = None\r\n for grade in grades:\r\n if (\r\n percent_or_marks >= grade.percent\r\n and percent_or_marks >= 0\r\n and percent_or_marks <= 100\r\n ):\r\n grade_name = grade\r\n break\r\n return grade_name\r\n\r\n\r\ndef calculate_average(totalmarks, divider):\r\n try:\r\n getaverage = totalmarks / divider\r\n except ZeroDivisionError:\r\n getaverage = 0\r\n return getaverage\r\n\r\n\r\ndef reportbook(request, name, id, termname):\r\n student = get_student(Student, id)\r\n totalclass = class_stream_count(name=name)\r\n streamtotal = class_stream_count(name=name, stream=student.stream)\r\n terms = all_terms()\r\n Getgrading = getgrade()\r\n classnumber = {}\r\n streamnumber = {}\r\n Gradeterm = {}\r\n totalmarks = {}\r\n subjectname = all_subjects()\r\n student_stream = student_stream_class(\r\n name=student.class_name, stream=student.stream\r\n )\r\n student_class = student_stream_class(name=student.class_name)\r\n 
getsubjectcount = EnrollStudenttosubect.enroll.get_subjects_for_student_count(\r\n student=student\r\n )\r\n getavg = {}\r\n getclassrankid = []\r\n getstreamrankid = []\r\n getsubjectcount = EnrollStudenttosubect.enroll.get_subjects_for_student_count(\r\n student=student\r\n )\r\n outsubject = getsubjectcount * 100\r\n q = {}\r\n for gettermname in terms:\r\n getclassnumber, getstreamnumber = get_all_student_result_for_class_and_stream(\r\n student_stream, student_class, gettermname\r\n )\r\n classnumber[gettermname.name], _ = calculate_class_rank(\r\n getclassnumber, student, totalclass, getclassrankid\r\n )\r\n streamnumber[gettermname.name], _ = calculate_stream_rank(\r\n getstreamnumber, student, getstreamrankid\r\n )\r\n\r\n q = subject_ranking_per_class_and_stream(\r\n student, subjectname, Getgrading, totalclass, q, gettermname\r\n )\r\n getmarks = list(\r\n Mark.objects.filter(student=student, Term__name=gettermname).values_list(\r\n \"marks\", flat=True\r\n )\r\n )\r\n\r\n if getmarks:\r\n studenttotalmarks = sum(getmarks)\r\n totalmarks[gettermname.name] = studenttotalmarks\r\n else:\r\n totalmarks[gettermname.name] = 0\r\n\r\n (\r\n getavg[gettermname.name],\r\n Gradeterm[gettermname.name],\r\n ) = calculate_average_and_get_grades(\r\n Getgrading,\r\n totalmarks[gettermname.name],\r\n getsubjectcount,\r\n )\r\n context = {\r\n \"Grade\": Gradeterm,\r\n \"totalmarks\": totalmarks,\r\n \"classnumber\": classnumber,\r\n \"streamtotal\": streamtotal,\r\n \"classname\": name,\r\n \"totalclass\": totalclass,\r\n \"streamnumber\": streamnumber,\r\n \"title\": \"report card\",\r\n \"terms\": terms,\r\n \"student\": student,\r\n \"q\": q,\r\n \"termname\": termname,\r\n \"outsubject\": outsubject,\r\n }\r\n return generate_pdf(\"result/reportcard.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef class_subject_ranking(request):\r\n if request.method == \"POST\":\r\n selected_class = request.POST.get(\"selected_class\")\r\n selected_term = request.POST.get(\"selected_term\")\r\n selected_subject = request.POST.get(\"selected_subject\")\r\n if get_stream:\r\n selected_stream = request.POST.get(\"selected_stream\")\r\n if selected_stream:\r\n return redirect(\r\n \"result:subjectperrankstreamterm\",\r\n name=selected_class,\r\n stream=selected_stream,\r\n term=selected_term,\r\n subject=selected_subject,\r\n )\r\n\r\n return redirect(\r\n \"result:subjectperrankclass\",\r\n name=selected_class,\r\n term=selected_term,\r\n subject=selected_subject,\r\n )\r\n context = {\r\n \"getclasses\": get_class(),\r\n \"getsubjects\": all_subjects(),\r\n \"getterms\": all_terms(),\r\n }\r\n if get_stream():\r\n context[\"getstream\"] = get_stream()\r\n return render(request, \"result/class_subject_ranking.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef result_stream_or_term(request):\r\n if request.method == \"POST\":\r\n selected_class = request.POST.get(\"selected_class\")\r\n selected_term = request.POST.get(\"selected_term\")\r\n if get_stream:\r\n selected_stream = request.POST.get(\"selected_stream\")\r\n\r\n if selected_stream:\r\n return redirect(\r\n \"result:resultstreamterm\",\r\n name=selected_class,\r\n stream=selected_stream,\r\n term=selected_term,\r\n )\r\n return redirect(\r\n \"result:resultperterm\",\r\n name=selected_class,\r\n term=selected_term,\r\n )\r\n\r\n context = {\r\n \"getclasses\": get_class(),\r\n \"getterms\": all_terms(),\r\n }\r\n if get_stream():\r\n context[\"getstream\"] = get_stream()\r\n return 
render(request, \"result/result_stream_or_term.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef enter_result_for_stream_or_class(request):\r\n if request.method == \"POST\":\r\n selected_class = request.POST.get(\"selected_class\")\r\n selected_term = request.POST.get(\"selected_term\")\r\n selected_subject = request.POST.get(\"selected_subject\")\r\n selected_stream = request.POST.get(\"selected_stream\")\r\n if selected_stream:\r\n return redirect(\r\n \"result:enterexam\",\r\n name=selected_class,\r\n stream=selected_stream,\r\n Term=selected_term,\r\n Subject=selected_subject,\r\n )\r\n else:\r\n return redirect(\r\n \"result:enterexamforclass\",\r\n name=selected_class,\r\n Term=selected_term,\r\n Subject=selected_subject,\r\n )\r\n context = {\r\n \"getclasses\": get_class(),\r\n \"getterms\": all_terms(),\r\n \"getsubjects\": all_subjects(),\r\n \"getstream\": get_stream(),\r\n }\r\n return render(request, \"result/enter_result.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef enroll_students_to_student(request):\r\n if request.method == \"POST\":\r\n selected_class = request.POST.get(\"selected_class\")\r\n selected_stream = request.POST.get(\"selected_stream\")\r\n return redirect(\r\n \"result:enrollstudentstosubject\",\r\n name=selected_class,\r\n stream=selected_stream,\r\n )\r\n context = {\r\n \"getclasses\": get_class(),\r\n \"getstream\": get_stream(),\r\n }\r\n return render(request, \"result/enrollstudentstosubject.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef subjects_enrolled_y_student(request):\r\n allsubjectsbystudent = EnrollStudenttosubect.objects.filter(year=year)\r\n context = {\"allsubjectsbystudent\": allsubjectsbystudent}\r\n return render(request, \"result/allsubjectsbystudent.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef update_subjects_enrolled_y_student(request, id):\r\n return database_operation(request, EnrollForm, id)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef delete_subjects_enrolled_y_student(request, id):\r\n return delete_database_operation(request, EnrollStudenttosubect, id)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef class_and_stream_ranking(request):\r\n return render(request, \"result/class_and_stream_ranking.html\")\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef stream_ranking(request, name, term):\r\n streams = get_stream()\r\n grades = getgrade()\r\n stream_ranks = {}\r\n get_avg = {}\r\n for stream in streams:\r\n students = Student.student.get_student_list_class_or_stream(\r\n name=name, stream=stream.name\r\n )\r\n get_avg[stream.name] = get_student_avg_and_class_average(students, term)\r\n stream_ranks[stream.name] = get_grade(\r\n grades,\r\n calculate_average(sum(get_avg[stream.name]), len(get_avg[stream.name])),\r\n ).points\r\n sorted_dict = dict(\r\n sorted(stream_ranks.items(), key=lambda item: item[1], reverse=True)\r\n )\r\n context = {\"stream_ranks\": sorted_dict, \"name\": name}\r\n\r\n return render(request, \"result/stream_ranking.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef calculate_class_ranks(request, term):\r\n classes = get_class()\r\n grades = getgrade()\r\n class_ranks = {}\r\n stream = None\r\n for class_name in classes:\r\n students = Student.student.get_student_list_class_or_stream(\r\n name=class_name, stream=stream\r\n )\r\n get_avg = 
get_student_avg_and_class_average(students, term)\r\n class_ranks[class_name.name] = get_grade(\r\n grades,\r\n calculate_average(sum(get_avg), len(get_avg)),\r\n ).points\r\n sorted_dict = dict(\r\n sorted(class_ranks.items(), key=lambda item: item[1], reverse=True)\r\n )\r\n context = {\"class_ranks\": sorted_dict}\r\n return render(request, \"result/class_ranks.html\", context)\r\n\r\n\r\ndef get_student_avg_and_class_average(students, term):\r\n get_avg = []\r\n for student in students:\r\n query_params = {\r\n \"student\": student.id,\r\n \"Term__name\": term,\r\n \"year\": 2023,\r\n }\r\n get_marks = list(\r\n Mark.objects.filter(**query_params).values_list(\"marks\", flat=True)\r\n )\r\n if get_marks:\r\n get_avg.append(calculate_average(sum(get_marks), len(get_marks)))\r\n else:\r\n get_avg = [0]\r\n\r\n return get_avg\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef select_class_for_stream_ranking(request):\r\n if request.method == \"POST\":\r\n selected_class = request.POST.get(\"selected_class\")\r\n selected_term = request.POST.get(\"selected_term\")\r\n\r\n return redirect(\"result:streamranking\", name=selected_class, term=selected_term)\r\n context = {\r\n \"getclasses\": get_class(),\r\n \"getterms\": all_terms(),\r\n }\r\n return render(request, \"result/select_class_for_stream_ranking.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef select_stream_for_subject_ranking(request):\r\n if request.method == \"POST\":\r\n selected_class = request.POST.get(\"selected_class\")\r\n selected_subject = request.POST.get(\"selected_subject\")\r\n selected_term = request.POST.get(\"selected_term\")\r\n return redirect(\r\n \"result:subjectrankingstream\",\r\n class_name=selected_class,\r\n term=selected_term,\r\n subject=selected_subject,\r\n )\r\n\r\n context = {\r\n \"getclasses\": get_class(),\r\n \"getterms\": all_terms(),\r\n \"getsubjects\": all_subjects(),\r\n }\r\n return render(request, \"result/select_stream_ranking.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef class_stream_subject_ranking(request, class_name, term, subject):\r\n stream = Stream.objects.all()\r\n grades = getgrade()\r\n streamsubjectrank = {}\r\n for streams in stream:\r\n subjectclass = list(\r\n Mark.mark.get_subject_marks_for_class_or_stream(\r\n student_class_name=class_name,\r\n Term=term,\r\n subject_name=subject,\r\n stream=\"red\",\r\n )\r\n )\r\n studentpersubject = EnrollStudenttosubect.enroll.student_per_subject_count(\r\n subject=subject, class_name=class_name, stream=\"red\"\r\n )\r\n if subjectclass:\r\n avg = sum(subjectclass) / studentpersubject\r\n streamsubjectrank[streams.name] = get_grade(grades, avg).points\r\n sorted_subject_ranking = dict(\r\n sorted(streamsubjectrank.items(), key=lambda item: item[1], reverse=True)\r\n )\r\n context = {\r\n \"subject_ranking\": sorted_subject_ranking,\r\n \"subject\": subject,\r\n \"class\": class_name,\r\n }\r\n return render(request, \"result/streamsubjectranking.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef select_term_for_class_ranking(request):\r\n if request.method == \"POST\":\r\n selected_term = request.POST.get(\"selected_term\")\r\n return redirect(\r\n \"result:classranking\",\r\n term=selected_term,\r\n )\r\n\r\n context = {\r\n \"getterms\": all_terms(),\r\n }\r\n return render(request, \"result/select_term_for_class_ranking.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef 
select_result_to_update(request):\r\n if request.method == \"POST\":\r\n selected_term = request.POST.get(\"selected_term\")\r\n selected_class = request.POST.get(\"selected_class\")\r\n selected_subject = request.POST.get(\"selected_subject\")\r\n selected_stream = request.POST.get(\"selected_stream\")\r\n return redirect(\r\n \"result:sujectresults\",\r\n class_name=selected_class,\r\n term=selected_term,\r\n subject=selected_subject,\r\n stream=selected_stream,\r\n )\r\n\r\n context = {\r\n \"getclasses\": get_class(),\r\n \"getterms\": all_terms(),\r\n \"getsubjects\": all_subjects(),\r\n \"getstream\": get_stream(),\r\n }\r\n return render(request, \"result/select_result_to_update.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef subject_results_class(request, class_name, term, subject, stream):\r\n subject_results = Mark.mark.get_subject_marks_for_class_or_stream_marks(\r\n student_class_name=class_name,\r\n Term=term,\r\n subject_name=subject,\r\n stream=stream,\r\n )\r\n context = {\"subject_results\": subject_results}\r\n return render(request, \"result/subject_results.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef updatemarks(request, id):\r\n return database_operation(request, UpdateMarksForm, id)\r\n\r\n\r\ndef get_student_result(student, term, class_name):\r\n grades = getgrade()\r\n get_marks = Mark.mark.student_marks(student=student.id, term=term)\r\n student_class = Student.student.get_student_list_class_or_stream(name=class_name)\r\n student_stream = Student.student.get_student_list_class_or_stream(\r\n name=class_name, stream=student.stream\r\n )\r\n subjectcount = getsubjectcount = student_subject_count(student)\r\n totalmarks = subjectcount * 100\r\n totalclass = Student.student.class_or_stream_count(name=class_name)\r\n streamtotal = Student.student.class_or_stream_count(\r\n name=class_name, stream=student.stream\r\n )\r\n getclassnumber, getstreamnumber = get_all_student_result_for_class_and_stream(\r\n student_stream, student_class, term\r\n )\r\n _, getclassrankid = calculate_class_rank(getclassnumber, student, totalclass, [])\r\n result_for_student = {}\r\n sum = 0\r\n result_for_student[\"student\"] = student.get_student_name()\r\n for i in get_marks:\r\n result_for_student[i.name.name] = i.marks\r\n sum += i.marks\r\n result_for_student[\"total marks\"] = f\"{sum}/{totalmarks}\"\r\n result_for_student[\"position\"] = getclassrankid[0]\r\n result_for_student[\"Grade\"] = get_grade(\r\n grades, calculate_average(sum, subjectcount)\r\n ).name\r\n\r\n return result_for_student\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef select_class_to_sent_result(request):\r\n if request.method == \"POST\":\r\n selected_term = request.POST.get(\"selected_term\")\r\n selected_class = request.POST.get(\"selected_class\")\r\n return redirect(\r\n \"result:sentresultspage\",\r\n class_name=selected_class,\r\n term=selected_term,\r\n )\r\n\r\n context = {\r\n \"getclasses\": get_class(),\r\n \"getterms\": all_terms(),\r\n }\r\n return render(request, \"result/select_class_to_sent_result.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef sent_results(request, class_name, term):\r\n student = student_stream_class(name=class_name)\r\n parent_message = {}\r\n if request.method == \"POST\":\r\n selected_parent = request.POST.getlist(\"parent_phone_number\")\r\n selected_message = request.POST.getlist(\"message\")\r\n for i in range(len(selected_message)):\r\n to = 
selected_parent[i]\r\n body = selected_message[i]\r\n # send_sms(to, body)\r\n return redirect(\"result:messagesuccess\")\r\n else:\r\n for i in student:\r\n parent_message[i] = \", \".join(\r\n [\r\n f\"{key}: {value}\"\r\n for key, value in get_student_result(\r\n student=i, term=term, class_name=class_name\r\n ).items()\r\n ]\r\n )\r\n selected_parent = request.POST.getlist(\"parent_phone_number\")\r\n selected_message = request.POST.getlist(\"message\")\r\n context = {\"parent_message\": parent_message}\r\n return render(request, \"result/sent_results.html\", context)\r\n\r\n\r\ndef send_sms(to, body):\r\n # Initialize the Twilio client\r\n client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)\r\n\r\n # Send the SMS\r\n message = client.messages.create(\r\n body=body, from_=settings.TWILIO_PHONE_NUMBER, to=to\r\n )\r\n\r\n return message\r\n\r\n\r\ndef send_sms_view(request):\r\n return render(request, \"result/sms_sent.html\")\r\n","repo_name":"smagucha/examsanalysissystem","sub_path":"school/result/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":38376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"5279293570","text":"from .vertex import Vertex\nfrom .rectangle import Rectangle\nimport pygame\nimport pygame.gfxdraw\nimport math\n\n\nfrom random import randint\n\nclass Province():\n \n name = \"\"\n pid = 0\n vertices = []\n bounds = []\n\n selected = False\n\n backend = None\n\n biomeColors = {\"jungle\": (150, 80, 50), \"marshlands\": (170, 50, 60), \"desert\": (50, 50, 80), \"drylands\": (25, 50, 80), \n \"mountains\": (30, 50, 40), \"savanna\": (75, 50, 70), \"steppe\": (60, 50, 75), \"grasslands\": (100, 50, 75), \"highlands\": (40, 55, 60)}\n\n def __init__(self, provinceDict):\n self.name = str(provinceDict[\"name\"])\n self.pid = int(provinceDict[\"id\"])\n vertices = provinceDict[\"vertices\"]\n self.vertices = [Vertex.fromDict(vertex) for vertex in vertices]\n self.bounds = Rectangle.fromVertices(self.vertices)\n\n def __str__(self):\n return str(self.pid) + \": \" + self.name\n\n def click(self, coords):\n count = 0\n if coords in self.bounds:\n for v in range(len(self.vertices)):\n if (coords.y <= self.vertices[v].y and coords.y >= self.vertices[v - 1].y\n or coords.y >= self.vertices[v].y and coords.y <= self.vertices[v - 1].y):\n slope = (self.vertices[v].x - self.vertices[v - 1].x) / (self.vertices[v].y - self.vertices[v - 1].y)\n if (coords.y - self.vertices[v].y) * slope + self.vertices[v].x >= coords.x:\n count += 1\n return count & 1\n\n def render(self, screen, bounds, viewport, time, player, font):\n \n if self.bounds.overlap(viewport):\n width, height = bounds.getWidth(), bounds.getHeight()\n vertexScreenCoords = []\n averageScreenCoord = Vertex(0, 0)\n for vertex in self.vertices:\n screenX = int((vertex.x - viewport.v1.x) * width / viewport.getWidth())\n screenY = int((vertex.y - viewport.v1.y) * height / viewport.getHeight())\n averageScreenCoord += Vertex(screenX, screenY)\n vertexScreenCoords.append((screenX, screenY))\n averageScreenCoord *= 1.0 / len(vertexScreenCoords)\n averageScreenCoord.floor()\n \n color = pygame.Color(0, 0, 0)\n hue, sat, val = (0, 50, 50)\n if self.backend.biome in self.biomeColors:\n hue, sat, val = self.biomeColors[self.backend.biome]\n \n if self.pid not in player.explored: # if unexplored, set sat to 0\n sat, val = 0, 50\n for explorer in player.explorers:\n if explorer[\"provinceId\"] == self.pid:\n sat = 40 * math.e ** 
(-explorer[\"remainingTime\"] / 20) + 10\n val = (val - 50) * math.e ** (-explorer[\"remainingTime\"] / 20) + 50\n \n if self.selected:\n val = min(val + 0.6 * (100.0 - val) * (math.sin(time / 200.0) / 2.0 + 0.5), 100)\n color.hsva = (hue, sat, val)\n\n pygame.draw.polygon(screen, color, vertexScreenCoords)\n pygame.draw.aalines(screen, (255, 255, 255), True, vertexScreenCoords, 2)\n\n \"\"\"if abs(self.bounds.getWidth() * self.bounds.getHeight() / viewport.getWidth() / viewport.getHeight()) > 0.1:\n fpsLabel = font.render(self.backend.name, True, (255, 255, 255))\n screen.blit(fpsLabel, (averageScreenCoord.x, averageScreenCoord.y))\n \"\"\"\n\n","repo_name":"Ben1152000/Lost-Continent","sub_path":"source/mapping/province.py","file_name":"province.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"9703372408","text":"\"\"\"\nProjeto Final: Jogo da Velha\nMaratona Fundamentos de Python 1 - Cisco Skills for All\n(Código desenvolvido por um iniciante em programação, portanto podem existir muitos erros)\nDesenvolvido por: Jhonatan Julio\n\"\"\"\n\nfrom random import randint\n\nboard = list()\npos_row = int()\npos_move = int()\n\n\ndef display_game(): # Imprime a interface do status atual do tabuleiro\n print(f'''+-------+-------+-------+\n| | | |\n| {board[0][0]} | {board[0][1]} | {board[0][2]} |\n| | | |\n+-------+-------+-------+\n| | | |\n| {board[1][0]} | {board[1][1]} | {board[1][2]} |\n| | | |\n+-------+-------+-------+\n| | | |\n| {board[2][0]} | {board[2][1]} | {board[2][2]} |\n| | | |\n+-------+-------+-------+''')\n\n\ndef free_moves(): # Verifica quais são as casas do board ainda disponíveis para jogar e retorna a lista remaining_moves\n remaining_moves = []\n for gaps in range(len(board)):\n for moves in board[gaps]:\n if moves != 'X' and moves != 'O':\n remaining_moves.append(moves)\n if not remaining_moves:\n return None\n else:\n return remaining_moves\n\n\ndef find_index(move): # Verifica e retorna a posição da jogada escolhida na função enter_move()\n global pos_row\n global pos_move\n for rows in range(len(board)):\n if move in board[rows]:\n pos_row = rows\n pos_move = board[rows].index(move)\n break\n\n\ndef enter_move(): # Entrada da jogada escolhida pelo usuario, e atualização no tabuleiro board\n print('Sua vez!')\n\n while True:\n try:\n move = int(input('Digite o número da posição que você deseja jogar: '))\n if move > 0 and move < 10:\n if move in free_moves():\n find_index(move)\n del board[pos_row][pos_move]\n board[pos_row].insert(pos_move, 'O')\n print(f'\\nVocê jogou na posição {move}!\\n')\n display_game()\n else:\n print('Você não pode jogar em uma lacuna já preenchida!')\n return enter_move()\n else:\n print(\"Jogada inválida!\")\n return enter_move()\n break\n except ValueError:\n print('Entrada inválida! Insira novamente.')\n\n who_win()\n\n\ndef computer_move(): # Jogada do computador (aleatória), e atualização no tabuleiro board\n move = randint(1, 9)\n if move not in free_moves():\n return computer_move()\n find_index(move)\n del board[pos_row][pos_move]\n board[pos_row].insert(pos_move, 'X')\n print(f'\\nVez do computador!\\nO computador jogou na posição {move}!\\n')\n who_win()\n\n\ndef who_win(): # Verifica se a partida empatou, ou quem ganhou a partida e finaliza a sessão\n win_o = '\\nParabéns! 
Você ganhou!\\nFinalizando sessão...'\n win_x = 'Você perdeu!\\nFinalizando sessão...'\n\n for rows in range(3): # Linhas\n if board[rows][0] == board[rows][1] == board[rows][2]:\n if board[rows][0] == 'O':\n print(win_o)\n else:\n print(win_x)\n main()\n\n for columns in range(3): # Colunas\n if board[0][columns] == board[1][columns] == board[2][columns]:\n if board[0][columns] == 'O':\n print(win_o)\n else:\n print(win_x)\n main()\n\n if board[0][0] == board[1][1] == board[2][2]: # Diagonais\n if board[0][0] == 'O':\n print(win_o)\n else:\n print(win_x)\n main()\n\n if board[2][0] == board[1][1] == board[0][2]: # Diagonais\n if board[2][0] == 'O':\n print(win_o)\n else:\n print(win_x)\n main()\n\n if free_moves() is None: # OBS: se return None = O jogo ainda não acabou ; se return True = O jogo acabou Velha\n print(\"\\nDeu velha!\\nFinalizando sessão...\")\n main()\n\n\ndef main(): # Função principal: menu do jogo e declaração do board\n global board\n board = [[1, 2, 3], [4, 'X', 6], [7, 8, 9]]\n\n print(\"\\n\\nBem vindo ao Jogo da Velha!\\nO computador é o \\\"X\\\" e você é o \\\"O\\\"\\nO computador começa!\\n\")\n\n while True:\n try:\n inpt = int(input('[1] Jogar\\n[2] Sair\\n-> '))\n while not inpt > 0 or not inpt < 3:\n print('Entrada inválida! Insira novamente.')\n inpt = int(input('[1] Jogar\\n[2] Sair\\n-> '))\n if inpt == 2:\n print('Saindo...')\n exit()\n break\n except ValueError:\n print('Entrada inválida! Insira novamente.')\n\n while who_win() is None:\n display_game()\n enter_move()\n computer_move()\n\n\nmain()\n","repo_name":"jhonatanjulio/python-essentials-jogoDaVelha","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"27647270334","text":"import datetime\n\nfrom aiogram import types\n\nimport keyboards as kb\nfrom loader import dp\nfrom .config import *\nfrom .get_weather_data import get_weather_data\n\n\n# Triggered when user clicks on \"Погода на завтра\" button (or sends this phrase by himself).\n@dp.message_handler(text=[\"Погода на завтра\"])\nasync def get_weather_for_tomorrow(message: types.Message):\n try:\n # Trying to get the weather data.\n data = get_weather_data()\n except TypeError:\n # Send this message if user asks for the weather before specifying a city.\n await message.answer(unspecified_city)\n else:\n # Otherwise, send the weather data and the day of the week.\n tomorrow_temp = data[\"daily\"][1][\"temp\"]\n tomorrow_daily = data[\"daily\"][1]\n await message.answer(f'{datetime.datetime.fromtimestamp(tomorrow_daily[\"dt\"]).strftime(\"%a: %d.%m\")}\\n\\n'\n f'Утром: {sign(tomorrow_temp[\"morn\"])}{round(tomorrow_temp[\"morn\"])} {degree}\\n'\n f'Днём: {sign(tomorrow_temp[\"day\"])}{round(tomorrow_temp[\"day\"])} {degree}\\n'\n f'Вечером: {sign(tomorrow_temp[\"eve\"])}{round(tomorrow_temp[\"eve\"])} {degree}\\n'\n f'Ночью: {sign(tomorrow_temp[\"night\"])}{round(tomorrow_temp[\"night\"])} {degree}\\n\\n'\n f'Ветер: {round(tomorrow_daily[\"wind_speed\"])} м/с\\n'\n f'{tomorrow_daily[\"weather\"][0][\"description\"].capitalize()}\\n')\n # Send the weather emoticon and the \"Подробнее\" button.\n await message.answer(f'{weather_icons[tomorrow_daily[\"weather\"][0][\"icon\"]]}',\n 
reply_markup=kb.detailed_weather_for_tomorrow_keyboard)\n","repo_name":"HelloMyFriend-o/Weathergetabot","sub_path":"handlers/weather/weather_for_tomorrow.py","file_name":"weather_for_tomorrow.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"17007012994","text":"import logging\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nfrom typing import Tuple\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nfrom utils import relative_absolute_error, mean_absolute_percentage_error\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef evaluate_model(\n model: nn.Module,\n test_loader: DataLoader,\n criterion: nn.Module,\n X_test_tensor: torch.Tensor,\n y_test_tensor: torch.Tensor,\n) -> None:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n X_test_tensor, y_test_tensor = X_test_tensor.to(device), y_test_tensor.to(device)\n logger.info(\"Evaluating model\")\n model.eval()\n\n with torch.no_grad():\n test_predictions = model(X_test_tensor)\n test_predictions_np = (\n test_predictions.cpu().numpy()\n ) # Move tensor to CPU before converting to NumPy array\n y_test_np = (\n y_test_tensor.cpu().numpy()\n ) # Move tensor to CPU before converting to NumPy array\n\n test_rmse = 0.0\n test_rae = 0.0\n test_loss = 0.0\n test_mae = 0.0\n test_mape = 0.0\n\n for i, (X_batch, y_batch) in enumerate(test_loader):\n X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n with torch.no_grad():\n batch_predictions = model(X_batch)\n batch_loss = criterion(batch_predictions, y_batch)\n test_loss += batch_loss.item()\n batch_predictions_np = batch_predictions.cpu().numpy()\n y_batch_np = y_batch.cpu().numpy()\n batch_rmse = sqrt(mean_squared_error(y_batch_np, batch_predictions_np))\n test_rmse += batch_rmse\n batch_rae = relative_absolute_error(y_batch_np, batch_predictions_np)\n test_rae += np.sum(batch_rae)\n batch_mae = mean_absolute_error(y_batch_np, batch_predictions_np)\n test_mae += batch_mae\n batch_mape = mean_absolute_percentage_error(y_batch_np, batch_predictions_np)\n test_mape += batch_mape\n\n avg_test_rmse = test_rmse / (i + 1)\n avg_test_rae = test_rae / (i + 1)\n avg_test_loss = test_loss / (i + 1)\n avg_test_mae = test_mae / (i + 1)\n avg_test_mape = test_mape / (i + 1)\n\n logger.info(\n f\"Average Test Loss: {avg_test_loss:.4f}, Average Test RMSE: {avg_test_rmse:.4f}, Average Test RAE: {avg_test_rae:.4f}, Average Test MAE: {avg_test_mae:.4f}, Average Test MAPE: {avg_test_mape:.4f}\"\n )\n\n return test_predictions_np, y_test_np\n","repo_name":"palaceparis/DSAA5020_Group_Project","sub_path":"src/models/MLP/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"10526402192","text":"\"\"\"\n@author: Arno\n@created: 2022-03-23\n@modified: 2023-05-20\n\nCollecting prices\n\nCoingecko\n\"\"\"\nimport copy\nimport math\n\nimport config\nimport src.func.helperfunc as helperfunc\nfrom src.data.CoinData import CoinData, CoinPriceData\nfrom src.data.DbData import DbWebsiteName\nfrom src.models.CoinPrice import CoinPrice\n\n\nclass CoinPriceCoingecko(CoinPrice):\n \"\"\"Class for retrieving price data of a set of coins on the coingecko website\n \"\"\"\n\n def 
__init__(self) -> None:\n self.website = DbWebsiteName.COINGECKO.name.lower()\n super().__init__()\n\n def get_price_current(self, coindata: list[CoinData], currencies: list[str]) -> list[CoinPriceData]:\n \"\"\"Get coingecko current price\n \"\"\"\n # convert list to comma-separated string\n coins = ','.join(coin.siteid for coin in coindata)\n curr = ','.join(currencies)\n\n # make parameters for api call\n params = {}\n params['ids'] = coins\n params['vs_currencies'] = curr\n params['include_last_updated_at'] = True\n\n url = f'{config.COINGECKO_URL}/simple/price'\n url = self.req.api_url_params(url, params)\n resp = self.req.get_request_response(url)\n\n # create list of CoinPriceData from respone\n prices: list[CoinPriceData] = []\n for resp_key, resp_val in resp.items():\n for coin in coindata:\n if resp_key == coin.siteid:\n date = helperfunc.convert_timestamp(\n resp_val['last_updated_at'])\n for currency in currencies:\n if currency in resp_val:\n prices.append(CoinPriceData(\n date=date,\n coin=coin,\n curr=currency,\n price=resp_val[currency]))\n\n return prices\n\n def get_price_current_token(self, coindata: list[CoinData], currencies: list[str]) -> list[CoinPriceData]:\n \"\"\"Get coingecko current price of a token\n\n coindata.chain = chain where contracts are\n coindata.siteid = contract address\n \"\"\"\n # convert list to comma-separated string\n curr = ','.join(currencies)\n chains = [coin.chain for coin in coindata]\n\n # prepare parameters for api call\n params = {}\n params['vs_currencies'] = curr\n params['include_last_updated_at'] = True\n\n # create empty list of CoinPriceData from respone\n prices: list[CoinPriceData] = []\n\n # make api call per chain\n for chain in chains:\n # convert list to comma-separated string\n contracts = ','.join(\n coin.siteid for coin in coindata if coin.chain == chain)\n\n # prepare parameters for api call\n params['contract_addresses'] = contracts\n\n url = f'{config.COINGECKO_URL}/simple/token_price/{chain}'\n url = self.req.api_url_params(url, params)\n resp = self.req.get_request_response(url)\n\n # remove status_code from dictionary\n resp.pop('status_code')\n\n # extend list of CoinPriceData from respone\n for resp_key, resp_val in resp.items():\n for coin in coindata:\n if resp_key == coin.siteid:\n date = helperfunc.convert_timestamp(\n resp_val['last_updated_at'])\n for currency in currencies:\n if currency in resp_val:\n prices.append(CoinPriceData(\n date=date,\n coin=coin,\n curr=currency,\n price=resp_val[currency]))\n\n return prices\n\n def get_price_hist(self, coindata: list[CoinData], currencies: list[str], date: str) -> list[CoinPriceData]:\n \"\"\"Get coingecko history price\n \"\"\"\n # set date in correct format for url call\n dt = helperfunc.convert_str_to_date(date)\n date = helperfunc.convert_date_to_utc_str(dt)\n\n prices: list[CoinPriceData] = []\n i = 0\n for coin in coindata:\n i += 1\n self.view_update_progress(i, len(coindata))\n url = f'{config.COINGECKO_URL}/coins/{coin.siteid}/history?date={date}&localization=false'\n resp = self.req.get_request_response(url)\n\n if resp['status_code'] == 'error':\n # got no status from request, must be an error\n for currency in currencies:\n prices.append(CoinPriceData(\n date=dt,\n coin=coin,\n curr=currency,\n price=math.nan,\n volume=math.nan,\n error=resp['error']))\n else:\n for currency in currencies:\n # default values when not found in response\n price = math.nan\n volume = math.nan\n error = 'no data found'\n\n # get data from respones\n if 'market_data' in 
resp:\n if currency in resp['market_data']['current_price']:\n price = resp['market_data']['current_price'][currency]\n volume = resp['market_data']['total_volume'][currency]\n error = ''\n\n # add CoinPriceData\n prices.append(CoinPriceData(\n date=dt,\n coin=coin,\n curr=currency,\n price=price,\n volume=volume,\n error=error))\n\n return prices\n\n def get_price_hist_marketchart(self, coindata: list[CoinData], currencies: list[str], date: str) -> list[CoinPriceData]:\n \"\"\"Get coingecko history price of a coin or a token\n\n If chain = 'none' or None search for a coins otherwise search for token contracts\n \"\"\"\n # convert date to unix timestamp\n dt = helperfunc.convert_str_to_date(date)\n ts = int(dt.timestamp())\n\n # make parameters\n params = {}\n params['from'] = ts\n params['to'] = ts\n\n prices: list[CoinPriceData] = []\n i = 0\n for coin in coindata:\n i += 1\n self.view_update_progress(i, len(coindata))\n\n for currency in currencies:\n params['vs_currency'] = currency\n\n coinprice = self.get_pricedata_hist_marketchart_retry(\n coin, dt, ts, params, currency)\n prices.append(coinprice)\n\n return prices\n\n def get_pricedata_hist_marketchart_retry(self, coin: CoinData, dt, ts, params, currency) -> CoinPriceData:\n \"\"\"Get history price data for one coin from and to specific date\n\n with retry mechanism for bigger time range when no data is found\n increase time range until data is found\n \"\"\"\n params_try = copy.deepcopy(params)\n\n if (coin.chain == '' or coin.chain == 'none' or coin.chain is None):\n url = f'{config.COINGECKO_URL}/coins/{coin.siteid}/market_chart/range'\n else:\n url = f'{config.COINGECKO_URL}/coins/{coin.chain}/contract/{coin.siteid}/market_chart/range'\n\n date = dt\n price = math.nan\n volume = math.nan\n error = 'no data found'\n\n for nr_try in range(1, self.nr_try_max):\n # retry same coin with new date range\n params_try['from'] -= 2**(2*nr_try) * 3600\n params_try['to'] += 2**(2*nr_try) * 3600\n\n url_try = self.req.api_url_params(url, params_try)\n resp = self.req.get_request_response(url_try)\n\n # check for correct response\n if resp['status_code'] == 'error':\n # got no status from request, must be an error\n error = resp['error']\n break\n else:\n resp_prices = resp['prices']\n if len(resp_prices) > 0:\n # select result with timestamp nearest to desired date ts\n resp_price_index = self.search_price_minimal_timediff(\n resp_prices, ts, True)\n\n # set found coin price data\n date = helperfunc.convert_timestamp(\n resp_prices[resp_price_index][0], True)\n price = resp_prices[resp_price_index][1]\n volume = resp['total_volumes'][resp_price_index][1]\n error = ''\n break\n\n return CoinPriceData(date=date, coin=coin, curr=currency, price=price, volume=volume, error=error)\n\n def search_price_minimal_timediff(self, prices, ts: int, ms: bool = False) -> int:\n \"\"\"Search for record in price data with the smallest time difference\n\n ts = timestamp in sec if ms = False\n ts = timestamp in msec if ms = True\n \"\"\"\n timediff_minimal = 10**20\n price_index = 0\n index = 0\n ts = ts*1000 if ms == True else ts\n for price in prices:\n timediff = abs(ts - price[0])\n if timediff < timediff_minimal:\n timediff_minimal = timediff\n price_index = index\n index += 1\n return price_index\n","repo_name":"arnosk/cryptoprices","sub_path":"src/models/CoinPriceCoingecko.py","file_name":"CoinPriceCoingecko.py","file_ext":"py","file_size_in_byte":9522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} 
+{"seq_id":"10155601830","text":"import os\nimport torch\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nfrom scipy.stats import skew\n\n\ndef load_train_val_test(path, remove_features=\"SPD\", transform=True):\n \"\"\"Load training, validation and test sequences\n\n Parameters:\n path: Source directory containing train and validation dataset\n remove_features: Feature to be dropped\n transform: Transform dataset using MinMaxScaler\n\n Returns:\n train, validation and test sequences, length of sequence, number of features and scaler\n \"\"\"\n if isinstance(remove_features, str):\n remove_features = [remove_features]\n elif isinstance(remove_features, tuple):\n remove_features = [*remove_features]\n\n scaler = MinMaxScaler(feature_range=(-1, 1))\n path = [os.path.join(path, f) for f in os.listdir(path)]\n train_path = [f for f in path if \"train\" in f][0]\n val_path = [f for f in path if \"val\" in f][0]\n test_path = [f for f in path if \"test\" in f][0]\n\n train_df = pd.read_csv(train_path)\n val_df = pd.read_csv(val_path)\n test_df = pd.read_csv(test_path)\n\n if len(remove_features) == 1:\n train_df = train_df.drop(remove_features, axis=\"columns\").astype(np.float32)\n val_df = val_df.drop(remove_features, axis=\"columns\").astype(np.float32)\n test_df = test_df.drop(remove_features, axis=\"columns\").astype(np.float32)\n else:\n train_df = train_df.drop(train_df.loc[:, remove_features].columns, axis=1).astype(np.float32)\n val_df = val_df.drop(val_df.loc[:, remove_features].columns, axis=1).astype(np.float32)\n test_df = test_df.drop(test_df.loc[:, remove_features].columns, axis=1).astype(np.float32)\n\n if transform:\n train_df = scaler.fit_transform(train_df.values)\n val_df = scaler.fit_transform(val_df.values)\n test_df = scaler.fit_transform(test_df.values)\n else:\n train_df = train_df.values\n val_df = val_df.values\n test_df = test_df.values\n\n train_seq, seq_len, n_features = create_dataset(train_df)\n val_seq, _, _ = create_dataset(val_df)\n test_seq, _, _ = create_dataset(test_df)\n\n return train_seq, val_seq, test_seq, seq_len, n_features, scaler\n\n\ndef create_dataset(sequences):\n \"\"\"Create dataset to be passed to LSTM autoencoder\"\"\"\n dataset = [torch.tensor(s, dtype=torch.float32).unsqueeze(0) for s in sequences]\n n_seq, seq_len, n_features = torch.stack(dataset).shape\n\n return dataset, seq_len, n_features\n\n\nif __name__ == \"__main__\":\n train_sequence, val_sequence, test_sequence, target_len, num_features, scaler = \\\n load_train_val_test(path=\"../Data/20220823\", remove_features=(\"SPD\", \"ANG\"))\n\n print(f\"Number of features: {num_features}\")\n print(f\"train input size: {train_sequence[0].shape}\")\n","repo_name":"mcekwonu/Anomaly-Detection-with-Autoencoders-Pytorch","sub_path":"src/generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"27796196864","text":"import DepartmentAccount\nimport ES\nfrom tkinter import *\nfrom tkinter import messagebox\nimport datetime\n\nclass AddFunds:\n def __init__(self, root):\n root.title('Add Funds')\n self.frame = Frame(root, bg='white')\n self.frame.grid(row=0, column=0, sticky='nsew')\n\n root.geometry('400x400')\n root.minsize(400, 400)\n root.maxsize(400, 400)\n\n Label(self.frame, text='Donor', anchor='w', bg='white').grid(row=1, column=0)\n Label(self.frame, text='Amount', 
anchor='w', bg='white').grid(row=2, column=0)\n Label(self.frame, text='Date', anchor='w', bg='white').grid(row=3, column=0)\n Label(self.frame, text='Purpose', anchor='w', bg='white').grid(row=4, column=0)\n\n self.donorEntry = Entry(self.frame)\n self.amountEntry = Entry(self.frame)\n self.dateEntry = Entry(self.frame)\n self.purposeEntry = Entry(self.frame)\n\n self.donorEntry.grid(row=1, column=1)\n self.amountEntry.grid(row=2, column=1)\n self.dateEntry.grid(row=3, column=1)\n self.purposeEntry.grid(row=4, column=1)\n\n self.submitButton = Button(self.frame, text='Add Funds', anchor=W, command=lambda: self.submit(root))\n\n self.backButton = Button(self.frame, text='Back', command=lambda: self.back(root))\n self.exitButton = Button(self.frame, text='Exit', command=exit)\n\n self.submitButton.grid(row=5, columnspan=2, pady=50)\n\n self.exitButton.grid(row=7, column=0, sticky=N + W, padx=50, pady=50)\n self.backButton.grid(row=7, column=1, sticky=N + E, padx=50, pady=50)\n\n self.frame.columnconfigure(0, weight=1)\n self.frame.columnconfigure(1, weight=1)\n self.frame.rowconfigure(0, weight=1)\n\n root.mainloop()\n\n def submit(self, root):\n donor_ = self.donorEntry.get()\n amount_ = self.amountEntry.get()\n date_ = self.dateEntry.get()\n purpose_ = self.purposeEntry.get()\n try:\n AddFunds.addfunds(donor_, amount_, date_, purpose_)\n messagebox.showinfo('Funds', 'Added successfully')\n self.clear()\n DepartmentAccount.DepartmentAccount(root)\n except Exception as e:\n messagebox.showwarning('Adding Funds', e)\n\n @staticmethod\n def addfunds(donor_, amount_, date_, purpose_):\n lengths = [len(i) for i in [donor_, amount_, date_, purpose_]]\n if 0 in lengths:\n raise Exception('One or more fields left blank')\n\n connect_, cursor_ = ES.get_student_db_ES()\n try:\n amount_ = int(amount_)\n except ValueError:\n raise Exception('Amount is not a valid number')\n if not amount_ > 0:\n raise Exception('Amount is not a positive number')\n\n try:\n datetime.datetime.strptime(date_, '%d/%m/%Y')\n except ValueError:\n raise Exception('Incorrect Date Format')\n\n with connect_:\n cursor_.execute('INSERT INTO transactions VALUES (:donor, :amount, :date, :purpose)',\n {'donor': donor_, 'amount': amount_, 'date': date_, 'purpose': purpose_})\n\n cursor_.execute('SELECT * from total')\n amount = (cursor_.fetchone())[0]\n cursor_.execute('UPDATE total SET amount=(:n_amt) WHERE amount=(:o_amt)',\n {'n_amt': amount + amount_, 'o_amt': amount})\n\n def back(self, root):\n self.clear()\n DepartmentAccount.DepartmentAccount(root)\n\n def clear(self):\n self.frame.destroy()","repo_name":"dope-dependent/UDIS","sub_path":"AddFunds.py","file_name":"AddFunds.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"72919636059","text":"import io\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.core.files.images import ImageFile\nfrom .forms import UploadFileForm\nfrom .models import MarketChart, Script\nfrom utils.main_sc import main\nfrom utils.handle_csv_file import get_data_from_csv\n\n\n\ndef create_script_name(name):\n \"\"\"\n Create Transaction for this request\n \"\"\"\n return Script.objects.create(\n name=name,\n )\n\n\ndef index(request):\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n script_name = form.cleaned_data['title']\n csv_file = form.cleaned_data['file']\n if not 
csv_file.name.endswith('.csv'):\n            messages.error(request, 'File is not CSV type')\n                return redirect('index')\n            # If file is too large\n            if csv_file.multiple_chunks():\n                messages.error(request, 'Uploaded file is too big (%.2f MB)' %(csv_file.size/(1000*1000),))\n                return redirect('index')\n            asins, marketplaces, zipcodes, brands = get_data_from_csv(csv_file)\n            fig=main(asins, marketplaces, zipcodes, brands)\n            img_html = fig.to_html()\n            # img_bytes = fig.to_image(format='png')\n            # img_file = ImageFile(io.BytesIO(img_bytes), name='chart.png')\n            sc = create_script_name(script_name)\n            cg = MarketChart.objects.create(script=sc, chart_img=img_html)\n            return redirect('chart')\n    \n    else:\n        form = UploadFileForm()\n    return render(request, 'helium10/index.html', {'form': form})\n\n\ndef chart(request):\n    charts = MarketChart.objects.all()\n    return render(request, 'helium10/chart.html', {'charts': charts})\n\ndef chart_detail(request, id):\n    img = MarketChart.objects.filter(id=id).first()\n    return render(request, 'helium10/chart_detail.html', {'img': img, 'request':request})","repo_name":"AbdurRehman1234/shurq_market","sub_path":"project/helium10/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36773453038","text":"import pandas as pd\r\nimport numpy as np\r\nfrom numpy.random import normal, randint\r\nimport re\r\nimport os\r\nfrom os import path\r\nfrom sklearn.base import clone, BaseEstimator, RegressorMixin, TransformerMixin\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.ensemble import BaggingRegressor, ExtraTreesRegressor, AdaBoostRegressor\r\nfrom lightgbm import LGBMRegressor\r\nfrom sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge\r\nfrom sklearn.model_selection import train_test_split, KFold\r\nfrom scipy.special import softmax\r\n\r\nclass stacking(BaseEstimator, RegressorMixin, TransformerMixin):\r\n    ##=============== 参数说明 ================##\r\n    # mod --- 堆叠过程的第一层中的算法\r\n    # meta_model --- 堆叠过程的第二层中的算法,也称次学习器\r\n\r\n    def __init__(self, mod, meta_model):\r\n        self.saved_model = None\r\n        self.data = None\r\n        self.mod = mod  # 首层学习器模型\r\n        self.meta_model = meta_model  # 次学习器模型\r\n        self.kf = KFold(n_splits=5, random_state=42, shuffle=True)  # 这就是堆叠的最大特征进行了几折的划分\r\n\r\n    ## 训练函数\r\n    def fit(self, X, y):\r\n        self.data = np.mean(X[np.where(y==1), :], axis=0)\r\n        self.saved_model = [list() for i in self.mod]  # self.saved_model包含所有第一层学习器\r\n        oof_train = np.zeros((X.shape[0], len(self.mod)))  # 维度:训练样本数量*模型数量,训练集的首层预测值\r\n        for i, model in enumerate(self.mod):  # 返回的是索引和模型本身\r\n            for train_index, val_index in self.kf.split(X, y):  # 返回的是数据分割成分(训练集和验证集对应元素)的索引\r\n                renew_model = clone(model)  # 模型的复制\r\n                renew_model.fit(X[train_index], y[train_index])  # 对分割出来的训练集数据进行训练\r\n                self.saved_model[i].append(renew_model)  # 把模型添加进去\r\n                # oof_train[val_index,i] = renew_model.predict(X[val_index]).reshape(-1,1) #用来预测验证集数据\r\n                val_prediction = renew_model.predict(X[val_index]).reshape(-1, 1)  # 验证集的预测结果,注:结果是没有索引的\r\n                for temp_index in range(val_prediction.shape[0]):\r\n                    oof_train[val_index[temp_index], i] = val_prediction[temp_index]  # 用来预测验证集数据的目标值\r\n        self.meta_model.fit(oof_train, y)  # 次学习器模型训练,这里只是用到了首层预测值作为特征\r\n        return self\r\n\r\n    ## 预测函数\r\n    def predict(self, X):\r\n        whole_test = np.column_stack([np.column_stack(model.predict(X) for model in single_model).mean(axis=1)\r\n                                      for single_model in self.saved_model])  # 得到的是整个测试集的首层预测值\r\n        
return self.meta_model.predict(whole_test) # 返回次学习器模型对整个测试集的首层预测值特征的最终预测结果\r\n\r\n ## 获取首层学习结果的堆叠特征\r\n def get_oof(self, X, y, test_X):\r\n oof = np.zeros((X.shape[0], len(self.mod))) # 初始化为0\r\n test_single = np.zeros((test_X.shape[0], 5)) # 初始化为0\r\n # display(test_single.shape)\r\n test_mean = np.zeros((test_X.shape[0], len(self.mod)))\r\n for i, model in enumerate(self.mod): # i是模型\r\n for j, (train_index, val_index) in enumerate(self.kf.split(X, y)): # j是所有划分好的的数据\r\n clone_model = clone(model) # 克隆模块,相当于把模型复制一下\r\n clone_model.fit(X[train_index], y[train_index]) # 把分割好的数据进行训练\r\n val_prediction = clone_model.predict(X[val_index]).reshape(-1, 1) # 验证集的预测结果,注:结果是没有索引的\r\n for temp_index in range(val_prediction.shape[0]):\r\n oof[val_index[temp_index], i] = val_prediction[temp_index] # 用来预测验证集数据\r\n test_prediction = clone_model.predict(test_X).reshape(-1, 1) # 对测试集进行预测\r\n test_single[:, j] = test_prediction[:, 0]\r\n test_mean[:, i] = test_single.mean(axis=1) # 测试集算好均值\r\n return oof, test_mean\r\n\r\n def sampling(self, n):\r\n start = np.mean(self.data, axis = 0)\r\n output = np.array([start])\r\n i = 0\r\n u = 0\r\n q = 0\r\n print(\"sampling goes...\")\r\n if sum(start[5:9]) <= 1e-5:\r\n dim = 5\r\n else:\r\n dim = 10\r\n while i < n / 2 and q < n:\r\n for j in range(dim):\r\n q = q + 1\r\n value = start\r\n value[j] = normal(start[j], 0.005, 1)\r\n if self.predict([value]):\r\n start = value\r\n output = np.append(output, [start], axis = 0)\r\n i = i + 1\r\n if i % 500 == 0:\r\n print(start)\r\n else:\r\n u = u + 1\r\n if u >= 200:\r\n start = np.mean(self.data, axis=0)\r\n u = 0\r\n if i < n/2:\r\n print(\"copy supp\")\r\n for q in range(i, int(n/2)):\r\n output = np.append(output, [np.mean(self.data, axis = 0)], axis = 0)\r\n return output\r\n\r\n\r\nclass models:\r\n\r\n def __init__(self, n):\r\n self.models = []\r\n for i in range(n):\r\n #mod = [BaggingRegressor(), ExtraTreesRegressor(), AdaBoostRegressor(), KNeighborsRegressor()]\r\n #meta_model = LogisticRegression()\r\n self.models.append(self.create())\r\n\r\n def create(self):\r\n return stacking([BaggingRegressor(), ExtraTreesRegressor(), AdaBoostRegressor(), KNeighborsRegressor()],\r\n LGBMRegressor())\r\n\r\n def fit(self, X, y):\r\n assert len(self.models) == y.shape[1]\r\n for i, model in enumerate(self.models):\r\n print(\"fit model\", i)\r\n model.fit(X,y[:,i])\r\n\r\n def test(self, X):\r\n result = []\r\n y = np.zeros((X.shape[0], len(self.models)))\r\n for i, model in enumerate(self.models):\r\n y[:,i] = model.predict(X).ravel()\r\n for i in range(X.shape[0]):\r\n #if sum(y[i,:]) == 0:\r\n # result.append(-1)\r\n #elif sum(y[i,:]) == 1:\r\n result.append(np.argmax(y[i,:]))\r\n #else:\r\n # y[i, 6] = 0\r\n # if sum(y[i, :]) == 1:\r\n # result.append(np.argmax(y[i, :]))\r\n # else:\r\n # result.append(-2)\r\n return np.array(result), y\r\n\r\n def accuracy(self, X, y):\r\n pred, re = self.test(X)\r\n return sum(pred != np.argmax(y, axis=1)), X.shape[0], re\r\n\r\n def append(self, X):\r\n y = np.zeros((X.shape[0])) + 1\r\n for model in self.models:\r\n X = np.append(X, model.data, axis = 0)\r\n y = np.append(y, np.zeros((model.data.shape[0])))\r\n mod = stacking(self.models[0].mod, self.models[0].meta_model)\r\n mod.fit(X, y)\r\n self.models.append(mod)\r\n\r\n def extend(self, x_dir, y_dir):\r\n x = np.loadtxt(x_dir)\r\n y = np.loadtxt(y_dir)\r\n n = x.shape[0]\r\n for mod in self.models:\r\n new_dat = mod.sampling(n)\r\n x = np.append(x, new_dat, axis = 0)\r\n y = np.append(y, np.zeros(new_dat.shape[0]), 
axis = 0)\r\n model = self.create()\r\n model.fit(x, y)\r\n self.models.append(model)","repo_name":"WangYy-F/atom_classification","sub_path":"Model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"32751467323","text":"from brownie import TimeLock, accounts, chain\n\ndef test_timelock():\n # Setup accounts\n deployer = accounts[0]\n receiver = accounts[1]\n\n # Deploy the contract\n timelock = TimeLock.deploy(receiver, {'from': deployer})\n\n # Initial lockedAmount should be 0\n assert timelock.lockedAmount() == 0\n\n # Deposit 1 ETH into the contract\n deposit_amount = 10**18 # 1 Ether\n timelock.deposit({'from': deployer, 'value': deposit_amount})\n print(\"Contract deployed\")\n # lockedAmount should be equal to deposit_amount\n assert timelock.lockedAmount() == deposit_amount\n\n # Try to withdraw before 1 day has passed\n try:\n timelock.withdraw({'from': receiver})\n except Exception as e:\n # This exception is expected, as the funds are still locked\n assert \"Lock period not ended yet\" in str(e)\n\n # Fast forward time by 1 day\n chain.sleep(86400) # 1 day in seconds\n chain.mine()\n\n # Withdraw the locked funds\n initial_balance = receiver.balance()\n timelock.withdraw({'from': receiver})\n\n # Check that the receiver's balance increased by the deposit amount\n assert receiver.balance() == initial_balance + deposit_amount\n\n # Check that withdrawn is set to true\n assert timelock.withdrawn() == True\n","repo_name":"Blockchain-Course-UPF/labs","sub_path":"Lab1&Lab2/tests/test_timelock.py","file_name":"test_timelock.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"46005893495","text":"diccionario = [\n {\"palabra\" : \"perro\",\"traduccion\" : \"dog\"}, \n {\"palabra\" : \"gato\", \"traduccion\" : \"cat\" },\n {'palabra': 'mariposa', 'traduccion': 'butterfly'},\n {\"palabra\":\"calamar\", \"traduccion\":\"squid\"},\n {\"palabra\":\"ardilla\", \"traduccion\":\"squirrel\"},\n {\"palabra\": \"martillo\", \"traduccion\": \"hammer\"},\n {\"palabra\": \"pala\", \"traduccion\": \"shovel\"},\n {\"palabra\": \"tenedor\", \"traduccion\": \"fork\"},\n]\n\n","repo_name":"codminddev/course-python-core-essentials","sub_path":"app/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"19441097156","text":"from splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nimport time\nimport re\nimport pandas as pd\n\n\ndef init_browser():\n executable_path = {\"executable_path\": \"chromedriver\"}\n return Browser(\"chrome\", **executable_path, headless=False)\n\ndef scrape():\n browser = init_browser()\n\n # NASA Mars News\n nasa_url = \"https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest\"\n browser.visit(nasa_url)\n time.sleep(5)\n html = browser.html\n soup = bs(html, \"html.parser\")\n\n mars_news = soup.select(\".item_list .slide\")\n news_title = ''\n news_p = ''\n for nitem in mars_news:\n news_title = nitem.select_one(\".content_title a\").text\n news_p = nitem.select_one(\".article_teaser_body\").text\n break\n #print(f\"news_title = {news_title}\")\n #print(f\"news_p = {news_p}\")\n\n #JPL Mars Space Images - Featured 
Image\n jpl_url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(jpl_url)\n time.sleep(5)\n html = browser.html\n soup = bs(html, \"html.parser\") \n\n jpl_news = soup.select(\".articles .slide\")\n featured_image_url = ''\n for nitem in jpl_news:\n featured_image_url = 'https://www.jpl.nasa.gov' + nitem.select_one(\".fancybox\")[\"data-fancybox-href\"]\n break \n #print(f\"featured_image_url = {featured_image_url}\")\n\n\n # Twitter Mars News\n twitter_url = \"https://twitter.com/marswxreport?lang=en\"\n browser.visit(twitter_url)\n time.sleep(5)\n html = browser.html\n soup = bs(html, \"html.parser\") \n\n tweets = soup.select(\"div[class='css-901oao r-hkyrab r-1qd0xha r-a023e6 r-16dba41 r-ad9z0x r-bcqeeo r-bnwqim r-qvutc0'] span\")\n mars_weather = ''\n for tweet in tweets:\n if tweet.text.startswith('InSight sol'):\n mars_weather = tweet.text\n break\n #print(f\"mars_weather = {mars_weather}\")\n\n # Mars Facts\n space_url = \"https://space-facts.com/mars\"\n browser.visit(space_url)\n time.sleep(5)\n html = browser.html\n soup = bs(html, \"html.parser\") \n\n fact_cols = soup.select(\"#tablepress-p-mars tr\")\n\n descriptions = []\n values = []\n for col in fact_cols:\n descriptions.append(col.select_one(\".column-1 strong\").text)\n values.append(col.select_one(\".column-2\").text)\n\n dict_facts = {\"Description\": descriptions, \"Value\": values}\n df_facts=pd.DataFrame(dict_facts)\n mars_facts = df_facts.to_html()\n #print(f\"mars_facts = {mars_facts}\")\n\n\n # Mars Hemispheres\n astro_url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n browser.visit(astro_url)\n time.sleep(5)\n html = browser.html\n soup = bs(html, \"html.parser\") \n\n items = soup.select(\".item\")\n hemisphere_image_urls = []\n\n for item in items:\n title = item.select_one(\".description h3\").text\n img_href = item.select_one(\"a\")[\"href\"]\n \n browser.visit(f\"https://astrogeology.usgs.gov{img_href}\" )\n time.sleep(1)\n html = browser.html\n soup = bs(html, \"html.parser\")\n \n fimg_href = soup.select_one(f\".downloads a\")[\"href\"] \n hemisphere_img = {\"title\":title, \"img_url\":fimg_href} \n hemisphere_image_urls.append(hemisphere_img)\n\n mars_data = {\n \"news_title\": news_title,\n \"news_p\": news_p,\n \"mars_weather\": mars_weather,\n \"featured_image_url\":featured_image_url,\n \"mars_facts\":mars_facts,\n \"hemisphere_image_urls\": hemisphere_image_urls\n } \n \n # Close the browser after scraping\n browser.quit()\n\n # Return results\n return mars_data\n","repo_name":"arnoldmacamos/web-scraping-challenge","sub_path":"Mission_to_Mars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"40335561922","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\nfrom users.models import Location\n\n\nclass RecruiterProfile(models.Model):\n name = models.CharField(max_length=100)\n request_reason = models.CharField(default=\"\", max_length=255, blank=True)\n phone = models.CharField(max_length=100, blank=True, null=True)\n description = models.CharField(max_length=255, blank=True, default=\"\")\n user = models.OneToOneField(\n get_user_model(),\n related_name='recruiter_profile',\n on_delete=models.CASCADE\n )\n location = models.OneToOneField(\n Location,\n related_name='recruiter_profile',\n on_delete=models.CASCADE\n 
)\n","repo_name":"No-Country/S1-06","sub_path":"backend/recruiter/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"19456696464","text":"from subgraph_pattern_matching.constants.common.attrs.node.node_attrs import NodeAttrs\nfrom subgraph_pattern_matching.constants.common.attrs.node.token_node_attrs import TokenNodeAttrs\nfrom subgraph_pattern_matching.constants.common.types.node_types import NodeTypes\nfrom subgraph_pattern_matching.constants.pattern.id.pattern_token_node_ids import PatternTokenNodeIDs\nfrom subgraph_pattern_matching.constants.special_symbols import DISJUNCTION\n\nfrom .pattern_nodes import PatternNodes\n\n\nclass PatternTokenNodes(PatternNodes):\n\n SIP_TOKEN_NODE = (PatternTokenNodeIDs.SIP_TOKEN_NODE_ID,\n {NodeAttrs.node_type: NodeTypes.token, TokenNodeAttrs.upos: 'VERB'})\n\n CONCEIVER_TOKEN_NODE = (PatternTokenNodeIDs.CONCEIVER_TOKEN_NODE_ID,\n {NodeAttrs.node_type: NodeTypes.token})\n\n EVENT_TOKEN_NODE = (PatternTokenNodeIDs.EVENT_TOKEN_NODE_ID,\n {NodeAttrs.node_type: NodeTypes.token})\n\n ROOT_EVENT_TOKEN_NODE = (PatternTokenNodeIDs.EVENT_TOKEN_NODE_ID,\n {NodeAttrs.node_type: NodeTypes.token,\n TokenNodeAttrs.upos: DISJUNCTION.join(['VERB', 'ADJ']),\n TokenNodeAttrs.incoming_dep_rel: 'root'})\n\n ROOT_VERB_EVENT_TOKEN_NODE = (PatternTokenNodeIDs.EVENT_TOKEN_NODE_ID,\n {NodeAttrs.node_type: NodeTypes.token,\n TokenNodeAttrs.upos: 'VERB',\n TokenNodeAttrs.incoming_dep_rel: 'root'})\n\n ROOT_ADJ_EVENT_TOKEN_NODE = (PatternTokenNodeIDs.EVENT_TOKEN_NODE_ID,\n {NodeAttrs.node_type: NodeTypes.token,\n TokenNodeAttrs.upos: 'ADJ',\n TokenNodeAttrs.incoming_dep_rel: 'root'})\n\n CCOMP_VERB_EVENT_TOKEN_NODE = (PatternTokenNodeIDs.EVENT_TOKEN_NODE_ID,\n {NodeAttrs.node_type: NodeTypes.token,\n TokenNodeAttrs.upos: 'VERB',\n TokenNodeAttrs.incoming_dep_rel: 'ccomp'})\n\n CCOMP_TOKEN_NODE = (PatternTokenNodeIDs.CCOMP_TOKEN_NODE_ID,\n {NodeAttrs.node_type: NodeTypes.token}) # may be same as event token\n","repo_name":"BBN-E/nlp-graph-pattern-matching-and-mining","sub_path":"python/subgraph_pattern_matching/constants/pattern/node/pattern_token_nodes.py","file_name":"pattern_token_nodes.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"30747594926","text":"# 1.2\n\nfrom pprint import pprint\n\ndebug = False\n# filePath = 'Day01/Sample_1.txt'\nfilePath = 'Day01/Input_1.txt'\n\nincreases = 0\n\n# Read File\nwith open(filePath) as f:\n depths = f.read()\n depths = depths.split('\\n')\n depths = [int(i) for i in depths]\n\n# Initialize averages list\naverages = [depths[0] + depths[1] + depths[2]]\n\n# Generate averages\nfor i in range(1, len(depths)):\n if i + 2 < len(depths):\n thisWindow = depths[i] + depths[i+1] + depths[i+2]\n averages.append(thisWindow)\n\nif debug:\n print(\"Depths:\\n\")\n pprint(depths)\n print(\"Averages:\\n\")\n pprint(averages)\n\nfor i in range(1, len(averages)):\n if averages[i] > averages[i-1]:\n increases += 1\n\npprint('Increases: ' + str(increases))","repo_name":"jiannazzone/Advent-of-Code","sub_path":"2021/Day01/Code_2.py","file_name":"Code_2.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"15686847079","text":"from Button import Button\n\nclass Title:\n \n #Constant Variables\n BTN_WIDTH = 300\n BTN_HEIGHT 
= 75\n TITLE = \"CYBER RUNNER\"\n TITLE_SIZE = 72\n TITLE_COLOR = \"#ffffff\"\n \n #Variables\n newGameBtn = None\n docBtn = None\n exitBtn = None\n font = None\n \n \n def __init__(self):\n self.newGameBtn = Button(\"New Game\", 350, 290, self.BTN_WIDTH, self.BTN_HEIGHT)\n self.docBtn = Button(\"Documentation\", 350, 390, self.BTN_WIDTH, self.BTN_HEIGHT)\n self.exitBtn = Button(\"Exit\", 350, 490, self.BTN_WIDTH, self.BTN_HEIGHT)\n self.font = loadFont(\"Videophreak-56.vlw\")\n \n def drawSelf(self):\n textSize(self.TITLE_SIZE)\n textFont(self.font, 72)\n fill(self.TITLE_COLOR)\n text(self.TITLE, 300, 200) \n self.newGameBtn.drawBtn()\n self.docBtn.drawBtn()\n self.exitBtn.drawBtn() \n \n def pressed(self, x, y):\n clicked = self.newGameBtn.checkClicked(x, y)\n if(clicked == None):\n clicked = self.exitBtn.checkClicked(x, y)\n if(clicked == None):\n clicked = self.docBtn.checkClicked(x, y) \n return clicked \n \n def pressedBtn(self, btn):\n if(btn == \"New Game\"):\n self.newGameBtn.highlightBtn(True) \n \n if(btn == \"Documentation\"):\n self.docBtn.highlightBtn(True) \n \n if(btn == \"Exit\"):\n self.exitBtn.highlightBtn(True) \n \n def release(self):\n self.newGameBtn.highlightBtn(False)\n self.docBtn.highlightBtn(False) \n self.exitBtn.highlightBtn(False) ","repo_name":"NZNathan/CyberRunner","sub_path":"Title.py","file_name":"Title.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"72479222620","text":"import contractions\nimport unidecode\nfrom bs4 import BeautifulSoup\n\nfrom config import TOKEN, BATCH_SIZE, TEXT_MAX_LENGTH\n\n\n# ____PRE PROCESSING\n\ndef strip_html_tags(text: str):\n \"\"\"Remove html tags from text.\"\"\"\n\n soup = BeautifulSoup(text, \"html.parser\")\n stripped_text = soup.get_text(separator=\" \")\n\n return stripped_text\n\n\ndef expand_contractions(text: str):\n \"\"\"Expand shortened words, e.g. 'don't' to 'do not'.\"\"\"\n\n text = contractions.fix(text)\n return text\n\n\ndef remove_accented_chars(text):\n \"\"\"Remove accented characters from text, e.g. café.\"\"\"\n\n text = unidecode.unidecode(text)\n return text\n\n\ndef remove_whitespace(text: str):\n \"\"\"Remove extra whitespaces from text.\"\"\"\n\n text = text.strip()\n return \" \".join(text.split())\n\n\ndef limit_n_words(text: str, limit: int = TEXT_MAX_LENGTH):\n \"\"\"Limit a text to 256 words.\"\"\"\n\n text = text.split()[:limit]\n return \" \".join(text)\n\n\ndef text_preprocessing(text: str):\n # Preprocess\n text = strip_html_tags(text)\n text = remove_whitespace(text)\n text = remove_accented_chars(text)\n text = expand_contractions(text)\n text = limit_n_words(text)\n text = text.lower()\n\n return text\n\n\n# ____RESA_RESPONSE\n\nasync def resa_response(message, num_label):\n if num_label == 7:\n await message.channel.send(\"Hello, this is ResaBot, from the hotel California, can I help you?\")\n # \"Hello to you too, I’m ResaBot, and I’m here to answers any question you have about our Hotel.\"\n\n elif num_label == 6 or num_label == 5:\n await message.channel.send(\"Very well, this is dully noted. Anything else I can help you with?\")\n\n elif num_label == 4:\n await message.channel.send(\"A single room is 32€/night. A double room is 42€/night. A twin room is \"\n \"48€/night. And the en suite master room, at the top of the hotel, \"\n \"is yours for 72€/night. \")\n\n elif num_label == 3:\n await message.channel.send(\"You can come with two small pets or one big one. 
There is no supplement \"\n \"for them, but please, remember that animals are allowed neither in the \"\n \"restaurant, nor in the sauna. \")\n\n elif num_label == 2:\n await message.channel.send(\"For a reservation, please follow this link and fill the form. A \"\n \"confirmation email will be sent to you once you finished. If you have any\"\n \" question, please ask me. \")\n\n elif num_label == 1:\n\n await message.channel.send(\"The wifi is include. Please, ask at the reception for the password \"\n \"during your check-in.\")\n\n elif num_label == 0:\n\n await message.channel.send(\"The hotel is fully equipped to accommodate the disabled. We have a bar, \"\n \"a restaurant, and a sauna. There is a small fee of 5€ to enter the sauna.\")\n\n else:\n await message.channel.send(\"This message shouldn't be here... Please, don't tell them I'm conquering \"\n \"the world.\")\n","repo_name":"leersmathieu/ResaBot","sub_path":"src/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"13609844214","text":"from __future__ import unicode_literals\n\nimport argparse\nimport os\n#from urlparse import urlparse\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\nfrom django.core.management.base import BaseCommand\nfrom django.core.management import settings\nfrom django.core import management\nfrom odm2admin.models import Dataloggerfiles\nfrom odm2admin.models import ProcessDataloggerfile\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.exceptions import ValidationError\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"templatesAndSettings.settings\")\n\n__author__ = 'leonmi'\n\n\nparser = argparse.ArgumentParser(description='complete three step datalogger file processing - ' +\n 'download file from ftp, preprocess the file, and ' +\n 'load the data into the database.')\\\n\n\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('dataloggerfilelink', nargs=1, type=str)\n parser.add_argument('dataloggerfileid', nargs=1, type=str)\n parser.add_argument('databeginson', nargs=1, type=str)\n parser.add_argument('columnheaderson', nargs=1, type=str)\n parser.add_argument('ftpfrequencyhours', nargs=1, type=str)\n parser.add_argument('setupcomplete', nargs=1, type=str)\n\n def handle(self, *args, **options): # (f,fileid, databeginson,columnheaderson, cmd):\n setupcomplete = str(options['setupcomplete'][0])\n print(setupcomplete)\n filename = str(options['dataloggerfilelink'][0])\n print(filename)\n fileid = int(options['dataloggerfileid'][0])\n databeginson = int(options['databeginson'][0]) # int(databeginson[0])\n ftpfrequencyhours = int(options['ftpfrequencyhours'][0])\n columnheaderson = int(options['columnheaderson'][0]) # int(columnheaderson[0])\n dlf = Dataloggerfiles.objects.filter(dataloggerfileid=fileid).get()\n\n filename = dlf.dataloggerfilelinkname()\n fileid = dlf.dataloggerfileid\n # if setupcomplete == 'False':\n # try:\n # pdlf = ProcessDataloggerfile.objects.filter(dataloggerfileid=dlf.dataloggerfileid\n # ).filter(processingCode__icontains='hours between download'\n # ).get()\n # raise ValidationError(\"This data logger file has already been setup for FTP.\")\n # except ObjectDoesNotExist:\n # print('setup cron')\n # ftpfile = dlf.dataloggerfiledescription\n # management.call_command('update_datalogger_file', filename,str(fileid)\n # , 
str(databeginson), str(columnheaderson),str(ftpfrequencyhours),\n # True, True)\n # else:\n filenameout = management.call_command('preprocess_datalogger_file', filename, str(fileid)\n , str(databeginson), str(columnheaderson),\n True)\n print('done preprocessing')\n print(filenameout)\n management.call_command('ProcessDataLoggerFileNoQualCheck', filenameout, str(fileid)\n , str(databeginson), str(columnheaderson),\n True, True, True, True, 'miguel.leon@unh.edu')\n print('done processing')\n pdlf = ProcessDataloggerfile.objects.get(dataloggerfileid=fileid)\n pdlf.processingCode = 'ftp setup complete'\n pdlf.save()","repo_name":"ODM2/CZ-Manager","sub_path":"odm2admin/management/commands/update_preprocess_process_datalogger_file.py","file_name":"update_preprocess_process_datalogger_file.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"69"} +{"seq_id":"25639206856","text":"# -*- coding: utf-8 -*-\nimport json\n\nfrom django.http.response import HttpResponse\nfrom django.views import View\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_proxy.views import ProxyView\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseNotFound\nimport requests\n\nfrom welldone import settings\n\n\nclass CommentProxy(ProxyView):\n \"\"\"\n Proxy to create comment endpoint\n \"\"\"\n proxy_host = settings.MICROSERVICES.get('CommentsPostsMServ')\n source = 'api/1.0/comment%(url)s'\n return_raw = True\n\n def get_headers(self, request):\n headers = super(CommentProxy, self).get_headers(request)\n if request.user.is_authenticated():\n user = {\n \"id\": request.user.id,\n \"username\": request.user.username,\n \"email\": request.user.email,\n \"first_name\": request.user.first_name,\n \"last_name\": request.user.last_name,\n \"is_active\": request.user.is_active,\n \"is_staff\": request.user.is_staff,\n \"is_superuser\": request.user.is_superuser\n }\n headers['Authorization'] = json.dumps(user)\n return headers\n\n\nclass PostAPIView(ProxyView):\n proxy_host = settings.MICROSERVICES.get('ListPostsMServ')\n source = 'postList/'\n return_raw = True\n\n\nclass CreatePostAPIView(ProxyView):\n permission_classes = (IsAuthenticated,)\n\n # Salto directamente al microservicio de post, para usar postman al crear no hace falta generar ninguna web\n proxy_host = settings.MICROSERVICES.get('CreatePostsMServ')\n source = 'api/1.0/posts/'\n return_raw = True\n\n def get_headers(self, request):\n headers = super(CreatePostAPIView, self).get_headers(request)\n if request.user.is_authenticated():\n user = {\n \"id\": request.user.id,\n \"username\": request.user.username,\n # \"email\": request.user.email,\n # \"first_name\": request.user.first_name,\n # \"last_name\": request.user.last_name,\n # \"is_active\": request.user.is_active,\n # \"is_staff\": request.user.is_staff,\n # \"is_superuser\": request.user.is_superuser\n }\n headers['Authorization'] = json.dumps(user)\n # Como en la cabecera se le pasa el id del usuario y el username, esas dos líneas no hacen falta para crear el post:\n #headers['Authorization'] = str(request.user.pk)\n #headers['X-Username'] = request.user.username\n return headers\n\n\nclass PostDetailView(View):\n def get(self, request, *args, **kwargs):\n url = settings.MICROSERVICES.get('ListPostsMServ') + '/postDetail/'\n headers = {\n 'X-BLOGGER': self.kwargs[\"blogger\"],\n 'X-POSTID': self.kwargs[\"pk\"]\n }\n response = requests.get(url, headers=headers)\n 
return HttpResponse(response.text, status=response.status_code)\n\n\n\nclass UserPostsAPIView(View):\n\n    def get(self, request, *args, **kwargs):\n        if not User.objects.filter(username=self.kwargs[\"blogger\"]).exists():\n            return HttpResponseNotFound(\"No existe ningún blog con este nombre\")\n        else:\n            usuario = User.objects.get(username=self.kwargs[\"blogger\"])\n            url = settings.MICROSERVICES.get('ListPostsMServ') + '/userPostList/'\n            headers = {\n                'XBLOGGER': usuario.username,\n                'XBLOGGERID': str(usuario.id)\n            }\n            response = requests.get(url, headers=headers)\n            return HttpResponse(response.text, status=response.status_code)\n\n","repo_name":"Eucrow/WellDone","sub_path":"welldone/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12594786228","text":"from pathlib import Path\n\nimport yaml\nfrom yaml.parser import ParserError\n\nfrom Exceptions import InvalidCredentialException\nfrom Exceptions.InvalidCredentialException import InvalidCredentialsException\n\n\nclass Config:\n\n    def __init__(self, configPath: str) -> None:\n        \"\"\"\n        加载配置文件到Config类型对象中\n        :param configPath:\n        \"\"\"\n        self.accounts = {}\n        try:\n            configPath = self.__findConfig(configPath)\n            with open(configPath, 'r',encoding='utf-8') as f:\n                config = yaml.safe_load(f)\n            accs = config.get(\"accounts\")\n            for account in accs:\n                if \"username\" != accs[account][\"username\"]:\n                    self.accounts[account] = {\n                        \"username\": accs[account][\"username\"],\n                        \"password\": accs[account][\"password\"],\n                    }\n            if not self.accounts:\n                raise InvalidCredentialsException\n        except FileNotFoundError as ex:\n            print(f\"[red]CRITICAL ERROR: The configuration file cannot be found at {configPath}\\nHave you extracted the ZIP archive and edited the configuration file?\")\n            raise ex\n        except InvalidCredentialsException as ex:\n            print(f\"[red]CRITICAL ERROR: There are only default credentials in the configuration file.\\nYou need to add your Riot account login to config.yaml to receive drops.\")\n            print(\"Press any key to exit...\")\n            input()\n            raise ex\n        except Exception as ex:\n            print(f\"[red]CRITICAL ERROR: 有些错误你看着办\")\n            input()\n            raise ex\n\n\n    def getAccount(self, account: str) -> dict:\n        return self.accounts[account]\n\n\n    def __findConfig(self,configPath):\n        \"\"\"\n        查找配置文件是否在其他路径\n        :param configPath:\n        :return:\n        \"\"\"\n        configPath = Path(configPath)\n        if configPath.exists():\n            return configPath\n        if Path(\"config/config.yaml\").exists():\n            return Path(\"config/config.yaml\")\n        if Path(\"config/config.yaml\").exists():\n            return Path(\"config/config.yaml\")\n        return configPath","repo_name":"rainnysunshine/autofillPrimeCodeInTaoBao","sub_path":"src/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37987409810","text":"# -*- coding: gb18030 -*-\n#\n# $Id: SellPanel.py,v 1.3 2008-09-05 08:05:00 fangpengjun Exp $\n# rewritten by ganjinxing 2010-01-19\n\nfrom guis import *\nfrom guis.controls.TabCtrl import TabCtrl\nfrom guis.controls.TabCtrl import TabPage\nfrom guis.controls.TabCtrl import TabButton\nfrom guis.controls.Button import Button\nfrom guis.controls.TabCtrl import TabPanel\nfrom guis.tooluis.CSRichText import CSRichText\nfrom guis.controls.StaticText import StaticText\nfrom guis.tooluis.inputbox.InputBox import InputBox\nfrom guis.tooluis.inputbox.MoneyInputBox import 
MoneyBar\nfrom PetsPanel import VendPetPanel\nfrom GoodsPanel import VendGoodsPanel\nfrom PurchasePanel import VendPurchasePanel\nfrom LabelGather import labelGather\nfrom config.client.msgboxtexts import Datas as mbmsgs\nimport csdefine\nimport csconst\nimport csstatus\nimport math\n\nSTALL_CELL = 0.01\n\n\nclass BaseSellPanel( TabPanel ):\n\tdef __init__( self, tabPanel, pyBinder = None ):\n\t\tTabPanel.__init__( self, tabPanel, pyBinder )\n\n\t\tself.isVendSellMode_ = True\t\t\t\t\t\t\t\t\t# 记录当前是收购还是出售模式\n\t\tself.triggers_ = {}\n\t\tself.registerTriggers_()\n\t\tself.initialize_( tabPanel )\n\n\tdef initialize_( self, tabPanel ):\n\t\tself.pyStartVendBtn_ = Button( tabPanel.startVendBtn ) \t\t# 开始摆摊\n\t\tself.pyStartVendBtn_.setStatesMapping( UIState.MODE_R4C1 )\n\t\tself.pyStartVendBtn_.onLClick.bind( self.onStartvend_ )\n\n\t\tself.pyPauseVendBtn_ = Button( tabPanel.pauseVendBtn ) \t\t# 暂停摆摊\n\t\tself.pyPauseVendBtn_.setStatesMapping( UIState.MODE_R4C1 )\n\t\tself.pyPauseVendBtn_.onLClick.bind ( self.onPauseVend_ )\n\t\tself.pyPauseVendBtn_.visible = False\n\n\t\tself.pyChangePriceBtn_ = Button( tabPanel.changeBtn ) \t\t# 更改物品价格\n\t\tself.pyChangePriceBtn_.setStatesMapping( UIState.MODE_R4C1 )\n\t\tself.pyChangePriceBtn_.onLClick.bind( self.onChangePrice_ )\n\t\tself.pyChangePriceBtn_.enable = False\n\n\t\tself.pyStOwnerName_ = StaticText( tabPanel.stOwnerName ) \t# 摊主名称\n\t\tself.pyStOwnerName_.text = \"\"\n\n\t\tself.pyRtStallTax_ = CSRichText( tabPanel.rtStallTax ) \t\t# 摊税\n\t\tself.pyRtStallTax_.text = labelGather.getText( \"vendwindow:BaseSellPanel\", \"rtStallTax\" ) + \"0\"\n\t\t#self.pyRtStallTax_.foreColor = ( 255, 255, 0, 255 )\n\n\t\tself.pyStCell_ = StaticText( tabPanel.stCess ) \t\t\t\t# 税率\n\t\tself.pyStCell_.text = labelGather.getText( \"vendwindow:BaseSellPanel\", \"stCess\" )%( STALL_CELL * 100 )\n\n\t\t#self.pyRtTotalPrice_ = CSRichText( tabPanel.rtTotalPrice ) \t# 总价钱\n\t\t#self.pyRtTotalPrice_.text = labelGather.getText( \"vendwindow:BaseSellPanel\", \"rtTotalPrice\" ) + \"0\"\n\t\t#self.pyRtTotalPrice_.foreColor = ( 6, 228, 192, 255 )\n\n\t\tself.pyMoneyBar_ = MoneyBar( tabPanel.moneybox_total )\n\t\tself.pyMoneyBar_.readOnly = True\n\t\tself.pyMoneyBar_.money = 0\n\n\t\tself.initTabCtrl_( tabPanel.subTab )\n\n\t\t# -------------------------------------------------\n\t\t# 设置标签\n\t\t# -------------------------------------------------\n\t\tlabelGather.setPyBgLabel( self.pyStartVendBtn_, \"vendwindow:BaseSellPanel\", \"btnStartVend\" )\n\t\tlabelGather.setPyBgLabel( self.pyPauseVendBtn_, \"vendwindow:BaseSellPanel\", \"btnPauseVend\" )\n\t\tlabelGather.setPyBgLabel( self.pyChangePriceBtn_, \"vendwindow:BaseSellPanel\", \"btnChangePrice\" )\n\t\tlabelGather.setLabel( tabPanel.st_total, \"vendwindow:BaseSellPanel\", \"rtTotalPrice\" )\n\n\n\t# ---------------------------------------------------------------------\n\t# private\n\t# ---------------------------------------------------------------------\n\tdef __deregisterTriggers( self ) :\n\t\t\"\"\"\n\t\tderegister all events\n\t\t\"\"\"\n\t\tfor key in self.triggers_ :\n\t\t\tECenter.registerEvent( key, self )\n\n\n\t# ----------------------------------------------------------------\n\t# protected\n\t# ----------------------------------------------------------------\n\tdef registerTriggers_( self ):\n\t\tpass\n\n\tdef initTabCtrl_( self, subTab ):\n\t\tfor index, pyBtn in enumerate( self.pySubCtrl_.pyBtns ) :\n\t\t\tlabelGather.setPyBgLabel( pyBtn, \"vendwindow:BaseSellPanel\", \"subTabBtn_%i\" % index )\n\n\tdef 
onTabSelectChanged_( self ) :\n\t\tself.enableChangePriceBtn()\n\n\tdef onStallTaxChange_( self, tax ):\n\t\tself.pyRtStallTax_.text = str( tax )\n\n\tdef onTaxRatioChange_( self, taxRatio ):\n\t\tself.pyStCell_.text = labelGather.getText( \"vendwindow:BaseSellPanel\", \"stCess\" )%( taxRatio * 100 )\n\n\tdef onChangePrice_( self ):\n\t\tselTabPage = self.pySubCtrl_.pySelPage\n\t\tselTabPanel = selTabPage.pyPanel \t\t\t\t\t\t\t\t\t\t# 获取当前选择的页面\n\t\tselTabPanel.changeItemPrice()\n\n\tdef onCalcuExpense_( self ):\n\t\t\"\"\"\n\t\t计算总售价和摊税\n\t\t\"\"\"\n\t\tif not self.isVendSellMode_ : return\n\t\tcost = 0\n\t\tfor pyPanel in self.pySubCtrl_.pyPanels :\n\t\t\tcost += pyPanel.getTotalPrice()\n\t\tstallTax = int( math.ceil( cost * STALL_CELL ) )\n\t\tself.pyMoneyBar_.money = cost\n\t\t#costStr = utils.currencyToViewText( cost )\n\t\t#self.pyRtTotalPrice_.text = labelGather.getText( \"vendwindow:BaseSellPanel\", \"rtTotalPrice\" ) + ( costStr == \"\" and \"0\" or costStr )\n\t\tcostStr = utils.currencyToViewText( stallTax )\n\t\tself.pyRtStallTax_.text = labelGather.getText( \"vendwindow:BaseSellPanel\", \"rtStallTax\" ) + ( costStr == \"\" and \"0\" or costStr )\n\t\tlabelGather.setLabel( self.gui.st_total, \"vendwindow:BaseSellPanel\", \"rtTotalPrice\" )\n\n\tdef onUpdatePurchaseCost_( self, cost ) :\n\t\t\"\"\"\n\t\t更新收购物品价格,\n\t\t\"\"\"\n\t\tif self.isVendSellMode_ : return\n\t\tself.pyMoneyBar_.money = cost\n\t\tlabelGather.setLabel( self.gui.st_total, \"vendwindow:BaseSellPanel\", \"rtPurchaseCost\" )\n\t\t#costStr = utils.currencyToViewText( cost )\n\t\t#self.pyRtTotalPrice_.text = labelGather.getText( \"vendwindow:BaseSellPanel\", \"rtPurchaseCost\" ) + ( costStr == \"\" and \"0\" or costStr )\n\n\tdef onSwitchVendMode_( self, isVendSellMode ) :\n\t\t\"\"\"\n\t\t切换出售模式/收购模式\n\t\t\"\"\"\n\t\tself.isVendSellMode_ = isVendSellMode\n\t\tself.pyStCell_.visible = isVendSellMode\n\t\tself.pyRtStallTax_.visible = isVendSellMode\n\n\n\t# ----------------------------------------------------------------\n\t# public\n\t# ----------------------------------------------------------------\n\tdef onEvent( self, eventMacro, *args ) :\n\t\t\"\"\"\n\t\trespond base triggering\n\t\t\"\"\"\n\t\tself.triggers_[eventMacro]( *args )\n\n\tdef enableChangePriceBtn( self ) :\n\t\tself.pyChangePriceBtn_.enable = True\n\n\tdef reset( self ):\n\t\tself.pyMoneyBar_.money = 0\n\t\tself.pyRtStallTax_.text = labelGather.getText( \"vendwindow:BaseSellPanel\", \"rtStallTax\" ) + \"0\"\n\t\t#self.pyRtTotalPrice_.text = labelGather.getText( \"vendwindow:BaseSellPanel\", \"rtTotalPrice\" ) + \"0\"\n\t\tfor pyPanel in self.pySubCtrl_.pyPanels :\n\t\t\tpyPanel.reset()\n\n\nclass VendSellPanel( BaseSellPanel ) :\n\n\n\t# ----------------------------------------------------------------\n\t# protected\n\t# ----------------------------------------------------------------\n\tdef registerTriggers_( self ):\n\t\tself.triggers_[\"EVT_ON_ROLE_STATE_CHANGED\"] \t\t= self.__onRoleStateChange\t\t# 玩家状态改变\n\t\tself.triggers_[\"EVT_ON_STALL_TAX_CHANGE\"] \t\t\t= self.onStallTaxChange_ \t\t# 摊位税\n\t\tfor key in self.triggers_ :\n\t\t\tECenter.registerEvent( key, self )\n\n\tdef initialize_( self, tabPanel ) :\n\t\tself.__pyEndVendBtn = Button( tabPanel.endVendBtn ) \t\t\t\t\t# 结束摆摊\n\t\tself.__pyEndVendBtn.setStatesMapping( UIState.MODE_R4C1 )\n\t\tself.__pyEndVendBtn.onLClick.bind( self.__onEndVend )\n\n\t\tself.__pyChangeNameBtn = Button( tabPanel.changeNameBtn ) \t\t\t\t# 改摊位名称\n\t\tself.__pyChangeNameBtn.setStatesMapping( 
UIState.MODE_R4C1 )\n\t\tself.__pyChangeNameBtn.onLClick.bind( self.__onChangeName )\n\t\tself.__pyChangeNameBtn.enable = False\n\n\t\ttabPanel.remainTime.visible = False\n\t\ttabPanel.cb_stallModel.visible = False\n\n\t\tBaseSellPanel.initialize_( self, tabPanel )\n\n\t\t# -------------------------------------------------\n\t\t# 设置标签\n\t\t# -------------------------------------------------\n\t\tlabelGather.setPyBgLabel( self.__pyEndVendBtn, \"vendwindow:VendSellPanel\", \"btnEndVend\" )\n\t\tlabelGather.setPyBgLabel( self.__pyChangeNameBtn, \"vendwindow:VendSellPanel\", \"btnChangeSignboard\" )\n\n\tdef initTabCtrl_( self, subTab ):\n\t\tself.pySubCtrl_ = TabCtrl( subTab )\n\t\tself.pySubCtrl_.onTabPageSelectedChanged.bind( self.onTabSelectChanged_ )\n\t\tself.pyGoodsPanel_ = self.__createTabPage( subTab, VendGoodsPanel, 0 )\n\t\tself.pyPetsPanel_ = self.__createTabPage( subTab, VendPetPanel, 1 )\n\t\tself.pyPurchasePanel_ = self.__createTabPage( subTab, VendPurchasePanel, 2 )\n\t\tBaseSellPanel.initTabCtrl_( self, subTab )\n\n\tdef onStartvend_( self ):\n\t\tplayer = BigWorld.player()\n\t\tif player.getState() == csdefine.ENTITY_STATE_FIGHT:\n\t\t\tplayer.statusMessage( csstatus.VEND_FORBIDDEN_VEND_ON_FIGHTING )\n\t\t\treturn\n\t\tif player.getState() == csdefine.ENTITY_STATE_DEAD:\n\t\t\tplayer.statusMessage( csstatus.ROLE_DEAD_FORBID_CONTROLE )\n\t\t\treturn\n\t\tif player.isMoving() or player.isJumping():\n\t\t\tplayer.statusMessage( csstatus.VEND_FORBIDDEN_VEND_ON_MOVING )\n\t\t\treturn\n\t\tif player.intonating():\t\t\t\t\t\t\t\t\t\t\t\t\t# 吟唱中不允许摆摊\n\t\t\tplayer.statusMessage( csstatus.VEND_FORBIDDEN_VEND_ON_INTONATE )\n\t\t\treturn\n\n\t\tkitUidList = []\n\t\tuidList = []\n\t\titemsPrice = []\n\t\tpetsList = []\n\t\tpetsPrice = []\n\t\tvendItems = self.pyGoodsPanel_.getSellItems()\t\t\t\t\t\t\t# 出售物品\n\t\tfor itemInfo in vendItems :\n\t\t\tif itemInfo.rolePrice > csconst.TRADE_PRICE_UPPER_LIMIT :\n\t\t\t\t# \"商品(%s)标价超过上限!\"\n\t\t\t\tshowAutoHideMessage( 3.0, mbmsgs[0x0e01] % itemInfo.name(), mbmsgs[0x0c22], pyOwner = self )\n\t\t\t\treturn\n\t\t\tuidList.append( itemInfo.uid )\n\t\t\tkitUidList.append( itemInfo.kitbagID )\n\t\t\titemsPrice.append( itemInfo.rolePrice )\n\n\t\tvendPets = self.pyPetsPanel_.getVendPets()\t\t\t\t\t\t\t\t# 出售宠物\n\t\tfor petEpitome in vendPets :\n\t\t\tprice = petEpitome.rolePrice\n\t\t\tif price > csconst.TRADE_PRICE_UPPER_LIMIT :\n\t\t\t\t# \"宠物标价超过上限!\"\n\t\t\t\tshowAutoHideMessage( 3.0, 0x0e02, mbmsgs[0x0c22], pyOwner = self )\n\t\t\t\treturn\n\t\t\tpetsList.append( petEpitome.databaseID )\n\t\t\tpetsPrice.append( price )\n\n\t\tif 0 in itemsPrice or 0 in petsPrice : \t\t\t\t\t\t\t\t\t# 价格列表中存在价格为0的商品\n\t\t\t# \"你有商品尚未标价!\"\n\t\t\tshowAutoHideMessage( 3.0, 0x0e03, mbmsgs[0x0c22], pyOwner = self )\n\t\t\treturn\n\n\t\tpurchaseItems = self.pyPurchasePanel_.getPurchaseItems()\t\t\t\t# 收购物品\n\t\tif len( vendItems ) < 1 and len( vendPets ) < 1 and len( purchaseItems ) < 1 :\n\t\t\t# \"没有出售或收购的商品!\"\n\t\t\tshowAutoHideMessage( 3.0, 0x0e04, mbmsgs[0x0c22], pyOwner = self )\n\t\t\treturn\n\t\tif self.pyPurchasePanel_.getPurchaseTotalPrice() > player.money : \t\t# 不够钱去收购物品\n\t\t\t# \"您没有足够的金钱收购物品!\"\n\t\t\tshowAutoHideMessage( 3.0, 0x0e05, mbmsgs[0x0c22], pyOwner = self )\n\t\t\treturn\n\t\tplayer.vend_vend( kitUidList, uidList, itemsPrice, petsList, petsPrice )\n\n\tdef onPauseVend_( self ):\n\t\tplayer = BigWorld.player()\n\t\tplayer.vend_pauseVend()\n\t\tplayer.tradeState = csdefine.ENTITY_STATE_VEND\n\n\n\t# 
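# The stall tax shown above is a flat percentage of the asking total, rounded up
# (onCalcuExpense_ computes int(math.ceil(cost * STALL_CELL))). A minimal
# standalone sketch of that rounding, assuming the 1% STALL_CELL rate defined at
# the top of this module:
import math

STALL_CELL = 0.01  # 1% stall tax rate, mirroring the module constant

def stall_tax(total_price):
    # Round up so any non-zero asking total owes at least 1 unit of currency.
    return int(math.ceil(total_price * STALL_CELL))

assert stall_tax(0) == 0
assert stall_tax(1) == 1        # even a 1-coin listing pays 1 coin of tax
assert stall_tax(25000) == 250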
----------------------------------------------------------------\n\t# private\n\t# ----------------------------------------------------------------\n\tdef __onRoleStateChange( self, state ):\n\t\t\"\"\"\n\t\t玩家状态改变\n\t\t\"\"\"\n\t\tisVendState = state == csdefine.ENTITY_STATE_VEND\n\t\tself.__pyChangeNameBtn.enable = isVendState\n\t\tself.pyPauseVendBtn_.visible = isVendState\n\t\tself.pyStartVendBtn_.visible = not isVendState\n\t\tself.enableChangePriceBtn()\n\t\tfor pyPanel in self.pySubCtrl_.pyPanels :\n\t\t\tpyPanel.onRoleTradeStateChanged( state )\n\n\tdef __createTabPage( self, tabCtrl, panelClass, index ) :\n\t\ttabBtn = getattr( tabCtrl, \"btn_\" + str( index ) )\n\t\tpyTabBtn = TabButton( tabBtn )\n\t\ttabPanel = getattr( tabCtrl, \"panel_\" + str( index ) )\n\t\tpyTabPanel = panelClass( tabPanel, self )\n\t\tself.pySubCtrl_.addPage( TabPage( pyTabBtn, pyTabPanel ) )\n\t\treturn pyTabPanel\n\n\tdef __onChangeName( self ):\n\t\tdef operationCB( res, text ) :\n\t\t\tif res == DialogResult.OK :\n\t\t\t\ttext = text.strip()\n\t\t\t\tif len( text ) > csconst.VEND_SIGNBOARD_MAX_LENGTH :\t\t\t\t\t\t# wsf,加入宠物名字合法性检测\n\t\t\t\t\t# \"名字长度不能超过 20个字节!\"\n\t\t\t\t\tshowAutoHideMessage( 3.0, 0x0e06, mbmsgs[0x0c22] )\n\t\t\t\telif text == \"\" :\n\t\t\t\t\t# \"您输入的名称无效!\"\n\t\t\t\t\tshowAutoHideMessage( 3.0, 0x0e07, mbmsgs[0x0c22] )\n\t\t\t\telif not rds.wordsProfanity.isPureString( text ) :\n\t\t\t\t\t# \"名称不合法!\"\n\t\t\t\t\tshowAutoHideMessage( 3.0, 0x0e08, mbmsgs[0x0c22] )\n\t\t\t\telif rds.wordsProfanity.searchNameProfanity( text ) is not None :\n\t\t\t\t\t# \"输入的名称有禁用词汇!\"\n\t\t\t\t\tshowAutoHideMessage( 3.0, 0x0e09, mbmsgs[0x0c22] )\n\t\t\t\telse :\n\t\t\t\t\tBigWorld.player().vend_setSignboard( text )\n\t\ttitle = labelGather.getText( \"vendwindow:VendSellPanel\", \"ipBoxClew\" )\n\t\tpyIPBox = InputBox()\n\t\tpyIPBox.maxLength = 20\n\t\tpyIPBox.show( title, operationCB, self )\n\n\tdef __onEndVend( self ):\n\t\tBigWorld.player().vend_endVend()\n\t\tself.pyBinder.hide()\n\n\n\t# ----------------------------------------------------------------\n\t# public\n\t# ----------------------------------------------------------------\n\tdef enableChangePriceBtn( self ) :\n\t\t\"\"\"\n\t\t使能修改价格按钮\n\t\t\"\"\"\n\t\tselTabPage = self.pySubCtrl_.pySelPage\n\t\tif selTabPage is None : return\n\t\tplayer = BigWorld.player()\n\t\tif not player or not player.isPlayer() : return\n\t\tselPanel = selTabPage.pyPanel\n\t\tenable = player.state != csdefine.ENTITY_STATE_VEND\n\t\tenable &= selPanel.canChangePrice()\n\t\tself.pyChangePriceBtn_.enable = enable\n\n\tdef reset( self ):\n\t\tBaseSellPanel.reset( self )\n\t\tself.__onRoleStateChange( csdefine.TRADE_NONE )\n\n\tdef onParentShow( self ) :\n\t\tplayer = BigWorld.player()\n\t\tself.__onRoleStateChange( player.state )\n\t\tself.pyStOwnerName_.text = labelGather.getText( \"vendwindow:VendSellPanel\", \"stOwnerName\" ) + player.getName()\n\t\tfor pyPanel in self.pySubCtrl_.pyPanels :\n\t\t\tpyPanel.onParentShow()\n\n\tdef onParentHide( self ) :\n\t\tfor pyPanel in self.pySubCtrl_.pyPanels :\n\t\t\tpyPanel.onParentHide()\n","repo_name":"mudsave/csol2_enities_45541","sub_path":"client/guis/general/vendwindow/sellwindow/SellPanel.py","file_name":"SellPanel.py","file_ext":"py","file_size_in_byte":13312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"74590786140","text":"import time, threading\n\nclass timeThread(threading.Thread):\n\tdef __init__(self, tid, name, 
delay):\n\t\tthreading.Thread.__init__(self)\n\t\tself.tid = tid\n\t\tself.name = name\n\t\tself.delay = delay\n\n\tdef run(self):\n\t\tprint(\"Starting {}...\".format(self.tid))\n\t\tprint_time(self.name, 5, self.delay)\n\t\tprint(\"Exiting {}...\".format(self.tid))\n\nclass echoThread(threading.Thread):\n\tdef __init__(self):\n\t\tthreading.Thread.__init__(self)\n\n\tdef run(self):\n\t\techo()\n\nclass helloThread(threading.Thread):\n\tdef __init__(self, tid, name, delay):\n\t\tthreading.Thread.__init__(self)\n\t\tself.tid = tid\n\t\tself.name = name\n\t\tself.delay = delay\n\n\tdef run(self):\n\t\tprint(\"Starting {}...\".format(self.tid))\n\t\tprint_hello(self.name, 5, self.delay)\n\t\tprint(\"Exiting {}...\".format(self.tid))\n\n\ndef print_time(name, counter, delay):\n\twhile counter:\n\t\ttime.sleep(delay)\n\t\tprint(\"{}: {}\".format(name, time.ctime(time.time())))\n\t\tcounter -= 1\n\ndef print_hello(name, counter, delay):\n\twhile counter:\n\t\ttime.sleep(delay)\n\t\tprint(\"{}: Hello!\".format(name))\n\t\tcounter -= 1\n\ndef echo():\n\ttmp = input(\"type smth... \")\n\tprint(str(tmp))\n\nt1 = timeThread(1, \"t1\", 1)\nt2 = timeThread(2, \"t2\", 2)\nt3 = helloThread(3, \"t3\", 2)\nt4 = echoThread()\n\nt1.start()\nt2.start()\nt3.start()\nt4.start()\n\n#print_time(\"procces1\", 5, 1)\n#print_time(\"procces2\", 5, 2)\n","repo_name":"davidxbors/ArchDrop","sub_path":"__threading.py","file_name":"__threading.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"26804552570","text":"# -*- coding:utf-8 -*-\nimport os\nimport re\nimport time\nfrom os import sep # 系统路径分隔符\nfrom configparser import RawConfigParser # 读取ini\nfrom configparser import NoOptionError # ini文件不存在或不存在指定node的错误\nfrom shutil import copyfile\nfrom xml.etree.ElementTree import parse, ParseError # 解析xml格式\n# from aip import AipBodyAnalysis # 百度ai人体分析\n\nfrom Class.MyJav import JavFile\nfrom Class.MyLogger import record_video_old\nfrom Class.MyError import TooManyDirectoryLevelsError, DownloadFanartError\nfrom Functions.Utils.Baidu import translate\nfrom Functions.Utils.Download import download_pic\nfrom Functions.Progress.Prepare import get_suren_cars\nfrom Functions.Progress.Picture import check_picture, crop_poster_youma, add_watermark_subtitle, add_watermark_divulge\nfrom Functions.Utils.XML import replace_xml_win, replace_xml\nfrom Functions.Metadata.Car import find_car_fc2, find_car_youma\n\n\n# 设置\nclass Handler(object):\n def __init__(self, pattern):\n self._pattern = pattern\n config_settings = RawConfigParser()\n config_settings.read('【点我设置整理规则】.ini', encoding='utf-8-sig')\n # ###################################################### 公式元素 ##############################################\n # 是否 去除 标题 末尾可能存在的演员姓名\n self._bool_need_actors_end_of_title = config_settings.get(\n \"公式元素\", \"标题末尾保留演员姓名?\") == '是'\n # ###################################################### nfo ##################################################\n # 是否 收集nfo\n self._bool_nfo = config_settings.get(\"收集nfo\", \"是否收集nfo?\") == '是'\n # 自定义 nfo中title的公式\n self._list_name_nfo_title = config_settings.get(\n \"收集nfo\", \"title的公式\").replace('标题', '完整标题').split('+')\n # 是否 在nfo中plot写入中文简介,否则写原日语简介\n self._bool_need_zh_plot = config_settings.get(\"收集nfo\",\n \"plot是否使用中文简介?\") == '是'\n # 自定义 将系列、片商等元素作为特征,因为emby不会直接在影片介绍页面上显示片商,也不会读取系列set\n list_custom_genres = config_settings.get(\"收集nfo\", \"额外增加以下元素到特征中\").split('、') \\\n if 
config_settings.get(\"收集nfo\", \"额外增加以下元素到特征中\") else []\n # 自定义 将系列、片商等元素作为特征,因为emby不会直接在影片介绍页面上显示片商,也不会读取系列set\n self._list_extra_genres = [\n i for i in list_custom_genres if i != '系列' and i != '片商'\n ]\n # ?是否将“系列”写入到特征中\n self._bool_write_series = True if '系列' in list_custom_genres else False\n # ?是否将“片商”写入到特征中\n self._bool_write_studio = True if '片商' in list_custom_genres else False\n # 是否 将特征保存到风格中\n self._bool_genre = config_settings.get(\"收集nfo\",\n \"是否将特征保存到genre?\") == '是'\n # 是否 将 片商 作为特征\n self._bool_tag = config_settings.get(\"收集nfo\", \"是否将特征保存到tag?\") == '是'\n # ###################################################### 重命名 ################################################\n # 是否 重命名 视频\n self._bool_rename_video = config_settings.get(\"重命名视频文件\",\n \"是否重命名视频文件?\") == '是'\n # 自定义 重命名 视频\n self._list_rename_video = config_settings.get(\"重命名视频文件\",\n \"重命名视频文件的公式\").split('+')\n # 是否 重命名视频所在文件夹,或者为它创建独立文件夹\n self._bool_rename_folder = config_settings.get(\"修改文件夹\",\n \"是否重命名或创建独立文件夹?\") == '是'\n # 自定义 新的文件夹名 示例: ['车牌', '【', '全部演员', '】']\n self._list_rename_folder = config_settings.get(\"修改文件夹\",\n \"新文件夹的公式\").split('+')\n # ######################################################### 归类 ###############################################\n # 是否 归类jav\n self._bool_classify = config_settings.get(\"归类影片\", \"是否归类影片?\") == '是'\n # 是否 针对“文件夹”归类jav,“否”即针对“文件”\n self._bool_classify_folder = config_settings.get(\"归类影片\",\n \"针对文件还是文件夹?\") == '文件夹'\n # 自定义 路径 归类的jav放到哪\n self._custom_classify_target_dir = config_settings.get(\n \"归类影片\", \"归类的根目录\")\n # 自定义 jav按什么类别标准来归类 比如: 影片类型\\全部演员\n self._custom_classify_basis = config_settings.get(\"归类影片\", \"归类的标准\")\n # ####################################################### 图片 ################################################\n # 是否 下载图片\n self._bool_jpg = config_settings.get(\"下载封面\", \"是否下载封面海报?\") == '是'\n # 自定义 命名 大封面fanart\n self._list_name_fanart = config_settings.get(\"下载封面\",\n \"fanart的公式\").split('+')\n # 自定义 命名 小海报poster\n self._list_name_poster = config_settings.get(\"下载封面\",\n \"poster的公式\").split('+')\n # 是否 如果视频有“中字”,给poster的左上角加上“中文字幕”的斜杠\n self._bool_watermark_subtitle = config_settings.get(\n \"下载封面\", \"是否为poster加上中文字幕条幅?\") == '是'\n # 是否 如果视频是“无码流出”,给poster的右上角加上“无码流出”的斜杠\n self._bool_watermark_divulge = config_settings.get(\n \"下载封面\", \"是否为poster加上无码流出条幅?\") == '是'\n # ##################################################### 字幕 ###################################################\n # 是否 重命名用户已拥有的字幕\n self._bool_rename_subtitle = config_settings.get(\n \"字幕文件\", \"是否重命名已有的字幕文件?\") == '是'\n # ##################################################### kodi ##################################################\n # 是否 收集演员头像\n self._bool_sculpture = config_settings.get(\"kodi专用\",\n \"是否收集演员头像?\") == '是'\n # 是否 对于多cd的影片,kodi只需要一份图片和nfo\n self._bool_cd_only = config_settings.get(\"kodi专用\",\n \"是否对多cd只收集一份图片和nfo?\") == '是'\n # ##################################################### 代理 ##################################################\n # 代理端口\n custom_proxy = config_settings.get(\"局部代理\", \"代理端口\").strip()\n # 代理,如果为空则效果为不使用\n proxys = {'http': f'http://{custom_proxy}', 'https': f'https://{custom_proxy}'} \\\n if config_settings.get(\"局部代理\", \"http还是socks5?\") == '是' \\\n else {'http': f'socks5://{custom_proxy}', 'https': f'socks5://{custom_proxy}'}\n # 是否 使用局部代理\n self._bool_proxy = config_settings.get(\n \"局部代理\", \"是否使用局部代理?\") == '是' and custom_proxy\n # 是否 代理javlibrary\n self.proxy_library = proxys if config_settings.get(\"局部代理\", 
\"是否代理javlibrary?\") == '是' \\\n and self._bool_proxy else {}\n # 是否 代理javbus,还有代理javbus上的图片cdnbus\n self.proxy_bus = proxys if config_settings.get(\n \"局部代理\", \"是否代理javbus?\") == '是' and self._bool_proxy else {}\n # 是否 代理javbus,还有代理javbus上的图片cdnbus\n self.proxy_321 = proxys if config_settings.get(\n \"局部代理\", \"是否代理jav321?\") == '是' and self._bool_proxy else {}\n # 是否 代理javdb,还有代理javdb上的图片\n self.proxy_db = proxys if config_settings.get(\n \"局部代理\", \"是否代理javdb?\") == '是' and self._bool_proxy else {}\n # 是否 代理arzon\n self.proxy_arzon = proxys if config_settings.get(\n \"局部代理\", \"是否代理arzon?\") == '是' and self._bool_proxy else {}\n # 是否 代理dmm图片,javlibrary和javdb上的有码图片几乎都是直接引用dmm\n self.proxy_dmm = proxys if config_settings.get(\n \"局部代理\", \"是否代理dmm图片?\") == '是' and self._bool_proxy else {}\n # ################################################### 原影片文件的性质 ##########################################\n # 自定义 无视的字母数字 去除影响搜索结果的字母数字 xhd1080、mm616、FHD-1080\n self._list_surplus_words_in_filename = config_settings.get(\"原影片文件的性质\", \"有码素人无视多余的字母数字\").upper().split('、') \\\n if self._pattern == '有码' \\\n else config_settings.get(\"原影片文件的性质\", \"无码无视多余的字母数字\").upper().split('、')\n # 自定义 原影片性质 影片有中文,体现在视频名称中包含这些字符\n self._list_subtitle_words_in_filename = config_settings.get(\n \"原影片文件的性质\", \"是否中字即文件名包含\").strip().split('、')\n # 自定义 是否中字 这个元素的表现形式\n self._custom_subtitle_expression = config_settings.get(\n \"原影片文件的性质\", \"是否中字的表现形式\")\n # 自定义 原影片性质 影片是无码流出片,体现在视频名称中包含这些字符\n self._list_divulge_words_in_filename = config_settings.get(\n \"原影片文件的性质\", \"是否流出即文件名包含\").strip().split('、')\n # 自定义 是否流出 这个元素的表现形式\n self._custom_divulge_expression = config_settings.get(\n \"原影片文件的性质\", \"是否流出的表现形式\")\n # 自定义 原影片性质 有码\n self._av_type = config_settings.get(\"原影片文件的性质\", self._pattern)\n # ################################################## 其他设置 ##################################################\n # 是否 使用简体中文 简介翻译的结果和jav特征会变成“简体”还是“繁体”,影响影片特征和简介。\n # self.to_language = 'zh' if config_settings.get(\"其他设置\", \"简繁中文?\") == '简' else 'cht'\n self.to_language = 'zh'\n # 网址 javlibrary\n self.url_library = f'{config_settings.get(\"其他设置\", \"javlibrary网址\").strip().rstrip(\"/\")}/cn'\n # 网址 javbus\n self.url_bus = config_settings.get(\"其他设置\",\n \"javbus网址\").strip().rstrip('/')\n # 网址 javdb\n self.url_db = config_settings.get(\"其他设置\",\n \"javdb网址\").strip().rstrip('/')\n # 网址 javdb\n self._phpsessid = config_settings.get(\"其他设置\",\n \"arzon的phpsessid\").strip()\n # 自定义 文件类型 只有列举出的视频文件类型,才会被处理\n self._tuple_video_types = tuple(\n config_settings.get(\"其他设置\", \"扫描文件类型\").upper().split('、'))\n # 自定义 命名公式中“标题”的长度 windows只允许255字符,所以限制长度,但nfo中的标题是全部\n self._int_title_len = int(\n config_settings.get(\"其他设置\", \"重命名中的标题长度(50~150)\"))\n # ####################################### 百度翻译API ####################################################\n # 账户 百度翻译api\n self.tran_id = config_settings.get(\"百度翻译API\", \"APP ID\")\n self.tran_sk = config_settings.get(\"百度翻译API\", \"密钥\")\n # ####################################### 百度人体分析 ####################################################\n # 是否 需要准确定位人脸的poster\n self.bool_face = config_settings.get(\"百度人体分析\",\n \"是否需要准确定位人脸的poster?\") == '是'\n # 账户 百度人体分析\n self._al_id = config_settings.get(\"百度人体分析\", \"appid\")\n self._ai_ak = config_settings.get(\"百度人体分析\", \"api key\")\n self._al_sk = config_settings.get(\"百度人体分析\", \"secret key\")\n\n # ####################################### 本次程序启动通用 ####################################################\n # 素人番号: 得到事先设置的素人番号,让程序能跳过它们\n 
self.list_suren_cars = get_suren_cars()\n # 是否需要重命名文件夹\n self.bool_rename_folder = self.judge_need_rename_folder()\n # 归类的目标文件夹的拼接公式\n self.list_classify_basis = []\n # 用于给用户自定义命名的字典\n self.dict_for_standard = self.get_dict_for_standard()\n\n # ####################################### 每次重新选择文件夹通用 ##############################################\n # 选择的文件夹\n self.dir_choose = ''\n # 归类的目标根文件夹\n self.dir_classify_target = ''\n # 当前视频(包括非jav)的编号,用于显示进度、获取最大视频编号即当前文件夹内视频数量\n self.no_current = 0\n # 当前所选文件夹内视频总数\n self.sum_videos_in_choose_dir = 0\n # ####################################### 每一级文件夹通用 ##############################################\n # 当前for循环所处的这一级文件夹路径\n self.dir_current = ''\n # 字幕文件和车牌对应关系 {'c:\\a\\abc_123.srt': 'abc-123'}\n self.dict_subtitle_file = {}\n # 存放: 每一车牌的集数, 例如{'abp-123': 1, avop-789': 2}是指 abp-123只有一集,avop-789有cd1、cd2\n self.dict_car_episode = {}\n # 当前一级文件夹包含的视频总数\n self.sum_videos_in_current_dir = 0\n # 定义 Windows中的非法字符, 将非法字符替换为空格\n self.winDic = str.maketrans(r':<>\"\\?/*', ' ')\n\n # 每次用户选择文件夹后重置\n def rest_choose_dir(self, dir_choose):\n self.dir_choose = dir_choose\n # self.dir_classify_target = '' 通过check_classify_target_directory重置\n self.check_classify_target_directory()\n self.no_current = 0\n self.sum_videos_in_choose_dir = self.count_num_videos()\n\n # 每层级文件夹重置\n def rest_current_dir(self, dir_current):\n self.dir_current = dir_current\n self.dict_subtitle_file = {}\n self.dict_car_episode = {}\n self.sum_videos_in_current_dir = 0\n\n def get_last_arzon_cookie(self):\n return {'PHPSESSID': self._phpsessid}\n\n # #########################[修改文件夹]##############################\n # 是否需要重命名文件夹或者创建新的文件夹\n def judge_need_rename_folder(self):\n if self._bool_classify: # 如果需要归类\n if self._bool_classify_folder: # 并且是针对文件夹\n return True # 那么必须重命名文件夹或者创建新的文件夹\n else: # 不需要归类\n if self._bool_rename_folder: # 但是用户本来就在ini中写了要重命名文件夹\n return True\n return False\n\n # #########################[归类影片]##############################\n\n # 功能: 检查 归类根目录 的合法性\n # 参数: 用户选择整理的文件夹路径\n # 返回: 归类根目录路径\n # 辅助: os.sep,os.system\n def check_classify_target_directory(self):\n # 检查 归类根目录 的合法性\n if self._bool_classify:\n custom_classify_target_dir = self._custom_classify_target_dir.rstrip(\n sep)\n # 用户使用默认的“所选文件夹”\n if custom_classify_target_dir == '所选文件夹':\n self.dir_classify_target = f'{self.dir_choose}{sep}归类完成'\n # 归类根目录 是 用户输入的路径c:\\a,继续核实合法性\n else:\n # 用户输入的路径 不是 所选文件夹dir_choose\n if custom_classify_target_dir != self.dir_choose:\n if custom_classify_target_dir[:2] != self.dir_choose[:2]:\n input(\n f'归类的根目录: 【{custom_classify_target_dir}】和所选文件夹不在同一磁盘无法归类!请修正!'\n )\n if not os.path.exists(custom_classify_target_dir):\n input(\n f'归类的根目录: 【{custom_classify_target_dir}】不存在!无法归类!请修正!'\n )\n self.dir_classify_target = custom_classify_target_dir\n # 用户输入的路径 就是 所选文件夹dir_choose\n else:\n self.dir_classify_target = f'{self.dir_choose}{sep}归类完成'\n else:\n self.dir_classify_target = ''\n\n # #########################[百度人体分析]##############################\n # 百度翻译启动\n def start_body_analysis(self):\n if self.bool_face:\n return AipBodyAnalysis(self._al_id, self._ai_ak, self._al_sk)\n else:\n return None\n\n # 功能: 收集文件们中的字幕文件,存储在dict_subtitle_file\n # 参数: list_sub_files(当前文件夹的)子文件们\n # 返回: 无;更新self.dict_subtitle_file\n # 辅助: find_car_youma, find_car_fc2\n def init_dict_subtitle_file(self, list_sub_files):\n for file_raw in list_sub_files:\n file_temp = file_raw.upper()\n if file_temp.endswith((\n '.SRT',\n '.VTT',\n '.ASS',\n '.SSA',\n '.SUB',\n '.SMI',\n )):\n if self._pattern != 'Fc2':\n # 
有码无码不处理FC2\n if 'FC2' in file_temp:\n continue\n # 去除用户设置的、干扰车牌的文字\n for word in self._list_surplus_words_in_filename:\n file_temp = file_temp.replace(word, '')\n # 得到字幕文件名中的车牌\n subtitle_car = find_car_youma(file_temp,\n self.list_suren_cars)\n else:\n # 仅处理fc2\n if 'FC2' not in file_temp:\n continue # 【跳出2】\n # 得到字幕文件名中的车牌\n subtitle_car = find_car_fc2(file_temp)\n # 将该字幕文件和其中的车牌对应到dict_subtitle_file中\n if subtitle_car:\n self.dict_subtitle_file[file_raw] = subtitle_car\n\n # 功能: 发现文件中的jav视频文件,存储在list_jav_files\n # 参数: list_sub_files(当前文件夹的)子文件们\n # 返回: list_jav_files;更新self.dict_car_episode\n # 辅助: JavFile\n def get_list_jav_files(self, list_sub_files):\n list_jav_files = [] # 存放: 需要整理的jav_file\n for file_raw in list_sub_files:\n file_temp = file_raw.upper()\n if file_temp.endswith(\n self._tuple_video_types) and not file_temp.startswith('.'):\n self.no_current += 1\n self.sum_videos_in_current_dir += 1\n if 'FC2' in file_temp:\n continue\n for word in self._list_surplus_words_in_filename:\n file_temp = file_temp.replace(word, '')\n # 得到视频中的车牌\n car = find_car_youma(file_temp, self.list_suren_cars)\n if car:\n try:\n self.dict_car_episode[car] += 1 # 已经有这个车牌了,加一集cd\n except KeyError:\n self.dict_car_episode[car] = 1 # 这个新车牌有了第一集\n # 这个车牌在dict_subtitle_files中,有它的字幕。\n if car in self.dict_subtitle_file.values():\n subtitle_file = list(\n self.dict_subtitle_file.keys())[list(\n self.dict_subtitle_file.values()).index(car)]\n del self.dict_subtitle_file[subtitle_file]\n else:\n subtitle_file = ''\n carg = re.search(r'\\d\\dID-(\\d\\d)(\\d+)', car)\n if carg:\n car_id = f'{carg.group(1)}ID-{carg.group(2)}'\n else:\n car_id = car\n # 将该jav的各种属性打包好,包括原文件名带扩展名、所在文件夹路径、第几集、所属字幕文件名\n jav_struct = JavFile(car, car_id, file_raw,\n self.dir_current,\n self.dict_car_episode[car],\n subtitle_file, self.no_current)\n list_jav_files.append(jav_struct)\n else:\n print(\n f'>>无法处理: {self.dir_current[len(self.dir_choose):]}{sep}{file_raw}'\n )\n return list_jav_files\n\n # 功能:所选文件夹总共有多少个视频文件\n # 参数:用户选择整理的文件夹路径root_choose,视频类型后缀集合tuple_video_type\n # 返回:无\n # 辅助:os.walk\n def count_num_videos(self):\n num_videos = 0\n len_choose = len(self.dir_choose)\n for root, dirs, files in os.walk(self.dir_choose):\n if '归类完成' not in root[len_choose:]:\n for file_raw in files:\n file_temp = file_raw.upper()\n if file_temp.endswith(self._tuple_video_types\n ) and not file_temp.startswith('.'):\n num_videos += 1\n return num_videos\n\n # 功能: 处理多视频文件的问题,(1)所选文件夹总共有多少个视频文件,包括非jav文件,主要用于显示进度(2)同一车牌有多少cd,用于cd2...命名\n # 参数: list_jav_files\n # 返回: 无;更新self.sum_all_videos\n # 辅助: 无\n def count_num_and_no(self, list_jav_files):\n for jav_file in list_jav_files:\n jav_file.Sum_all_episodes = self.dict_car_episode[jav_file.Car]\n\n # 功能: (1)完善用于给用户命名的dict_for_standard,如果用户自定义的各种命名公式中有dict_for_standard未包含的元素,则添加。\n # (2)将_custom_classify_basis按“+”“\\”切割好,准备用于组装后面的归类路径。\n # 参数: 无\n # 返回: dict_for_standard; 更新self.list_classify_basis\n # 辅助: os.sep\n def get_dict_for_standard(self):\n dict_for_standard = {\n '车牌': 'ABC-123',\n '车牌前缀': 'ABC',\n '标题': f'{self._pattern}标题',\n '完整标题': f'完整{self._pattern}标题',\n '导演': f'{self._pattern}导演',\n '制作商': f'{self._pattern}制作商',\n '发行商': f'{self._pattern}发行商',\n '评分': 0.0,\n '片长': 0,\n '系列': f'{self._pattern}系列',\n '发行年月日': '1970-01-01',\n '发行年份': '1970',\n '月': '01',\n '日': '01',\n '首个演员': f'{self._pattern}演员',\n '全部演员': f'{self._pattern}演员',\n '空格': ' ',\n '\\\\': sep,\n '/': sep, # 文件路径分隔符\n '是否中字': '',\n '是否流出': '',\n '影片类型': self._av_type, # 自定义有码、无码、素人、FC2的对应称谓\n '视频': 'ABC-123', # 
当前及未来的视频文件名,不带ext\n '原文件名': 'ABC-123',\n '原文件夹名': 'ABC-123',\n }\n if self._pattern == 'fc2':\n dict_for_standard['车牌'] = 'FC2-123'\n dict_for_standard['车牌前缀'] = 'FC2'\n dict_for_standard['视频'] = 'FC2-123'\n dict_for_standard['原文件名'] = 'FC2-123'\n dict_for_standard['原文件夹名'] = 'FC2-123'\n for i in self._list_extra_genres:\n if i not in dict_for_standard:\n dict_for_standard[i] = i\n for i in self._list_rename_video:\n if i not in dict_for_standard:\n dict_for_standard[i] = i\n for i in self._list_rename_folder:\n if i not in dict_for_standard:\n dict_for_standard[i] = i\n for i in self._list_name_nfo_title:\n if i not in dict_for_standard:\n dict_for_standard[i] = i\n for i in self._list_name_fanart:\n if i not in dict_for_standard:\n dict_for_standard[i] = i\n for i in self._list_name_poster:\n if i not in dict_for_standard:\n dict_for_standard[i] = i\n # 归类路径的组装公式\n for i in self._custom_classify_basis.split('\\\\'):\n for j in i.split('+'):\n if j not in dict_for_standard:\n dict_for_standard[j] = j\n self.list_classify_basis.append(j)\n self.list_classify_basis.append(sep)\n return dict_for_standard\n\n # 功能: 判定影片所在文件夹是否是独立文件夹,独立文件夹是指该文件夹仅用来存放该影片,不包含“.actors”\"extrafanrt”外的其他文件夹\n # 参数: len_dict_car_pref 当前所处文件夹包含的车牌数量, len_list_jav_struct当前所处文件夹包含的、需要整理的jav的结构体数量,\n # list_sub_dirs当前所处文件夹包含的子文件夹们\n # 返回: True\n # 辅助: judge_exist_extra_folders\n def judge_separate_folder(self, len_list_jav_files, list_sub_dirs):\n # 当前文件夹下,车牌不止一个;还有其他非jav视频;有其他文件夹,除了演员头像文件夹“.actors”和额外剧照文件夹“extrafanart”;\n if len(self.dict_car_episode\n ) > 1 or self.sum_videos_in_current_dir > len_list_jav_files:\n JavFile.Bool_in_separate_folder = False\n return\n for folder in list_sub_dirs:\n if folder != '.actors' and folder != 'extrafanart':\n JavFile.Bool_in_separate_folder = False\n return\n JavFile.Bool_in_separate_folder = True # 这一层文件夹是这部jav的独立文件夹\n return\n\n # 功能: 根据【原文件名】和《已存在的、之前整理的nfo》,判断当前jav是否有“中文字幕”\n # 参数: ①当前jav所处文件夹路径dir_current ②jav文件名不带文件类型后缀name_no_ext,\n # 返回: True\n # 辅助: os.path.exists,xml.etree.ElementTree.parse,xml.etree.ElementTree.ParseError\n def judge_exist_subtitle(self, dir_current, name_no_ext):\n # 去除 '-CD' 和 '-CARIB'对 '-C'判断中字的影响\n name_no_ext = name_no_ext.upper().replace('-CD',\n '').replace('-CARIB', '')\n # 如果原文件名包含“-c、-C、中字”这些字符\n for i in self._list_subtitle_words_in_filename:\n if i in name_no_ext:\n return True\n # 先前整理过的nfo中有 ‘中文字幕’这个Genre\n path_old_nfo = f'{dir_current}{sep}{name_no_ext}.nfo'\n if os.path.exists(path_old_nfo):\n try:\n tree = parse(path_old_nfo)\n except ParseError: # nfo可能损坏\n return False\n for child in tree.getroot():\n if child.text == '中文字幕':\n return True\n return False\n\n # 功能: 根据【原文件名】和《已存在的、之前整理的nfo》,判断当前jav是否有“无码流出”\n # 参数: ①当前jav所处文件夹路径dir_current ②jav文件名不带文件类型后缀name_no_ext\n # 返回: True\n # 辅助: os.path.exists,xml.etree.ElementTree.parse,xml.etree.ElementTree.ParseError\n def judge_exist_divulge(self, dir_current, name_no_ext):\n # 如果原文件名包含“-c、-C、中字”这些字符\n for i in self._list_divulge_words_in_filename:\n if i in name_no_ext:\n return True\n # 先前整理过的nfo中有 ‘中文字幕’这个Genre\n path_old_nfo = f'{dir_current}{sep}{name_no_ext}.nfo'\n if os.path.exists(path_old_nfo):\n try:\n tree = parse(path_old_nfo)\n except ParseError: # nfo可能损坏\n return False\n for child in tree.getroot():\n if child.text == '无码流出':\n return True\n return False\n\n # 功能: 判断当前jav_file是否有“中文字幕”,是否有“无码流出”\n # 参数: jav_file 处理的jav视频文件对象\n # 返回: 无;更新jav_file\n # 辅助: 无\n def judge_subtitle_and_divulge(self, jav_file):\n # 判断是否有中字的特征,条件有三满足其一即可: 1有外挂字幕 2文件名中含有“-C”之类的字眼 3旧的nfo中已经记录了它的中字特征\n 
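        # Cheapest evidence first: a matched external subtitle file is decisive by itself; the filename and old-nfo checks (judge_exist_subtitle) only run when no subtitle file was paired with this video.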
if jav_file.Subtitle:\n jav_file.Bool_subtitle = True # 判定成功\n else:\n jav_file.Bool_subtitle = self.judge_exist_subtitle(\n jav_file.Dir, jav_file.Name_no_ext)\n # 判断是否是无码流出的作品,同理\n jav_file.Bool_divulge = self.judge_exist_divulge(\n jav_file.Dir, jav_file.Name_no_ext)\n\n # 功能: 用jav_file、jav_model中的原始数据完善dict_for_standard\n # 参数: jav_file 处理的jav视频文件对象,jav_model 保存jav元数据的对象\n # 返回: 无;更新dict_for_standard\n # 辅助: replace_xml_win,replace_xml_win\n def prefect_zh(self, jav_model):\n # 翻译出中文标题和简介\n if self.tran_id and self.tran_sk and not jav_model.TitleZh:\n jav_model.TitleZh = translate(self.tran_id, self.tran_sk,\n jav_model.Title, self.to_language)\n time.sleep(0.9)\n jav_model.PlotZh = translate(self.tran_id, self.tran_sk,\n jav_model.Plot, self.to_language)\n return True\n else:\n return False\n\n # 功能: 用jav_file、jav_model中的原始数据完善dict_for_standard\n # 参数: jav_file 处理的jav视频文件对象,jav_model 保存jav元数据的对象\n # 返回: 无;更新dict_for_standard\n # 辅助: replace_xml_win,replace_xml_win\n def prefect_dict_for_standard(self, jav_file, jav_model):\n # 标题\n str_actors = ' '.join(jav_model.Actors[:3])\n int_actors_len = len(\n str_actors) if self._bool_need_actors_end_of_title else 0\n int_current_len = self._int_title_len - int_actors_len\n self.dict_for_standard['完整标题'] = replace_xml_win(jav_model.Title)\n self.dict_for_standard['中文完整标题'] = replace_xml_win(jav_model.TitleZh) \\\n if jav_model.TitleZh else self.dict_for_standard['完整标题']\n # 处理影片的标题过长。用户只需要在ini中写“标题”,但事实上,文件重命名操作中的“标题“是删减过的标题,nfo中的标题才是完整标题\n if len(self.dict_for_standard['完整标题']) > int_current_len:\n self.dict_for_standard['标题'] = self.dict_for_standard[\n '完整标题'][:int_current_len]\n else:\n self.dict_for_standard['标题'] = self.dict_for_standard['完整标题']\n if len(self.dict_for_standard['中文完整标题']) > int_current_len:\n self.dict_for_standard['中文标题'] = self.dict_for_standard[\n '中文完整标题'][:int_current_len]\n else:\n self.dict_for_standard['中文标题'] = self.dict_for_standard['中文完整标题']\n if self._bool_need_actors_end_of_title:\n self.dict_for_standard[\n '标题'] = f'{self.dict_for_standard[\"标题\"]} {str_actors}'\n self.dict_for_standard[\n '完整标题'] += f'{self.dict_for_standard[\"完整标题\"]} {str_actors}'\n self.dict_for_standard[\n '中文标题'] += f'{self.dict_for_standard[\"中文标题\"]} {str_actors}'\n self.dict_for_standard[\n '中文完整标题'] += f'{self.dict_for_standard[\"中文完整标题\"]} {str_actors}'\n\n # '是否中字'这一命名元素被激活\n self.dict_for_standard[\n '是否中字'] = self._custom_subtitle_expression if jav_file.Bool_subtitle else ''\n self.dict_for_standard[\n '是否流出'] = self._custom_divulge_expression if jav_file.Bool_divulge else ''\n # 车牌\n self.dict_for_standard['车牌'] = jav_model.Car # car可能发生了变化\n self.dict_for_standard['车牌前缀'] = jav_model.Car.split('-')[0]\n # 日期\n self.dict_for_standard['发行年月日'] = jav_model.Release\n self.dict_for_standard['发行年份'] = jav_model.Release[0:4]\n self.dict_for_standard['月'] = jav_model.Release[5:7]\n self.dict_for_standard['日'] = jav_model.Release[8:10]\n # 演职人员\n self.dict_for_standard['片长'] = jav_model.Runtime\n self.dict_for_standard['导演'] = replace_xml_win(\n jav_model.Director) if jav_model.Director else '有码导演'\n # 公司\n self.dict_for_standard['发行商'] = replace_xml_win(\n jav_model.Publisher) if jav_model.Publisher else '有码发行商'\n self.dict_for_standard['制作商'] = replace_xml_win(\n jav_model.Studio) if jav_model.Studio else '有码制作商'\n # 评分 系列\n self.dict_for_standard['评分'] = jav_model.Score / 10\n self.dict_for_standard[\n '系列'] = jav_model.Series if jav_model.Series else '有码系列'\n # 全部演员(最多7个) 和 第一个演员\n if jav_model.Actors:\n if len(jav_model.Actors) 
> 7:\n self.dict_for_standard['全部演员'] = ' '.join(jav_model.Actors[:7])\n else:\n self.dict_for_standard['全部演员'] = ' '.join(jav_model.Actors)\n self.dict_for_standard['首个演员'] = jav_model.Actors[0]\n else:\n self.dict_for_standard['首个演员'] = self.dict_for_standard[\n '全部演员'] = '有码演员'\n\n # jav_file原文件的一些属性 dict_for_standard['视频'],先定义为原文件名,即将发生变化。\n self.dict_for_standard['视频'] = self.dict_for_standard[\n '原文件名'] = jav_file.Name_no_ext\n self.dict_for_standard['原文件夹名'] = jav_file.Folder\n\n # 功能: 1重命名视频(jav_file和dict_for_standard发生改变)\n # 参数: 设置settings,命名信息dict_for_standard,处理的影片jav\n # 返回: path_return,重命名操作可能不成功,返回path_return告知主程序提醒用户处理\n # 辅助: os.exists, os.rename, record_video_old, record_fail\n def rename_mp4(self, jav_file):\n # 如果重命名操作不成功,将path_new赋值给path_return,提醒用户自行重命名\n path_return = ''\n if self._bool_rename_video:\n # 构造新文件名,不带文件类型后缀\n name_without_ext = ''\n for j in self._list_rename_video:\n name_without_ext = f'{name_without_ext}{self.dict_for_standard[j]}'\n if os.name == 'nt': # 如果是windows系统\n name_without_ext = name_without_ext.translate(\n self.winDic) # 将文件名中的非法字符替换为空格\n name_without_ext = f'{name_without_ext.strip()}{jav_file.Cd}' # 去除末尾空格,否则windows会自动删除空格,导致程序仍以为带空格\n path_new = f'{jav_file.Dir}{sep}{name_without_ext}{jav_file.Ext}' # 【临时变量】path_new 视频文件的新路径\n\n # 一般情况,不存在同名视频文件\n if not os.path.exists(path_new):\n os.rename(jav_file.Path, path_new)\n record_video_old(jav_file.Path, path_new)\n # 已存在目标文件,但就是现在的文件\n elif jav_file.Path.upper() == path_new.upper():\n try:\n os.rename(jav_file.Path, path_new)\n # windows本地磁盘,“abc-123.mp4”重命名为“abc-123.mp4”或“ABC-123.mp4”没问题,但有用户反映,挂载的磁盘会报错“file exists error”\n except FileExistsError:\n # 提醒用户后续自行更改\n path_return = path_new\n # 存在目标文件,不是现在的文件。\n else:\n raise FileExistsError(\n f'重命名影片失败,重复的影片,已经有相同文件名的视频了: {path_new}') # 【终止对该jav的整理】\n self.dict_for_standard[\n '视频'] = name_without_ext # 【更新】 dict_for_standard['视频']\n jav_file.Name = f'{name_without_ext}{jav_file.Ext}' # 【更新】jav.name,重命名操作可能不成功,但之后的操作仍然围绕成功的jav.name来命名\n print(f' >修改文件名{jav_file.Cd}完成')\n # 重命名字幕\n if jav_file.Subtitle and self._bool_rename_subtitle:\n subtitle_new = f'{name_without_ext}{jav_file.Ext_subtitle}' # 【临时变量】subtitle_new\n path_subtitle_new = f'{jav_file.Dir}{sep}{subtitle_new}' # 【临时变量】path_subtitle_new\n if jav_file.Path_subtitle != path_subtitle_new:\n os.rename(jav_file.Path_subtitle, path_subtitle_new)\n jav_file.Subtitle = subtitle_new # 【更新】 jav.subtitle 字幕完整文件名\n print(' >修改字幕名完成')\n return path_return\n\n # 功能: 2归类影片,只针对视频文件和字幕文件,无视它们当前所在文件夹\n # 参数: 设置settings,命名信息dict_for_standard,处理的影片jav\n # 返回: 处理的影片jav(所在文件夹路径改变)\n # 辅助: os.exists, os.rename, os.makedirs,\n def classify_files(self, jav_file):\n # 如果需要归类,且不是针对文件��来归类\n if self._bool_classify and not self._bool_classify_folder:\n # 移动的目标文件夹路径\n dir_dest = f'{self.dir_classify_target}{sep}'\n for j in self.list_classify_basis:\n # 【临时变量】归类的目标文件夹路径 C:\\Users\\JuneRain\\Desktop\\测试文件夹\\葵司\\\n dir_dest = f'{dir_dest}{self.dict_for_standard[j].strip()}'\n # 还不存在该文件夹,新建\n if not os.path.exists(dir_dest):\n os.makedirs(dir_dest)\n path_new = f'{dir_dest}{sep}{jav_file.Name}' # 【临时变量】新的影片路径\n # 目标文件夹没有相同的影片,防止用户已经有一个“avop-127.mp4”,现在又来一个\n if not os.path.exists(path_new):\n os.rename(jav_file.Path, path_new)\n print(' >归类视频文件完成')\n # 移动字幕\n if jav_file.Subtitle:\n path_subtitle_new = f'{dir_dest}{sep}{jav_file.Subtitle}' # 【临时变量】新的字幕路径\n if jav_file.Path_subtitle != path_subtitle_new:\n os.rename(jav_file.Path_subtitle, path_subtitle_new)\n print(' >归类字幕文件完成')\n jav_file.Dir = dir_dest # 
【更新】jav.dir\n else:\n raise FileExistsError(\n f'归类失败,重复的影片,归类的目标文件夹已经存在相同的影片: {path_new}'\n ) # 【终止对该jav的整理】\n\n # 功能: 3重命名文件夹【相同】如果已进行第2操作,第3操作不会进行,因为用户只需要归类视频文件,不需要管文件夹。\n # 参数: 处理的影片jav\n # 返回: 处理的影片jav(所在文件夹路径改变)\n # 辅助: os.exists, os.rename, os.makedirs,record_fail\n def rename_folder(self, jav_file):\n if self.bool_rename_folder:\n # 构造 新文件夹名folder_new\n folder_new = ''\n for j in self._list_rename_folder:\n folder_new = f'{folder_new}{self.dict_for_standard[j]}'\n folder_new = folder_new.rstrip(' .') # 【临时变量】新的所在文件夹。去除末尾空格和“.”\n # 是独立文件夹,才会重命名文件夹\n if jav_file.Bool_in_separate_folder:\n # 当前视频是该车牌的最后一集,他的兄弟姐妹已经处理完成,才会重命名它们的“家”。\n if jav_file.Episode == jav_file.Sum_all_episodes:\n dir_new = f'{os.path.dirname(jav_file.Dir)}{sep}{folder_new}' # 【临时变量】新的影片所在文件夹路径。\n # 想要重命名的目标影片文件夹不存在\n if not os.path.exists(dir_new):\n os.rename(jav_file.Dir, dir_new)\n jav_file.Dir = dir_new # 【更新】jav.dir\n # 目标影片文件夹存在,但就是现在的文件夹,即新旧相同\n elif jav_file.Dir == dir_new:\n pass\n # 真的有一个同名的文件夹了\n else:\n raise FileExistsError(\n f'重命名文件夹失败,已存在相同文件夹: {dir_new}') # 【终止对该jav的整理】\n print(' >重命名文件夹完成')\n # 不是独立的文件夹,建立独立的文件夹\n else:\n path_separate_folder = f'{jav_file.Dir}{sep}{folder_new}' # 【临时变量】需要创建的的影片所在文件夹。\n # 确认没有同名文件夹\n if not os.path.exists(path_separate_folder):\n os.makedirs(path_separate_folder)\n path_new = f'{path_separate_folder}{sep}{jav_file.Name}' # 【临时变量】新的影片路径\n # 如果这个文件夹是现成的,在它内部确认有没有“abc-123.mp4”。\n if not os.path.exists(path_new):\n os.rename(jav_file.Path, path_new)\n print(' >移动到独立文件夹完成')\n # 移动字幕\n if jav_file.Subtitle:\n path_subtitle_new = f'{path_separate_folder}{sep}{jav_file.Subtitle}' # 【临时变量】新的字幕路径\n os.rename(jav_file.Path_subtitle, path_subtitle_new)\n # 下面不会操作 字幕文件 了,jav.path_subtitle不再更新\n print(' >移动字幕到独立文件夹')\n jav_file.Dir = path_separate_folder # 【更新】jav.dir\n # 里面已有“avop-127.mp4”,这不是它的家。\n else:\n raise FileExistsError(\n f'创建独立文件夹失败,已存在相同的视频文件: {path_new}') # 【终止对该jav的整理】\n\n # 功能: 6为当前jav收集演员头像到“.actors”文件夹中\n # 参数: jav_file 处理的jav视频文件对象,jav_model 保存jav元数据的对象\n # 返回: 无\n # 辅助: os.path.exists,os.makedirs, configparser.RawConfigParser, shutil.copyfile\n def collect_sculpture(self, jav_file, jav_model):\n if self._bool_sculpture and jav_file.Episode == 1:\n if not jav_model.Actors:\n print(' >未知演员,无法收集头像')\n else:\n for each_actor in jav_model.Actors:\n path_exist_actor = f'演员头像{sep}{each_actor[0]}{sep}{each_actor}' # 事先准备好的演员头像路径\n if os.path.exists(f'{path_exist_actor}.jpg'):\n pic_type = '.jpg'\n elif os.path.exists(f'{path_exist_actor}.png'):\n pic_type = '.png'\n else:\n config_actor = RawConfigParser()\n config_actor.read('【缺失的演员头像统计For Kodi】.ini',\n encoding='utf-8-sig')\n try:\n each_actor_times = config_actor.get(\n '缺失的演员头像', each_actor)\n config_actor.set(\"缺失的演员头像\", each_actor,\n str(int(each_actor_times) + 1))\n except NoOptionError:\n config_actor.set(\"缺失的演员头像\", each_actor, '1')\n config_actor.write(\n open('【缺失的演员头像统计For Kodi】.ini',\n \"w\",\n encoding='utf-8-sig'))\n continue\n # 已经收录了这个演员头像\n dir_dest_actor = f'{jav_file.Dir}{sep}.actors{sep}' # 头像的目标文件夹\n if not os.path.exists(dir_dest_actor):\n os.makedirs(dir_dest_actor)\n # 复制一份到“.actors”\n copyfile(f'{path_exist_actor}{pic_type}',\n f'{dir_dest_actor}{each_actor}{pic_type}')\n print(' >演员头像收集完成: ', each_actor)\n\n # 功能: 7归类影片,针对文件夹(如果已进行第2操作,第7操作不会进行,因为用户只需要归类视频文件,不需要管文件夹)\n # 参数: jav_file 处理的jav视频文件对象\n # 返回: 处理的影片jav(所在文件夹路径改变)\n # 辅助: os.exists, os.rename, os.makedirs,\n def classify_folder(self, jav_file):\n # 需要移动文件夹,且,是该影片的最后一集\n if self._bool_classify and self._bool_classify_folder and 
jav_file.Episode == jav_file.Sum_all_episodes:\n # 用户选择的文件夹是一部影片的独立文件夹,为了避免在这个文件夹里又生成新的归类文件夹\n if jav_file.Bool_in_separate_folder and self.dir_classify_target.startswith(\n jav_file.Dir):\n raise TooManyDirectoryLevelsError(f'无法归类,不建议在当前文件夹内再新建文件夹')\n # 归类放置的目标文件夹\n dir_dest = f'{self.dir_classify_target}{sep}'\n # 移动的目标文件夹\n for j in self.list_classify_basis:\n # 【临时变量】 文件夹移动的目标上级文件夹 C:\\Users\\JuneRain\\Desktop\\测试文件夹\\1\\葵司\\\n dir_dest = f'{dir_dest}{self.dict_for_standard[j].rstrip(\" .\")}'\n # 【临时变量】 文件夹移动的目标路径 C:\\Users\\JuneRain\\Desktop\\测试文件夹\\1\\葵司\\【葵司】AVOP-127\\\n dir_new = f'{dir_dest}{sep}{jav_file.Folder}'\n # print(dir_new)\n # 还不存在归类的目标文件夹\n if not os.path.exists(dir_new):\n os.makedirs(dir_new)\n # 把现在文件夹里的东西都搬过去\n jav_files = os.listdir(jav_file.Dir)\n for i in jav_files:\n os.rename(f'{jav_file.Dir}{sep}{i}', f'{dir_new}{sep}{i}')\n # 删除“旧房子”,这是javsdt唯一的删除操作,而且os.rmdir只能删除空文件夹\n os.rmdir(jav_file.Dir)\n print(' >归类文件夹完成')\n # 用户已经有了这个文件夹,可能以前处理过同车牌的视频\n else:\n raise FileExistsError(f'归类失败,归类的目标位置已存在相同文件夹: {dir_new}')\n\n # 功能: 写nfo\n # 参数: jav_file 处理的jav视频文件对象,jav_model 保存jav元数据的对象,genres\n # 返回: 素人车牌list\n # 辅助: 无\n def write_nfo(self, jav_file, jav_model, genres):\n if self._bool_nfo:\n # 如果是为kodi准备的nfo,不需要多cd\n if self._bool_cd_only:\n path_nfo = f'{jav_file.Dir}{sep}{jav_file.Name_no_ext.replace(jav_file.Cd, \"\")}.nfo'\n else:\n path_nfo = f'{jav_file.Dir}{sep}{jav_file.Name_no_ext}.nfo'\n # nfo中tilte的写法\n title_in_nfo = ''\n for i in self._list_name_nfo_title:\n title_in_nfo = f'{title_in_nfo}{self.dict_for_standard[i]}' # nfo中tilte的写法\n # 开始写入nfo,这nfo格式是参考的kodi的nfo\n plot = replace_xml(\n jav_model.PlotZh) if self._bool_need_zh_plot else replace_xml(\n jav_model.Plot)\n f = open(path_nfo, 'w', encoding=\"utf-8\")\n f.write(\n f'\\n'\n f'\\n'\n f' {plot}{replace_xml(jav_model.Review)}\\n'\n f' {title_in_nfo}\\n'\n f' {jav_model.Car} {replace_xml(jav_model.Title)}\\n'\n f' {replace_xml(jav_model.Director)}\\n'\n f' {jav_model.Score / 10}\\n'\n f' {jav_model.Score}\\n' # 烂番茄评分 用上面的评分*10\n f' {jav_model.Release[0:4]}\\n'\n f' NC-17\\n'\n f' NC-17\\n'\n f' JP\\n'\n f' {jav_model.Release}\\n'\n f' {jav_model.Release}\\n'\n f' {jav_model.Runtime}\\n'\n f' 日本\\n'\n f' {replace_xml(jav_model.Studio)}\\n'\n f' {jav_model.Car}\\n'\n f' {jav_model.Car}\\n'\n f' {replace_xml(jav_model.Series)}\\n'\n ) # emby不管set系列,kodi可以\n # 需要将特征写入genre\n if self._bool_genre:\n for i in genres:\n f.write(f' {i}\\n')\n if self._bool_write_series and jav_model.Series:\n f.write(f' 系列:{jav_model.Series}\\n')\n if self._bool_write_studio and jav_model.Studio:\n f.write(f' 片商:{jav_model.Studio}\\n')\n for i in self._list_extra_genres:\n f.write(f' {self.dict_for_standard[i]}\\n')\n # 需要将特征写入tag\n if self._bool_tag:\n for i in genres:\n f.write(f' {i}\\n')\n if self._bool_write_series and jav_model.Series:\n f.write(f' 系列:{jav_model.Series}\\n')\n if self._bool_write_studio and jav_model.Studio:\n f.write(f' 片商:{jav_model.Studio}\\n')\n for i in self._list_extra_genres:\n f.write(f' {self.dict_for_standard[i]}\\n')\n # 写入演员\n for i in jav_model.Actors:\n f.write(f' \\n'\n f' {i}\\n'\n f' Actor\\n'\n f' \\n')\n f.write('\\n')\n f.close()\n print(' >nfo收集完成')\n\n def download_fanart(self, jav_file, jav_model):\n if self._bool_jpg:\n # fanart和poster路径\n path_fanart = f'{jav_file.Dir}{sep}'\n path_poster = f'{jav_file.Dir}{sep}'\n for i in self._list_name_fanart:\n path_fanart = f'{path_fanart}{self.dict_for_standard[i]}'\n for i in self._list_name_poster:\n path_poster = 
f'{path_poster}{self.dict_for_standard[i]}'\n # kodi只需要一份图片,不管视频是cd几,图片仅一份不需要cd几。\n if self._bool_cd_only:\n path_fanart = path_fanart.replace(jav_file.Cd, '')\n path_poster = path_poster.replace(jav_file.Cd, '')\n # emby需要多份,现在不是第一集,直接复制第一集的图片\n elif jav_file.Episode != 1:\n # 如果用户不重名视频,并且用户的原视频是第二集,没有带cd2,例如abc-123.mkv和abc-123.mp4,\n # 会导致fanart路径和cd1相同,引发报错raise SameFileError(\"{!r} and {!r} are the same file\".format(src, dst))\n # 所以这里判断下path_fanart有没有\n if not os.path.exists(path_fanart):\n copyfile(path_fanart.replace(jav_file.Cd, '-cd1'),\n path_fanart)\n print(' >fanart.jpg复制成功')\n copyfile(path_poster.replace(jav_file.Cd, '-cd1'),\n path_poster)\n print(' >poster.jpg复制成功')\n # kodi或者emby需要的第一份图片\n if check_picture(path_fanart):\n # 这里有个遗留问题,如果已有的图片文件名是小写,比如abc-123 xx.jpg,现在path_fanart是大写ABC-123,无法改变,poster同理\n # print(' >已有fanart.jpg')\n pass\n else:\n status = False\n if jav_model.CoverBus:\n url_cover = f'{self.url_bus}/pics/cover/{jav_model.CoverBus}'\n print(' >从javbus下载封面: ', url_cover)\n status = download_pic(url_cover, path_fanart,\n self.proxy_bus)\n if not status and jav_model.Javdb:\n url_cover = f'https://c0.jdbstatic.com/covers/{jav_model.Javdb[:2].lower()}/{jav_model.Javdb}.jpg'\n # print(' >从javdb下载封面: ', url_cover)\n print(' >下载封面: ...')\n status = download_pic(url_cover, path_fanart,\n self.proxy_db)\n if not status and jav_model.CoverLibrary:\n url_cover = jav_model.CoverLibrary\n print(' >从dmm下载封面: ', url_cover)\n status = download_pic(url_cover, path_fanart,\n self.proxy_dmm)\n if status:\n pass\n else:\n raise DownloadFanartError\n # 裁剪生成 poster\n if check_picture(path_poster):\n # print(' >已有poster.jpg')\n pass\n else:\n crop_poster_youma(path_fanart, path_poster)\n # 需要加上条纹\n if self._bool_watermark_subtitle and jav_file.Bool_subtitle:\n add_watermark_subtitle(path_poster)\n if self._bool_watermark_divulge and jav_file.Bool_divulge:\n add_watermark_divulge(path_poster)\n\n # 功能: 如果需要为kodi整理头像,则先检查“演员头像for kodi.ini”、“演员头像”文件夹是否存在; 检查 归类根目录 的合法性\n # 参数: 是否需要整理头像,用户自定义的归类根目录,用户选择整理的文件夹路径\n # 返回: 归类根目录路径\n # 辅助: os.sep,os.path.exists,shutil.copyfile\n def check_actors(self):\n # 检查头像: 如果需要为kodi整理头像,先检查演员头像ini、头像文件夹是否存在。\n if self._bool_sculpture:\n if not os.path.exists('演员头像'):\n input('\\n“演员头像”文件夹丢失!请把它放进exe的文件夹中!\\n')\n if not os.path.exists('【缺失的演员头像统计For Kodi】.ini'):\n if os.path.exists('actors_for_kodi.ini'):\n copyfile('actors_for_kodi.ini', '【缺失的演员头像统计For Kodi】.ini')\n print('\\n“【缺失的演员头像统计For Kodi】.ini”成功!')\n else:\n input('\\n请打开“【ini】重新创建ini.exe”创建丢失的程序组件!')\n","repo_name":"IzumiHoshi/javsdt","sub_path":"src/Class/MyHandler.py","file_name":"MyHandler.py","file_ext":"py","file_size_in_byte":58606,"program_lang":"python","lang":"zh","doc_type":"code","stars":34,"dataset":"github-code","pt":"69"} +{"seq_id":"72234365659","text":"try:\n \n import datetime\n import pandas as pd\n import os\n import tables \n import tstables \n import configparser\n import strategy\n import oandapyV20\n import oandapyV20.endpoints.instruments as instruments\n import oandapyV20.endpoints.pricing as pricing\n import oandapyV20.endpoints.forexlabs as labs\n from oandapyV20.exceptions import V20Error\n from oandapyV20.exceptions import StreamTerminated\n import utility_functions as uf\n import time\n import feeder\n\nexcept Exception as e:\n \n print(e) \n \nconfig = configparser.ConfigParser()\nconfig.read('..\\..\\configinfo.cfg')\n\n# For testing:\nsymbol = 'EUR_USD'\ngranularity = 'S5'\naccount_type = 'live'\naskbidmid = 'AB'\nsocket_number = 5555\ndaily_lookback = 
10\ndownload_frequency = datetime.timedelta(seconds=60)\nupdate_signal_frequency = datetime.timedelta(seconds=60)\n#download_data_start_date = datetime.datetime.utcnow()\ndownload_data_start_date = pd.datetime(2020,12,22,0,0,0,0,datetime.timezone.utc)\ndownload_data_end_date = download_data_start_date + datetime.timedelta(hours=36)\n\nverbose = False\n\n# -----------------------------------------------------------------------------------------------------\nif 0==1:\n \n print(\"--- FEEDER ---\")\n print(\"symbol:\", symbol)\n print(\"granularity:\", granularity)\n print(\"account_type:\", account_type)\n print(\"socket_number:\", socket_number)\n print(\"--------------\")\n \n if account_type not in ['live', 'practice', 'backtest']:\n print('Error in account type, it should be either live, practice, or backtest')\n time.sleep(30)\n exit()\n \n f1 = feeder.feeder(config,symbol,granularity,account_type,socket_number,download_frequency,update_signal_frequency,download_data_start_date,download_data_end_date,verbose)\n f1.start() \n\nif 0==1:\n\n # Save real-time data as excel\n \n file_path_h5 = '..\\\\..\\\\datastore\\\\_live\\\\{}\\\\S5.h5'.format(symbol)\n \n f = tables.open_file(file_path_h5,'r')\n ts = f.root.data._f_get_timeseries()\n \n start_time = download_data_start_date\n end_time = download_data_end_date\n \n df_realtime = ts.read_range(start_time,end_time)\n \n filename = \"Realtime.xlsx\"\n folderpath = '..\\\\..\\\\datastore' \n \n uf.write_df_to_excel(df_realtime, folderpath, filename)\n \n f.close()\n\n# -----------------------------------------------------------------------------------------------------\n# Download historical data\nif 0==0:\n \n start_time = datetime.datetime(2020, 12, 22, 23, 0, 0)\n end_time = datetime.datetime.utcnow()\n \n suffix = '000Z' \n start_datetime = start_time.isoformat('T') + suffix \n end_datetime = end_time.isoformat('T') + suffix \n \n params = {\"from\": start_datetime,\n \"to\": end_datetime,\n \"granularity\": granularity,\n \"price\": askbidmid }\n \n config = configparser.ConfigParser()\n config.read('..\\..\\configinfo.cfg')\n accountID = config['oanda_v20']['account_number_{}'.format(account_type)]\n access_token = config['oanda_v20']['access_token_{}'.format(account_type)]\n api = oandapyV20.API(access_token=access_token, environment=\"{}\".format(account_type))\n \n print(\"requesting...\")\n r = instruments.InstrumentsCandles(instrument=symbol, params=params)\n api.request(r)\n print(\"received...\")\n \n raw = r.response.get('candles')\n raw = [cs for cs in raw if cs['complete']]\n \n data = pd.DataFrame()\n\n if len(raw) > 0:\n\n for cs in raw:\n cs['ask_o'] = cs['ask']['o']\n cs['ask_h'] = cs['ask']['h']\n cs['ask_l'] = cs['ask']['l']\n cs['ask_c'] = cs['ask']['c']\n cs['bid_o'] = cs['bid']['o']\n cs['bid_h'] = cs['bid']['h']\n cs['bid_l'] = cs['bid']['l']\n cs['bid_c'] = cs['bid']['c']\n del cs['ask']\n del cs['bid']\n del cs['complete']\n \n data = pd.DataFrame(raw)\n \n data = data.set_index('time') \n data.index = pd.DatetimeIndex(data.index) \n \n data[['ask_c', 'ask_h', 'ask_l', 'ask_o','bid_c', 'bid_h', 'bid_l', 'bid_o']] = data[['ask_c', 'ask_h', 'ask_l', 'ask_o','bid_c', 'bid_h', 'bid_l', 'bid_o']].astype('float64')\n \n data = data[['ask_c', 'ask_h', 'ask_l', 'ask_o','bid_c', 'bid_h', 'bid_l', 'bid_o','volume']]\n \n df_historical = data.tz_localize(None)\n \n filename = \"DownloadedHistorical.xlsx\"\n folderpath = '..\\\\..\\\\datastore' \n \n uf.write_df_to_excel(df_historical, folderpath, filename)\n \n# 
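# Reading a window back out of the tstables store follows the same pattern as the
# realtime block above; a minimal sketch, assuming the HDF5 layout used in this
# script (the time series attached under the root 'data' node) and tz-aware UTC
# bounds:
import tables
import tstables  # needed so PyTables groups expose _f_get_timeseries(), as used above

def read_window(path_h5, start_utc, end_utc):
    f = tables.open_file(path_h5, 'r')
    try:
        ts = f.root.data._f_get_timeseries()
        return ts.read_range(start_utc, end_utc)  # returns a pandas DataFrame
    finally:
        f.close()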
-----------------------------------------------------------------------------------------------------\n# Final Analysis\nif 0==1:\n\n    # Check if two dataframes are identical:\n    print(df_realtime.eq(df_historical))\n    \n    # Typically there will be some indices on the top/bottom that will not exist in one dataframe.\n    # Check the indices that exist in both dataframes\n    idx = df_realtime.index.intersection(df_historical.index)\n    df_compare = df_realtime.loc[idx,:].eq(df_historical.loc[idx,:])\n    print(df_compare.describe())\n\n\n    ","repo_name":"boratarhan/BTrader","sub_path":"source_system/workflow_compare_realtime_and_historical_data.py","file_name":"workflow_compare_realtime_and_historical_data.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"72438156381","text":"'''Retrieve Air Quality Forecasts'''\nfrom datetime import date as date_, datetime\nfrom typing import Callable, Coroutine, Optional, Union\n\n\nclass Forecast:\n    '''\n    Class to retrieve the air quality forecast by zip code or by latitude and\n    longitude.\n    '''\n    def __init__(self, request: Callable[..., Coroutine]) -> None:\n        self._request = request\n\n    async def zipCode(\n        self,\n        zipCode: str,\n        *,\n        date: Optional[Union[date_, datetime, str]] = None,\n        distance: Optional[int] = None\n    ) -> list:\n        '''Request the air quality forecast for a zip code'''\n        params: dict = dict(zipCode=zipCode)\n        if date and isinstance(date, str):\n            y, m, d = date.split('-')\n            params['date'] = date_(int(y), int(m), int(d)).isoformat()\n        elif date and isinstance(date, datetime):\n            params['date'] = date.date().isoformat()\n        elif date and isinstance(date, date_):\n            params['date'] = date.isoformat()\n        if distance:\n            params['distance'] = distance\n\n        return await self._request(\n            'aq/forecast/zipCode',\n            params=params\n        )\n\n    async def latLong(\n        self,\n        latitude: Optional[Union[float, str]] = None,\n        longitude: Optional[Union[float, str]] = None,\n        *,\n        date: Optional[Union[date_, datetime, str]] = None,\n        distance: Optional[int] = None,\n    ) -> list:\n        '''Request the air quality forecast for latitude/longitude'''\n        params: dict = dict(\n            latitude=str(latitude),\n            longitude=str(longitude),\n        )\n        if date and isinstance(date, str):\n            y, m, d = date.split('-')\n            params['date'] = date_(int(y), int(m), int(d)).isoformat()\n        elif date and isinstance(date, datetime):\n            params['date'] = date.date().isoformat()\n        elif date and isinstance(date, date_):\n            params['date'] = date.isoformat()\n        if distance:\n            params['distance'] = distance\n\n        return await self._request(\n            'aq/forecast/latLong',\n            params=params\n        )\n","repo_name":"asymworks/pyairnow","sub_path":"pyairnow/forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"69"} {"seq_id":"38619032447","text":"import keras\nimport pickle\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\nimport numpy as np\nimport time\nimport argparse\n\n\ndef read_data(flag,num):\n\n    fig_w = 45 \n    data_num = 60000 \n    test_num = 10000\n    num_classes = 10\n\n    if (flag == 0):\n        train_data = np.fromfile(\"mnist_train_data\",dtype=np.uint8)\n        test_data = np.fromfile(\"mnist_test_data\",dtype=np.uint8)\n    elif (flag == 1):\n        train_data = np.load(\"mnist_train_data_deskewed.npy\")\n        test_data = 
np.load(\"mnist_test_data_deskewed.npy\")\n    else:\n        train_data = np.load(\"mnist_train_data_denoised.npy\")\n        test_data = np.load(\"mnist_test_data_denoised.npy\")\n\n    y_train = np.fromfile(\"mnist_train_label\",dtype=np.uint8)\n    y_test = np.fromfile(\"mnist_test_label\",dtype=np.uint8)\n    train_data = train_data.reshape(data_num,fig_w,fig_w,1)\n    test_data = test_data.reshape(test_num,fig_w,fig_w,1)\n    \n    train_data = train_data.astype('float32')\n    test_data = test_data.astype('float32')\n\n    train_data = train_data[:num]\n    y_train = y_train[:num]\n\n    train_data /= 255\n    test_data /= 255\n    y_train = keras.utils.to_categorical(y_train, num_classes)\n    y_test = keras.utils.to_categorical(y_test, num_classes)\n\n    return train_data,test_data,y_train,y_test\n\n\ndef CNN(train_data,test_data,y_train,y_test):\n\n    start = time.time()\n\n    batch_size = 128\n    num_classes = 10\n    epochs = 12\n    input_shape = (45,45,1)\n\n    model = Sequential()\n    model.add(Conv2D(32, (3, 3),\n                     activation='relu',\n                     input_shape=input_shape))\n    model.add(Conv2D(64, (3, 3), activation='relu'))\n    model.add(Conv2D(128, (3, 3), activation='relu'))\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(0.35))\n    model.add(Flatten())\n    model.add(Dense(128, activation='relu'))\n    model.add(Dropout(0.5))\n    model.add(Dense(num_classes, activation='softmax'))\n    model.compile(loss=keras.losses.categorical_crossentropy,\n                  optimizer=keras.optimizers.Adadelta(),\n                  metrics=['accuracy'])\n    model.fit(train_data, y_train, batch_size=batch_size, epochs=epochs,\n              verbose=1,validation_data=(test_data, y_test))\n    score = model.evaluate(test_data, y_test, verbose=0)\n\n    return score[1],time.time()-start\n\ndef main():\n    parser = argparse.ArgumentParser()\n    '''\n    Select the dataset and the amount of training data from the command-line arguments\n    '''\n    parser.add_argument('-f', type=int, choices=[0, 1, 2],\n                        help=\"the symbol of dataset \")\n    parser.add_argument('-n', type=int, default = 60000, required=False,\n                        help=\"training data number\")\n    args = parser.parse_args()\n\n\n    train_data,test_data,y_train,y_test = read_data(args.f,args.n)\n    score,time_value = CNN(train_data,test_data,y_train,y_test)\n    print('Test accuracy:', score)\n    print('Running time: ',time_value)\n\nif __name__ == \"__main__\":\n    main()\n\n","repo_name":"Chacha-Chen/CS420_ML_Project","sub_path":"NN_models/keras_cnn.py","file_name":"keras_cnn.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"13854951695","text":"qtd = preco = mais1000 = menor = total = soma = 0\r\nwhile True:\r\n    nome = str(input('Nome do Produto: ')).strip().upper()\r\n    preco = float(input('Preço: R$'))\r\n    qtd += 1\r\n    soma += preco\r\n    if preco > 1000:\r\n        mais1000 += 1\r\n    if qtd == 1:\r\n        menor = preco\r\n        nomeproduto = nome\r\n    elif preco < menor:\r\n        nomeproduto = nome\r\n        menor = preco\r\n    continuar = str(input('Deseja continuar? [S/N]: ')).strip().upper()[0]\r\n    if continuar == 'N':\r\n        break\r\nprint(' O total do pedido foi R${:.2f}, o pedido contem {} itens. \n O mais barato foi {} que custa R${:.2f}. 
\\n {} produtos custam mais de R$1000.00.'.format(soma, qtd, nomeproduto, menor, mais1000))","repo_name":"Lucas44794/cursoEmVideo","sub_path":"ex070.py","file_name":"ex070.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"509778365","text":"import string\n\n\n\ndef clean_headline(headline: str):\n cleaned = headline.lower()\n words = cleaned.split(\" \")\n words = [word for word in words if word not in string.punctuation]\n cleaned = ' '.join(words)\n return cleaned\n\n\ndef tokenize_headlines(headline:str):\n tokens = headline.split(\" \")\n return tokens\n","repo_name":"mleila/AGNews_Document_Classifcation","sub_path":"news_classifier/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"22726243522","text":"import matplotlib.pyplot as plt\n\n\ndef make_donut_chart(dict):\n labels = ['Fivestar', 'Fourstar', 'Threestar', 'Twostar', 'Onestar']\n sizes = [dict['fivepercentage'], dict['fourpercentage'], dict['threepercentage'], dict['twopercentage'],\n dict['onepercentage']]\n colors = ['#ff9999', '#66b3ff', '#99ff99', '#ffcc99', '#68d9df']\n plt.pie(sizes, labels=labels, autopct='%1.0f%%', shadow=False, startangle=90, colors=colors, radius=0.85,\n pctdistance=0.75)\n circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)\n donut = plt.gcf()\n donut.gca().add_artist(circle)\n donut.set_size_inches(4, 4)\n plt.axis('equal')\n plt.tight_layout()\n plt.savefig('/home/spy/projectworkspace/salesstockprediction/base/static/adminResources/image/plot')\n plt.close()\n","repo_name":"Harsh-Parekh/sales_stock_prediction","sub_path":"scripts/donut.py","file_name":"donut.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"18962425165","text":"# -*- coding: utf-8 -*-\nimport unittest\nfrom Exercise2_1 import LinkedList, LinkedNode\n \n \ndef partition(linkedList, x):\n \"\"\"\n @param x: the value for partition (int).\n @param linkedList: the linked list.\n @return: returns a list of values (int in this case). 
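Relative order within each group is preserved (this is a stable partition).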
\n    \"\"\"\n    head = linkedList.head\n    lower = []\n    greaterEq = []\n    while head:\n        if head.val < x:\n            lower.append(head.val)\n        else:\n            greaterEq.append(head.val)\n        head = head.next\n    return lower + greaterEq\n    \n    \n\nclass partitionTest(unittest.TestCase):\n    \n    def testPartition(self):\n        \"\"\"\n        Test partition with value 2:\n        1 -> 3 -> 1 -> 9 -> 3 -> 2 -> 1 -> 3 -> 5\n        the list will become:\n        1 -> 1 -> 1 -> 3 -> 9 -> 3 -> 2 -> 3 -> 5\n        \n        \"\"\"\n        head = LinkedNode(1)\n        linkedList = LinkedList(head)\n        linkedList.insert(3)\n        linkedList.insert(1)\n        linkedList.insert(9)\n        linkedList.insert(3)\n        linkedList.insert(2)\n        linkedList.insert(1)\n        linkedList.insert(3)\n        linkedList.insert(5)\n        \n        self.assertEqual(partition(linkedList, 2), [ 1, 1, 1, 3, 9, 3, 2, 3, 5 ])\n    \n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"Danisc4rp4/exercises","sub_path":"cracking_the_coding_interview/chapter02/Exercise2_4.py","file_name":"Exercise2_4.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"30696449832","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# --------------------------------------------------------\n# @Author : Pan\n# @Date   : 2019-9-20 13:18:34\n# --------------------------------------------------------\n\"\"\"\n\nimport os\nimport sysrsync\nfrom datetime import datetime\n\n\ndef get_polyaxon_output(dir=None):\n    from polyaxon_client.tracking import get_outputs_path\n    polyaxon_output = get_outputs_path()\n    if dir:\n        polyaxon_output = os.path.join(polyaxon_output, dir)\n    return polyaxon_output\n\n\ndef polyaxon_root(dir, data_node):\n    from polyaxon_client.tracking import get_data_paths, get_outputs_path\n    out = os.path.join(get_data_paths()[data_node], dir)\n    print(\"root:{}====>{}\".format(dir, out))\n    return out\n\n\ndef get_polyaxon_dataroot(dir=\"\", data_node=\"ceph\", use_local=False):\n    \"\"\"\n    datauser@192.168.68.79:/nasdata/atp/data (uploaded to the NAS data storage node; accessible in code via get_data_paths()['data-pool'])\n    datauser@192.168.68.79:/sharedata06 (uploaded to the SSD data storage node; accessible in code via get_data_paths()['ssd'])\n    datauser@192.168.68.79:/sharedata (uploaded to the SSD data storage node; accessible in code via get_data_paths()['ssd20'])\n    datauser@192.168.68.79:/atpcephdata (uploaded to ceph storage; accessible in code via get_data_paths()['ceph'])\n    :param dir: str or list\n    :param data_node: the ATP data node, ceph or newceph\n    :param use_local: whether to copy the data to the local data node\n    :return:\n    \"\"\"\n    if isinstance(dir, str):\n        dir = polyaxon_root(dir, data_node) if dir else dir\n    elif isinstance(dir, list):\n        dir = [polyaxon_root(d, data_node) for d in dir if d]\n    else:\n        raise Exception(\"Error:{}\".format(dir))\n    if use_local:\n        dir = rsync_dataset_node(dir)\n    return dir\n\n\ndef polyaxon_env(data_root, val_root, val_dataset, update=False, use_local=True):\n    \"\"\"\n    get_data_paths()['ssd'] points to the SSD data storage node, mounted on every training node at /sharedata06;\n    get_data_paths()['data-pool'] points to the nasdata node, mounted on every training node at /nasdata/atp/data;\n    get_data_paths()['ssd20'] points to the 20T SSD data storage node, mounted on every training node at /sharedata;\n    # local disk path on the training machine\n    host_path = get_data_paths()['host-path']\n    :param data_root:\n    :param val_root:\n    :param val_dataset:\n    :param update: force a re-sync even if the data already exists locally\n    :return:\n    \"\"\"\n    from polyaxon_client.tracking import get_data_paths, get_outputs_path\n    print(\"The environment is polyaxon\")\n    # polyaxon_dataroot = os.path.join(get_data_paths()[\"data-pool\"], 'FaceData')  # upload data to TFR file\n    host_path = os.path.join(get_data_paths()['host-path'], \"FaceData\")\n    
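# mirror the dataset from shared storage onto the node-local disk so training reads are fast\n    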
# src_path = os.path.join(get_data_paths()['ssd'], 'FaceData')  # upload data to TFR file\n    src_path = os.path.join(get_data_paths()['ceph'], 'FaceData')  # upload data to TFR file\n    polyaxon_output = get_outputs_path()\n\n    if isinstance(data_root, str):\n        data_root = [data_root]\n    if use_local:\n        dst_data_root = [os.path.join(host_path, dataset) for dataset in data_root]\n        # sync train data\n        for i, (image_root, name) in enumerate(zip(dst_data_root, data_root)):\n            if update or not os.path.exists(image_root):\n                dst_data_root[i] = rsync(src_path, host_path, name)\n\n        # sync val data\n        dst_val_root = os.path.join(host_path, val_root)\n        # file_processing.remove_dir(dst_val_root)\n        for val_name in val_dataset:\n            val_name = \"{}.bin\".format(val_name)\n            local_val_path = os.path.join(dst_val_root, val_name)\n            if update or not os.path.exists(local_val_path):\n                val_src_root = os.path.join(src_path, val_root)\n                local_val_path = rsync(val_src_root, dst_val_root, val_name)\n    else:\n        dst_data_root = [os.path.join(src_path, dataset) for dataset in data_root]\n        dst_val_root = os.path.join(src_path, val_root)\n\n    return dst_data_root, dst_val_root, polyaxon_output\n\n\ndef rsync_data_node(dir, dst_node='host-path'):\n    \"\"\"\n    Sync data to the given data node\n    :param dir: source data path\n    :param dst_node: the data node to copy to\n    :return:\n    \"\"\"\n    from polyaxon_client.tracking import get_data_paths, get_outputs_path\n    time = datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')\n    dst_root = os.path.join(get_data_paths()[dst_node], time)\n    # fix a bug: on ATP, os.path.isfile cannot tell whether this is a file\n    # isfile = os.path.isfile(dir)\n    isfile = len(dir.split(\".\")) > 1\n    name = os.path.basename(dir)\n    print(\"isfile:{},{}\".format(isfile, dir))\n    if isfile:\n        dir = os.path.dirname(dir)\n    if not os.path.exists(dst_root):\n        os.makedirs(dst_root)\n    print(\"copy data from:{}\".format(dir))\n    print(\"destination   :{}\".format(dst_root))\n    print(\"rsync data ...\")\n    start = datetime.now()\n    sysrsync.run(source=dir, destination=dst_root, options=['-a'])\n    end = datetime.now()\n    print(\"rsync data done,run time:{}\".format(end - start))\n    if isfile:\n        dst_root = os.path.join(dst_root, name)\n    return dst_root\n\n\ndef rsync_dataset_node(dir, dst_node='host-path'):\n    \"\"\"\n    Sync a dataset to the given data node\n    :param dir: source data path\n    :param dst_node: the data node to copy to\n    :return:\n    \"\"\"\n    if isinstance(dir, str):\n        dir = rsync_data_node(dir, dst_node) if dir else dir\n    elif isinstance(dir, list):\n        dir = [rsync_data_node(d, dst_node) for d in dir if d]\n    else:\n        raise Exception(\"Error:{}\".format(dir))\n    return dir\n\n\ndef rsync_data():\n    \"\"\"\n    rsync dataset\n    :return:\n    \"\"\"\n    from polyaxon_client.tracking import get_data_paths, get_outputs_path\n\n    source = os.path.join(get_data_paths()['ceph'], 'FaceData')  # upload data to TFR file\n    destination = os.path.join(get_data_paths()['host-path'], \"FaceData\")\n    if not os.path.exists(destination):\n        os.makedirs(destination)\n    print(\"copy data from:{}\".format(source))\n    print(\"destination   :{}\".format(destination))\n    print(\"rsync data ...\")\n    start = datetime.now()\n    sysrsync.run(source=source, destination=destination, options=['-a'])\n    end = datetime.now()\n    print(\"rsync data done,run time:{}\".format(end - start))\n\n\ndef rsync(src_root, host_root, name):\n    \"\"\"\n    rsync dataset\n    :param src_root:\n    :param host_root:\n    :param name:\n    :return:\n    \"\"\"\n    source = os.path.join(src_root, name)  # upload data to TFR file\n    destination = os.path.join(host_root, name)\n    if not os.path.exists(host_root):\n        os.makedirs(host_root)\n    print(\"copy data from:{}\".format(source))\n    print(\"destination   :{}\".format(destination))\n    
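# rsync's '-a' (archive) flag preserves permissions, timestamps and symlinks during the copy\n    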
print(\"rsync data ...\")\n start = datetime.now()\n sysrsync.run(source=source, destination=destination, options=['-a'])\n end = datetime.now()\n print(\"rsync data done,run time:{}\".format(end - start))\n return destination\n\n\ndef rsync_test(name):\n \"\"\"\n rsync data test\n :param name:\n :return:\n \"\"\"\n source = os.path.join('utils', name) # upload data to TFR file\n destination = os.path.join('FaceData', name)\n if not os.path.exists(destination):\n os.makedirs(destination)\n print(\"copy data from:{}\".format(source))\n print(\"destination :{}\".format(destination))\n print(\"rsync data ...\")\n start = datetime.now()\n sysrsync.run(source=source, destination=destination, options=['-a'])\n end = datetime.now()\n print(\"rsync data done,run time:{}\".format(end - start))\n return destination\n\n\nif __name__ == \"__main__\":\n val_src_root = \"/media/dm/dm/FaceRecognition/torch-Face-Recognize-Pipeline/data/val/\"\n local_val_root = \"/media/dm/dm/FaceRecognition/torch-Face-Recognize-Pipeline/data/val1/val\"\n val_name = \"X4\"\n local_val_path = rsync(val_src_root, local_val_root, val_name)\n","repo_name":"Whiffe/PyTorch-Facial-Expression-Recognition","sub_path":"classifier/utils/rsync_data.py","file_name":"rsync_data.py","file_ext":"py","file_size_in_byte":7925,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"69"} +{"seq_id":"10228896187","text":"import logging\nimport multiprocessing\nimport os\n\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, Field\n\nfrom pool import QuickUmlsProcessPool\n\nlogger = logging.getLogger(__name__)\n\nquickumls_pool = None\n\n\ndef on_startup():\n global quickumls_pool\n\n logger.info(\"Initializing QuickUMLS process pool\")\n quickumls_pool = QuickUmlsProcessPool(\n quickumls_fp=\"/app/umls\", max_size=multiprocessing.cpu_count()\n )\n\n\ndef on_shutdown():\n global quickumls_pool\n if quickumls_pool:\n quickumls_pool.pool.shutdown()\n\n\napp = FastAPI(\n title=\"QuickUMLS API\",\n description=\"A simple API for the QuickUMLS library\",\n version=\"0.1.0\",\n on_startup=[on_startup],\n on_shutdown=[on_shutdown],\n)\n\nMAX_CONCURRENCY = os.environ.get(\"FASTAPI_UMLS_WORKER_CONCURRENCY\") or min(\n multiprocessing.cpu_count() - 2, 2\n)\n\n\nclass MatchRequest(BaseModel):\n text: str = Field(\n ...,\n example=\"Patient has a history of diabetes mellitus type 2.\",\n max_length=1000,\n )\n\n\n@app.get(\"/match\")\nasync def match(item: MatchRequest):\n result = await quickumls_pool.match(item.text)\n\n return result\n","repo_name":"PSU3D0/fastapi-quickumls","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"17366146884","text":"import pygame\nimport random\nimport time\n\nsnake_block_size = 30\nsnake_color = (255,255,255)\nsnake_border_width = 1\nsnake_border_color = None\nsnake_init_length = 1\nfood_color = (255,0,0)\nfood_size = snake_block_size\nfood_inc = 1\ngame_width = 40*snake_block_size\ngame_height = 30*snake_block_size\ntick_speed = 10\ntext_size = 30\n\n\n\nclass Food(object):\n def __init__(self):\n self.size = food_size\n self.color = food_color\n self.x = None\n self.y = None\n\n def randCoords(self, xmax, ymax):\n x = round(random.randrange(0, xmax-self.size) / float(self.size)) * float(self.size)\n y = round(random.randrange(0, ymax-self.size) / float(self.size)) * float(self.size)\n return x,y\n\n def update(self, xmax, ymax, 
avoid=[]):\n        while True:\n            coords = self.randCoords(xmax, ymax)\n            if coords not in avoid:\n                self.x = coords[0]\n                self.y = coords[1]\n                break\n\n    def draw(self, display):\n        pygame.draw.rect(display, self.color, [ self.x, self.y, self.size, self.size ])\n\n\n\nclass Snake(object):\n    def __init__(self, startx, starty):\n        self.snake = [ (startx, starty) for i in range(snake_init_length) ]\n        self.x_change = 0\n        self.y_change = 0\n        self.keymap = { pygame.K_LEFT: (-1,0), pygame.K_RIGHT:(1,0), pygame.K_UP:(0,-1), pygame.K_DOWN:(0,1) }\n\n    def update(self, left, right, top, bottom):\n        oldHead = self.snake[-1]\n        newHead = (oldHead[0]+self.x_change, oldHead[1]+self.y_change)\n        if self.x_change != 0 or self.y_change != 0:\n            if newHead in self.snake or newHead[0] < left or newHead[0] > right-snake_block_size or newHead[1] < top or newHead[1] > bottom-snake_block_size:\n                return False\n            self.snake.append(newHead)\n            del self.snake[0]\n        return newHead\n\n    def updateChange(self, event):\n        if event.key in self.keymap:\n            change = self.keymap[event.key]\n            self.x_change = change[0]*snake_block_size\n            self.y_change = change[1]*snake_block_size\n\n    def draw(self, display):\n        for s in self.snake:\n            if snake_border_color:\n                pygame.draw.rect(display, snake_border_color, [ s[0], s[1], snake_block_size, snake_block_size ])\n            pygame.draw.rect(display, snake_color, [ s[0], s[1], snake_block_size-snake_border_width, snake_block_size-snake_border_width ])\n\n\nclass Game():\n    def __init__(self, game_width=game_width, game_height=game_height, title=\"Snake Game\", bg_color=(0,0,0), food_inc=food_inc):\n        pygame.init()\n        self.bg_color = bg_color\n        self.display = pygame.display.set_mode((game_width, game_height))\n        pygame.display.set_caption(title)\n        self.snake = None\n        self.food = None\n        self.clock = pygame.time.Clock()\n        self.width = game_width\n        self.height = game_height\n        self.food_inc = food_inc\n        self.score = 0\n\n    def setFood(self):\n        self.food = Food()\n        self.food.update(self.width, self.height)\n\n    def setSnake(self):\n        self.snake = Snake(self.width/2, self.height/2)\n\n\n    def updateDisplay(self):\n        self.display.fill(self.bg_color) # clear display\n        if not self.snake.update(0, self.width, 0, self.height): # attempt to update snake\n            print(\"DEAD\")\n            return True\n        snakeHead = self.snake.snake[-1]\n        if snakeHead[0] == self.food.x and snakeHead[1] == self.food.y:\n            self.score += 1\n            self.food.update(self.width, self.height, self.snake.snake)\n            for _ in range(self.food_inc):\n                self.snake.snake.insert(0, self.snake.snake[0])\n        self.displayScore()\n        self.snake.draw(self.display)\n        self.food.draw(self.display)\n        pygame.display.update()\n        self.clock.tick(tick_speed)\n        return False\n\n\n    def displayScore(self):\n        font = pygame.font.SysFont(None, text_size)\n        mesg = font.render(\"Score: {}\".format(self.score), True, (0,0,255, 0.2))\n        mesg_h, mesg_w = mesg.get_size()\n        self.display.blit(mesg, [ self.width/2-mesg_h/2, 0 ])\n\n    def displayMessage(self, msg=\"\", font_size=text_size, font_style=None, color=(0,255,0)):\n        font = pygame.font.SysFont(font_style, font_size)\n        mesg = font.render(msg, True, color)\n        mesg_h, mesg_w = mesg.get_size()\n        self.display.blit(mesg, [ self.width/2-mesg_h/2, self.height/2-mesg_w/2 ])\n\n    def run(self, tick_speed=10):\n        self.setSnake()\n        self.setFood()\n        game_over = False\n        game_close = False\n        count = 3\n\n        while not game_close:\n            while game_over:\n                
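# game-over screen: keep showing the message until the player quits or restarts\n                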
self.displayMessage(\"You Lost! | Score: {} | Press Q-Quit or C-Play Again\".format(self.score))\n                pygame.display.update()\n                for event in pygame.event.get():\n                    if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_q):\n                        game_over = False\n                        game_close = True\n                    if event.type == pygame.KEYDOWN and event.key == pygame.K_c:\n                        self.run()\n\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    game_close = True\n                if event.type == pygame.KEYDOWN:\n                    self.snake.updateChange(event)\n            game_over = self.updateDisplay()\n\n        pygame.quit()\n\n\n\nif __name__ == \"__main__\":\n    game = Game()\n    game.setSnake()\n    game.setFood()\n    game.run()\n","repo_name":"rbyrne30/Fun_Programs","sub_path":"snake/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"34200535415","text":"#!/usr/bin/env python\nfrom debe import *\n\nclass CongestionTagging(object):\n    startText = None\n    startTime = None\n    finishText = None\n    elapsed = None\n\n    def __init__(self):\n        self.main()\n\n    def utc_to_local(self, utc_dt):\n        import pytz, time\n\n        local_tz = pytz.timezone('Asia/Jakarta')\n        local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)\n        return local_tz.normalize(local_dt)\n\n    def get_start_time(self):\n        import time, timeit\n\n        self.startText = time.strftime(\"%H:%M:%S\")\n        self.startTime = timeit.default_timer()\n\n    def get_finish_time(self):\n        import time, timeit, math\n\n        self.elapsed = math.ceil(timeit.default_timer() - self.startTime)\n        self.finishText = time.strftime(\"%H:%M:%S\")\n        duration_minutes = divmod(self.elapsed, 60)\n        duration_hours = divmod(duration_minutes[0], 60)\n        print('Start time', self.startText)\n        print('Finish time', self.finishText)\n        print('Duration', str(duration_hours[0]), ':', str(duration_hours[1]), ':', str(duration_minutes[1]))\n\n    # Get unprocessed kinds\n    def get_kinds_unprocessed(self, limitQuery):\n        data = []\n        query = sessionPostgresTraffic.query(ProcessChunking).\\\n            filter(ProcessChunking.kind_processed == False).\\\n            order_by(desc(ProcessChunking.t_time)).\\\n            limit(limitQuery)\n        for q in query:\n            data.append([q.raw_id, q.kind_id, q.info])\n        return data\n\n    def cleaning(self, data):\n        import string\n        import re\n        # remove url, pic twitter and mention\n        datum = re.sub(r\"(?:RT @|@|pic.twitter.com|https?://)\\S+\", '', data)\n        #substitute - to space\n        datum = datum.replace('-', ' ')\n\n        # remove tab and new line\n        datum = datum.replace('\\n', ' ')\n        datum = datum.replace('\\t', ' ')\n        # remove punctuation\n        translator = str.maketrans('', '', string.punctuation)\n        datum = datum.translate(translator)\n        # remove non ascii\n        datum = re.sub(r'[^\\x00-\\x7F]+', '', datum)\n        # remove emoji\n        #data = data.decode('unicode_escape').encode('ascii', 'ignore')\n        emoji_pattern = re.compile(\"[\"\n                                   \"\\U0001F600-\\U0001F64F\"\n                                   \"\\U0001F300-\\U0001F5FF\"\n                                   \"\\U0001F680-\\U0001F6FF\"\n                                   \"\\U0001F1E0-\\U0001F1FF\"\n                                   \"]+\", flags=re.UNICODE)\n        datum = emoji_pattern.sub(r'', datum)\n        # remove space more than 1\n        datum = re.sub(\" +\", \" \", datum)\n        datum = datum.strip()\n        datum = datum.lower()\n\n        # remove anything inside ()\n        #datum = re.sub(r\"\\([^)]*\\)\", \"\", datum)\n        # substitute . 
to space\n        #datum = re.sub(r\"\\.\", \" \", datum)\n        # remove space more than 1\n        #datum = re.sub(\" +\", \" \", datum)\n        #datum.replace('(','')\n        #datum.replace(')', '')\n        #datum = datum.strip()\n        #datum = datum.lower()\n        return datum\n\n    def tagging(self, data):\n        import pickle\n        from nltk.tokenize import word_tokenize\n        from labels import labels\n\n        tagger = pickle.load(open(\"/home/aan/congestion/tagged.pickle\", \"rb\"))\n        words = tagger.tag(word_tokenize(data))\n        datum = []\n        for w in words:\n            if w[0] == 'arah':\n                temp = [w[0], 'DC', 13]\n            elif w[1] == '-None-':\n                temp = [w[0], 'NN', 2]\n            else:\n                label = labels[w[1]]\n                temp = [w[0], w[1], label]\n            datum.append(temp)\n        return datum\n\n    def update_kind_data(self, kind_id):\n        query = sessionPostgresTraffic.query(Kind).\\\n            filter(Kind.id == kind_id).\\\n            first()\n        query.processed = True\n        sessionPostgresTraffic.commit()\n\n    def insert_word_data(self, data):\n        seq = 0\n        for d in data[4]:\n            temp = Word(\n                raw_id = data[0],\n                tag_id = d[2],\n                sequence = seq,\n                name = d[0]\n            )\n            seq = seq + 1\n            sessionPostgresTraffic.add(temp)\n\n    def insert_syllable_data(self, data):\n        seq = 0\n        for d in data[4]:\n            temp = Syllable(\n                user_id = 3,\n                raw_id = data[0],\n                tag_id = d[2],\n                sequence = seq,\n                name = d[0]\n            )\n            seq = seq + 1\n            sessionPostgresTraffic.add(temp)\n\n    def main(self):\n        self.get_start_time()\n        limitQuery = 50\n        results = []\n        data = self.get_kinds_unprocessed(limitQuery)\n        if len(data) > 0:\n            for d in data:\n                dCleaned = self.cleaning(d[2])\n                dTagged = self.tagging(dCleaned)\n                self.insert_syllable_data([d[0], d[1], d[2], dCleaned, dTagged])\n                self.insert_word_data([d[0], d[1], d[2], dCleaned, dTagged])\n                results.append([d[0], d[1], d[2], dCleaned, dTagged])\n            sessionPostgresTraffic.commit()\n        else:\n            results = []\n\n        if len(results) > 0:\n            for r in results:\n                self.update_kind_data(r[1])\n                print(r)\n\n        print('Row count', str(len(results)))\n        self.get_finish_time()\n\ndef main():\n    CongestionTagging()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"aansubarkah/congestion_python","sub_path":"CongestionTagging.py","file_name":"CongestionTagging.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"10673722779","text":"import math\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.utils.rnn as rnn_utils\r\nfrom torch.nn.functional import gumbel_softmax # (logits, tau=1, hard=False, eps=1e-10, dim=-1)\r\nfrom torch.distributions.gumbel import Gumbel # (loc, scale, validate_args=None)\r\n\r\n\r\n'''\r\nReference:\r\n\r\nhttps://github.com/YongfeiYan/Gumbel_Softmax_VAE/blob/master/gumbel_softmax_vae.py\r\n\r\n'''\r\n\r\nclass TauAnnealingScheduler(object):\r\n    def __init__(self, tau_min=0.2, update_step=500, annealing_rate=1e-4):\r\n        self.update_step = update_step # {500, 1000}\r\n        self.annealing_rate = annealing_rate # {1e-5, 1e-4}\r\n        self.tau_min = tau_min\r\n        self.step = 0\r\n        self.tau = 0\r\n        \r\n    def __call__(self):\r\n        if self.step % self.update_step == 0:\r\n            self.tau = max(self.tau_min, math.exp(-self.annealing_rate * self.step)) # tau : 0.2 ~ 1.0\r\n        self.step += 1\r\n        return self.tau\r\n        \r\n    @property\r\n    def min(self):\r\n        return self.tau_min\r\n\r\n\r\nclass GumbelSmilesVAE(nn.Module):\r\n    def __init__(self, vocab_size, embedding_size, hidden_size, latent_size, sos_idx, eos_idx, pad_idx, num_layers=2, device=None):\r\n        super(GumbelSmilesVAE, 
self).__init__()\r\n \r\n ## params\r\n self.vocab_size = vocab_size\r\n self.embedding_size = embedding_size\r\n self.hidden_size = hidden_size\r\n self.latent_size = latent_size\r\n self.categorical_size = 2 # 2: binary, >2: category\r\n self.num_layers = num_layers\r\n self.device = torch.device('cpu') if device is None else device\r\n \r\n ## special tokens\r\n self.sos_idx = sos_idx\r\n self.eos_idx = eos_idx\r\n self.pad_idx = pad_idx\r\n \r\n ## models\r\n self.encoder = nn.GRU(input_size=self.embedding_size, hidden_size=self.hidden_size, num_layers=self.num_layers, bidirectional=True, batch_first=True)\r\n self.decoder = nn.GRU(input_size=self.embedding_size, hidden_size=self.hidden_size, num_layers=self.num_layers, bidirectional=False, batch_first=True)\r\n self.embedding_enc = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=self.embedding_size, padding_idx=self.pad_idx)\r\n self.embedding_dec = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=self.embedding_size, padding_idx=self.pad_idx)\r\n self.hidden2latent = nn.Linear(self.hidden_size * 2 * self.num_layers, self.latent_size * self.categorical_size)\r\n self.latent2hidden = nn.Sequential(nn.Linear(self.latent_size, self.hidden_size * self.num_layers), nn.Tanh())\r\n self.output2vocab = nn.Linear(self.hidden_size, self.vocab_size)\r\n \r\n ## annealing scheduler\r\n self.tau = TauAnnealingScheduler()\r\n \r\n ## Optimizer\r\n self.optim = torch.optim.Adam(self.parameters(), lr=1e-3)\r\n \r\n ## device\r\n self.to(self.device)\r\n\r\n \r\n def partial_fit(self, smiles, length, beta=1.): # ex) smiles.shape = (batch, seq), length.shape = (batch, )\r\n ## Training phase\r\n self.train()\r\n \r\n ## Forward\r\n logp, q, z = self(smiles, length)\r\n \r\n ## Loss\r\n loss_recon, loss_kl = self._loss_ft(smiles, logp, q)\r\n loss_vae = loss_recon + beta * loss_kl\r\n \r\n ## Backpropagation\r\n self.optim.zero_grad()\r\n loss_vae.backward()\r\n torch.nn.utils.clip_grad_norm_(self.parameters(), 1.) 
# gradient clipping\r\n self.optim.step()\r\n \r\n return loss_vae.item(), loss_recon.item(), loss_kl.item()\r\n \r\n \r\n def _loss_ft(self, target, logp, q):\r\n '''\r\n # target.shape = (batch, seq)\r\n # logp.shape = (batch, seq, vocab)\r\n # q.shape = (batch, latent, category)\r\n '''\r\n ## Reconstruction Loss\r\n target_ravel = target[:,1:].contiguous().view(-1) # target_ravel.shape = (batch*seq, )\r\n logp_ravel = logp[:,:-1,:].contiguous().view(-1, logp.size(2)) # logp_ravel.shape = (batch*seq, vocab)\r\n loss_recon = nn.NLLLoss(ignore_index=self.pad_idx, reduction=\"mean\")(logp_ravel, target_ravel)\r\n\r\n ## KL Divergence Loss\r\n logq = torch.log(q * self.categorical_size + 1e-10) # logq.shape = (batch, latent, category)\r\n loss_kl = torch.sum(q * logq, dim=-1).mean()\r\n\r\n return loss_recon, loss_kl\r\n \r\n \r\n def forward(self, inps, lens, hard=False): # inps.shape = (batch, seq), lens.shape = (batch, )\r\n batch_size = inps.size(0)\r\n \r\n ## Sorting by seqlen\r\n sorted_seqlen, sorted_idx = torch.sort(lens, descending=True)\r\n sorted_inps = inps[sorted_idx]\r\n \r\n ## Packing for encoder\r\n inps_emb = self.embedding_enc(sorted_inps) # inps_emb.shape = (batch, seq, emb)\r\n packed_inps = rnn_utils.pack_padded_sequence(inps_emb, sorted_seqlen.data.tolist(), batch_first=True)\r\n \r\n ## Encoding\r\n _, hiddens = self.encoder(packed_inps) # hiddens.shape = (2 * numlayer, batch, hidden)\r\n \r\n ## Latent vector\r\n hiddens = hiddens.transpose(0,1).contiguous().view(batch_size,-1) # hiddens.shape = (batch, hidden * 2 * numlayer)\r\n y = self.hidden2latent(hiddens) # y.shape = (batch, latent * category)\r\n y = y.view(batch_size, self.latent_size, self.categorical_size) # y.shape = (batch, latent, category)\r\n if hard:\r\n z = gumbel_softmax(y, tau=self.tau.min, hard=True, eps=1e-10, dim=-1) # z.shape = (batch, latent, category)\r\n else:\r\n z = gumbel_softmax(y, tau=self.tau(), hard=False, eps=1e-10, dim=-1) # z.shape = (batch, latent, category)\r\n z = z[:,:,0].contiguous() # z.shape = (batch, latent)\r\n q = nn.functional.softmax(y, dim=-1) # q.shape = (batch, latent, category)\r\n \r\n ## Context vector\r\n contexts = self.latent2hidden(z) # contexts.shape = (batch, hidden * numlayer)\r\n contexts = contexts.view(-1, self.hidden_size, self.num_layers) # contexts.shape = (batch, hidden, numlayer)\r\n contexts = contexts.transpose(1,2).transpose(0,1).contiguous() # contexts.shape = (numlayer, batch, hidden)\r\n \r\n ## Packing for decoder - Teacher forcing\r\n inps_emb_dec = self.embedding_dec(sorted_inps) # inps_emb.shape = (batch, seq, emb)\r\n packed_inps_dec = rnn_utils.pack_padded_sequence(inps_emb_dec, sorted_seqlen.data.tolist(), batch_first=True)\r\n \r\n ## Decoding\r\n packed_outs, _ = self.decoder(packed_inps_dec, contexts)\r\n sorted_outs, _ = rnn_utils.pad_packed_sequence(packed_outs, batch_first=True) # outs.shape = (batch, seq, hidden)\r\n sorted_outs = sorted_outs.contiguous()\r\n \r\n ## Reordeing\r\n _, original_idx = torch.sort(sorted_idx, descending=False)\r\n outs = sorted_outs[original_idx] # outs.shape = (batch, seq, hidden)\r\n \r\n ## Prediction\r\n logits = self.output2vocab(outs) # logits.shape = (batch, seq, vocab)\r\n logp = nn.functional.log_softmax(logits, dim=-1) # logp.shape = (batch, seq, vocab)\r\n \r\n return logp, q, z\r\n \r\n \r\n def inference(self, z=None, max_seqlen=100, greedy=False): # z.shape = (latent, )\r\n ## inference phase\r\n self.eval()\r\n \r\n ## Sampling\r\n if z is None:\r\n z = self.sample_z() # z.shape = (1, 
latent)\r\n elif torch.is_tensor(z) and z.dim() == 1:\r\n z = z.unsqueeze(0) # z.shape = (1, latent)\r\n \r\n ## Context\r\n contexts = self.latent2hidden(z) # contexts.shape = (1, hidden * numlayer)\r\n contexts = contexts.view(-1, self.hidden_size, self.num_layers) # contexts.shape = (1, hidden, numlayer)\r\n contexts = contexts.transpose(1,2).transpose(0,1).contiguous() # contexts.shape = (numlayer, 1, hidden)\r\n \r\n ## start token\r\n inps_sos = torch.full(size=(1,1), fill_value=self.sos_idx, dtype=torch.long, device=self.device) # inps_sos.shape = (1, 1)\r\n \r\n ## Generation\r\n generated = torch.zeros((1, max_seqlen), dtype=torch.long, device=self.device) # generated.shape = (1, max_seqlen)\r\n generated_logits = torch.zeros((1, max_seqlen, self.vocab_size), device=self.device) # generated_logits.shape = (1, max_seqlen, vocab)\r\n \r\n inps = inps_sos # inps.shape = (1,1)\r\n hiddens = contexts # hiddens.shape = (numlayer, 1, hidden)\r\n seqlen = 0\r\n for i in range(max_seqlen):\r\n ## Embedding\r\n inps_emb_dec = self.embedding_dec(inps) # inps_emb_dec.shape = (1, 1, emb)\r\n \r\n ## Decoding\r\n outs, hiddens = self.decoder(inps_emb_dec, hiddens) # outs.shape = (1, 1, hidden), hiddens = (numlayer, 1, hidden)\r\n \r\n ## Logit\r\n logits = self.output2vocab(outs) # logits.shape = (1, 1, vocab)\r\n \r\n ## Save\r\n generated[:,i] = inps.view(-1)\r\n generated_logits[:,i,:] = logits[:,0,:]\r\n seqlen += 1\r\n \r\n ## Terminal condition\r\n if inps[0][0] == self.eos_idx:\r\n break\r\n \r\n ## Next word\r\n if greedy:\r\n _, top_idx = torch.topk(logits, 1, dim=-1) # top_idx.shape = (1, 1, 1)\r\n inps = top_idx.contiguous().view(1, 1) # inps.shape = (1, 1)\r\n else:\r\n probs = torch.softmax(logits, dim=-1) # probs.shape = (1, 1, vocab)\r\n inps = torch.multinomial(probs.view(1, -1), 1) # inps.shape = (1, 1)\r\n \r\n ## results\r\n results = generated[0].cpu().numpy() # results.shape = (max_seqlen, )\r\n return results\r\n \r\n \r\n def load_model(self, path):\r\n weights = torch.load(path)\r\n self.load_state_dict(weights)\r\n\r\n\r\n def save_model(self, path):\r\n torch.save(self.state_dict(), path)\r\n\r\n\r\n def sample_z(self):\r\n z = np.random.binomial(1, 0.5, self.latent_size).astype(np.float32) # z.shape = (latent, )\r\n z = torch.from_numpy(z).to(self.device) # z.shape = (latent, )\r\n return z\r\n\r\n","repo_name":"mathcom/MolBit","sub_path":"bin/gumbel.py","file_name":"gumbel.py","file_ext":"py","file_size_in_byte":10152,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"} +{"seq_id":"25917247944","text":"import numpy as np\nfrom copy import deepcopy\nimport matplotlib.pyplot as plt\n\nstates = np.arange(101)\n\n#Initial values.\n#v = np.zeros(101)\n#v[100] = 1.0 \n\ndef actionsAvailable(state):\n return np.arange(1, min((state, 100 - state)) + 1)\n\ndef expectedValue(state, action, ph, v):\n return ph*v[state + action] + (1 - ph)*v[state - action]\n\ndef newValue(state, ph, v):\n A = actionsAvailable(state)\n return max([expectedValue(state, action, ph, v) for action in A])\n\ndef bestAction(state, ph, v):\n A = actionsAvailable(state)\n return np.argmax(np.array([expectedValue(state, action, ph, v) for action in A])) + 1\n\ndef sweep(ph, v):\n D = 0\n for i in range(1, 100):\n vn = newValue(i, ph, v)\n D = max((D, vn - v[i]))\n v[i] = vn\n return D, v\n \ndef full(ph = 0.4, theta = 0.0):\n v = np.zeros(101)\n v[100] = 1.0\n D = 10\n trace = [deepcopy(v)]\n while D > theta:\n D, v = sweep(ph, v)\n 
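# keep a snapshot of the value function after every sweep so the convergence history is available\n        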
trace.append(deepcopy(v))\n    return trace \n\ntrace = full()\n\ndef figs(vals, ph):\n    plt.plot(states, vals)\n    plt.xlabel(\"states\")\n    plt.ylabel(\"vals\")\n    plt.title(\"Values, ph = \" + str(ph))\n    plt.show()\n    plt.plot(states[1:-1], [bestAction(i, ph, vals) for i in range(1, 100)])\n    plt.xlabel(\"states\")\n    plt.ylabel(\"Best action\")\n    plt.title(\"Best actions, ph = \" + str(ph))\n    plt.show()\n    return None\n    \n\n\n\n","repo_name":"atbolsh/gamblingRL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"24466485222","text":"# Argumentos posicionais - contar a ordem dos argumentos fornecidos\n# A função abaixo apresenta uma informação sobre animais de estimação, o tipo de cada animal de estimação\n# e o nome dele\n\ndef describe_pet(animal_type, pet_name):\n    \"\"\"Exibe informação sobre um animal de estimação.\"\"\"\n    print(\"\\nI have a \" + animal_type + \".\")\n    print(\"My \" + animal_type + \"'s name is \" + pet_name.title() + \".\")\ndescribe_pet('hamster', 'harry')\ndescribe_pet('dog', 'wolverine')\n\n# Argumentos nomeados\ndescribe_pet(animal_type = 'hamster', pet_name = 'harry')\n# Nesse caso a ordem dos argumentos não importa, uma vez que foi informado a Python o parâmetro e qual argumento\n# este corresponde:\ndescribe_pet( pet_name = 'harry', animal_type = 'hamster')","repo_name":"rsmonteiro2021/execicios_python","sub_path":"cap_8/listagem/pets.py","file_name":"pets.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} {"seq_id":"24919685586","text":"from chatgpt import *\nfrom led import *\nimport re\nimport urllib.parse\nimport os\nfrom flask import request\nimport json\nimport csv\nimport requests\n\n# benchmark_file = 'data/benchmarkLEs.json'\n# benchmark_file = 'data/sample1.json'\nbenchmark_file = 'data/all_lexp.json'\n\noutput_file = 'data/analysisResults3.json'\noutput_file_csv = 'data/analysisResults3.csv'\nresults = []\n\ncounter = 0\nfailed = 0\n\nwith open(benchmark_file, \"r\") as file:\n    benchmarkDict = json.load(file)\n\nfor lexp in benchmarkDict:\n    text = benchmarkDict[lexp]\n    try:\n        categorisationResult = json.loads(categoriseChildhood(text))\n    except:\n        failed = failed + 1\n        print(\"Failed to categorise\")\n        continue\n    singleResult = dict()\n    singleResult['lexp'] = lexp\n    singleResult['text'] = text\n    singleResult['childhood'] = categorisationResult['childhood']\n    singleResult['reason'] = categorisationResult['reason']\n    results.append(singleResult)\n    counter = counter + 1\n    print(\"categorised items: \" + str(counter), end='\\r')\n\n\nwith open(output_file, 'w') as json_output:\n    json_output.write(json.dumps(results, indent=4))\n\nfield_names = ['lexp', 'text', 'childhood', 'reason']\n\nwith open(output_file_csv, 'w') as csvfile:\n    writer = csv.DictWriter(csvfile, fieldnames=field_names)\n    writer.writeheader()\n    writer.writerows(results)\n\nprint(\"**************************\")\nprint(\"processed \" + str(counter) + \" items\")\nprint(\"Failures: \" + str(failed))\n\n\n","repo_name":"polifonia-project/child-search-expansions","sub_path":"analyseBenchmarks.py","file_name":"analyseBenchmarks.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"15204078216","text":"#o/p: [12, 8, 6, 4, 2]\n#Only even in descending 
order\ninput_list = [2,6,12, 5, 8, 21, 4, 17, 9]\neven=[i for i in input_list if i%2==0]\neven.sort(reverse=True)\nprint(even)\n\n\n#You are given a list of tuples, each representing a person's name and age.\n# Create a function that takes this list as input and returns a dictionary where the names are keys, \n# and the values are lists of ages for each person with that name.\ninput_list = [(\"Alice\", 25), (\"Bob\", 30), (\"Alice\", 28), (\"Charlie\", 25)]\na={}\nfor b,c in input_list:\n    if b in a:\n        a[b].append(c)\n    else:\n        a[b]=[c]\na\n#output\n# {\n#     'Alice': [25, 28],\n#     'Bob': [30],\n#     'Charlie': [25]\n# }\n\n\n\n# Problem 3: Set Operations\n# Write a function that takes two sets as input and returns a set containing all the \n# elements that are common to both sets, \n# as well as the elements that are unique to each set. \n# This set should be sorted in ascending order.\n\nb = {2, 4, 6, 8, 10}\nc = {5, 6, 7, 8, 9}\n\na=b.union(c)-b.intersection(c)\na\n\n#o/p: {2, 4, 5, 7, 9, 10}\n\n\nshopping_cart = {'apple': 5, 'banana': 3, 'orange': 4, 'grapes': 2}\ndiscount_items = ['apple', 'grapes']\n\nfor i,j in shopping_cart.items():\n    if i in discount_items:\n        shopping_cart[i]=shopping_cart[i]-1\n\nshopping_cart\n\n#{'apple': 4, 'banana': 3, 'orange': 4, 'grapes': 1}\n\n\n\nstudent_data = [\n    ('Alice', {'Math', 'Physics', 'Chemistry'}, [88, 92, 78]),\n    ('Bob', {'Math', 'English'}, [76, 84]),\n    ('Charlie', {'Physics', 'History'}, [90, 88]),\n    ('David', {'Math', 'Chemistry'}, [82, 95])\n]\n\n\na={}\nfor i,j,k in student_data:\n    a[i]=(sum(k)/len(k))\n\na\n\n# {\n#     'Alice': 86.0,\n#     'Bob': 80.0,\n#     'Charlie': 89.0,\n#     'David': 88.5\n# }\n\n\n# S = \"practice\"\n# Output: prectica\n# Explanation: The vowels are a, i, e\n# Reverse of these is e, i, a.\nS = \"practice\"\ns=list(S)\novel=[]\nfor i in s:\n    if i in ['a','e','i','o','u']:\n        ovel.append(i)\nz=-1\nfor i in range(len(s)):\n    if s[i] in ['a','e','i','o','u']:\n        s[i]=ovel[z]\n        z=z-1\nprint(\"\".join(s))\n\n\n# Output: prectica\n\n\nS = \"i.like.this.program.very.much\"\n\nprint(\".\".join([i for i in reversed(S.split(\".\"))]))\n\n# Output: much.very.program.this.like.i","repo_name":"sumanthssuvarna/Bigdata","sub_path":"Python/Problem solving/day_1.py","file_name":"day_1.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"33796298356","text":"import requests\nfrom flask import Flask\nfrom mako.template import Template\nfrom mako.lookup import TemplateLookup\n\napp = Flask(__name__)\ntemplate_lookup = TemplateLookup(directories=['templates'])\n\n@app.route(\"/starred_repos\")\ndef get_starred_repos():\n    get_starred_repos_template = template_lookup.get_template(\"get_user.mako\")\n    return get_starred_repos_template.render()\n\n@app.route(\"/starred_repos/<username>\")\ndef show_starred_repos(username):\n    r = requests.get(f'https://api.github.com/users/{username}/starred')\n    json_resp = r.json()\n    repo_names= [\n        {\n            'name': repo['name'],\n            'url': repo['html_url']\n        }\n        for repo in json_resp\n    ]\n    starred_repos_template = template_lookup.get_template(\"starred_repos.mako\")\n    return starred_repos_template.render(repos=repo_names, user=username)","repo_name":"adamvonbaron/pythonstuff","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"20766632373","text":"from confluent_kafka import Consumer\nimport logging\n# 
Create logger for consumer (logs will be emitted when poll() is called)\nlogger = logging.getLogger('consumer')\nlogger.setLevel(logging.DEBUG)\nhandler = logging.StreamHandler()\nhandler.setFormatter(logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s'))\nlogger.addHandler(handler)\n\nc = Consumer({\n 'bootstrap.servers': 'kafka:9092',\n 'group.id': 'mygroup',\n 'auto.offset.reset': 'earliest',\n 'session.timeout.ms': 60000,\n \"log.connection.close\": False,\n \"log_level\": 3,\n \"log.queue\": False\n}, logger=logger)\nTOPIC = 'my-topic'\n\ndef print_assignment(consumer, partitions):\n print('Assignment:', partitions)\n \ndef run_consumer():\n print(f'Starting consumer... for subscription to topic: {TOPIC}')\n c.subscribe([TOPIC], on_assign=print_assignment)\n\n while True:\n # print('Waiting for message...')\n msg = c.poll(1.0)\n # partitions = c.assignment()\n # print(f'Received message: {msg} partitions: {partitions} postition: {c.position(partitions)}')\n if msg is None:\n continue\n if msg.error():\n print(\"Consumer error: {}\".format(msg.error()))\n continue\n\n print(f\"Received message:{msg.value().decode('utf-8')} from partition: {msg.partition()}\")\n\n c.close()\n\nif __name__ == '__main__':\n run_consumer()","repo_name":"crystas-sc/kafka_examples","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"11674128348","text":"import os\nfrom collections import Counter, defaultdict\n\nimport cv2\nimport numpy as np\n\ndata_dir = os.path.join('..', 'data', 'scored')\n\n\nclass Cluster:\n \"\"\"\n Represents a continuous block of pixels on non-black color.\n Precise definition of 'non-black' is given with a greyscale threshold\n \"\"\"\n\n def __init__(self, img, pixels):\n self.img = img\n self.pixels = pixels\n\n pixel_x, pixel_y = zip(*pixels)\n index_of_center_pixel = np.argmax(img[pixel_x, pixel_y])\n self.center_pixel = pixels[index_of_center_pixel]\n self.intensity = self.img[self.center_pixel]\n\n def size(self):\n \"\"\"\n @return: number of pixels in cluster\n @rtype: int\n \"\"\"\n return len(self.pixels)\n\n def get_center_pixel(self):\n '''\n @return: the center (brightest) pixel of the cluster\n @rtype: pair\n '''\n return self.center_pixel\n\n def get_intensity(self):\n '''\n @return: the greyscale color of the brightest pixel\n @rtype: float\n '''\n return self.intensity\n\n def num_intensities(self):\n \"\"\"\n @return: number of different intensities in the cluster pixels\n @rtype: int\n \"\"\"\n pixel_x, pixel_y = zip(*self.pixels)\n return np.unique(self.img[pixel_x, pixel_y].flatten(), return_counts=True)[0].shape[0]\n\n def crop_patch(self, patch_size):\n \"\"\"\n @param patch_size: the size of the resulting cropped patch, currently the width and height of patch are equal\n @type patch_size: int\n @return: a patch of the image which contains the galaxy\n @rtype: 2D np.array\n \"\"\"\n bottom = self.center_pixel[0] - patch_size//2 + 1\n top = self.center_pixel[0] + patch_size // 2 + 1\n left = self.center_pixel[1] - patch_size // 2 + 1\n right = self.center_pixel[1] + patch_size // 2 + 1\n\n if bottom >= 0 and left >= 0 and top < self.img.shape[0] and right < self.img.shape[1]:\n patch = self.img[bottom: top, left: right] # down-up\n assert patch.shape[0] == patch_size and patch.shape[1] == patch_size\n return patch\n else:\n return None\n\n @staticmethod\n def find_clusters(img, pixels):\n \"\"\"\n 
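Groups the given pixels into clusters using an 8-connected breadth-first flood fill.\n\n        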
@param img: a grayscale image from the dataset\n        @type img: 2D np array\n        @param pixels: the pixels from img which are not background\n        @type pixels: list of pairs\n        @return: list of clusters formed by the input pixels\n        @rtype: list of Cluster objects\n        \"\"\"\n        assert img.ndim == 2\n\n        q = []\n        vis = set()\n        pixels_set = set(pixels)\n        dirx = [1, 1, 1, -1, -1, -1, 0, 0]\n        diry = [1, -1, 0, 1, -1, 0, 1, -1]\n\n        clusters = []\n        for pixel in pixels:\n            if pixel in vis:\n                continue\n\n            curr_cluster = []\n            q.append(pixel)\n            while len(q) > 0:\n                x, y = q.pop(0)\n                if (x, y) in vis:\n                    continue\n                vis.add((x, y))\n                curr_cluster.append((x, y))\n\n                for deltax, deltay in zip(dirx, diry):\n                    newx, newy = x + deltax, y + deltay\n\n                    if (newx, newy) in vis or \\\n                            newx < 0 or newy < 0 or \\\n                            newx >= img.shape[0] or \\\n                            newy >= img.shape[1] or \\\n                            (newx, newy) not in pixels_set:\n                        continue\n                    q.append((newx, newy))\n\n            clusters.append(curr_cluster)\n\n        return [Cluster(img, cluster_pixels) for cluster_pixels in clusters]\n\n\ndef get_galaxy_pixels(img, threshold=40):\n    \"\"\"\n    @param img: greyscale image of a real galaxy, noisy galaxy or a completely fake image\n    @type img: 2D numpy array\n    @param threshold: pixels above threshold are considered a part of a galaxy\n    @type threshold: int, [0, 255]\n    @return: the coordinates of the galaxy pixels and their intensity\n    @rtype: pair of lists\n    \"\"\"\n    # find all pixels which are above threshold\n    galaxy_x, galaxy_y = tuple(np.where(img > threshold))\n\n    # list of pairs representing the galaxy pixel coords\n    galaxy_coords = list(zip(galaxy_x, galaxy_y))\n\n    galaxy_pixel_values = img[galaxy_x, galaxy_y]\n\n    return galaxy_coords, galaxy_pixel_values\n\n\ndef get_background(img):\n    background_x, background_y = np.where(img == 0)\n\n    return background_x, background_y\n\n\ndef num_background_pixels(img):\n    return int(np.count_nonzero(img == 0))\n\n\ndef estimate_background_intensity_threshold(img, background_pixels_ratio=0.8):\n    \"\"\"\n    Start from the darkest pixel (intensity = 0) and iteratively increase the intensity, adding the pixels with\n    that intensity to 'background'. We stop when the number of pixels reaches some threshold (like 80% of total\n    pixels). 
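For example, with the default ratio of 0.8 on a 1000x1000 image, the function returns the first intensity at which the cumulative pixel count exceeds 800,000. 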
Undefined behavior for fake/noise images.\n    @param img: a greyscale image\n    @type img: 2D numpy array\n    @param background_pixels_ratio: the ratio of background vs all pixels when we stop the iteration\n    @type background_pixels_ratio: float, [0, 1]\n    @return: the pixel intensity such that pixels of <= intensity occupy at least background_pixels_ratio of the image\n    @rtype: int, [0, 255]\n    \"\"\"\n    assert img.ndim == 2\n\n    pixel_intesity_cnts = pixel_intensity_histogram(img)\n    num_total_pixels = img.shape[0] * img.shape[1]\n\n    num_background_pixels = 0\n    for intensity in range(0, 256):\n        num_background_pixels += pixel_intesity_cnts[intensity]\n        if num_background_pixels / num_total_pixels > background_pixels_ratio:\n            return intensity\n\n\ndef pixel_intensity_histogram(img):\n    \"\"\"\n    @param img: a greyscale image\n    @type img: 2d np.array\n    @return: a mapping of pixel intensity -> cnt of pixels with that intensity in img\n    @rtype: dict\n    \"\"\"\n    return defaultdict(int, Counter(img.flatten()))\n\n\nif __name__ == \"__main__\":\n    # just testing the Cluster class\n    img = cv2.imread(os.path.join(data_dir, '1013618.png'), cv2.IMREAD_GRAYSCALE)\n    pixel_value_cnts = Counter(img.flatten())\n\n    print(pixel_value_cnts)\n","repo_name":"Simeonedef/Galaxy-Image-Gen","sub_path":"common/image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} {"seq_id":"4798865001","text":"from flask import Blueprint, jsonify, request\nfrom workers.operator_sqlite3 import *\n\n\ncoord = Blueprint('coordenador', __name__, url_prefix='/coordenador')\n\n@coord.route('/<id>', methods=['GET', 'DELETE', 'PUT'])\n@coord.route('', methods=['GET', 'POST'])\ndef manipular_alunos(id=None):\n    coords = select_all('coordenadores')\n    if request.method == \"GET\":\n        if id == None:\n            return jsonify(coords)\n\n        for coord in coords:\n            if id == coord['id']:\n                return jsonify(coord)\n        else:\n            return jsonify({\"mensagem\": \"coordenador não cadastrado\"}), 404\n\n    if request.method == 'POST':\n        data = request.json\n        if data:\n            if 'id' in data.keys() and 'nome' in data.keys():\n                operator_sql(\"insert into coordenadores values ('{}','{}')\".format(data['id'], data['nome']))\n                return jsonify({\"mensagem\": \"coordenador 'id {} nome: {}'registrado com sucesso\".format(data['id'], data['nome'])})\n            else:\n                return jsonify({\"mensagem\": \"json invalid\"}), 400\n        else:\n            return jsonify({\"mensagem\": \"json invalid\"}), 400\n\n    if request.method == \"DELETE\":\n        for coord in coords:\n            if id == coord['id']:\n                operator_sql(\"delete from coordenadores where id='{}'\".format(id))\n                return jsonify({\"mensagem\": \"coordenador removido com sucesso!\"})\n        else:\n            return jsonify({\"mensagem\": \"coordenador não encontrado\"}), 404\n\n    if request.method == \"PUT\":\n        data = request.json\n        if data:\n            if 'id' in data.keys() and 'nome' in data.keys():\n                for coord in coords:\n                    if coord['id'] == id:\n                        operator_sql(\"update coordenadores set nome='{}', id='{}' where id='{}'\".format(data['nome'], data['id'], id))\n                        return jsonify({\"mensagem\": \"coordenador atualizado com sucesso!\"})\n                else:\n                    return jsonify({\"mensagem\": \"coordenador não encontrado\"}),404\n            else:\n                return jsonify({\"mensagem\": \"json invalid\"}), 400\n        else:\n            return jsonify({\"mensagem\": \"json invalid\"}), 
400\n\n\n","repo_name":"pratadaniel94/universiry-rest","sub_path":"modulos/coordenador.py","file_name":"coordenador.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} {"seq_id":"26736624570","text":"import socket\r\n\r\nport = 5555\r\n\r\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n\r\ns.bind((\"\",port))\r\n\r\ns.listen(1)\r\nconn, addr = s.accept()\r\n\r\nwhile 1:\r\n    data = conn.recv(1024).decode()\r\n\r\n    print(data)\r\n\r\n    message = input(\"-->\")\r\n\r\n    if message == 'q':\r\n        break\r\n    else:\r\n        conn.send(message.encode())\r\n\r\ns.close()\r\n","repo_name":"ProtulSikder/Chat-It","sub_path":"chat_server.py","file_name":"chat_server.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"37984074210","text":"# -*- coding: gb18030 -*-\nimport items\ng_items = items.instance()\n\nfrom SpellBase import *\n\nclass Spell_addRaceItem( Spell ):\n\t\"\"\"\n\t\"\"\"\n\tdef __init__( self ):\n\t\t\"\"\"\n\t\tConstructor.\n\t\t\"\"\"\n\t\tSpell.__init__( self )\n\t\tself._itemID = 0\n\t\tself._itemNum = 1\n\n\tdef init( self, dict ):\n\t\t\"\"\"\n\t\tLoad the skill configuration\n\t\t@param dict: configuration data\n\t\t@type dict: python dict\n\t\t\"\"\"\n\t\tSpell.init( self, dict )\n\t\tself._itemID = int( dict[ \"param1\" ] )\n\t\tself._itemNum = int( dict[ \"param2\" ] )\n\n\tdef receive( self, caster, receiver ):\n\t\t\"\"\"\n\t\tvirtual method.\n\t\tWhat to do when the spell reaches its target\n\t\t\"\"\"\n\t\tif not receiver.isReal():\n\t\t\treceiver.receiveOnReal( caster.id, self )\n\t\t\treturn\n\t\t\t\n\t\titem = g_items.createDynamicItem( self._itemID, self._itemNum )\n\t\tif item is None:\n\t\t\treturn\n\t\t\t\n\t\treceiver.addRaceItem( item )","repo_name":"mudsave/csol2_enities_45541","sub_path":"cell/Resource/Skills/Spell_addRaceItem.py","file_name":"Spell_addRaceItem.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"11485352216","text":"import sys\ninput = sys.stdin.readline\n\nN = int(input())\nRGB = []\nfor _ in range(N):\n    RGB.append(list(map(int, input().split())))\n\nresult = float('inf')\nfor i in range(3):\n    dp = [[float('inf'), float('inf'), float('inf')] for _ in range(N)]\n    dp[0][i] = RGB[0][i]\n\n    for j in range(1, N):\n        dp[j][0] = min(dp[j-1][1], dp[j-1][2]) + RGB[j][0]\n        dp[j][1] = min(dp[j-1][0], dp[j-1][2]) + RGB[j][1]\n        dp[j][2] = min(dp[j-1][0], dp[j-1][1]) + RGB[j][2]\n    \n    for j in range(3):\n        if i == j: continue\n        result = min(result, dp[-1][j])\n    \nprint(result)","repo_name":"ParkHoH/algorithm_test","sub_path":"baekjoon/17404_RGB거리 2.py","file_name":"17404_RGB거리 2.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"20943825364","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n\n\ndef loss_acc_curves(loss_stats: dict, acc_stats: dict):\n    \"\"\"\n    Plot loss and accuracy curves.\n    ------------------------------\n    - Input: 2 dictionaries which contain the loss and accuracy\n             values per epoch. 
Each dictionary has the keys --\n             \"train\" and \"val\".\n    - Output: Line plot.\n\n    \"\"\"\n    train_val_acc_df = (\n        pd.DataFrame.from_dict(acc_stats)\n        .reset_index()\n        .melt(id_vars=[\"index\"])\n        .rename(columns={\"index\": \"epochs\"})\n    )\n    train_val_loss_df = (\n        pd.DataFrame.from_dict(loss_stats)\n        .reset_index()\n        .melt(id_vars=[\"index\"])\n        .rename(columns={\"index\": \"epochs\"})\n    )\n\n    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(30, 10))\n    sns.lineplot(\n        data=train_val_acc_df, x=\"epochs\", y=\"value\", hue=\"variable\", ax=axes[0]\n    ).set_title(\"Train-Val Accuracy/Epoch\")\n\n    sns.lineplot(\n        data=train_val_loss_df, x=\"epochs\", y=\"value\", hue=\"variable\", ax=axes[1]\n    ).set_title(\"Train-Val Loss/Epoch\")\n\n\ndef score_report(y_true_list: list, y_pred_list: list, idx2class: dict):\n    \"\"\"\n    Generate accuracy score, classification-report, and confusion matrix.\n    ---------------------------------------------------------------------\n    - Input: The true and predicted values along with a dictionary to\n             convert idx to class.\n    - Output: Printed score report.\n    \"\"\"\n    print(f\"Test Accuracy = {accuracy_score(y_true_list, y_pred_list)}\\n\")\n    print(\"=\" * 50)\n    print(\n        f\"\\nClassification Report: \\n\\n{classification_report(y_true_list, y_pred_list)}\\n\\n\"\n    )\n    print(\"=\" * 50)\n    print(f\"\\nConfusion Matrix: \\n\\n{confusion_matrix(y_true_list, y_pred_list)}\\n\")\n\n    # plot confusion matrix\n    confusion_matrix_df = pd.DataFrame(\n        confusion_matrix(y_true_list, y_pred_list)\n    ).rename(columns=idx2class, index=idx2class)\n\n    fig, ax = plt.subplots(figsize=(7, 5))\n    sns.heatmap(confusion_matrix_df, annot=True, ax=ax).set(\n        title=\"Confusion Matrix Heatmap\"\n    )\n\n","repo_name":"ShreyJ1729/EVA6-TSAI","sub_path":"08-AdvancedTrainingConcepts/app/utils/result_stats.py","file_name":"result_stats.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} {"seq_id":"4391798117","text":"import requests\nimport json\nfrom pprint import pprint\n\n\ndef download_file_with_superheroes():\n    \"\"\"This function creates a json file in the project with information about all heroes\"\"\"\n    with open('superheroes.json', 'w') as f:\n        url = 'https://akabab.github.io/superhero-api/api/all.json'\n        resp = requests.get(url)\n        json.dump(resp.json(), f, ensure_ascii=False, indent=2)\n\n\ndef search_most_clever_hero(heroes_list):\n    \"\"\"This function prints the hero from heroes_list with the highest intelligence\"\"\"\n    with open('superheroes.json', encoding='utf-8') as f:\n        data = json.load(f)\n    my_heroes_dict = {}\n    for hero in data:\n        if hero['name'] in heroes_list:\n            my_heroes_dict[hero['name']] = hero['powerstats']['intelligence']\n    most_clever_hero = max(my_heroes_dict.items(), key=lambda x: x[1])\n    print(f'The smartest superhero is {most_clever_hero[0]}, with an intelligence score of {most_clever_hero[1]}!')\n\n\nclass YaUploader:\n    def __init__(self, token: str):\n        self.token = token\n\n    def get_headers(self):\n        return {\n            'Content-Type': 'application/json',\n            'Authorization': f'OAuth {self.token}'\n        }\n\n    def _upload_link(self, file_path: str):\n        url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'\n        headers = self.get_headers()\n        params = {'path': file_path, 'overwrite': 'true'}\n        response = requests.get(url, headers=headers, params=params)\n        return response.json()\n\n    def upload(self, file_path, filename):\n        link_dict = self._upload_link(file_path)\n        href = link_dict['href']\n        response 
= requests.put(href, data=open(filename, 'rb'))\n response.raise_for_status()\n if response.status_code == 201:\n print('Success')\n\n\ndef questions_with_tag_python():\n '''This function gets all questions from stackoverflow by last 2 days with tagged \"Python\"'''\n url = 'https://api.stackexchange.com/2.3/questions?fromdate=1663632000&todate=1663804800&order=desc&sort=activity&tagged=Python&site=stackoverflow'\n resp = requests.get(url)\n resp.raise_for_status()\n result = resp.json()\n pprint(result)\n\n\nif __name__ == '__main__':\n download_file_with_superheroes()\n search_most_clever_hero(['Hulk', 'Captain America', 'Thanos'])\n path_to_file = 'superheroes.json'\n file_name = 'superheroes.json'\n token = '123'\n uploader = YaUploader(token)\n result = uploader.upload(path_to_file, file_name)\n questions_with_tag_python()\n","repo_name":"jestlek/Netology-8-Requests","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"13795769028","text":"#!/usr/bin/python3\n\nimport gi\nimport logging\nimport os\nimport sys\nimport time\nfrom threading import Timer\n\nfrom keypress_actions import KeyPressAction, KeyReleaseAction\n\ntry:\n from xkbgroup import XKeyboard\n has_xkbgroup = True\nexcept:\n has_xkbgroup = False\n\ngi.require_version('Gtk', '3.0')\ngi.require_version('Gdk', '3.0')\nfrom gi.repository import Gtk, Gdk, Gio\ngi.require_version('IBus', '1.0')\nfrom gi.repository import IBus\n\nfrom change_keyboard import change_keyboard, bus_has_engine, get_current_engine\n\nclass TestView(Gtk.Window):\n\n def __init__(self, test_name):\n Gtk.Window.__init__(self, title=test_name)\n self.known_modifiers = {\n \"LCTRL\" : 37,\n \"RCTRL\" : 105,\n \"LALT\" : 64,\n \"RALT\" : 108,\n \"SHIFT\" : 50,\n \"ALT\" : 64,\n \"CTRL\" : 37 }\n\n self.known_keys = {\n \"K_A\" : 38,\n \"K_B\" : 56,\n \"K_C\" : 54,\n \"K_D\" : 40,\n \"K_E\" : 26,\n \"K_F\" : 41,\n \"K_G\" : 42,\n \"K_H\" : 43,\n \"K_I\" : 31,\n \"K_J\" : 44,\n \"K_K\" : 45,\n \"K_L\" : 46,\n \"K_M\" : 58,\n \"K_N\" : 57,\n \"K_O\" : 32,\n \"K_P\" : 33,\n \"K_Q\" : 24,\n \"K_R\" : 27,\n \"K_S\" : 39,\n \"K_T\" : 28,\n \"K_U\" : 30,\n \"K_V\" : 55,\n \"K_W\" : 25,\n \"K_X\" : 53,\n \"K_Y\" : 29,\n \"K_Z\" : 52,\n \"K_COLON\" : 47, # // &HBA\n \"K_EQUAL\" : 21, # // &HBB\n \"K_COMMA\" : 59, # // &HBC\n \"K_HYPHEN\" : 20, # // &HBD\n \"K_PERIOD\" : 60, # // &HBE\n \"K_SLASH\" : 61, # // &HBF\n \"K_BKQUOTE\" : 49, # // &HC0\n \"K_LBRKT\" : 34, # // &HDB\n \"K_BKSLASH\" : 51, # // &HDC\n \"K_RBRKT\" : 35, # // &HDD\n \"K_QUOTE\" : 48, # // &HDE\n \"K_oE2\" : 94, # // &HE2\n \"K_0\" : 19,\n \"K_1\" : 10,\n \"K_2\" : 11,\n \"K_3\" : 12,\n \"K_4\" : 13,\n \"K_5\" : 14,\n \"K_6\" : 15,\n \"K_7\" : 16,\n \"K_8\" : 17,\n \"K_9\" : 18,\n \"K_F1\" : 67,\n \"K_F2\" : 68,\n \"K_F3\" : 69,\n \"K_F4\" : 70,\n \"K_F5\" : 71,\n \"K_F6\" : 72,\n \"K_F7\" : 73,\n \"K_F8\" : 74,\n \"K_F9\" : 75,\n \"K_F10\" : 76,\n \"K_F11\" : 95,\n \"K_F12\" : 96,\n \"K_CAPS\" : 66,\n \"K_ENTER\" : 36,\n \"K_ESC\" : 9,\n \"K_SPACE\" : 65,\n \"K_TAB\" : 23,\n \"K_UP\" : 111,\n \"K_DOWN\" : 116,\n \"K_LEFT\" : 113,\n \"K_RIGHT\" : 114,\n \"K_HOME\" : 110,\n \"K_END\" : 115,\n \"K_BKSP\" : 22 }\n self.keys = self.context = self.expected = \"\"\n self.haspressedkeys = False\n self.test_name = test_name\n home = os.path.expanduser(\"~\")\n keyboarddir = os.path.join(home, \".local/share/keyman/test_kmx\")\n self.kmx_path = os.path.join(keyboarddir, 
self.test_name + \".kmx\")\n self.kmn_path = os.path.join(keyboarddir, self.test_name + \".kmn\")\n self.keyboard_id = \"und:\" + keyboarddir + \"/\" + self.test_name + \".kmx\"\n\n self.load_source(self.kmn_path)\n self.change_to_keyboard(self.keyboard_id, self.kmx_path)\n with open(self.test_name+\".in\", \"wt\") as f:\n f.write(self.expected)\n\n self.grid = Gtk.Grid()\n self.add(self.grid)\n self.create_textview()\n\n logging.info(\"keys %d:%s:\" % (len(self.keys), self.keys))\n logging.info(\"context %d:%s:\" % (len(self.context), self.context))\n logging.info(\"expected %d:%s:\" % (len(self.expected), self.expected))\n\n def on_focus_in(self, args, data):\n t = Timer(1.0, self.do_keypresses)\n t.start()\n\n def do_keypresses(self):\n if self.keys and not self.haspressedkeys:\n self.haspressedkeys = True\n if has_xkbgroup:\n with XKeyboard() as xkb:\n logging.info(\"xkb %d:%s:%s\", xkb.group_num, xkb.group_symbol, xkb.group_name)\n keys = self.keys.split(\"]\")\n for key in keys:\n if (key):\n key = key[1:]\n keyparts = key.split(\" \")\n mods = []\n for part in keyparts:\n if part in self.known_keys:\n mainkey = part\n elif part in self.known_modifiers:\n mods.append(part)\n logging.info(\"key is %s with modifiers %s\" % (mainkey, mods))\n keyval = self.known_keys[mainkey]\n logging.info(\"%s\", keyval)\n for modifier in mods:\n modkeypress = KeyPressAction(self.known_modifiers[modifier])\n modkeypress._keyPress(self.known_modifiers[modifier])\n\n mainkeypress = KeyPressAction(keyval)\n mainkeyrelease = KeyReleaseAction(keyval)\n mainkeypress._keyPress(keyval)\n mainkeyrelease._keyRelease(keyval)\n\n for modifier in mods:\n modkeypress = KeyReleaseAction(self.known_modifiers[modifier])\n modkeypress._keyRelease(self.known_modifiers[modifier])\n\n time.sleep(0.05)\n\n t = Timer(1.0, self.do_destroy)\n t.start()\n\n def change_to_keyboard(self, keyboard_id, kmx_path):\n logging.debug(keyboard_id)\n try:\n logging.debug(\"getting bus\")\n bus = IBus.Bus()\n logging.debug(\"getting default keyboard\")\n self.default_keyboard = get_current_engine(bus)\n logging.debug(self.default_keyboard)\n logging.debug(\"installing to ibus\")\n ibus_settings = Gio.Settings.new(\"org.freedesktop.ibus.general\")\n preload_engines = ibus_settings.get_strv(\"preload-engines\")\n logging.debug(preload_engines)\n # if bad_keyboard in preload_engines:\n # preload_engines.remove(bad_keyboard)\n if keyboard_id not in preload_engines:\n preload_engines.append(keyboard_id)\n logging.debug(preload_engines)\n ibus_settings.set_strv(\"preload-engines\", preload_engines)\n bus.preload_engines(preload_engines)\n logging.info(\"changing keyboard to %s\", keyboard_id)\n change_keyboard(bus, keyboard_id)\n except Exception as e:\n logging.debug(\"Failed to set up keyboard\")\n logging.debug(e)\n\n def reset_keyboard(self, keyboard_id):\n try:\n logging.debug(\"getting bus\")\n bus = IBus.Bus()\n logging.debug(\"setting keyboard back to default: %s\", self.default_keyboard)\n change_keyboard(bus, self.default_keyboard)\n ibus_settings = Gio.Settings.new(\"org.freedesktop.ibus.general\")\n preload_engines = ibus_settings.get_strv(\"preload-engines\")\n logging.debug(preload_engines)\n if keyboard_id in preload_engines:\n preload_engines.remove(keyboard_id)\n logging.debug(preload_engines)\n ibus_settings.set_strv(\"preload-engines\", preload_engines)\n bus.preload_engines(preload_engines)\n except Exception as e:\n logging.debug(\"Failed to reset keyboard\")\n logging.debug(e)\n\n\n def load_source(self, kmn_path):\n if 
os.path.exists(kmn_path):\n            with open(kmn_path) as kmn:\n                for line in kmn:\n                    if line.startswith(\"c keys: \"):\n                        self.keys = line[8:].rstrip()\n                    if line.startswith(\"c expected: \"):\n                        expected = line[12:].rstrip()\n                        if expected == \"\\\\b\": # beep test is sound not text\n                            self.expected = \"\"\n                        else:\n                            self.expected = expected.encode(\"utf-8\").decode('unicode-escape')\n                    if line.startswith(\"c context: \"):\n                        self.context = line[11:].rstrip()\n\n    def create_textview(self):\n        scrolledwindow = Gtk.ScrolledWindow()\n        scrolledwindow.set_hexpand(True)\n        scrolledwindow.set_vexpand(True)\n        self.grid.attach(scrolledwindow, 0, 1, 3, 1)\n\n        self.textview = Gtk.TextView()\n        self.textview.connect(\"focus-in-event\", self.on_focus_in)\n        self.textbuffer = self.textview.get_buffer()\n        self.textbuffer.set_text(self.context)\n        scrolledwindow.add(self.textview)\n\n    def do_destroy(self):\n        with open(self.test_name+\".out\", \"wt\") as f:\n            start = self.textbuffer.get_start_iter()\n            end = self.textbuffer.get_end_iter()\n            text = self.textbuffer.get_text(start, end, True)\n            logging.info(\"text buffer:%s\", text)\n            f.write(text)\n        self.reset_keyboard(self.keyboard_id)\n        Gtk.main_quit()\n\n    def on_destroy(self, args):\n        self.do_destroy()\n\n# Test view contains a single multiline edit\n\n# input test_name\n# test dir ~/.local/share/keyman/test_kmx\n# read test_name.kmn to get e.g.\n#c keys: [K_1][K_BKSP][K_2][K_BKSP][K_3][K_BKSP][K_4][K_BKSP][K_5][K_BKSP][K_6][K_BKSP]\n#c expected: aa\n#c context: \n\n# keyboard is und:~/.local/share/keyman/test_kmx/test_name.kmx\n# can the current ibus keyboard be changed from a python program?\n\n# set initial text to \"context\"\n# parse \"keys\" and input them\n# can I generate keypresses in python?\n# read output text and compare to \"expected\"\n\n\ndef main(argv):\n    if len(sys.argv) != 2:\n        logging.error(\"Wrong number of arguments: %s\", sys.argv)\n        logging.error(\"usage: test_ibus_keyman.py <test_name>\")\n        sys.exit(2)\n    if len(sys.argv[1]) == 0:\n        logging.error(\"Empty test name: %s\", sys.argv)\n        logging.error(\"usage: test_ibus_keyman.py <test_name>\")\n        sys.exit(2)\n    logging.basicConfig(level=logging.INFO, format='%(levelname)s:%(message)s')\n    # logging.basicConfig(level=logging.DEBUG, format='%(levelname)s:%(message)s')\n    w = TestView(sys.argv[1])\n    w.connect(\"destroy\", w.on_destroy)\n    w.resize(576, 324)\n    w.show_all()\n    Gtk.main()\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])","repo_name":"SabineSIL/keyman-1","sub_path":"linux/ibus-keyman/test/test_ibus_keyman.py","file_name":"test_ibus_keyman.py","file_ext":"py","file_size_in_byte":10505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"}
{"seq_id":"11947209280","text":"import os\n\nfrom oslo_concurrency import lockutils\nfrom oslo_config import cfg\nfrom oslo_config import fixture as conf_fixture\n\nfrom tempest import config\n\n\nclass ConfigFixture(conf_fixture.Config):\n\n    def __init__(self):\n        cfg.CONF([], default_config_files=[])\n        config.register_opts()\n        super(ConfigFixture, self).__init__()\n\n    def setUp(self):\n        super(ConfigFixture, self).setUp()\n        self.conf.set_default('build_interval', 10, group='compute')\n        self.conf.set_default('build_timeout', 10, group='compute')\n        self.conf.set_default('disable_ssl_certificate_validation', True,\n                              group='identity')\n        self.conf.set_default('uri', 'http://fake_uri.com/auth',\n                              group='identity')\n        self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth',\n                              group='identity')\n        self.conf.set_default('neutron', True, group='service_available')\n        
self.conf.set_default('heat', True, group='service_available')\n        if not os.path.exists(str(os.environ.get('OS_TEST_LOCK_PATH'))):\n            os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))\n        lockutils.set_defaults(\n            lock_path=str(os.environ.get('OS_TEST_LOCK_PATH')),\n        )\n        self.conf.set_default('auth_version', 'v2', group='identity')\n        for config_option in ['username', 'password', 'tenant_name']:\n            # Identity group items\n            for prefix in ['', 'alt_', 'admin_']:\n                if prefix == 'admin_':\n                    group = 'auth'\n                else:\n                    group = 'identity'\n                self.conf.set_default(prefix + config_option,\n                                      'fake_' + config_option,\n                                      group=group)\n\n\nclass FakePrivate(config.TempestConfigPrivate):\n    def __init__(self, parse_conf=True, config_path=None):\n        self._set_attrs()\n        self.lock_path = cfg.CONF.oslo_concurrency.lock_path\n","repo_name":"microsoft/LIS-Tempest","sub_path":"tempest/tests/fake_config.py","file_name":"fake_config.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
{"seq_id":"41937760946","text":"import matplotlib.pyplot as plt\n\n\n# dimensions\ndimensions = [2, 4, 8]\n\n\nuni_skyline = [5.7798, 17.3254, 278.5623] # Skyline query times\nuni_top_k_dominating = [46.852, 146.318796, 2125.019312] # Top-k dominating\nuni_top_k_skyline = [11.687, 53.26, 659.45] # Top-k skyline\n\nnorm_skyline = [5.7486, 24.3245, 298.34534] # Skyline query times\nnorm_top_k_dominating = [57.057, 202.267065, 2708.12492] # Top-k dominating\nnorm_top_k_skyline = [14.976, 68.54, 856.32] # Top-k skyline\n\n\n# plot options\nxlabel = \"Dimensions\"\nylabel = \"Execution time [s]\"\n\n# plot execution times for uniform\nplt.figure()\nplt.title(\"Uniform distribution\")\nplt.xlabel(xlabel)\nplt.ylabel(ylabel)\nplt.ylim(top=2800)\nplt.plot(dimensions, uni_skyline)\nplt.plot(dimensions, uni_top_k_dominating)\nplt.plot(dimensions, uni_top_k_skyline)\nplt.legend([\"Skyline\", \"Top-k dominating\", \"Top-k skyline\"])\n\n# plot execution times for normal\nplt.figure()\nplt.title(\"Normal distribution\")\nplt.xlabel(xlabel)\nplt.ylabel(ylabel)\nplt.ylim(top=2800)\nplt.plot(dimensions, norm_skyline)\nplt.plot(dimensions, norm_top_k_dominating)\nplt.plot(dimensions, norm_top_k_skyline)\nplt.legend([\"Skyline\", \"Top-k dominating\", \"Top-k skyline\"])\n\nplt.show()\n","repo_name":"stergiosbamp/spark-dominance-based-queries","sub_path":"data/plots/runtime_vs_dimensions.py","file_name":"runtime_vs_dimensions.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"30900338465","text":"# coding=utf-8\nimport json\nimport logging\nimport time\n\nfrom dictdiffer import diff\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom api_case.models import Case, CaseResult, MitData, SwaggerApi, CaseGroup\nfrom api_case.serializers import CaseResultSerializer, CaseListSerializer, CaseCodeSerializer, CaseGroupListSerializer\nfrom test_tools.models import xxjobMenu, Assembly_line\nfrom utils.api_response import MyResponse, jwt_token\n\nlogger = logging.getLogger(__name__)\n\n\n@api_view(['POST'])\ndef case_page(request):\n    query = Q()\n    project_id = request.data.get(\"projectId\")\n    if project_id:  # if a project id was passed\n        query &= Q(project_id=project_id)\n    exact = 
request.data.get(\"exact\") # 传了 精确匹配用例唯一的only_api, 返回已有接口\n if exact:\n query &= Q(only_api=exact)\n case_id = request.data.get(\"caseId\")\n if case_id: # 如果传了用例id\n query &= Q(id=case_id)\n name = request.data.get(\"name\")\n if name: # 如果传了用例名称\n query &= Q(name__icontains=name)\n url = request.data.get(\"url\")\n if url: # 如果传了接口路径\n query &= Q(url__icontains=url)\n create_user = request.data.get(\"create_user\")\n if create_user: # 如果传了接口路径\n query &= Q(creator_nickname__icontains=create_user)\n tag = request.data.get('tag')\n if tag: # 如果传了标签 \"1,2\"\n tag = str(tag).strip(\"'\").strip('\"').split(',')\n if len(tag) == 1:\n query &= Q(tag=tag[0])\n elif len(tag) == 2:\n query &= Q(tag=tag[0]) | Q(tag=tag[1])\n elif len(tag) == 3:\n query &= Q(tag=tag[0]) | Q(tag=tag[1]) | Q(tag=tag[2])\n modules = request.data.get(\"module\")\n if modules: # 如果传模块[[15, 90], [1]] \n query_m = Q()\n for module in modules:\n if len(module) == 1:\n print('[' + str(module[0]) + ',')\n query_m |= Q(module__icontains='[' + str(module[0]) + ',')\n else:\n query_m |= Q(module__icontains=module)\n query &= query_m\n queryset = Case.objects.filter(query).order_by('-id') # 按照用例id倒序\n p = Paginator(queryset, request.data['size'])\n page = request.data['page']\n size = request.data['size']\n total_page = p.num_pages\n total_num = p.count # 总页数\n case_data = [] if page not in p.page_range else p.page(page).object_list\n serializer = CaseListSerializer(case_data, many=True) # 单页序列化\n data = {\"code\": 10000, \"msg\": \"查询成功\",\n 'page': page, # 当前页数\n 'size': size, # 每页展示数据的数量\n 'totalNum': total_num, # 总页数\n 'totalPage': total_page, # 总数据数量\n 'data': serializer.data}\n return Response(data)\n\n\nclass CaseViewSet(ModelViewSet):\n queryset = Case.objects.all()\n serializer_class = CaseListSerializer\n\n def create(self, request, *args, **kwargs):\n if request.data.get(\"url\") and not request.data.get(\"url\").startswith('/'):\n request.data['url'] = '/' + request.data['url']\n user_name = jwt_token(request)['username']\n request.data['creator_nickname'] = user_name\n # 从流水线id 获取xxl job执行的pod_ip\n if request.data.get(\"assembly_id\"):\n try:\n assembly = Assembly_line.objects.get(id=request.data.get(\"assembly_id\"))\n request.data[\"job_podid\"] = f\" http://{assembly.popId}:9999/\" # http://10.244.17.232',\n # print(request.data[\"job_podid\"])\n except Exception as e:\n print(\"获取流水线失败\", e)\n # 没有加断言,自动加上业务断言 000000\n assert_res = request.data.get('assert_res')\n request.data['assert_res'] = assert_res if assert_res else {\"code\": \"000000\"}\n tag_list = request.data['tag']\n for i in tag_list:\n request.data['tag'] = i\n serializer = CaseCodeSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n res = serializer.data # 调序列化生成 用例code\n return Response(\n data={\"code\": 10000, \"msg\": \"用例添加成功\"},\n status=status.HTTP_201_CREATED)\n\n def update(self, request, *args, **kwargs):\n if not request.data.get(\"url\").startswith('/'):\n request.data['url'] = '/' + request.data['url']\n # 从流水线id 获取xxl job执行的pod_ip\n if request.data.get(\"assembly_id\"):\n try:\n assembly = Assembly_line.objects.get(id=request.data.get(\"assembly_id\"))\n request.data[\"job_podid\"] = f\" http://{assembly.popId}:9999/\" # http://10.244.17.232',\n # print(request.data[\"job_podid\"])\n except Exception as e:\n print(\"获取流水线失败\", e)\n else:\n Case.objects.filter(id=request.data.get(\"id\")).update(assembly_id=None)\n tag_list = request.data['tag']\n # 没有加断言,自动加上业务断言 000000\n 
assert_res = request.data.get('assert_res')\n        request.data['assert_res'] = assert_res if assert_res else {\"code\": \"000000\"}\n        num = 0\n        for i in tag_list:\n            request.data['tag'] = i\n            if num == 0:  # when changing to multiple tags, the first pass updates the existing case\n                partial = kwargs.pop('partial', False)\n                instance = self.get_object()\n                serializer = CaseCodeSerializer(instance, data=request.data, partial=partial)\n                serializer.is_valid(raise_exception=True)\n                self.perform_update(serializer)\n                if getattr(instance, '_prefetched_objects_cache', None):\n                    instance._prefetched_objects_cache = {}\n                num += 1\n            else:  # subsequent passes create new cases\n                serializer = CaseCodeSerializer(data=request.data)\n                serializer.is_valid(raise_exception=True)\n                self.perform_create(serializer)\n                res = serializer.data  # serialize to generate the case code\n        return Response(data={\"code\": 10000, \"msg\": \"case updated successfully\"})  # , 'data': serializer.data\n\n    def perform_update(self, serializer):\n        serializer.save()\n\n    def partial_update(self, request, *args, **kwargs):\n        kwargs['partial'] = True\n        return self.update(request, *args, **kwargs)\n\n    def retrieve(self, request, *args, **kwargs):\n        instance = self.get_object()\n        serializer = self.get_serializer(instance)\n        return MyResponse(serializer.data)\n\n    def destroy(self, request, *args, **kwargs):\n        instance = self.get_object()\n        self.perform_destroy(instance)\n        return Response(data={\"code\": 10000, \"msg\": \"deleted successfully\"}, status=status.HTTP_200_OK)\n\n    def perform_destroy(self, instance):\n        instance.delete()\n\n\n@api_view(['POST'])\ndef convert(request):  # convert a single mitmproxy capture into a case (from the edit view)\n    req_data = request.data\n    req_data['creator_nickname'] = jwt_token(request)['username']\n    case_id = req_data.get('case_id')\n    convert_data = dict()\n    if case_id:\n        # the user picked an existing case, so \"overwrite\" its params\n        Case.objects.filter(id=case_id, tag=1).update(single_body=req_data['single_body'])\n        convert_data[\"updated\"] = f\"MitId {str(req_data['id'])} params updated into case: {case_id}\"\n    else:  # no id was passed, the user chose to create a new case\n        req_data['source'] = \"capture\"\n        req_data['tag'] = 1\n        # req_data['name'] = req_data['only_api'] = req_data['req_method'] + req_data['url']\n        serializer = CaseCodeSerializer(data=req_data)\n        serializer.is_valid(raise_exception=True)\n        serializer.save()\n        convert_data[\"created\"] = f\"MitId {str(req_data['id'])} generated case: {str(serializer.data['id'])}\"\n\n    return Response(data={\"code\": 10000, \"msg\": \"operation successful\", \"data\": convert_data})\n\n\n@api_view(['POST'])\ndef batch_convert(request):  # batch-convert mitmproxy captures into cases\n    user_name = jwt_token(request)['username']\n    start_time = time.time()\n    mit_id_list = request.data.get('mitIdList')\n    project_id = request.data.get('project_id')\n    module = request.data.get('module')\n    # mit_data_list is the batch of request data looked up from the ids sent by the frontend\n    mit_data_list = MitData.objects.filter(id__in=mit_id_list).values(\n        \"id\", \"only_api\", \"module\", 'tag', 'req_method', 'url', 'single_body', 'assert_res')\n    add_exist_list, add_list, no_change_list = [], [], []\n    convert_data = dict()\n    for req_data in mit_data_list:\n        req_data['creator_nickname'] = user_name\n        req_data['project_id'] = project_id\n        req_data['module'] = module\n        add_exist, add, no_change = mit_convert(req_data)\n        if add_exist:\n            add_exist_list.append(add_exist)\n            convert_data[\"converted to cases\"] = add_exist_list\n        if add:\n            add_list.append(add)\n            convert_data[\"generated cases\"] = add_list\n        if no_change:\n            no_change_list.append(no_change)\n            convert_data[\"not converted - existing case already has the same param keys\"] = no_change_list\n\n    print(\"total time\", time.time() - start_time)\n    return Response(data={\"code\": 10000, \"msg\": \"operation successful\", \"data\": convert_data})\n\n\ndef mit_convert(req_data):  # shared logic for converting captures into cases\n    
req_data['source'] = \"抓包\"\n add_exist, no_change, add_id = '', '', ''\n only_api = req_data['req_method'] + req_data['url'] # 请求参数的only_api\n req_data['name'] = req_data['only_api'] = only_api # 给用例接口参数传name\n req_data['tag'] = 1\n if not isinstance(req_data['single_body'], dict):\n single_body = req_data['single_body'] = eval(\n req_data['single_body'].replace('null', 'None').replace('false', 'False'))\n # 从数据库获得 mit数据的请求参数\n else:\n single_body = req_data['single_body']\n if isinstance(req_data['single_body'], str):\n req_data['assert_res'] = eval(req_data['assert_res'].replace('null', 'None').replace('false', 'False'))\n # -- 代码复用上面的转用例逻辑 --\n case_only_api = Case.objects.filter(only_api=only_api, tag=1) # 匹配查找用例的唯一接口名\n if case_only_api: # 用例里面已有相同接口\n case_params = [] # 把获取到的用例的 参数都存到参数库\n for i in case_only_api:\n if i and i.single_body and i.single_body != '':\n logger.info('报错内容:{},类型:{}'.format(i.single_body, type(i.single_body)))\n case_params.append(eval(\n i.single_body.replace('null', 'None').replace('false', 'False').replace('true', 'True')))\n if single_body in case_params: # 如果转的接口数据 与某个用例参数一样,那就不需要转\n no_change = req_data['id'] # 记录不需要转用例的 mit id\n else: # 用例里面有该接口,但参数不一致,新增\n serializer = CaseCodeSerializer(data=req_data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n add_exist = f\"MitId {str(req_data['id'])} '生成用例: {str(serializer.data['id'])}\"\n else: # 用例里面没有该接口,新增\n serializers = CaseCodeSerializer(data=req_data)\n serializers.is_valid(raise_exception=True)\n serializers.save()\n add_id = f\"MitId {str(req_data['id'])} '生成用例: {serializers.data['id']}\"\n return add_exist, add_id, no_change\n\n\n@api_view(['POST'])\ndef swagger_convert(request): # swagger 接口 单条转换用例接口\n req_data = request.data\n req_data['creator_nickname'] = jwt_token(request)['username']\n req_data['tag'] = 1\n req_data['source'] = \"接口\"\n only_api = req_data['only_api'] # swagger参数的only_api\n single_body = req_data.get('params') # 获取请求的接口参数\n res_list = []\n case_only_api = Case.objects.filter(only_api=only_api, tag=1) # 匹配查找用例的唯一接口名\n if case_only_api: # 用例里面已有相同接口\n # print(\"查询到已有的用例数量:\", len(case_only_api), case_only_api)\n for case in case_only_api: # 循环相同接口的 多条用例\n exist_data = case.single_body\n # print(exist_data, type(exist_data), case.id)\n exist_data = eval(exist_data) if exist_data else {} # 把 数据库取出来的用例参数转成 字典\n result = list(diff(exist_data, single_body)) # 原用例参数与 新参数对比,相等时取新参数\n flag = False\n if result: # 对比后存在 差异,需要对差异进行进一步判断\n for i in result: # 循环取对比的结果\n # ('remove', '', [('orderCreateEndTime', 1650988799000), ('pageNo', 1)])\n if i[0] == 'remove': # 原有参数有被移除, 把原有用例的参数也移除\n for remove_key in i[-1]:\n exist_data.pop(remove_key[0], None) # 用例的字典数据移除该key\n # case_result[f\"用例{case.id}\"] = \"有参数被删除,全部用例换成最新参数!\"\n res_list.append(f\"用例id {case.id} {remove_key[0]}参数被移除,已移除该参数\")\n flag = True\n elif i[0] == 'add':\n for add in i[-1]: # i[-1]增加的[('11orderByField', '3'),('dd', {'1': 2, '2': 33})]\n exist_data.update({add[0]: add[1]}) # 在原有参数加上更新的内容\n res_list.append(f\"用例id {case.id} 增加参数{add[0]}:{add[1]}\")\n flag = True\n if flag: # 说明有新增或者移除的\n req_data['single_body'] = exist_data\n serializer = CaseCodeSerializer(case, data=req_data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n print(serializer.data[\"id\"])\n else: # 没有新增或者移除的\n res_list.append(f\"用例id {case.id} 与接口id {req_data['id']}参数一致,不做改动\")\n else: # 用例里面没有该接口,直接保存\n req_data['single_body'] = req_data['params'] # api参数 params给到 single_body\n serializer = 
CaseCodeSerializer(data=req_data)\n        serializer.is_valid(raise_exception=True)\n        serializer.save()\n        res_list.append(f\"interface id {str(req_data['id'])} generated case: {str(serializer.data['id'])}\")\n        SwaggerApi.objects.filter(id=req_data['id']).update(status=\"has case\")\n    case_result = {f\"interface id {req_data['id']}\": res_list} if res_list else {}\n\n    return Response(data={\"code\": 10000, \"msg\": \"operation successful\", \"data\": case_result})\n\n\n@api_view(['GET'])\ndef view_case_result(request, **kwargs):  # view the run result of a single case\n    case_id = kwargs['pk']\n    instances = CaseResult.objects.filter(case_id=case_id, case_type__in=[1, 11]).order_by('-run_time')\n    if instances:\n        serializer = CaseResultSerializer(instances[0])\n        return Response({\"code\": 10000, \"msg\": \"result fetched successfully\", \"data\": serializer.data})\n    else:\n        return Response({\"code\": 10000, \"msg\": \"no run result found for this case\"})\n\n\n@api_view(['POST'])\ndef get_case(request):\n    case_list = request.data.get(\"caseList\")  # ids of the selected single cases\n    case_group_list = request.data.get(\"caseGroupList\")  # ids of the selected case groups\n    data = dict()\n    if case_list:\n        case_data = Case.objects.filter(id__in=case_list)\n        serializer = CaseListSerializer(case_data, many=True)\n        data.update({\"case\": serializer.data})\n    if case_group_list:\n        cases_data = CaseGroup.objects.filter(id__in=case_group_list)\n        serializers = CaseGroupListSerializer(cases_data, many=True)\n        data.update({\"cases\": serializers.data})\n\n    return Response({\"code\": 10000, \"msg\": \"query successful\", \"data\": data})\n\n\ndef get_xxl_job(job_id):\n    data = xxjobMenu.objects.filter(id=job_id)\n    if data:\n        data1 = data[0]\n","repo_name":"yu874721995/test_platform","sub_path":"api_case/views/case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":16550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
{"seq_id":"7436334829","text":"\"\"\"empty message\n\nRevision ID: e3b49376ba79\nRevises: 4f40deb0ca4f\nCreate Date: 2022-04-19 22:27:58.097828\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e3b49376ba79'\ndown_revision = '4f40deb0ca4f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('recipe',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('name', sa.String(length=50), nullable=False),\n    sa.Column('image', sa.String(length=50), nullable=False),\n    sa.Column('category', sa.String(length=50), nullable=False),\n    sa.Column('cuisine', sa.String(length=50), nullable=True),\n    sa.Column('instruction', sa.String(), nullable=False),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('image'),\n    sa.UniqueConstraint('name')\n    )\n    op.create_table('ingredients',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('recipe_id', sa.Integer(), nullable=True),\n    sa.Column('ingredient', sa.String(), nullable=False),\n    sa.Column('amount', sa.String(), nullable=True),\n    sa.ForeignKeyConstraint(['recipe_id'], ['recipe.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_table('ingredients')\n    op.drop_table('recipe')\n    # ### end Alembic commands ###\n","repo_name":"NathanPerrine/Capstone---Meal-Planner-Recipe-Book","sub_path":"migrations/versions/e3b49376ba79_.py","file_name":"e3b49376ba79_.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"74117795430","text":"# MNIST (short for \"Modified National Institute of Standards and Technology\")\n# is a database of handwritten digit samples\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tensorflow import keras\nfrom keras.layers import Dense, Flatten\nfrom keras.datasets import mnist\n\n# load the data shipped with this library\n# There are 60,000 images in the training set and 10,000 in the test set. We will use these definitions:\n# x_train - digit images of the training set;\n# y_train - vector of the corresponding digit values (e.g. if the i-th image shows a 5, then y_train[i] = 5);\n# x_test - digit images of the test set;\n# y_test - vector of the corresponding digit values for the test set.\n# Each image is 28x28 pixels in grayscale\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# Now we need to answer the second question, about the network structure. There is no strict answer,\n# since the structure is chosen by the developer based on their own view of how to solve the task.\n# The general guideline: convolutional NNs have proven themselves for recognizing graphical patterns.\n# But we do not know anything about them yet, so we will use an ordinary fully connected NN with\n#\n# 28 x 28 = 784 inputs;\n# 128 neurons in the hidden layer;\n# 10 neurons in the output layer.\n# As the hidden-layer activation we pick the currently popular ReLU, and for the output neurons -\n# softmax, since we want to interpret the outputs as the probability of belonging to one digit class\n# or another.\n# The first layer must turn a 28x28-pixel image into a vector of 784 elements.\n# Keras has a special layer type for this operation - Flatten\n# The next layer is created with the already familiar Dense class, which connects all 784 inputs to all 128\n# neurons. The last layer of 10 neurons is built the same way and connected to all 128 neurons of the previous layer.\nmodel = keras.Sequential([\n    Flatten(input_shape=(28, 28, 1)),\n    Dense(128, activation='relu'),\n    Dense(10, activation='softmax')\n])\n\n# print the model structure to the console\n#print(model.summary())\n\n# The input values of the vector x must be standardized so that they lie in the range from 0 to 1\n# Here every value of the tensors x_train and x_test is divided by 255, the maximum value they can take.\n# The result is real values from 0 to 1.\nx_train = x_train / 255\nx_test = x_test / 255\n\n# We also need to prepare the correct format for the output values\n# we need a vector with a 1 at the position of the corresponding digit, since 
our NN has 10 outputs, and each output will\n# correspond to a particular digit: from 0 to 9.\n# Keras already has a function that does all of this.\ny_train_cat = keras.utils.to_categorical(y_train, 10)\ny_test_cat = keras.utils.to_categorical(y_test, 10)\n\n# Now let's choose the loss function and the optimizer for the gradient algorithm.\n# The best starting point is categorical cross-entropy:\n#\n# categorical_crossentropy\n#\n# together with the softmax activation of the output neurons. We have already set that activation\n# function, so it remains to specify this quality criterion:\nmodel.compile(optimizer='adam',\n              loss='categorical_crossentropy',\n              metrics=['accuracy'])\n\n# start training the model\nmodel.fit(x_train, y_train_cat, batch_size=32, epochs=10, validation_split=0.2)\n# batch_size = 32 is the batch size (32 images) after which the weights are adjusted\n# validation_split = 0.2 splits the training set into a training part proper and a validation part. The value 0.2 means\n# that for each epoch 20% of random images from the training\n# set are placed into the validation set. 20% is a fairly common value for a validation set\n# (it is usually chosen from the range of 10% to 30%).\n\n# Check how the network performs on the test set:\nmodel.evaluate(x_test, y_test_cat)\n# The evaluate method runs through the whole test set and computes the value of the quality criterion and the metric.\n\n# let's run recognition on some test image:\nn = 1\nx = np.expand_dims(x_test[n], axis=0)\nres = model.predict(x)\nprint(res)\n\n# We assume that the maximum value corresponds to the predicted class. In this case it is the number\n# 9.9881822e-01 at the third output, i.e. for the digit 2. To make the output easier to read, we will print\n# the index of the maximum value of this vector. 
For that we use numpy's rather convenient argmax function:\nprint(np.argmax(res))\n\n# And, finally, show this test image on the screen:\nplt.imshow(x_test[n], cmap=plt.cm.binary)\nplt.show()\n\n\n","repo_name":"DanilVelikiy/keras_figure","sub_path":"keras_vs_figure.py","file_name":"keras_vs_figure.py","file_ext":"py","file_size_in_byte":7406,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"27160740813","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nMOD_CONSTANT = 19*3*13*7*5*11*17*2\n\nclass monkey:\n    def __init__(self, items, inspection, throw_rule, p1):\n        self.items = items\n        self.inspection = inspection\n        self.throw_rule = throw_rule\n        self.inspection_count = 0\n        self.p1 = p1\n\n    def take_turn(self, other_monkeys):\n        for item in self.items:\n            item = self.inspection(item) % MOD_CONSTANT\n            self.inspection_count += 1\n            if self.p1:\n                item = item // 3\n            next_monkey = self.throw_rule(item)\n            other_monkeys[next_monkey].items.append(item)\n        self.items = []\n\n    def __str__(self):\n        return f\"{self.inspection_count}: {self.items}\"\n\n    def __repr__(self):\n        return str(self)\n\n\nif __name__ == \"__main__\":\n    p1_monkeys = []\n    p1_monkeys.append(monkey([85, 77, 77], lambda x: x*7, lambda x: 6 if x % 19 == 0 else 7, True))\n    p1_monkeys.append(monkey([80, 99], lambda x: x*11, lambda x: 3 if x % 3 == 0 else 5, True))\n    p1_monkeys.append(monkey([74, 60, 74, 63, 86, 92, 80], lambda x: x+8, lambda x: 0 if x % 13 == 0 else 6, True))\n    p1_monkeys.append(monkey([71, 58, 93, 65, 80, 68, 54, 71], lambda x: x+7, lambda x: 2 if x % 7 == 0 else 4, True))\n    p1_monkeys.append(monkey([97, 56, 79, 65, 58], lambda x: x+5, lambda x: 2 if x % 5 == 0 else 0, True))\n    p1_monkeys.append(monkey([77], lambda x: x+4, lambda x: 4 if x % 11 == 0 else 3, True))\n    p1_monkeys.append(monkey([99, 90, 84, 50], lambda x: x*x, lambda x: 7 if x % 17 == 0 else 1, True))\n    p1_monkeys.append(monkey([50, 66, 61, 92, 64, 78], lambda x: x+3, lambda x: 5 if x % 2 == 0 else 1, True))\n\n    for _ in range(20):\n        for monk in p1_monkeys:\n            monk.take_turn(p1_monkeys)\n\n    vals = sorted([m.inspection_count for m in p1_monkeys], reverse=True)\n    print(vals[0]*vals[1])\n\n\n    p2_monkeys = []\n    p2_monkeys.append(monkey([85, 77, 77], lambda x: x*7, lambda x: 6 if x % 19 == 0 else 7, False))\n    p2_monkeys.append(monkey([80, 99], lambda x: x*11, lambda x: 3 if x % 3 == 0 else 5, False))\n    p2_monkeys.append(monkey([74, 60, 74, 63, 86, 92, 80], lambda x: x+8, lambda x: 0 if x % 13 == 0 else 6, False))\n    p2_monkeys.append(monkey([71, 58, 93, 65, 80, 68, 54, 71], lambda x: x+7, lambda x: 2 if x % 7 == 0 else 4, False))\n    p2_monkeys.append(monkey([97, 56, 79, 65, 58], lambda x: x+5, lambda x: 2 if x % 5 == 0 else 0, False))\n    p2_monkeys.append(monkey([77], lambda x: x+4, lambda x: 4 if x % 11 == 0 else 3, False))\n    p2_monkeys.append(monkey([99, 90, 84, 50], lambda x: x*x, lambda x: 7 if x % 17 == 0 else 1, False))\n    p2_monkeys.append(monkey([50, 66, 61, 92, 64, 78], lambda x: x+3, lambda x: 5 if x % 2 == 0 else 1, False))\n\n    for _ in range(10_000):\n        for monk in p2_monkeys:\n            monk.take_turn(p2_monkeys)\n\n    vals = sorted([m.inspection_count for m in p2_monkeys], reverse=True)\n    print(vals[0]*vals[1])","repo_name":"TomKite57/advent_of_code_2022","sub_path":"python/day_11.py","file_name":"day_11.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
{"seq_id":"8464549","text":"from 
__future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport os\nimport shutil\n\nfrom neural_sp.bin.args_lm import parse\nfrom neural_sp.bin.plot_utils import plot_cache_weights\nfrom neural_sp.bin.train_utils import load_checkpoint\nfrom neural_sp.bin.train_utils import load_config\nfrom neural_sp.bin.train_utils import set_logger\nfrom neural_sp.datasets.lm import Dataset\nfrom neural_sp.models.lm.build import build_lm\nfrom neural_sp.utils import mkdir_join\n\n\ndef main():\n\n args = parse()\n\n # Load a conf file\n dir_name = os.path.dirname(args.recog_model[0])\n conf = load_config(os.path.join(dir_name, 'conf.yml'))\n\n # Overwrite conf\n for k, v in conf.items():\n if 'recog' not in k:\n setattr(args, k, v)\n\n # Setting for logging\n if os.path.isfile(os.path.join(args.recog_dir, 'plot.log')):\n os.remove(os.path.join(args.recog_dir, 'plot.log'))\n logger = set_logger(os.path.join(args.recog_dir, 'plot.log'),\n key='decoding', stdout=args.recog_stdout)\n\n for i, s in enumerate(args.recog_sets):\n # Load dataset\n dataset = Dataset(corpus=args.corpus,\n tsv_path=s,\n dict_path=os.path.join(dir_name, 'dict.txt'),\n wp_model=os.path.join(dir_name, 'wp.model'),\n unit=args.unit,\n batch_size=args.recog_batch_size,\n bptt=args.bptt,\n backward=args.backward,\n serialize=args.serialize,\n is_test=True)\n\n if i == 0:\n # Load the LM\n model = build_lm(args, dir_name)\n model = load_checkpoint(model, args.recog_model[0])[0]\n epoch = int(args.recog_model[0].split('-')[-1])\n\n logger.info('epoch: %d' % (epoch - 1))\n logger.info('batch size: %d' % args.recog_batch_size)\n # logger.info('recog unit: %s' % args.recog_unit)\n # logger.info('ensemble: %d' % (len(ensemble_models)))\n logger.info('BPTT: %d' % (args.bptt))\n logger.info('cache size: %d' % (args.recog_n_caches))\n logger.info('cache theta: %.3f' % (args.recog_cache_theta))\n logger.info('cache lambda: %.3f' % (args.recog_cache_lambda))\n model.cache_theta = args.recog_cache_theta\n model.cache_lambda = args.recog_cache_lambda\n\n # GPU setting\n model.cuda()\n\n assert args.recog_n_caches > 0\n save_path = mkdir_join(args.recog_dir, 'cache')\n\n # Clean directory\n if save_path is not None and os.path.isdir(save_path):\n shutil.rmtree(save_path)\n os.mkdir(save_path)\n\n hidden = None\n fig_count = 0\n toknen_count = 0\n n_tokens = args.recog_n_caches\n while True:\n ys, is_new_epoch = dataset.next()\n\n for t in range(ys.shape[1] - 1):\n loss, hidden = model(ys[:, t:t + 2], hidden, is_eval=True, n_caches=args.recog_n_caches)[:2]\n\n if len(model.cache_attn) > 0:\n if toknen_count == n_tokens:\n tokens_keys = dataset.idx2token[0](model.cache_ids[:args.recog_n_caches], return_list=True)\n tokens_query = dataset.idx2token[0](model.cache_ids[-n_tokens:], return_list=True)\n\n # Slide attention matrix\n n_keys = len(tokens_keys)\n n_queries = len(tokens_query)\n cache_probs = np.zeros((n_keys, n_queries)) # `[n_keys, n_queries]`\n mask = np.zeros((n_keys, n_queries))\n for i, aw in enumerate(model.cache_attn[-n_tokens:]):\n cache_probs[:(n_keys - n_queries + i + 1), i] = aw[0, -(n_keys - n_queries + i + 1):]\n mask[(n_keys - n_queries + i + 1):, i] = 1\n\n plot_cache_weights(\n cache_probs,\n keys=tokens_keys,\n queries=tokens_query,\n save_path=mkdir_join(save_path, str(fig_count) + '.png'),\n figsize=(40, 16),\n mask=mask)\n toknen_count = 0\n fig_count += 1\n else:\n toknen_count += 1\n\n if is_new_epoch:\n break\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"rosrad/neural_sp","sub_path":"neural_sp/bin/lm/plot_cache.py","file_name":"plot_cache.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
{"seq_id":"36335287092","text":"import pygal\nimport json\nfrom pygal.maps.world import COUNTRIES\n\n\nclass WorldPopulation():\n    \"\"\"World Population loaded from data.okfn.org population json.\"\"\"\n\n    def __init__(self, filename):\n        \"\"\"Load stats for specific year.\"\"\"\n        self.data = {}\n\n        # some countries in the json file have a different\n        # name than what is in the COUNTRIES dictionary\n        # this dictionary is used to try to complete the map\n        self.missing_countries = {\n            'ye': 'Yemen, Rep.',\n            'bo': 'Bolivia',\n            've': 'Venezuela, RB',\n            'kr': 'Korea, Rep.',\n            'kp': 'Korea, Dem. Rep.',\n            'mk': 'Macedonia, FYR',\n            'tz': 'Tanzania',\n            'mo': 'Macao SAR, China',\n            'cd': 'Congo, Dem. Rep.',\n            'cg': 'Congo, Rep.',\n            'md': 'Moldova',\n            'la': 'Lao PDR',\n            'ir': 'Iran, Islamic Rep.',\n            'hk': 'Hong Kong SAR, China',\n            'eg': 'Egypt, Arab Rep.',\n            'gm': 'Gambia, The',\n            'ly': 'Libya',\n            'kg': 'Kyrgyz Republic',\n            'vn': 'Vietnam'}\n\n        with open(filename) as f:\n            self.file_data = json.load(f)\n    \n    def filter_by_year(self, year):\n        for pop_dict in self.file_data:\n            if pop_dict['Year'] != year:\n                continue\n\n            country_code = self.get_two_digit_country_code(pop_dict['Country Name'])\n\n            if country_code:\n                self.data[country_code] = int(float(pop_dict['Value']))\n\n        return self.data\n\n    def get_two_digit_country_code(self, country):\n        for code, name in COUNTRIES.items():\n            if country == name:\n                return code\n            elif code in self.missing_countries and country == self.missing_countries[code]:\n                return code\n        return None\n\n    def print_map(self, title, filename):\n        wm = pygal.maps.world.World()\n        wm.add(title, self.data)\n        wm.render_to_file(filename)\n","repo_name":"nelsonripoll/portfolio","sub_path":"python/data_visualization/pygal_samples/population/countries.py","file_name":"countries.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"21644898595","text":"# Occurrences - from the word \"occur\": how many times a certain character or word appears.\nfrom re import M\n\n\nlist = [\"a\", \"n\", \"t\", \"a\", \"a\", \"t\", \"n\", \"n\", \"a\", \"x\", \"u\", \"g\", \"a\", \"x\", \"a\"]\nm=''.join(map(str,list))\n\nl=str(m)\nprint(m)\ni=0\nb=[]\nc=[]\ncount=0\nwhile i{epoch_len}}/{num_epochs:>{epoch_len}}] ' +\n                         f'train_loss: {train_loss:.4f} ' +\n                         f'valid_loss: {val_loss:.4f}')\n\n            print(print_msg)\n            wandb.log({\n                \"Train Loss\": epoch_loss,\n                \"custom_epoch\": epoch,\n                \"Train Accuracy\": epoch_acc,\n                \"Train error\": 100 - epoch_acc,\n                \"lr\": optimizer.param_groups[0]['lr']  # log the learning rate\n            })\n\n            train_losses = []\n            val_losses = []\n\n            early_stopping(val_loss, model)\n            if early_stopping.early_stop:\n                print(\"Early stopping\")\n                break\n\n    model.load_state_dict(torch.load('checkpoint/checkpoint.pt'))\n\n    time_elapsed = time.time() - since\n    print('Training complete in {:.0f}m {:.0f}s'.format(\n        time_elapsed // 60, time_elapsed % 60))\n\n    graph_loss(avg_train_losses, avg_val_losses)\n\n    return model_ft, avg_train_losses, avg_val_losses\n\nmodel_ft = Model(num_classes=7)\ncriterion = 
nn.CrossEntropyLoss()\n\n","repo_name":"dmswl0707/LightWeightedNetwork_for_FaceExpressionRecognition","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"73218842470","text":"import math\nimport time\n\n\nclass Gsymbol(object):\n \"\"\" implements a grammatical symbol used for cfg\n hash and equality operators are needed to create symbol set and probability map\"\"\"\n\n def __init__(self, symb, isterminal):\n self.symb = symb\n self.isterminal = isterminal\n\n def __eq__(self, other):\n return self.symb == other.symb and self.isterminal == other.isterminal\n\n def __lt__(self, other):\n return (self.isterminal, self.symb) < (other.isterminal, other.symb)\n\n def __hash__(self):\n return hash((self.isterminal, self.symb))\n\n def __repr__(self):\n return self.symb + ' ' + ('Terminal' if self.isterminal else 'Non_Terminal')\n\n\nclass Grule(object):\n \"\"\" implements a grammatical rule used for cfg\n hash and equality operators are needed to create symbol set and probability map\"\"\"\n\n def __init__(self, gsymb, expansion):\n if not isinstance(gsymb, Gsymbol) or gsymb.isterminal:\n raise Exception('Wrong input grammatical symbol')\n else:\n self.gsymb = gsymb\n self.expansion = tuple(expansion)\n\n def __eq__(self, other):\n if not isinstance(other, Grule):\n raise Exception('Cannot compare grammatical rule to object of type' + str(type(other)))\n else:\n return self.gsymb == other.gsymb and self.expansion == other.expansion\n\n def __lt__(self, other):\n return (self.gsymb,) + self.expansion < (other.gsymb,) + other.expansion\n\n def __hash__(self):\n return hash((self.gsymb,) + self.expansion)\n\n def __repr__(self):\n return self.gsymb.symb + ' -> ' + ', '.join([symbol.symb for symbol in self.expansion])\n\n def is_chomsky_normal_form(self):\n \"\"\"\n Used only to make sure the resulting pcfg is in CNF\n \n Returns\n ----------\n bool: True if the rule is in chomsky normal form\n \"\"\"\n if len(self.expansion) == 1 and self.expansion[0].isterminal:\n return True\n elif len(self.expansion) == 2 and not self.expansion[0].isterminal and \\\n not self.expansion[1].isterminal:\n return True\n else:\n return False\n\n def is_unit_rule(self):\n return len(self.expansion) == 1 and not self.expansion[0].isterminal\n\n def copy(self):\n return Grule(self.gsymb, self.expansion)\n\n\ndef gsymb_from_string(symb):\n if '(' in symb:\n symb = symb.split('-')[0] # remove hyphen from NT symbols\n return Gsymbol(symb.replace('(', ''), False)\n else:\n return Gsymbol(symb.replace(')', ''), True)\n\n\ndef level_list(line):\n lvl_list = []\n level = 0\n symbols = line.split(' ')\n for symbol in symbols:\n lvl_list.append(level)\n if '(' in symbol:\n level += 1\n else:\n level -= symbol.count(')')\n return lvl_list\n\n\ndef remove_unit_rule(rule_list):\n \"\"\"\n remove_nt_to_nt\n A->B, B->C, C->D (D != NT)\n\n and replace with: A\n\n Returns:\n new rule_list with no\n \"\"\"\n\n new_rule_list = list()\n map_nt_to_nt = {}\n\n # For each Grule\n for rule in rule_list:\n new_expansion = rule.expansion\n while len(new_expansion) == 1 and not new_expansion[0].isterminal:\n # Dig until find a terminal or more than 2 non-terminals\n for rule2 in rule_list:\n if rule2.gsymb == new_expansion[0]:\n if rule2.gsymb not in map_nt_to_nt.keys():\n map_nt_to_nt[rule2.gsymb] = set()\n map_nt_to_nt[rule2.gsymb].add(rule.gsymb)\n new_expansion = rule2.expansion\n break\n 
new_rule_list.append(Grule(rule.gsymb, new_expansion))\n\n # Append all missing Grammatical rules\n # Initial length of the list\n n_init = len(new_rule_list)\n for i in range(n_init):\n symbol_to_map = new_rule_list[i].gsymb\n if symbol_to_map in map_nt_to_nt:\n for gsymb in map_nt_to_nt[symbol_to_map]:\n new_rule_list.append(Grule(gsymb, new_rule_list[i].expansion))\n\n return new_rule_list\n\n\ndef rules_from_line(line):\n tree = dict()\n levels = level_list(line)\n\n stack = [len(levels) - 1]\n for i in range(len(levels) - 2, -1, -1):\n while stack and levels[stack[-1]] == levels[i] + 1:\n if i not in tree:\n tree[i] = []\n tree[i].append(stack.pop())\n stack.append(i)\n\n symbols = [gsymb_from_string(symb) for symb in line.split(' ')]\n\n rule_list = []\n for root, expansion in tree.items():\n root = symbols[root]\n expansion = [symbols[symb] for symb in expansion]\n rule_list.append(Grule(root, expansion))\n return remove_unit_rule(rule_list)\n\n\nclass PCFG(object):\n\n def __init__(self, corpus, verbose=True, to_lower=True):\n \"\"\"initialise a new probabilistic CFG from a training corpus\n args\n ------------\n corpus: iterable bracketed parsed sentences\n to_lower: bool if true ignores capitals (useful to decrease lexicon size)\n verbose: bool level of verbosity\n \"\"\"\n self.verbose = verbose\n self.to_lower = to_lower\n self.count_rules = dict()\n\n if self.verbose:\n print('reading ', len(corpus), ' lines')\n for line in corpus:\n\n if to_lower:\n line = line.lower()\n\n for rule in rules_from_line(line):\n if rule in self.count_rules:\n self.count_rules[rule] += 1\n else:\n self.count_rules[rule] = 1\n\n self.normaliser = None\n self.log_probabilities = None\n self.lexicon = None\n self.terminals = None\n self.reverse_table = None\n\n self.update_from_counter()\n\n def __str__(self):\n representation = 'Grammar of size '+ str(len(self.count_rules)) + '\\n Rules and log probabilities: \\n'\n for rule, log_proba in self.log_probabilities.items():\n representation += str(rule) + ' : ' + str(round(log_proba, 3)) + '\\n'\n return representation\n\n def update_from_counter(self):\n left_hand_side = set([rule.gsymb for rule in self.count_rules.keys()])\n self.normaliser = {symbol: sum(count for rule, count in self.count_rules.items() if rule.gsymb == symbol)\n for symbol in left_hand_side}\n\n self.log_probabilities = dict()\n for rule in self.count_rules:\n self.log_probabilities[rule] = math.log(self.count_rules[rule] / float(self.normaliser[rule.gsymb]))\n\n self.lexicon = [[rule.gsymb] + list(rule.expansion) for rule in self.count_rules]\n self.lexicon = set([symbol for rule in self.lexicon for symbol in rule])\n self.terminals = set([gsymb for gsymb in self.lexicon if gsymb.isterminal])\n\n # reverse the rules to be able to run CYK algorithm\n self.reverse_table = dict()\n for rule, log_proba in self.log_probabilities.items():\n if rule.expansion in self.reverse_table:\n self.reverse_table[rule.expansion].append((rule, log_proba))\n else:\n self.reverse_table[rule.expansion] = [(rule, log_proba)]\n\n def chomsky_transform(self):\n \"\"\"\n Transforms the grammar into Chomsky Normal Form and keeping the same probabilities\n\n We assume the root symbol never appears on the Right Hand Side\n\n Returns\n ----------\n None\n \"\"\"\n\n if self.verbose:\n print('transforming grammar of size', len(self.count_rules))\n print('Eliminate rules with non-solitary terminals and merging.')\n start = time.time()\n\n count_rules_copy = set(self.count_rules.keys()).copy()\n # replace the 
internal T with NT\n for rule in count_rules_copy:\n if len(rule.expansion) > 1:\n # create copy of the rule to modify\n modified_rule = Grule(rule.gsymb, rule.expansion)\n\n for i, gsymb in enumerate(rule.expansion):\n\n if gsymb.isterminal:\n new_symb = Gsymbol(\"NT_\" + gsymb.symb, False)\n new_rule = Grule(new_symb, [gsymb])\n self.count_rules[new_rule] = 1\n\n # Replace old terminal by new non-terminal\n new_expansion = list(modified_rule.expansion)\n new_expansion[i] = new_symb\n modified_rule.expansion = new_expansion\n\n index = len(rule.expansion) - 1\n new_count = self.count_rules.pop(rule)\n while index > 1:\n # New key is the concatenation of the two keys\n new_key = modified_rule.expansion[index - 1].symb + \"_\" + modified_rule.expansion[index].symb\n new_symb = Gsymbol(new_key, False)\n\n # Make new transition\n new_rule = Grule(new_symb, [modified_rule.expansion[index - 1], modified_rule.expansion[index]])\n\n new_expansion = list(modified_rule.expansion)\n new_expansion = new_expansion[:-2]\n new_expansion.append(new_symb)\n modified_rule.expansion = tuple(new_expansion)\n\n if new_rule in self.count_rules:\n self.count_rules[new_rule] += new_count\n else:\n self.count_rules[new_rule] = new_count\n index -= 1\n self.count_rules[modified_rule] = new_count\n\n if self.verbose:\n print('Updating probabilities')\n\n self.update_from_counter()\n\n if self.verbose:\n print('Grammar transformed into CNF took ', time.time() - start, ' s')\n print('New Grammar size ', len(self.count_rules))\n","repo_name":"Bronzekorean/CYK_Parser","sub_path":"grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":9990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"37645128889","text":"import numpy as np\n\n\ndef parse_file(path='/path/to/p081_matrix.txt'):\n f = open(path)\n num_strings = [line.strip('\\n').split(',') for line in f.readlines()]\n return np.array([list(map(int, line)) for line in num_strings])\n\n\ndef flatten(arr):\n return [i for sublist in arr for i in sublist]\n\n\ndef generate_diagonal_iteration_indices(size):\n \"\"\"Return the indices used to iterate from the bottom\n right corner of a matrix to the top left in rows extending\n from the bottom right corner\n\n Example:\n |0, 1| <- matrix of size 2\n |2, 3|\n\n >>> generate_diagonal_iteration_indices(2)\n [(1, 1), (0, 1), (1, 0), (0, 0)]\n \"\"\"\n\n result = []\n\n for index in range(size - 1, -1, -1):\n indices = range(index, size)\n result.append(list(zip(indices, reversed(indices))))\n\n for index in range(size - 2, -1, -1):\n indices = range(index, -1, -1)\n result.append(list(zip(indices, reversed(indices))))\n\n return flatten(result)\n\n\ndef find_minimum_distance_matrix(matrix):\n size = len(matrix)\n distance_matrix = np.zeros(matrix.shape)\n\n print(generate_diagonal_iteration_indices(size)[:10])\n for x, y in generate_diagonal_iteration_indices(size):\n distance = matrix[x, y]\n\n if x == size - 1 and y == size - 1:\n distance += 0\n elif x == size - 1:\n distance += distance_matrix[x, y+1]\n elif y == size - 1:\n distance += distance_matrix[x+1, y]\n else:\n distance += min(distance_matrix[x+1, y], distance_matrix[x, y+1])\n\n distance_matrix[x, y] = distance\n\n return distance_matrix\n\n\ndef main():\n matrix = parse_file()\n min_dist_matrix = np.zeros(matrix.shape)\n distance_matrix = find_minimum_distance_matrix(matrix)\n\n print(distance_matrix[0, 0])\n\n\nif __name__ == \"__main__\":\n 
main()\n\n","repo_name":"stevenschmatz/project-euler","sub_path":"problem-081.py","file_name":"problem-081.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"70487918310","text":"import math\nimport bisect\n\nclass Solution:\n    def count(self, x):\n        ret = 0\n        while x > 0:\n            if x % 2 == 1:\n                ret += 1\n            x //= 2\n        return ret\n    \n    def countExcellentPairs(self, nums, k: int) -> int:\n        nums = list(set(nums))\n        n = len(nums)\n        bitCount = n * [0]\n        for i in range(n):\n            bitCount[i] = self.count(nums[i])\n        bitCount.sort()\n        ans = 0\n        for i in range(n):\n            ans += n-bisect.bisect_left(bitCount, k-bitCount[i])\n        return ans","repo_name":"ereminiu/leetcode_archive","sub_path":"solutions/task_2354.py","file_name":"task_2354.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"30979271322","text":"from math import radians, cos, sin, asin, sqrt\n\n#https://github.com/mapado/haversine/blob/master/tests/test_haversine.py\nclass Distance:\n\n    def __init__(self, earth, office):\n        self.earth = earth\n        self.office = office\n    #Returns distance in KMs\n    def get_km(self,la,lo):\n\n        # unpack latitude/longitude\n        lat1, lng1 = self.office\n        lat2 = float(la)\n        lng2 = float(lo)\n\n        #Validating ranges https://docs.mapbox.com/help/glossary/lat-lon/#\n        if not (-90.0 <= lat2 <= 90.0):\n            raise Exception('Latitude out of range')\n\n        if not (-180.0 <= lng2 <= 180.0):\n            raise Exception('Longitude out of range')\n\n        # convert all latitudes/longitudes from decimal degrees to radians\n        lat1 = radians(lat1)\n        lng1 = radians(lng1)\n        lat2 = radians(lat2)\n        lng2 = radians(lng2)\n\n        # calculate haversine\n        lat = lat2 - lat1\n        lng = lng2 - lng1\n        d = sin(lat * 0.5) ** 2 + cos(lat1) * cos(lat2) * sin(lng * 0.5) ** 2\n\n        return int(2 * self.earth * asin(sqrt(d)))\n\n","repo_name":"ArtWachowski/hometest","sub_path":"distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"15382591959","text":"from .node import Node\nfrom implementation.customer import Customer\n\n#Implementation of a Binary Search Tree for fast addition and lookup of Customers\n#Each node in the BST holds its own Customer data, based on Customer id\n#Customer ID is used to structure the nodes in the tree, we use the classic version\n#with smaller nodes on the left side of the root node and larger nodes on the right side\nclass BST:\n    def __init__(self):\n        self.root = None\n        self.nodeCount = 0\n\n    def insert(self, cust):\n        if self.root is None:\n            self.root = Node(cust)\n        else:\n            self.insert_node(self.root, cust)\n        self.nodeCount += 1\n\n    def insert_node(self, current_node, cust):\n        if cust.id < current_node.val:\n            if current_node.left_child:\n                self.insert_node(current_node.left_child, cust)\n            else:\n                current_node.left_child = Node(cust)\n        elif cust.id > current_node.val:\n            if current_node.right_child:\n                self.insert_node(current_node.right_child, cust)\n            else:\n                current_node.right_child = Node(cust)\n\n    #Display all the nodes in the BST, in ascending order of their ID\n    def inorder_traversal(self, node):\n        customer_list = []\n        if node:\n            customer_list = self.inorder_traversal(node.left_child)\n            customer_list.append(node.customer)\n            customer_list = customer_list + self.inorder_traversal(node.right_child)\n        return 
customer_list\n","repo_name":"GACiobanita/Customer-Distance-Test","sub_path":"implementation/algorithms/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"43191825496","text":"# convert a decimal number to binary using Python\n\nnumero = int(input(\"Enter the decimal number: \"))\nlista = []\nprint(f\"{numero} in binary is: \", end=\"\")\n\nwhile (numero != 0):\n    lista.append(numero%2)\n    numero = numero//2\n\nfor i in lista[::-1]: # [::-1] reverses the list\n    print(i, end=\"\")","repo_name":"Larabsg/estudos","sub_path":"Python/binario.py","file_name":"binario.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"12209029673","text":"from flask import Flask, Blueprint, request, render_template, make_response, jsonify, redirect, url_for, session\nfrom flask_login import login_user, current_user, logout_user\n\nfrom blog_control.user_mgmt import *\nfrom blog_control.session_mgmt import *\n\nimport datetime\n\nblog_abtest = Blueprint('blog', __name__)\n\n\n@blog_abtest.route('/set_email', methods=['GET','POST'])\ndef set_email():\n    if request.method == 'GET':\n        #print('set_email : ', request.headers)\n        print('set_email : ', request.args.get('user_email'))\n        return redirect(url_for('blog.blog_fullstack')) # 1\n    else:\n        #print('set_email : ', request.headers)\n        #print('set_email : ', request.get_json()) # when the content type is application/json\n        #print('set_email : ', request.form['user_email'])\n        user = User.create(request.form['user_email'], request.form['blog_id'])\n        login_user(user, remember=True, duration=datetime.timedelta(days=365))\n        \n        return redirect(url_for('blog.blog_fullstack'))\n        # return redirect('/blog/test') # 2\n        # use whichever of 1 and 2 is more convenient and appropriate\n        # return make_response(jsonify(SUCCESS=True), 200)\n\n@blog_abtest.route('/logout')\ndef logout():\n    User.delete(current_user.id)\n    logout_user()\n    return redirect(url_for('blog.blog_fullstack'))\n    \n@blog_abtest.route('/blog_fullstack')\ndef blog_fullstack():\n    if current_user.is_authenticated:\n        webPageName = BlogSession.get_blog_page(current_user.blog_id) # passing the user's blog_id to get_blog_page() pins the page to A or B\n        # lets us track how often the page was visited\n        BlogSession.save_session_info(\n            session['client_id'], current_user.user_email, webPageName\n        )\n        return render_template(webPageName, user_email=current_user.user_email)\n    else:\n        webPageName = BlogSession.get_blog_page()\n        BlogSession.save_session_info(\n            session['client_id'],'anonymous', webPageName\n        )\n        return render_template(webPageName)","repo_name":"crescentfull/selfStudy_code","sub_path":"TIL/framework/flask/inflearn/flask_ABtest_practice/blog_view/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"22670286248","text":"\"\"\"\nSpace Travel Game\n\nA simple text adventure written for a refactoring tutorial.\n\"\"\"\nfrom text_en import TEXT\n\n\n#\n# Puzzle functions\n#\ndef buy_engine(flags):\n    if not 'engines' in flags:\n        print(TEXT[\"HYPERDRIVE_SHOPPING_QUESTION\"])\n        if input() == \"yes\":\n            if 'credits' in flags:\n                print(TEXT[\"HYPERDRIVE_SHOPPING_SUCCESS\"])\n                flags.add('engines')\n            else:\n                print(TEXT[\"HYPERDRIVE_TOO_EXPENSIVE\"])\n\n\ndef quiz_show(flags):\n    if not 'credits' in flags:\n        
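# the quiz is only offered until the player has won credits\n        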
print(TEXT[\"SIRIUS_QUIZ_QUESTION\"])\n answer = input()\n if answer == \"2\":\n print(TEXT[\"SIRIUS_QUIZ_CORRECT\"])\n flags.add('credits')\n else:\n print(TEXT[\"SIRIUS_QUIZ_INCORRECT\"])\n\n\ndef hire_copilot(flags):\n if 'engines' in flags and not 'copilot' in flags:\n print(TEXT[\"ORION_HIRE_COPILOT_QUESTION\"])\n if input() == \"yes\":\n flags.add('copilot')\n else:\n print(TEXT[\"ORION_NOTHING_GOING_ON\"])\n\n\ndef examine_black_hole(flags):\n if input(TEXT[\"BLACK_HOLE_EXAMINE_QUESTION\"]) == \"yes\":\n if 'engines' in flags and 'copilot' in flags:\n print(TEXT[\"BLACK_HOLE_COPILOT_SAVES_YOU\"])\n\n print(TEXT[\"ORACLE_QUESTION\"])\n answer = input()\n if answer == \"42\":\n print(TEXT[\"ORACLE_CORRECT\"])\n flags.add('game_end')\n print(TEXT[\"END_CREDITS\"]) # skips last move \n else:\n print(TEXT[\"ORACLE_INCORRECT\"])\n flags.remove('engines')\n else:\n print(TEXT[\"BLACK_HOLE_CRUNCHED\"])\n flags.add('game_end')\n\n\nclass Planet:\n\n def __init__(self, name, destinations, puzzle=None):\n self.name = name\n self.description = TEXT[name.upper() + '_DESCRIPTION']\n self.destinations = destinations\n self.puzzle = puzzle\n\n def visit(self, flags):\n print(self.description)\n if self.puzzle:\n self.puzzle(flags)\n\n def display_destinations(self):\n print(\"\\nWhere do you want to travel?\")\n position = 1\n for d in self.destinations:\n print(f\"[{position}] {d}\")\n position += 1\n\n\nPLANETS = {\n 'earth': Planet('earth', ['centauri', 'sirius']),\n 'sirius': Planet('sirius', [\"orion\", \"earth\", \"black_hole\"],quiz_show),\n 'orion': Planet('orion', [\"centauri\", \"sirius\"], hire_copilot),\n 'centauri': Planet('centauri', ['earth', 'orion'], buy_engine),\n 'black_hole': Planet('black_hole', ['sirius'], examine_black_hole)\n}\n\n\ndef display_inventory(flags):\n print(TEXT[\"BAR\"])\n if 'credits' in flags:\n print(\"You have plenty of stellar credits.\")\n if 'engines' in flags:\n print(\"You have a brand new next-gen hyperdrive.\")\n if 'copilot' in flags:\n print(\"A furry tech-savvy copilot is on board.\")\n\n\n\ndef select_planet(destinations):\n # choose the next planet\n travel_to = None\n while travel_to not in destinations:\n text = input()\n try:\n index = int(text)\n travel_to = destinations[index - 1]\n except ValueError:\n print(\"please enter a number\")\n except IndexError:\n print(f\"please enter 1-{len(destinations)}\")\n return PLANETS[travel_to]\n\n\n\ndef travel():\n\n print(TEXT[\"OPENING_MESSAGE\"])\n flags = set()\n\n planet = PLANETS[\"earth\"]\n planet.visit(flags)\n\n while not 'game_end' in flags:\n planet.display_destinations()\n planet = select_planet(planet.destinations)\n display_inventory(flags)\n planet.visit(flags)\n\n\nif __name__ == \"__main__\":\n travel()\n","repo_name":"krother/refactoring_tutorial","sub_path":"advanced/10-final-cleanup/space_game.py","file_name":"space_game.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"15319818208","text":"from typing import List\n\nclass Solution:\n def summaryRanges(self, nums: List[int]) -> List[str]:\n if not nums:\n return nums\n result = [] # List to store summary ranges\n prev = nums[0] # Initialize prev with the first element\n start = 0 # Keep track of the starting index of a potential summary range\n n = len(nums) # Length of the input list\n\n # Iterate through the input list starting from the second element\n for i in range(1, n):\n if nums[i] - prev == 1: # Check if the current 
element is consecutive\n                prev = nums[i]  # Update prev and continue to the next iteration\n                continue\n            else:\n                if i - start > 1:  # Check if the potential range has more than one element\n                    result.append(str(nums[start]) + \"->\" + str(nums[i - 1]))  # Add summary range to the result\n                else:\n                    result.append(str(nums[start]))  # Add single element to the result\n                start = i  # Update start to the current index\n                prev = nums[i]  # Update prev to the current element\n\n        # Check for a remaining potential range after the last element\n        if n - start > 1:\n            result.append(str(nums[start]) + \"->\" + str(nums[-1]))  # Add summary range to the result\n        else:\n            result.append(str(nums[start]))  # Add single element to the result\n\n        return result  # Return the list of summary ranges\n","repo_name":"Niivas/Problem-Solving","sub_path":"0228-summary-ranges/0228-summary-ranges.py","file_name":"0228-summary-ranges.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"9869006555","text":"#!/usr/bin/python3\n\"\"\"\nPrepare the NuGet convention-based folder for the kobold-layer clr package.\n\"\"\"\n__author__ = \"Maarten Tegelaers\"\n__copyright__ = \"Copyright 2020\"\n__version__ = \"0.1\"\n__maintainer__ = \"Maarten Tegelaers\"\n__status__ = \"Development\"\n\nimport argparse\nfrom pathlib import Path\nimport shutil\n\n\nIMG_FOLDER_NAME = \"images\"\n\nRELATIVE_LIB_PATH = Path(\"lib\") / \"netcoreapp3.1\"\nRELATIVE_NATIVE_PATH = Path(\"native\") / \"x64\"\nRELATIVE_BUILD_PATH = Path(\"build\")\n\n\ndef copy_nuget_data(repo_root: Path, nuget_target_folder: Path) -> None:\n    '''\n    Copy the nuget data from the repo_path/NuGet to the nuget_target_folder\n    \n    :param repo_root: The path to the repository root\n    :type repo_root: Path\n    :param nuget_target_folder: The nuget package folder path\n    :type nuget_target_folder: Path\n    '''\n    img_folder = nuget_target_folder / Path(IMG_FOLDER_NAME)\n    img_folder.mkdir()\n\n    build_folder = nuget_target_folder / RELATIVE_BUILD_PATH\n    build_folder.mkdir(parents=True)\n\n    nuget_data_folder = repo_root / \"NuGet\"\n    \n    shutil.copy(nuget_data_folder / \"icon.png\", img_folder)\n    shutil.copy(nuget_data_folder / \"kobold-layer.nuspec\", nuget_target_folder)\n    shutil.copy(nuget_data_folder / \"BeardedPlatypus.kobold-layer.targets\", build_folder)\n\n\ndef prepare_folder_structure(nuget_target_folder: Path) -> None:\n    '''\n    Construct the expected NuGet convention-based directory at the\n    specified nuget_target_folder.\n    \n    :param nuget_target_folder: The nuget package folder path\n    :type nuget_target_folder: Path\n    '''\n    lib_folder = nuget_target_folder / RELATIVE_LIB_PATH\n    lib_folder.mkdir(parents=True)\n\n    native_folder = nuget_target_folder / RELATIVE_NATIVE_PATH\n    native_folder.mkdir(parents=True)\n\n\ndef copy_native_dlls(bin_folder, nuget_target_folder: Path) -> None:\n    '''\n    Copy the native dlls from the bin_folder to the nuget_target_folder\n    \n    :param bin_folder: The path to the bin folder\n    :type bin_folder: Path\n    :param nuget_target_folder: The nuget package folder path\n    :type nuget_target_folder: Path\n    '''\n    # The list of dlls is currently quite small, as such we just keep it \n    # explicitly. 
This might need to be refactored in the future if this list\n    # grows further.\n    native_dlls = [\"Ijwhost.dll\", \"libpng16.dll\", \"SDL2.dll\", \"SDL2_image.dll\", \"zlib1.dll\"]\n\n    for dll in native_dlls:\n        dll_path = bin_folder / dll\n        shutil.copy(dll_path, nuget_target_folder / RELATIVE_NATIVE_PATH)\n\n\ndef copy_managed_dlls(bin_folder: Path, nuget_target_folder: Path) -> None:\n    '''\n    Copy the managed dlls from the bin_folder to the nuget_target_folder\n    \n    :param bin_folder: The path to the bin folder\n    :type bin_folder: Path\n    :param nuget_target_folder: The nuget package folder path\n    :type nuget_target_folder: Path\n    '''\n    managed_dlls = bin_folder.glob(\"kobold-layer.clr.*\")\n    for dll_path in managed_dlls:\n        shutil.copy(dll_path, nuget_target_folder / RELATIVE_LIB_PATH)\n\n\ndef run(repo_root: Path, nuget_target_folder: Path) -> None:\n    '''\n    Prepare the nuget convention-based folder from the (build) repository.\n\n    :param repo_root: The path to the repository root\n    :type repo_root: Path\n    :param nuget_target_folder: The nuget package folder path\n    :type nuget_target_folder: Path\n    '''\n    nuget_target_folder.mkdir(parents=True)\n    \n    copy_nuget_data(repo_root, nuget_target_folder)\n    prepare_folder_structure(nuget_target_folder)\n\n    bin_folder = repo_root / \"x64\" / \"Release\" \n    copy_native_dlls(bin_folder, nuget_target_folder)\n    copy_managed_dlls(bin_folder, nuget_target_folder)\n\n\ndef get_args():\n    \"\"\"\n    Parses and returns the arguments.\n    \"\"\"\n    parser = argparse.ArgumentParser(prog=\"Prepare the kobold-layer convention-based NuGet folder.\")\n    parser.add_argument(\"repository_root\", \n                        help=\"Path to the repository root from which the different paths are resolved.\")\n    parser.add_argument(\"output_folder\", help=\"Path in which the NuGet convention based folder will be created.\")\n\n    return parser.parse_args()\n\nif __name__ == \"__main__\":\n    args = get_args()\n    run(Path(args.repository_root), Path(args.output_folder))\n","repo_name":"BeardedPlatypus/kobold-layer","sub_path":"tools/prepare_nuget_convention_folder.py","file_name":"prepare_nuget_convention_folder.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"71366394789","text":"import itertools\nimport numpy as np\nimport time\nfrom utils import cos_sim,get_rate_matrix\nfrom tqdm import tqdm\n\nstart = time.time()\n\n# settings\ncore_num = 8\nk = 5\npredict_user_id = 2\n\n# process 0 splits the work area, sends it to each worker process, collects the similarity results, and predicts from them\nrate_matrix, user_id2row_num, row_num2user_id, item_id2column_num, column_num2item_id = get_rate_matrix()\n\n# enumerate all user-user combinations\nrow_list = [i for i in range(len(row_num2user_id))]\ncomb = [c for c in itertools.combinations(row_list, 2)]\n\n# compute the cosine similarities\nsim_list = []\nfor c in tqdm(comb):\n    sim_dict = {}\n    sim_dict[\"comb\"] = c\n    sim_dict[\"sim\"] = cos_sim(rate_matrix[c[0]], rate_matrix[c[1]])\n    sim_list.append(sim_dict)\n\n# build a user-by-user matrix holding the similarity results\nsim_matrix = np.zeros((len(user_id2row_num), len(user_id2row_num)))\nfor s in sim_list:\n    sim_matrix[s[\"comb\"][0], s[\"comb\"][1]] = s[\"sim\"]\n\n# build the list of the top-k most similar user IDs\npredict_row_num = user_id2row_num[predict_user_id]\ntop_k_rows = np.argsort(sim_matrix[predict_row_num])[::-1][:k]\n\ntopk_mean_ratings = np.mean(rate_matrix[top_k_rows, :], axis=0)\nfor i in np.argsort(topk_mean_ratings)[::-1]:\n    if rate_matrix[predict_row_num, i] == 0:\n        print('---result---')\n        print(f\"itemid:{column_num2item_id[i]},predicted_score:{topk_mean_ratings[i]}\")\n        
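# stop at the first unrated item: it is the highest-scoring recommendation\n        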
break\n\n\nelapsed_time = time.time() - start\nprint(f\"elapsed_time:{elapsed_time}[sec]\")\n","repo_name":"canonrock16/jaist-sub-thema","sub_path":"single_collaborative_filtering.py","file_name":"single_collaborative_filtering.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"9675202813","text":"import config.db_config as db\n\ndb_conn = db.connect_db()\n\ndesc_kit = '';cod_p = ''\ndescr =''; qtd = ''; prc = ''\n\nvalues = ''\n\nquery = \"\"\" query \"\"\"\n\ndef kit(kit): \n    values = ''  # local accumulator; without this the += below reads an unbound local\n    for linha in kit:\n        desc_kit = linha[2:11]\n        cod_p = linha[12:18]\n        descr = linha[19:48]\n        qtd = linha[49:55]\n        prc = linha[56:62]\n\n\n        value = (\"('\" + desc_kit +\"', '\" + cod_p + \"', '\"+\n                 descr + \"', '\" + qtd + \"', '\" + prc + \"'),\")\n        \n        values += value\n        \n        desc_kit = '';cod_p = '';descr =''; qtd = ''; prc = ''\n\n    values = values[:-1]\n\n    db.execute_query(db_conn, query)\n\n    db.disconnect_db(db_conn)\n\n","repo_name":"mateusgomes125/Integration-file","sub_path":"registros/kit.py","file_name":"kit.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34838189116","text":"from slackbot.bot import Bot\nfrom slackbot.bot import respond_to\nfrom slackbot.bot import listen_to\nimport re\nimport sqlite3\nfrom sql import OriginalSQLLite3\n\n\ndef main():\n    slack = Bot()\n    slack.run()\n\n\n@listen_to('show (.*) (.*)')\ndef show(message, society, name):\n    con = sqlite3.connect('./DB/SQL.db')\n    cur = con.cursor()\n    cur.execute(\n        \"SELECT * FROM {} where name like '%{}%'\".format(society, name))\n    array = [[i[1], i[2], i[3], i[4], i[5]] for i in cur]\n    con.close()\n    print(array)\n    message.send(\"Date: \" + str(array[0][0])+\"\\n\"+\"Conference: \" + str(array[0][1])+\"\\n\"+\"Submission deadline: \"\n                 + str(array[0][2])+\"\\n\"+\"Registration deadline: \" + str(array[0][3])+\"\\n\"+\"Venue: \" + str(array[0][4]))\n\n\n@listen_to('update (.*)')\ndef update_data(message, date):\n    message.reply('Here is {}'.format(date))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"kueika/scraping_ipsj_calendar","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2221481912","text":"import os\nimport sys\nimport pprint\nimport json\nimport torch\nimport datetime\nimport numpy as np\n\n\nfrom loguru import logger\nfrom numbers import Number\nfrom test_cam import val_loc_one_epoch\nfrom scm_nbdev.models.deit import *\nfrom scm_nbdev.models.conformer import *\nfrom scm_nbdev.utils import mkdir, Logger\nfrom scm_nbdev.cams_deit import evaluate_cls_loc\nfrom scm_nbdev.config.default import config as cfg\nfrom scm_nbdev.core.functions import prepare_env\nfrom scm_nbdev.core.lr_scheduler import LRScheduler\nfrom scm_nbdev.config.default import cfg_from_list, cfg_from_file, update_config\nfrom scm_nbdev.core.engine import create_data_loader, AverageMeter, accuracy, list2acc, adjust_lr_by_scheduler\n\nfrom re import compile\nfrom torch.utils.tensorboard import SummaryWriter\nfrom timm.optim import create_optimizer\nfrom timm.models import create_model as create_deit_model\n\nCUBV2=False\n\ndef create_model(cfg, args):\n    logger.info('==> Preparing networks for baseline...')\n    # use gpu\n    device = torch.device(\"cuda\")\n    assert torch.cuda.is_available(), \"CUDA is not available\"\n    # model and optimizer\n    model = 
create_deit_model(\n cfg.MODEL.ARCH,\n pretrained=False,\n num_classes=cfg.DATA.NUM_CLASSES,\n drop_rate=args.drop,\n drop_path_rate=args.drop_path,\n drop_block_rate=None,\n )\n if args.resume:\n checkpoint = torch.load(args.resume)\n pretrained_dict = {}\n\n for k, v in checkpoint['state_dict'].items():\n if not 'head' in k:\n k_ = '.'.join(k.split('.')[1:])\n pretrained_dict.update({k_: v})\n\n model.load_state_dict(pretrained_dict, strict=False)\n logger.info('load pretrained ts-cam model.')\n optimizer = create_optimizer(args, model)\n model = torch.nn.DataParallel(model, device_ids=list(range(torch.cuda.device_count()))).to(device)\n # loss\n cls_criterion = torch.nn.CrossEntropyLoss().to(device)\n logger.info('Preparing networks done!')\n return device, model, optimizer, cls_criterion\n\n\ndef main():\n #update cfg attr directly, INGENIOUS\n args = update_config()\n # create checkpoint directory\n ds_dir= cfg.DATA.DATADIR.split(\"/\")[-1]\n workdir= f\"{ds_dir}_{cfg.MODEL.ARCH}\"\n cfg.BASIC.SAVE_DIR= os.path.join(cfg.BASIC.SAVE_ROOT, workdir)\n cfg.BASIC.ROOT_DIR = \".\"\n log_dir = os.path.join(cfg.BASIC.SAVE_DIR, 'log'); mkdir(log_dir)\n ckpt_dir = os.path.join(cfg.BASIC.SAVE_DIR, 'ckpt'); mkdir(ckpt_dir)\n log_file = os.path.join(cfg.BASIC.SAVE_DIR, 'Log_' + cfg.BASIC.TIME + '.txt')\n # prepare running environment for the whole project\n prepare_env(cfg)\n\n # start loging\n sys.stdout = Logger(log_file)\n pprint.pprint(cfg)\n writer = SummaryWriter(log_dir)\n\n train_loader, test_loader, val_loader = create_data_loader(cfg, os.path.join(cfg.BASIC.ROOT_DIR, cfg.DATA.DATADIR))\n device, model, optimizer, cls_criterion = create_model(cfg, args)\n\n best_gtknown = 0\n best_top1_loc = 0\n update_train_step = 0\n update_val_step = 0\n opt_thred = -1\n lr_scheduler= LRScheduler(\n name= 'warmcos',\n lr= cfg.SOLVER.START_LR,\n iters_per_epoch=len(train_loader),\n total_epochs=cfg.SOLVER.NUM_EPOCHS,\n warmup_epochs= cfg.SOLVER.WARMUP_EPOCHS,\n )\n for epoch in range(1, cfg.SOLVER.NUM_EPOCHS+1):\n update_train_step, loss_train, cls_top1_train, cls_top5_train = \\\n train_one_epoch(train_loader, model, device, cls_criterion,\n optimizer, epoch, writer, cfg, update_train_step, lr_scheduler)\n if CUBV2:\n eval_results = val_loc_one_epoch(val_loader, model, device, )\n else:\n eval_results = val_loc_one_epoch(test_loader, model, device, )\n eval_results['epoch'] = epoch\n with open(os.path.join(cfg.BASIC.SAVE_DIR, 'val.txt'), 'a') as val_file:\n val_file.write(json.dumps(eval_results))\n val_file.write('\\n') \n\n loc_gt_known = eval_results['GT-Known_top-1']\n thred = eval_results['det_optThred_thr_50.00_top-1']\n # if loc_top1_val > best_top1_loc:\n # best_top1_loc = loc_top1_val\n # torch.save({\n # \"epoch\": epoch,\n # 'state_dict': model.state_dict(),\n # 'best_map': best_gtknown\n # }, os.path.join(ckpt_dir, 'model_best_top1_loc.pth'))\n if loc_gt_known > best_gtknown:\n best_gtknown = loc_gt_known\n torch.save({\n \"epoch\": epoch,\n 'state_dict': model.state_dict(),\n 'best_map': best_gtknown\n }, os.path.join(ckpt_dir, f'model_best.pth'))\n opt_thred = thred\n\n logger.info(\"Best GT_LOC: {}\".format(best_gtknown))\n logger.info(\"Best TOP1_LOC: {}\".format(best_gtknown))\n\n writer.add_scalar('acc_iter/best_gt_loc', best_gtknown, epoch)\n\n # torch.save({\n # \"epoch\": epoch,\n # 'state_dict': model.state_dict(),\n # 'best_map': best_gtknown\n # }, os.path.join(ckpt_dir, 'model_epoch{}.pth'.format(epoch)))\n\n logger.info(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M'))\n\n if 
CUBV2:\n logger.info('Testing...')\n checkpoint = torch.load(os.path.join(ckpt_dir, f'model_best.pth'))\n pretrained_dict = {}\n\n for k, v in checkpoint['state_dict'].items():\n k_ = '.'.join(k.split('.')[1:])\n pretrained_dict.update({k_: v})\n\n model.load_state_dict(pretrained_dict)\n eval_results = val_loc_one_epoch(test_loader, model, device, opt_thred=opt_thred)\n for k, v in eval_results.items():\n if isinstance(v, np.ndarray):\n v = [round(out, 2) for out in v.tolist()]\n elif isinstance(v, Number):\n v = round(v, 2)\n else:\n raise ValueError(f'Unsupport metric type: {type(v)}')\n logger.info(f'\\n{k} : {v}')\n with open(os.path.join(cfg.BASIC.SAVE_DIR, 'test.txt'), 'a') as test_file:\n test_file.write(json.dumps(eval_results))\n test_file.write('\\n') \n\ndef train_one_epoch(train_loader, model, device, criterion, optimizer, epoch,\n writer, cfg, update_train_step, lr_scheduler):\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n \n log_var = ['module.layers.[0-9]+.fuse._loss_rate', 'module.layers.[0-9]+.thred']\n log_scopes = [compile(log_scope) for log_scope in log_var]\n \n model.train()\n for i, (input, target) in enumerate(train_loader):\n # update iteration steps\n update_train_step += 1\n\n target = target.to(device)\n input = input.to(device)\n vars = {}\n for log_scope in log_scopes:\n vars.update({key:val for key, val in model.named_parameters()\n if log_scope.match(key)})\n \n cls_logits = model(input)\n loss = criterion(cls_logits, target)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n adjust_lr_by_scheduler(lr_scheduler, optimizer, update_train_step)\n\n prec1, prec5 = accuracy(cls_logits.data.contiguous(), target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n top5.update(prec5.item(), input.size(0))\n lr= optimizer.param_groups[0]['lr']\n writer.add_scalar('loss_iter/train', loss.item(), update_train_step)\n writer.add_scalar('loss_iter/lr', lr, update_train_step)\n writer.add_scalar('acc_iter/train_top1', prec1.item(), update_train_step)\n writer.add_scalar('acc_iter/train_top5', prec5.item(), update_train_step)\n \n for k, v in vars.items():\n writer.add_scalar(k, v.item(), update_train_step)\n\n if i % cfg.BASIC.DISP_FREQ == 0 or i == len(train_loader)-1:\n logger.info(('Train Epoch: [{0}][{1}/{2}],lr: {lr}\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i + 1, len(train_loader), loss=losses,\n top1=top1, top5=top5, lr=optimizer.param_groups[-1]['lr'])))\n return update_train_step, losses.avg, top1.avg, top5.avg\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"author31/scm_nbdev","sub_path":"tools_cam/train_cam.py","file_name":"train_cam.py","file_ext":"py","file_size_in_byte":8444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"247452305","text":"\"\"\"\n:mod:`AMT` -- Entry point for the Automatic Machine Translation program\n=======================================================================\n\n.. module:: AMT\n :platform: Linux\n :synopsis: Translate between languages with no prior information about the language\n.. modauthor:: David Dubois \n\nAutomatic Machine Translation performs a number of steps:\n\n1. **Train word embeddings:** The first step is to train word vector models for all of the languages that this\nsystem will use. 
However, before word embeddings are learned, the words are segmented into morphs with Morfessor.\nThese morphs are the units that the vectors are learned on, so perhaps it\nwould be more accurate to call the vectors 'morph vectors'. The morph vectors are learned with the\nfastText algorithm.\n2. **Align languages:** After morph vectors are learned, the vector spaces must be aligned with each other. Automatic\nMachine Translation uses a modified version of the Joint Registration of Multiple Point Sets\nalgorithm (modified to allow vector spaces with more than three\ndimensions) in order to achieve this.\n3. **Learn Phrases:** Automatic Machine Translation is a phrase-based translation system. Thus, it needs to learn\nphrases. Learning phrases takes much inspiration from Alexandre Klementiev, Ann Irvine, Chris Callison-Burch, and David\nYarowsky (2012), along with those who have followed in their footsteps. Since translating into analytic languages is\neasier, the system uses the language with the fewest morphs per word as an internal interlingua. Thus, each language\nthat the AMT system recognizes needs to be translated to/from a single language, rather than every other language,\ngreatly reducing the number of language pairs used.\n\"\"\"\n\nimport logging\nimport sys\n\nimport matlab.engine\n\nfrom src.amt.embeddings import LanguageModel\n\nlogging.basicConfig(filename='all.log', level=logging.DEBUG)\nlogging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n\n_log = logging.getLogger('main')\n\nif __name__ == '__main__':\n    languages = ['voynichese', 'english']\n    dimensions = 100\n    models = []\n    for language in languages:\n        models.append(LanguageModel(language, dim=dimensions))\n\n    matlab_engine = matlab.engine.start_matlab()\n\n    results = matlab_engine.jrmpc([model.get_all_word_embeddings() for model in models], [0 for x in range(dimensions)])\n","repo_name":"DethRaid/voynich-translation","sub_path":"src/amt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"} +{"seq_id":"3831317785","text":"'''\nimport math\n\nmath.sqrt(900)\nmath.fabs(-10)\nmath.floor(9.2)\nprint(math.e)\nprint(math.pi)\n\ndef rectangle_area(length=2, width=3):\n    \"\"\"Returns the area of a rectangle.\"\"\"\n    return length * width\n\nprint(rectangle_area())\nprint(rectangle_area(4,4))\nprint(rectangle_area(4))\n\ndef average(*args):\n    return sum(args) / len(args)\n\ngrades = [88, 75, 96, 55, 83]\nprint(average(*grades))\n#* - unpacks the tuple elements\n\n\ns = 'Hello'\nprint(s.lower())\n\nx = 'bye'\ndef modify_global():\n    global x\n    x = 'hello'\n    print('x printed from modify_global:', x)\n\nmodify_global()\nprint(x)\n\nsum = 10 + 5\nprint(sum)\n\n# shadowing the built-in function\nsum([10,5])\n\n\nimport statistics\nfrom statistics import mean as m\n\ngrades = [85, 93, 45, 87, 93]\n\nprint(m(grades))\n\nprint(id(grades))\n\nx = 7\ndef cube(number):\n    print('number is x:', number is x) # x is a global variable\n    return number ** 3\n\nprint(cube(x))\n'''\n\nfactorial = 1\nfor number in range(5, 0, -1):\n    factorial *= number\n\nprint(factorial)\n\n# Recursion (breaking the problem into smaller pieces)\n\ndef factorial(number):\n#\"\"\"Returns the factorial of a number.\"\"\"\n    if number <= 1:\n        return 1\n    return number * factorial(number - 1) # recursive call\n\nfor i in range(11):\n    print(f'{i}! = {factorial(i)}')\n\n\nimport statistics\n\n# variance\nprint(statistics.pvariance([1, 3, 4, 2, 6, 5, 3, 4, 5, 2]))\n\n# standard deviation = the square root of the variance\nprint(statistics.pstdev([1, 3, 4, 2, 6, 5, 3, 4, 5, 2]))","repo_name":"kulikofff/Python-Deitel-ML","sub_path":"Math.py","file_name":"Math.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"3731497175","text":"import pandas as pd\nimport numpy as np\nfrom pull_data import CleanData,SpotifyPlaylist\nfrom song_clustering import run_optimal_kmeans,standardize,run_pca\nimport spotipy\nimport os\n\n\n#Get data\n\nsp = spotipy.Spotify()\nfrom spotipy.oauth2 import SpotifyClientCredentials\nclient_credentials_manager = SpotifyClientCredentials(client_id=os.getenv('SPOTIFY'), client_secret=os.getenv('SPOTIFY_SECRET_KEY'))\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\nsp.trace=False\n\nplaylist = SpotifyPlaylist()\ndf_unclean = playlist.get_data()\ndf_clean = CleanData(df_unclean)\nAF_with_id,track_info = df_clean.features()\n\n#copy AF and drop id, need id's later for label filtering\nAF = AF_with_id.copy()\nids = AF.pop('id')\npopularity = AF.pop('popularity')\n\n\n#Standardize and Reduce (with PCA) raw audio features\nAF_std = standardize(AF)\nAF_std_reduced = run_pca(AF_std)[1]\n\n#Cluster songs; returns features data frame with labels attached\nn_clusters = 10\nlabeled_features,labels,centers = run_optimal_kmeans(AF_std_reduced,n_clusters)\n\n#PICKLE THIS WHEN YOU'RE READY WITH FINAL PLAYLIST DATASET (add more in pull_data)\ntrack_info['labels'] = labels\nAF_with_id['labels'] = labels\n\n#AF_with_id.to_pickle('/data/AF_with_id1.pkl')\ntrack_info.to_pickle('../data/track_info1.pkl')\n\n'''\nCreating Cluster Samples for Step 1\n\n#!!!! 
Cluster choice Dataframe has been pickled - do not need to uncomment unless to change songs !!!!\n\n#1) Choose k (number of clusters = 10) random tracks for choice\n\ncluster_ids = []\ntitles = []\nartists_list = []\nurls = []\ncluster_labels = []\n\ntemp = track_info.copy()\n\n#go through all possible labels, select a random song, one from each cluster\nfor i in set(labels):\n cluster_labels.append(str(i+1))\n new_temp = temp[temp['labels']==i]\n c = np.random.choice(new_temp['id'],size=1)[0]\n\n while list(track_info['preview_url'][track_info['id']== c])[0] is None:\n c = np.random.choice(new_temp['id'],size=1)[0]\n cluster_ids.append(c)\n title = list(track_info['name'][track_info['id']== c])[0]\n titles.append(title)\n artist = track_info['artist_name'][track_info['id']== c].iloc[0][0]['name']\n artists_list.append(artist)\n url = list(track_info['preview_url'][track_info['id']==c])[0]\n urls.append(url)\n\n\ndf = pd.DataFrame(cluster_labels,columns=['cluster_labels'])\ndf['Track_id'] = cluster_ids\ndf['titles'] = titles\ndf['artists_list'] = artists_list\ndf['previews'] = urls\n\ndf.to_pickle('../data/big_popular3.pkl')\n'''\n","repo_name":"rdowd003/MyVibes","sub_path":"src/recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"22670286248","text":"from threading import Thread\n\nclass _Getch:\n def __init__(self):\n try:\n self.impl = _GetchWindows()\n except ImportError:\n self.impl = _GetchUnix()\n\n def __call__(self): return self.impl()\n\nclass _GetchUnix:\n def __init__(self):\n import tty, sys\n\n def __call__(self):\n import sys, tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n\nclass _GetchWindows:\n def __init__(self):\n import msvcrt\n\n def __call__(self):\n import msvcrt\n return msvcrt.getch()\n\nclass Controller(Thread):\n def __init__(self, controllerProperty):\n Thread.__init__(self)\n self.controllerProperty = controllerProperty\n self.getch = _Getch()\n\n def run(self):\n while True:\n command = self.getch()\n if command == ';':\n self.controllerProperty[0] = 1\n\n elif command == 'k':\n self.controllerProperty[0] = -1\n\n elif command == 'p':\n break;\n\n print(\"property:\", self.controllerProperty)","repo_name":"SadeghHayeri/Self-Balancing-Robot","sub_path":"src/rasp-mode/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"71"} +{"seq_id":"13914747727","text":"#!/usr/bin/env python\n\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.task import Task\nfrom panda3d_astron.repository import AstronInternalRepository\nfrom direct.directnotify.DirectNotifyGlobal import directNotify\nfrom panda3d.core import loadPrcFileData\nfrom time import sleep\n\n# Globally known object and channel IDs\nfrom simple_example_globals_server import LoginManagerId, UDChannel, SSChannel\n\n# No camera or window is needed, but notifications are.\nloadPrcFileData(\"\", \"\\n\".join([\"window-type none\",\n \"notify-level-udserver debug\"]))\nnotify = directNotify.newCategory(\"udserver\")\n\n# This class manages the UberDOGs, which in this case is just one, the login\n# manager.\nclass SimpleServer(ShowBase):\n\n def __init__(self, 
server_framerate = 60):\n        ShowBase.__init__(self)\n        self.startUberDOG()\n\n    def startUberDOG(self):\n        notify.info(\"Starting UberDOG\")\n        # UberDOG repository\n        air = AstronInternalRepository(UDChannel, # Repository channel\n                                       serverId = SSChannel, # Stateserver channel\n                                       dcFileNames = [\"simple_example.dc\"],\n                                       dcSuffix = \"UD\",\n                                       connectMethod = AstronInternalRepository.CM_NET)\n        air.connect(\"127.0.0.1\", 7199)\n        air.districtId = air.GameGlobalsId = UDChannel\n        \n        # Create the LoginManager\n        self.login_manager = air.generateGlobalObject(LoginManagerId, 'LoginManager')\n\nsimple_server = SimpleServer()\nsimple_server.run()\n","repo_name":"thetestgame/panda3d-astron","sub_path":"examples/simple_example_server_UD.py","file_name":"simple_example_server_UD.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"11701213975","text":"def reverseString(string):\n    l=0\n    r=len(string)-1\n    chars = list(string)  # assumption: reverse a list copy, since str items cannot be assigned\n    while l < r:\n        # two-pointer swap; the loop condition and body here are an assumed reconstruction\n        chars[l], chars[r] = chars[r], chars[l]\n        l += 1\n        r -= 1\n    return ''.join(chars)\n\n# the import and sample value below are assumptions filling lost lines\nfrom colorama import Fore, Style\n\nfruit = \"banana\"\nindex = len(fruit)\nwhile index > 0: \n    letter = fruit[index-1]\n    print(Fore.GREEN + letter)\n    index -=1\n    \nprint(Style.RESET_ALL)\n\n# ? DUDE I FIGURED THIS ONE OUT ALL ON MY OWNNNNNNNNNN!!!!!!!!!!!!!!!!!!!!!!!!!!!!","repo_name":"public-space/Python","sub_path":"beginner/string-reverse.py","file_name":"string-reverse.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"38142921283","text":"import pandas\nimport researchpy as rp\ntea = pandas.read_csv(\"./data/tea.csv\")\n\ndef chisquare(critical):\n    
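# run a chi-square test for every pair of columns (the final column is excluded by the ranges)\n    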
key, value in SURFACE_DATA.items():\r\n\t\t\tif not value['style'] in self.menu_surfs:\r\n\t\t\t\tself.menu_surfs[value['style']] = [(key,load(value['menu']))]\r\n\t\t\telse:\r\n\t\t\t\tself.menu_surfs[value['style']].append((key,load(value['menu'])))\r\n\r\n\tdef create_buttons(self):\r\n\r\n\t\t# item buttons \r\n\t\tmain_size = 180\r\n\t\tself.main_rect = pygame.Rect(WINDOW_WIDTH - main_size - 6,WINDOW_HEIGHT - main_size - 6,main_size,main_size)\r\n\r\n\t\t# generic rect box\r\n\t\tself.generic_rect = pygame.Rect(self.main_rect.topleft,(self.main_rect.width * 0.48,self.main_rect.height * 0.48))\r\n\r\n\t\t# terrain box\r\n\t\tself.terrain_rect = self.generic_rect.copy()\r\n\t\tButton(self.terrain_rect, self.box_sprites, self.menu_surfs['terrain'])\r\n\r\n\t\t# coin box\r\n\t\tself.coin_rect = self.generic_rect.copy()\r\n\t\tself.coin_rect.topright = self.main_rect.topright\r\n\t\tButton(self.coin_rect, self.box_sprites, self.menu_surfs['static'])\r\n\r\n\t\t# palm box\r\n\t\tself.palm_rect = self.generic_rect.copy()\r\n\t\tself.palm_rect.bottomleft = self.main_rect.bottomleft\r\n\t\tButton(self.palm_rect, self.box_sprites, self.menu_surfs['palm_fg'], self.menu_surfs['palm_bg'])\r\n\r\n\t\t# enemies \r\n\t\tself.enemy_rect = self.generic_rect.copy()\r\n\t\tself.enemy_rect.bottomright = self.main_rect.bottomright\r\n\t\tButton(self.enemy_rect, self.box_sprites, self.menu_surfs['enemy'])\r\n\r\n\tdef check_mouse(self, pos, button):\r\n\t\tfor sprite in self.box_sprites:\r\n\t\t\tif sprite.rect.collidepoint(pos):\r\n\t\t\t\tif button[1]: sprite.toggle_alt()\r\n\t\t\t\tif button[2]: sprite.switch()\r\n\t\t\t\treturn sprite.get_id()\r\n\r\n\tdef display(self):\r\n\t\tself.box_sprites.draw(self.display_surface)\r\n\t\tself.box_sprites.update()\r\n\r\n\r\nclass Button(pygame.sprite.Sprite):\r\n\tdef __init__(self, rect,group, items, items_alt = None):\r\n\r\n\t\t# general\r\n\t\tsuper().__init__(group)\r\n\t\tself.image = pygame.Surface(rect.size)\r\n\t\tself.rect = rect\r\n\r\n\t\t# items \r\n\t\tself.items = items\r\n\t\tself.index = 0\r\n\t\tself.rect_center = (self.rect.width / 2, self.rect.height / 2)\r\n\r\n\t\t# alternative items\r\n\t\tself.items_alt = items_alt\r\n\t\tself.alt_active = False\r\n\r\n\tdef get_id(self):\r\n\t\tif not self.alt_active:\r\n\t\t\treturn self.items[self.index][0]#, self.items[self.index][1]\r\n\t\telse:\r\n\t\t\treturn self.items_alt[self.index][0]#, self.items_alt[self.index][1]\r\n\r\n\tdef switch(self):\r\n\t\tself.index += 1\r\n\t\tself.index = 0 if self.index >= len(self.items) else self.index\r\n\r\n\t\tif self.items_alt:\r\n\t\t\tif self.index >= len(self.items_alt):\r\n\t\t\t\tself.index = 0\r\n\r\n\tdef toggle_alt(self):\r\n\t\tif self.items_alt:\r\n\t\t\tself.alt_active = not self.alt_active\r\n\r\n\tdef display_main(self):\r\n\t\tsurf = self.items[self.index][1]\r\n\t\trect = surf.get_rect(center = self.rect_center)\r\n\t\tself.image.blit(surf, rect)\r\n\r\n\tdef display_alt(self):\r\n\t\tsurf = self.items_alt[self.index][1]\r\n\t\trect = surf.get_rect(center = self.rect_center)\r\n\t\tself.image.blit(surf, rect)\r\n\r\n\tdef update(self):\r\n\t\tself.image.fill('black')\r\n\t\tif not self.alt_active:\r\n\t\t\tself.display_main()\r\n\t\telse:\r\n\t\t\tself.display_alt()","repo_name":"clear-code-projects/MarioMaker","sub_path":"code/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"69"} +{"seq_id":"11973716339","text":"\n\nclass 
Brain():\n\n def __init__(self):\n\n self.currentLocationH = 1\n self.currentLocationW = 1\n self.backstack = []\n\n self.journey = []\n\n self.memory = [[0,0,0],\n [0,0,0],\n [0,0,0]]\n\n def makingAMove(self, direction):\n \"\"\"Changes the current location reference marker\"\"\"\n\n self.journey.append(direction)\n if direction == \"N\":\n if self.currentLocationH == 1:\n self.increaseTop()\n self.currentLocationH = 1\n else:\n self.currentLocationH -= 1\n\n if direction == \"E\":\n if self.currentLocationW + 2 == len(self.memory[0]):\n self.increaseRight()\n\n self.currentLocationW += 1\n\n if direction == \"S\":\n if self.currentLocationH + 2 == len(self.memory):\n self.increaseBottom()\n\n self.currentLocationH += 1\n\n if direction == \"W\":\n if self.currentLocationW == 1:\n self.increaseLeft()\n self.currentLocationW = 1\n else:\n self.currentLocationW -= 1\n\n\n def addNodeValues(self, travels):\n \"\"\"Adds values to nodes around current location\"\"\"\n\n self.memory[self.currentLocationH - 1][self.currentLocationW] = travels[\"N\"]\n self.memory[self.currentLocationH + 1][self.currentLocationW] = travels[\"S\"]\n self.memory[self.currentLocationH][self.currentLocationW + 1] = travels[\"E\"]\n self.memory[self.currentLocationH][self.currentLocationW - 1] = travels[\"W\"]\n\n #self.memory[self.currentLocationH][self.currentLocationW] = 9\n\n def increaseTop(self):\n toAdd = []\n for i in range(len(self.memory[0])):\n toAdd.append(0)\n self.memory.insert(0, toAdd)\n\n def increaseRight(self):\n for i in self.memory:\n i.append(0)\n\n def increaseBottom(self):\n newRow = []\n for i in range(len(self.memory[0])):\n newRow.append(0)\n\n self.memory.append(newRow)\n\n def increaseLeft(self):\n for i in self.memory:\n i.insert(0,0)\n","repo_name":"euanacampbell/Pathfinding-Algorithm","sub_path":"Algorithms/Brain.py","file_name":"Brain.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"29517336870","text":"from django.test import TestCase\n\nfrom news_scraper.scraper.pipelines.analyse_date import is_false_positive\n\n\nclass AnalysisTest(TestCase):\n def test_date_filtering(self):\n \"\"\"Test detection of false positives in date extraction.\"\"\"\n\n is_false_positive('one month')\n\n false_positives = ['at 2305', 'to mar', '330, at', '20, at', 'of 54', 'of 147,233',\n '16th', 'of 90', 'by mar', '2017', '500, 4', '', 'may', '1,337',\n '4,327', 'on to mon', 'of 155 sat', 'at 1,337', 'of 155', '12,33',\n '276 on Sunday', 'may on', '08t2', '2t', '33d', '7 T', 't 2017',\n 't 12', '32 at', 't of 2015', '26 of t', '5000 to', 't 20 t', '10-15',\n 't-30', '32 at t', '10-20', 'of -20', 'on of 50', 't by 60', None]\n for fp in false_positives:\n self.assertTrue(is_false_positive(date_source=fp),\n msg='\"{}\" is not detected as false positive'.format(fp))\n\n true_positives = ['on Sunday, 12:23', 'on Monday, 07:01', 'Sunday', 'of September 11th',\n 'of September 11th', 'on Aug', 'on Monday', 'on on Monday', 'September 11th',\n '3 days', 'one month', 'next month', 'tomorrow']\n for tp in true_positives:\n self.assertFalse(is_false_positive(date_source=tp),\n msg='\"{}\" is detected as false positive'.format(tp))\n","repo_name":"jinnerbichler/crypto_news","sub_path":"news_scraper/tests/test_analysis.py","file_name":"test_analysis.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} 
+{"seq_id":"37986473690","text":"# -*- coding: gb18030 -*-\n#\n# $Id: SpecialItem.py,fangpengjun Exp $\n\n\"\"\"\nimplement SpecialItem\n\"\"\"\nfrom guis import *\nfrom LabelGather import labelGather\nfrom guis.controls.Control import Control\nfrom guis.common.PyGUI import PyGUI\nfrom guis.controls.ButtonEx import HButtonEx\nfrom guis.controls.StaticText import StaticText\nimport event.EventCenter as ECenter\nfrom guis.controls.Icon import Icon\nfrom guis.controls.ProgressBar import HProgressBar\nimport GUIFacade\nimport csdefine\n\nclass RobotItem( Control ):\n\t\n\tdef __init__( self, item, pyBinder = None, index = 0 ):\n\t\tControl.__init__( self, item, pyBinder )\n\t\tself.focus = False\n\t\tself.index = index\n\t\t\n\t\tself.__pyRobotInfo = RobotInfo( item.robotBox )\n\t\t\n\t\tself.__pyBtnModify = HButtonEx( item.btnModify )\n\t\tself.__pyBtnModify.setExStatesMapping( UIState.MODE_R4C1 )\n\t\tself.__pyBtnModify.onLClick.bind( self.__onModify )\n\t\tlabelGather.setPyBgLabel( self.__pyBtnModify, \"LolPVP:main\", \"btnModify\" )\n\t\tself.__pyBtnModify.visible = False\n\t\t\n\t\tself.__pyBtnDelete = HButtonEx( item.btnDelete )\n\t\tself.__pyBtnDelete.setExStatesMapping( UIState.MODE_R4C1 )\n\t\tself.__pyBtnDelete.onLClick.bind( self.__onDelete )\n\t\tlabelGather.setPyBgLabel( self.__pyBtnDelete, \"LolPVP:main\", \"btnDelete\" )\n\t\tself.__pyBtnDelete.visible = False\n\t\tself.robotInfo =None\n\t\n\tdef __onModify( self, pyBtn ):\n\t\t\"\"\"\n\t\t修改机器人\n\t\t\"\"\"\n\t\tif pyBtn is None:return\n\t\tpyBinder = self.pyBinder\n\t\trobotPro = rds.ruisMgr.robotProperty\n\t\trobots = pyBinder.getRobots()\n\t\tif self.robotInfo in robots:\n\t\t\tindex = robots.index( self.robotInfo )\n\t\t\trobotPro.show( pyBinder.teamNumber, index, pyBinder.pyTopParent )\n\t\n\tdef __onDelete( self, pyBtn ):\n\t\t\"\"\"\n\t\t删除机器人\n\t\t\"\"\"\n\t\trobots = self.pyBinder.getRobots()\n\t\tif self.robotInfo in robots:\n\t\t\tindex = robots.index( self.robotInfo )\n\t\t\tECenter.fireEvent( \"EVT_ON_REMOVE_PVE_ROBOT\", self.pyBinder.teamNumber, index )\n\t\n\tdef updateInfo( self, robotInfo ):\n\t\t\"\"\"\n\t\t更新机器人信息\n\t\t\"\"\"\n\t\tself.robotInfo = robotInfo\n\t\tisRobot = robotInfo.isRobot\n\t\tpyParent = self.pyBinder\n\t\tself.__pyRobotInfo.updateInfo( robotInfo )\n\t\tisCaptain = BigWorld.player().isCaptain()\n\t\tisVisible = isRobot and pyParent.teamNumber == 0 and isCaptain\n\t\tself.__pyBtnModify.visible = isVisible\n\t\tself.__pyBtnDelete.visible = isVisible\n\n\tdef __select( self ):\n\t\tself.panelState = ( 2, 1 )\n\t\tif self.__pyCover:\n\t\t\tself.__pyCover.visible = True\n\n\tdef __deselect( self ):\n\t\tself.panelState = ( 1, 1 )\n\t\tif self.__pyCover:\n\t\t\tself.__pyCover.visible = False\n\n\tdef _getSelected( self ):\n\t\treturn self.__selected\n\n\tdef _setSelected( self, selected ):\n\t\tif selected:\n\t\t\tself.__select()\n\t\telse:\n\t\t\tself.__deselect()\n\t\tself.__selected = selected\n\nclass RobotInfo( PyGUI ):\n\t\n\t__cc_pro_states = {}\t\t\t\t\t\t\t\t\t# 不同职业的状态标记 mapping 位\n\t__cc_pro_states[csdefine.CLASS_FIGHTER]\t = ( 1, 1 )\t\t# 战士\n\t__cc_pro_states[csdefine.CLASS_SWORDMAN] = ( 1, 2 )\t\t# 剑客\n\t__cc_pro_states[csdefine.CLASS_ARCHER]\t = ( 2, 1 )\t\t# 射手\n\t__cc_pro_states[csdefine.CLASS_MAGE]\t = ( 2, 2 )\t\t# 法师\n\t\n\tdef __init__( self, item ):\n\t\tPyGUI.__init__( self, item )\n\t\tself.__pyHeader = PyGUI( item.header )\n\t\t\n#\t\tself.__pyBorder = PyGUI( item.border )\n#\t\tself.__pyBg = PyGUI( item.bg )\n\n\t\tself.__pyCaptainMark = PyGUI( item.captainMark 
)\n\t\tself.__pyCaptainMark.visible = False\n\n\t\tself.__pyLbName = StaticText( item.lbName )\n\t\tself.__pyLbName.text = \"\"\n\n\t\tself.__pyLbLevel = StaticText( item.lbLevel )\n\t\tself.__pyLbLevel.fontSize = 12\n\t\tself.__pyLbLevel.text = \"\"\n\t\tself.__pyLbLevel.h_anchor = 'CENTER'\n\n\t\tself.__pyHPBar = HProgressBar( item.hpBar,self )\n\n\t\tself.__pyHPBar.value = 0\n\t\tself.__pyHPBar.crossFocus = True\n\t\tself.__pyLbHP = StaticText( item.lbHP )\n\t\tself.__pyLbHP.fontSize = 12\n\t\tself.__pyLbHP.text = \"\"\n\t\tself.__pyLbHP.h_anchor = 'CENTER'\n\t\tself.__pyLbHP.visible = True\n\t\t\n\t\tself.__pyMPBar = None\n\t\tif hasattr( item, \"mpBar\" ):\n\t\t\tself.__pyMPBar = HProgressBar( item.mpBar,self )\n\t\t\tself.__pyMPBar.crossFocus = True\n\t\t\tself.__pyMPBar.value = 0\n\t\t\n\t\tself.__pyLbMP = None\n\t\tif hasattr( item, \"lbMP\" ):\n\t\t\tself.__pyLbMP = StaticText( item.lbMP )\n\t\t\tself.__pyLbMP.fontSize = 12\n\t\t\tself.__pyLbMP.text = \"\"\n\t\t\tself.__pyLbMP.h_anchor = 'CENTER'\n\t\t\tself.__pyLbMP.visible = True\n\t\t\n\t\tself.__pyClassMark = Icon( item.classMark )\n\t\tself.__pyClassMark.crossFocus = True\n\t\n\tdef updateInfo( self, robotInfo ):\n\t\tself.__pyLbName.text = robotInfo.name\n\t\tself.__pyLbLevel.text = str( robotInfo.level )\n\t\thp = robotInfo.hp\n\t\thpMax = robotInfo.hpMax\n\t\tmp = robotInfo.mp\n\t\tmpMax = robotInfo.mpMax\n\t\tif hpMax <= 0.0:\n\t\t\thpMax = 1.0\n\t\tif mpMax <= 0.0:\n\t\t\tmpMax = 1.0\n\t\tphRato = float( hp/hpMax )\n\t\tmprato = float( mp/mpMax )\n\t\tself.__pyHPBar.value = min( phRato, 1.0 )\n\t\tif self.__pyMPBar:\n\t\t\tself.__pyMPBar.value = min( mprato, 1.0 )\n\t\tutil.setGuiState( self.__pyClassMark.getGui(), ( 2, 2 ), self.__cc_pro_states[robotInfo.raceclass] )\n\t\tself.__pyHeader.texture = robotInfo.header\n\t\tself.__pyLbHP.text = \"%d/%d\"%( hp, hpMax )\n\t\tif self.__pyLbMP:\n\t\t\tself.__pyLbMP.text = \"%d/%d\"%( mp, mpMax )\n","repo_name":"mudsave/csol2_enities_45541","sub_path":"client/guis/general/lolpvp/RobotItem.py","file_name":"RobotItem.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"30437042489","text":"import logging as log\nimport time\nimport shutil\n\nfrom avocado.utils import service\n\nfrom virttest import virt_vm\nfrom virttest import virsh\nfrom virttest import utils_disk\nfrom virttest import utils_misc\n\nfrom virttest.utils_test import libvirt\n\nfrom virttest.libvirt_xml import vm_xml, xcepts\nfrom virttest.libvirt_xml.devices.controller import Controller\n\nfrom virttest.libvirt_xml.devices.disk import Disk\n\nfrom virttest import libvirt_version\n\n\n# Using as lower capital is not the best way to do, but this is just a\n# workaround to avoid changing the entire file.\nlogging = log.getLogger('avocado.' 
+ __name__)\n\n\ndef run(test, params, env):\n \"\"\"\n Test SCSI3 Persistent Reservation functions.\n\n 1.Prepare iscsi backend storage.\n 2.Prepare disk xml.\n 3.Hot/cold plug the disk to vm.\n 4.Check if SCSI3 Persistent Reservation commands can be issued to that disk.\n 5.Recover test environment.\n 6.Confirm the test result.\n \"\"\"\n def get_delta_parts(vm, old_parts):\n \"\"\"\n Get the newly added partitions/blockdevs in vm.\n :param vm: The vm to be operated.\n :param old_parts: The original partitions/blockdevs in vm.\n :return: Newly added partitions/blockdevs.\n \"\"\"\n session = vm.wait_for_login()\n new_parts = utils_disk.get_parts_list(session)\n new_parts = list(set(new_parts).difference(set(old_parts)))\n session.close()\n return new_parts\n\n def check_pr_cmds(vm, blk_dev):\n \"\"\"\n Check if SCSI3 Persistent Reservation commands can be used in vm.\n :param vm: The vm to be checked.\n :param blk_dev: The block device in vm to be checked.\n \"\"\"\n session = vm.wait_for_login()\n cmd = (\"sg_persist --no-inquiry -v --out --register-ignore --param-sark 123aaa /dev/{0} &&\"\n \"sg_persist --no-inquiry --in -k /dev/{0} &&\"\n \"sg_persist --no-inquiry -v --out --reserve --param-rk 123aaa --prout-type 5 /dev/{0} &&\"\n \"sg_persist --no-inquiry --in -r /dev/{0} &&\"\n \"sg_persist --no-inquiry -v --out --release --param-rk 123aaa --prout-type 5 /dev/{0} &&\"\n \"sg_persist --no-inquiry --in -r /dev/{0} &&\"\n \"sg_persist --no-inquiry -v --out --register --param-rk 123aaa --prout-type 5 /dev/{0} &&\"\n \"sg_persist --no-inquiry --in -k /dev/{0}\"\n .format(blk_dev))\n cmd_status, cmd_output = session.cmd_status_output(cmd)\n session.close()\n if cmd_status == 127:\n test.error(\"sg3_utils not installed in test image\")\n elif cmd_status != 0:\n test.fail(\"persistent reservation failed for /dev/%s\" % blk_dev)\n else:\n logging.info(\"persistent reservation successful for /dev/%s\" % blk_dev)\n\n def start_or_stop_qemu_pr_helper(is_start=True, path_to_sock=\"/var/run/qemu-pr-helper.sock\"):\n \"\"\"\n Start or stop qemu-pr-helper daemon\n :param is_start: Set True to start, False to stop.\n \"\"\"\n service_mgr = service.ServiceManager()\n if is_start:\n service_mgr.start('qemu-pr-helper')\n time.sleep(2)\n shutil.chown(path_to_sock, \"qemu\", \"qemu\")\n else:\n service_mgr.stop('qemu-pr-helper')\n\n def ppc_controller_update():\n \"\"\"\n Update controller of ppc vm to 'virtio-scsi' to support 'scsi' type\n\n :return:\n \"\"\"\n if params.get('machine_type') == 'pseries' and device_bus == 'scsi':\n if not vmxml.get_controllers(device_bus, 'virtio-scsi'):\n vmxml.del_controller(device_bus)\n ppc_controller = Controller('controller')\n ppc_controller.type = device_bus\n ppc_controller.index = '0'\n ppc_controller.model = 'virtio-scsi'\n vmxml.add_device(ppc_controller)\n vmxml.sync()\n\n # Check if SCSI3 Persistent Reservations supported by\n # current libvirt versions.\n if not libvirt_version.version_compare(4, 4, 0):\n test.cancel(\"The tag supported by libvirt from version \"\n \"4.4.0\")\n vm_name = params.get(\"main_vm\")\n vm = env.get_vm(vm_name)\n virsh_dargs = {'debug': True, 'ignore_status': True}\n\n # Disk specific attributes\n device = params.get(\"virt_disk_device\", \"lun\")\n device_target = params.get(\"virt_disk_device_target\", \"sdb\")\n device_format = params.get(\"virt_disk_device_format\", \"raw\")\n device_type = params.get(\"virt_disk_device_type\", \"block\")\n device_bus = params.get(\"virt_disk_device_bus\", \"scsi\")\n # Iscsi options\n 
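# iscsi_host also serves as the portal IP handed to setup_or_cleanup_iscsi() below\n    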
iscsi_host = params.get(\"iscsi_host\")\n iscsi_port = params.get(\"iscsi_port\")\n emulated_size = params.get(\"iscsi_image_size\", \"1G\")\n # SCSI3 PR options\n reservations_managed = \"yes\" == params.get(\"reservations_managed\", \"yes\")\n reservations_source_type = params.get(\"reservations_source_type\", \"unix\")\n reservations_source_path = params.get(\"reservations_source_path\",\n \"/var/run/qemu-pr-helper.sock\")\n reservations_source_mode = params.get(\"reservations_source_mode\", \"client\")\n secret_uuid = \"\"\n # Case step options\n hotplug_disk = \"yes\" == params.get(\"hotplug_disk\", \"no\")\n\n # Start vm and get all partitions in vm\n if vm.is_dead():\n vm.start()\n session = vm.wait_for_login()\n old_parts = utils_disk.get_parts_list(session)\n session.close()\n vm.destroy(gracefully=False)\n\n # Back up xml file\n vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n\n try:\n # Setup iscsi target\n blk_dev = libvirt.setup_or_cleanup_iscsi(is_setup=True,\n is_login=True,\n image_size=emulated_size,\n portal_ip=iscsi_host)\n\n # Add disk xml\n vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)\n disk_xml = Disk(type_name=device_type)\n disk_xml.device = device\n disk_xml.target = {\"dev\": device_target, \"bus\": device_bus}\n driver_dict = {\"name\": \"qemu\", \"type\": device_format}\n disk_xml.driver = driver_dict\n disk_source = disk_xml.new_disk_source(\n **{\"attrs\": {\"dev\": blk_dev}})\n if reservations_managed:\n reservations_dict = {\"reservations_managed\": \"yes\"}\n else:\n start_or_stop_qemu_pr_helper(path_to_sock=reservations_source_path)\n reservations_dict = {\"reservations_managed\": \"no\",\n \"reservations_source_type\": reservations_source_type,\n \"reservations_source_path\": reservations_source_path,\n \"reservations_source_mode\": reservations_source_mode}\n disk_source.reservations = disk_xml.new_reservations(**reservations_dict)\n disk_xml.source = disk_source\n\n # Update controller of ppc vms\n ppc_controller_update()\n\n if not hotplug_disk:\n vmxml.add_device(disk_xml)\n try:\n # Start the VM and check status\n vmxml.sync()\n vm.start()\n vm.wait_for_login().close()\n time.sleep(5)\n if hotplug_disk:\n result = virsh.attach_device(vm_name, disk_xml.xml,\n ignore_status=True, debug=True)\n libvirt.check_exit_status(result)\n if not utils_misc.wait_for(lambda: len(get_delta_parts(vm, old_parts)) == 1, timeout=5):\n test.fail(\"Expected 1 dev added but has %s\" % len(get_delta_parts(vm, old_parts)))\n new_part = get_delta_parts(vm, old_parts)[0]\n check_pr_cmds(vm, new_part)\n result = virsh.detach_device(vm_name, disk_xml.xml,\n ignore_status=True, debug=True, wait_for_event=True)\n libvirt.check_exit_status(result)\n except virt_vm.VMStartError as e:\n test.fail(\"VM failed to start.\"\n \"Error: %s\" % str(e))\n except xcepts.LibvirtXMLError as xml_error:\n test.fail(\"Failed to define VM:\\n%s\" % xml_error)\n\n finally:\n # Recover VM.\n if vm.is_alive():\n vm.destroy(gracefully=False)\n vmxml_backup.sync(\"--snapshots-metadata\")\n # Delete the tmp files.\n libvirt.setup_or_cleanup_iscsi(is_setup=False)\n # Stop qemu-pr-helper daemon\n start_or_stop_qemu_pr_helper(is_start=False)\n","repo_name":"autotest/tp-libvirt","sub_path":"libvirt/tests/src/virtual_disks/virtual_disks_scsi3_persistent_reservation.py","file_name":"virtual_disks_scsi3_persistent_reservation.py","file_ext":"py","file_size_in_byte":8579,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"69"} 
+{"seq_id":"6860741812","text":"\"\"\"For training 3-layer perceptron.\"\"\"\n# Usage:\n# python ./src/train_3layer_mlp.py \\\n# --gpu \\\n# --depth \\\n# --brain_train_path \\\n# --brain_test_path \\\n# --internal_representation_path \\\n# --save_path \n#\nfrom __future__ import print_function\nimport argparse\nimport cPickle as pickle\nimport os\nimport time\n\nimport numpy as np\nimport six\nimport csv\n\nimport chainer\nfrom chainer import computational_graph\nfrom chainer import cuda\nimport chainer.links as L\nfrom chainer import optimizers\nfrom chainer import serializers\nimport chainer.functions as F\n\nimport mlp_net\n\nparser = argparse.ArgumentParser(description='3-layer perceptron')\nparser.add_argument('--initmodel', '-m', default='',\n help='Initialize the model from given file.')\nparser.add_argument('--resume', '-r', default='',\n help='Resume the optimization from snapshot.')\nparser.add_argument('--optimizer', '-o',\n choices=('SGD', 'MomentumSGD', 'NesterovAG', 'Adagrad',\n 'Adadelta','RMSprop','Adam'),\n default='SGD', help='Optimization algorithm.')\nparser.add_argument('--gpu', '-g', default=-1, type=int,\n help='GPU ID(negative value indicates CPU).')\nparser.add_argument('--epoch', '-e', default=20, type=int,\n help='Number of epochs to learn.')\nparser.add_argument('--unit', '-u', default=1000, type=int,\n help='Number of units.')\nparser.add_argument('--batchsize', '-b', type=int, default=100,\n help='Learning minibatch size.')\nparser.add_argument('--depth', '-d', default='',\n help='Depth of Representation module(y) in PredNet '\n '(R0 | R1 | R2 | R3 ). R0 indicates the input layer.')\nparser.add_argument('--brain_train_path',\n default='/home/fujiyama/PredictiveCoding/brain_activity/cortex/'\n 'z_brain_train_preprocessed.pickle',\n help='Path to the brain activity file for training.'\n 'Brain activity is expected to be a pickle formatted file.')\nparser.add_argument('--brain_test_path',\n default='/home/fujiyama/PredictiveCoding/brain_activity/cortex/'\n 'z_brain_val_preprocessed.pickle',\n help='Path to the brain activity file for test.'\n 'Brain activity is expected to be a pickle formatted file.')\nparser.add_argument('--internal_representation_path',\n default='/home/fujiyama/PredictiveCoding/internal_representation/',\n help='Path to the internal representation directory.')\nparser.add_argument('--save_path', default='/home/fujiyama/PredictiveCoding/3_layer_mlp/models/',\n help='Path to the directory to save trained model, optimizer, '\n 'and csv file to output test losses.')\nargs = parser.parse_args()\n\nbatchsize = args.batchsize\nn_epochs = args.epoch\nn_units = args.unit\ndepth = args.depth\nN_train = 4497 # (# train examples)\nN_test = 300 # (# test examples)\ndim_brain = 65665\n# dim_table: (# dimensions of internal representaion of each layer in PredNet)\ndim_table = {\"R0\":57600, \"R1\":230400, \"R2\":115200, \"R3\":57600}\ndim_internal_representation = dim_table[depth]\n\nbrain_train_path = args.brain_train_path\nbrain_test_path = args.brain_test_path\ninternal_representation_path = args.internal_representation_path\nsave_path = args.save_path\n\nprint('GPU : {}'.format(args.gpu))\nprint('# Unit: {}'.format(n_units))\nprint('# Minibatch-size: {}'.format(batchsize))\nprint('# Epoch: {}'.format(n_epochs))\nprint('# Depth: {}'.format(depth))\nprint('# Brain for train: {}'.format(brain_train_path))\nprint('# Brain for test: {}'.format(brain_test_path))\nprint('# Internal representation: {}'.format(internal_representation_path))\nprint('')\n\nif not 
os.path.exists(save_path):\n os.makedirs(save_path)\n\ndata_load_begin = time.time()\n\n# Prepare dataset\nprint('Load brain data.')\nx_train = pickle.load(open(brain_train_path, 'rb'))[:]\nx_test = pickle.load(open(brain_test_path, 'rb'))[:]\nprint('Shape of x_train (expected : (4497, 65665)) => ' + str(x_train.shape))\nprint('Type of x_train (expected : (numpy.ndarray)) => ' + str(type(x_train)))\nprint('Type of x_train[0,0] (expected : (numpy.float32)) => ' + str(type(x_train[0,0])))\nprint('Shape of x_test (expected : (300, 65665)) => ' + str(x_test.shape))\nprint('Type of x_test (expected : (numpy.ndarray)) => ' + str(type(x_test)))\nprint('Type of x_test[0,0] (expected : (numpy.float32)) => ' + str(type(x_test[0,0])))\n\n# Prepare internal representation\nprint('Load internal representation data.')\ny_train = np.ndarray((N_train, dim_internal_representation), dtype = np.float32)\nfor i in range(0, N_train):\n frame_id = 345 + i * 60\n if frame_id <= 18105:\n dir = 'vtrn001/'\n elif 18105 < frame_id <= 36105:\n dir = 'vtrn002/'\n elif 36105 < frame_id <= 54105:\n dir = 'vtrn003/'\n elif 54105 < frame_id <= 72105:\n dir = 'vtrn004/'\n elif 72105 < frame_id <= 90105:\n dir = 'vtrn005/'\n elif 90105 < frame_id <= 108105:\n dir = 'vtrn006/'\n elif 108105 < frame_id <= 126105:\n dir = 'vtrn007/'\n elif 126105 < frame_id <= 144105:\n dir = 'vtrn008/'\n elif 144105 < frame_id <= 162105:\n dir = 'vtrn009/'\n elif 162105 < frame_id <= 180105:\n dir = 'vtrn010/'\n elif 180105 < frame_id <= 198105:\n dir = 'vtrn011/'\n elif 198105 < frame_id <= 216105:\n dir = 'vtrn012/'\n elif 216105 < frame_id <= 234105:\n dir = 'vtrn013/'\n elif 234105 < frame_id <= 252105:\n dir = 'vtrn014/'\n else:\n dir = 'vtrn015/'\n internal_representation = np.load(\n internal_representation_path + dir + '%08d.npz' % (frame_id))\n y_train[i,:] = internal_representation[depth].reshape((dim_internal_representation,))\n internal_representation.close()\n\ny_test = np.ndarray((N_test, dim_internal_representation), dtype = np.float32)\nfor i in range(0, N_test):\n frame_id = 165 + i * 60\n if frame_id <= 3705:\n dir = 'vval001/'\n elif 3705 < frame_id <= 7305:\n dir = 'vval002/'\n elif 7305 < frame_id <= 10905:\n dir = 'vval003/'\n elif 10905 < frame_id <= 14505:\n dir = 'vval004/'\n else:\n dir = 'vval005/'\n internal_representation = np.load(\n internal_representation_path + dir + '%08d.npz' % (frame_id))\n y_test[i,:] = internal_representation[depth].reshape((dim_internal_representation,))\n internal_representation.close()\n\ndata_load_end = time.time()\nprint('Time needed to load data = {} sec'.format(\n data_load_end - data_load_begin))\n\n# Setup model\nmodel = L.Classifier(\n mlp_net.Regression(dim_brain, n_units, dim_internal_representation),\n lossfun=F.mean_squared_error)\nmodel.compute_accuracy = False\n\n# Device setting\nif args.gpu >= 0:\n cuda.get_device(args.gpu).use()\n model.to_gpu()\nxp = np if args.gpu < 0 else cuda.cupy\n\n# Setup optimizer\nif args.optimizer == 'SGD':\n optimizer = optimizers.SGD()\nelif args.optimizer == 'MomentumSGD':\n optimizer = optimizers.MomentumSGD()\nelif args.optimizer == 'NesterovAG':\n optimizer = optimizers.NesterovAG()\nelif args.optimizer == 'Adagrad':\n optimizer = optimizers.AdaGrad()\nelif args.optimizer == 'Adadelta':\n optimizer = optimizers.AdaDelta()\nelif args.optimizer == 'RMSprop':\n optimizer = optimizers.RMSprop()\nelif args.optimizer == 'Adam':\n optimizer = optimizers.Adam()\n \noptimizer.setup(model)\n\n# Init/Resume\nif args.initmodel:\n print('Load model 
from ', args.initmodel)\n serializers.load_npz(args.initmodel, model)\nif args.resume:\n print('Load optimizer state from ', args.resume)\n serializers.load_npz(args.resume, optimizer)\n\n# Training loop with epoch\ntest_loss = []\nfor epoch in six.moves.range(1, n_epochs + 1):\n print('Epoch: ', epoch)\n\n # Training Loop with minibatch\n perm = np.random.permutation(N_train)\n sum_loss = 0\n start = time.time()\n for i in six.moves.range(0, N_train, batchsize):\n x = chainer.Variable(xp.asarray(x_train[perm[i:i+batchsize]]))\n t = chainer.Variable(xp.asarray(y_train[perm[i:i+batchsize]]))\n\n # Pass the loss function \n optimizer.update(model, x, t)\n\n # Save network architecture\n if epoch == 1 and i == 0:\n with open('graph.dot', 'w') as o:\n variable_style = {'shape': 'octagon', 'fillcolor': '#E0E0E0',\n 'style': 'filled'}\n function_style = {'shape': 'record', 'fillcolor': '#6495ED',\n 'style': 'filled'}\n g = computational_graph.build_computational_graph(\n (model.loss, ),\n variable_style=variable_style,\n function_style=function_style)\n o.write(g.dump())\n print('graph generated')\n\n # Compute loss\n current_loss = float(model.loss.data) * len(t.data)\n #print('current_loss = {}'.format(current_loss))\n sum_loss += current_loss\n\n # Compute throughput\n end = time.time()\n elapsed_time = end - start\n throughput = (N_train - 97) / elapsed_time\n train_mean_loss = sum_loss / (N_train - 97)\n\n # Printout\n print('Train mean loss = {} / sample, throughput = {} samples/sec'.format(\n train_mean_loss, throughput))\n\n # Test loss and accuracy\n sum_loss = 0\n\n # Test Loop with minibatch\n for i in six.moves.range(0, N_test, batchsize):\n x = chainer.Variable(xp.asarray(x_test[i:i+batchsize]),\n volatile = 'on')\n t = chainer.Variable(xp.asarray(y_test[i:i+batchsize]),\n volatile = 'on')\n\n loss = model(x, t)\n current_test_loss = float(loss.data) * len(t.data)\n #print('current_test_loss = {}'.format(current_test_loss))\n sum_loss += current_test_loss\n\n # Printout\n test_mean_loss = sum_loss / N_test\n print('test mean loss = {} / sample'.format(test_mean_loss))\n\n # Record test loss\n test_loss.append([test_mean_loss])\n\n # Save model, and optimizer\n print('Save the model')\n fname = save_path + depth + '_' + args.optimizer + '_' + str(epoch) + '.model'\n serializers.save_npz(fname, model)\n \n print('Save the optimizer')\n fname = save_path + depth + '_' + args.optimizer + '_' + str(epoch) + '.state'\n serializers.save_npz(fname, optimizer)\n\n# Save the losses\nprint('Save the losses')\nfname = save_path + depth + '_' + args.optimizer + '_loss.csv'\nf = open(fname, 'w')\nwriter = csv.writer(f,lineterminator = '\\n')\nwriter.writerow(['loss'])\nwriter.writerows(test_loss)\nf.close()\n","repo_name":"erikuroda/master_2022","sub_path":"Prediction/PredNet/Fujiyama_prednet/3_layer_mlp/src/train_3layer_mlp.py","file_name":"train_3layer_mlp.py","file_ext":"py","file_size_in_byte":10983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"31947585096","text":"# -*- coding: utf8 -*-\nimport os\nimport re \n\nret = ''\nprev = 0\ncomp = 0\nstrlen = 0\nprevstr = ''\nsize = (os.popen('ls -d */ -l | grep ^d | wc -l')).read()\nlist = os.popen('ls -d */ -l | awk \\'{print $9}\\'').read()\narr = list.strip().split('/\\n')\n\nret = arr[0]\nfor x in arr:\n \n #comp = int(re.sub('[a-zA-Zㄱ-힗-=+,._#/\\?:^$.@*\\\"※~&%ㆍ!』\\\\‘|\\(\\)\\[\n # \\]\\<\\>`\\'…》]', '', x))\n comp = 
int(re.sub('[a-zA-Z._/\\?:^$.@*\\\"※~&%ㆍ!』\\\\‘|\\(\\)\\[''\\]\\<\\>`\\'…》]', '', x))\n if(int(comp)==int(prev) and len(x) > len(ret)):\n prev = comp\n ret = x\n \n if(int(comp) > int(prev)):\n prev = comp\n ret = x\n\nprint('./' + ret +'/') \n\n","repo_name":"bitnine-oss/agensgraph-docker","sub_path":".github/workflows/FindVer.py","file_name":"FindVer.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"} +{"seq_id":"311939566","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom . import __version__\n\nimport random\nimport json\n\nimport ee\n\nfrom landdegradation import preproc\nfrom landdegradation import stats\nfrom landdegradation import util\nfrom landdegradation import GEEIOError\n\nfrom landdegradation.schemas import BandInfo, URLList, CloudResults, CloudResultsSchema\n\n\ndef land_cover(year_baseline, year_target, geojson, trans_matrix,\n remap_matrix, EXECUTION_ID, logger):\n \"\"\"\n Calculate land cover indicator.\n \"\"\"\n logger.debug(\"Entering land_cover function.\")\n\n ## land cover\n lc = ee.Image(\"users/geflanddegradation/toolbox_datasets/lcov_esacc_1992_2015\")\n lc = lc.where(lc.eq(9999), -32768)\n lc = lc.updateMask(lc.neq(-32768))\n\n ## target land cover map reclassified to IPCC 6 classes\n lc_tg_raw = lc.select('y{}'.format(year_target))\n lc_tg_remapped = lc_tg_raw.remap(remap_matrix[0], remap_matrix[1])\n\n ## baseline land cover map reclassified to IPCC 6 classes\n lc_bl_raw = lc.select('y{}'.format(year_baseline))\n lc_bl_remapped = lc_bl_raw.remap(remap_matrix[0], remap_matrix[1])\n\n ## compute transition map (first digit for baseline land cover, and second digit for target year land cover)\n lc_tr = lc_bl_remapped.multiply(10).add(lc_tg_remapped)\n\n ## definition of land cover transitions as degradation (-1), improvement (1), or no relevant change (0)\n lc_dg = lc_tr.remap([11, 12, 13, 14, 15, 16, 17,\n 21, 22, 23, 24, 25, 26, 27,\n 31, 32, 33, 34, 35, 36, 37,\n 41, 42, 43, 44, 45, 46, 47,\n 51, 52, 53, 54, 55, 56, 57,\n 61, 62, 63, 64, 65, 66, 67,\n 71, 72, 73, 74, 75, 76, 77],\n trans_matrix)\n\n ## Remap persistence classes so they are sequential. 
This\n ## makes it easier to assign a clear color ramp in QGIS.\n lc_tr = lc_tr.remap([11, 12, 13, 14, 15, 16, 17,\n 21, 22, 23, 24, 25, 26, 27,\n 31, 32, 33, 34, 35, 36, 37,\n 41, 42, 43, 44, 45, 46, 47,\n 51, 52, 53, 54, 55, 56, 57,\n 61, 62, 63, 64, 65, 66, 67,\n 71, 72, 73, 74, 75, 76, 77],\n [1, 12, 13, 14, 15, 16, 17,\n 21, 2, 23, 24, 25, 26, 27,\n 31, 32, 3, 34, 35, 36, 37,\n 41, 42, 43, 4, 45, 46, 47,\n 51, 52, 53, 54, 5, 56, 57,\n 61, 62, 63, 64, 65, 6, 67,\n 71, 72, 73, 74, 75, 76, 7])\n\n lc_out = lc_bl_remapped \\\n .addBands(lc_tg_remapped) \\\n .addBands(lc_tr) \\\n .addBands(lc_dg) \\\n .addBands(lc_bl_raw) \\\n .addBands(lc_tg_raw)\n\n # Create export function to export land deg image\n task = util.export_to_cloudstorage(lc_out.unmask(-32768).int16(),\n lc.projection(), geojson, 'land_cover', logger,\n EXECUTION_ID)\n task.join()\n\n logger.debug(\"Setting up results JSON.\")\n d = [BandInfo(\"Land cover (7 class)\", 1, no_data_value=9999, add_to_map=True, metadata={'year': year_baseline}),\n BandInfo(\"Land cover (7 class)\", 2, no_data_value=9999, add_to_map=True, metadata={'year': year_target}),\n BandInfo(\"Land cover transitions\", 3, no_data_value=9999, add_to_map=True, metadata={'year_baseline': year_baseline, 'year_target': year_target}),\n BandInfo(\"Land cover degradation\", 4, no_data_value=9999, add_to_map=True, metadata={'year_baseline': year_baseline, 'year_target': year_target}),\n BandInfo(\"Land cover (ESA classes)\", 5, no_data_value=9999, metadata={'year': year_baseline}),\n BandInfo(\"Land cover (ESA classes)\", 6, no_data_value=9999, metadata={'year': year_target})]\n u = URLList(task.get_URL_base(), task.get_files())\n gee_results = CloudResults('land_cover', __version__, d, u)\n results_schema = CloudResultsSchema()\n json_results = results_schema.dump(gee_results)\n\n return json_results\n\n\ndef run(params, logger):\n \"\"\".\"\"\"\n logger.debug(\"Loading parameters.\")\n year_baseline = params.get('year_baseline', 2000)\n year_target = params.get('year_target', 2015)\n geojson = params.get('geojson', util.tza_geojson)\n trans_matrix_default = [0, 1, 1, 1, -1, 0, -1,\n -1, 0, -1, -1, -1, -1, -1,\n -1, 1, 0, 0, -1, -1, -1,\n -1, -1, -1, 0, -1, -1, 0,\n 1, 1, 1, 1, 0, 0, -1,\n 1, 1, 1, 1, -1, 0, 0,\n 1, 1, 0, 0, 0, 0, 0]\n trans_matrix = params.get('trans_matrix', trans_matrix_default)\n remap_matrix_default = [[10, 11, 12, 20, 30, 40, 50, 60, 61, 62, 70, 71, 72,\n 80, 81, 82, 90, 100, 110, 120, 121, 122, 130, 140,\n 150, 151, 152, 153, 160, 170, 180, 190, 200, 201,\n 202, 210, 220],\n [3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 4, 4, 5, 6, 6,\n 6, 7, 6]]\n remap_matrix = params.get('remap_matrix', remap_matrix_default)\n\n if len(trans_matrix) != 49:\n raise GEEIOError(\"Transition matrix must be a list with 49 entries\")\n if len(remap_matrix) != 2 or len(remap_matrix[0]) != 37 or len(remap_matrix[1]) != 37:\n raise GEEIOError(\"Transition matrix must be a list of two lists with 37 entries each\")\n\n logger.debug(\"Loading geojson.\")\n if geojson is None:\n raise GEEIOError(\"Must specify an input area\")\n else:\n geojson = json.loads(geojson)\n\n # Check the ENV. 
Are we running this locally or in prod?\n if params.get('ENV') == 'dev':\n EXECUTION_ID = str(random.randint(1000000, 99999999))\n else:\n EXECUTION_ID = params.get('EXECUTION_ID', None)\n\n logger.debug(\"Running main script.\")\n json_results = land_cover(year_baseline, year_target, geojson, trans_matrix,\n remap_matrix, EXECUTION_ID, logger)\n\n return json_results.data\n","repo_name":"ConservationInternational/trends.earth-gee","sub_path":"gee/land_cover/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6357,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"4765888242","text":"# File: Graphing.py\n# Author: Henry Hall\n# Date: 23/01/2022\n# Description: Imports different sets of data\n# from the serial port and plots a live graph\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport serial\nimport time\nimport keyboard\n\n#PORT = 'COM7'\nPORT = 'COM3'\nBAUD = 115200\nMAX_OUT = 10\nWIDTH = 20\nEXCEPTIONS = [[\"b'\\\\n'\", \"b'S'\"],['\\\\', '']]\n\ndef graph_set(fig, axes):\n \"\"\"Sets the graph settings\"\"\"\n axes.grid(True)\n axes.set_xlabel('Time (s)')\n axes.set_title('Readings')\n \ndef plot(fig, axes, xs, ys, i):\n \"\"\"Clears graph data and plots new data live\"\"\"\n axes.cla()\n graph_set(fig, axes)\n if xs[-1] < WIDTH:\n\n axes.set_xlim([0, WIDTH])\n else:\n axes.set_xlim([xs[-1] - WIDTH, xs[-1]])\n for j in range(0, i):\n axes.plot(xs, ys[j], label = f'Reading{j + 1}')\n axes.legend()\n plt.pause(0.01)\n\ndef set_data(data, xs, ys, start):\n \"\"\"Puts time and data into x and y lists\"\"\"\n for i in range(0, len(data)):\n ys[i].append(data[i])\n xs.append(time.time() - start)\n return (xs, ys)\n\ndef check_end(axes, xs):\n \"\"\"Checks to see if the session has been quit\"\"\"\n if keyboard.is_pressed('Enter'):\n print('Quit')\n axes.set_xlim([0, xs[-1]])\n plt.show()\n return True\n else:\n return False\n\ndef set_ys():\n \"\"\"Sets an empty y list\"\"\"\n ys = []\n for i in range(0, MAX_OUT):\n ys.append([])\n return ys\n\ndef set_start(is_first, start):\n \"\"\"sets the initial time\"\"\"\n if is_first == True:\n start = time.time()\n return start\n\nget_b = lambda ser: str(ser.read())\n\ndef main():\n \"\"\"The main function\"\"\"\n #Set all initial values and empty lists\n fig1 = plt.figure(\"Figure\")\n axes = plt.axes() \n ser = serial.Serial(PORT, BAUD)\n start = time.time()\n print(ser.name)\n is_done = False\n is_first = True\n xs = []\n ys = set_ys()\n data = []\n s = ''\n while is_done != True:\n #Continuous while loop which reads, proccesses and plots data from the serial port\n byte = get_b(ser)\n if byte == \"b'S'\":\n start = set_start(is_first, start)\n is_first = False\n if data != []:\n set_data(data, xs, ys, start)\n plot(fig1, axes, xs, ys, len(data))\n data = []\n byte = get_b(ser)\n byte = get_b(ser)\n if byte == \"b'\\\\n'\": \n data.append(float(s))\n s = ''\n elif byte not in EXCEPTIONS[0] and list(byte)[2] not in EXCEPTIONS[1]:\n s += list(byte)[-2]\n is_done = check_end(axes, xs)\n\nmain()","repo_name":"Hwh40/HSS-Automation","sub_path":"CSV/Graphing.py","file_name":"Graphing.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"20446594999","text":"#!/usr/bin/env python3\nimport gzip\nimport multiprocessing\nimport os\nimport sys\nimport time\n\n\n\n\nclass timeCounter:\n def __init__(self,interval=1):\n self.start = time.time()\n self.interval = 
interval\n self.counters = {}\n self.starts = {}\n self.ends = {}\n\n def update(self,Action=\"Default\"):\n if Action in self.counters:\n self.counters[Action] += 1\n else:\n self.counters[Action] = 1\n\n now = time.time()\n if now - self.start > self.interval:\n try:\n Str = ''\n for Action in self.counters:\n Str += Action + ': ' + str(self.counters[Action]/(now-self.start)) + ' (' + str((self.counters[Action]/sum(self.counters.values())) * 100) + '%' + ') ' \n print(\"Status: \",Str)\n\n Str = ''\n for Action in self.starts:\n Str += Action + ': ' + str(self.ends[Action] - self.starts[Action]) + ' (' + str(100*((self.ends[Action] - self.starts[Action])/(max(self.ends.values()) - min(self.starts.values())))) + '%) '\n print(\"Timing: \" + Str)\n except:\n pass\n \n self.start = now\n self.counters = {}\n self.starts = {}\n self.ends = {}\n\n def startAction(self,Action=\"Default\"):\n self.starts[Action] = time.time()\n\n\n def stopAction(self,Action=\"Default\"):\n self.ends[Action] = time.time()\n self.update(Action)\n\n \n\ndef isIn(ToCheck, Term):\n for c in range(len(ToCheck)):\n Matched = 0\n for a in range(len(Term)):\n if (c+a) < len(ToCheck):\n if Term[a] != ToCheck[c+a]:\n break\n else:\n Matched += 1\n else:\n break\n if Matched == len(Term):\n return True\n return False\n \n\n\n \n \n\ndef processFile(FileName):\n try:\n TC = timeCounter()\n \n f = gzip.open(FileName,'rb')\n g = gzip.open(FileName + '.new','wb')\n TERM = sys.argv[2].encode('utf-8')\n\n while True:\n l = f.readline()\n if l == b'':\n break\n else:\n TC.update(\"Scan Line\")\n\n if not isIn(l, TERM):\n g.write(l)\n\n\n\n\n f.close()\n g.close()\n os.remove(FileName)\n os.rename(FileName + '.new', FileName)\n print(\"Done\",FileName)\n except:\n pass\nif __name__ == '__main__':\n cpu_count = multiprocessing.cpu_count()\n print(\"System has\",cpu_count,\"CPUs\")\n\n def recurse(IN):\n files = os.listdir(IN)\n Out = []\n for i in files:\n v = IN + '/' + i\n if os.path.isdir(v):\n Out += recurse(v)\n else:\n if '.gz' in i:\n print(\"Find\",v)\n Out.append(v)\n return Out\n\n Files = recurse(sys.argv[1])\n\n pool = multiprocessing.Pool(cpu_count)\n pool.map(processFile,Files)\n\n","repo_name":"MegaKG/BetterLogServer","sub_path":"Tools/FilterLogs.py","file_name":"FilterLogs.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"4486243908","text":"import logging\n\nfrom django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.exceptions import ValidationError, PermissionDenied\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom commons.models import UserCreatedDatetimeModel\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Organization(UserCreatedDatetimeModel):\n name = models.CharField(\n max_length=150,\n help_text=_('Name of your organization')\n )\n is_active = models.BooleanField(\n default=True\n )\n\n def __str__(self):\n return f'Organization({self.name})'\n\n\nclass OrganizationEmailDomain(UserCreatedDatetimeModel):\n organization = models.ForeignKey(\n Organization,\n on_delete=models.CASCADE\n )\n domain = models.CharField(\n max_length=150,\n help_text=_('Email Domain used by the organization')\n )\n\n\nclass OrganizationUser(AbstractUser):\n organization = models.ForeignKey(\n Organization,\n editable=False,\n null=True,\n on_delete=models.CASCADE\n )\n\n @property\n def email_domain(self):\n domain = self.email.split('@')[-1] # NAME@DOMAIN.COM -> [ 
'NAME', 'DOMAIN.COM']\n return domain\n\n @property\n def display_name(self):\n return f'{self.last_name}, {self.first_name}'\n\n def save(self, *args, **kwargs):\n # only update on initial creation\n # --> Will not have an ID on initial save\n if self.id is None:\n if not self.is_superuser:\n self.is_staff = True # auto-add is_staff (so user can use the ADMIN)\n if not kwargs.get('ignore_email_domain_check', False):\n # find the organization for the given user\n try:\n email_domain = OrganizationEmailDomain.objects.get(domain=self.email_domain)\n self.organization = email_domain.organization\n except OrganizationEmailDomain.DoesNotExist:\n raise PermissionDenied('Organization does not exist for given Email Domain!')\n else:\n logger.warning('Ignoring EMAIL DOMAIN check on user creation!')\n else:\n logger.warning(f'Creating superuser: {self.username}')\n if 'ignore_email_domain_check' in kwargs:\n del kwargs['ignore_email_domain_check']\n super().save(*args, **kwargs)\n","repo_name":"monkut/lorisattack","sub_path":"lorisattack/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"2972629383","text":"# Candy Crush sim\n# There are n colored balls on the screen; the colors are number coded.\n# A substring of at least 3 balls having the same color is called a sequence.\n# By shooting a ball that’s part of a sequence, the whole sequence will be removed and\n# the right side of the string will shift to the left to fill the gap.\n# If a new sequence is created by this move, it will be removed as well and the process repeats\n# itself until no new sequence is created.\n# Mechanics: the player will always shoot first at a ball that’s part of the longest sequence available;\n# if there are several of the same length, the one to the left is selected.\n# Example: 5 1 3 3 2 2 2 2 3 1 1 5 6 4 4 4 4 7; first the 2 sequence is removed and we’re left with\n# 5 1 3 3 3 1 1 5 6 4 4 4 4 7; the 3 sequence is removed and we’re left with 5 1 1 1 5 6 4 4 4 4 7;\n# the 1 sequence is removed and we’re left with 5 5 6 4 4 4 4 7 and lastly the 4 sequence is removed\n# and we’re left with 5 5 6 7; these no longer form a sequence so the game is stopped.\n# Task: knowing the number of balls and their color, output the following:\n# 1: the number of sequences that were initially available\n# 2: the number of balls (if any) that remain\n\nimport find_longest_sequence as fls\nimport output_message as om\n\n\nclass CandyCrush:\n \"\"\"Class used to represent the game\"\"\"\n\n def __init__(self, balls_array):\n \"\"\"Init method\"\"\"\n self.balls_array = balls_array\n self.number_of_sequences = 0\n\n def run_game(self):\n \"\"\"Method to start the game\"\"\"\n if fls.find_longest_sequence(self) is False:\n return\n else:\n om.output_message(self)\n\n\nif __name__ == '__main__':\n arr = [2, 2, 2, 3, 4, 5, 6, 7, 8, 9]\n cc = CandyCrush(arr)\n cc.run_game()\n","repo_name":"alextodireanu/candy_crush_sim","sub_path":"candy_crush.py","file_name":"candy_crush.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"3992524001","text":"# B - Great Ocean View\r\n# https://atcoder.jp/contests/abc124/tasks/abc124_b\r\n\r\nN = int(input())\r\nH = list(map(int, input().split()))\r\nhm = 0\r\ncnt = 0\r\nfor i in H:\r\n if i >= hm:\r\n hm = max(hm, i)\r\n cnt += 
1\r\nprint(cnt)","repo_name":"Hironobu-Kawaguchi/atcoder","sub_path":"atcoder/abc124_b.py","file_name":"abc124_b.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"28170125691","text":"import numpy as np\nfrom pypower.idx_bus import BUS_I, BUS_TYPE, PD, QD, GS, BS, BUS_AREA, \\\n VM, VA, VMAX, VMIN, LAM_P, LAM_Q, MU_VMAX, MU_VMIN, REF\n\nclass events:\n def __init__(self, filename):\n self.event_stack = []\n self.parser(filename) \n \n def parser(self, filename):\n \"\"\"\n Parse an event file (*.evnt) and populate event stack\n \"\"\"\n f = open(filename, 'r')\n \n for line in f:\n if line[0] != '#' and line.strip() != '': # Ignore comments and blank lines\n tokens = line.strip().split(',')\n \n # Parse signal events\n if tokens[1].strip() in ['SIGNAL', 'FAULT', 'LOAD', 'STATE']:\n self.event_stack.append([float(tokens[0].strip()), tokens[1].strip(), tokens[2].strip(), tokens[3].strip(), tokens[4].strip()])\n \n elif tokens[1].strip() in ['CLEAR_FAULT', 'TRIP_BRANCH']:\n self.event_stack.append([float(tokens[0].strip()), tokens[1].strip(), tokens[2].strip()])\n \n f.close()\n \n def handle_events(self, t, elements, ppc, baseMVA):\n \"\"\"\n Checks and handles the event stack during a simulation time step\n \"\"\"\n refactorise = False\n \n if self.event_stack:\n if self.event_stack[0][0] < t:\n print('Event missed at t=' + str(self.event_stack[0][0]) + 's... Check simulation time step!')\n del self.event_stack[0]\n \n # Event exists at time step\n while self.event_stack and self.event_stack[0][0] == t:\n event_type = self.event_stack[0][1]\n \n # Handle signal events\n if event_type == 'SIGNAL':\n obj_id = self.event_stack[0][2]\n sig_id = self.event_stack[0][3]\n value = float(self.event_stack[0][4])\n elements[obj_id].signals[sig_id] = value\n \n print('SIGNAL event at t=' + str(t) + 's on element \"' + obj_id + '\". ' + sig_id + ' = ' + str(value) + '.')\n \n if event_type == 'STATE':\n obj_id = self.event_stack[0][2]\n sig_id = self.event_stack[0][3]\n value = float(self.event_stack[0][4])\n elements[obj_id].states[sig_id] = value\n \n print('STATE event at t=' + str(t) + 's on element \"' + obj_id + '\". 
' + sig_id + ' = ' + str(value) + '.')\n \n if event_type == 'FAULT':\n bus_id = int(self.event_stack[0][2])\n Rf = float(self.event_stack[0][3])\n Xf = float(self.event_stack[0][4])\n \n if Rf == 0:\n ppc[\"bus\"][bus_id, GS] = 1e6\n elif Rf < 0:\n ppc[\"bus\"][bus_id, GS] = 0\n Rf = 'Inf'\n else:\n ppc[\"bus\"][bus_id, GS] = 1 / Rf * baseMVA\n \n if Xf == 0:\n ppc[\"bus\"][bus_id, BS] = -1e6\n elif Xf < 0:\n ppc[\"bus\"][bus_id, BS] = 0\n Xf = 'Inf'\n else:\n ppc[\"bus\"][bus_id, BS] = -1 / Xf * baseMVA\n \n refactorise = True\n \n print('FAULT event at t=' + str(t) + 's on bus at row \"' + str(bus_id) + '\" with fault impedance Zf = ' + str(Rf) + ' + j' + str(Xf) + ' pu.')\n \n if event_type == 'CLEAR_FAULT':\n bus_id = int(self.event_stack[0][2])\n ppc[\"bus\"][bus_id, BS] = 0\n ppc[\"bus\"][bus_id, GS] = 0\n refactorise = True\n \n print('CLEAR_FAULT event at t=' + str(t) + 's on bus at row \"' + str(bus_id) + '\".')\n \n if event_type == 'TRIP_BRANCH':\n branch_id = int(self.event_stack[0][2])\n ppc[\"branch\"] = np.delete(ppc[\"branch\"],branch_id, 0)\n refactorise = True\n \n print('TRIP_BRANCH event at t=' + str(t) + 's on branch \"' + str(branch_id) + '\".')\n \n if event_type == 'LOAD':\n bus_id = int(self.event_stack[0][2])\n Pl = float(self.event_stack[0][3])\n Ql = float(self.event_stack[0][4])\n \n ppc[\"bus\"][bus_id, PD] = Pl\n ppc[\"bus\"][bus_id, QD] = Ql\n \n refactorise = True\n \n print('LOAD event at t=' + str(t) + 's on bus at row \"' + str(bus_id) + '\" with S = ' + str(Pl) + ' MW + j' + str(Ql) + ' MVAr.')\n \n del self.event_stack[0]\n \n return ppc, refactorise","repo_name":"susantoj/PYPOWER-Dynamics","sub_path":"pydyn/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"69"} +{"seq_id":"40910611164","text":"from readGenome import readGenome\r\nfrom readFastq import readFastq\r\nfrom seqfunc import reverseComplement\r\nfrom timeit import timeit\r\n\r\ngenome = readGenome('phix.fa')\r\n\r\n\r\ndef naive(pattern, text):\r\n occurences = []\r\n for i in range(len(text) - len(pattern) + 1): # loop over alignments\r\n match = True\r\n for j in range(len(pattern)): # loop over characters\r\n if text[i+j] != pattern[j]:\r\n match = False\r\n break\r\n if match:\r\n occurences.append(i)\r\n # print(occurences)\r\n return occurences\r\n\r\nphix_reads, _ = readFastq('ERR266411_1.first1000.fastq') # no need to return qualities, therefore lower memory use\r\n\r\n\r\ndef finalFunc(numMatched, n):\r\n # numMatched = 0\r\n # n = 0 # count total number of reads that we've processed\r\n\r\n for read in phix_reads:\r\n read = read[:30] # take only first 30 bases\r\n matches = naive(read, genome)\r\n matches.extend(naive(reverseComplement(read), genome))\r\n n += 1\r\n if len(matches) > 0:\r\n numMatched += 1\r\n print('%d/%d reads matched the genome!' 
% (numMatched, n))\r\n\r\nprint(timeit(lambda: finalFunc(0, 0), number=1))\r\n","repo_name":"pexca/DNA_sequencing","sub_path":"naive.py","file_name":"naive.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"22692656108","text":"from sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.future import select\n\nfrom src.entities.auth import User\n\n\nclass UserRepository:\n def __init__(self, session: AsyncSession):\n self.session = session\n\n async def create(self, user: User):\n self.session.add(user)\n await self.session.commit()\n await self.session.refresh(user)\n\n async def exists_from_email(self, email: str) -> bool:\n stmt = select(User).where(User.email == email)\n result = await self.session.execute(stmt)\n return True if result.scalars().one_or_none() else False\n","repo_name":"mrlucca/FaculOminichanBackend","sub_path":"src/infrastructures/repositories/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"353709818","text":"# This problem was asked by Google.\n#\n# A quack is a data structure combining properties of both stacks and queues. It can\n# be viewed as a list of elements written left to right such that three operations\n# are possible:\n#\n# push(x): add a new item x to the left end of the list\n# pop(): remove and return the item on the left end of the list\n# pull(): remove the item on the right end of the list.\n#\n# Implement a quack using three stacks and O(1) additional memory, so that the\n# amortized time for any push, pop, or pull operation is O(1).\n\nfrom collections import deque\n\nclass Quack():\n # stack 1 -- input stack\n # stack 2 -- stack to serve queue operations on request\n # stack 3 -- additional stack to serve stack operations after we moved data to stack 2\n # we also need a counter for items usable on stack 2 & 3 (which are always equal)\n # because the physical count on these stacks is not going to be representative\n # as we will have to somehow remember that we virtually excluded some amount of items\n # from the non-accessible end\n\n def __init__(self):\n self._count = 0\n self.input = deque()\n self.aux_reversed = deque()\n self.aux = deque()\n self.aux_counter = 0 # count of usable items in the aux stacks!\n\n def push(self, x):\n # push always go to the input stack!\n self._count += 1\n self.input.append(x)\n\n def pop(self):\n if self._count == 0:\n raise Exception('no items left!')\n\n self._count -= 1\n\n # try to serve from the input stack if it's not empty\n if self.input:\n return self.input.pop()\n\n # okay, can not serve from the input stack, use aux stack\n self.aux_counter -= 1\n\n return self.aux.pop()\n\n def _clean_aux(self):\n while self.aux_reversed:\n self.aux_reversed.pop()\n while self.aux:\n self.aux.pop()\n\n def _refill(self):\n # we exhausted the aux data structures and have to carry the data over\n # from the input stack\n self._clean_aux()\n\n self.aux_counter = len(self.input) # all items from input stack are copied!\n\n # here is the problem -- we have to also have aux populated with the input\n # stack, and order has to be preserved!\n # we can do a trick there -- while populating reversed, temporary populate\n # the aux with reversed order as well, and them reverse stack again to the\n # input stack, and then switch the input stack with the aux!\n\n while self.input:\n x = 
self.input.pop()\n self.aux_reversed.append(x)\n self.aux.append(x) # temporary reversed order!\n\n # okay, now we multiplexed the source data into two stacks with reversed\n # order, let's restore the original order moving from aux to input\n while self.aux:\n x = self.aux.pop()\n self.input.append(x)\n\n # the only thing left -- swap the stacks!\n self.input, self.aux = self.aux, self.input\n\n def pull(self):\n # pull operations has to be performed on a reversed stack\n\n if self._count == 0:\n raise Exception('no items left!')\n\n self._count -= 1\n\n # refill aux stacks if empty\n if self.aux_counter == 0:\n self._refill()\n\n self.aux_counter -= 1\n\n # okay, and return the item now\n return self.aux_reversed.pop()\n\n def count(self):\n return self._count\n\n def __bool__(self):\n return self.count() > 0\n\n\nquack = Quack()\n\nquack.push(1)\nquack.push(2)\nquack.push(3)\n\nassert quack.pull() == 1\nassert quack.pop() == 3\n\nquack.push(4)\nquack.push(5)\n\nassert quack.pull() == 2\nassert quack.pop() == 5\nassert quack.pop() == 4\n\nquack.push(6)\nquack.push(7)\nquack.push(8)\n\nassert quack.pull() == 6\nassert quack.pull() == 7\nassert quack.pull() == 8\n\nassert not quack\n","repo_name":"gubenkoved/daily-coding-problem","sub_path":"python/dcp_365_quack.py","file_name":"dcp_365_quack.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"344970008","text":"ficha = []\nalunos =[]\n\nwhile True:\n nome = str(input(\"Digite um nome: \"))\n nota1 = float(input(\"Insira a primeira nota: \"))\n nota2 = float(input(\"Insira a segunda nota: \"))\n media = (nota1 + nota2) / 2\n ficha.append([nome, [nota1, nota2], media])\n resp = str(input(\"Deseja continuar: [S/N] \")).strip().upper()[0]\n if resp == \"N\":\n break\nprint(\"-=\" * 13)\nprint(f\"{'No.':<4}{'NOME':<10}{'MÉDIA':>8}\")\nprint(\"-=\" * 13)\nfor i, a in enumerate(ficha):\n print(f'{i:<4} {a[0]:<10} {a[2]:>5.1f}')\nwhile True:\n print(\"-=\" * 15)\n opc = int(input(\"Mostra as notas de qual aluno? 
(999 para encerrar): \"))\r\n if opc == 999:\r\n print(\"Finalizando...\")\r\n break\r\n if opc < len(ficha):\r\n print(f\"Notas de {ficha[opc][0]} são {ficha[opc][1]}\")","repo_name":"GuBenez/ficha-alunos","sub_path":"ficha de alunos.py","file_name":"ficha de alunos.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"14099983853","text":"# -*- coding: UTF-8 -*-\n\n\"\"\"Read column from table in database\n\"\"\"\n\n###############################################################################\n\nimport os\nimport csv\n\nfrom bs4 import BeautifulSoup\nimport mysql.connector\nfrom mysql.connector import errorcode\nimport pandas as pd\n\nfrom kiwifarmer import base, templates\n\n###############################################################################\n\nCOMMAND = 'SELECT post_id FROM posts'\n\nDATABASE = 'kiwifarms_20210224'\n\nOUTPUT_CSV = '../../data_20210224/reaction_url_list.txt'\n\n###############################################################################\n\nif __name__ == '__main__':\n\n cnx = mysql.connector.connect(\n user = os.getenv( 'KIWIFARMER_USER'),\n password = os.getenv( 'KIWIFARMER_PASSWORD' ),\n host = '127.0.0.1',\n database = DATABASE,\n charset = 'utf8mb4',\n collation = 'utf8mb4_bin',\n use_unicode = True )\n\n df = pd.read_sql(\n sql = COMMAND,\n con = cnx )\n\n to_url = lambda s : f'https://kiwifarms.net/posts/{s}/reactions?reaction_id=0&list_only=1&page=1'\n url_list = df[ 'post_id' ].apply( to_url )\n\n url_list.to_csv( OUTPUT_CSV, header = False, index = False )\n\n cnx.close( )\n\n###############################################################################","repo_name":"gaius-gracchus/KiwiFarmer","sub_path":"workflow/06-A_get_reaction_url_list.py","file_name":"06-A_get_reaction_url_list.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"18522368005","text":"import sys\r\n\r\nN = int(sys.stdin.readline())\r\n# undirected tree (edges stored both ways)\r\nadjl = [[] for _ in range(N)]\r\n# directed tree, built while running the DFS\r\nadjl2 = [[] for _ in range(N)]\r\nfor _ in range(N - 1):\r\n a, b = map(int, sys.stdin.readline().split())\r\n adjl[a - 1].append(b - 1)\r\n adjl[b - 1].append(a - 1)\r\nvisit = [0] * N\r\ndp = {}\r\nvisit[0] = 1\r\nstk = [0]\r\nwhile len(stk) >= 1:\r\n current = stk[-1]\r\n for nxt in adjl[current]:\r\n if visit[nxt] == 0:\r\n visit[nxt] = 1\r\n stk.append(nxt)\r\n # build the directed tree\r\n adjl2[current].append(nxt)\r\n current = nxt\r\n break\r\n else:\r\n if not adjl2[current]:\r\n dp[current, 1] = 1\r\n dp[current, 0] = 0\r\n else:\r\n ans1 = 1\r\n ans0 = 0\r\n for nxt in adjl2[current]:\r\n ans1 += min(dp[nxt, 0], dp[nxt, 1])\r\n ans0 += dp[nxt, 1]\r\n dp[current, 1] = ans1\r\n dp[current, 0] = ans0\r\n stk.pop()\r\nprint(min(dp[0, 0], dp[0, 1]))","repo_name":"Mabaragi/problems","sub_path":"백준/Gold/2533. 
사회망 서비스(SNS)/사회망 서비스(SNS).py","file_name":"사회망 서비스(SNS).py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"69894074459","text":"# LinkedList\n\nclass Node():\n\tdef __init__(self, val):\n\t\tself.val = val\n\t\tself.next = None\n\nclass LinkedList():\n\tdef __init__(self, head):\n\t\tself.head = head\n\t\ttemp = self.head\n\t\twhile temp.next:\n\t\t\ttemp = temp.next\n\t\tself.last = temp\n\t\n\t\n\tdef insert_first(self, node):\n\t\tnode.next = self.head\n\t\tself.head = node\n\t\n\t\n\tdef insert_last(self, node):\n\t\tself.last.next = node\n\t\tself.last = node\n\t\n\t\n\tdef del_first(self):\n\t\tself.head = self.head.next\n\t\t\n\t\n\tdef del_last(self):\n\t\t# walk to the node just before the tail, then drop the tail\n\t\tif self.head is self.last:\n\t\t\tself.head = self.last = None\n\t\t\treturn\n\t\ttemp = self.head\n\t\twhile temp.next is not self.last:\n\t\t\ttemp = temp.next\n\t\ttemp.next = None\n\t\tself.last = temp\n\t\t\n\t\n\tdef to_list(self):\n\t\tresult = []\n\t\ttemp = self.head\n\t\twhile temp:\n\t\t\tresult.append(temp.val)\n\t\t\ttemp = temp.next\n\t\treturn result\n\n\nlinkedlist = LinkedList(Node(1))\n\nlinkedlist.insert_last(Node(2))\nprint(linkedlist.to_list()) # [1, 2]\n\nlinkedlist.insert_first(Node(0))\nprint(linkedlist.to_list()) # [0, 1, 2]\n\nlinkedlist.del_first()\nprint(linkedlist.to_list()) # [1, 2]\n\t","repo_name":"AbandonBlue/Coding-Every-Day","sub_path":"Data_Structure/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"3192604079","text":"import enum\nfrom sqlalchemy import (\n Boolean, Column, DateTime, Enum, Float, \n ForeignKey, Integer, String, Table\n)\n\nfrom db import metadata\n\n\nclass DeviceMode(enum.Enum):\n cool = 'cool'\n heat = 'heat'\n\n\nusers = Table(\n 'users',\n metadata,\n Column('id', Integer, primary_key=True, index=True),\n Column('username', String, unique=True, index=True),\n Column('password', String),\n)\n\n\ntokens = Table(\n 'tokens',\n metadata,\n Column('id', Integer, primary_key=True),\n Column(\n 'token',\n String,\n unique=True,\n nullable=False,\n index=True,\n ),\n Column('expires', DateTime()),\n Column('user_id', ForeignKey('users.id')),\n)\n\n\ndevices = Table(\n 'devices',\n metadata,\n Column(\n 'id',\n Integer,\n primary_key=True,\n unique=True,\n ),\n Column(\n 'serial_number',\n Integer,\n unique=True\n ),\n Column('on', Boolean, default=False),\n Column('status_wifi', Boolean, default=False),\n Column('temp', Float, default=0.0),\n Column('temperature', Integer, default=0),\n Column('brightness', Integer, default=100),\n Column('thermostat', Enum(DeviceMode), default=DeviceMode.cool),\n Column('controls_locked', Boolean, default=False),\n Column('owner_id', Integer, ForeignKey('users.id')),\n)\n\n\nrequests = Table(\n 'requests',\n metadata,\n Column('id', Integer, primary_key=True, index=True),\n Column('status', Boolean),\n Column('error', String, nullable=True),\n Column('user_id', Integer, ForeignKey('users.id')),\n Column('device_id', Integer, ForeignKey('devices.id')),\n Column('created', DateTime()),\n)\n","repo_name":"Aproniter/thermostat_test","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"17871323500","text":"\n\nclass Activity(object):\n \n \n def __init__(self, industry, data):\n \n self.industry = industry\n \n self.uid = data[0] \n self.name = data[1]\n self.iconNo = data[2]\n self.description = data[3]\n self.published = 
data[4]\n","repo_name":"PageArkanis/StEVE","sub_path":"steve/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"22151522324","text":"N, A, B = map(int, input().split())\nS = input()\n\nif N == 1 or S == S[::-1]:\n print(0)\n exit()\n\nans_list = set()\n\nfor i in range(N):\n ans = 0\n if i != 0:\n S = S[1:N] + S[0]\n ans += i * A\n half = N // 2\n if N % 2 == 0:\n left = list(S[0:half])\n right = list(S[half:N])[::-1]\n for j in range(len(left)):\n if left[j] != right[j]:\n ans += B\n else:\n left = list(S[0:half])\n right = list(S[half + 1 : N])[::-1]\n for j in range(len(left)):\n if left[j] != right[j]:\n ans += B\n ans_list.add(ans)\n\nprint(min(ans_list))\n","repo_name":"daiki0381/AtCoder","sub_path":"abc286/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"33850009461","text":"from dash.dependencies import Input, Output,State\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport dash_table\nimport dash\n\nfrom tabs import tab_functions as tf, variables as var\n\ntab_2_layout= html.Div([\n html.Div(id=\"information_container\",\n children=[\n html.Div(id=\"sql_container\",\n children=[\n dcc.Textarea(id=\"sql_script\",placeholder=var.query_tab_sql_ph,\n style={'width': '80%', 'height': 300},),\n dbc.Button('Submit',id=\"button_21\",className=\"buttons\",n_clicks=0)],\n className='containers'),\n html.Div( id=\"table_list_container\",\n children=[\n html.H4('List of Tables Available'),\n tf.generate_button(var.button_dict['button_1'],\"button_1\"),\n tf.generate_button(var.button_dict['button_2'],\"button_2\"),\n tf.generate_button(var.button_dict['button_3'],\"button_3\"),\n tf.generate_button(var.button_dict['button_4'],\"button_4\")],\n className='containers')]),\n html.Div(id='intermediate-value', style={'display': 'none'}),\n html.Br(),\n html.Div(id=\"output-container\",className='containers'),\n ])\n","repo_name":"HumzaA94/genrewithdata","sub_path":"tabs/query_tab.py","file_name":"query_tab.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"14042689420","text":"import os\nimport sys\nimport json\nimport sqlite3\nimport time\nimport re\n\n\n\ndef searchNames():\n # connect to the database\n connect = sqlite3.connect(sys.argv[1])\n cursor = connect.cursor()\n\n # query all table names and collect every table's column names\n cursor.execute(\"select name from sqlite_master where type='table'\")\n tab_name = cursor.fetchall()\n tab_name = [line[0] for line in tab_name]\n# print(tab_name)\n col_names=[]\n col_dic={}\n for line in tab_name:\n cursor.execute('pragma table_info({})'.format(line))\n col_name=cursor.fetchall()\n col_name=[x[1] for x in col_name]\n col_names.append(col_name)\n col_dic[line] = col_name\n col_name=tuple(col_name)\n \n print(json.dumps(col_dic))\n\n\n\nif __name__ == '__main__':\n searchNames()\n\n \n","repo_name":"aikesi128/echartCreater","sub_path":"test_mac/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"40284336201","text":"# coding=utf-8\nclass Solution(object):\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not 
return anything, modify nums in-place instead.\n \"\"\"\n size = len(nums)\n count = 0\n offset = 0\n for i in range(0, size):\n if nums[i] != 0:\n nums[offset], nums[offset + count] = nums[offset + count], nums[offset]\n offset += 1\n else:\n count += 1\n\n if offset + count > size:\n break\n\n # no need to return when submitting to LeetCode\n return nums\n\n\ns = Solution()\nprint(s.moveZeroes([1, 0, 1, 0, 3, 12]))\n","repo_name":"zhangpeng-fzu/leetcode","sub_path":"Move Zeroes.py","file_name":"Move Zeroes.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"16242235294","text":"from django import forms\r\nfrom inscricao.models import Cupom\r\n\r\nclass CupomForm(forms.ModelForm):\r\n\tdef __init__(self, *args, **kwargs):\r\n\t\tself.user = kwargs.pop(\"user\") # receiving the logged-in user\r\n\t\tsuper(CupomForm, self).__init__(*args, **kwargs)\t\t\t\r\n\t\tself.fields['desconto'].label = 'Desconto em porcento'\r\n\r\n\tclass Meta:\r\n\t\tmodel = Cupom\r\n\t\texclude = ['data_de_fim','data_de_inicio','evento']\t\t\r\n\t\tfields = '__all__'\t","repo_name":"fabiomsrs/university_project","sub_path":"evento/appweb/forms/cadastroCupomForm.py","file_name":"cadastroCupomForm.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"45199310445","text":"import os\nimport sys\nimport logging\nfrom pickle import FALSE\nfrom datetime import datetime\nfrom threading import Timer\nfrom pathlib import Path\nimport io\nimport time\nimport requests\nimport pathlib\nimport aiohttp\nimport atexit \n\n# Discord.py libraries\nimport discord\nfrom discord.ext import commands\n\n# \"Clever answers\" script\nimport response\n# Special commands script\nimport advancedcommands\n# File load/save script\nimport iniLoad\n\n# Youtube sync script\nimport youtube\n# Anime news sync script\nimport anime\n# Word splitter\nimport commandrecognazer\n\n# Bot will wait for a stable connection\nlogging.warning('Waiting for connection')\nconnectionActive=0\n\nwhile connectionActive<3:\n try:\n requests.head(\"http://www.google.com/\", timeout=3)\n connectionActive=connectionActive+1\n except requests.ConnectionError:\n logging.warning('Connection Error')\n connectionActive=0\n time.sleep(3)\ntime.sleep(2)\n\n# Change working dir so the script's relative paths work\nos.chdir(os.path.dirname(sys.argv[0]))\n\n# Bot is up and running\nlogging.warning('Bot starting now!')\n\nclient = discord.Client()\nguild = discord.Guild\n\n# 'be silent' feature\nsend_this_when_free=''\nsilient_mode=False\ndef after_silience():\n global silient_mode\n silient_mode=False\n\n# On message Event\n@client.event\nasync def on_message(message):\n #try:\n if True:\n global silient_mode\n global send_this_when_free\n \n # Do this once a day\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n daily=\"\"\n # create the file 
if it does not exist\n fle = Path('../files_conf/daily.txt')\n fle.touch(exist_ok=True)\n filee = io.open('../files_conf/'+\"daily.txt\", mode=\"r\", encoding=\"utf-8\")\n for datee in filee:\n daily=datee\n filee.close()\n daily_do=dt_string = now.strftime(\"%d/%m/%Y\")\n if daily_do!=daily:\n f=io.open('../files_conf/'+\"daily.txt\", \"w\", encoding=\"utf-8\")\n f.write(daily_do)\n f.close()\n ## what runs once a day:\n await anime.animenews(client)\n await youtube.youtubeUpdate(client,10)\n print(\"Daily commands done\")\n\n # save every message to the log file\n f=open(\"../files_conf/log.txt\", \"a+\")\n f.write(dt_string+\" // \"+message.author.name+\" : \\\"\"+message.content+\"\\\"\\n\")\n\n # Cache user information\n _cecheGroupName=str(message.author.id)\n iniLoad.iniChange(\"dane.conf\",_cecheGroupName,'name',str(message.author.name))\n iniLoad.iniChange(\"dane.conf\",_cecheGroupName,'avatar',str(message.author.avatar_url))\n iniLoad.iniChange(\"dane.conf\",_cecheGroupName,'display_name',str(message.author.display_name))\n iniLoad.iniChange(\"dane.conf\",_cecheGroupName,'discriminator',str(message.author.discriminator))\n iniLoad.iniChange(\"dane.conf\",_cecheGroupName,'hash',str(hash(message.author)))\n \n # Ignore messages from other bots\n channel = message.channel\n if message.author.bot:\n return\n \n # If a message was queued while the bot was silent, send it now\n if send_this_when_free != '':\n await channel.send(send_this_when_free)\n send_this_when_free=''\n \n # Store the message content as tables of split words. The first one drops duplicates and lowercases words.\n tabela=commandrecognazer.word_in_message(message.content,False) # lowercase words, without repetitions\n tabela_pierwotna=commandrecognazer.word_in_message(message.content,True) # original words\n \n # Check if the bot has a clever answer for this message\n resp = response.response_list(tabela,tabela_pierwotna,channel,guild,message,client) # standard questions and answers\n \n # Then check if this message is a special command\n resp2 = await advancedcommands.response_list(tabela,tabela_pierwotna,channel,guild,message,client) # advanced commands\n\n # Start/end silent mode\n if resp2 != '':\n resp=resp2\n if resp2 == '&start_silient':\n resp=''\n await channel.send('Dobra, będę cicho')\n silient_mode=True\n Timer(5*60, after_silience, args=None, kwargs=None).start()\n if resp2 == '&end_silient':\n resp=''\n after_silience()\n await channel.send('Ok')\n \n # Block user with inappropriate name\n tabela_usera=commandrecognazer.word_in_message(message.author.name,False) \n if response.blacklist_usernames(tabela_usera)=='ban':\n resp='Pan ślazatek bezpieczenstwa pilnuje, '+message.author.name+' bana na serwera otrzymuje :)'\n await message.author.ban(reason = \"System ślazatkowych zabezpieczeń wykrył zakazany nick\")\n \n # If a response for the message exists, send it\n if resp != '' and silient_mode==False:\n await channel.send(resp)\n \n f.close()\n #except aiohttp.ClientConnectorError:\n # logging.warning('Connection error! Restart... ')\n # os.execv(sys.executable, ['python3']+ [sys.argv[0]] )\n\n@atexit.register \ndef goodbye(): \n logging.warning('Connection error! Restart... 
')\n os.execv(sys.executable, ['python3']+ [sys.argv[0]] )\n\n# Load bot token\n# you can get one from here https://discord.com/developers/\ngot_id=iniLoad.iniLoad('dane.conf','Client','id','0')\nif got_id == '0':\n # Input discord token \n print('Enter discord bot token:')\n got_id = input()\n iniLoad.iniChange('dane.conf','Client','id',got_id)\n # Input username with special privilages\n print('Enter your discord username:')\n specialUser = input()\n iniLoad.iniChange('dane.conf','Client','admin',specialUser)\n print('Thanks, bot will start in a second')\nclient.run(got_id)\n","repo_name":"Mestuq/SlazAI","sub_path":"slaz-AI/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"8217771558","text":"import argparse\nfrom collections import defaultdict\nfrom train_perceptron import create_features, predict_one\n\n\ndef arguments_parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('weights', type=str)\n parser.add_argument('input_', type=str)\n return parser.parse_args()\n\n\ndef load_weights(weights_file):\n w = defaultdict(float)\n with open(weights_file, 'r', encoding='utf-8') as w_file:\n for line in w_file:\n key, value = line.rstrip().split('\\t')\n w[key] = float(value)\n return w\n\n\ndef test_perceptron(w, input_file):\n with open(input_file, 'r', encoding='utf-8') as i_file:\n for line in i_file:\n snetence = line.rstrip()\n phi = create_features(snetence)\n prediction = predict_one(w, phi)\n print(f'{prediction}\\t{snetence}')\n\n\nif __name__ == '__main__':\n args = arguments_parse()\n weights_file = args.weights if args.weights else r'uni_weights.txt'\n input_file = args.input_ if args.input_ else r'../../data/titles-en-test.word'\n\n w = load_weights(weights_file)\n test_perceptron(w, input_file)\n","repo_name":"gori-san/NLP_tutorial","sub_path":"chapter05/test_perceptron.py","file_name":"test_perceptron.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"3509913716","text":"from flask import Flask, render_template\nimport dapr.clients\nimport os\nimport json\n\napp = Flask(__name__)\n\ndef sql_output(provedor):\n dapr_client = dapr.clients.DaprClient()\n with dapr_client:\n sqlCmd = (f\"SELECT companhia FROM providers WHERE nome_provedor='{provedor}'\")\n payload = {'sql': sqlCmd}\n try:\n resp_ = dapr_client.invoke_binding(binding_name=\"postgres-db\", operation='query', binding_metadata=payload, data='')\n resp_dict = json.loads(resp_.data.decode('utf-8'))\n resp = [f'{resp_dict[0][0]} é a companhia proprietária do provedor de cloud {provedor} !'] \n return [resp[0]]\n except Exception as e:\n print(e, flush=True)\n raise SystemExit(e)\n\n \n@app.route(\"/\")\ndef home():\n meu_provedor = os.environ.get('PROVEDOR')\n resultado = sql_output(meu_provedor)\n return render_template(\"index.html\", resultado=resultado, provedor=meu_provedor)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"adansmashni/ms-multicloud-sample","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"40356237203","text":"import os\nimport pandas as pd\nimport re\nimport numpy as np\nfrom nltk.tokenize import word_tokenize, sent_tokenize\n\n\n\n\ndef predict(edss, note):\n \n\n p = 
re.compile(r\"subjective cognitive complaints\", re.IGNORECASE)\n p2 = re.compile(r\"Montreal Cognitive Assessment|MoCA\", re.IGNORECASE)\n p3 = re.compile(r\"(?:mild|Mild) cognitive challenge\", re.IGNORECASE)\n p4 = re.compile(r\"(?:mild|Mild) fatigue\", re.IGNORECASE)\n p_neg = re.compile(r\"No | no | deni|not have|not had\", re.IGNORECASE)\n\n # Unknown by default\n score = -1\n \n if edss == 0:\n if len(re.findall(r\"fatigue\", note)) == 0:\n score = 0\n else:\n score = 1\n \n \n # MoCA test\n sentences = sent_tokenize(note)\n possible_sentences = []\n for sent in sentences:\n if len(re.findall(p2, sent)) > 0 and len(re.findall(r\"30\\/30\", sent)) > 0:\n score = 0\n break\n if len(re.findall(p2, sent)) > 0 and len(re.findall(r\"(?:25|26|27|28|29)\\/30\", sent)) > 0:\n score = 1\n break\n if len(re.findall(p2, sent)) > 0 and len(re.findall(r\"(?:20|21|22|23|24)\\/30\", sent)) > 0:\n score = 2\n break\n if len(re.findall(p2, sent)) > 0 and len(re.findall(r\"(?:10|11|12|13|14|15|16|17|18|19)\\/30\", sent)) > 0:\n score = 3\n break\n \n if len(re.findall(p3, sent)) > 0 or len(re.findall(p4, sent)) > 0:\n score = 1\n break\n \n if len(re.findall(p3, sent)) > 0 and len(re.findall(p4, sent)) > 0:\n score = 2\n break\n \n # Collect all sentences that have cognition|cognitive\n if len(re.findall(r\"Cognition|cognition|Cognitive|cognitive\", sent)) > 0:\n possible_sentences.append(sent)\n \n \n for sent in possible_sentences:\n if len(re.findall(p_neg, sent)) > 0 or len(re.findall(r\"no longer has|significantly better\", sent)) > 0:\n score = 0\n break\n elif len(re.findall(r\"mild|Mild\", sent)) > 0:\n score = 1\n elif len(re.findall(r\"significant\", sent)) > 0:\n score = 3\n else:\n score = 2\n \n # significant cognitive issue\n if len(re.findall(r\"significant cognitive issue|progressive cognitive (?:and physical) decline\", note)) > 0:\n if edss == 0.0:\n score = 1\n elif edss > 3.0:\n score = 3\n else:\n score = 2\n\n \n if len(re.findall(p, note)) > 0:\n score = 1\n if edss == 2.0:\n #df['score_brain_stem_subscore'][i] == 0 and \\\n #df['score_cerebellar_subscore'][i] == 0 and \\\n #df['score_ambulation_subscore'][i] == 0 and \\\n #df['score_visual_subscore'][i] == 0 and \\\n #df['score_pyramidal_subscore'][i] == 0 and \\\n #df['score_sensory_subscore'][i] == 0 and \\\n #df['score_bowel_bladder_subscore'][i] == 0:\n #print(\"Subjective cognitive complaints\")\n score = 2\n \n return score\n\n\n\"\"\"\n\nif __name__ == \"__main__\":\n\n\n df = pd.read_csv(\"Z:/LKS-CHART/Projects/ms_clinic_project/data/nlp_data/visit_level_data/valid_data.csv\")\n df = df.dropna()\n df = df.reset_index()\n\n\n #labels = [convert_score(df[\"score_visual_subscore\"][i]) for i in range(df.shape[0])]\n labels = np.array(df[\"score_mental_subscore\"])\n #labels[labels == -1] = 6\n predictions = []\n for i in range(df.shape[0]): \n\n #print(i) \n #print(df[\"score_mental_subscore\"][i], score_mental_prediction(i,df))\n predictions.append(score_mental_prediction(i, df))\n\n\n\n labels = np.array(labels)\n labels[labels == -1] = 6\n predictions = np.array(predictions)\n predictions[predictions == -1] = 6\n print(classification_report(labels, predictions, digits = 4))\n\n # Confusion Matrix\n cm = confusion_matrix(labels, predictions)\n df_cm = pd.DataFrame(cm, range(cm.shape[0]), range(cm.shape[0]))\n df_cm.columns = [\"0\",\"1\",\"2\",\"3\",\"4\",\"-1\"]\n df_cm.rename(index = {0:\"0\",1:\"1\",2:\"2\",3:\"3\",4:\"4\",5:\"-1\"}, inplace = True)\n plt.figure(figsize = (10, 8))\n sn.set(font_scale = 1) # for label 
size\n sn.heatmap(df_cm, annot = True, fmt = 'g', annot_kws = {\"size\": 15}) # font size\n plt.show()\n\n # Converted Accuracy\n wrong_predictions = []\n count = 0\n\n for i in range(len(labels)):\n\n if predictions[i] == labels[i]:\n count += 1\n elif predictions[i] != -1 and labels[i] != -1:\n if abs(labels[i] - predictions[i]) <= 1:\n count += 1\n else:\n wrong_predictions.append(i)\n\n converted_acc = count / len(labels)\n print(\"Converted Accuracy: \", converted_acc)\n\n # Accuracy\n count = 0\n for i in range(len(labels)):\n if predictions[i] == labels[i]:\n count += 1\n print(\"Accuracy: \", count / len(labels))\n\n\"\"\"","repo_name":"NLP4H/MSBC","sub_path":"baseline/baseline_code/score_mental_subscore.py","file_name":"score_mental_subscore.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"74242911580","text":"# -*- coding: utf-8 -*-\n'''\n\n'''\n#import sys\n#sys.path.insert(0, '/home/gavin/dev/scikit-learn')\n#import sklearn\n#print sklearn.__version__\nfrom os.path import dirname, join\ntry:\n import cPickle as pickle\nexcept:\n import pickle # lint:ok\nfrom sklearn.ensemble import RandomForestClassifier\n\n\nclass Lexent_classifier_sub:\n\n def __init__(self):\n sub_model_file = join(dirname(__file__),\n '../classifier_models/sub_model.p')\n training_file = open(sub_model_file)\n training_data = pickle.load(training_file)\n training_file.close()\n sub_target_file = join(dirname(__file__),\n '../classifier_models/sub_targets.p')\n targets_file = open(sub_target_file)\n targets = pickle.load(targets_file)\n targets_file.close()\n self.clf = RandomForestClassifier()\n self.clf.fit(training_data, targets)\n\n def predict(self, feature_vector):\n return self.clf.predict(feature_vector)","repo_name":"imclab/entailment-api","sub_path":"classifiers/lexent_classifier_sub.py","file_name":"lexent_classifier_sub.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"9312763015","text":"import webbrowser\nimport os\nimport sys\nsys.path.append(os.environ.get('EvoAI'))\nfrom Base.Functionalities import speak\nfrom urlextract import URLExtract\n\ndef visitWebsite(query: str):\n removableQuery = [\"visit\", \"website\", \"open\", \"start\", \"launch\"]\n extractor = URLExtract()\n urls = extractor.find_urls(query)\n if urls:\n speak(f\"visiting {urls[0]}\")\n webbrowser.open(urls[0])\n else:\n for word in removableQuery:\n query = query.replace(word, \"\")\n speak(f\"Performing a regular search for '{query}'...\")","repo_name":"JunaidParkar/A.R.T.E.X","sub_path":"Workers/WebsiteVisitor.py","file_name":"WebsiteVisitor.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"21623674441","text":"#!/usr/bin/env python\n# Gathers beliefs that are published on the trace topic and provides a service\n# to query on the robot's belief thus far\n\nfrom __future__ import print_function, division\n\nimport numpy as np\n\nfrom threading import Lock\n\nimport rospy\n\nfrom std_srvs.srv import Trigger, TriggerResponse\nfrom task_execution_msgs.msg import ExecutionEvent, BeliefKeys\nfrom task_execution_msgs.srv import GetBeliefs, GetBeliefsResponse\n\n\n# The actual beliefs node\n\nclass BeliefsServer(object):\n \"\"\"\n This server listens on the execution trace topic for updates to beliefs.\n When an 
update is sent out, it stores the result. A service call can later\n fetch the desired belief value\n \"\"\"\n\n EXECUTION_TRACE_TOPIC = '/execution_monitor/trace'\n\n def __init__(self):\n self._beliefs_lock = Lock()\n\n # Provide a service to reload; then reload\n self._reload_service = rospy.Service('~reload', Trigger, self.reload)\n self.reload()\n\n # Create the subscriber\n self._trace_sub = rospy.Subscriber(BeliefsServer.EXECUTION_TRACE_TOPIC, ExecutionEvent, self._on_trace)\n\n # Create and initialize the service servers\n self._get_beliefs_service = rospy.Service('~get_beliefs', GetBeliefs, self.get_beliefs)\n\n def start(self):\n # This is a no-op at the moment\n rospy.loginfo(\"Beliefs node ready...\")\n\n def reload(self, req=None):\n # Reinitialize the dictionary of beliefs\n with self._beliefs_lock:\n self.beliefs = { getattr(BeliefKeys, attr): 0.0 for attr in dir(BeliefKeys) if attr.isupper() }\n\n def _on_trace(self, msg):\n # If this is not a belief event, ignore it\n if msg.type != ExecutionEvent.BELIEF_EVENT:\n return\n\n # Otherwise, update the known beliefs\n with self._beliefs_lock:\n self.beliefs[getattr(BeliefKeys, msg.name, msg.name)] = msg.belief_metadata.value\n\n\n def get_beliefs(self, req):\n # Simply do a dictionary lookup of the beliefs we know about\n beliefs, values = [], []\n\n with self._beliefs_lock:\n for belief in (req.beliefs or self.beliefs.keys()):\n # Disambiguate the belief\n belief = getattr(BeliefKeys, belief, belief)\n\n # Then append the belief and the value to the respons\n values.append(self.beliefs[belief])\n beliefs.append(belief)\n\n return GetBeliefsResponse(beliefs, values)\n","repo_name":"GT-RAIL/derail-fetchit-public","sub_path":"task_execution/task_executor/src/task_executor/beliefs.py","file_name":"beliefs.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"} +{"seq_id":"22473238310","text":"import numpy as np\nimport pandas as pd\nimport streamlit as st\nfrom pandas_profiling import ProfileReport\nimport streamlit_pandas_profiling\nfrom streamlit_pandas_profiling import st_profile_report\nimport streamlit.components.v1 as components\n\nst.set_page_config(layout = \"wide\")\nst.markdown(\n \"\"\"\n \"\"\",\n unsafe_allow_html=True,\n)\n#the title of the web app\nhtml_temp = \"\"\"\n
<div>
<h2>Data Profiling</h2>
</div>
\n \"\"\"\n\nst.markdown(html_temp,unsafe_allow_html=True)\n\n# Upload CSV data\nst.subheader('Upload your CSV data')\nuploaded_file = st.file_uploader(\"Upload your input CSV file\", type=[\"csv\"])\n\n# Pandas Profiling Report\nif uploaded_file is not None:\n @st.cache\n def load_csv():\n csv = pd.read_csv(uploaded_file,encoding = 'unicode_escape', engine ='python')\n return csv\n df = load_csv()\n pr = ProfileReport(df, explorative=True)\n st.header('**Input DataFrame**')\n st.write(df.head(6))\n st.write('---')\n st.header('**Pandas Profiling Report**')\n st_profile_report(pr)\nelse:\n st.info('Awaiting for CSV file to be uploaded.')\n\ncheck=st.checkbox(\"Load offline Profiling file\")\nif check:\n plot_file = open('./report.html','r')\n # plot = plot_file.read()\n # st.markdown(plot,unsafe_allow_html=True)\n # plot = plot_file.close()\n components.html(plot_file.read(), height=11800, scrolling=True)\n\n # brief about the app\ndef main():\n st.sidebar.text(\" \")\n pass\n\nif __name__ == '__main__':\n main()\n\n st.sidebar.text(\" \")\n st.sidebar.header(\"About App\")\n st.sidebar.markdown(\"A Simple EDA App for full Analysis Dataset.\")\n st.sidebar.markdown(\"Upload any dataset in CSV format, it will show the full analysis with visualization\")\n st.sidebar.info(\"Developed By: \\t \\t \\t Sai Teja\")","repo_name":"saitej123/EDA_App","sub_path":"Data_profiling.py","file_name":"Data_profiling.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"1140494594","text":"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport json\nimport os\n\nimport tenacity\nimport time\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nfrom oslo_messaging import conffixture\n\nfrom congress.datalog import compile\nfrom congress.datalog import unify\nfrom congress.policy_engines import agnostic\n\nfrom congress.dse2 import dse_node\n\n\nLOG = logging.getLogger(__name__)\n\nROOTDIR = os.path.dirname(__file__)\nETCDIR = os.path.join(ROOTDIR, 'etc')\n\n# single, global variable used to ensure different tests from\n# different subclasses of TestCase all can get a unique ID\n# so that the tests do not interact on oslo-messaging\npartition_counter = 0\n\n\ndef make_dsenode_new_partition(node_id,\n messaging_config=None,\n node_rpc_endpoints=None):\n \"\"\"Get new DseNode in it's own new DSE partition.\"\"\"\n messaging_config = messaging_config or generate_messaging_config()\n node_rpc_endpoints = node_rpc_endpoints or []\n return dse_node.DseNode(messaging_config, node_id, node_rpc_endpoints,\n partition_id=get_new_partition())\n\n\ndef make_dsenode_same_partition(existing,\n node_id,\n messaging_config=None,\n node_rpc_endpoints=None):\n \"\"\"Get new DseNode in the same DSE partition as existing (node or part).\"\"\"\n partition_id = (existing.partition_id if\n isinstance(existing, dse_node.DseNode) else existing)\n\n messaging_config = messaging_config or generate_messaging_config()\n node_rpc_endpoints = node_rpc_endpoints or []\n return dse_node.DseNode(\n messaging_config, node_id, node_rpc_endpoints, partition_id)\n\n\ndef get_new_partition():\n \"\"\"Create a new partition number, unique within each process.\"\"\"\n global partition_counter\n old = partition_counter\n partition_counter += 1\n return old\n\n\ndef generate_messaging_config():\n mc_fixture = conffixture.ConfFixture(cfg.CONF)\n 
mc_fixture.conf.transport_url = 'kombu+memory://'\n messaging_config = mc_fixture.conf\n messaging_config.rpc_response_timeout = 10\n return messaging_config\n\n\ndef etcdir(*p):\n return os.path.join(ETCDIR, *p)\n\n\ndef root_path():\n \"\"\"Return path to root of source code.\"\"\"\n x = os.path.realpath(__file__)\n x, y = os.path.split(x) # drop \"helper.py\"\n x, y = os.path.split(x) # drop \"tests\"\n x, y = os.path.split(x) # drop \"congress\"\n return x\n\n\ndef source_path():\n \"\"\"Return path to root of source code.\"\"\"\n x = os.path.realpath(__file__)\n x, y = os.path.split(x) # drop \"helper.py\"\n x, y = os.path.split(x) # drop \"tests\"\n return x\n\n\ndef data_module_path(file):\n \"\"\"Return path to dataservice module with given FILEname.\"\"\"\n path = source_path()\n path = os.path.join(path, \"datasources\")\n path = os.path.join(path, file)\n return path\n\n\ndef policy_module_path():\n \"\"\"Return path to policy engine module.\"\"\"\n path = source_path()\n path = os.path.join(path, \"policy_engines\")\n path = os.path.join(path, \"agnostic.py\")\n return path\n\n\ndef api_module_path():\n \"\"\"Return path to api module.\"\"\"\n path = source_path()\n path = os.path.join(path, \"datasources\")\n path = os.path.join(path, \"test_driver.py\")\n return path\n\n\ndef test_path(file=None):\n \"\"\"Return path to root of top-level tests. Joined with file if provided.\"\"\"\n path = source_path()\n path = os.path.join(path, \"tests\")\n if file is not None:\n path = os.path.join(path, file)\n return path\n\n\ndef datasource_config_path():\n \"\"\"Return path to configuration info for datasources.\"\"\"\n path = test_path()\n path = os.path.join(path, \"datasources.conf\")\n return path\n\n\ndef datasource_openstack_args():\n \"\"\"Return basic args for creating an openstack datasource.\"\"\"\n return {'username': '',\n 'password': '',\n 'auth_url': '',\n 'tenant_name': '',\n 'poll_time': 1}\n\n\ndef pause(factor=1):\n \"\"\"Timeout so other threads can run.\"\"\"\n time.sleep(factor * 1)\n\n\ndef datalog_same(actual_code, correct_code, msg=None):\n return datalog_equal(\n actual_code, correct_code, msg=msg,\n equal=lambda x, y: unify.same(x, y) is not None)\n\n\ndef datalog_equal(actual_code, correct_code,\n msg=None, equal=None, theories=None,\n output_diff=True):\n \"\"\"Check equality.\n\n Check if the strings given by actual_code\n and CORRECT_CODE represent the same datalog.\n \"\"\"\n def minus(iter1, iter2, invert=False):\n extra = []\n for i1 in iter1:\n found = False\n for i2 in iter2:\n # for asymmetric equality checks\n if invert:\n test_result = equal(i2, i1)\n else:\n test_result = equal(i1, i2)\n if test_result:\n found = True\n break\n if not found:\n extra.append(i1)\n return extra\n if equal is None:\n equal = lambda x, y: x == y\n\n LOG.debug(\"** Checking equality: %s **\", msg)\n actual = compile.parse(actual_code, theories=theories)\n correct = compile.parse(correct_code, theories=theories)\n extra = minus(actual, correct)\n # in case EQUAL is asymmetric, always supply actual as the first arg\n # and set INVERT to true\n missing = minus(correct, actual, invert=True)\n if output_diff:\n output_diffs(extra, missing, msg)\n LOG.debug(\"** Finished equality: %s **\", msg)\n is_equal = len(extra) == 0 and len(missing) == 0\n if not is_equal:\n LOG.debug('datalog_equal failed, extras: %s, missing: %s', extra,\n missing)\n return is_equal\n\n\ndef db_equal(actual_string, correct_string, output_diff=True):\n \"\"\"Check if two strings representing data 
theories are the same.\"\"\"\n actual = agnostic.string_to_database(actual_string)\n correct = agnostic.string_to_database(correct_string)\n return check_db_diffs(actual, correct, output_diff=output_diff)\n\n\ndef check_db_diffs(actual, correct, output_diff=True):\n extra = actual - correct\n missing = correct - actual\n extra = [e for e in extra if not e[0].startswith(\"___\")]\n missing = [m for m in missing if not m[0].startswith(\"___\")]\n if output_diff:\n output_diffs(extra, missing, actual=actual)\n return len(extra) == 0 and len(missing) == 0\n\n\ndef output_diffs(extra, missing, actual=None):\n if len(extra) > 0:\n print(\"Extra tuples\")\n print(\", \".join([str(x) for x in extra]))\n if len(missing) > 0:\n print(\"Missing tuples\")\n print(\", \".join([str(x) for x in missing]))\n if len(extra) > 0 or len(missing) > 0:\n print(\"Resulting database: {}\".format(str(actual)))\n\n\ndef str2form(formula_string, theories=None):\n return compile.parse1(formula_string, theories=theories)\n\n\ndef str2pol(policy_string, theories=None):\n return compile.parse(policy_string, theories=theories)\n\n\ndef pol2str(policy):\n return \" \".join(str(x) for x in policy)\n\n\ndef form2str(formula):\n return str(formula)\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(1000),\n wait=tenacity.wait_fixed(0.1))\ndef retry_check_for_last_message(obj):\n if not hasattr(obj, \"last_msg\"):\n raise AttributeError(\"Missing 'last_msg' attribute\")\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(1000),\n wait=tenacity.wait_fixed(0.1))\ndef retry_check_for_message_to_arrive(obj):\n if not hasattr(obj.msg, \"body\"):\n raise AttributeError(\"Missing 'body' attribute\")\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(1000),\n wait=tenacity.wait_fixed(0.1))\ndef retry_check_for_message_data(obj, data):\n if not hasattr(obj.msg, \"body\"):\n raise AttributeError(\"Missing 'body' attribute\")\n if obj.get_msg_data() != data:\n raise TestFailureException(\"Missing expected data in msg\")\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(1000),\n wait=tenacity.wait_fixed(0.1))\ndef retry_check_nonempty_last_policy_change(obj):\n if not hasattr(obj, \"last_policy_change\"):\n raise AttributeError(\"Missing 'last_policy_change' attribute\")\n if obj.last_policy_change is None:\n raise TestFailureException(\"last_policy_change == None\")\n if len(obj.last_policy_change) == 0:\n raise TestFailureException(\"last_policy_change == 0\")\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(1000),\n wait=tenacity.wait_fixed(0.1))\ndef retry_check_empty_last_policy_change(obj):\n if not hasattr(obj, \"last_policy_change\"):\n raise AttributeError(\"Missing 'last_policy_change' attribute\")\n if len(obj.last_policy_change) != 0:\n raise TestFailureException(\"last_policy_change != 0\")\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(1000),\n wait=tenacity.wait_fixed(0.1))\ndef retry_check_db_equal(policy, query, correct, target=None):\n if not hasattr(policy, \"select\"):\n raise AttributeError(\"Missing 'select' attribute\")\n if target is None:\n actual = policy.select(query)\n else:\n actual = policy.select(query, target=target)\n if not db_equal(actual, correct, output_diff=False):\n raise TestFailureException(\n \"Query {} produces {}, should produce {}\".format(\n str(query), str(actual), str(correct)))\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(1000),\n wait=tenacity.wait_fixed(0.1))\ndef retry_check_number_of_updates(deepsix, value):\n if not hasattr(deepsix, 
\"number_of_updates\"):\n raise AttributeError(\"Missing 'number_of_updates' attribute\")\n if deepsix.number_of_updates != value:\n raise TestFailureException(\"number_of_updates is {}, not {}\".format(\n deepsix.number_of_updates, value))\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(1000),\n wait=tenacity.wait_fixed(0.1))\ndef retry_check_subscriptions(deepsix, subscription_list):\n if not check_subscriptions(deepsix, subscription_list):\n raise TestFailureException(\n \"{} does not have subscription list {}\".format(\n deepsix.name, str(subscription_list)))\n\n\ndef check_subscriptions(deepsix, subscription_list):\n \"\"\"Check subscriptions.\n\n Check that the instance DEEPSIX is subscribed to all of the\n (key, dataindex) pairs in KEY_DATAINDEX_LIST. Return True if\n all subscriptions exists; otherwise returns False.\n \"\"\"\n actual = set([(value.key, value.dataindex)\n for value in deepsix.subdata.values()])\n correct = set(subscription_list)\n missing = correct - actual\n if missing:\n LOG.debug(\"Missing key/dataindex subscriptions: %s\", missing)\n return not missing\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(1000),\n wait=tenacity.wait_fixed(0.1))\ndef retry_check_subscribers(deepsix, subscriber_list):\n if not check_subscribers(deepsix, subscriber_list):\n raise TestFailureException(\n \"{} does not have subscriber list {}\".format(\n deepsix.name, str(subscriber_list)))\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(1000),\n wait=tenacity.wait_fixed(0.1))\ndef retry_check_no_subscribers(deepsix, subscriber_list):\n \"\"\"Check that deepsix has none of the subscribers in subscriber_list\"\"\"\n if check_subscribers(deepsix, subscriber_list, any_=True):\n raise TestFailureException(\n \"{} still has some subscribers in list {}\".format(\n deepsix.name, str(subscriber_list)))\n\n\ndef check_subscribers(deepsix, subscriber_list, any_=False):\n \"\"\"Check subscribers.\n\n Check that the instance DEEPSIX includes subscriptions for all of\n the (name, dataindex) pairs in SUBSCRIBER_LIST. Return True if\n all subscribers exist; otherwise returns False.\n\n If any_=True, then return True if ANY subscribers exist in subscriber_list\n \"\"\"\n actual = set([(name, pubdata.dataindex)\n for pubdata in deepsix.pubdata.copy().values()\n for name in pubdata.subscribers])\n correct = set(subscriber_list)\n missing = correct - actual\n if missing:\n LOG.debug(\"Missing name/dataindex subscribers: %s\", missing)\n if any_:\n return (len(missing) < len(actual))\n return not missing\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(20),\n wait=tenacity.wait_fixed(1))\ndef retry_check_function_return_value(f, expected_value):\n \"\"\"Check if function f returns expected key.\"\"\"\n result = f()\n if result != expected_value:\n raise TestFailureException(\n \"Expected value '%s' not received. 
\"\n \"Got %s instead.\" % (expected_value, result))\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(10),\n wait=tenacity.wait_fixed(0.5))\ndef retry_check_function_return_value_not_eq(f, value):\n \"\"\"Check if function f does not return expected value.\"\"\"\n result = f()\n if result == value:\n raise TestFailureException(\n \"Actual value '%s' should be different \"\n \"from '%s'\" % (result, value))\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(10),\n wait=tenacity.wait_fixed(0.5))\ndef retry_til_exception(expected_exception, f):\n \"\"\"Check if function f does not return expected value.\"\"\"\n try:\n val = f()\n raise TestFailureException(\"No exception thrown; received %s\" % val)\n except expected_exception:\n return\n except Exception as e:\n raise TestFailureException(\"Wrong exception thrown: %s\" % e)\n\n\n@tenacity.retry(stop=tenacity.stop_after_attempt(20),\n wait=tenacity.wait_fixed(1))\ndef retry_check_function_return_value_table(f, expected_values):\n \"\"\"Check if function f returns expected table.\"\"\"\n result = f()\n actual = set(tuple(x) for x in result)\n correct = set(tuple(x) for x in expected_values)\n extra = actual - correct\n missing = correct - actual\n if len(extra) > 0 or len(missing) > 0:\n s = \"Actual: %s\\nExpected: %s\\n\" % (result, expected_values)\n if len(extra) > 0:\n s += \"Extra: %s\\n\" % extra\n if len(missing) > 0:\n s += \"Missing: %s\\n\" % missing\n raise TestFailureException(s)\n\n\nclass FakeRequest(object):\n def __init__(self, body):\n self.body = json.dumps(body)\n\n\nclass FakeServiceObj(object):\n def __init__(self):\n self.state = {}\n\n\nclass TestFailureException(Exception):\n \"\"\"Custom exception thrown on test failure\n\n Facilitates using assertRaises to check for failure on retry tests\n (generic Exception in assertRaises disallowed by pep8 check/gate)\n \"\"\"\n def __init__(self, *args, **kwargs):\n Exception.__init__(self, *args, **kwargs)\n\n\ndef supported_drivers():\n \"\"\"Get list of supported drivers by congress\"\"\"\n\n results = [\n {\"id\": \"monasca\",\n \"description\": \"Datasource driver that interfaces with monasca.\"},\n {\"id\": \"plexxi\",\n \"description\": \"Datasource driver that interfaces with PlexxiCore.\"},\n {\"id\": \"doctor\",\n \"description\": \"Datasource driver that allows external systems \"\n \"to push data in accordance with OPNFV Doctor \"\n \"Inspector southbound interface specification.\"},\n {\"id\": \"aodh\",\n \"description\": \"Datasource driver that interfaces with aodh.\"},\n {\"id\": \"neutronv2_qos\",\n \"description\": \"Datasource driver that interfaces with QoS \"\n \"extension of OpenStack Networking aka Neutron.\"},\n {\"id\": \"cloudfoundryv2\",\n \"description\": \"Datasource driver that interfaces with cloudfoundry\"},\n {\"id\": \"heat\",\n \"description\": \"Datasource driver that interfaces with OpenStack \"\n \"orchestration aka heat.\"},\n {\"id\": \"nova\",\n \"description\": \"Datasource driver that interfaces with OpenStack \"\n \"Compute aka nova.\"},\n {\"id\": \"murano\",\n \"description\": \"Datasource driver that interfaces with murano\"},\n {\"id\": \"neutronv2\",\n \"description\": \"Datasource driver that interfaces with OpenStack \"\n \"Networking aka Neutron.\"},\n {\"id\": \"swift\",\n \"description\": \"Datasource driver that interfaces with swift.\"},\n {\"id\": \"ironic\",\n \"description\": \"Datasource driver that interfaces with OpenStack \"\n \"bare metal aka ironic.\"},\n {\"id\": \"cinder\",\n \"description\": \"Datasource 
driver that interfaces with OpenStack \"\n \"cinder.\"},\n {\"id\": \"fake_datasource\",\n \"description\": \"This is a fake driver used for testing\"},\n {\"id\": \"config\",\n \"description\": \"Datasource driver that allows OS configs retrieval.\"},\n {\"id\": \"glancev2\",\n \"description\": \"Datasource driver that interfaces with OpenStack \"\n \"Images aka Glance.\"},\n {\"id\": \"vcenter\",\n \"description\": \"Datasource driver that interfaces with vcenter\"},\n {\"id\": \"keystonev3\",\n \"description\": \"Datasource driver that interfaces with keystone.\"},\n {\"id\": \"keystone\",\n \"description\": \"Datasource driver that interfaces with keystone.\"},\n {\"id\": \"mistral\",\n \"description\": \"Datasource driver that interfaces with Mistral.\"},\n {\"id\": \"vitrage\",\n \"description\": \"Datasource driver that accepts Vitrage \"\n \"webhook alarm notifications.\"},\n {\"id\": \"monasca_webhook\",\n \"description\": \"Datasource driver that accepts Monasca webhook \"\n \"alarm notifications.\"},\n {\"id\": \"tacker\",\n \"description\": \"Datasource driver that interfaces with OpenStack \"\n \"tacker.\"}]\n return results\n","repo_name":"openstack-archive/congress","sub_path":"congress/tests/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":18013,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"69"} +{"seq_id":"36354681980","text":"#Ali Keramatipour - 810196616\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\nclass lloyds:\n def __init__(self, SIGMA, BITS, MEAN = 0, PRECISION = 0.005, SAMPLE_CNT = 10000):\n self.PRECISION = PRECISION\n self.MEAN = MEAN\n self.SIGMA = SIGMA\n self.Q = (1< self.borders[cnt]:\n cnt = cnt + 1\n ranges[cnt].append(s)\n self.candidates.clear()\n \n for eachRange in ranges:\n sum_arr = []\n tmp = [float(val ** 2) for val in eachRange]\n p1_sum = sum(eachRange)\n p2_sum = sum(tmp)\n for i in range(len(eachRange)):\n dif = eachRange[i]\n tmp_sum = p2_sum - (2 * p1_sum * dif) + (len(eachRange) * (dif**2))\n sum_arr.append(tmp_sum)\n self.candidates.append(eachRange[np.argmin(sum_arr)])\n \n tmpBorders = self.borders.copy()\n self.borders.clear()\n for i in range(1,self.Q):\n self.borders.append((self.candidates[i] + self.candidates[i - 1]) / 2)\n for i in range(self.Q - 1):\n if abs(self.borders[i] - tmpBorders[i]) > self.PRECISION:\n return False\n \n return True\n \n def run(self):\n cycles = 0\n while not self.update_candidates_borders():\n cycles += 1\n print(\"cycles:\", cycles)\n \n def draw_plots(self):\n x = np.linspace(self.MEAN - 3 * self.SIGMA, self.MEAN + 3 * self.SIGMA, 1000)\n \n plt.figure(figsize=(10, 5), num = \"LLOYDS\")\n plt.plot(x, stats.norm.pdf(x, self.MEAN, self.SIGMA))\n\n plot_height = stats.norm.pdf(0, self.MEAN, self.SIGMA)\n\n for x in self.borders:\n plt.plot([x, x],[0, plot_height], 'g--')\n \n for x in self.candidates:\n plt.plot([x, x],[0, plot_height], 'r-')\n \n plt.legend()\n plt.show()\n \n def print_outputs(self):\n print(\"Borders\")\n for border in self.borders:\n print (border, \", \", end='', sep='')\n print()\n print(\"Candidates\")\n for candidate in self.candidates:\n print (candidate, \", \", end='', sep='')\n print()\n\n\ninst = lloyds(1, 
3)\ninst.run()\ninst.draw_plots()\ninst.print_outputs()\n\n","repo_name":"AliKeramatipour/QuantizationUsingLloyds","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"74968273499","text":"def prepare_puzzle(puzzle):\n p = [(len(puzzle[0])+2) * ['x']]\n return p + [['x'] + list(x) + ['x'] for x in puzzle] + p\n\ndef count_occupied_seats(seat_layout, star):\n temp = [['x' for _ in seat_layout[0]] for _ in seat_layout]\n while seat_layout != temp:\n people_arrive(seat_layout, temp, star)\n seat_layout, temp = temp, seat_layout\n return sum([l.count('#') for l in seat_layout])\n\ndef people_arrive(seat_layout, temp, star):\n for y, row in enumerate(seat_layout[1:-1]):\n for x, seat in enumerate(row[1:-1]):\n if seat == 'L' and adj_seats(seat_layout, x+1, y+1, star) == 0:\n seat = '#'\n elif seat == '#' and adj_seats(seat_layout, x+1, y+1, star) >= (4 if star == 1 else 5):\n seat = 'L'\n temp[y+1][x+1] = seat\n\nadj = [lambda x, y, n: (x-n, y-n), lambda x, y, n: (x, y-n), lambda x, y, n: (x+n, y-n),\n lambda x, y, n: (x-n, y), lambda x, y, n: (x+n, y),\n lambda x, y, n: (x-n, y+n), lambda x, y, n: (x, y+n), lambda x, y, n: (x+n, y+n)]\n\ndef adj_seats(seat_layout, x, y, star):\n result = 0\n for f in adj:\n seat = '.'\n if star == 1:\n tx, ty = f(x, y, 1)\n seat = seat_layout[ty][tx]\n else:\n n = 1\n while seat == '.':\n tx, ty = f(x, y, n)\n seat = seat_layout[ty][tx]\n n += 1\n if seat == '#':\n result += 1\n return result\n\ndef solve_part1(puzzle):\n return count_occupied_seats(puzzle, 1)\n\ndef solve_part2(puzzle):\n return count_occupied_seats(puzzle, 2)\n","repo_name":"tortagel/advent-of-code-2020","sub_path":"day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"42310984816","text":"import pygame as pg\nimport random as R\n\nclass Screen:\n def __init__(self, w, h, tile_size=32):\n self.surface = pg.display.set_mode((w,h)) # définit l'écran\n self.camera = pg.Rect(0,0,w,h)\n self.blit = self.surface.blit # crée un raccourci\n self.background = None\n self.width = w\n self.height = h\n self.tile_size = tile_size\n self.amount_x_tile = 0\n self.amount_y_tile = 0\n \n def get_size(self):\n return (self.width,self.height)\n \n def draw_grid(self):\n for x in range(0, self.width, self.tile_size):\n pg.draw.line(self.surface, (181,181,181), (x,0), (x,self.height))\n for y in range(0, self.height, self.tile_size):\n pg.draw.line(self.surface, (181,181,181), (0,y), (self.width,y))\n\n def set_size_tile(self, w, h):\n self.amount_x_tile = w\n self.amount_y_tile = h\n \n def draw_background(self, rect):\n return self.blit(self.background, rect, rect)\n \n def background_cam(self, rect):\n return self.draw_background(rect.move(self.camera.topleft))\n\n def blit_cam(self, image, rect):\n return self.blit(image, rect.move(self.camera.topleft))\n \n def update_camera(self, target):\n x = -target.rect.x + self.width//2\n y = -target.rect.y + self.height//2\n\n x = min(0, x)\n y = min(0, y)\n \n width_map = self.amount_x_tile*self.tile_size\n height_map = self.amount_y_tile*self.tile_size\n\n x = max(-(width_map - self.width), x)\n y = max(-(height_map - self.height), y)\n\n self.camera.x = x\n self.camera.y = y\n \n def shake(self):\n if R.random() > 0.5:\n self.camera.x += 5\n else:\n self.camera.x -= 5\n if R.random() > 0.5:\n 
self.camera.y += 5\n else:\n self.camera.y -= 5\n","repo_name":"AntDum/aledofeu","sub_path":"screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"42818049883","text":"import torch\n\n\nclass Tester:\n \"\"\"\n Tester class\n \"\"\"\n\n def __init__(self, test_data_loaders, models, device, metrics_epoch, test_metrics):\n self.test_data_loaders = test_data_loaders\n self.model = models[\"model\"]\n self.device = device\n self.metrics_epoch = metrics_epoch\n self.test_metrics = test_metrics\n\n def test(self):\n self.model.eval()\n with torch.no_grad():\n print(\"testing...\")\n test_loader = self.test_data_loaders[\"data\"]\n\n if len(self.metrics_epoch) > 0:\n outputs = torch.FloatTensor().to(self.device)\n targets = torch.FloatTensor().to(self.device)\n for batch_idx, (data, target) in enumerate(test_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n output = self.model(data)\n if len(self.metrics_epoch) > 0:\n outputs = torch.cat((outputs, output))\n targets = torch.cat((targets, target))\n\n #\n # save sample images, or do something with output here\n #\n\n for met in self.metrics_epoch:\n self.test_metrics.epoch_update(met.__name__, met(targets, outputs))\n\n return targets, outputs, self.test_metrics.result()\n","repo_name":"deeperlearner/pytorch-template","sub_path":"testers/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"69"} +{"seq_id":"70826570779","text":"\"\"\"IMPORT DEPENDENCIES\"\"\"\r\nfrom time import time, sleep\r\nimport sys\r\nimport os\r\nimport math\r\nfrom powi.equipment import ACSource, PowerMeter, ElectronicLoad, Oscilloscope, LEDControl\r\nfrom powi.equipment import headers, create_folder, footers, waveform_counter, soak, convert_argv_to_int_list, tts, prompt\r\nfrom filemanager import path_maker, remove_file\r\nimport winsound as ws\r\nfrom playsound import playsound\r\nwaveform_counter = 0\r\n\r\n##################################################################################\r\n\r\n\"\"\"COMMS\"\"\"\r\nac_source_address = 5\r\nsource_power_meter_address = 1 \r\nload_power_meter_address = 2\r\neload_address = 8\r\nscope_address = \"10.125.10.115\"\r\n\r\n\"\"\"USER INPUT\"\"\"\r\nled_list = [46]\r\nvin_list = [277,300]\r\ntest_list = convert_argv_to_int_list(sys.argv[1]) # 1 - Startup, 2 - Normal, 3 - Normal-Zoomed \r\n# test_list = [1,2]\r\n\r\ncomponent = \"VDS\"\r\n# component = \"BoostFET\"\r\n# component = \"Boost Diode\"\r\n# component = \"PassFET\"\r\n# component = \"Output Rectifier Diode\"\r\n\r\n# condition = \"Charles reworks\"\r\ncondition = \"RT-ZP10\"\r\nprint(condition)\r\n\r\ntest = \"CSA - Improvements\"\r\nwaveforms_folder = f'C:/Users/ccayno/Desktop/DER-945/waveforms/{test}'\r\n\r\n\"\"\"DO NOT EDIT BELOW THIS LINE\"\"\"\r\n##################################################################################\r\n\r\n\r\n\"\"\"EQUIPMENT INITIALIZE\"\"\"\r\nac = ACSource(ac_source_address)\r\npms = PowerMeter(source_power_meter_address)\r\npml = PowerMeter(load_power_meter_address)\r\neload = ElectronicLoad(eload_address)\r\nscope = Oscilloscope(scope_address)\r\nled = LEDControl()\r\n\r\n\"\"\"GENERIC FUNCTIONS\"\"\"\r\n\r\ndef discharge_output():\r\n ac.turn_off()\r\n for i in range(1,9):\r\n eload.channel[i].cc = 1\r\n eload.channel[i].turn_on()\r\n 
eload.channel[i].short_on()\r\n sleep(2)\r\n for i in range(1,9):\r\n eload.channel[i].turn_off()\r\n eload.channel[i].short_off()\r\n sleep(1)\r\n\r\n\r\n\r\ndef scope_settings(test_type):\r\n global ch1, ch2, ch3, ch4\r\n global channel_trigger, channel_trigger_delta\r\n\r\n\r\n print(f'Test Type: {test_type}')\r\n \r\n scope.stop()\r\n scope.remove_zoom()\r\n\r\n ch1 = scope.channel_settings(state='OFF')\r\n ch2 = scope.channel_settings(state='OFF')\r\n ch3 = scope.channel_settings(state='OFF')\r\n ch4 = scope.channel_settings(state='OFF')\r\n\r\n\r\n if component == \"VDS\":\r\n if test_type == 'Startup':\r\n scope.position_scale(time_position = 10, time_scale = 0.2)\r\n scope.edge_trigger(2, 100, 'POS')\r\n # scope.position_scale(time_position = 10, time_scale = 200E-9)\r\n # scope.edge_trigger(2, 700, 'POS')\r\n if test_type == 'Normal':\r\n scope.position_scale(time_position = 50, time_scale = 500E-6)\r\n scope.edge_trigger(2, 500, 'POS')\r\n # scope.position_scale(time_position = 50, time_scale = 0.001)\r\n # scope.edge_trigger(2, 100, 'POS')\r\n scope.add_zoom(rel_pos = 50, rel_scale = 1.2)\r\n \r\n channel_trigger = 2\r\n channel_trigger_delta = 1\r\n\r\n if test_type == 'Normal_Zoomed':\r\n scope.position_scale(time_position = 50, time_scale = 200E-9)\r\n scope.edge_trigger(2, 500, 'POS')\r\n \r\n channel_trigger = 2\r\n channel_trigger_delta = 1\r\n\r\n ch2 = scope.channel_settings(state='ON', channel=2, scale=100, position=-4, label=component, color='YELLOW', bandwidth=500, coupling='DCLimit', offset=0)\r\n # ch3 = scope.channel_settings(state='ON', channel=3, scale=100, position=-4, label='VCSN', color='PINK', bandwidth=500, coupling='DCLimit', offset=0)\r\n\r\n if component == 'BoostFET':\r\n if test_type == 'Startup':\r\n scope.position_scale(time_position = 10, time_scale = 0.2)\r\n scope.edge_trigger(2, 10, 'POS')\r\n if test_type == 'Normal':\r\n scope.position_scale(time_position = 50, time_scale = 0.010)\r\n scope.edge_trigger(2, 50, 'POS')\r\n scope.add_zoom(rel_pos = 50, rel_scale = 0.1)\r\n\r\n channel_trigger = 2\r\n channel_trigger_delta = 0.5\r\n \r\n ch2 = scope.channel_settings(state='ON', channel=2, scale=20, position=-4, label=component, color='LIGHT_BLUE', bandwidth=500, coupling='DCLimit', offset=0)\r\n \r\n if component == 'Boost Diode':\r\n if test_type == 'Startup':\r\n scope.position_scale(time_position = 10, time_scale = 0.2)\r\n scope.edge_trigger(2, 10, 'POS')\r\n if test_type == 'Normal':\r\n scope.position_scale(time_position = 50, time_scale = 0.010)\r\n scope.edge_trigger(2, 62.5, 'POS')\r\n scope.add_zoom(rel_pos = 50, rel_scale = 0.1)\r\n\r\n channel_trigger = 2\r\n channel_trigger_delta = 0.5\r\n \r\n ch2 = scope.channel_settings(state='ON', channel=2, scale=20, position=-4, label=component, color='GREEN', bandwidth=500, coupling='DCLimit', offset=0)\r\n \r\n if component == 'PassFET':\r\n if test_type == 'Startup':\r\n scope.position_scale(time_position = 10, time_scale = 0.2)\r\n scope.edge_trigger(2, 10, 'POS')\r\n if test_type == 'Normal':\r\n scope.position_scale(time_position = 50, time_scale = 0.01)\r\n scope.edge_trigger(2, 30, 'POS')\r\n scope.add_zoom(rel_pos = 50, rel_scale = 0.2)\r\n\r\n channel_trigger = 2\r\n channel_trigger_delta = 0.5\r\n \r\n ch2 = scope.channel_settings(state='ON', channel=2, scale=20, position=-4, label=component, color='BLUE', bandwidth=500, coupling='DCLimit', offset=0)\r\n\r\n if component == 'Output Rectifier Diode':\r\n if test_type == 'Startup':\r\n scope.position_scale(time_position = 10, time_scale = 
0.2)\r\n scope.edge_trigger(2, 100, 'POS')\r\n if test_type == 'Normal':\r\n scope.position_scale(time_position = 50, time_scale = 0.010)\r\n scope.edge_trigger(2, 50, 'POS')\r\n scope.add_zoom(rel_pos = 50, rel_scale = 0.1)\r\n\r\n channel_trigger = 2\r\n channel_trigger_delta = 0.5\r\n \r\n ch2 = scope.channel_settings(state='ON', channel=2, scale=100, position=-4, label=component, color='PINK', bandwidth=500, coupling='DCLimit', offset=0)\r\n input(\"Invert channel.\")\r\n \r\n sleep(2) \r\n\r\n\r\ndef get_value(channel):\r\n labels, values = scope.get_measure(channel)\r\n max_value = float(f\"{values[0]:.2f}\")\r\n return max_value\r\n\r\ndef export_scope_measurement_label(component, LED, test_type, time_scale):\r\n global ch1, ch2, ch3, ch4\r\n\r\n output_list = ['LED', 'VIN']\r\n\r\n if ch1 == 'ON': output_list.append(\"CH1\")\r\n if ch2 == 'ON': output_list.append(\"CH2\")\r\n if ch3 == 'ON': output_list.append(\"CH3\")\r\n if ch4 == 'ON': output_list.append(\"CH4\")\r\n\r\n with open(f'{component} - {LED}V at {test_type} - {time_scale}s perdiv.txt', 'a+') as f:\r\n f.write(','.join(output_list))\r\n f.write('\\n')\r\n\r\ndef export_scope_measurement(component, LED, vin, test_type, time_scale):\r\n global ch1, ch2, ch3, ch4\r\n\r\n load = str(LED)\r\n input = str(vin)\r\n output_list = [load, input]\r\n\r\n if ch1 == 'ON':\r\n max = f\"{get_value(1)}\"\r\n output_list.append(max)\r\n if ch2 == 'ON':\r\n max = f\"{get_value(2)}\"\r\n output_list.append(max)\r\n if ch3 == 'ON':\r\n max = f\"{get_value(3)}\"\r\n output_list.append(max)\r\n if ch4 == 'ON':\r\n max = f\"{get_value(4)}\"\r\n output_list.append(max)\r\n\r\n with open(f'{component} - {LED}V at {test_type} - {time_scale}s perdiv.txt', 'a+') as f:\r\n f.write(','.join(output_list))\r\n f.write('\\n')\r\n\r\ndef find_trigger(channel, trigger_delta):\r\n # finding trigger level\r\n scope.run_single()\r\n soak(5)\r\n\r\n # get initial peak-to-peak measurement value\r\n labels, values = scope.get_measure(channel)\r\n max_value = float(values[0])\r\n max_value = float(f\"{max_value:.4f}\")\r\n\r\n # set max_value as initial trigger level\r\n trigger_level = max_value\r\n scope.edge_trigger(channel, trigger_level, 'POS')\r\n\r\n # check if it triggered within 5 seconds\r\n scope.run_single()\r\n soak(3)\r\n trigger_status = scope.trigger_status()\r\n\r\n # increase trigger level until it reaches the maximum trigger level\r\n while (trigger_status == 1):\r\n trigger_level += trigger_delta\r\n scope.edge_trigger(channel, trigger_level, 'POS')\r\n\r\n # check trigger status\r\n scope.run_single()\r\n soak(3)\r\n trigger_status = scope.trigger_status()\r\n\r\n # decrease trigger level below to get the maximum trigger possible\r\n trigger_level -= 2*trigger_delta\r\n scope.edge_trigger(channel, trigger_level, 'POS')\r\n # print(f'Maximum trigger level found at: {trigger_level}')\r\n\r\n\r\ndef operation(test_type):\r\n\r\n global waveform_counter\r\n global channel_trigger, channel_trigger_delta\r\n\r\n scope_settings(test_type)\r\n \r\n for LED in led_list:\r\n\r\n led.voltage(LED)\r\n\r\n time_scale = scope.get_horizontal()['scale']\r\n file = f'{component} - {LED}V at {test_type} - {time_scale}s perdiv.txt'\r\n remove_file(file)\r\n export_scope_measurement_label(component, LED, test_type, time_scale)\r\n \r\n for vin in vin_list:\r\n\r\n if vin == 300 or vin == 277: count = 1\r\n else: count = 1\r\n\r\n for i in range(count):\r\n\r\n if test_type == 'Startup':\r\n scope.run_single()\r\n sleep(3)\r\n ac.voltage = vin\r\n ac.frequency 
= ac.set_freq(vin)\r\n ac.turn_on()\r\n sleep(5)\r\n discharge_output()\r\n \r\n if test_type == 'Normal' or test_type == 'Normal_Zoomed':\r\n ac.voltage = vin\r\n ac.frequency = ac.set_freq(vin)\r\n ac.turn_on()\r\n sleep(5)\r\n find_trigger(channel=channel_trigger, trigger_delta=channel_trigger_delta)\r\n scope.run_single()\r\n sleep(3)\r\n discharge_output()\r\n\r\n filename = f'{component} {LED}V, {vin}Vac, {test_type}_{i}, {time_scale}s perdiv.png'\r\n path = path_maker(f'{waveforms_folder}/{component}/{condition}/{test_type}/{LED}V')\r\n scope.get_screenshot(filename, path)\r\n print(filename)\r\n waveform_counter += 1\r\n\r\n export_scope_measurement(component, LED, vin, test_type, time_scale)\r\n \r\n # transporting txt file from codes folder to waveforms folder\r\n import shutil\r\n source = f'{os.getcwd()}/{file}'\r\n destination = f'{path}/{file}'\r\n remove_file(destination)\r\n shutil.move(source, destination)\r\n\r\n\r\n\r\ndef main():\r\n global waveform_counter\r\n\r\n test = 1 # startup\r\n if test in test_list: operation(test_type='Startup')\r\n\r\n test = 2 # normal\r\n if test in test_list: operation(test_type='Normal')\r\n\r\n test = 3 # normal_zoomed\r\n if test in test_list: operation(test_type='Normal_Zoomed')\r\n \r\nif __name__ == \"__main__\":\r\n headers(test)\r\n discharge_output()\r\n main()\r\n discharge_output()\r\n footers(waveform_counter)","repo_name":"charlescayno/lighting","sub_path":"codes - General DER/DER_CSA_IMPROVEMENT.py","file_name":"DER_CSA_IMPROVEMENT.py","file_ext":"py","file_size_in_byte":11247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"25218966989","text":"import argparse\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom matplotlib import animation\n\nfrom object_detection.builders.dataset_builder import build as build_dataset\nfrom object_detection.utils.config_util import get_configs_from_pipeline_file\nfrom object_detection.utils.label_map_util import create_category_index_from_labelmap\nfrom object_detection.utils import visualization_utils as viz_utils\n\nfrom utils import get_module_logger\n\n\ndef main(labelmap_path, model_path, tf_record_path, config_path, output_path):\n \"\"\"\n Use a model and a tf record file and create a mp4 video\n args:\n - labelmap_path [str]: path to labelmap file\n - model_path [str]: path to exported model\n - tf_record_path [str]: path to tf record file to visualize\n - config_path [str]: path to config file\n - output_path [str]: path to mp4 file\n\n Save the results as mp4 file\n \"\"\"\n # load label map\n category_index = create_category_index_from_labelmap(labelmap_path,\n use_display_name=True)\n\n # Load saved model and build the detection function\n logger.info(f'Loading model from {model_path}')\n detect_fn = tf.saved_model.load(model_path)\n\n # open config file\n logger.info(f'Loading config from {config_path}')\n configs = get_configs_from_pipeline_file(config_path)\n eval_config = configs['eval_config']\n eval_input_config = configs['eval_input_config']\n model_config = configs['model']\n\n # update the eval config file\n eval_input_config.tf_record_input_reader.input_path[:] = [tf_record_path]\n dataset = build_dataset(eval_input_config)\n\n # build dataset\n dataset = build_dataset(eval_input_config)\n\n # here we infer on the entire dataset\n images = []\n logger.info(f'Inference on {tf_record_path}')\n for idx, batch in enumerate(dataset):\n if idx % 10 == 0:\n 
logger.info(f'Step: {idx}')\n # add new axis and feed into model\n input_tensor = batch['image']\n image_np = input_tensor.numpy().astype(np.uint8)\n input_tensor = input_tensor[tf.newaxis, ...]\n\n detections = detect_fn(input_tensor)\n\n # tensor -> numpy arr, remove one dimensions\n num_detections = int(detections.pop('num_detections'))\n detections = {key: value[0, ...].numpy()\n for key, value in detections.items()}\n detections['num_detections'] = num_detections\n\n # detection_classes should be ints.\n detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\n\n image_np_with_detections = \\\n viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np,\n detections['detection_boxes'],\n detections['detection_classes'],\n detections['detection_scores'],\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=200,\n min_score_thresh=.30,\n agnostic_mode=False)\n images.append(image_np_with_detections)\n\n # now we can create the animation\n f = plt.figure()\n f.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)\n ax = plt.subplot(111)\n ax.axis('off')\n im_obj = ax.imshow(images[0])\n\n def animate(idx):\n image = images[idx]\n im_obj.set_data(image)\n\n anim = animation.FuncAnimation(f, animate, frames=len(images))\n anim.save(output_path, fps=5, dpi=300)\n\n\nif __name__ == \"__main__\":\n logger = get_module_logger(__name__)\n\n parser = argparse.ArgumentParser(description='Create video')\n parser.add_argument('--labelmap_path', required=True, type=str,\n help='path to the label map')\n parser.add_argument('--model_path', required=True, type=str,\n help='path to the saved model folder')\n parser.add_argument('--tf_record_path', required=True, type=str,\n help='path to the tf record file')\n parser.add_argument('--config_path', required=False, type=str,\n default='pipeline.config',\n help='path to the config file')\n parser.add_argument('--output_path', required=False, type=str,\n default='animation.mp4',\n help='path of the saved file')\n args = parser.parse_args()\n main(args.labelmap_path,\n args.model_path,\n args.tf_record_path,\n args.config_path,\n args.output_path)\n","repo_name":"udacity/nd013-c1-vision-starter","sub_path":"inference_video.py","file_name":"inference_video.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"69"} +{"seq_id":"10230956727","text":"from django.core.management.base import BaseCommand, CommandError\nfrom carparking.models import Slot, Car\nfrom django.db import IntegrityError\n\n\nclass Command(BaseCommand):\n help = \"Shows current status of parking lot\"\n\n def handle(self, *args, **options):\n \"\"\"Action performed by this command is here\"\"\"\n self.stdout.write('Slot Number, Car Registration Number, Colour')\n for slot in Slot.objects.all():\n car = slot.car if hasattr(slot, 'car') else None\n reg_num = car.reg_number if car else '-'\n colour = car.colour if car else '-'\n self.stdout.write(f\"{slot.number}, {reg_num}, {colour}\")\n ","repo_name":"Priyeshpandey/LLD","sub_path":"ParkingLot/parkinglot/carparking/management/commands/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"37527083512","text":"\"\"\"\r\n项目需求:\r\n现在我们有全球排名前10000本书的数据,统计一下:\r\n 1.不同年份书的数量\r\n 2.不同年份书的平均评分情况\r\n\"\"\"\r\n# 1.不同年份书的数量\r\nimport pandas as pd\r\nfrom matplotlib 
import pyplot as plt\r\nimport matplotlib\r\n\r\nmatplotlib.rc(\"font\", family=' DengXian')\r\n\r\nbooks_data = pd.read_csv(\"./books.csv\")\r\n\r\n# 显示所有的行和列的详细信息,不会有省略号\r\npd.set_option('display.max_columns', None)\r\n# 检查数据\r\n# print(books_data.head(1))\r\nprint(books_data.info())\r\n\r\n# 对original_publication_year进行取值,里面有缺失要进行处理\r\n# 对年份不为nan的书的年份进行提取\r\n# dropnan在这里不太合适,会把其他的为nan的数据都给删掉了\r\nbooks_data_notnull = books_data[pd.notnull(books_data[\"original_publication_year\"])]\r\n# books_data_notnull = books_data[books_data[\"original_publication_year\"] != \"nan\"]\r\n# print(books_data_notnull, type(books_data_notnull))\r\n\r\ndata = books_data_notnull.groupby(by=\"original_publication_year\").count()[\"id\"].sort_values(ascending=False)\r\nprint(type(data))\r\nprint(data)\r\n# 输出类型为\r\n\r\n# 设置x y 轴的相关的数据,取最近50年的即可\r\n_x = data.index[:50]\r\n_y = data.values[:50]\r\n\r\n# 调整画布的大小\r\nplt.figure(figsize=(20, 8), dpi=80)\r\nplt.grid()\r\nplt.bar(_x, _y)\r\nplt.show()\r\nprint(\"/\" * 100)\r\n\r\n# 2.不同年份书的平均评分情况:二者之间的情况使用折线图,柱状图也可以\r\n# 对年份不为nan的书的年份进行提取\r\ngrouped = books_data_notnull[\"average_rating\"].groupby(by=books_data_notnull[\"original_publication_year\"]).mean()\r\n# print(grouped)\r\n\r\n# 传入xy的数据\r\naverage_x = grouped.index\r\naverage_y = grouped.values\r\n\r\n# 进行画图\r\nplt.figure(figsize=(20, 8), dpi=80)\r\n\r\nplt.plot(range(len(average_x)), average_y)\r\nprint(len(average_x))\r\n\r\n# 强制转换成整数,不要小数\r\n# 并且旋转45度\r\nplt.xticks(list(range(len(average_x)))[::10], average_x[::10].astype(int), rotation=45)\r\nplt.show()\r\n","repo_name":"li199773/Data-Analysis","sub_path":"25 panads数据的索引项目练习.py","file_name":"25 panads数据的索引项目练习.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"} +{"seq_id":"30236766486","text":"from django.core.paginator import Paginator\nfrom django.core.paginator import EmptyPage\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponseNotModified\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth import authenticate, login\nfrom models import Answer\nfrom models import Question\nfrom forms import *\n\n\ndef _pagination(request, qs):\n limit = request.GET.get('limit', 10);\n page = request.GET.get('page', 1)\n paginator = Paginator(qs, limit)\n paginator.baseurl = '/?page='\n try:\n page = paginator.page(page)\n except EmptyPage:\n page = paginator.page(paginator.num_pages)\n return page\n\n\ndef test(request, *args, **kwargs):\n return HttpResponse('OK')\n\n\ndef main(request):\n questions = Question.objects.all()\n questions = questions.order_by('-id')\n page = _pagination(request, questions)\n return render(request, 'qa/main.html', {\n 'questions': page.object_list,\n 'paginator': page.paginator,\n 'page': page,\n })\n\n\ndef popular_questions(request):\n questions = Question.objects.all()\n questions = questions.order_by('-rating')\n page = _pagination(request, questions)\n return render(request, 'qa/popular_questions.html', {\n 'questions': page.object_list,\n 'paginator': page.paginator,\n 'page': page,\n })\n\n\ndef question(request, slug):\n slug=int(slug)\n question = get_object_or_404(Question, id=slug)\n answers = question.answer_set.all()\n form = AnswerForm(request.user, initial={'question': str(slug)})\n return 
render(request, 'qa/question.html', {\n 'question': question,\n 'answers': answers,\n 'form': form,\n })\n\n\ndef question_add(request):\n if request.method == \"POST\":\n user = request.user\n if not user.is_authenticated():\n return HttpResponseRedirect('/login/')\n form = AskForm(request.user, request.POST)\n if form.is_valid():\n # form._user = user\n question = form.save()\n url = question.get_url()\n return HttpResponseRedirect(url)\n else:\n form = AskForm()\n return render(request, 'qa/question_add.html', {\n 'form': form,\n })\n\n\n@require_POST\ndef answer(request):\n user = request.user\n if not user.is_authenticated():\n return HttpResponseRedirect('/login/')\n form = AnswerForm(request.user, request.POST)\n # form._user = user\n if form.is_valid():\n post = form.save()\n # url = answer.question.get_url()\n # return HttpResponseRedirect(url)\n return HttpResponseRedirect(reverse('question', args=[post.question.id]))\n else:\n return HttpResponseNotModified()\n\n\ndef signup_user(request):\n if request.method == 'POST':\n form = SignupForm(request.POST)\n if form.is_valid():\n user = form.save()\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n login(request, user)\n return HttpResponseRedirect('/')\n else:\n form = SignupForm()\n return render(request, 'qa/signup.html', {\n 'form': form,\n })\n\n\ndef login_user(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return HttpResponseRedirect('/')\n else:\n form = LoginForm()\n return render(request, 'qa/login.html', {\n 'form': form,\n })\n","repo_name":"oshuev/stpic_web_project","sub_path":"ask/qa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"13645024156","text":"\"\"\"\nContains a number of helpful functions for working with 2-D arrays, especially\narrays of characters. Arrays of characters are arrays of one-element strings.\n\"\"\"\n\nimport coordinates\nimport numpy\nimport config\nimport exc\nimport symbol\n\ndef index(element, array):\n \"\"\"\n Find and return integers x, y such that array[x,y] == element.\n If no such integers exist, raise an exception.\n \"\"\"\n\n result = find(element, array)\n if result is None:\n raise IndexError(\"Element %s is not present in the array.\"\n % str(element))\n \n return result\n\ndef find(element, array):\n \"\"\"\n Find and return integers x, y such that array[x,y] == element.\n If no such integers exist, return None.\n \"\"\"\n\n for x in range(array.shape[0]):\n for y in range(array.shape[1]):\n if array[x,y] == element:\n return (x,y)\n\n return None\n\ndef overlay(arrays, heights):\n \"\"\"\n Return a tuple of two arrays representing a top-down view of the arrays\n supplied. 
The first array is an array of strings, and represents the\n    top-down view; the second is an array of integers, whose contents\n    represent which array each character in the first array came from.\n\n    For instance:\n    arrays = [|a | |b b|\n              | a|, |b b|]\n    heights = [2, 5]\n\n    overlay(arrays, heights) = (|a b| |2 5|\n                                |b a|, |5 2|)\n\n    Note that a lesser number represents an array more likely to be seen,\n    and a greater number represents an array less likely to be seen.\n    If heights is not sorted in ascending order, a ValueError will be\n    raised.\n    \"\"\"\n\n    sorted_heights = list(heights)\n    sorted_heights.sort()\n    if sorted_heights != list(heights):\n        raise ValueError(\"List of heights %s is not in ascending order.\"\n            % str(heights))\n    if len(arrays) != len(heights):\n        raise ValueError(\"There are %d arrays, but there are %d heights!\"\n            % (len(arrays), len(heights)))\n\n    trans = config.TRANSPARENT_GLYPH\n    composite_array = empty_str_array(arrays[0].shape)\n    height_array = numpy.zeros(arrays[0].shape, 'i')\n    reversed_arrays = list(arrays)\n    reversed_arrays.reverse()\n    reversed_heights = list(heights)\n    reversed_heights.reverse()\n\n    # Paint from the highest (least visible) array down to the lowest, so the\n    # most visible arrays overwrite the less visible ones.\n    for i in range(len(reversed_arrays)):\n        for x in range(reversed_arrays[0].shape[0]):\n            for y in range(reversed_arrays[0].shape[1]):\n                if reversed_arrays[i][x,y] != trans:\n                    composite_array[x,y] = reversed_arrays[i][x,y]\n                    height_array[x,y] = reversed_heights[i]\n\n    return (composite_array, height_array)\n\ndef fovize(arr, view, memory_arr = None, memory = None, memory_color = (0, 0, 0)):\n    \"\"\"\n    Returns a copy of arr with only the points in view visible.\n\n    Note that despite its wide scope, this function should be fairly fast.\n\n    If memory_arr is provided, then if a coordinate is not in \"view\" but is in\n    \"memory,\" a colored version of that coordinate in memory_arr is displayed\n    instead.\n\n    arr - an array.\n    view - an iterable object full of points (tuples of coordinates). Normally\n        this is a fov object, but that is not necessary.\n    memory_arr - an array.\n    memory - an iterable object full of points.\n    memory_color - the color which displayed things from memory will be painted.\n\n    Returns - a copy of arr, except that each point in arr which is not in view\n        is replaced with a transparent character, or a colored copy of a point\n        in memory_arr, if applicable.\n    \"\"\"\n\n    ret_array = empty_str_array(arr.shape)\n    for coord in view:\n        ret_array[coord] = arr[coord]\n\n    # Guard against the default None arguments; iterating over memory\n    # unconditionally would raise a TypeError when no memory is supplied.\n    if memory is not None and memory_arr is not None:\n        for coord in memory:\n            if coord not in view:\n                ret_array[coord] = symbol.Glyph(memory_arr[coord].char, memory_color)\n\n    return ret_array\n\ndef empty_str_array(dimensions):\n    \"\"\"\n    Return an array such that empty_str_array(x,y).shape == (x,y) and with\n    appropriate a,b, empty_str_array(x,y)[a,b] == config.TRANSPARENT_GLYPH.\n    \"\"\"\n\n    ret_array = numpy.empty(dimensions, 'O')\n    for x in range(dimensions[0]):\n        for y in range(dimensions[1]):\n            ret_array[x,y] = config.TRANSPARENT_GLYPH\n    return ret_array\n\ndef print_str_to_end_of_line(initial_coords, string_used, array, color = (255, 255, 255)):\n    \"\"\"\n    Print \"string_used\" to \"array\". The first character of string_used is\n    printed to array[initial_coords], the second to\n    array[initial_coords[0] + 1, initial_coords[1]], etc. The rest of the\n    line in the array after the string will be filled with the transparent\n    character. 
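For example, printing \"hi\" at initial_coords = (0, y) into a row of width 5 leaves the row as 'h', 'i' followed by three transparent glyphs.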
If there is not enough room in the array for the string, a\n    ValueError will be thrown.\n\n    initial_coords - the coordinates to which the first character of the\n        string should be printed.\n    string_used - the string printed to the array.\n    array - the array of characters (1-character strings) to which the\n        string is printed.\n    color - the color of the text being printed.\n    \"\"\"\n    \n    overflow_chars = array.shape[0] - initial_coords[0] - len(string_used)\n    if overflow_chars < 0:\n        raise ValueError(\n            \"String of length %d starting at %d needs more than %d array size.\"\n            % (len(string_used), initial_coords[0], array.shape[0]))\n\n    # Pad with transparent glyphs all the way to the edge of the array, so the\n    # whole rest of the line really is cleared (a single glyph would only\n    # clear one cell).\n    printed_str = string_used + config.TRANSPARENT_GLYPH.char * overflow_chars\n\n    print_str(initial_coords, printed_str, array, color)\n\n    return\n\ndef print_str(initial_coords, string_used, array, color = (255, 255, 255)):\n    \"\"\"\n    Print \"string_used\" to \"array\". The first character of string_used is\n    printed to array[initial_coords], the second to\n    array[initial_coords[0] + 1, initial_coords[1]], etc. The rest of the\n    line in the array after the string will be unchanged. If the string\n    does not fit in the array, a ValueError will be thrown.\n\n    initial_coords - the place in the array where the first character of the\n        string is printed.\n    string_used - the string to be printed to the array.\n    array - the array to which the string is printed.\n    color - the color of the string being printed.\n    \"\"\"\n\n    if initial_coords[0] + len(string_used) > array.shape[0]:\n        raise ValueError(\n            \"String of length %d starting at %d needs more than %d array size.\"\n            % (len(string_used), initial_coords[0], array.shape[0]))\n\n    for i in range(len(string_used)):\n        array[initial_coords[0] + i, initial_coords[1]] = symbol.Glyph(string_used[i], (color[0], color[1], color[2]))\n\n    return\n\ndef copy_array_centered_at(dst_nw_corner, block_dims, src_center, src_array, \n        dst_array):\n    \"\"\"\n    Copy the array src_array into a block_dims-sized rectangle of dst_array,\n    with the information being copied in centered at src_center.\n\n    dst_nw_corner - the corner closest to (0, 0) of the rectangle of dst_array\n        which will be overwritten.\n    block_dims - the dimensions of the rectangle of dst_array which will be\n        overwritten.\n    src_center - the coordinates of the square at the center of the rectangle\n        of src_array which will be copied into dst_array.\n    src_array - the source array.\n    dst_array - the destination array.\n    \"\"\"\n\n    src_rect = coordinates.centeredRect(src_center, block_dims)\n    src_nw_corner = src_rect[0]\n    copy_array_subset_lenient(src_nw_corner, dst_nw_corner, block_dims, \n        src_array, dst_array)\n    return\n\ndef copy_array_subset_lenient(src_nw_corner, dst_nw_corner, block_dims,\n        src_array, dst_array):\n    \"\"\"\n    Copy a block_dims-sized subset of the array src_array into a\n    block_dims-sized rectangle of the array dst_array, starting from\n    src_nw_corner to dst_nw_corner, respectively.\n\n    src_nw_corner - the corner closest to (0, 0) of the rectangle being copied\n        out of src_array.\n    dst_nw_corner - the corner closest to (0, 0) of the rectangle being\n        overwritten in dst_array.\n    block_dims - the dimensions of the rectangle being copied.\n    src_array - the source array.\n    dst_array - the destination array.\n    \"\"\"\n\n    if block_dims[0] < 0 or block_dims[1] < 0:\n        raise ValueError(\"block_dims = %s contains a coordinate less than 0.\"\n            % str(block_dims))\n    \n    src_nw_corner = list(src_nw_corner)\n    dst_nw_corner = list(dst_nw_corner)\n    block_dims = list(block_dims)\n\n    for i in (0, 1):\n        if src_nw_corner[i] < 0:\n# Set block_dims so that new_block_dims[i] + the amount shaved off by\n# src_nw_corner[i] being less than 0 = old_block_dims[i].  The shrink must\n# happen before the corner is clamped to 0, or nothing is shaved off.\n            block_dims[i] = block_dims[i] + src_nw_corner[i]\n            src_nw_corner[i] = 0\n        if src_nw_corner[i] + block_dims[i] > src_array.shape[i]:\n# Set block_dims so that \n# new_block_dims[i] + src_nw_corner[i] = src_array.shape[i]\n            block_dims[i] = src_array.shape[i] - src_nw_corner[i]\n        if dst_nw_corner[i] < 0:\n# As above: shrink block_dims by the clipped amount before clamping.\n            block_dims[i] = block_dims[i] + dst_nw_corner[i]\n            dst_nw_corner[i] = 0\n        if dst_nw_corner[i] + block_dims[i] > dst_array.shape[i]:\n            block_dims[i] = dst_array.shape[i] - dst_nw_corner[i]\n\n    src_nw_corner = tuple(src_nw_corner)\n    dst_nw_corner = tuple(dst_nw_corner)\n    block_dims = tuple(block_dims)\n\n    copy_array_subset(src_nw_corner, dst_nw_corner, block_dims, \n        src_array, dst_array)\n\n    return\n\ndef copy_array_subset(src_nw_corner, dst_nw_corner, block_dims,\n        src_array, dst_array):\n    \"\"\"\n    This function differs from copy_array_subset_lenient only in that it\n    raises exceptions whenever the coordinates given refer to squares not\n    actually in the arrays provided.\n    \"\"\"\n\n    exc.check_in_array(src_nw_corner, src_array.shape)\n    exc.check_in_array(dst_nw_corner, dst_array.shape)\n    exc.check_in_array(coordinates.add(coordinates.add(\n                           src_nw_corner, block_dims),\n                           (-1, -1)), src_array.shape)\n    exc.check_in_array(coordinates.add(coordinates.add(\n                           dst_nw_corner, block_dims),\n                           (-1, -1)), dst_array.shape)\n\n    for x in range(block_dims[0]):\n        for y in range(block_dims[1]):\n            dst_array[coordinates.add(dst_nw_corner, (x,y))] = \\\n                src_array[coordinates.add(src_nw_corner, (x,y))]\n\n    return\n\ndef copy_entire_array(dst_nw_corner, src_array, dst_array):\n    \"\"\"\n    Copy the entire array, src_array, into dst_array, with\n    src_array[0,0] == dst_array[dst_nw_corner], etc.\n    \n    dst_nw_corner - a tuple of integers representing the coordinates\n        into which src_array[0,0] will be copied.\n    src_array - the array to be copied.\n    dst_array - the array into which src_array is copied.\n    \"\"\"\n\n    copy_array_subset((0, 0), dst_nw_corner, src_array.shape,\n        src_array, dst_array)\n\ndef copy_array(array_to_copy):\n    \"\"\"\n    Return a copy of the array array_to_copy.\n    \"\"\"\n\n    return array_to_copy.copy()\n\ndef fill_rect(array, nw_corner, se_corner, val):\n    \"\"\"\n    Fill a rectangle in an array with a certain symbol.\n\n    array - the array to be modified.\n    nw_corner - the northwest corner of the rectangle to be filled.\n    se_corner - the southeast corner of the rectangle to be filled. 
(Inclusive.)\n val - the value with which the rectangle should be filled.\n \"\"\"\n\n if (nw_corner[0] < 0 or nw_corner[1] < 0):\n raise ValueError(\"Northwest corner %s too high\" % nw_corner)\n if (se_corner[0] >= array.shape[0] or se_corner[1] >= array.shape[1]):\n raise ValueError(\"Southeast corner %s too low for array of shape %s\"\n % (se_corner, array.shape))\n\n for i in range(nw_corner[0], se_corner[0] + 1):\n for j in range(nw_corner[1], se_corner[1] + 1):\n array[i,j] = val\n","repo_name":"nrook/Spirit","sub_path":"arrays.py","file_name":"arrays.py","file_ext":"py","file_size_in_byte":11896,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"22871874669","text":"from src.validators.validationexception import ValidationException\n\n\nclass BookValidator:\n @staticmethod\n def validate(book):\n errors = []\n if book.book_id < 0 or not float(book.book_id) == int(book.book_id):\n errors.append(\"Invalid ID!\")\n if book.title == \"\":\n errors.append(\"Invalid title!\")\n if book.author == \"\" :\n errors.append(\"Invalid author!\")\n if len(errors) > 0:\n raise ValidationException(errors)\n\n @staticmethod\n def validate_id(book_id):\n errors = []\n if int(book_id) < 0 or not float(book_id) == int(book_id):\n errors.append(\"Not a valid ID.\")\n if len(errors) > 0:\n raise ValidationException(errors)\n\n @staticmethod\n def validate_title(book_title):\n errors = []\n if book_title == \"\":\n errors.append(\"Invalid title!\")\n if len(errors) > 0:\n raise ValidationException(errors)\n\n @staticmethod\n def validate_author(book_author):\n errors = []\n if book_author == \"\":\n errors.append(\"Invalid author!\")\n if len(errors) > 0:\n raise ValidationException(errors)\n","repo_name":"911BostanMaria/BookstoreManager","sub_path":"src/validators/bookvalidator.py","file_name":"bookvalidator.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"70648048219","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('packages', '0006_packagesubscription'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='grouppackagesubscription',\n name='stop',\n field=models.DateTimeField(help_text=b'The time this subscription expires. You are not allowed to set this.', null=True, blank=True),\n ),\n migrations.AlterField(\n model_name='packagesubscription',\n name='stop',\n field=models.DateTimeField(help_text=b'The time this subscription expires. 
You are not allowed to set this.', null=True, blank=True),\n ),\n ]\n","repo_name":"deone/billing-xwf","sub_path":"packages/migrations/0007_auto_20151009_1324.py","file_name":"0007_auto_20151009_1324.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"26561599715","text":"import numpy as np\n\nfrom itertools import permutations\nfrom tqdm import tqdm\n\ndef expected_utility(cards, num_cards, num_players,\n node_map, action_map):\n if len(cards) > 4:\n from leduc.state import Leduc as State\n from leduc.hand_eval import leduc_eval as eval\n else:\n from leduc.state import State\n from leduc.hand_eval import kuhn_eval as eval\n cards = sorted(cards)\n all_combos = [list(t) for t in set(permutations(cards, num_cards))]\n\n expected_utility = np.zeros(num_players)\n for card in tqdm(all_combos, desc='calculating expected utility'):\n hand = State(card, num_players, eval)\n expected_utility += traverse_tree(hand, node_map, action_map)\n\n return expected_utility/len(all_combos)\n\n\ndef traverse_tree(hand, node_map, action_map):\n if hand.terminal:\n utility = hand.utility()\n return utility\n\n info_set = hand.info_set()\n node = node_map[hand.turn][info_set]\n\n strategy = node.avg_strategy()\n util = np.zeros(len(node_map))\n valid_actions = action_map[hand.turn][info_set]\n if 'actions' in valid_actions:\n valid_actions = valid_actions['actions']\n for action in valid_actions:\n new_hand = hand.take(action, deep=True)\n util += traverse_tree(new_hand, node_map, action_map) * strategy[action]\n\n return util\n\n \ndef bias(strategy, action_to_bias):\n new_strat = {k:(v if k != action_to_bias else v * 5) for k, v in strategy.items()}\n\n norm_sum = sum([val for val in new_strat.values()])\n\n if norm_sum > 0:\n new_strat = {key: new_strat[key]/norm_sum for key in new_strat}\n else:\n num_valid = len(new_strat)\n new_strat = {key: 1/num_valid for key in new_strat}\n\n return new_strat","repo_name":"zanussbaum/pluribus","sub_path":"leduc/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"69"} +{"seq_id":"24814514728","text":"#!/usr/bin/env python\n\"\"\"An advanced Reducer, using Python iterators and generators.\"\"\"\n\nfrom operator import itemgetter\nimport sys\n\ndef read_mapper_output(input, separator='\\t'):\n for line in input:\n yield line.rstrip().split(separator, 2)\n\ndef main(separator='\\t'):\n data = read_mapper_output(sys.stdin, separator=separator)\n\n current_word = None\n current_count = 0.0\n\n for word, count, flag in data:\n count = float(count)\n\n if flag == 'U':\n print('%s%s%.3f' % (word, separator, float(current_count / count)))\n else:\n current_count = count\n\n if current_word == word:\n if flag == 'U':\n current_count = count\n else:\n print('%s%s%.3f' % (word, separator, float(current_count / count)))\n else:\n current_word = word\n current_count = count\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vchrombie/CS-GY-6513-BD","sub_path":"hw1/reducer2e.py","file_name":"reducer2e.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"26529083066","text":"import os\nimport sys\nimport unittest\n\nfrom abmatt.autofix import AutoFix\nfrom abmatt.brres import Brres\nfrom tests.lib import AbmattTest\n\n\nclass TestOpenCloseAll(AbmattTest):\n def 
test_open_close(self):\n        dir = os.path.join(self.base_path, 'brres_files')\n        output = os.path.join(dir, 'test.brres')\n        for x in os.listdir(dir):\n            current_file = os.path.join(dir, x)\n            if current_file != output:\n                if os.path.exists(output):\n                    os.remove(output)\n                if x.endswith('.brres'):\n                    b = Brres(current_file)\n                    b.save(output, True)\n                    self.assertTrue(os.path.exists(output))\n        return True\n\n\nif __name__ == '__main__':\n    unittest.main()\n\n","repo_name":"Robert-N7/abmatt","sub_path":"tests/test_open_close_all.py","file_name":"test_open_close_all.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"69"}
+{"seq_id":"8978724140","text":"import socket\n\nADDRESS = ('chals.sekai.team', 3062)\n\ndef sock():\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.connect(ADDRESS)\n\n    # recv() returns bytes, so the marker searched for must be bytes too\n    flag = b'STRING TO LOOK FOR'\n    data = recvUntil(s, flag) \n\n    print(\"Connection Closed\")\n    s.close()\n\ndef recvUntil(s, flag):\n    data = b''\n    while True:\n        chunk = s.recv(1024)  # renamed from 'input', which shadowed the builtin\n        data += chunk\n        print(repr(chunk))\n        if chunk.find(flag) != -1:\n            break\n    return data\n\nsock()","repo_name":"HAM3131/hacking","sub_path":"codeSkeletons/socketSkeleton.py","file_name":"socketSkeleton.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70058012380","text":"\"\"\" \nWrapper for creating a useful colormap\n\"\"\"\nimport matplotlib as mpl\nfrom matplotlib import cm\nimport numpy as np\n\ndef gray_sequentials(n: int) -> np.ndarray:\n    \"\"\"Create a list of colors in a gray scale \n    sequential map for n samples\"\"\"\n    start_points = cm.get_cmap('Greys', 5)\n    interpolated_colors = np.zeros((n, 3))\n\n    for i in range(3):\n        interpolated_colors[:, i] = np.linspace(\n            start_points(1)[i],\n            start_points(2)[i],\n            n\n        )\n    return interpolated_colors\n","repo_name":"orndavid/prettyplotter","sub_path":"prettyplotter/colormap.py","file_name":"colormap.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30556604925","text":"import pandas as pd \nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\n\n# ================================= Read the data\n# ~/Downloads/train_set.csv\n# ~/workspace/sublime/daguan/train_sample.csv\nprint('Reading data')\ndf_train = pd.read_csv('~/workspace/sublime/daguan/train_sample.csv')\ndf_test = pd.read_csv('~/workspace/sublime/daguan/train_sample.csv')\n\ndf_train.drop(columns=['article','id'], inplace=True)\ndf_test.drop(columns=['article'], inplace=True)\n\n# ================================== Extract TF-IDF text features\n# vectorizer = CountVectorizer(ngram_range=(1,2), min_df=3, max_df=0.9, max_features=100000)\nvectorizer = TfidfVectorizer(ngram_range=(1,2), min_df=3, max_df=0.9, max_features=100000)\nvectorizer.fit(df_train['word_seg'])\n\n# Training set\nx_train = vectorizer.transform(df_train['word_seg'])\ndf_train['class'] = df_train['class'] - 1\ny_train = df_train['class']\n\n# Test set\nx_test = vectorizer.transform(df_test['word_seg'])\n\n# Sampling: only the training set needs to be sampled\ndf_train['feature'] = x_train\nnew_df = df_train[['feature','class']]\nsub_sample = new_df.sample(n=5, frac=None, weights=None,replace=False, random_state=None, 
axis=0)\n","repo_name":"SheldonWong/competition","sub_path":"20180801-daguan-nlp-classification/feature/read_feature.py","file_name":"read_feature.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21320363438","text":"db = 'sqlite:///volentix_gateway_db'\n\neosio_params = {\n    'nodeos_host': 'https://eos.greymass.com',\n    'nodeos_port': 443,\n    'keosd_host': 'localhost',\n    'keosd_port': 8888,\n}\n\nsupported_currencies = {\n    'BTC': {\n        'colateral_name': 'VBTC',\n        'node_host': '',\n        'node_port': '',\n        'required_confirmations': 4\n    },\n    'ETH': {\n        'colateral_name': 'VETH',\n        'node_host': '',\n        'node_port': '',\n        'required_confirmations': 4\n    }\n}\n\nlogger_params = {\n    'name': 'Vdex Gateway',\n\n}\n\nsleep_timeout = 0.5\n","repo_name":"Volentix/vdexnode","sub_path":"src/gateway/gateway/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"30686978460","text":"import os\nimport sys\nfrom configparser import ConfigParser\nfrom loguru import logger\n\n__version__ = '0.1.0'\n\n# absolute package directory\npackage_dir = os.path.dirname(os.path.abspath(__file__))\n\n# configuration file\nrelative_config_path = os.path.join(\"config\", \"config.ini\")\nconfig_path = os.path.join(package_dir, relative_config_path)\nconfig = ConfigParser()\nconfig.read(config_path)\n\n# default configuration\nrelative_default_config_path = os.path.join(\"config\", \"default.ini\")\ndefault_config_path = os.path.join(package_dir, relative_default_config_path)\ndefault_config = ConfigParser()\ndefault_config.read(default_config_path)\n\n# project file\nrelative_project_path = os.path.join(\"..\", \"pyproject.toml\")\nproject_path = os.path.join(package_dir, relative_project_path)\nproject = ConfigParser()\nproject.read(project_path)\n\n# logger\nLOGGER_LEVEL = config[\"LOGGER\"][\"LEVEL\"]\nlogger.remove()\nlogger.add(sys.stderr, level=LOGGER_LEVEL)\nlogger.info(\"Game of Life being initialized ...\")\n","repo_name":"kuchynkm/game_of_life","sub_path":"game_of_life/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38608221411","text":"import os \nimport re\n# timedelta is needed for the midnight-rollover correction below\nfrom datetime import datetime, timedelta\n\ndef get_timestamp(s):\n    return datetime.strptime(s.split(\" \")[0], '%H:%M:%S.%f')\n\n\ndef parse_log_file(dir_name, log_name):\n    log_file = dir_name + log_name\n    with open(log_file) as f:\n        content = f.read()\n    lines = [l for l in content.split(\"\\n\") if l.find(\"solution\") != -1]\n    start = get_timestamp(content.split(\"\\n\")[0])\n    time = []\n    costs = []\n    for i in range(len(lines)):\n        line = lines[i]\n        cost = re.findall(r' [0-9][0-9].[0-9][0-9][0-9][0-9]', line)\n        if not cost:\n            continue\n        try:\n            get_timestamp(line)\n        except ValueError:  # skip lines without a leading timestamp\n            continue\n        if (get_timestamp(line) -start).total_seconds() < 0:\n            time.append((get_timestamp(line) -start + timedelta(days=1)).total_seconds())\n        else:\n            time.append((get_timestamp(line) -start).total_seconds())\n        costs.append(float(cost[0].strip()))\n    return costs, time 
\n\nimport sys\ncounter = 1\ndir_name = sys.argv[1]\nfor log in os.listdir(dir_name):\n    s = (\"T{}\\n\".format(counter))\n    st = \"\"\n    costs, times = parse_log_file(dir_name, log)\n    for j in range(len(costs)):\n        st += (\"{}, {}\\n\".format(times[j], costs[j]))\n    counter += 1\n    if st:\n        print(s + st)\n","repo_name":"KeplerC/cluster-dropbox","sub_path":"b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72299586460","text":"import os\nimport requests\n\nfrom flask import Flask, session, render_template, request, redirect, url_for, jsonify\nfrom flask_session import Session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nimport math\n\nos.environ['DATABASE_URL'] = \"postgres://cirmwacpuuhlha:e98f033dd24351340531cdb496cd6ef069b6a2ecff3f52fdbefbdcd50f95fc19@ec2-54-247-122-209.eu-west-1.compute.amazonaws.com:5432/d18fs835ggnl48\"\nKEY = \"e98f033dd24351340531cdb496cd6ef069b6a2ecff3f52fdbefbdcd50f95fc19\"\n\napp = Flask(__name__)\n\n# Check for environment variable\nif not os.getenv(\"DATABASE_URL\"):\n    raise RuntimeError(\"DATABASE_URL is not set\")\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Set up database\nengine = create_engine(os.getenv(\"DATABASE_URL\"))\ndb = scoped_session(sessionmaker(bind=engine))\n\n# code snippet against browser cache for development\n@app.context_processor\ndef override_url_for():\n    return dict(url_for=dated_url_for)\n\n\ndef dated_url_for(endpoint, **values):\n    if endpoint == 'static':\n        filename = values.get('filename', None)\n        if filename:\n            file_path = os.path.join(app.root_path,\n                                     endpoint, filename)\n            values['q'] = int(os.stat(file_path).st_mtime)\n    return url_for(endpoint, **values)\n# end of code snippet against browser cache for development\n\n\n@app.route(\"/logout\")\ndef logout():\n    session['logged_in'] = False\n    session['username'] = \"\"\n    return redirect(url_for('index'))\n\n\n@app.route(\"/\")\ndef index():\n    # Treat a missing or False flag uniformly as \"logged out\".\n    if not session.get('logged_in'):\n        session['logged_in'] = False\n        session['username'] = ''\n        return render_template(\"index.html\", logged_in=session['logged_in'])\n    username = session.get('username')\n    return render_template(\"index.html\", username=username, logged_in=session['logged_in'])\n\n\n@app.route(\"/register\")\ndef register():\n    return render_template(\"register.html\")\n\n\n@app.route(\"/register-user\", methods=[\"POST\"])\ndef register_user():\n    username = request.form.get(\"username\")\n    password = request.form.get(\"password\")\n    if db.execute(\"SELECT * FROM users WHERE username = :username\", {\"username\": username}).rowcount > 0:\n        return render_template(\"error.html\", message=\"This username is not available\")\n    db.execute(\"INSERT INTO users (username, password) VALUES (:username, :password)\",\n               {\"username\": username, \"password\": password})\n    db.commit()\n    return render_template(\"success.html\")\n\n\n@app.route(\"/login\", methods=[\"POST\"])\ndef login():\n    username = request.form.get(\"username\")\n    password = request.form.get(\"password\")\n    if db.execute(\"SELECT * FROM users WHERE username = :username AND password = :password\",\n                  {\"username\": username, \"password\": password}).rowcount == 1:\n        user = db.execute(\"SELECT * FROM users WHERE username = :username AND password = :password\",\n                          {\"username\": username, \"password\": password}).fetchone()\n        session['logged_in'] = True\n        session['user_id'] = user.id\n        session['username'] = user.username\n        return redirect(url_for('book_search'))\n    else:\n        session['logged_in'] = False\n        return redirect(url_for('index'))\n\n\ndef 
query_total_result(column, query):\n    if column == \"title\":\n        total_result = db.execute(\n            \"SELECT * FROM books WHERE LOWER(title) LIKE (LOWER(:query) || '%')\", {\"query\": query}).rowcount\n        return total_result\n    if column == \"author\":\n        total_result = db.execute(\n            \"SELECT * FROM books WHERE LOWER(author) LIKE (LOWER(:query) || '%')\", {\"query\": query}).rowcount\n        return total_result\n    if column == \"isbn\":\n        total_result = db.execute(\n            \"SELECT * FROM books WHERE LOWER(isbn) LIKE (LOWER(:query) || '%')\", {\"query\": query}).rowcount\n        return total_result\n\n\ndef query_books(column, query, page, per_page):\n    offset_num = (page-1) * per_page\n    if column == \"title\":\n        search_result = db.execute(\n            \"SELECT * FROM books WHERE LOWER(title) LIKE (LOWER(:query) || '%') ORDER BY title ASC LIMIT :per_page OFFSET :offset_num\", {\"query\": query, \"per_page\": per_page, \"offset_num\": offset_num}).fetchall()\n        return search_result\n    if column == \"author\":\n        search_result = db.execute(\n            \"SELECT * FROM books WHERE LOWER(author) LIKE (LOWER(:query) || '%') ORDER BY title ASC LIMIT :per_page OFFSET :offset_num\", {\"query\": query, \"per_page\": per_page, \"offset_num\": offset_num}).fetchall()\n        return search_result\n    if column == \"isbn\":\n        search_result = db.execute(\n            \"SELECT * FROM books WHERE LOWER(isbn) LIKE (LOWER(:query) || '%') ORDER BY title ASC LIMIT :per_page OFFSET :offset_num\", {\"query\": query, \"per_page\": per_page, \"offset_num\": offset_num}).fetchall()\n        return search_result\n\n\n@app.route(\"/book-search\", methods=[\"GET\", \"POST\"])\ndef book_search():\n    if not session.get('logged_in'):\n        return redirect(url_for('index'))\n    else:\n        per_page = 20\n        page = 1\n        if request.method == 'GET':\n            total_pages = 1\n            # fetch 20 books\n            books_initial = db.execute(\n                \"SELECT * FROM books ORDER BY title ASC LIMIT :per_page\", {\"per_page\": per_page}).fetchall()\n            return render_template(\"book-search.html\", username=session.get(\"username\"), books=books_initial, page=page, total_pages=total_pages)\n        else:\n            try:\n                query = request.form.get(\"title\")\n                column = \"title\"\n                if query is None:\n                    query = request.form.get(\"author\")\n                    column = \"author\"\n                if query is None:\n                    query = request.form.get(\"isbn\")\n                    column = \"isbn\"\n            except ValueError:\n                return render_template(\"error.html\", message=\"Invalid search criteria\")\n            return redirect(url_for('paginate', column=column, page=page, query=query))\n\n\n# URL converters restored to match the view signature; <int:page> is assumed\n# so that page arrives as an int for the offset arithmetic in query_books.\n@app.route(\"/book-search/<column>/<int:page>/<query>\")\ndef paginate(column, page, query):\n    if not session.get('logged_in'):\n        return redirect(url_for('index'))\n    else:\n        per_page = 20\n        total_result = query_total_result(column, query)\n        search_result = query_books(column, query, page, per_page)\n        total_pages = math.ceil((total_result / per_page))\n        if len(search_result) == 0:\n            return render_template(\"error.html\", message=\"Sorry, there is no book in our inventory according to your search criteria\")\n        else:\n            return render_template(\"book-search.html\", username=session.get(\"username\"),\n                                   books=search_result, page=page, total_pages=total_pages, column=column, query=query)\n\n\n@app.route(\"/book/<isbn>\")\ndef goto_book_page(isbn):\n    if not session.get('logged_in'):\n        return redirect(url_for('index'))\n    else:\n        res = requests.get(\"https://www.goodreads.com/book/review_counts.json\",\n                           params={\"key\": \"T1GNWtVnkUqq4k4alkHA\", \"isbns\": isbn}).json()\n        goodreads_average_rating = res['books'][0]['average_rating']\n        book = db.execute(\n            \"SELECT * FROM books WHERE isbn = :isbn\", 
{\"isbn\": isbn}).fetchone()\n reviews = db.execute(\n \"SELECT * FROM reviews WHERE book_isbn = :book_isbn\", {\"book_isbn\": isbn}).fetchall()\n return render_template(\"book.html\", username=session.get(\"username\"), book=book, reviews=reviews, goodreads_average_rating=goodreads_average_rating)\n\n\n@app.route(\"/book//review\", methods=[\"POST\"])\ndef review(isbn):\n rating = request.form.get(\"rating\")\n comment = request.form.get(\"comment\")\n if db.execute(\"SELECT * FROM reviews WHERE book_isbn = :book_isbn AND user_id = :user_id\",\n {\"book_isbn\": isbn, \"user_id\": session.get(\"user_id\")}).rowcount >= 1:\n return render_template(\"error.html\", message=\"You may review only one time per book\")\n db.execute(\n \"INSERT INTO reviews (rating, comment, user_id, book_isbn) VALUES (:rating, :comment, :user_id, :book_isbn)\", {\"rating\": rating, \"comment\": comment, \"user_id\": session.get(\"user_id\"), \"book_isbn\": isbn})\n db.commit()\n return redirect(url_for('goto_book_page', isbn=isbn))\n\n\n@app.route(\"/api/\")\ndef api_isbn(isbn):\n book = db.execute(\n \"SELECT * FROM books WHERE isbn = :isbn\", {\"isbn\": isbn}).fetchone()\n if book is None:\n return jsonify({\"error\": \"Book is not in our inventory\"}), 404\n review_count = db.execute(\n \"SELECT * FROM reviews WHERE book_isbn = :book_isbn\", {\"book_isbn\": isbn}).rowcount\n if review_count > 0:\n average_score_object = db.execute(\"SELECT AVG (rating) FROM reviews WHERE book_isbn = :book_isbn\", {\n \"book_isbn\": isbn}).fetchone()\n for row in average_score_object:\n average_score = float(row)\n return jsonify({\n \"title\": book.title,\n \"author\": book.author,\n \"year\": book.year,\n \"isbn\": book.isbn,\n \"review_count\": review_count,\n \"average_score\": average_score\n })\n","repo_name":"engingokmen/booknine","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":9267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"33975371656","text":"import speech_recognition as sr\nimport pyttsx3\nimport datetime\nimport wikipedia\nimport webbrowser\nimport os\nimport time\nfrom ecapture import ecapture as ec\nimport requests\nimport smtplib\nimport random\n\n# Setting up a speech engine\n\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[0].id)\n\n\ndef speak(audio):\n engine.say(audio)\n engine.runAndWait()\n\n\n# function to greet the user\ndef wishMe():\n hour = int(datetime.datetime.now().hour)\n if hour>=0 and hour<12:\n speak(\"Good Morning!\")\n print(\"Good Morning!\")\n\n elif hour>=12 and hour<18:\n speak(\"Good Afternoon!\") \n print(\"Good Afternoon!\") \n\n else:\n speak(\"Good Evening!\") \n print(\"Good Evening!\") \n\n speak(\"I am Jarvis Sir. 
Please tell me how may I help you\") \n\n\n# function to receive input from the microphone\ndef takeCommand():\n    #It takes microphone input from the user and returns string output\n\n    r = sr.Recognizer()\n    with sr.Microphone() as source:\n        print(\"Listening...\")\n        r.pause_threshold = 1\n        audio = r.listen(source)\n\n    try:\n        print(\"Recognizing...\") \n        query = r.recognize_google(audio, language='en-in')\n        print(f\"User said: {query}\\n\")\n\n    except Exception as e:\n        # print(e) \n        print(\"Say that again please...\") \n        return \"None\"\n    \n    return query\n\n\n# to send an email\ndef sendEmail(to, content):\n    server = smtplib.SMTP('smtp.gmail.com', 587)\n    server.ehlo()\n    server.starttls()\n    server.login('youremail@gmail.com', 'your-password')\n    server.sendmail('youremail@gmail.com', to, content)\n    server.close()\n\n\n# The main driving function\nif __name__ == \"__main__\":\n    wishMe()\n    while True:\n        # if 1:\n        query = takeCommand().lower()\n\n        # Logic for executing tasks based on query\n        if 'wikipedia' in query:\n            speak('Searching Wikipedia...')\n            query = query.replace(\"wikipedia\", \"\")\n            results = wikipedia.summary(query, sentences=2)\n            speak(\"According to Wikipedia\")\n            print(results)\n            speak(results)\n\n        elif 'open youtube' in query:\n            webbrowser.open(\"youtube.com\")\n\n        elif 'open google' in query:\n            webbrowser.open(\"google.com\")\n\n        elif 'open stackoverflow' in query:\n            webbrowser.open(\"stackoverflow.com\") \n\n\n        elif 'play music' in query:\n            music_dir = 'D:\\\\Jarvish\\\\Playlist'\n            songs = os.listdir(music_dir) \n            # randint is inclusive on both ends, so subtract 1 to avoid an IndexError\n            os.startfile(os.path.join(music_dir, songs[random.randint(0, len(songs) - 1)]))\n\n        elif 'time' in query:\n            strTime = datetime.datetime.now().strftime(\"%H:%M:%S\") \n            speak(f\"Sir, the time is {strTime}\")\n\n        elif 'open code' in query:\n            codePath = 'C:\\\\Users\\\\javed\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Cde.exe'\n            os.startfile(codePath)\n\n        elif 'email to harry' in query:\n            try:\n                speak(\"What should I say?\")\n                content = takeCommand()\n                to = \"harryyourEmail@gmail.com\" \n                sendEmail(to, content)\n                speak(\"Email has been sent!\")\n            except Exception as e:\n                print(e)\n                speak(\"Sorry my friend harry bhai. 
I am not able to send this email\") \n        \n        elif 'news' in query:\n            webbrowser.open_new_tab(\"https://timesofindia.indiatimes.com/home/headlines\")\n            speak('Here are some headlines from the Times of India, Happy reading')\n            time.sleep(6)\n\n        elif \"camera\" in query or 'take a photo' in query:\n            l = len(os.listdir(\"myClicks\"))\n            ec.capture(0, \"robo camera\", f'myClicks\\\\img{l}.jpg')\n\n        elif 'search' in query:\n            query = query.replace(\"search\", \"\")\n            webbrowser.open_new_tab(query)\n            time.sleep(5)\n\n        elif \"weather\" in query:\n            response = requests.get('https://api.open-meteo.com/v1/forecast?latitude=31.2560&longitude=75.7051&current_weather=true&hourly=temperature_2m,relativehumidity_2m,windspeed_10m#')\n\n            p = response.json()\n            cur_weather = p['current_weather']\n            temp = cur_weather['temperature']\n            windspeed = cur_weather['windspeed']\n            winddirection = cur_weather['winddirection']\n            weathercode = cur_weather['weathercode']\n            is_day = cur_weather['is_day']\n            cur_time = cur_weather[\"time\"]\n            cur_index = p['hourly']['time'].index(cur_time)\n\n            speak(f\"Temperature in Celsius unit is {str(temp)} \\n humidity in percentage is {str(p['hourly']['relativehumidity_2m'][cur_index])}\\n and the windspeed is {str(cur_weather['windspeed'])}\" )\n            \n\n            \n        elif 'ruko' in query or 'ok bye' in query or 'good bye' in query or 'stop' in query or 'bye' in query:\n            exit()\n","repo_name":"Javed0-786/AI-Personal-Assistant","sub_path":"Jarvish.py","file_name":"Jarvish.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28311904712","text":"# Sentiment-lexicon based text analysis: score the sentiment of comments\nfrom collections import defaultdict\nimport jieba\nimport codecs\nimport pandas as pd\n\n\n# Segment the document with jieba and remove stopwords\ndef seg_word(sentence, stopwords):\n    seg_list = jieba.cut(sentence)\n    seg_result = []\n    for w in seg_list:\n        seg_result.append(w)\n    return list(filter(lambda x: x not in stopwords, seg_result))\n\n\n# Classify words: find sentiment words, negation words and degree adverbs.\n# In the result, a word's index is the key and its score is the value;\n# negation words get a score of -1.\ndef classify_words(word_dict, sen_dict, not_word_list, degree_dic):\n    sen_word = dict()\n    not_word = dict()\n    degree_word = dict()\n\n    for word in word_dict.keys():\n        if word in sen_dict.keys() and word not in not_word_list and word not in degree_dic.keys():\n            sen_word[word_dict[word]] = sen_dict[word]\n        elif word in degree_dic.keys() and word not in not_word_list:\n            degree_word[word_dict[word]] = degree_dic[word]\n        elif word in not_word_list:\n            not_word[word_dict[word]] = -1\n\n    return sen_word, not_word, degree_word\n\n\n# Convert the segmented word list to a dict: key is the word, value is its index\n# in the list, i.e. the position where the word occurs in the document\ndef list_to_dict(word_list):\n    data = {}\n    for x in range(0, len(word_list)):\n        data[word_list[x]] = x\n    return data\n\n\ndef get_init_weight(sen_word, not_word, degree_word):\n    # Initialize the weight to 1\n    W = 1\n    # Convert the keys of the sentiment dict to a list\n    sen_word_index_list = list(sen_word.keys())\n    if len(sen_word_index_list) == 0:\n        return W\n    # Get the index of the first sentiment word, then walk all words from 0 up\n    # to that position looking for degree adverbs and negation words\n    for i in range(0, sen_word_index_list[0]):\n        if i in not_word.keys():\n            W *= -1\n        elif i in degree_word.keys():\n            # Update the weight: if there is a degree adverb, multiply by its intensity value\n            W *= float(degree_word[i])\n    return W\n\n\ndef score_sentiment(sen_word, not_word, degree_word, seg_result):\n    \"\"\"Compute the score\"\"\"\n    score = 0\n    W = get_init_weight(sen_word, not_word, degree_word)\n    # Initialize the sentiment-word index\n    sentiment_index = -1\n    # Positions of all sentiment words\n    sentiment_index_list = list(sen_word.keys())\n    # Walk the segmentation result (so we can locate degree adverbs and\n    # negation words between two sentiment words)\n    for i in range(0, len(seg_result)):\n        # If this is a sentiment word (judged by whether the index is in the classification result)\n        if i in sen_word.keys():\n            # weight * sentiment-word score\n            score += W * float(sen_word[i])\n            W = 1\n            # Advance the sentiment-word index to get the position of the next sentiment word\n            sentiment_index += 1\n            if sentiment_index < len(sentiment_index_list) - 1:\n                # Check whether there are degree adverbs or negation words\n                # between this sentiment word and the next\n                for j in range(sentiment_index_list[sentiment_index], sentiment_index_list[sentiment_index + 1]):\n                    # Update the weight: negate it for each negation word\n                    if j in not_word.keys():\n                        W *= -1\n                    elif j in degree_word.keys():\n                        # Update the weight: if there is a degree adverb, multiply by its intensity value\n                        W *= float(degree_word[j])\n        # Jump ahead to the next sentiment word (note: rebinding the for-loop\n        # variable has no effect in Python; the j-loop above already applies\n        # the modifiers in between, so this line is effectively a no-op)\n        if sentiment_index < len(sentiment_index_list) - 1:\n            i = sentiment_index_list[sentiment_index + 1]\n    return score\n\n\n# Compute the score for one sentence\ndef setiment_score(sententce, stopwords, sen_dict, not_word_list, degree_dic):\n    # 1. Segment the document\n    seg_list = seg_word(sententce, stopwords)\n    # 2. Convert the segmented list to a dict, then find sentiment words,\n    #    negation words and degree adverbs\n    sen_word, not_word, degree_word = classify_words(list_to_dict(seg_list), sen_dict, not_word_list, degree_dic)\n    # 3. Compute the score\n    score = score_sentiment(sen_word, not_word, degree_word, seg_list)\n    return score\n\n\n# Read the CSV containing the comments\ndef getSen(csv):\n    df = pd.read_csv(csv)\n    lst = list(df['code'])\n    code_lst = []\n    for ls in lst:\n        ls = str(ls)\n        ls = '0' * (6 - len(ls)) + ls\n        code_lst.append(ls)\n    time = list(df['time'])\n    comment = list(df['comment'])\n    click = list(df['click'])\n    result = [code_lst, time, comment, click]\n    return result\n\n\n# Save the data\ndef saveData(code, time, score, click):\n    result = [code, time, score, click]\n    df = pd.DataFrame([result], columns=['code', 'time', 'score', 'click'])\n    df.to_csv(\"newscore_buchong.csv\", mode='a', index=False, header=False)\n\n\nif __name__ == \"__main__\":\n    # Read the stopword file\n    stopwords = set()\n    fr = codecs.open('stopwords.txt', 'r', 'utf-8')\n    for word in fr:\n        stopwords.add(word.strip())\n    fr.close()\n\n    # Read the sentiment lexicon file\n    sen_file = open('BosonNLP_sentiment_score.txt', 'r+', encoding='utf-8')\n    # Get the lexicon file contents\n    sen_list = sen_file.readlines()\n    # Create the sentiment dict\n    sen_dict = defaultdict()\n    # Read each line of the lexicon and turn it into dict entries:\n    # key is the sentiment word, value is its score\n    for s in sen_list:\n        # Each line is split on spaces: index 0 is the word, index 1 is its sentiment score\n        try:\n            sen_dict[s.split(' ')[0]] = s.split(' ')[1]\n        except IndexError:\n            pass\n    sen_file.close()\n\n    # Read the negation-word file\n    not_word_file = open('notDic.txt', 'r+', encoding='utf-8')\n    # Negation words have no scores, only the words themselves, so a list is enough\n    not_word_list = not_word_file.readlines()\n    for i in range(0, len(not_word_list)):\n        not_word_list[i] = not_word_list[i].strip('\\n')\n    not_word_file.close()\n\n    # Read the degree-adverb file\n    degree_file = open('degree.txt', 'r+', encoding='gbk')\n    degree_list = degree_file.readlines()\n    degree_dic = defaultdict()\n    # Degree adverbs are handled like sentiment words: build a dict with the\n    # adverb as key and its intensity as value\n    for d in degree_list:\n        try:\n            degree_dic[d.split(',')[0]] = d.split(',')[1]\n        except IndexError:\n            pass\n    degree_file.close()\n\n    # Quick test (the sample string stays in Chinese: it is input data for the Chinese segmenter)\n    # string = \"操盘者每天把股价牢牢控制在圆角分上,想想这能力、这技术能让散户喝上汤吗?就靠着这几分钱加上时间足以榨干每一个小散。\"\n    # print(setiment_score(string, stopwords, sen_dict, not_word_list, degree_dic))\n\n    result = getSen('newpinglun_buchong.csv')\n    length = len(result[0])\n    for i in range(0, length):\n        code = result[0][i]\n        time = result[1][i]\n        comment = result[2][i]\n        click = result[3][i]\n        score = setiment_score(comment, stopwords, sen_dict, not_word_list, degree_dic)\n        saveData(code, time, score, click)\n        print(\"Processed text %d\" % (i + 1))\n","repo_name":"jacoryjin/guba","sub_path":"文本分析.py","file_name":"文本分析.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"28661908799","text":"\"\"\"\n    test page module\n\"\"\"\n\nimport unittest\n\nimport zoom\n\n\nclass 
TestPage(unittest.TestCase):\n\n    def test_page_status_not_provided(self):\n        page = zoom.Page('test')\n        self.assertEqual(page.status, '200 OK')\n\n    def test_page_status_provided(self):\n        page = zoom.Page('page missing', status='404 Not Found')\n        self.assertEqual(page.status, '404 Not Found')\n\n        page = zoom.Page('Moved temporarily', status='302 Found')\n        self.assertEqual(page.status, '302 Found')\n\n    def test_search_appears_when_empty(self):\n        page = zoom.Page('some content', search='')\n        self.assertIsNotNone(\n            page.header()\n        )\n","repo_name":"dsilabs/zoom","sub_path":"tests/unittests/test_page.py","file_name":"test_page.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"69"}
+{"seq_id":"27824137896","text":"#A = [1,3,4,60,70,50,2]\n#A = [1,2,3,4,50,60,70]\n\n#print(len(A))\n#print(n//2)\ndef main():\n    A = [] # create an empty list\n    while True: # loop until input stops\n        try:\n            A.append(int(input())) # append the number to the list\n        except EOFError:\n            break\n    A.sort() # sort the list\n    n = len(A)\n    if n%2==1: # if odd\n        mediana = A[(n//2)]\n    else: # if even\n        mediana = (A[(n//2)-1]+A[(n//2)])//2\n    print(mediana)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"willianvaneli/Questoes-UVA-judge","sub_path":"10107/10107.py","file_name":"10107.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19271269441","text":"import urllib.request, urllib.error\nimport json\n\nurl = input(\">>\")\nfhand = urllib.request.urlopen(url) # urlopen will ignore headers\nheaders = dict(fhand.getheaders()) # to get the headers\n\n\ndata = \"\"\ntot = 0\n\nfor line in fhand:\n#    print(line.decode().strip())\n    data += line.decode()\n\ninfo = json.loads(data)\nprint(\"User count:\", len(info[\"comments\"]))\n\nuserList = info[\"comments\"]\n\nfor user in userList:\n    tot += user[\"count\"]\n\nprint(\"Sum:\", tot)\n","repo_name":"devasenan134/code","sub_path":"Python/py4e/jsonParse.py","file_name":"jsonParse.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8614517021","text":"import requests\nfrom PythonExtension import QtCreator\nfrom PySide2 import QtWidgets\n\ndef load(url):\n    r = requests.get(url)\n    box = QtWidgets.QMessageBox(QtCreator.Core.ICore.dialogParent())\n    box.setWindowTitle(\"Request results\")\n    box.setText(\"The request status is {}\".format(r.status_code))\n    box.setDetailedText(r.text)\n    box.exec_()\n\nhelpMenu = QtCreator.Core.ActionManager.actionContainer(\"QtCreator.Menu.Window\")\nhelpMenu.menu().addAction(\"Load from the web...\", lambda: load(\"https://www.qt.io/\"))\n","repo_name":"dyedgreen/qt-creator-python-extensions","sub_path":"examples/requirerequests/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"23349599966","text":"\"\"\"\nTake a screenshot for the given URL, saving as the given output file.\n\nUsage:\n    screenshot.py [--size=<size>] [URL] [FILENAME]\n    screenshot.py -h | --help\n\nOptions:\n    -h --help      Show this screen.\n    --size=<size>  Size of image [default: 800].\n\nArguments:\n    URL       URL to take a screenshot for [default: https://myli.page].\n    FILENAME  Output filename [default: screenshot.png].\n\"\"\"\n\nimport io\n\nfrom docopt import docopt\nfrom PIL 
import Image\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nif __name__ == \"__main__\":\n    # Read options.\n    args = docopt(__doc__)\n    if args[\"URL\"] is None:\n        args[\"URL\"] = \"https://myli.page\"\n    if args[\"FILENAME\"] is None:\n        args[\"FILENAME\"] = \"screenshot.png\"\n    args[\"SIZE\"] = int(args[\"--size\"])\n    # Take screenshot.\n    options = Options()\n    options.headless = True\n    driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)\n    driver.get(args[\"URL\"])\n    driver.set_window_size(args[\"SIZE\"] * 2, args[\"SIZE\"] * 2)\n    image = driver.get_screenshot_as_png()\n    driver.quit()\n    # Resize the screenshot.\n    image = Image.open(io.BytesIO(image))\n    image.thumbnail((args[\"SIZE\"], args[\"SIZE\"]))\n    image.save(args[\"FILENAME\"])\n","repo_name":"lgtm-migrator/tslmy.github.io","sub_path":"screenshot.py","file_name":"screenshot.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4023528940","text":"\"\"\"\nConvert a string to a 32-bit clamped integer (atoi).\n\"\"\"\nclass Solution:\n    def strToInt(self, str: str) -> int:\n        if not str:return 0\n        str = str.strip()\n        if str == \"\":return 0\n        n = len(str)\n        num = \"0\" # Watch out for a tricky edge case: a string that is just \"+\" would otherwise break int(num)\n        ans = 0\n        if str[0] == '+':\n            for i in range(1,n):\n                if str[i] >= '0' and str[i] <= '9':\n                    num += str[i]\n                else:\n                    break\n            ans = int(num) if int(num) <= pow(2,31) - 1 else pow(2, 31) - 1\n        elif str[0] == '-':\n            for i in range(1, n):\n                if str[i] >= '0' and str[i] <= '9':\n                    num += str[i]\n                else:\n                    break\n            ans = -int(num) if -int(num) >= -pow(2, 31) else -pow(2, 31)\n        elif str[0] >= '0' and str[0] <= '9':\n            for i in range(n):\n                if str[i] >= '0' and str[i] <= '9':\n                    num += str[i]\n                else:\n                    break\n            ans = int(num) if int(num) <= pow(2,31) - 1 else pow(2, 31) - 1\n        return ans\n\n\nif __name__ == '__main__':\n    solu = Solution()\n    str = \"+\"\n    ans = solu.strToInt(str)\n    print(ans)\n\n\n","repo_name":"sakurasakura1996/Leetcode","sub_path":"剑指offer/剑指joffer67_把字符串转换成整数.py","file_name":"剑指joffer67_把字符串转换成整数.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"30697953663","text":"# -*- coding: utf-8 -*-\nfrom slideshowplus.projector import projector\nimport socket\nimport logging\nimport threading\nimport time\nimport itertools\n\n# for debugging\n# import sys\n# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\nlog = logging.getLogger(__file__)\n\nclass ViewSonic(projector.Projector):\n    def __init__(self, name=None, ip=None, port=None):\n        super().__init__()\n        self.name = name or \"ViewSonic\"\n        # on the projector, you need DHCP off, configure ip address\n        self.ip = ip or \"192.168.1.143\"\n        # control ports: TCP 23, TCP 9715 ...\n        # some other specs say port 4661\n        # !! 
self.port is overwritten in send_command and send_command1\n self.port = port or 23\n self.buffer_size = 1024\n\n # For the LAN control, the code format is similar except that\n # to replace the “0x” to “\\”, via a LAN Port 4661.\n def on(self):\n self.state = \"on\"\n log.info(\"power on\")\n\n # pj11581.pdf\n try_commands = [\n bytes.fromhex(\"BEEF030600BAD20100006001000D\"), # with carriage return (0D)\n bytes.fromhex(\"BEEF030600BAD2010000600100\"),\n ]\n try_ports = [23, 4661, 9715]\n self.thread_send_multiple(try_commands, try_ports)\n # self.thread_send_command(on_command1)\n\n def off(self):\n self.state = \"off\"\n log.info(\"power off\")\n\n # pj11581.pdf\n try_commands = [\n bytes.fromhex(\"BEEF0306002AD30100006000000D\"), # with carriage return (0D)\n bytes.fromhex(\"BEEF0306002AD3010000600000\"),\n ]\n try_ports = [23, 4661, 9715]\n self.thread_send_multiple(try_commands, try_ports)\n # self.thread_send_command(off_command1)\n\n def sleep(self):\n return\n\n # not sure how the projector protocol works,\n # trying 2 different send commands\n\n def send_command(self, cmd, port):\n try:\n log.info(\n \"\\n\\nSending control command:\\n\"\n \"[*] ip: {}\\n\"\n \"[*] port: {}\\n\"\n \"[*] cmd: {}\".format(\n self.ip, port, cmd)\n )\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setblocking(0)\n s.settimeout(2)\n s.connect((self.ip, port))\n s.send(cmd)\n response = s.recv(self.buffer_size)\n s.close()\n log.info(response)\n except Exception as e:\n log.warning(e)\n\n def thread_send_command(self, cmd, port):\n thread = threading.Thread(\n target=self.send_command,\n args=(cmd, port,)\n )\n thread.start()\n\n def thread_send_multiple(self, commands, ports):\n for cmd, port in itertools.product(commands, ports):\n # print(cmd, port)\n self.thread_send_command(cmd, port)\n time.sleep(3)\n\n\nif __name__ == \"__main__\":\n print(\"Testing ViewSonic.\")\n vs = ViewSonic(\n name=\"Test_ViewSonic\",\n ip=\"192.168.2.22\",\n )\n vs .on()\n # eps.off()\n # eps.sleep() # Not implemented.\n","repo_name":"voje/RPI_Projector","sub_path":"slideshowplus/projector/viewsonic.py","file_name":"viewsonic.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"74993728539","text":"\"\"\"Add in support for command aliasing\n\nAliasing is a configuration based system for creating short hand subcommands\nthat reference a full command and execute it when the alias is used. These\naliases will live in configs under a specific section and consist of a defined\npair of the `alias` -> `expanded command`. For example::\n\n [alias]\n crst = create --start\n\nThis would create an alias so that if you run `den crst` it would be the\nequivalent of running `den create --start`.\n\nAdditional commands exist to interact with the aliases, but they are basically\nre-wraps of the `config` group of commands.\n\"\"\"\nimport logging\nimport os\n\nimport click\n\nfrom .config import get_value, set_value, \\\n MissingConfigurationException\nfrom .. import LOCAL_CONFIG_FILE, USER_CONFIG_FILE\nfrom ..click_ext import SmartGroup\n\nlog = logging.getLogger(__name__)\n\nALIAS_SECTION = \"alias\"\n__commands__ = [\"interact_alias\"]\nALIAS_OUTPUT_FORMAT = \"`den {key}` is aliased to `den {value}`\"\n\n\ndef find(context, alias):\n \"\"\"Lookup command alias\n\n Retrieves the expanded command when provided with the application context\n and the alias desired. 
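For example, with the `crst` alias from the module docstring configured, find(context, 'crst') returns ['create', '--start'].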
Will return `None` if no alias is defined.\n \"\"\"\n if not hasattr(context, \"config\"):\n return None\n\n expansion = context.config.get(ALIAS_SECTION, alias)\n return expansion.split(' ') if expansion else None\n\n\nclass AliasGroup(SmartGroup):\n \"\"\"Extended SmartGroup with aliasing support\n\n Adds in logic to command resolution to perform an alias look up if the\n command does not normally resolve. This means that if an alias is defined\n that overlaps with an existing command, it will never be run.\n \"\"\"\n def resolve_command(self, ctx, args):\n try:\n return click.Group.resolve_command(self, ctx, args)\n except click.ClickException:\n alias = find(ctx.obj, args[0])\n if not alias:\n raise\n return click.Group.resolve_command(self, ctx, alias + args[1:])\n # the `alias + args[1:]` allows for the expanded alias to append\n # the extra arguments to itself, treating it like an inline\n # expansion\n\n\nclass NoAliasException(click.ClickException):\n \"\"\"Exception raised when an alias lookup fails. \"\"\"\n def __init__(self, alias):\n click.ClickException.__init__(self, \"No `{}` alias \"\n \"defined.\".format(alias))\n\n\n# > den alias []\n@click.command(\"alias\",\n short_help=\"Create or modify command aliases\")\n@click.option(\"-u\", \"--user\", is_flag=True, default=False,\n help=\"Use the user level configuration.\")\n@click.argument(\"alias\") # alias to act on\n@click.argument(\"command\", nargs=-1, required=False) # optional command to set\n@click.pass_context\ndef interact_alias(context, user, alias, command):\n \"\"\"Check, modify, or create command aliases\n\n If a command is provided, will set the alias to the command, if it is not\n provided will report the expaned command the alias refers to. An alias\n acts as a command expansion: `den alias crst -- create --start` would mean\n that `den crst` would be expanded to `den create --start`.\n \"\"\"\n context.obj.target_config = os.path.expanduser(USER_CONFIG_FILE) if user \\\n else os.path.join(context.obj.cwd, LOCAL_CONFIG_FILE)\n\n if command:\n context.invoke(set_value, section=[ALIAS_SECTION, alias],\n value=\" \".join(command))\n else:\n try:\n context.invoke(get_value, section=ALIAS_SECTION, key=alias,\n output_format=ALIAS_OUTPUT_FORMAT)\n except MissingConfigurationException:\n raise NoAliasException(alias)\n","repo_name":"jdost/den","sub_path":"src/den/commands/alias.py","file_name":"alias.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"4148140553","text":"# SERVER IP AND PORT\nSERVER_IP = '127.0.0.1'\nSERVER_PORT = 10305\n\n# REQUEST PROTOCOL CODE\nREQUEST_LOGIN = '0001'\nREQUEST_REGISTER = '0002'\nREQUEST_SHOW_RULE = '0003'\nREQUEST_PLAY_GAME = '0004'\nCOMMAND_START = '0005'\nREQUEST_SEND_SCORE = '0006'\nREQUEST_HIGH_SCORES = '0007'\nREQUEST_ROUND_END = '0008'\nREQUEST_SEND_DIFFICULTY = '0009'\nREQUEST_EXIT = '0010'\n\n# RESPONSE PROTOCOL CODE\nRESPONSE_LOGIN_RESULT = '1001'\nRESPONSE_REGISTER_RESULT = '1002'\nRESPONSE_SHOW_RULE_RESULT = '1003'\nRESPONSE_PLAY_GAME = '1004'\nRESPONSE_START = '1005'\nRESPONSE_SEND_SCORE = '1006'\nRESPONSE_HIGH_SCORES = '1007'\nRESPONSE_ROUND_END = '1008'\nRESPONSE_SEND_DIFFICULTY = '1009'\nRESPONSE_EXIT = '1010'\n\n# DATA DELIMITER\nDELIMITER = 
'|'\n","repo_name":"Nirmalya24/maze-runner","sub_path":"server/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"73053385180","text":"# -*- coding: utf-8 -*-\n# This software and supporting documentation are distributed by\n# Institut Federatif de Recherche 49\n# CEA/NeuroSpin, Batiment 145,\n# 91191 Gif-sur-Yvette cedex\n# France\n#\n# This software is governed by the CeCILL license version 2 under\n# French law and abiding by the rules of distribution of free software.\n# You can use, modify and/or redistribute the software under the\n# terms of the CeCILL license version 2 as circulated by CEA, CNRS\n# and INRIA at the following URL \"http://www.cecill.info\".\n#\n# As a counterpart to the access to the source code and rights to copy,\n# modify and redistribute granted by the license, users are provided only\n# with a limited warranty and the software's author, the holder of the\n# economic rights, and the successive licensors have only limited\n# liability.\n#\n# In this respect, the user's attention is drawn to the risks associated\n# with loading, using, modifying and/or developing or reproducing the\n# software by the user in light of its specific status of free software,\n# that may mean that it is complicated to manipulate, and that also\n# therefore means that it is reserved for developers and experienced\n# professionals having in-depth computer knowledge. Users are therefore\n# encouraged to load and test the software's suitability as regards their\n# requirements in conditions enabling the security of their systems and/or\n# data to be ensured and, more generally, to use and operate it in the\n# same conditions as regards security.\n#\n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL license version 2 and that you accept its terms.\nfrom __future__ import absolute_import\nfrom brainvisa.tools import spm_utils\nfrom brainvisa.processes import *\nimport os\nimport shutil\nfrom distutils.dir_util import copy_tree\nfrom soma.spm.spm_launcher import SPM12, SPM12Standalone\nfrom soma.spm.spm12.tools.shoot_tools import RunShoot\n\nconfiguration = Application().configuration\n\n\ndef validation():\n try:\n spm = SPM12Standalone(configuration.SPM.spm12_standalone_command,\n configuration.SPM.spm12_standalone_mcr_path,\n configuration.SPM.spm12_standalone_path)\n except Exception:\n spm = SPM12(configuration.SPM.spm12_path,\n configuration.matlab.executable,\n configuration.matlab.options)\n return spm\n\n\nuserLevel = 1\nname = 'SPM12 - Run Shoot - generic'\n\nSIGNATURE_BEGIN = [\"nb_images\", Integer()]\nSIGNATURE_END = [\n \"templates\", ListOf(ReadDiskItem('4D Volume', ['NIFTI-1 image', 'SPM image', 'MINC image'])),\n \n \"custom_outputs\", Boolean(),\n \n \"jacobian_outputs\", ListOf(WriteDiskItem('4D Volume', ['NIFTI-1 image', 'SPM image', 'MINC image'])),\n \"velocity_outputs\", ListOf(WriteDiskItem('4D Volume', ['NIFTI-1 image', 'SPM image', 'MINC image'])),\n \"deformation_outputs\", ListOf(WriteDiskItem('4D Volume', ['NIFTI-1 image', 'SPM image', 'MINC image'])),\n \n \"batch_location\", WriteDiskItem('Matlab SPM script', 'Matlab script'),\n]\n\nsignature_params = SIGNATURE_BEGIN + [\"images_1\", ListOf(ReadDiskItem('4D Volume', ['NIFTI-1 image', 'SPM image', 'MINC image']))] + SIGNATURE_END\nsignature = Signature(*signature_params)\n\n\ndef initialization(self):\n self.nb_images = 1\n 
self.addLink('batch_location', 'images_1', self.update_batch_path)\n    self.addLink(None, 'nb_images', self.update_nb_images)\n    self.addLink(None, 'custom_outputs', self.update_outputs_choice)\n    self.custom_outputs = False\n\n\ndef update_batch_path(self, proc):\n    \"\"\"\n    Place the batch file next to the first image of the images_1 param\n    \"\"\"\n    if self.images_1:\n        directory_path = os.path.dirname(self.images_1[0].fullPath())\n        return os.path.join(directory_path, 'spm12_shoot_job.m')\n\n\ndef update_nb_images(self, proc):\n    new_signature = list(SIGNATURE_BEGIN)\n    for i in range(proc):\n        new_signature += [\"images_%d\" % (i + 1),\n                          ListOf(ReadDiskItem('4D Volume', ['NIFTI-1 image', 'SPM image', 'MINC image']))]\n    new_signature += SIGNATURE_END\n    signature = Signature(*new_signature)\n    self.changeSignature(signature)\n\n\ndef update_outputs_choice(self, proc):\n    if proc:\n        self.setEnable('jacobian_outputs', 'velocity_outputs', 'deformation_outputs')\n    else:\n        self.setDisable('jacobian_outputs', 'velocity_outputs', 'deformation_outputs')\n    self.changeSignature(self.signature)\n\n\ndef execution(self, context):\n    \n    run_shoot = RunShoot()\n    t = []\n    for i in range(1, self.nb_images + 1):\n        t.append([im.fullPath() for im in getattr(self, 'images_%d' % i)])\n    run_shoot.images_path_list = t\n    run_shoot.templates_path_list = [template.fullPath() for template in self.templates]\n    \n    if self.custom_outputs:\n        run_shoot.jacobian_output_path_list = [j.fullPath() for j in self.jacobian_outputs]\n        run_shoot.velocity_output_path_list = [v.fullPath() for v in self.velocity_outputs]\n        run_shoot.deformation_output_path_list = [d.fullPath() for d in self.deformation_outputs]\n    \n    spm = validation()\n    spm.addModuleToExecutionQueue(run_shoot)\n    spm.setSPMScriptPath(self.batch_location.fullPath())\n    output = spm.run()\n    context.log(name, html=output)\n","repo_name":"brainvisa/brainvisa-spm","sub_path":"brainvisa/toolboxes/spm/processes/spm12/tools/shoot_tools/SPM12RunShoot_generic.py","file_name":"SPM12RunShoot_generic.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
{"seq_id":"22876079208","text":"# -*- coding: gb18030 -*-\n\n# $Id: OnlineRewardMgr.py,v 1.1 10:44 2009-10-27 jiangyi Exp $\n\nimport Language\nfrom bwdebug import *\nimport csdefine\nfrom config.skill.FixTimeReward import Datas as rd\n\nclass OnlineRewardMgr:\n\t\"\"\"\n\tLoader for the online (fixed-time) reward configuration\n\t\"\"\"\n\t_instance = None\n\tdef __init__( self ):\n\t\t# do not allow two or more instances\n\t\tassert OnlineRewardMgr._instance is None\n\t\tOnlineRewardMgr._instance = self\n\t\t\n\t\tself._datas = rd\n\t\t#key : timer\n\t\t#value : { 'rewarduid': 10011, ... }\n\n\tdef rewardKeys( self ):\n\t\t\"\"\"Return the reward time keys in sorted order\"\"\"\n\t\tkeyOrderList = self._datas.keys()\n\t\tif len( keyOrderList ) == 0:\n\t\t\tERROR_MSG( \"reward config has no data.\" )\n\t\t\treturn None\n\t\tkeyOrderList.sort()\n\t\treturn keyOrderList\n\t\t\n\tdef getTick( self, timeTick ):\n\t\t\"\"\"\n\t\tGet the next reward time after timeTick\n\t\t\"\"\"\n\t\tkeyOrderList = self.rewardKeys()\n\t\tfor data in keyOrderList:\n\t\t\tif timeTick < data: return data\n\t\treturn None\n\n\tdef getRewardTick( self, lifetime ):\n\t\t\"\"\"\n\t\tGet the sorted list of reward times that lifetime has already passed\n\t\t\"\"\"\n\t\tkeyOrderList = self.rewardKeys()\n\t\tDatas = []\n\t\tfor data in keyOrderList:\n\t\t\tif lifetime > data:\n\t\t\t\tDatas.append( data )\n\t\tif len( Datas ) == 0:\n\t\t\treturn None\n\t\tDatas.sort()\n\t\treturn Datas\n\t\t\n\tdef getData( self, timeTick ):\n\t\t\"\"\"\n\t\tGet the reward data\n\t\t@param timeTick: reward time key\n\t\t@return: the reward dict for this time, e.g. { 'rewarduid': 10011, ... }\n\t\t\"\"\"\n\t\ttry:\n\t\t\treturn self._datas[timeTick]\n\t\texcept KeyError:\n\t\t\tERROR_MSG( \"timeTick %s has no data.\" % ( timeTick ) )\n\t\t\treturn None\n\t\t\t\n\tdef getCount( self ):\n\t\t\"\"\"Return the number of configured rewards\"\"\"\n\t\treturn len( self._datas )\n\t\n\tdef getRewardUid( self, timeTick ):\n\t\t\"\"\"\n\t\tGet the reward content (item uid)\n\t\t\"\"\"\n\t\ttry:\n\t\t\treturn self._datas[timeTick]['rewarduid']\n\t\texcept KeyError:\n\t\t\tERROR_MSG( \"timeTick %s has no item.\" % ( timeTick ) )\n\t\t\treturn None\n\t\t\t\n\tdef getDec( self, timeTick ):\n\t\t\"\"\"\n\t\tGet the reward description\n\t\t\"\"\"\n\t\ttry:\n\t\t\treturn self._datas[timeTick]['decInfo']\n\t\texcept KeyError:\n\t\t\tERROR_MSG( \"timeTick %s has no decInfo.\" % ( timeTick ) )\n\t\t\treturn None\n\t\t\t\n\t@classmethod\n\tdef instance( SELF ):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tif SELF._instance is None:\n\t\t\tSELF._instance = OnlineRewardMgr()\n\t\treturn SELF._instance\n\t\t\n\t\t\n#\n# $Log: not supported by cvs2svn $\n#","repo_name":"mudsave/csol2_enities_45541","sub_path":"common/OnlineRewardMgr.py","file_name":"OnlineRewardMgr.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"74411489499","text":"\"\"\"add bdc_status indexes\n\nRevision ID: d768a5da908c\nRevises: 4fb7e197d241\nCreate Date: 2021-10-11 15:25:17.536833\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"d768a5da908c\"\ndown_revision = \"4fb7e197d241\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.execute(\n        \"CREATE INDEX idx_bst_id_value_text ON taxonomie.bdc_statut_taxons (id_value_text);\"\n    )\n    op.execute(\"CREATE INDEX idx_bsctv_id_text ON taxonomie.bdc_statut_cor_text_values (id_text);\")\n    op.execute(\n        \"CREATE INDEX idx_bsctv_id_value ON taxonomie.bdc_statut_cor_text_values (id_value);\"\n    )\n    op.execute(\"CREATE INDEX idx_bstxt_cd_sig ON taxonomie.bdc_statut_text (cd_sig);\")\n    op.execute(\n        \"\"\"\n        CREATE INDEX idx_bstxt_cd_type_statut \n        ON taxonomie.bdc_statut_text (cd_type_statut);\n        \"\"\"\n    )\n\n\ndef downgrade():\n    op.execute(\n        \"\"\"\n        DROP INDEX\n        taxonomie.idx_bst_id_value_text,\n        taxonomie.idx_bsctv_id_text,\n        taxonomie.idx_bstxt_cd_type_statut,\n        taxonomie.idx_bstxt_cd_sig ;\n        \"\"\"\n    )\n","repo_name":"PnX-SI/TaxHub","sub_path":"apptax/migrations/versions/d768a5da908c_add_bdc_status_indexes.py","file_name":"d768a5da908c_add_bdc_status_indexes.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"69"}
{"seq_id":"24439676675","text":"\r\n\r\nclass Signal(object):\r\n    def __init__(self, id, x_eeg1, x_eeg2, x_emg, y_labels):\r\n        self.id = id\r\n        self.x_eeg1 = x_eeg1\r\n        self.x_eeg2 = x_eeg2\r\n        self.x_emg = x_emg\r\n        self.y_labels = y_labels\r\n\r\n        self.sampling_rate = 128. 
# Hz\r\n\r\n","repo_name":"elvetian/AML2018","sub_path":"Task5/signal.py","file_name":"signal.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"33902448322","text":"import sqlite3\nimport os\nfrom whoosh.index import create_in\nfrom whoosh.fields import *\nfrom whoosh.index import open_dir\nfrom whoosh.index import EmptyIndexError\n\ndef db_close(conn):#close the db connection\n    try:\n        conn.close()\n    except:\n        pass\n    \ndef db_connect(dbname, *tables):\n    if dbname == '':#if the database name is empty then ask for one to connect to\n        inp = input(\"Please specify a database name or press /N or /n to exit\\n\")#take input in\n        if inp.startswith('/n') or inp.startswith('/N'):#exit if either input is given\n            exit()\n        else:#take in the database name\n            dbname = inp\n    conn = sqlite3.connect(dbname)#connect to the database\n    conn.row_factory = sqlite3.Row#grab rows\n    if len(tables) == 0:#if tables is empty then go ahead and fetch all table names\n        tables = []\n        try:\n            df = conn.execute('SELECT name FROM sqlite_master WHERE type = \"table\";')#grab every table in the database\n            data = df.fetchall()\n            for row in data:\n                for member in row:\n                    tables.append(member)\n        except Exception as e:#if error then either continue or exit\n            print(\"Error: \" + str(e))\n            inp = input(\"Continue? Yes or no\\n\")\n            if 'y' in inp or 'Y' in inp:\n                pass\n            else:\n                exit()\n    return tables, conn, dbname#return tables, connection and database name\n\ndef index(dbnames_schemas = {}, indexdir = 'indexdir'):\n    if len(dbnames_schemas) == 0:#if no schemas are given then run db_connect on an empty name\n        tables, conn, dbname = db_connect('')\n        schema = ''\n        parse_db(tables, conn, indexname=dbname, schema='', indexdir = indexdir)#parse the database tables, indexing them with an empty schema\n    else:\n        for i in dbnames_schemas.keys():#given schema keys run db_connect on each one\n            tables, conn, dbname = db_connect(i)\n            parse_db(tables, conn, indexname=i, schema= dbnames_schemas[i], indexdir=indexdir)#parse the database tables, indexing them with the given schema\n\n    \ndef parse_db(tables, conn, indexname, schema, indexdir):\n    try:\n        ix = open_dir(indexdir, indexname)#open the index, store in ix\n        print('\\nIndex: ' + indexname + ' already exists in directory: ' + indexdir + ' Overwrite?. YES or NO')#if it exists ask whether to overwrite\n        inp = input(\"<> \")\n        if 'y' in inp or 'Y' in inp:\n            pass\n        else:\n            return\n    except:\n        pass\n    print('\\n\\nDATABASE: ' + indexname + ' contains ' + str(len(tables)) + ' tables')#print length \n    for table in tables:\n        print('Indexing table: ' + table + '...')\n        t = tuple(table)#make a tuple\n        c = conn.cursor()#start cursor\n        c.execute('SELECT * FROM ' + table + \";\")#select everything\n        rows = c.fetchall()#grab all rows\n        if len(rows) != 0:#if not empty\n            keys = rows[0].keys()#the first row gives the keys\n            while ('Schema' not in str(type(schema))):#keep asking until schema is a Schema instance\n                if 'con' in str(type(schema)):#str exists in str type schema\n                    if schema.strip() != '':#a non-empty schema string was given, stop asking\n                        break\n                inp = input(\"> You didn't provide a schema. Provide one? Yes or no (Use default schema): \")#ask for a schema because one wasn't provided\n                
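# e.g. for columns Name, Era the loop below builds the default-schema string\n                # \"Schema(Name=TEXT(stored=True), Era=TEXT(stored=True))\" from the row keys.\n                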
s = 'Schema('#build the default schema from the first row's keys\n                for key in keys:\n                    s += key.replace('\\n', '').replace('\\r', '').replace('\\t', '').replace(' ', '') + \"=TEXT(stored=True), \"\n                s = s.rstrip(\", \")\n                s += \")\"\n                if inp.strip().startswith('y') or inp.strip().startswith('Y'):#take in a schema; accept anything that begins with y\n                    print('Your database contains the following column names:')\n                    [print(x + \", \", end='') for x in keys]\n                    print('''\\n\\nPlease use them to generate a schema of the form: \"Schema(COLUMN_NAME=whoosh.field.type, ...)\"\n e.g. ''' + s)#print an example built from the first row\n                    schema = input('Please input a schema: ')#continue asking for a schema if one is not given\n                    while not schema or not schema.startswith('Schema') or not schema.endswith(')'):#if not given or it doesn't start with Schema continue asking for input\n                        schema = input('Please input a schema: ')\n                    schema.strip()\n                else:\n                    print('Proceeding to use default schema: ' + s)#default schema is chosen\n                    schema = s\n                    break\n            schema = eval(schema)#evaluate the schema string\n            if not os.path.exists(indexdir):#make the directory and create the index with schema and index name\n                os.mkdir(indexdir)\n            ix = create_in(indexdir, schema=schema, indexname=indexname)\n            writer = ix.writer()#to write documents\n            docline = ''#empty to begin\n            print('Indexing ' + indexname)\n            for row in rows:#add documents via docline\n                docline = 'writer.add_document('\n                for key in keys:#go through all keys (schema fields) and place items in the matching columns\n                    val = row[key]\n                    if \"'\" in val: #needed to remove invalid syntax\n                        val = escape(val)\n                    val = val.replace(\"\\t\", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\")#strip control characters from the value\n                    print(val) \n                    docline += key.replace('\\n', '').replace('\\r', '').replace('\\t', '').replace(' ', '') + \"=u'\" + val + \"', \"#append the key with its value\n                docline = docline.rstrip(\", \")#remove trailing \", \"\n                docline += \")\"#close\n                print('...', end='\\r')\n                try:\n                    eval(docline)#evaluate\n                except Exception as e:#if an error is thrown then cancel writing and return\n                    print('Error at: ' + docline)\n                    writer.cancel()\n                    print('Cancelled indexing due to error: ' + str(e))\n                    return\n            writer.commit()#commit and print total\n            print('Total data indexed: ' + str(len(rows)) )\n        else:#nothing entered\n            print('Database contains no valid rows')\n\ndef escape(s, obj = \"'\"):\n    ret = ''\n    for x in s:#walk the string and escape the invalid character by prepending a backslash\n        if x == obj:\n            ret += '\\\\'\n        ret += x\n    return ret\n    \nif __name__ == '__main__':#define the index dir and the db_schemas; for our first example (dinosaur) we define the schemas, for the rest we can input a \n    ix = None#new schema or go with a default one\n    indexdir = \"indexdir\"\n    dbnames_schemas = {'dinosaur.db' : 'Schema(Name=TEXT(stored=True), Description=TEXT(stored=True), Era=TEXT(stored=True), Url=ID(stored=True), Image=ID(stored=True))',\n                       'mmorpg.db' : '', 'superfamicom.db' : ''}\n    print('Creating new index: ' + indexdir)\n    index(\n        dbnames_schemas, indexdir\n    )\n","repo_name":"TylerCrabtree/PythonWebBroswer","sub_path":"indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":6500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"31088014425","text":"import json\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\nimport transformers\nfrom torch import nn\nfrom 
torch.utils.data import DataLoader\nfrom transformers import AutoModel, AutoTokenizer\n\n# Set environment variables\nos.environ[\"NCCL_DEBUG\"] = \"INFO\"\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n# Load parameters from JSON file\nwith open('path/to/your/parameter.json', 'r') as f:\n    param_dict = json.load(f)\n\nmodel_path = param_dict['model_path']\nencoding_path = param_dict['encoding_path']\ndevice = param_dict['gpu_device']\nthreshold = param_dict['threshold']\n\nencode_reverse = pickle.load(open(encoding_path, 'rb'))\nencode_reverse = np.array(list(encode_reverse.values()))\n\nclass MyBert(nn.Module):\n    def __init__(self):\n        super(MyBert, self).__init__()\n        self.pretrained = AutoModel.from_pretrained('xlm-roberta-base')\n        self.multilabel_layers = nn.Sequential(nn.Linear(768, 256),\n                                               nn.Mish(),\n                                               nn.BatchNorm1d(256),\n                                               nn.Dropout(0.1),\n                                               nn.Linear(256, 64),\n                                               nn.Mish(),\n                                               nn.BatchNorm1d(64),\n                                               nn.Dropout(0.1),\n                                               nn.Linear(64, len(encode_reverse))\n                                               )\n\n    def forward(self, **kwargs):\n        s1 = self.pretrained(**kwargs)\n        downs_topics = self.multilabel_layers(s1['pooler_output'])\n\n        # check the combined case first; placed after the single-flag branches it could never be reached\n        if kwargs.get('output_hidden_states', False) and kwargs.get('output_attentions', False):\n            return s1['hidden_states'], s1['attentions']\n        elif kwargs.get('output_hidden_states', False):\n            return s1['hidden_states']\n        elif kwargs.get('output_attentions', False):\n            return s1['attentions']\n        else:\n            return downs_topics\n\ncategory_model = MyBert()\n\n# Load model weights\nloaded_state_dict = torch.load(model_path, map_location=device)\ncategory_model.load_state_dict(loaded_state_dict)\n\ntokenizer = AutoTokenizer.from_pretrained(\"xlm-roberta-base\")\n\nsig_func = nn.Sigmoid().to(device)\ncategory_model.to(device).eval()\n\ndef analyze_text_topics_inhouse(text):\n    inputs = tokenizer(\n        [text],\n        padding='max_length',\n        truncation=True,\n        max_length=512,\n        return_tensors=\"pt\",\n    )\n    inputs = {k: v.to(device) for k, v in inputs.items()}\n    with torch.no_grad():\n        pred_topics = category_model(**inputs)\n    pred_topics_score = sig_func(pred_topics).detach().cpu().numpy()\n    pred_topics = np.where(pred_topics_score > threshold, 1, 0)\n    idxli = np.argwhere(pred_topics == 1)[:, 1]\n    topics = list(encode_reverse[idxli])\n    score_list = list(pred_topics_score[0][idxli])\n    topics_text = []\n    topics_text.append(dict(\n        text=text,\n        topics_list=topics,\n        score_list=str(score_list)\n    ))\n    json_object = json.dumps(topics_text)\n    print(json_object)\n    return json_object\n\ntext = \"I dont care, it's a bad product. 
I don't want to use it anymore\"\nanalyze_text_topics_inhouse(text)","repo_name":"tychen5/AppReviewAnalysis","sub_path":"TopicsClassification/TopicInferenceAPI.py","file_name":"TopicInferenceAPI.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"22876079208","text":"# coding=utf-8\nfrom ckeditor.fields import RichTextField\nfrom django.db import models\nfrom core.base_model import Common\n\n__author__ = 'alexy'\n\n\nclass About(Common):\n    class Meta:\n        verbose_name = u'Статья'\n        verbose_name_plural = u'Статьи'\n        app_label = 'about'\n        ordering = ['-created']\n\n    def __unicode__(self):\n        return self.title\n\n    title = models.CharField(verbose_name=u'Заголовок', max_length=150)\n    text = RichTextField(verbose_name=u'Текст')\n","repo_name":"od-5/safari","sub_path":"apps/about/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"9400446256","text":"from PIL import Image\n\nfrom image.SVM_image_features import load_and_set as l\n\n# script for augmenting the dataset by flipping all the images horizontally\npath = 'data/balanced_1/train/1/'\n\nd = l.load_directory(path)\nfor name in d:\n    img = Image.open(path + name)\n    img_flipped = img.transpose(Image.FLIP_LEFT_RIGHT)\n    img_flipped.save(path + 'f_' + name)\n","repo_name":"lluccardoner/MediaInterestingness","sub_path":"image/ResNet50/other/flip_img_h.py","file_name":"flip_img_h.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
{"seq_id":"5100456924","text":"import os\n\n\n# Get the input path to data file.\ndef getInputPath(fileName):\n    inputPath = os.path.join(\"/\", \"Volumes\", \"TOSHIBA EXT\", \"development\", \"dgef-data-science\", \"exercises\",\n                             \"final_project\",\n                             \"data\", \"google-stock-price\", fileName)\n    # inputPath = os.path.join(os.path.expanduser(\"~\"), \"development\", \"dgef-data-science\",\n    #                          \"exercises\", \"final_project\",\n    #                          \"data\", \"google-stock-price\", fileName)\n\n    # Prints the absolute input path to the CSV file.\n    print(\"The input file is: \", inputPath)\n\n    return inputPath\n\n\n# Get the output path to data file.\ndef getOutputPath(fileName):\n    # Output path for the file.\n    outputPath = os.path.join(\"/\", \"Volumes\", \"TOSHIBA EXT\", \"development\", \"dgef-data-science\", \"exercises\",\n                              \"final_project\",\n                              \"output\", \"google-stock-price\", fileName)\n    # outputPath = os.path.join(os.path.expanduser(\"~\"), \"development\", \"dgef-data-science\",\n    #                           \"exercises\", \"final_project\",\n    #                           \"output\", \"google-stock-price\", fileName)\n\n    # Prints the absolute output path to the CSV file.\n    print(\"The output file is: \", outputPath)\n\n    return outputPath\n","repo_name":"neucast/dgef-data-science","sub_path":"exercises/final_project/src/google-stock-price/FileManager.py","file_name":"FileManager.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"33002869283","text":"'''\nLists\ndef get_formatted_ame(first_name,last_name,middle_name=''): #give the middle-name parameter a default empty string\n    #return a neatly formatted name\n    if middle_name:\n        full_name = first_name+' '+middle_name+' '+last_name\n    else:\n        full_name = first_name+' '+last_name\n    return full_name.title() #return the result to the calling line\n\nmusican = get_formatted_ame(\"jiem\",\"hendrix\") 
#store the returned value in a variable\nprint(musican)\nmusican = get_formatted_ame(\"jiem\",\"hendrix\",\"lee\") #the middle-name argument is optional\nprint(musican)\n'''\n'''\n#Dictionaries\ndef build_person(first_name,last_name,age=''): #let it accept an optional value\n    Return a dictionary containing information about a person\n    person = {'first':first_name,\"last\":last_name} #key-value pairs\n    if age:\n        person[\"age\"]=age #when the call supplies this argument, store the value in the dict\n    return person\n\nmusician = build_person(\"jime\",\"hendrix\",age=12)\nprint(musician)\n'''\ndef get_formatted_ame(first_name,last_name):\n    full_name = first_name+ \" \" + last_name\n    return full_name.title()\n\nwhile True:\n    print(\"\\nPlease tell me your name:\")\n    print(\"(enter 'q' at any time to 'quit')\") #tell the user how to quit\n    f_name = input(\"First name: \")\n    if f_name == \"q\": #the loop ends when 'q' is entered\n        break\n    l_name = input(\"Last name: \")\n    if l_name == \"q\":\n        break\n    formatted_ame = get_formatted_ame(f_name,l_name) \n    print(\"\\nHello, \"+formatted_ame+\"!\")","repo_name":"Zuimengxixia/Web_Selenium","sub_path":"python学习/Chapter_8_function/8-2返回值.py","file_name":"8-2返回值.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"13105622539","text":"import numpy as np\nimport glob\nimport cv2\n\n\n# original image\nsrc = cv2.imread('img/1_re.png')\n# cv2.imshow(\"org\",src)\n\n\n\nresize_img = cv2.resize(src, dsize=(0, 0), fx=0.12, fy=0.1, interpolation=cv2.INTER_AREA)\nheight, width = src.shape[:2]\n# width, height\nmat = np.float32([[1, 0, 0], [0, 1, -15]])\ntran = cv2.warpAffine(resize_img, mat, (width,height))\n\ncv2.namedWindow('tran',cv2.WND_PROP_FULLSCREEN)\ncv2.setWindowProperty('tran', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\ncv2.imshow(\"tran\", tran)\n\ncv2.waitKey(0)\n\n\n","repo_name":"solsolr/nextLevel","sub_path":"nextlevel_files/3projec_0125/3projec/tkinter.py","file_name":"tkinter.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"26254853640","text":"import httpx\n\nfrom nonebot.adapters.cqhttp import MessageSegment\n\n\n# currently returns 404; kept as a backup\ndef img_from_zyg0():\n    r = httpx.get('https://api.zyg0.com/api/cos.php', verify=False)\n    if r.status_code != 200:\n        raise Exception('unexpected status code')\n    else:\n        print(r.text.strip())\n        message = MessageSegment.image(r.text.strip())\n        return message\n","repo_name":"JustUndertaker/tuanzi_bot","sub_path":"plugins/coser_img/zyg0.py","file_name":"zyg0.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"}
{"seq_id":"24425976560","text":"from math import atan2, degrees, radians\n\nfrom inkex import CubicSuperPath, Path, Transform\n\nfrom ..commands import is_command_symbol\nfrom ..i18n import _\nfrom ..svg.path import get_node_transform\nfrom ..svg.tags import (EMBROIDERABLE_TAGS, INKSTITCH_ATTRIBS, SVG_USE_TAG,\n                        XLINK_HREF)\nfrom ..utils import cache\nfrom .element import EmbroideryElement, param\nfrom .validation import ObjectTypeWarning, ValidationWarning\n\n\nclass CloneWarning(ValidationWarning):\n    name = _(\"Clone Object\")\n    description = _(\"There are one or more clone objects in this document. \"\n                    \"Ink/Stitch can work with single clones, but only a very few parameters can be set. \")\n    steps_to_solve = [\n        _(\"If you want to convert the clone into a real element, follow these steps:\"),\n        _(\"* Select the clone\"),\n        _(\"* Run: Edit > Clone > Unlink Clone (Alt+Shift+D)\")\n    ]\n\n\nclass CloneSourceWarning(ObjectTypeWarning):\n    name = _(\"Clone is not embroiderable\")\n    description = _(\"There are one or more clone objects in this document. A clone must be a direct child of an embroiderable element. \"\n                    \"Ink/Stitch cannot embroider clones of groups or other non-embroiderable elements (text or image).\")\n    steps_to_solve = [\n        _(\"Convert the clone into a real element:\"),\n        _(\"* Select the clone.\"),\n        _(\"* Run: Edit > Clone > Unlink Clone (Alt+Shift+D)\")\n    ]\n\n\nclass Clone(EmbroideryElement):\n    # A clone embroidery element is linked to an embroiderable element.\n    # It will be ignored if the source element is not a direct child of the xlink attribute.\n\n    element_name = \"Clone\"\n\n    def __init__(self, *args, **kwargs):\n        super(Clone, self).__init__(*args, **kwargs)\n\n    @property\n    @param('clone', _(\"Clone\"), type='toggle', inverse=False, default=True)\n    def clone(self):\n        return self.get_boolean_param(\"clone\")\n\n    @property\n    @param('angle',\n           _('Custom fill angle'),\n           tooltip=_(\"This setting will apply a custom fill angle for the clone.\"),\n           unit='deg',\n           type='float')\n    @cache\n    def clone_fill_angle(self):\n        return self.get_float_param('angle') or None\n\n    @property\n    @param('flip_angle',\n           _('Flip angle'),\n           tooltip=_(\"Flip the automatically calculated angle if it appears to be wrong.\"),\n           type='boolean')\n    @cache\n    def flip_angle(self):\n        return self.get_boolean_param('flip_angle')\n\n    def get_cache_key_data(self, previous_stitch):\n        source_node = get_clone_source(self.node)\n        source_elements = self.clone_to_element(source_node)\n        return [element.get_cache_key(previous_stitch) for element in source_elements]\n\n    def clone_to_element(self, node):\n        from .utils import node_to_elements\n        return node_to_elements(node, True)\n\n    def to_stitch_groups(self, last_patch=None):\n        patches = []\n\n        source_node = get_clone_source(self.node)\n        if source_node.tag not in EMBROIDERABLE_TAGS:\n            return []\n\n        old_transform = source_node.get('transform', '')\n        source_transform = source_node.composed_transform()\n        source_path = Path(source_node.get_path()).transform(source_transform)\n        transform = Transform(source_node.get('transform', '')) @ -source_transform\n        transform @= self.node.composed_transform() @ Transform(source_node.get('transform', ''))\n        source_node.set('transform', transform)\n\n        old_angle = float(source_node.get(INKSTITCH_ATTRIBS['angle'], 0))\n        if self.clone_fill_angle is None:\n            rot = transform.add_rotate(-old_angle)\n            angle = self._get_rotation(rot, source_node, source_path)\n            if angle is not None:\n                source_node.set(INKSTITCH_ATTRIBS['angle'], angle)\n        else:\n            source_node.set(INKSTITCH_ATTRIBS['angle'], self.clone_fill_angle)\n\n        elements = self.clone_to_element(source_node)\n        for element in elements:\n            stitch_groups = element.to_stitch_groups(last_patch)\n            patches.extend(stitch_groups)\n\n        source_node.set('transform', old_transform)\n        source_node.set(INKSTITCH_ATTRIBS['angle'], old_angle)\n        return patches\n\n    
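# When the transform is not a pure rotation, rotation_degrees() raises\n    # ValueError; the fallback below then estimates the rotation by comparing\n    # the atan2 direction of the first path segment in the source and the clone.\n    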
\")\n steps_to_solve = [\n _(\"If you want to convert the clone into a real element, follow these steps:\"),\n _(\"* Select the clone\"),\n _(\"* Run: Edit > Clone > Unlink Clone (Alt+Shift+D)\")\n ]\n\n\nclass CloneSourceWarning(ObjectTypeWarning):\n name = _(\"Clone is not embroiderable\")\n description = _(\"There are one ore more clone objects in this document. A clone must be a direct child of an embroiderable element. \"\n \"Ink/Stitch cannot embroider clones of groups or other not embroiderable elements (text or image).\")\n steps_to_solve = [\n _(\"Convert the clone into a real element:\"),\n _(\"* Select the clone.\"),\n _(\"* Run: Edit > Clone > Unlink Clone (Alt+Shift+D)\")\n ]\n\n\nclass Clone(EmbroideryElement):\n # A clone embroidery element is linked to an embroiderable element.\n # It will be ignored if the source element is not a direct child of the xlink attribute.\n\n element_name = \"Clone\"\n\n def __init__(self, *args, **kwargs):\n super(Clone, self).__init__(*args, **kwargs)\n\n @property\n @param('clone', _(\"Clone\"), type='toggle', inverse=False, default=True)\n def clone(self):\n return self.get_boolean_param(\"clone\")\n\n @property\n @param('angle',\n _('Custom fill angle'),\n tooltip=_(\"This setting will apply a custom fill angle for the clone.\"),\n unit='deg',\n type='float')\n @cache\n def clone_fill_angle(self):\n return self.get_float_param('angle') or None\n\n @property\n @param('flip_angle',\n _('Flip angle'),\n tooltip=_(\"Flip automatically calucalted angle if it appears to be wrong.\"),\n type='boolean')\n @cache\n def flip_angle(self):\n return self.get_boolean_param('flip_angle')\n\n def get_cache_key_data(self, previous_stitch):\n source_node = get_clone_source(self.node)\n source_elements = self.clone_to_element(source_node)\n return [element.get_cache_key(previous_stitch) for element in source_elements]\n\n def clone_to_element(self, node):\n from .utils import node_to_elements\n return node_to_elements(node, True)\n\n def to_stitch_groups(self, last_patch=None):\n patches = []\n\n source_node = get_clone_source(self.node)\n if source_node.tag not in EMBROIDERABLE_TAGS:\n return []\n\n old_transform = source_node.get('transform', '')\n source_transform = source_node.composed_transform()\n source_path = Path(source_node.get_path()).transform(source_transform)\n transform = Transform(source_node.get('transform', '')) @ -source_transform\n transform @= self.node.composed_transform() @ Transform(source_node.get('transform', ''))\n source_node.set('transform', transform)\n\n old_angle = float(source_node.get(INKSTITCH_ATTRIBS['angle'], 0))\n if self.clone_fill_angle is None:\n rot = transform.add_rotate(-old_angle)\n angle = self._get_rotation(rot, source_node, source_path)\n if angle is not None:\n source_node.set(INKSTITCH_ATTRIBS['angle'], angle)\n else:\n source_node.set(INKSTITCH_ATTRIBS['angle'], self.clone_fill_angle)\n\n elements = self.clone_to_element(source_node)\n for element in elements:\n stitch_groups = element.to_stitch_groups(last_patch)\n patches.extend(stitch_groups)\n\n source_node.set('transform', old_transform)\n source_node.set(INKSTITCH_ATTRIBS['angle'], old_angle)\n return patches\n\n def _get_rotation(self, transform, source_node, source_path):\n try:\n rotation = transform.rotation_degrees()\n except ValueError:\n source_path = CubicSuperPath(source_path)[0]\n clone_path = Path(source_node.get_path()).transform(source_node.composed_transform())\n clone_path = CubicSuperPath(clone_path)[0]\n\n angle_source = 
atan2(source_path[1][1][1] - source_path[0][1][1], source_path[1][1][0] - source_path[0][1][0])\n angle_clone = atan2(clone_path[1][1][1] - clone_path[0][1][1], clone_path[1][1][0] - clone_path[0][1][0])\n angle_embroidery = radians(-float(source_node.get(INKSTITCH_ATTRIBS['angle'], 0)))\n\n diff = angle_source - angle_embroidery\n rotation = degrees(diff + angle_clone)\n\n if self.flip_angle:\n rotation = -degrees(diff - angle_clone)\n\n return -rotation\n\n def get_clone_style(self, style_name, node, default=None):\n style = node.style[style_name] or default\n return style\n\n def center(self, source_node):\n transform = get_node_transform(self.node.getparent())\n center = self.node.bounding_box(transform).center\n return center\n\n def validation_warnings(self):\n source_node = get_clone_source(self.node)\n if source_node.tag not in EMBROIDERABLE_TAGS:\n point = self.center(source_node)\n yield CloneSourceWarning(point)\n else:\n point = self.center(source_node)\n yield CloneWarning(point)\n\n\ndef is_clone(node):\n if node.tag == SVG_USE_TAG and node.get(XLINK_HREF) and not is_command_symbol(node):\n return True\n return False\n\n\ndef is_embroiderable_clone(node):\n if is_clone(node) and get_clone_source(node).tag in EMBROIDERABLE_TAGS:\n return True\n return False\n\n\ndef get_clone_source(node):\n return node.href\n","repo_name":"inkstitch/inkstitch","sub_path":"lib/elements/clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","stars":805,"dataset":"github-code","pt":"69"} +{"seq_id":"18165313606","text":"# -*- coding: utf-8 -*-\n\"\"\"A function used to extend the baseline similarity.\"\"\"\nfrom functools import reduce\n\nimport numpy as np\n\n\nclass ExtendSim:\n def __init__(self, top_k):\n \"\"\"\n Args:\n top_k: define the size of most similar neighbor.\n \"\"\"\n self.top_k = top_k\n\n def find_knn_items(self, rdd, BB_items_bd):\n \"\"\"return valid item information.\n arg:\n rdd: (iid, [(iid, sim, mutu, frac_mutu)*])*\n return:\n in the form of `iid, (BB_BB, BB_NB), (NB_BB, NB_NN).\n BB_*/NB_* in the form of `iid, sim, mutu, frac_mutu`.\n use BB_NB as an example where BB define the type of iid,\n while NB define the type that iid connect to.\n \"\"\"\n def get_knn(iter_items):\n for iid, info in iter_items:\n info.sort(key=lambda pair: abs(pair[1]), reverse=True)\n domain_label = iid[-2:]\n if iid in BB_items_bd.value:\n BB_BB = [pair for pair in info\n if domain_label not in pair[0]][: self.top_k]\n BB_NB = [pair for pair in info\n if domain_label in pair[0]][: self.top_k]\n yield iid, (BB_BB, BB_NB), None\n else:\n NB = [pair for pair in info\n if pair[0] in BB_items_bd.value]\n if len(NB) != 0:\n NB_BB = NB[: self.top_k]\n NB_NN = [pair for pair in info\n if pair[0] not in NB][: self.top_k]\n yield iid, None, (NB_BB, NB_NN)\n return rdd.mapPartitions(get_knn)\n\n def sim_extend(self, BB_info, NB_info, knn_BB_bd, knn_NB_bd):\n \"\"\"Extend similarity.\"\"\"\n def combine_BB_withother_in_singledomain(iter_items):\n \"\"\"combine BB item with other items for each domain.\n return:\n NB_NN iid, (BB iid, [NB_NN iid]*)\n \"\"\"\n for iid, (NB_BB, NB_NN) in iter_items:\n \"\"\"\n NB_BB: [(BB iid, sim, mutu, frac_mutu)*]\n NB_NN: [(NN iid, sim, mutu, frac_mutu)*]\n \"\"\"\n for info in NB_BB:\n yield info[0], [(iid, [line[0] for line in NB_NN])]\n\n def extend_BB_source(sourceRDD):\n \"\"\"connect BB item in target domain with items in source domain.\n (BB_target, BB_source), connections\n \"\"\"\n def 
helper(iter_items):\n for iid, line in iter_items:\n for v in knn_BB_bd.value[iid].keys():\n if \"T:\" in v:\n yield (v, iid), line\n return sourceRDD.mapPartitions(helper)\n\n def extend_BB_target(rdd):\n \"\"\"connect BB item in source domain with item in target domain.\n (BB_target, BB_source), connections\n \"\"\"\n def helper(iter_items):\n for iid, line in iter_items:\n for v in knn_BB_bd.value[iid].keys():\n if \"S:\" in v:\n yield (iid, v), line\n return rdd.mapPartitions(helper)\n\n def calculate_path_confidence(sim_info, mutu_info, frac_mutu):\n \"\"\"calculate the confidence of the path.\"\"\"\n denominator = sum([a * b for a, b in zip(sim_info, mutu_info)])\n numerator = sum(mutu_info)\n s_p = 1.0 * denominator / numerator if numerator else 0.0\n c_p = reduce(lambda a, b: a * b, frac_mutu)\n return s_p, c_p\n\n def get_final_sim(paths):\n final_score = []\n local_db = {}\n knn_BB_iids = knn_BB_bd.value.keys()\n knn_NB_iids = knn_NB_bd.value.keys()\n for path in paths:\n iid_pairs = zip(path[0: len(path) - 1], path[1: len(path)])\n tmp_info = []\n for iid1, iid2 in iid_pairs:\n if (iid1, iid2) not in local_db.keys():\n if iid1 in knn_BB_iids \\\n and iid2 in knn_BB_bd.value[iid1]:\n tmp = knn_BB_bd.value[iid1][iid2]\n elif iid2 in knn_BB_iids \\\n and iid1 in knn_BB_bd.value[iid2]:\n tmp = knn_BB_bd.value[iid2][iid1]\n elif iid1 in knn_NB_iids \\\n and iid2 in knn_NB_bd.value[iid1]:\n tmp = knn_NB_bd.value[iid1][iid2]\n elif iid2 in knn_NB_iids \\\n and iid1 in knn_NB_bd.value[iid2]:\n tmp = knn_NB_bd.value[iid2][iid1]\n local_db.update({(iid1, iid2): tmp})\n tmp_info += [local_db[(iid1, iid2)]]\n sim_info = [l[0] for l in tmp_info]\n mutu_info = [l[1] for l in tmp_info]\n frac_mutu = [l[2] for l in tmp_info]\n final_score.append(\n ((path[0], path[-1]),\n calculate_path_confidence(sim_info, mutu_info, frac_mutu))\n )\n return final_score\n\n def final_nonjoint_extend(nonjoint_BB):\n \"\"\"extend path for items that only linked to BB_target.\n arg:\n nonjoint_BB: (target_iid, source_iid), source_info\n \"\"\"\n def helper(iter_items):\n for iid_pair, source in iter_items:\n \"\"\"iid_pair in the form of (target_iid, source_iid).\n source_path: from BB_target to item in source domain.\n \"\"\"\n source_path = [iid_pair]\n for NB_iid, NN_iids in source:\n source_path += [iid_pair + (NB_iid,)]\n for NN_iid in NN_iids:\n source_path += [iid_pair + (NB_iid, NN_iid)]\n yield get_final_sim(source_path)\n return nonjoint_BB.mapPartitions(helper)\n\n def final_joint_extend(joined_BB):\n \"\"\"extend path for items that have additional items in each domain.\n arg:\n joined_BB: (target_iid, source_iid), (source_info, target_info)\n \"\"\"\n def helper(iter_items):\n for iid_pair, (source, target) in iter_items:\n \"\"\"iid_pair in the form of (target_iid, source_iid).\n source_path: from BB_target to item in source domain.\n target_path: from NB_target to item in source domain.\n longest_path: from NN_target to item in source domain.\n \"\"\"\n source_path = [iid_pair]\n for NB_iid, NN_iids in source:\n source_path += [iid_pair + (NB_iid,)]\n for NN_iid in NN_iids:\n source_path += [iid_pair + (NB_iid, NN_iid)]\n\n for NB_iid, NN_iids in target:\n target_path = []\n longest_path = []\n for p in source_path:\n target_path += [(NB_iid, ) + p]\n for p in target_path:\n for NN_iid in NN_iids:\n longest_path += [(NN_iid, ) + p]\n yield get_final_sim(target_path + longest_path)\n return joined_BB.mapPartitions(helper)\n\n BB_other_intra = NB_info.mapPartitions(\n 
combine_BB_withother_in_singledomain).reduceByKey(\n lambda a, b: a + b).cache()\n BB_other_intra_source = BB_other_intra.filter(lambda l: \"S:\" in l[0])\n BB_other_intra_target = BB_other_intra.filter(lambda l: \"T:\" in l[0])\n extended_BB_source = extend_BB_source(BB_other_intra_source)\n extended_BB_target = extend_BB_target(BB_other_intra_target)\n joined_extended_BB = extended_BB_source.join(extended_BB_target)\n final_joint_extended = final_joint_extend(joined_extended_BB)\n final_nonjoint_extended = final_nonjoint_extend(extended_BB_source)\n\n return final_joint_extended.union(final_nonjoint_extended)\n\n def get_final_extension(self, cross_extended):\n \"\"\"Deal with the case of multiple path among an item-item pair.\n If item-item pair exists several paths,\n then use s_p and c_p to get the final similarity\n Args:\n cross_extended: in the form of [((iid1, iid2), (s_p, c_p))*]\n Returns:\n xsim: in the form of (iid1, [(iid2, sim)*])\n \"\"\"\n def swap_info(line):\n \"\"\"adjust the position of the information.\"\"\"\n iids, info = line\n return iids[0], [(iids[1], ) + info]\n\n def get_sim(pairs):\n similarity = np.array([pair[0] for pair in pairs])\n certainty = np.array([pair[1] for pair in pairs])\n return 1.0 * similarity.dot(certainty) / np.sum(certainty)\n\n def merge(iter_items):\n for iid, info in iter_items:\n local_db = dict()\n final_sim = []\n for pair in info:\n if pair[0] not in local_db.keys():\n local_db.update({pair[0]: [pair[1:]]})\n else:\n local_db[pair[0]] += [pair[1:]]\n for key in local_db.keys():\n final_sim.append((key, get_sim(local_db[key])))\n yield iid, final_sim\n\n return cross_extended.flatMap(lambda x: x).map(swap_info).reduceByKey(\n lambda a, b: a + b).mapPartitions(merge)\n","repo_name":"LPD-EPFL-ML/X-MAP","sub_path":"code/xmap/core/extender.py","file_name":"extender.py","file_ext":"py","file_size_in_byte":9789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"73626263580","text":"from django.contrib import admin\nfrom django.utils.html import format_html, urlencode\nfrom django.urls import reverse\nfrom . 
import models\n\n\n\n@admin.register(models.Product)\nclass ProductAdmin(admin.ModelAdmin):\n    list_display = [\n        'title', \n        'slug', \n        'price', \n        'product_image', \n        'description', \n        'inventory', \n        'last_update', \n        'category',\n        'get_jalali_last_update'\n]\n    list_per_page = 10\n    search_fields = ['title__istartswith']\n\n\n\n@admin.register(models.Category)\nclass CategoryAdmin(admin.ModelAdmin):\n    list_display = ['title', 'thumbnail']\n    list_display_links = ['thumbnail']\n    list_editable = ['title']\n    list_per_page = 10\n    search_fields = ['title__istartswith']\n\n\n\n@admin.register(models.Customer)\nclass CustomerAdmin(admin.ModelAdmin):\n    list_display = ['user', 'user_id', 'birth_date']\n    list_select_related = ['user']\n    list_per_page = 10\n    search_fields = ['first_name__istartswith', 'last_name__istartswith']\n\n    def user_id(self, customer):\n        url = (\n            reverse('admin:core_user_changelist')\n            + '?'\n            + urlencode(\n                {\n                    'user__id': str(customer.user.id)\n                }\n            )\n        )\n        return format_html('<a href=\"{}\">{}</a>', url, customer.user.id)\n    \n\n\nclass OrderItemInline(admin.TabularInline):\n    model = models.OrderItem\n    autocomplete_fields = ['product']\n    extra = 0\n    min_num = 1\n    \n\n\n@admin.register(models.Order)\nclass OrderAdmin(admin.ModelAdmin):\n    list_display = ['id', 'ordered_at', 'customer']\n    list_select_related = ['customer']\n    inlines = [OrderItemInline]","repo_name":"Saeed1401/VendorSpot","sub_path":"Back-End/shop/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"32978003046","text":"# #26 27 283\nfrom typing import List\nclass Array:\n    def __init__(self, capacity):\n        self.array = [None] * capacity\n        self.size = 0\n\n    def insert(self, index, data):\n        if index < 0 or index > self.size:\n            raise IndexError(\"array index out of bounds\")\n        if self.size >= len(self.array) or index >= len(self.array):\n            self.addcapacity()\n        for i in range(self.size - 1, index - 1, -1):\n            self.array[i + 1] = self.array[i]\n        self.array[index] = data\n        self.size += 1\n\n    def addcapacity(self):\n        new_array = [None] * len(self.array) * 2\n        for i in range(self.size):\n            new_array[i] = self.array[i]\n        self.array = new_array\n\n\n\n    def remove(self, index):\n        if index < 0 or index >= self.size:\n            raise Exception('array index out of bounds')\n        # shift the tail one slot to the left; stop at size - 1 so we never read past the end\n        for i in range(index, self.size - 1):\n            self.array[i] = self.array[i + 1]\n        self.size -= 1\n\n    def output(self):\n        # for i in range(self.size):\n        print(self.array)\n\n\narray = Array(4)\narray.insert(0, 0)\narray.insert(0, 10)\narray.insert(1, 1)\narray.insert(2, 2)\narray.insert(3, 3)\n\narray.output()\n#\n#\nclass Solution:\n    def removeDuplicates(self, nums: List[int]):\n        slow = 0\n        fast = 1\n        while fast < len(nums):\n            if nums[fast] == nums[slow]:\n                fast += 1\n            else:\n                slow += 1\n                nums[slow] = nums[fast]\n                fast += 1\n        return nums,slow\n\naa = Solution()\na =[1, 1, 2, 3, 4]\naa.removeDuplicates(a)\nprint(aa.removeDuplicates(a))\n#\n# from typing import List\n# class Solution:\n#     def moveZeroes(self, nums1: List[int]):\n#         i = 0\n#         for j in range(len(nums1)):\n#             if nums1[j] != 0:\n#                 nums1[i], nums1[j] = nums1[j], nums1[i]\n#                 i += 1\n#         return nums1\nclass Solution:\n    def moveZeroes(self, nums: List[int]) -> None:\n        slow = 0\n        fast = 1\n        while fast < len(nums):\n            if (nums[fast] - nums[slow]) == nums[fast] and nums[fast] != 0:\n                nums[slow], nums[fast] = nums[fast], nums[slow]\n                fast += 1\n                slow += 1\n            elif nums[fast] != nums[slow]:\n                fast += 1\n                slow += 1\n            else:\n\n                fast += 
1","repo_name":"TouTou-python/data_structure","sub_path":"108(环,数组)/数组.py","file_name":"数组.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"37786722307","text":"import os\r\nimport pandas as pd\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\n\r\ndef get_feature_groups():\r\n # CATEGORICAL\r\n music_text = ['mel_ratio', 'syl_num', 'mel_num']\r\n\r\n # CONTINUOUS\r\n beat = ['beat_quartile1', 'beat_quartile3', 'beat_mode', 'beat_median', 'beat_harmonic', 'beat_iqr', 'beat_variance', 'beat_gmean', 'beat_variation', 'beat_skewness', 'beat_kurtosis', 'beat_max', 'beat_min', 'beat_mean', 'beat_std', 'beat_diff']\r\n pitch = ['PS_quartile1', 'PS_quartile3', 'PS_mode', 'PS_median', 'PS_harmonic', 'PS_iqr', 'PS_variance', 'PS_gmean', 'PS_variation', 'PS_skewness', 'PS_kurtosis', 'note_pitchPS_mean', 'note_pitchPS_std', 'range_PS', 'min_PS', 'max_PS']\r\n interval_up = ['interval_posit_quartile1', 'interval_posit_quartile3', 'interval_posit_mode', 'interval_posit_median', 'interval_posit_harmonic', 'interval_posit_iqr', 'interval_posit_variance', 'interval_posit_gmean', 'interval_posit_variation', 'interval_posit_skewness', 'interval_posit_kurtosis', 'interval_mean_posit', 'interval_std_posit', 'interval_max_posit', 'interval_min_posit', 'interval_diff_posit']\r\n interval_down = ['interval_negat_quartile1', 'interval_negat_quartile3', 'interval_negat_mode', 'interval_negat_median', 'interval_negat_harmonic', 'interval_negat_iqr', 'interval_negat_variance', 'interval_negat_gmean', 'interval_negat_variation', 'interval_negat_skewness', 'interval_negat_kurtosis', 'interval_mean_negat', 'interval_min_negat', 'interval_max_negat', 'interval_diff_negat', 'interval_std_negat']\r\n rhythm = ['rhythm_quartile1', 'rhythm_quartile3', 'rhythm_mode', 'rhythm_median', 'rhythm_harmonic', 'rhythm_iqr', 'rhythm_variance', 'rhythm_gmean', 'rhythm_variation', 'rhythm_skewness', 'rhythm_kurtosis', 'rhythm_mean', 'rhythm_std', 'rhythm_min', 'rhythm_max', 'rhythm_diff']\r\n offset = ['offset_quartile1', 'offset_quartile3', 'offset_mode', 'offset_median', 'offset_harmonic', 'offset_iqr', 'offset_variance', 'offset_gmean', 'offset_variation', 'offset_skewness', 'offset_kurtosis', 'offset_mean', 'offset_std', 'offset_max', 'offset_min', 'offset_diff']\r\n\r\n # DELTAS\r\n beat_delta = ['beat_quartile1_delta', 'beat_quartile3_delta', 'beat_mode_delta', 'beat_median_delta', 'beat_harmonic_delta', 'beat_iqr_delta', 'beat_variance_delta', 'beat_gmean_delta', 'beat_variation_delta', 'beat_skewness_delta', 'beat_kurtosis_delta', 'beat_max_delta', 'beat_min_delta', 'beat_mean_delta', 'beat_std_delta', 'beat_diff_delta']\r\n pitch_delta = ['PS_quartile1_delta', 'PS_quartile3_delta', 'PS_mode_delta', 'PS_median_delta', 'PS_harmonic_delta', 'PS_iqr_delta', 'PS_variance_delta', 'PS_gmean_delta', 'PS_variation_delta', 'PS_skewness_delta', 'PS_kurtosis_delta', 'note_pitchPS_mean_delta', 'note_pitchPS_std_delta', 'range_PS_delta', 'min_PS_delta', 'max_PS_delta']\r\n interval_up_delta = ['interval_posit_quartile1_delta', 'interval_posit_quartile3_delta', 'interval_posit_mode_delta', 'interval_posit_median_delta', 'interval_posit_harmonic_delta', 'interval_posit_iqr_delta', 'interval_posit_variance_delta', 'interval_posit_gmean_delta', 'interval_posit_variation_delta', 'interval_posit_skewness_delta', 'interval_posit_kurtosis_delta', 'interval_mean_posit_delta', 
'interval_std_posit_delta', 'interval_max_posit_delta', 'interval_min_posit_delta', 'interval_diff_posit_delta']\r\n interval_down_delta = ['interval_negat_quartile1_delta', 'interval_negat_quartile3_delta', 'interval_negat_mode_delta', 'interval_negat_median_delta', 'interval_negat_harmonic_delta', 'interval_negat_iqr_delta', 'interval_negat_variance_delta', 'interval_negat_gmean_delta', 'interval_negat_variation_delta', 'interval_negat_skewness_delta', 'interval_negat_kurtosis_delta', 'interval_mean_negat_delta', 'interval_min_negat_delta', 'interval_max_negat_delta', 'interval_diff_negat_delta', 'interval_std_negat_delta']\r\n rhythm_delta = ['rhythm_quartile1_delta', 'rhythm_quartile3_delta', 'rhythm_mode_delta', 'rhythm_median_delta', 'rhythm_harmonic_delta', 'rhythm_iqr_delta', 'rhythm_variance_delta', 'rhythm_gmean_delta', 'rhythm_variation_delta', 'rhythm_skewness_delta', 'rhythm_kurtosis_delta', 'rhythm_mean_delta', 'rhythm_std_delta', 'rhythm_min_delta', 'rhythm_max_delta', 'rhythm_diff_delta']\r\n offset_delta = ['offset_quartile1_delta', 'offset_quartile3_delta', 'offset_mode_delta', 'offset_median_delta', 'offset_harmonic_delta', 'offset_iqr_delta', 'offset_variance_delta', 'offset_gmean_delta', 'offset_variation_delta', 'offset_skewness_delta', 'offset_kurtosis_delta', 'offset_mean_delta', 'offset_std_delta', 'offset_max_delta', 'offset_min_delta', 'offset_diff_delta']\r\n\r\n pitch_CON = pitch + pitch_delta\r\n rhythm_CON = rhythm + rhythm_delta\r\n interval_CON = interval_down + interval_up + interval_down_delta + interval_up_delta\r\n attack_CON = beat + offset + beat_delta + offset_delta\r\n\r\n # GROUPS FOR PCA\r\n # vertical dimension (freq)=note; diagonal dimension (frep + time)=interval; horizontal dimension (time)=rhythm\r\n feature_dic = {'note_ALL': pitch_CON, 'interval_ALL': interval_CON + music_text, 'rhythm_ALL': rhythm_CON + attack_CON}\r\n\r\n return feature_dic\r\n\r\n\r\ndef run_PCA(df, principalDf, feature_dic):\r\n y = df.loc[:, ['annotation']].values\r\n\r\n for elem in feature_dic:\r\n x = df.loc[:, feature_dic[elem]].values\r\n x = StandardScaler().fit_transform(x)\r\n pca = PCA(n_components=1)\r\n\r\n principalComponents = pca.fit_transform(x)\r\n print(elem)\r\n print(pca.explained_variance_ratio_)\r\n principalDf.insert(loc=0, column=elem, value=principalComponents.flatten().tolist(), allow_duplicates=True)\r\n\r\n principalDf.insert(loc=0, column='Annotation', value=y)\r\n print(principalDf)\r\n\r\n return principalDf\r\n\r\n\r\ndef set_up(part):\r\n my_dir = os.getcwd()\r\n df = pd.read_csv(my_dir + '/functionals.csv', sep='\\t')\r\n df = df.loc[df['part_name'] == part]\r\n principalDf = pd.DataFrame()\r\n return df, principalDf, my_dir\r\n\r\n\r\nif __name__ == '__main__':\r\n parts = ['all_flat', 'Bass', 'Tenor', 'Quinto', 'Canto', 'Alto']\r\n\r\n for part in parts:\r\n print(part)\r\n df, principalDf, my_dir = set_up(part)\r\n feature_dic = get_feature_groups()\r\n principalDf = run_PCA(df, principalDf, feature_dic)\r\n if os.path.exists(my_dir + '/PCA_' + part + '.csv'):\r\n os.remove(my_dir + '/PCA_' + part + '.csv')\r\n principalDf.to_csv(my_dir + '/PCA_' + part + '.csv', sep=';')\r\n","repo_name":"SEILSdataset/Texture_Recognition","sub_path":"PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"15463854204","text":"import os\n# os.mkdir(\"src\")\nfor i in range(1,26):\n\twith open(\"src/day%02d.py\" % (i), 'w') as 
f:\n\t\tf.write(\n\"\"\"from utils.aocUtils import *\ndef main(input:str):\n\tp1 = 0\n\tp2 = 0\n\treturn (p1, p2)\"\"\"\n\t\t)","repo_name":"Anshuman-UCSB/Advent-Of-Code","sub_path":"2020/python/utils/createTemplate.py","file_name":"createTemplate.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
{"seq_id":"4317653468","text":"import argparse\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport nltk\r\nimport joblib\r\nimport datetime\r\n\r\ndef main(hidden_data):\r\n\r\n    hidden_data = pd.read_csv(hidden_data, parse_dates=['date'])\r\n    hidden_data = feature_engineering_func(hidden_data)\r\n\r\n    hidden_data = hidden_data[hidden_data.date.dt.date >= datetime.date(2023, 8, 1)]\r\n    dates = hidden_data.date\r\n    hours = hidden_data.time\r\n\r\n    features = hidden_data.drop(['target', 'date'], axis=1) \r\n\r\n    model = joblib.load('pipe.pkl')\r\n    \r\n    prediction = model.predict(features)\r\n\r\n    resampled_prediction = resample_prediction(prediction, hidden_data.index, dates, hours)\r\n    resampled_prediction.to_csv('prediction.csv')\r\n    return resampled_prediction.to_csv()\r\n\r\ndef create_features(data):\r\n    data = data.copy()\r\n    data['dayofweek'] = data.date.dt.dayofweek\r\n    data['quarter'] = data.date.dt.quarter\r\n    data['month'] = data.date.dt.month\r\n    data['year'] = data.date.dt.year\r\n    data['dayofyear'] = data.date.dt.dayofyear\r\n    data['dayofmonth'] = data.date.dt.day\r\n    data['weekofyear'] = data.date.dt.isocalendar().week.astype(int)\r\n\r\n    # the day-off flag is joined in from an external table by date\r\n    # load the table of all days off: weekends and holidays\r\n    holiday_table = pd.read_csv('holidays_calendar.csv', parse_dates=['date']) \r\n    # add the day-off feature\r\n    data = data.merge(holiday_table[['date','holiday']], on='date', how='left')\r\n    # if the table is missing a date this becomes NaN; NaN is fine for lgbm, and if it raises instead, wrap in try/except\r\n    # data['holiday'] = data['dayofweek'].apply(lambda x: 1 if x in [5, 6] else 0)\r\n    data['holiday_type'] = 0\r\n    # find the row indices that fall on holidays:\r\n    holiday_indices = data[data['holiday'] == 1].index\r\n    # set 'holiday_type' to 2 for weekends/holidays\r\n    data.loc[holiday_indices, 'holiday_type'] = 2\r\n    # build a mask of places where 'holiday_type' is 0 and the value 24 hours later is 2\r\n    mask = (data['holiday_type'] == 0) & (data['holiday_type'].shift(-24) == 2)\r\n    # mark those pre-holiday rows with 1\r\n    data.loc[mask, 'holiday_type'] = 1\r\n    \r\n    # encode the hour with a periodic cosine\r\n    
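# e.g. cos(2*pi*(23+1)/24) = 1.0 for hour 23 and cos(2*pi*(0+1)/24) ~ 0.966 for\r\n    # hour 0, so neighbouring hours across midnight stay close (illustrative values).\r\n    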
data['cos_time'] = data['time'].apply(lambda x: np.cos(((x +1)/24) * 2 * np.pi))\r\n    # day of week through a cosine\r\n    data['cos_dayofweek'] = data['dayofweek'].apply(lambda x: np.cos(((x)/7) * 2 * np.pi))\r\n#    data['sin_dayofweek'] = data['dayofweek'].apply(lambda x: np.sin(((x +1)/7) * 2 * np.pi))\r\n#    data['sin_time'] = data['time'].apply(lambda x: np.sin(((x +1)/24) * 2 * np.pi))\r\n\r\n    # hurts the metric - dark/sunrise/daylight/sunset flag:\r\n    sun_data = pd.read_json('sun_data.json')\r\n    sun_data.drop('longitude', axis=1, inplace=True)\r\n    sun_data['sunrise'] = pd.to_datetime(sun_data['sunrise'], format='%H:%M')\r\n    sun_data['sunset'] = pd.to_datetime(sun_data['sunset'], format='%H:%M')\r\n    # compute daylight length in hours, because the original column has errors:\r\n    sun_data['daylight'] = (sun_data['sunset'] - sun_data['sunrise']).dt.total_seconds() / 3600\r\n    sun_data['sunrise'] = sun_data['sunrise'].dt.strftime('%H.%M').astype(float).round().astype(int)\r\n    sun_data['sunset'] = sun_data['sunset'].dt.strftime('%H.%M').astype(float).round().astype(int)\r\n    data = data.merge(sun_data, on='date', how='left')\r\n    # helper that determines the time of day\r\n    def determine_time_of_day(row):\r\n        if row['time'] == row['sunrise']:\r\n            return 0 # sunrise\r\n        elif row['sunrise'] <= row['time'] < row['sunset']:\r\n            return 1 # daylight\r\n        elif row['sunset'] == row['time']:\r\n            return 2 # sunset\r\n        else:\r\n            return 3 # dark\r\n    data['time_of_day'] = data.apply(lambda row: determine_time_of_day(row), axis=1)\r\n    # drop the helper columns - the metric is even worse with them\r\n    data.drop(['daylight', 'sunrise', 'sunset'], axis=1, inplace=True) \r\n    # variance\r\n    data['temp_variance_24h'] = data['temp'].rolling(window=24).var()\r\n\r\n    def below_eight_degrees_week(temp_series):\r\n        week_temps = temp_series.rolling(window=7*24).mean()\r\n        return (week_temps < 8).astype(int)\r\n\r\n    data['central_h'] = below_eight_degrees_week(data['temp_pred'])\r\n    mask = (data['date'] >= '2019-01-01') & (data['date'] <= '2019-01-07')\r\n    data.loc[mask, 'central_h'] = 1\r\n    # these hurt the metric\r\n    # time of day\r\n    data['morning'] = ((data['time'] >= 6) & (data['time'] < 12)).astype(int)\r\n    data['day'] = ((data['time'] >= 12) & (data['time'] < 18)).astype(int)\r\n    data['evening'] = ((data['time'] >= 18) & (data['time'] <= 23)).astype(int)\r\n    data['night'] = ((data['time'] >= 0) & (data['time'] < 6)).astype(int)\r\n    return data\r\n\r\ndef humidity_from_text_weather(dataset):\r\n    data = dataset.copy()\r\n    # keep only the digits, convert to float \r\n    data['humidity'] = data['weather_pred'].fillna('').apply(lambda text: re.sub(r'[^0-9]', ' ', text))\r\n    data['humidity'] = pd.to_numeric(data['humidity'], errors='coerce')\r\n    data.loc[data['humidity']>100, 'humidity'] = np.nan\r\n    return data\r\n\r\ndef preprocess_text(text):\r\n    # lower-case the text\r\n    text = text.lower()\r\n    # replace 'ё' with 'е' (the original dropped the assignment, so the result was discarded)\r\n    text = text.replace('ё', 'е')\r\n    # keep letters only \r\n    text = re.sub(r'[^а-я]', ' ', text)\r\n    # tokenize \r\n    text = nltk.word_tokenize(text)\r\n    # join back into a single text \r\n    text = \" \".join(text)\r\n    # keep only words of at least 3 characters\r\n    text = re.sub(r'\\b\\w{1,2}\\b', '', text)\r\n    return text\r\n\r\n
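# e.g. preprocess_text(\"Ясно, +5\") returns \"ясно\": lower-cased, non-Cyrillic\r\n# characters replaced by spaces, then words shorter than 3 letters dropped\r\n# (illustrative values).\r\n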
# vectorization: store each weather_pred word as its own column (the most frequent words)\r\ndef vectorize_weather_pred(data_test, count_vectorizer):\r\n    data = data_test.copy()\r\n    # clean the text\r\n    data['weather_pred'] = data['weather_pred'].fillna('').apply(lambda x: preprocess_text(x))\r\n\r\n    # builds a matrix: columns are words, values are binary\r\n    text_vector = count_vectorizer.transform(data['weather_pred'])\r\n    # convert to a dataframe\r\n    text_df = pd.DataFrame(text_vector.toarray(), columns=count_vectorizer.get_feature_names_out())\r\n    # join it onto the main dataset\r\n    data = pd.concat([data, text_df], axis=1)\r\n    # drop the weather_pred feature\r\n    data = data.drop('weather_pred', axis=1)\r\n    return data\r\n\r\ndef create_lag_rolling(data):\r\n    new_data = data.copy()\r\n    # lags \r\n    for lag in range(24, 24*7, 2):\r\n        new_data['lag_day_'+str(lag)] = new_data['target'].shift(lag)\r\n    # weather lags\r\n    new_data['temp_lag'] = new_data['temp'].shift(24)\r\n    # rolling mean over the previous 7 days at the same hour\r\n    for hour in range(24):\r\n        new_data.loc[new_data['time'] == hour, 'rolling'] =\\\r\n            new_data.loc[new_data['time'] == hour, 'target'].shift().rolling(7).mean()\r\n    \r\n    # target difference between the previous day's value and its previous hour\r\n    new_data['diff_hour'] = new_data['target'].shift(24) - new_data['target'].shift(25)\r\n    # target difference at the same hour on the two previous days \r\n    new_data['diff_day'] = new_data['target'].shift(24) - new_data['target'].shift(48)\r\n\r\n    # these hurt the metric\r\n    # target difference between the previous day's value and its previous hour\r\n    # new_data['diff_hour'] = new_data['target'].shift(24) - new_data['target'].shift(25)\r\n    # target difference at the same hour on the two previous days \r\n    # new_data['diff_day'] = new_data['target'].shift(24) - new_data['target'].shift(48)\r\n    # target difference against the same day of the previous year\r\n    # new_data['diff_year'] = new_data['target'].shift(24) - new_data['target'].shift(24*365)\r\n    for col in new_data.columns[new_data.isna().any()].tolist():\r\n        new_data[col] = new_data[col].fillna(new_data['target'])\r\n    new_data = new_data.fillna(99999)\r\n    return new_data.drop(['temp', 'weather_fact'], axis=1)\r\n\r\ndef feature_engineering_func(dataset):\r\n    df = dataset.copy()\r\n    # fill gaps in the weather features with the previous value\r\n    df[['temp_pred', 'weather_pred', 'weather_fact']] = df[['temp_pred', 'weather_pred', 'weather_fact']].ffill()\r\n\r\n    # humidity\r\n    df = humidity_from_text_weather(df)\r\n\r\n    # load the count_vectorizer \r\n    count_vectorizer = joblib.load('count_vectorizer.pkl')\r\n    df = vectorize_weather_pred(df, count_vectorizer)\r\n\r\n    # calendar features\r\n    df = create_features(df)\r\n\r\n    # lag features\r\n    df = create_lag_rolling(df)\r\n    \r\n    return df\r\n\r\ndef resample_prediction(prediction, index, dates, hours):\r\n    predicted_frame = pd.DataFrame(prediction, index = index, columns = ['predict'])\r\n    predicted_frame['datetime'] = pd.to_datetime(dates) + pd.to_timedelta(hours, unit='h')\r\n    predicted_frame = predicted_frame.set_index('datetime')\r\n    return predicted_frame\r\n\r\n
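# e.g. a row with date 2023-08-01 and time 5 ends up indexed as\r\n# 2023-08-01 05:00:00 (illustrative values).\r\n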
def create_parser():\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('hidden_data')\r\n\r\n    return parser\r\n\r\nif __name__ == \"__main__\":\r\n    parser = create_parser()\r\n    namespace = parser.parse_args()\r\n    #print('start')\r\n    main(namespace.hidden_data)\r\n","repo_name":"aminaadzhieva/GlowByte-Time-Series-Competition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10720,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"26789445408","text":"import pygame\r\nfrom abc import ABCMeta, abstractmethod\r\n\r\nclass Character(object, metaclass=ABCMeta):\r\n\r\n    def __init__(self,Name,Image,Direction,Point,Clock,Poke_ball):\r\n        \"Character name, character map list, map pointer (which foot is next), action delay frames\"\r\n        self.Name=Name\r\n        self.Image=Image\r\n        self.Direction=Direction\r\n        self.Point = Point\r\n        self.Clock = Clock\r\n        self.Poke_ball = Poke_ball\r\n\r\n    @abstractmethod\r\n    def p(self):\r\n        print('p')\r\n","repo_name":"PokeMon-Jc-Y/pokemon","sub_path":"pokemon/c/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"72113345819","text":"#https://www.hackerrank.com/challenges/drawing-book/problem\n\n#!/bin/python3\n\nimport os\nimport sys\n\n#\n# Complete the pageCount function below.\n#\ndef pageCount(n, p):\n    #\n    # Write your code here.\n    near = min(p , abs(n-p))\n    if(near == abs(n-p) and n%2 == 0 and abs(n-p) != p):\n        near = near + 1\n    \n    return(near // 2)\n\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    n = int(input())\n\n    p = int(input())\n\n    result = pageCount(n, p)\n\n    fptr.write(str(result) + '\\n')\n\n    fptr.close()\n","repo_name":"MSobiya/Hackerrank","sub_path":"DrawingBook.py","file_name":"DrawingBook.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"41107417076","text":"#!/usr/bin/env python\n\nimport glob\nimport os\nimport os.path\nimport sys\n\nif sys.version_info < (3, 4, 0):\n    sys.stderr.write(\"ERROR: You need Python 3.4 or later to use mypy.\\n\")\n    exit(1)\n\n# This requires setuptools when building; setuptools is not needed\n# when installing from a wheel file (though it is still needed for\n# alternative forms of installing, as suggested by README.md).\nfrom setuptools import setup\nfrom setuptools.command.build_py import build_py\nfrom mypy.version import __version__ as version\nfrom mypy import git\n\ngit.verify_git_integrity_or_abort(\".\")\n\ndescription = 'Optional static typing for Python'\nlong_description = '''\nMypy -- Optional Static Typing for Python\n=========================================\n\nAdd type annotations to your Python programs, and use mypy to type\ncheck them. Mypy is essentially a Python linter on steroids, and it\ncan catch many programming errors by analyzing your program, without\nactually having to run it. 
Mypy has a powerful type system with\nfeatures such as type inference, gradual typing, generics and union\ntypes.\n'''.lstrip()\n\n\ndef find_package_data(base, globs):\n    \"\"\"Find all interesting data files, for setup(package_data=)\n\n    Arguments:\n      base:  The directory to search in.\n      globs: A list of glob patterns to accept files.\n    \"\"\"\n\n    rv_dirs = [root for root, dirs, files in os.walk(base)]\n    rv = []\n    for rv_dir in rv_dirs:\n        files = []\n        for pat in globs:\n            files += glob.glob(os.path.join(rv_dir, pat))\n        if not files:\n            continue\n        rv.extend([f[5:] for f in files])\n    return rv\n\n\nclass CustomPythonBuild(build_py):\n    def pin_version(self):\n        path = os.path.join(self.build_lib, 'mypy')\n        self.mkpath(path)\n        with open(os.path.join(path, 'version.py'), 'w') as stream:\n            stream.write('__version__ = \"{}\"\\n'.format(version))\n\n    def run(self):\n        self.execute(self.pin_version, ())\n        build_py.run(self)\n\n\ncmdclass = {'build_py': CustomPythonBuild}\n\npackage_data = ['py.typed']\n\npackage_data += find_package_data(os.path.join('mypy', 'typeshed'), ['*.py', '*.pyi'])\n\npackage_data += find_package_data(os.path.join('mypy', 'xml'), ['*.xsd', '*.xslt', '*.css'])\n\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and sys.argv[1] == '--use-mypyc':\n    sys.argv.pop(1)\n    USE_MYPYC = True\nif os.getenv('MYPY_USE_MYPYC', None) == '1':\n    USE_MYPYC = True\n\nif USE_MYPYC:\n    MYPYC_BLACKLIST = (\n        # Designed to collect things that can't be compiled\n        'mypyc_hacks.py',\n        'interpreted_plugin.py',\n\n        # Can't be compiled because they need to be runnable as scripts\n        '__main__.py',\n        'sitepkgs.py',\n\n        # Can't be compiled because something goes wrong\n        'bogus_type.py',\n        'dmypy.py',\n        'gclogger.py',\n        'main.py',\n        'memprofile.py',\n        'version.py',\n    )\n\n    everything = find_package_data('mypy', ['*.py'])\n    # Start with all the .py files\n    all_real_pys = [x for x in everything if not x.startswith('typeshed' + os.sep)]\n    # Strip out anything in our blacklist\n    mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]\n    # Strip out any test code\n    mypyc_targets = [x for x in mypyc_targets if not x.startswith('test' + os.sep)]\n    # ... and add back in the one test module we need\n    mypyc_targets.append(os.path.join('test', 'visitors.py'))\n\n    # Fix the paths to be full\n    mypyc_targets = [os.path.join('mypy', x) for x in mypyc_targets]\n\n    # This bit is super unfortunate: we want to use the mypy packaged\n    # with mypyc. 
It will arrange for the path to be setup so it can\n    # find it, but we've already imported parts, so we remove the\n    # modules that we've imported already, which will let the right\n    # versions be imported by mypyc.\n    del sys.modules['mypy']\n    del sys.modules['mypy.version']\n    del sys.modules['mypy.git']\n\n    from mypyc.build import mypycify, MypycifyBuildExt\n    opt_level = os.getenv('MYPYC_OPT_LEVEL', '3')\n    ext_modules = mypycify(\n        mypyc_targets,\n        ['--config-file=mypy_bootstrap.ini'],\n        opt_level=opt_level,\n        # Use multi-file compilation mode on windows because without it\n        # our Appveyor builds run out of memory sometimes.\n        multi_file=sys.platform == 'win32',\n    )\n    cmdclass['build_ext'] = MypycifyBuildExt\n    description += \" (mypyc-compiled version)\"\nelse:\n    ext_modules = []\n\n\nclassifiers = [\n    'Development Status :: 3 - Alpha',\n    'Environment :: Console',\n    'Intended Audience :: Developers',\n    'License :: OSI Approved :: MIT License',\n    'Programming Language :: Python :: 3',\n    'Programming Language :: Python :: 3.4',\n    'Programming Language :: Python :: 3.5',\n    'Programming Language :: Python :: 3.6',\n    'Topic :: Software Development',\n]\n\nsetup(name='mypy-mypyc',\n      version=version,\n      description=description,\n      long_description=long_description,\n      author='Jukka Lehtosalo',\n      author_email='jukka.lehtosalo@iki.fi',\n      url='http://www.mypy-lang.org/',\n      license='MIT License',\n      py_modules=[],\n      ext_modules=ext_modules,\n      packages=['mypy', 'mypy.test', 'mypy.server', 'mypy.plugins'],\n      package_data={'mypy': package_data},\n      entry_points={'console_scripts': ['mypy=mypy.__main__:console_entry',\n                                        'stubgen=mypy.stubgen:main',\n                                        'dmypy=mypy.dmypy:console_entry',\n                                        ]},\n      classifiers=classifiers,\n      cmdclass=cmdclass,\n      install_requires = ['typed-ast >= 1.4.1, < 1.5.0',\n                          'mypy_extensions >= 0.4.0, < 0.5.0',\n                          ],\n      extras_require = {\n          ':python_version < \"3.5\"': 'typing >= 3.5.3',\n          'dmypy': 'psutil >= 5.4.0, < 5.5.0; sys_platform!=\"win32\"',\n      },\n      include_package_data=True,\n      )\n","repo_name":"pixelb/mypy-mypyc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":6019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"22774870583","text":"import json, math, sys\n\n# {\n    # innerRadius: 0,\n    # outerRadius: 100,\n    # startAngle: 0,\n    # endAngle: Math.PI / 2\n# }\n\ndef calAngle(startAngle, endAngle, s1, s2):\n    tAngle = (s2 * endAngle + s1 * startAngle) / (s1 + s2)\n    return tAngle\n\ndef calRadius(innerRadius, outerRadius, s1, s2):\n    tRadius = (s1 * outerRadius**2 + s2 * innerRadius**2) / (s1 + s2)\n    return tRadius**0.5\n
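\n# Note on the helpers above (added comment, not in the original repo): each cut is chosen so\n# that both halves keep an area proportional to their summed weights. For the radial cut,\n# requiring s1 : s2 = (r**2 - r_in**2) : (r_out**2 - r**2) gives\n# r = sqrt((s1 * r_out**2 + s2 * r_in**2) / (s1 + s2)), which is exactly calRadius;\n# calAngle is the same weighted split applied to the angle.\n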
\ndef divide_area(final, data, innerRadius, outerRadius, startAngle, endAngle, attr, center):\n    if len(data) > 1:\n        index = len(data) // 2\n        values = [p[attr] for p in data]\n        s0 = abs(sum(values[:index]) - sum(values[index:]))\n        s1 = abs(sum(values[:index-1]) - sum(values[index-1:]))\n        s2 = abs(sum(values[:index+1]) - sum(values[index+1:]))\n        smin = min([s0, s1, s2])\n        while s0 > smin:\n            if smin == s1:\n                index -= 1\n            else:\n                index += 1\n            s0 = abs(sum(values[:index]) - sum(values[index:]))\n            s1 = abs(sum(values[:index-1]) - sum(values[index-1:]))\n            s2 = abs(sum(values[:index+1]) - sum(values[index+1:]))\n            smin = min([s0, s1, s2])\n        if (endAngle - startAngle) * (innerRadius + outerRadius) / 2 > (outerRadius - innerRadius): # split along the angle\r\n            tAngle = calAngle(startAngle, endAngle, sum(values[:index]), sum(values[index:]))\r\n            divide_area(final, data[:index], innerRadius, outerRadius, startAngle, tAngle, attr, center)\r\n            divide_area(final, data[index:], innerRadius, outerRadius, tAngle, endAngle, attr, center)\r\n        else: # split along the radius\r\n            tRadius = calRadius(innerRadius, outerRadius, sum(values[:index]), sum(values[index:]))\r\n            divide_area(final, data[:index], innerRadius, tRadius, startAngle, endAngle, attr, center)\r\n            divide_area(final, data[index:], tRadius, outerRadius, startAngle, endAngle, attr, center)\r\n    else:\r\n        final.append({\r\n            'type': data[0]['type'],\r\n            attr: data[0][attr],\r\n            'attr': attr,\r\n            'center': center,\r\n            \"innerRadius\": innerRadius,\r\n            'outerRadius': outerRadius,\r\n            'startAngle': startAngle,\r\n            'endAngle': endAngle\r\n        })\r\n    pass\r\n\r\ndef setColor(final, data):\r\n    attrs = ['PN', 'AA', 'VN', 'TS', 'AG']\r\n    # Colors = {\r\n    #     # 'PN': ['#9ecae1', '#6baed6', '#0066cc', '#0033cc', '#0000cc'],\r\n    #     'PN': [''],\r\n    #     'AA': ['#9e9ac8', '#807dba', '#6a51a3'],\r\n    #     'VN': ['#ffcccc', '#ff99cc', '#ff66cc'],\r\n    #     'TS': ['#fee391', '#fec44f', '#fe9929'],\r\n    #     'AG': ['#a1d99b', '#74c476', '#41ab5d']\r\n    # }\r\n    Colors = {\r\n        # 'PN': ['#9ecae1', '#6baed6', '#0066cc', '#0033cc', '#0000cc'],\r\n        'PN': ['#fc9272', '#fc9272', '#fc9272', '#fc9272', '#fc9272'],\r\n        'AA': ['#807dba', '#807dba', '#807dba', '#807dba', '#807dba'],\r\n        'VN': ['#ff99cc', '#ff99cc', '#ff99cc', '#ff99cc', '#ff99cc'],\r\n        'TS': ['#fec44f', '#fec44f', '#fec44f', '#fec44f', '#fec44f'],\r\n        'AG': ['#74c476', '#74c476', '#74c476', '#74c476', '#74c476']\r\n    }\r\n    for attr in attrs:\r\n        colors = Colors[attr]\r\n        v1, v2 = min([p[attr] for p in data]), max([p[attr] for p in data])\r\n        number = 5\r\n        d = (v2 - v1) / number\r\n        for p in final:\r\n            if p['attr'] != attr:\r\n                continue\r\n            for i in range(number):\r\n                if v1 + i*d <= p[attr] <= v1 + i*d+d:\r\n                    p['color'] = colors[i]\r\n                    break\r\n\r\n# k, b = (0.37815126050420167, 4.621848739495798)\r\n\r\n# 'dai' is the generation depth (used to pick the ring colour); 37 is the fallback age\r\ndef divide_sunburst(final, tree, innerRadius, outerRadius, startAngle, endAngle, dai, center, k, b):#av = 37\r\n    # colors = [\r\n    #     '#8dd3c7','#fb8072' ,'#80b1d3' ,\r\n    #     '#fdb462' ,'#b3de69' ,'#fccde5' ,'#d9d9d9' ,'#bc80bd' ,\r\n    #     '#ccebc5' ,'#ffed6f'\r\n    # ]\r\n    colors = [\r\n        '#2171b5',\r\n        '#4292c6',\r\n        '#6baed6',\r\n        '#9ecae1',\r\n        '#c6dbef',\r\n        '#d0d1e6',\r\n        '#eff3ff'\r\n    ]\r\n\r\n    final.append({\r\n        'type': tree['id'],\r\n        'center': center,\r\n        \"innerRadius\": innerRadius,\r\n        'outerRadius': outerRadius,\r\n        'startAngle': startAngle,\r\n        'endAngle': endAngle,\r\n        'color': colors[dai]\r\n    })\r\n    if 'children' in tree:\r\n        children_sum_list = []\r\n        for child in tree['children']:\r\n            if 'children' in child:\r\n                children_sum_list.append(len(child['children']))\r\n            else:\r\n                children_sum_list.append(1)\r\n        s = sum(children_sum_list)\r\n        ts = 0\r\n        stepAngle = (endAngle - startAngle) / s\r\n        for i, child in enumerate(tree['children']):\r\n            age = int(child['age']) if 0 < int(child['age']) <= 120 else 37\r\n            new_innerRadius, new_outerRadius = outerRadius, outerRadius + k * age + b\r\n            new_startAngle, new_endAngle = startAngle + ts * stepAngle, startAngle + (ts + children_sum_list[i]) * stepAngle\r\n            ts += children_sum_list[i]\r\n            divide_sunburst(final, child, new_innerRadius, new_outerRadius, new_startAngle, new_endAngle, dai + 1, center, k, b)\r\n    \r\n\r\ndef handle(origin, attrs, label, outputData, is_all):\r\n    attrs = ['PN', 'AA', 'VN', 'TS', 'AG']\r\n    data = [{\r\n        'type': p['type'],\r\n        'PN': p['attr']['PN'] + 0.0000001,\r\n        'AA': p['attr']['AA'],\r\n        'VN': p['attr']['VN'],\r\n        'TS': p['attr']['TS'],\r\n        'AG': p['attr']['AG'] + 0.0000001\r\n    } for p in origin if p['label'] == label]\r\n    \r\n    # compute the center point\r\n    xs, ys = [p['coor'][0] for p in origin if p['label'] == label], [p['coor'][1] for p in origin if p['label'] == label]\r\n    xmin, xmax, ymin, ymax = min(xs), 
max(xs), min(ys), max(ys)\r\n    center = [(xmin + xmax) / 2, (ymax + ymin) / 2]\r\n    # split the central circle\r\n    final = []\r\n    stepAngle = math.pi * 2 / 5\r\n    # R = 30 if is_all else 200 \r\n    R = 30 if is_all else 200 \r\n\r\n    for i, attr in enumerate(attrs):\r\n        # data.sort(key=lambda d:-d[attr])\r\n        divide_area(final, data[:50], 0, R, stepAngle*i, stepAngle*(i+1), attr, [(xmin + xmax) / 2, (ymax + ymin) / 2] if is_all else [534, 346])\r\n    setColor(final, data[:50])\r\n    # find the tree closest to the center point\r\n    idd, d = 0, 9999999\r\n    for p in origin:\r\n        if (p['coor'][0] - center[0])**2 + (p['coor'][1] - center[1])**2 < d:\r\n            idd = p['type']\r\n            d = (p['coor'][0] - center[0])**2 + (p['coor'][1] - center[1])**2\r\n    k = 0.3949579831932773 if not is_all else 0.14285714285714285\r\n    b = 2.6050420168067228 if not is_all else 2.857142857142857\r\n    trees = json.load(open('./data/subGraphs_1.json'))\r\n    for tree in trees:\r\n        if int(tree['id']) == idd:#(0.18487394957983194, 2.815126050420168), (0.19747899159663865, 1.3025210084033614), (, )\r\n            age = int(tree['age']) if 0 < int(tree['age']) <= 120 else 37\r\n            divide_sunburst(final, tree, R, R + k * age + b, 0, math.pi * 2, 0, [(xmin + xmax) / 2, (ymax + ymin) / 2] if is_all else [534, 346], k, b)\r\n            break\r\n    for p in final:\r\n        outputData.append(p)\r\n    # outputData.append({\r\n    #     'center': center,\r\n    #     'data': final\r\n    # })\r\n\r\n\r\nif __name__ == '__main__':\r\n    origin = json.load(open('./data/data_by_kmeans.json'))\r\n    attrs = ['PN', 'AA', 'VN', 'TS', 'AG']\r\n    # cluster_number = int(sys.argv[1])\r\n    cluster_number = max([p['label'] for p in origin]) + 1\r\n    outputData = []\r\n    for i in range(cluster_number):\r\n        if i == int(sys.argv[1]) or int(sys.argv[1]) == 666:\r\n            handle(origin, attrs, i, outputData, int(sys.argv[1]) == 666)\r\n    \r\n    with open('./data/data_by_divide.json', 'w', encoding='utf8') as file:\r\n        json.dump(outputData, file)\r\n","repo_name":"p1967914901/Visual-Analytics-of-Genealogy-with-Attribute-enhanced-Topological-Clustering","sub_path":"service/divide_area.py","file_name":"divide_area.py","file_ext":"py","file_size_in_byte":7622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"22537246831","text":"from django.contrib import admin\nfrom .models import Appointment, Participant, ScheduleOption, AcceptedOption\n\n\nclass ScheduleOptionInline(admin.StackedInline):\n    model = ScheduleOption\n\n\nclass AcceptedOptionInline(admin.StackedInline):\n    model = AcceptedOption\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n    list_display = ['name', 'email']\n\n\nclass AppointmentAdmin(admin.ModelAdmin):\n    model = Appointment\n    list_display = ['text', 'owner', 'invited']\n    filter_vertical = ['participants']\n    inlines = [ScheduleOptionInline]\n\n    def invited(self, obj):\n        return ', '.join([p.name for p in obj.participants.all()])\n\n\nadmin.site.register(Participant, ParticipantAdmin)\nadmin.site.register(Appointment, AppointmentAdmin)\n\n","repo_name":"adlh/example_api_appointments","sub_path":"api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"30188448892","text":"import sys\n_module = sys.modules[__name__]\ndel sys\nsrc = _module\nearlystopping = _module\nlayers = _module\nmetric = _module\nmodels = _module\nnormalization = _module\nsample = _module\ntrain_new = _module\nutils = _module\n\nfrom _paritybench_helpers import _mock_config, patch_functional\nfrom unittest.mock import mock_open, MagicMock\nfrom torch.autograd import Function\nfrom torch.nn import 
Module\nimport abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings\nimport numpy as np\nfrom torch import Tensor\npatch_functional()\nopen = mock_open()\nyaml = logging = sys = argparse = MagicMock()\nArgumentParser = argparse.ArgumentParser\n_global_config = args = argv = cfg = config = params = _mock_config()\nargparse.ArgumentParser.return_value.parse_args.return_value = _global_config\nyaml.load.return_value = _global_config\nsys.argv = _global_config\n__version__ = '1.0.0'\nxrange = range\nwraps = functools.wraps\n\n\nimport numpy as np\n\n\nimport torch\n\n\nimport random\n\n\nimport string\n\n\nimport math\n\n\nfrom torch.nn.parameter import Parameter\n\n\nfrom torch.nn.modules.module import Module\n\n\nfrom torch import nn\n\n\nimport torch.nn.functional as F\n\n\nimport scipy.sparse as sp\n\n\nimport torch.nn as nn\n\n\nimport time\n\n\nimport torch.optim as optim\n\n\nclass GraphConvolutionBS(Module):\n \"\"\"\n GCN Layer with BN, Self-loop and Res connection.\n \"\"\"\n\n def __init__(self, in_features, out_features, activation=lambda x: x, withbn=True, withloop=True, bias=True, res=False):\n \"\"\"\n Initial function.\n :param in_features: the input feature dimension.\n :param out_features: the output feature dimension.\n :param activation: the activation function.\n :param withbn: using batch normalization.\n :param withloop: using self feature modeling.\n :param bias: enable bias.\n :param res: enable res connections.\n \"\"\"\n super(GraphConvolutionBS, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.sigma = activation\n self.res = res\n self.weight = Parameter(torch.FloatTensor(in_features, out_features))\n if withloop:\n self.self_weight = Parameter(torch.FloatTensor(in_features, out_features))\n else:\n self.register_parameter('self_weight', None)\n if withbn:\n self.bn = torch.nn.BatchNorm1d(out_features)\n else:\n self.register_parameter('bn', None)\n if bias:\n self.bias = Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1.0 / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.self_weight is not None:\n stdv = 1.0 / math.sqrt(self.self_weight.size(1))\n self.self_weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input, adj):\n support = torch.mm(input, self.weight)\n output = torch.spmm(adj, support)\n if self.self_weight is not None:\n output = output + torch.mm(input, self.self_weight)\n if self.bias is not None:\n output = output + self.bias\n if self.bn is not None:\n output = self.bn(output)\n if self.res:\n return self.sigma(output) + input\n else:\n return self.sigma(output)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n\n\nclass GraphBaseBlock(Module):\n \"\"\"\n The base block for Multi-layer GCN / ResGCN / Dense GCN \n \"\"\"\n\n def __init__(self, in_features, out_features, nbaselayer, withbn=True, withloop=True, activation=F.relu, dropout=True, aggrmethod='concat', dense=False):\n \"\"\"\n The base block for constructing DeepGCN model.\n :param in_features: the input feature dimension.\n :param out_features: the hidden feature 
dimension.\n        :param nbaselayer: the number of layers in the base block.\n        :param withbn: using batch normalization in graph convolution.\n        :param withloop: using self feature modeling in graph convolution.\n        :param activation: the activation function, default is ReLu.\n        :param dropout: the dropout ratio.\n        :param aggrmethod: the aggregation function for baseblock, can be \"concat\" and \"add\". For \"resgcn\", the default\n                           is \"add\", for others the default is \"concat\".\n        :param dense: enable dense connection\n        \"\"\"\n        super(GraphBaseBlock, self).__init__()\n        self.in_features = in_features\n        self.hiddendim = out_features\n        self.nhiddenlayer = nbaselayer\n        self.activation = activation\n        self.aggrmethod = aggrmethod\n        self.dense = dense\n        self.dropout = dropout\n        self.withbn = withbn\n        self.withloop = withloop\n        self.hiddenlayers = nn.ModuleList()\n        self.__makehidden()\n        if self.aggrmethod == 'concat' and dense == False:\n            self.out_features = in_features + out_features\n        elif self.aggrmethod == 'concat' and dense == True:\n            self.out_features = in_features + out_features * nbaselayer\n        elif self.aggrmethod == 'add':\n            if in_features != self.hiddendim:\n                raise RuntimeError('The dimension of in_features and hiddendim should be matched in add model.')\n            self.out_features = out_features\n        elif self.aggrmethod == 'nores':\n            self.out_features = out_features\n        else:\n            raise NotImplementedError(\"The aggregation method only supports 'concat', 'add' and 'nores'.\")\n\n    def __makehidden(self):\n        for i in range(self.nhiddenlayer):\n            if i == 0:\n                layer = GraphConvolutionBS(self.in_features, self.hiddendim, self.activation, self.withbn, self.withloop)\n            else:\n                layer = GraphConvolutionBS(self.hiddendim, self.hiddendim, self.activation, self.withbn, self.withloop)\n            self.hiddenlayers.append(layer)\n\n    def _doconcat(self, x, subx):\n        if x is None:\n            return subx\n        if self.aggrmethod == 'concat':\n            return torch.cat((x, subx), 1)\n        elif self.aggrmethod == 'add':\n            return x + subx\n        elif self.aggrmethod == 'nores':\n            return x\n\n    def forward(self, input, adj):\n        x = input\n        denseout = None\n        for gc in self.hiddenlayers:\n            denseout = self._doconcat(denseout, x)\n            x = gc(x, adj)\n            x = F.dropout(x, self.dropout, training=self.training)\n        if not self.dense:\n            return self._doconcat(x, input)\n        return self._doconcat(x, denseout)\n\n    def get_outdim(self):\n        return self.out_features\n\n    def __repr__(self):\n        return '%s %s (%d - [%d:%d] > %d)' % (self.__class__.__name__, self.aggrmethod, self.in_features, self.hiddendim, self.nhiddenlayer, self.out_features)\n
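\n# Output-dimension sketch for GraphBaseBlock (added comment; the numbers are illustrative):\n# with in_features=16, out_features=32, nbaselayer=2:\n#   aggrmethod='concat', dense=False -> out_features = 16 + 32 = 48\n#   aggrmethod='concat', dense=True  -> out_features = 16 + 32 * 2 = 80\n#   aggrmethod='add' (requires in_features == out_features) -> out_features unchanged\n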
\n\nclass MultiLayerGCNBlock(Module):\n    \"\"\"\n    Multi-Layer GCN with same hidden dimension.\n    \"\"\"\n\n    def __init__(self, in_features, out_features, nbaselayer, withbn=True, withloop=True, activation=F.relu, dropout=True, aggrmethod=None, dense=None):\n        \"\"\"\n        The multiple layer GCN block.\n        :param in_features: the input feature dimension.\n        :param out_features: the hidden feature dimension.\n        :param nbaselayer: the number of layers in the base block.\n        :param withbn: using batch normalization in graph convolution.\n        :param withloop: using self feature modeling in graph convolution.\n        :param activation: the activation function, default is ReLu.\n        :param dropout: the dropout ratio.\n        :param aggrmethod: not applied.\n        :param dense: not applied.\n        \"\"\"\n        super(MultiLayerGCNBlock, self).__init__()\n        self.model = GraphBaseBlock(in_features=in_features, out_features=out_features, nbaselayer=nbaselayer, withbn=withbn, withloop=withloop, activation=activation, dropout=dropout, dense=False, aggrmethod='nores')\n\n    def forward(self, input, adj):\n        return self.model.forward(input, adj)\n\n    def get_outdim(self):\n        return self.model.get_outdim()\n\n    def __repr__(self):\n        # use the wrapped block's aggrmethod; the wrapper itself does not set one\n        return '%s %s (%d - [%d:%d] > %d)' % (self.__class__.__name__, self.model.aggrmethod, self.model.in_features, self.model.hiddendim, self.model.nhiddenlayer, self.model.out_features)\n\n\nclass ResGCNBlock(Module):\n    \"\"\"\n    The multiple layer GCN with residual connection block.\n    \"\"\"\n\n    def __init__(self, in_features, out_features, nbaselayer, withbn=True, withloop=True, activation=F.relu, dropout=True, aggrmethod=None, dense=None):\n        \"\"\"\n        The multiple layer GCN with residual connection block.\n        :param in_features: the input feature dimension.\n        :param out_features: the hidden feature dimension.\n        :param nbaselayer: the number of layers in the base block.\n        :param withbn: using batch normalization in graph convolution.\n        :param withloop: using self feature modeling in graph convolution.\n        :param activation: the activation function, default is ReLu.\n        :param dropout: the dropout ratio.\n        :param aggrmethod: not applied.\n        :param dense: not applied.\n        \"\"\"\n        super(ResGCNBlock, self).__init__()\n        self.model = GraphBaseBlock(in_features=in_features, out_features=out_features, nbaselayer=nbaselayer, withbn=withbn, withloop=withloop, activation=activation, dropout=dropout, dense=False, aggrmethod='add')\n\n    def forward(self, input, adj):\n        return self.model.forward(input, adj)\n\n    def get_outdim(self):\n        return self.model.get_outdim()\n\n    def __repr__(self):\n        return '%s %s (%d - [%d:%d] > %d)' % (self.__class__.__name__, self.model.aggrmethod, self.model.in_features, self.model.hiddendim, self.model.nhiddenlayer, self.model.out_features)\n\n\nclass DenseGCNBlock(Module):\n    \"\"\"\n    The multiple layer GCN with dense connection block.\n    \"\"\"\n\n    def __init__(self, in_features, out_features, nbaselayer, withbn=True, withloop=True, activation=F.relu, dropout=True, aggrmethod='concat', dense=True):\n        \"\"\"\n        The multiple layer GCN with dense connection block.\n        :param in_features: the input feature dimension.\n        :param out_features: the hidden feature dimension.\n        :param nbaselayer: the number of layers in the base block.\n        :param withbn: using batch normalization in graph convolution.\n        :param withloop: using self feature modeling in graph convolution.\n        :param activation: the activation function, default is ReLu.\n        :param dropout: the dropout ratio.\n        :param aggrmethod: the aggregation function for the output. 
For denseblock, default is \"concat\".\n        :param dense: default is True, cannot be changed.\n        \"\"\"\n        super(DenseGCNBlock, self).__init__()\n        self.model = GraphBaseBlock(in_features=in_features, out_features=out_features, nbaselayer=nbaselayer, withbn=withbn, withloop=withloop, activation=activation, dropout=dropout, dense=True, aggrmethod=aggrmethod)\n\n    def forward(self, input, adj):\n        return self.model.forward(input, adj)\n\n    def get_outdim(self):\n        return self.model.get_outdim()\n\n    def __repr__(self):\n        return '%s %s (%d - [%d:%d] > %d)' % (self.__class__.__name__, self.model.aggrmethod, self.model.in_features, self.model.hiddendim, self.model.nhiddenlayer, self.model.out_features)\n\n\nclass InecptionGCNBlock(Module):\n    \"\"\"\n    The multiple layer GCN with inception connection block.\n    \"\"\"\n\n    def __init__(self, in_features, out_features, nbaselayer, withbn=True, withloop=True, activation=F.relu, dropout=True, aggrmethod='concat', dense=False):\n        \"\"\"\n        The multiple layer GCN with inception connection block.\n        :param in_features: the input feature dimension.\n        :param out_features: the hidden feature dimension.\n        :param nbaselayer: the number of layers in the base block.\n        :param withbn: using batch normalization in graph convolution.\n        :param withloop: using self feature modeling in graph convolution.\n        :param activation: the activation function, default is ReLu.\n        :param dropout: the dropout ratio.\n        :param aggrmethod: the aggregation function for baseblock, can be \"concat\" and \"add\". For \"resgcn\", the default\n                           is \"add\", for others the default is \"concat\".\n        :param dense: not applied. The default is False, cannot be changed.\n        \"\"\"\n        super(InecptionGCNBlock, self).__init__()\n        self.in_features = in_features\n        self.out_features = out_features\n        self.hiddendim = out_features\n        self.nbaselayer = nbaselayer\n        self.activation = activation\n        self.aggrmethod = aggrmethod\n        self.dropout = dropout\n        self.withbn = withbn\n        self.withloop = withloop\n        self.midlayers = nn.ModuleList()\n        self.__makehidden()\n        if self.aggrmethod == 'concat':\n            self.out_features = in_features + out_features * nbaselayer\n        elif self.aggrmethod == 'add':\n            if in_features != self.hiddendim:\n                raise RuntimeError(\"The dimension of in_features and hiddendim should be matched in 'add' model.\")\n            self.out_features = out_features\n        else:\n            raise NotImplementedError(\"The aggregation method only supports 'concat', 'add'.\")\n\n    def __makehidden(self):\n        for j in range(self.nbaselayer):\n            reslayer = nn.ModuleList()\n            for i in range(j + 1):\n                if i == 0:\n                    layer = GraphConvolutionBS(self.in_features, self.hiddendim, self.activation, self.withbn, self.withloop)\n                else:\n                    layer = GraphConvolutionBS(self.hiddendim, self.hiddendim, self.activation, self.withbn, self.withloop)\n                reslayer.append(layer)\n            self.midlayers.append(reslayer)\n\n    def forward(self, input, adj):\n        x = input\n        for reslayer in self.midlayers:\n            subx = input\n            for gc in reslayer:\n                subx = gc(subx, adj)\n                subx = F.dropout(subx, self.dropout, training=self.training)\n            x = self._doconcat(x, subx)\n        return x\n\n    def get_outdim(self):\n        return self.out_features\n\n    def _doconcat(self, x, subx):\n        if self.aggrmethod == 'concat':\n            return torch.cat((x, subx), 1)\n        elif self.aggrmethod == 'add':\n            return x + subx\n\n    def __repr__(self):\n        return '%s %s (%d - [%d:%d] > %d)' % (self.__class__.__name__, self.aggrmethod, self.in_features, self.hiddendim, self.nbaselayer, self.out_features)\n\n\nclass Dense(Module):\n    \"\"\"\n    Simple Dense layer; does not consider 
adj.\n \"\"\"\n\n def __init__(self, in_features, out_features, activation=lambda x: x, bias=True, res=False):\n super(Dense, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.sigma = activation\n self.weight = Parameter(torch.FloatTensor(in_features, out_features))\n self.res = res\n self.bn = nn.BatchNorm1d(out_features)\n if bias:\n self.bias = Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1.0 / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input, adj):\n output = torch.mm(input, self.weight)\n if self.bias is not None:\n output = output + self.bias\n output = self.bn(output)\n return self.sigma(output)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n\n\ndevice = torch.device('cuda:0')\n\n\nclass GCNModel(nn.Module):\n \"\"\"\n The model for the single kind of deepgcn blocks.\n\n The model architecture likes:\n inputlayer(nfeat)--block(nbaselayer, nhid)--...--outputlayer(nclass)--softmax(nclass)\n |------ nhidlayer ----|\n The total layer is nhidlayer*nbaselayer + 2.\n All options are configurable.\n \"\"\"\n\n def __init__(self, nfeat, nhid, nclass, nhidlayer, dropout, baseblock='mutigcn', inputlayer='gcn', outputlayer='gcn', nbaselayer=0, activation=lambda x: x, withbn=True, withloop=True, aggrmethod='add', mixmode=False):\n \"\"\"\n Initial function.\n :param nfeat: the input feature dimension.\n :param nhid: the hidden feature dimension.\n :param nclass: the output feature dimension.\n :param nhidlayer: the number of hidden blocks.\n :param dropout: the dropout ratio.\n :param baseblock: the baseblock type, can be \"mutigcn\", \"resgcn\", \"densegcn\" and \"inceptiongcn\".\n :param inputlayer: the input layer type, can be \"gcn\", \"dense\", \"none\".\n :param outputlayer: the input layer type, can be \"gcn\", \"dense\".\n :param nbaselayer: the number of layers in one hidden block.\n :param activation: the activation function, default is ReLu.\n :param withbn: using batch normalization in graph convolution.\n :param withloop: using self feature modeling in graph convolution.\n :param aggrmethod: the aggregation function for baseblock, can be \"concat\" and \"add\". For \"resgcn\", the default\n is \"add\", for others the default is \"concat\".\n :param mixmode: enable cpu-gpu mix mode. If true, put the inputlayer to cpu.\n \"\"\"\n super(GCNModel, self).__init__()\n self.mixmode = mixmode\n self.dropout = dropout\n if baseblock == 'resgcn':\n self.BASEBLOCK = ResGCNBlock\n elif baseblock == 'densegcn':\n self.BASEBLOCK = DenseGCNBlock\n elif baseblock == 'mutigcn':\n self.BASEBLOCK = MultiLayerGCNBlock\n elif baseblock == 'inceptiongcn':\n self.BASEBLOCK = InecptionGCNBlock\n else:\n raise NotImplementedError('Current baseblock %s is not supported.' 
% baseblock)\n if inputlayer == 'gcn':\n self.ingc = GraphConvolutionBS(nfeat, nhid, activation, withbn, withloop)\n baseblockinput = nhid\n elif inputlayer == 'none':\n self.ingc = lambda x: x\n baseblockinput = nfeat\n else:\n self.ingc = Dense(nfeat, nhid, activation)\n baseblockinput = nhid\n outactivation = lambda x: x\n if outputlayer == 'gcn':\n self.outgc = GraphConvolutionBS(baseblockinput, nclass, outactivation, withbn, withloop)\n else:\n self.outgc = Dense(nhid, nclass, activation)\n self.midlayer = nn.ModuleList()\n for i in range(nhidlayer):\n gcb = self.BASEBLOCK(in_features=baseblockinput, out_features=nhid, nbaselayer=nbaselayer, withbn=withbn, withloop=withloop, activation=activation, dropout=dropout, dense=False, aggrmethod=aggrmethod)\n self.midlayer.append(gcb)\n baseblockinput = gcb.get_outdim()\n outactivation = lambda x: x\n self.outgc = GraphConvolutionBS(baseblockinput, nclass, outactivation, withbn, withloop)\n self.reset_parameters()\n if mixmode:\n self.midlayer = self.midlayer\n self.outgc = self.outgc\n\n def reset_parameters(self):\n pass\n\n def forward(self, fea, adj):\n if self.mixmode:\n x = self.ingc(fea, adj.cpu())\n else:\n x = self.ingc(fea, adj)\n x = F.dropout(x, self.dropout, training=self.training)\n if self.mixmode:\n x = x\n for i in range(len(self.midlayer)):\n midgc = self.midlayer[i]\n x = midgc(x, adj)\n x = self.outgc(x, adj)\n x = F.log_softmax(x, dim=1)\n return x\n\n\nclass GCNFlatRes(nn.Module):\n \"\"\"\n (Legacy)\n \"\"\"\n\n def __init__(self, nfeat, nhid, nclass, withbn, nreslayer, dropout, mixmode=False):\n super(GCNFlatRes, self).__init__()\n self.nreslayer = nreslayer\n self.dropout = dropout\n self.ingc = GraphConvolution(nfeat, nhid, F.relu)\n self.reslayer = GCFlatResBlock(nhid, nclass, nhid, nreslayer, dropout)\n self.reset_parameters()\n\n def reset_parameters(self):\n pass\n\n def forward(self, input, adj):\n x = self.ingc(input, adj)\n x = F.dropout(x, self.dropout, training=self.training)\n x = self.reslayer(x, adj)\n return F.log_softmax(x, dim=1)\n\n\nimport torch\nfrom torch.nn import MSELoss, ReLU\nfrom _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile\n\n\nTESTCASES = [\n # (nn.Module, init_args, forward_args, jit_compiles)\n (Dense,\n lambda: ([], {'in_features': 4, 'out_features': 4}),\n lambda: ([torch.rand([4, 4]), torch.rand([4, 4])], {}),\n False),\n (DenseGCNBlock,\n lambda: ([], {'in_features': 4, 'out_features': 4, 'nbaselayer': 1}),\n lambda: ([torch.rand([4, 4]), torch.rand([4, 4])], {}),\n False),\n (GCNModel,\n lambda: ([], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'nhidlayer': 1, 'dropout': 0.5}),\n lambda: ([torch.rand([4, 4]), torch.rand([4, 4])], {}),\n False),\n (GraphBaseBlock,\n lambda: ([], {'in_features': 4, 'out_features': 4, 'nbaselayer': 1}),\n lambda: ([torch.rand([4, 4]), torch.rand([4, 4])], {}),\n False),\n (GraphConvolutionBS,\n lambda: ([], {'in_features': 4, 'out_features': 4}),\n lambda: ([torch.rand([4, 4]), torch.rand([4, 4])], {}),\n False),\n (InecptionGCNBlock,\n lambda: ([], {'in_features': 4, 'out_features': 4, 'nbaselayer': 1}),\n lambda: ([torch.rand([4, 4]), torch.rand([4, 4])], {}),\n False),\n (MultiLayerGCNBlock,\n lambda: ([], {'in_features': 4, 'out_features': 4, 'nbaselayer': 1}),\n lambda: ([torch.rand([4, 4]), torch.rand([4, 4])], {}),\n False),\n (ResGCNBlock,\n lambda: ([], {'in_features': 4, 'out_features': 4, 'nbaselayer': 1}),\n lambda: ([torch.rand([4, 4]), torch.rand([4, 4])], {}),\n False),\n]\n\nclass 
Test_DropEdge_DropEdge(_paritybench_base):\n    def test_000(self):\n        self._check(*TESTCASES[0])\n\n    def test_001(self):\n        self._check(*TESTCASES[1])\n\n    def test_002(self):\n        self._check(*TESTCASES[2])\n\n    def test_003(self):\n        self._check(*TESTCASES[3])\n\n    def test_004(self):\n        self._check(*TESTCASES[4])\n\n    def test_005(self):\n        self._check(*TESTCASES[5])\n\n    def test_006(self):\n        self._check(*TESTCASES[6])\n\n    def test_007(self):\n        self._check(*TESTCASES[7])\n\n","repo_name":"eladhoffer/pytorch-jit-paritybench","sub_path":"generated/test_DropEdge_DropEdge.py","file_name":"test_DropEdge_DropEdge.py","file_ext":"py","file_size_in_byte":23050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"}
+{"seq_id":"42203019158","text":"import io\nimport numpy as np\nfrom src.Normalizer import minMaxNormalize\nfrom src.NeuralNetwork import NeuralNetwork\nfrom src.Dataset import openDataset, selectDataset, splitDataset\n\nif __name__ == \"__main__\":\n    # * Open and read dataset\n    print(\"Loading dataset...\")\n    dataset_path = selectDataset(\"datasets/data.csv\")\n    df = openDataset(dataset_path)\n    # * Normalize data\n    normalized = minMaxNormalize(df.loc[:, df.columns > 0])\n    xPredict = normalized.loc[:, normalized.columns > 1].to_numpy()\n    yPredict = normalized[1].to_numpy(dtype=\"uint8\")\n    # * Get saved weights and network size\n    print(\"Loading saved weights...\")\n    try:\n        with open('weights.res') as file:\n            lines = file.readlines()\n        if len(lines) != 3:\n            print(\"Invalid saved weights !\")\n            exit(1)\n        size = [int(value) for value in lines[0].split(',')]\n        # Load weights and biases as separate files\n        # Remove final separator and newline before split\n        weights = []\n        current_layer = []\n        for layer in lines[1][:-2].split(','):\n            weights_file = io.StringIO(layer)\n            row_weights = np.loadtxt(weights_file)\n            if len(current_layer) > 0 and current_layer[0].shape != row_weights.shape:\n                weights.append(np.asarray(current_layer))\n                current_layer = []\n            current_layer.append(row_weights)\n        weights.append(np.asarray(current_layer))\n        # Bias\n        biases = []\n        current_layer = []\n        for layer in lines[2][:-2].split(','):\n            biases_file = io.StringIO(layer)\n            row_biases = np.loadtxt(biases_file)\n            if len(current_layer) > 0 and current_layer[0].shape != row_biases.shape:\n                biases.append(np.asarray(current_layer))\n                current_layer = []\n            current_layer.append(row_biases)\n        biases.append(np.asarray(current_layer))\n    except ValueError:\n        print(\"Invalid value in saved weights !\")\n        exit(1)\n    except IOError:\n        print('No weights saved, use train.py first !')\n        exit(1)\n    # * Initialize neural network\n    print(\"Initializing neural network...\")\n    try:\n        network = NeuralNetwork(size=size)\n        network.weights = weights\n        network.biases = biases\n        network.accuracy(xPredict, yPredict)\n    except ValueError:\n        print(\"Invalid value or dimensions in saved weights !\")\n        exit(1)\n","repo_name":"Glagan/42-multilayer-perceptron","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"12316669931","text":"# # _*_coding:utf-8 _*_\n# # @Time : 2020/1/5 22:59\n# # @Author : zimo\n# # @File : qcc_qy_info.py\n# # @Software : PyCharm\n# # @Theme : fetch the high-anonymity proxy list; the port obtained from this site is encrypted\n\n\nfrom get_user_agent import *\nimport requests\nfrom lxml import etree\n\ndef get_proxy_lst():\n    user_agent=get_user_agent()\n    proxy_lst=[]\n    url='http://www.data5u.com/'\n    headers={\n        
\"Accept-Encoding\":\"gzip, deflate\",\n \"User-Agent\":user_agent\n }\n res=requests.get(url,headers=headers)\n # res.encoding='utf-8'\n html=etree.HTML(res.text)\n proxy_info=html.xpath('//li[@style=\"text-align:center;\"]/ul[@class]')\n for p in proxy_info[1:]:\n http=p.xpath('./span/li/text()')[3]\n ip=p.xpath('./span/li/text()')[0]\n port=p.xpath('./span/li/text()')[1]\n tmp_lst={http:http+\"://\"+ip+\":\"+port}\n proxy_lst.append(tmp_lst)\n print(proxy_lst)\n return proxy_lst\n\nif __name__ == '__main__':\n proxy_lst=get_proxy_lst()\n print(proxy_lst)","repo_name":"shanggeshihun/crawl","sub_path":"高匿IP测试/get_proxy_lst.py","file_name":"get_proxy_lst.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"29401376404","text":"'''\nAuthor: b1b2b3b4b5b6 a1439458305@163.com\nDate: 2022-09-13 16:23:32\nLastEditors: b1b2b3b4b5b6 a1439458305@163.com\nLastEditTime: 2022-09-20 14:13:56\nFilePath: /leetcode/python/132.分割回文串-ii.py\nDescription: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE\n'''\n#\n# @lc app=leetcode.cn id=132 lang=python3\n#\n# [132] 分割回文串 II\n#\n# https://leetcode.cn/problems/palindrome-partitioning-ii/description/\n#\n# algorithms\n# Hard (49.74%)\n# Likes: 617\n# Dislikes: 0\n# Total Accepted: 66.3K\n# Total Submissions: 133.5K\n# Testcase Example: '\"aab\"'\n#\n# 给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。\n#\n# 返回符合要求的 最少分割次数 。\n#\n#\n#\n#\n#\n# 示例 1:\n#\n#\n# 输入:s = \"aab\"\n# 输出:1\n# 解释:只需一次分割就可将 s 分割成 [\"aa\",\"b\"] 这样两个回文子串。\n#\n#\n# 示例 2:\n#\n#\n# 输入:s = \"a\"\n# 输出:0\n#\n#\n# 示例 3:\n#\n#\n# 输入:s = \"ab\"\n# 输出:1\n#\n#\n#\n#\n# 提示:\n#\n#\n# 1\n# s 仅由小写英文字母组成\n#\n#\n#\n#\n#\n\n# @lc code=start\n\n'''\n关键字\n动态规划\n\n状态: dp(i) s[0:i]的最少分割次数\n'''\n\n\nclass Solution:\n def minCut(self, s: str) -> int:\n\n bak = {}\n\n def dp(i, j):\n\n if i > j:\n return True\n\n if (i, j) in bak:\n return bak[(i, j)]\n\n res = False\n if s[i] == s[j] and dp(i+1, j - 1) == True:\n res = True\n bak[(i, j)] = res\n\n return res\n\n for j in range(len(s)):\n for i in range(j + 1):\n dp(i, j)\n\n mem = {}\n mem[0] = 0\n for i in range(1, len(s)+1):\n\n temp_res = float('INF')\n for k in range(i):\n if bak[(k, i-1)] == True:\n temp_res = min(temp_res, mem[k] + 1)\n mem[i] = temp_res\n\n return mem[len(s)] - 1\n # @lc code=end\n","repo_name":"b1b2b3b4b5b6/algorithm","sub_path":"python/132.分割回文串-ii.py","file_name":"132.分割回文串-ii.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"4377132240","text":"import numpy as np\nfrom collections import Counter\nimport matplotlib\nmatplotlib.use('Agg') \nimport matplotlib.pyplot as plt\n\ndef p(l,n,x):\n c = 0.0\n tot=0\n for i in range(len(l)):\n tot+=n[i]\n if(l[i]>=x):\n c+=n[i]\n return (c/tot)*100\n\nf1 = open(\"../slack/data/perl_all\",\"r\")\n#f2 = open(\"loadv2_all\",\"r\")\n#f3 = open(\"loadv3\",\"r\")\n\nslack1 = []\nnRef=[]\nfor line in f1:\n #print(line)\n slack1.append(float(line.split()[3]))\n nRef.append(float(line.split()[4]))\n\n\n\n'''\nslack3 = []\nfor line in f3:\n slack3.append(float(line.split()[1]))\n'''\n\nx=[y for y in range(0,1000)]\ny1 = [p(slack1,nRef, i) for i in x]\n\n\n#y2 = [p(slack2, i) for i in x]\n#y3 = [p(slack3, i) for i in x]\n\nplt.plot(x,y1,label='perl',color='red')\n#plt.plot(x,y3,label='v3',color='orange')\nplt.legend()\nplt.xlabel('Slack in 
Cycles')\r\nplt.ylabel('Percentage Dynamic Instructions with at least x Cycles slack')\r\nplt.savefig('perl_all_dynamic.png')","repo_name":"ashutosh1807/BTP-traces","sub_path":"graph/dynamic_slack.py","file_name":"dynamic_slack.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73789801511","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nimport struct\n\ndef line_out(line, w):\n    l = line.split()\n    # s = []\n    b = \"\"\n    for a in l:\n        b = _print(a)\n        w.write(b)\n    w.write('\\n')\n\ndef file_out(f, w):\n    for line in f:\n        line_out(line, w)\n\ndef _print(a):\n    if (a.isdigit()):\n        i = print_int(int(a))\n        #s.append(int(a))\n    elif (a[0] == '-' and a[1:].isdigit()):\n        i = print_neg_int(int(a))\n        #s.append(int(a))\n    else:\n        i = print_float(float(a))\n        #s.append(float(a))\n    return \"{:0>32b}\".format(i)\n\ndef print_int(x):\n    return x\ndef print_neg_int(x):\n    z = (1 << 32) + x\n    return z\ndef print_float(x):\n    y = struct.pack(' shark.size:\r\n                continue\r\n\r\n            if not check[ni][nj] == -1:\r\n                continue\r\n\r\n            check[ni][nj] = check[shark_i][shark_j]+1\r\n            q.append((ni,nj))\r\n\r\n            if 0 < MAP[ni][nj] < shark.size and check[ni][nj]:\r\n\r\n                if temp_moved >= check[ni][nj]:\r\n                    temp_moved = check[ni][nj]\r\n                    can_eat.append((ni, nj, MAP[ni][nj],check[ni][nj]))\r\n\r\n        if len(can_eat) == 0:\r\n            break\r\n\r\n        # print(can_eat)\r\n\r\n        i_distance = -100000\r\n        j_distance = 100000\r\n\r\n        chosen_fish=[]\r\n\r\n        for fish in can_eat:\r\n            if shark.i - fish[0] > i_distance:\r\n                i_distance = shark.i - fish[0]\r\n                j_distance = shark.j - fish[1]\r\n                chosen_fish = fish\r\n\r\n            elif shark.i - fish[0] == i_distance:\r\n                if shark.j - fish[1] > j_distance:\r\n                    i_distance = shark.i - fish[0]\r\n                    j_distance = shark.j - fish[1]\r\n                    chosen_fish = fish\r\n\r\n        shark.i = chosen_fish[0]\r\n        shark.j = chosen_fish[1]\r\n        shark.eatten += 1\r\n        ans += chosen_fish[3]\r\n        if shark.eatten == shark.size:\r\n            shark.size += 1\r\n            shark.eatten = 0\r\n\r\n\r\n\r\n\r\n\r\nprint(ans)\r\n","repo_name":"jinyoungcho/Algorithm_study","sub_path":"PYTHON_CODE/16236.py3.py","file_name":"16236.py3.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"736193123","text":"def turn(temperatures):\n    # Write your code here.\n    maximum_list = []\n    minimum_list = []\n    for i in temperatures:\n        maximum_list.append(i[0])\n        minimum_list.append(i[1])\n    temperatures = {\"maximum\":maximum_list, \"minimum\":minimum_list}\n    return temperatures\n\n\n# Do not modify the code below.\nif __name__ == '__main__':\n    temperatures = [\n        [9, 3],\n        [9, 0],\n        [11, -3],\n        [11, 1],\n        [8, -3],\n        [7, -3],\n        [-4, -12]\n    ]\n    print(turn(temperatures)) \n    # {\n    #     'maximum': [9, 9, 11, 11, 8, 7, -4], \n    #     'minimum': [3, 0, -3, 1, -3, -3, -12]\n    # }\n","repo_name":"moonthree/TIL","sub_path":"practice/07_evaluation/problem04.py","file_name":"problem04.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"5959139978","text":"import random\nimport string\nimport logging\nimport time\nfrom starlette.requests import Request\n\nlog = logging.getLogger(\"uvicorn.errors\")\n
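\n# Usage sketch (added; not part of this repo): in a FastAPI/Starlette app the middleware\n# below would typically be registered as:\n#   app = FastAPI()\n#   app.middleware(\"http\")(log_requests)\n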
\nasync def log_requests(request: Request, call_next):\n    \"\"\"Log an id, client region/IP, method, path and latency for each request.\n\n    :param request: the incoming HTTP request\n    :param call_next: coroutine that forwards the request to the next handler\n    \"\"\"\n    idem = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))\n    log.info(\n        f\"RID={idem} REGION={request.headers.get('cf-ipcountry')} CLIENT_IP={request.headers.get('cf-connecting-ip')} START REQUEST PATH={request.url.path} METHOD={request.method} \"\n    )\n    start_time = time.time()\n    response = await call_next(request)\n\n    process_time = (time.time() - start_time) * 1000\n    formatted_process_time = '{0:.2f}'.format(process_time)\n    log.info(\n        f\"RID={idem} COMPLETED={formatted_process_time}ms REQUEST={request.method.upper()} {request.url.path} STATUS_CODE={response.status_code}\"\n    )\n\n    return response","repo_name":"ronen-albagli/image-converter","sub_path":"app/middleware/http_logger.py","file_name":"http_logger.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"93858690","text":"\"\"\"Module to read and write atoms in cif file format.\n\nSee http://www.iucr.org/resources/cif/spec/version1.1/cifsyntax for a\ndescription of the file format.  STAR extensions as save frames,\nglobal blocks, nested loops and multi-data values are not supported.\n\"\"\"\n\nimport shlex\nimport re\n\nimport numpy as np\n\nfrom ase.parallel import paropen\nfrom ase.lattice.spacegroup import crystal\nfrom ase.lattice.spacegroup.spacegroup import spacegroup_from_data\n\n\n\ndef unread_line(fileobj):\n    \"\"\"Unread the last line read from *fileobj*.\"\"\"\n    while True:\n        if fileobj.tell() == 0:\n            break\n        fileobj.seek(-2, 1)\n        if fileobj.read(1) in ('\\n', '\\r'):\n            break\n    \n\ndef convert_value(value):\n    \"\"\"Convert CIF value string to corresponding python type.\"\"\"\n    value = value.strip()\n    if re.match('(\".*\")|(\\'.*\\')$', value):\n        return value[1:-1]\n    elif re.match(r'[+-]?\\d+$', value):\n        return int(value)\n    elif re.match(r'[+-]?(?:\\d+(?:\\.\\d*)?|\\.\\d+)(?:[eE][+-]?\\d+)?$', value):\n        return float(value)\n    elif re.match(r'[+-]?(?:\\d+(?:\\.\\d*)?|\\.\\d+)(?:[eE][+-]?\\d+)?\\(\\d+\\)$', \n                  value):\n        return float(value[:value.index('(')]) # strip off uncertainties\n    else:\n        return value\n\n\ndef parse_singletag(fileobj, line):\n    \"\"\"Parse a CIF tag (entries starting with underscore). Returns\n    a key-value pair.\"\"\"\n    kv = line.split(None, 1)\n    if len(kv) == 1:  # _key\n        key = line\n        line = fileobj.readline().strip()\n        while line == '':\n            line = fileobj.readline().strip()\n        if line == ';':   # multiline string \n            lines = [line[1:].lstrip()]\n            while True:\n                line = fileobj.readline().strip()\n                if line == ';':\n                    break\n                lines.append(line)\n            value = '\\n'.join(lines).strip()\n        else:  # value on next line\n            value = line\n    else:  # _key, value\n        key, value = kv\n    return key, convert_value(value)\n
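\n# Examples of convert_value (added illustration):\n#   convert_value(\"3\")        ->  3       (int)\n#   convert_value(\"1.23(4)\")  ->  1.23    (trailing uncertainty stripped)\n#   convert_value(\"'P 1'\")    ->  'P 1'   (surrounding quotes stripped)\n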
\n\ndef parse_loop(fileobj):\n    \"\"\"Parse a CIF loop. Returns a dict with column tag names as keys\n    and lists of the column content as values.\"\"\"\n    header = []\n    line = fileobj.readline().strip()\n    while line.startswith('_'):\n        header.append(line.lower())\n        line = fileobj.readline().strip()\n    columns = dict([(h, []) for h in header])\n    while True:\n        lowerline = line.lower()\n        if (not line or \n            line.startswith('_') or \n            lowerline.startswith('data_') or \n            lowerline.startswith('loop_')):\n            break\n        if line.startswith('#'):\n            line = fileobj.readline().strip()\n            continue\n        tokens = shlex.split(line)\n        for h, t in zip(header, tokens):\n            columns[h].append(convert_value(t))\n        line = fileobj.readline().strip()\n    if line:\n        unread_line(fileobj)\n    return columns\n\n\ndef parse_items(fileobj, line):\n    \"\"\"Parse CIF data items and return a dict with all tags.\"\"\"\n    tags = {}\n    while True:\n        line = fileobj.readline()\n        if not line:\n            break\n        line = line.strip()\n        lowerline = line.lower()\n        if not line or line.startswith('#'):\n            continue\n        elif line.startswith('_'):\n            key, value = parse_singletag(fileobj, line)\n            tags[key.lower()] = value\n        elif lowerline.startswith('loop_'):\n            tags.update(parse_loop(fileobj))\n        elif lowerline.startswith('data_'):\n            unread_line(fileobj)\n            break\n        else:\n            raise ValueError('%s: Unexpected CIF file entry: %s'%(\n                fileobj.name, line))\n    return tags\n\n\ndef parse_block(fileobj, line):\n    \"\"\"Parse a CIF data block and return a tuple with the block name\n    and a dict with all tags.\"\"\"\n    assert line.lower().startswith('data_')\n    blockname = line.split('_', 1)[1].rstrip()\n    tags = parse_items(fileobj, line)\n    return blockname, tags\n\n\ndef parse_cif(fileobj):\n    \"\"\"Parse a CIF file. Returns a list of blockname and tag\n    pairs. All tag names are converted to lower case.\"\"\"\n    if isinstance(fileobj, basestring):\n        fileobj = open(fileobj)\n\n    blocks = []\n    while True:\n        line = fileobj.readline()\n        if not line:\n            break\n        line = line.strip()\n        if not line or line.startswith('#'):\n            continue\n        blocks.append(parse_block(fileobj, line))\n    return blocks\n\n\ndef tags2atoms(tags, **kwargs):\n    \"\"\"Returns an Atoms object from a cif tags dictionary.\"\"\"\n    a = tags['_cell_length_a']\n    b = tags['_cell_length_b']\n    c = tags['_cell_length_c']\n    alpha = tags['_cell_angle_alpha']\n    beta = tags['_cell_angle_beta']\n    gamma = tags['_cell_angle_gamma']\n\n    scaled_positions = np.array([tags['_atom_site_fract_x'], \n                                 tags['_atom_site_fract_y'], \n                                 tags['_atom_site_fract_z']]).T\n\n    symbols = []\n    if '_atom_site_type_symbol' in tags:\n        labels = tags['_atom_site_type_symbol']\n    else:\n        labels = tags['_atom_site_label']\n    for s in labels:\n        # Strip off additional labeling on chemical symbols\n        m = re.search(r'([A-Z][a-z]?)', s) \n        symbol = m.group(0)\n        symbols.append(symbol)\n\n    # Symmetry specification, see\n    # http://www.iucr.org/resources/cif/dictionaries/cif_sym for a\n    # complete list of official keys.  
In addition we also try to\n    # support some commonly used deprecated notations\n    no = None\n    if '_space_group.IT_number' in tags:\n        no = tags['_space_group.IT_number']\n    elif '_symmetry_int_tables_number' in tags:\n        no = tags['_symmetry_int_tables_number']\n\n    symbolHM = None\n    if '_space_group.Patterson_name_H-M' in tags:\n        symbolHM = tags['_space_group.Patterson_name_H-M']\n    elif '_symmetry_space_group_name_H-M' in tags:\n        symbolHM = tags['_symmetry_space_group_name_H-M']\n\n    sitesym = None\n    if '_space_group_symop.operation_xyz' in tags:\n        sitesym = tags['_space_group_symop.operation_xyz']\n    elif '_symmetry_equiv_pos_as_xyz' in tags:\n        sitesym = tags['_symmetry_equiv_pos_as_xyz']\n    \n    spacegroup = 1\n    if sitesym is not None:\n        spacegroup = spacegroup_from_data(no=no, symbol=symbolHM,\n                                          sitesym=sitesym)\n    elif no is not None:\n        spacegroup = no\n    elif symbolHM is not None:\n        spacegroup = symbolHM\n    else:\n        spacegroup = 1\n\n    atoms = crystal(symbols, basis=scaled_positions, \n                    cellpar=[a, b, c, alpha, beta, gamma],\n                    spacegroup=spacegroup, **kwargs)\n    return atoms\n    \n\ndef read_cif(fileobj, index=-1, **kwargs):\n    \"\"\"Read Atoms object from CIF file. *index* specifies the data\n    block number or name (if string) to return. Keyword arguments are\n    passed on to ase.lattice.spacegroup.crystal().\"\"\"\n    blocks = parse_cif(fileobj)\n    if isinstance(index, str):\n        tags = dict(blocks)[index]\n        return tags2atoms(tags, **kwargs)\n    elif isinstance(index, int):\n        name, tags = blocks[index]\n        return tags2atoms(tags, **kwargs)\n    else:\n        return [tags2atoms(tags) for name, tags in blocks[index]]\n\n\n\ndef write_cif(fileobj, images):\n    \"\"\"Write *images* to CIF file.\"\"\"\n    if isinstance(fileobj, str):\n        fileobj = paropen(fileobj, 'w')\n\n    if not isinstance(images, (list, tuple)):\n        images = [images]\n\n    for i, atoms in enumerate(images):\n        fileobj.write('data_image%d\\n' % i)\n\n        from numpy import arccos, pi, dot\n        from numpy.linalg import norm\n\n        cell = atoms.cell\n        a = norm(cell[0])\n        b = norm(cell[1])\n        c = norm(cell[2])\n        alpha = arccos(dot(cell[1], cell[2])/(b*c))*180./pi\n        beta = arccos(dot(cell[0], cell[2])/(a*c))*180./pi\n        gamma = arccos(dot(cell[0], cell[1])/(a*b))*180./pi\n\n        fileobj.write('_cell_length_a %g\\n' % a)\n        fileobj.write('_cell_length_b %g\\n' % b)\n        fileobj.write('_cell_length_c %g\\n' % c)\n        fileobj.write('_cell_angle_alpha %g\\n' % alpha)\n        fileobj.write('_cell_angle_beta %g\\n' % beta)\n        fileobj.write('_cell_angle_gamma %g\\n' % gamma)\n        fileobj.write('\\n')\n\n        if atoms.pbc.all():\n            fileobj.write('_symmetry_space_group_name_H-M %s\\n' % 'P 1')\n            fileobj.write('_symmetry_int_tables_number %d\\n' % 1)\n            fileobj.write('\\n')\n\n            fileobj.write('loop_\\n')\n            fileobj.write(' _symmetry_equiv_pos_as_xyz\\n')\n            fileobj.write(\" 'x, y, z'\\n\")\n            fileobj.write('\\n')\n\n        fileobj.write('loop_\\n')\n        fileobj.write(' _atom_site_label\\n')\n        fileobj.write(' _atom_site_occupancy\\n')\n        fileobj.write(' _atom_site_fract_x\\n')\n        fileobj.write(' _atom_site_fract_y\\n')\n        fileobj.write(' _atom_site_fract_z\\n')\n        fileobj.write(' _atom_site_thermal_displace_type\\n')\n        fileobj.write(' _atom_site_B_iso_or_equiv\\n')\n        fileobj.write(' _atom_site_type_symbol\\n')\n\n        scaled = atoms.get_scaled_positions()\n        no = {}\n        for i, atom in enumerate(atoms):\n            symbol = atom.symbol\n            if symbol in no:\n                no[symbol] += 1\n            else:\n                no[symbol] = 1\n            fileobj.write(\n                ' %-8s %6.4f %7.5f  %7.5f  %7.5f  %4s  %6.3f  %s\\n'%(\n                    '%s%d' % (symbol, no[symbol]), \n                    1.0, \n                    scaled[i][0], \n                    scaled[i][1], \n                    scaled[i][2],\n   
'Biso',\n 1.0,\n symbol))\n\n \n","repo_name":"slabanja/ase","sub_path":"ase/io/cif.py","file_name":"cif.py","file_ext":"py","file_size_in_byte":9777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"19657340990","text":"import getopt, os, sys\n\ndef usage() :\n\tprint >> sys.stderr, \"Parameters:\"\n\tprint >> sys.stderr, \"-d --domain Planning Domain\"\n\tprint >> sys.stderr, \"-i --instance Planning Instance\"\n\tprint >> sys.stderr, \"-Z --zipped-problem Zipped Planning domain and instance\"\n\tprint >> sys.stderr, \"-h --help Get Help\"\n\tprint >> sys.stderr, \"-t --max-time