diff --git "a/2475.jsonl" "b/2475.jsonl" new file mode 100644--- /dev/null +++ "b/2475.jsonl" @@ -0,0 +1,1990 @@ +{"seq_id":"30598046542","text":"import math as m\r\ndef isprime(num):\r\n s=int(m.sqrt(num))\r\n for i in range(2,s+1):\r\n if num==1:\r\n return 0\r\n if num%i==0:\r\n return 0\r\n return 1 \r\ndef findprime(n,data):\r\n c=[]\r\n np=[]\r\n for i in data:\r\n if isprime(i):\r\n c.append(i)\r\n else:\r\n np.append(i)\r\n return c,np\r\n \r\n\r\n\r\n\r\n\r\n\r\nn=int(input())\r\ndata=list(map(int,input().split()))\r\nprime=findprime(n,data)\r\nprint(*prime)\r\n","repo_name":"gowthamkhandavalli/becomecoder__python","sub_path":"prime num using list.py","file_name":"prime num using list.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36289005160","text":"from typing import Tuple, List, Dict\n\nfrom bs4 import NavigableString, Comment\n\n\n# Input: bs4 Tag \n# Output: dict of all fields in the table-row entry\n# Each section here is a within the passed-in \ndef extract_row_data(tr) -> Dict:\n data = {}\n\n # Course\n x, y, z = course(tr)\n data['subject-code'] = x\n data['course-num'] = y\n data['entry-type'] = z\n\n # Title\n x, y, z = title(tr)\n data['title'] = x\n data['designation'] = y\n data['sub-type'] = z\n\n # Class#\n x = class_number(tr)\n data['class-num'] = x\n\n # Instructor\n x = instructor(tr)\n data['instructor'] = x\n\n # Days\n x = days(tr)\n data['days'] = x\n\n # Start\n x = start(tr)\n data['start'] = x\n\n # End\n x = end(tr)\n data['end'] = x\n\n # Location\n x = location(tr)\n data['location'] = x\n\n # Dates\n x, y = dates(tr)\n data['dates'] = x\n data['session-code'] = y\n\n # Units\n x = units(tr)\n data['units'] = x\n\n # Seats Open\n x, y, z = seats_open(tr)\n data['open_seats'] = x\n data['total_seats'] = y\n data['seat_status'] = z\n\n # GS\n x = general_studies(tr)\n data['general_studies'] = x\n\n return data\n\n\n# Input: bs4 Tag \n# Output: (subject code, course num, entry type)\ndef course(tr) -> Tuple[str, str, str]:\n td = tr.find('td', {'class': 'subjectNumberColumnValue'}) # Data cell\n course_span = td.find('span') # Span containing text\n\n # No text is present -> SubEntry\n if not course_span:\n return '', '', 'Sub'\n\n # If text found, split code and number then indicate MainEntry\n text = course_span.text.strip()\n subject_code, course_num = text.split()\n return subject_code, course_num, 'Main'\n\n\n# Input: bs4 Tag \n# Output: (title, designation, sub-type)\ndef title(tr) -> Tuple[str, str, str]:\n td = tr.find('td', {'class': 'titleColumnValue'}) # Data cell\n\n # Optional fields defaulted to empty\n designation = ''\n subtype = ''\n\n # Get title from [div class-results-drawer] or [a course-details-link]\n title_div = td.find('div', {'class': 'class-results-drawer'})\n if title_div:\n title = ' '.join(title_div.text.split())\n else:\n title_a = td.find('a', {'class': 'course-details-link'})\n title = title_a.text.strip()\n\n # Get designation if present\n designation_container = td.find('span', {'class': 'lab-designation'})\n if designation_container:\n comptip = designation_container.find('span', {'class': 'comptip'})\n designation = comptip.text.strip()\n\n # Get subtype if present\n cohort_message = td.find('div', {'id': 'cohortMessage'})\n if cohort_message:\n subtype = cohort_message.find(text=True, recursive=False).strip()\n\n return title, designation, subtype\n\n\n# Input: bs4 Tag \n# Output: class# (str)\ndef 
class_number(tr) -> str:\n td = tr.find('td', {'class': 'classNbrColumnValue'}) # Data cell\n course_number_link = td.find('a')\n return course_number_link.text.strip()\n\n\n# Input: bs4 Tag \n# Output: instructor string list\ndef instructor(tr) -> List[str]:\n td = tr.find('td', {'class': 'instructorListColumnValue'}) # Data cell\n\n # Get span and list of instructor spans contained inside\n list_span = td.find('span', recursive=False)\n list_items = list_span.find_all('span', recursive=False)\n\n instructors = []\n\n # Go through all instructor spans in list\n for index, item in enumerate(list_items):\n instructor = item.text.strip()\n\n # Remove commas on all but last instructor, add to list\n if index < len(list_items) - 1:\n instructor = instructor[:-1]\n instructors.append(instructor)\n\n return instructors\n\n\n# Input: bs4 Tag \n# Output: days as list of strings, each slot of the list refers to what session\n# of the course the day is for.\ndef days(tr) -> List[str]:\n td = tr.find('td', {'class': 'dayListColumnValue'}) # Data cell\n\n days = []\n\n # For all listed items, each string goes into a list slot\n for child in td.children:\n if type(child) == NavigableString:\n days.append(str(child).strip())\n\n return days\n\n\n# Input: bs4 Tag \n# Output: start times as list of strings, each slot of the list refers to what\n# session of the course the start time is for\ndef start(tr) -> List[str]:\n td = tr.find('td', {'class': 'startTimeDateColumnValue'}) # Data cell\n\n starts = []\n comment = False # Used in tracking comment that messes up formatting\n\n # For all listed items, each string goes into a list slot\n for child in td.children:\n\n # If comment found, raise flag\n if type(child) == Comment:\n comment = True\n\n # If text found after comment, add to starts\n elif type(child) == NavigableString and comment:\n starts.append(str(child).strip())\n\n return starts\n\n\n# Input: bs4 Tag \n# Output: end times as list of strings, each slot of the list refers to what\n# session of the course the end time is for\ndef end(tr) -> List[str]:\n td = tr.find('td', {'class': 'endTimeDateColumnValue'}) # Data cell\n\n ends = []\n\n # For all listed items, each string goes into a list slot\n for child in td.children:\n if type(child) == NavigableString:\n ends.append(str(child).strip())\n\n return ends\n\n\n# Input: bs4 Tag \n# Output: locations as list of strings, each slot of the list refers to what\n# session of the course the location is for\ndef location(tr) -> List[str]:\n td = tr.find('td', {'class': 'locationBuildingColumnValue'}) # Data cell\n\n locations = []\n\n for item in td.children:\n # Either spacing in page or potential location string\n if type(item) == NavigableString:\n text = str(item).strip()\n if text:\n locations.append(text)\n\n # Tag, either formatting
or location in or \n else:\n tag_name = item.name\n if tag_name == 'a':\n locations.append(item.text.strip())\n elif tag_name == 'span':\n # Corrects for blank spans used for cohort_message formatting\n span_text = item.text.strip()\n if span_text:\n locations.append(span_text)\n\n return locations\n\n\n# Input: bs4 Tag \n# Output: date ranges as list of strings, each slot of the list being a\n# 'session' of the class. Session code string 'A'/'B'/'C'/'DYN'\ndef dates(tr) -> Tuple[List[str], str]:\n td = tr.find('td', {'class': 'startDateColumnValue'}) # Data cell\n dates_link = td.find('a', {'class': 'deadlinetip'}) # Link of dates list\n\n dates = []\n\n # Take all date strings in the link and put the in the dates list\n for child in dates_link:\n if type(child) == NavigableString:\n dates.append(str(child).strip())\n\n # Session code is on the last string\n # ['08/22 - 12/06', '09/30 - 09/30' , '10/18 - 10/18', '11/06 - 11/06(C)']\n left_paren = dates[-1].find('(') # 13\n session = dates[-1][left_paren + 1:-1] # session = C\n dates[-1] = dates[-1][:left_paren] # dates[-1] = '11/06 - 11/06'\n\n return dates, session\n\n\n# Input: bs4 Tag \n# Output: string 'X' or 'X-Y'\ndef units(tr):\n td = tr.find('td', {'class': 'hoursColumnValue'}) # Data cell\n return td.text.strip()\n\n\n# Input: bs4 Tag \n# Output: (open seats, total seats, status)\ndef seats_open(tr) -> Tuple[str, str, str]:\n td = tr.find('td', {'class': 'availableSeatsColumnValue'}) # Data cell\n\n # Get columns making up format [X][of][Y][STATUS]\n seats_row = td.find('div', {'class': 'row'}, recursive=False)\n columns = seats_row.find_all('div', {'class': 'col-xs-3'}, recursive=False)\n\n # Get open seats and total seats\n open_seats = columns[0].text.strip()\n total_seats = columns[2].text.strip()\n\n # Get classes of status icon\n icon = columns[3].find('i', {'class': 'fa'})\n icon_classes = icon.attrs['class']\n\n # Circle -> Open\n if 'fa-circle' in icon_classes:\n status = 'Open'\n\n # !Triangle -> Reserved\n elif 'fa-exclamation-triangle' in icon_classes:\n status = 'Reserved'\n\n # X -> None\n elif 'fa-times' in icon_classes:\n status = 'None'\n\n # If icon couldn't be found -> unknown status '?'\n else:\n status = '?'\n\n return open_seats, total_seats, status\n\n\n# Given , return string of possible general studies codes\ndef general_studies(tr):\n td = tr.find('td', {'class': 'tooltipRqDesDescrColumnValue'}) # Data cell\n return td.text.strip()\n","repo_name":"damionmounts/asu-course-search","sub_path":"source/table_row.py","file_name":"table_row.py","file_ext":"py","file_size_in_byte":8556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36659159875","text":"import pandas as pd\nimport numpy as np\nimport json\nfrom scipy import stats\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\ndef remove_outliers(df):\n complete_time = df[[\"id\", \"rt\"]].groupby(\"id\").agg(\"mean\")\n total_mean = np.mean(complete_time.values)\n total_std = np.std(complete_time.values)\n\n outliers = []\n for elem in complete_time.itertuples():\n if np.abs(elem.rt - total_mean) > 2 * total_std:\n outliers.append(elem.Index)\n\n df = df[~df[\"id\"].isin(outliers)]\n return df\n\n# Loading dataset\ndf = pd.read_csv(\"../data/rulebased_cat_brand_2022.csv\")\n\n# Removing outliers (> 2 std)\ntraining_phase = remove_outliers(df[df[\"rules_set\"] == \"ruleset1\"])\nrule_change = remove_outliers(df[df[\"rules_set\"] == \"ruleset2a\"])\nnew_rule = 
remove_outliers(df[df[\"rules_set\"] == \"ruleset2b\"])\nnew_rule_mix = remove_outliers(df[df[\"rules_set\"] == \"ruleset2c\"])\n\n# Collecting results for the training phase and calculate median and mad\nresults = []\ntraining_time = training_phase[[\"id\", \"rt\"]].groupby(\"id\").agg(\"mean\")\nprint(\"Time Training: M={} (MAD={})\".format(np.median(training_time.values), stats.median_abs_deviation(training_time.values)))\nfor elem in training_time.itertuples():\n results.append({\n \"id\" : elem.Index,\n \"type\" : \"Training Phase\",\n \"rt\" : elem.rt\n })\n\n# Collecting results for the rule change test phase and calculate median and mad\nrule_change_time = rule_change[[\"id\", \"rt\"]].groupby(\"id\").agg(\"mean\")\nprint(\"Time Rule Change: M={} (MAD={})\".format(np.median(rule_change_time.values), stats.median_abs_deviation(rule_change_time.values)))\nfor elem in rule_change_time.itertuples():\n results.append({\n \"id\" : elem.Index,\n \"type\" : \"Rule Change\",\n \"rt\" : elem.rt\n })\n\n# Collecting results for the new rule (within) test phase and calculate median and mad\nnew_rule_time = new_rule[[\"id\", \"rt\"]].groupby(\"id\").agg(\"mean\")\nprint(\"Time New Rule: M={} (MAD={})\".format(np.median(new_rule_time.values), stats.median_abs_deviation(new_rule_time.values)))\nfor elem in new_rule_time.itertuples():\n results.append({\n \"id\" : elem.Index,\n \"type\" : \"New Rule\\nWithin division\",\n \"rt\" : elem.rt\n })\n\n# Collecting results for the new rule (across) test phase and calculate median and mad\nnew_rule_mix_time = new_rule_mix[[\"id\", \"rt\"]].groupby(\"id\").agg(\"mean\")\nprint(\"Time New Rule Mix: M={} (MAD={})\".format(np.median(new_rule_mix_time.values), stats.median_abs_deviation(new_rule_mix_time.values)))\nfor elem in new_rule_mix_time.itertuples():\n results.append({\n \"id\" : elem.Index,\n \"type\" : \"New Rule\\nAcross division\",\n \"rt\" : elem.rt\n })\n\n# Compile dataframe for plotting and determine ordering\nresult_df = pd.DataFrame(results)\norder = result_df.groupby(\"type\").agg(\"mean\").sort_values(by=\"rt\", ascending=False).reset_index()[\"type\"]\n\n# Plot times\nsns.set_theme(style=\"whitegrid\", palette=\"colorblind\")\ncolors = [(r,g,b,0.5) for r,g,b in sns.color_palette(\"colorblind\")[:4]]\n\ncmap = {\n \"Training Phase\" : \"C0\",\n \"Rule Change\" : \"C1\",\n \"New Rule\\nWithin division\" : \"C2\",\n \"New Rule\\nAcross division\" : \"C3\"\n}\n\nax = sns.boxplot(x=\"type\", y=\"rt\", data=result_df, order=order, palette=cmap)\n\nfor patch in ax.patches:\n r, g, b, a = patch.get_facecolor()\n patch.set_facecolor((r, g, b, .4))\n\nax = sns.stripplot(x=\"type\", y=\"rt\", data=result_df, size=4, linewidth=0.2, edgecolor=\"#000\", jitter=True, order=order, palette=cmap)\nax.set_xlabel(\"\")\nax.set_ylabel(\"Response Time (ms)\")\nplt.tight_layout()\nplt.savefig(\"time.pdf\")\nplt.show()\n","repo_name":"Shadownox/cogsci-2022-rulebasedcat","sub_path":"analysis/time_plot.py","file_name":"time_plot.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33205390908","text":"from django.shortcuts import render, get_object_or_404\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .models import Music, Artist, Comment\nfrom .serializer import MusicSerializer, ArtistSerializer, CommentSerializer, ArtistDetailSerializer\n\n\n@api_view(['GET'])\ndef music_list(request):\n\n params = {}\n 
artist_pk = request.GET.get('artist_pk') # url ? 는 request.GET으로 꺼내 씀\n\n if artist_pk is not None:\n params['artist_id'] = artist_pk\n\n # 만약 artist_pk 가 Query Params 로 넘어온다면, artist_pk 로 필터링한 값만 응답.\n # 그렇지 않다면 전체 음악 응답.\n\n # 모든 음악에 대한 정보 받아서 json파일 형식으로 변환시켜서 응답해야 한다\n musics = Music.objects.filter(**params) # 내가 보여주고 싶은 데이터를 DB에서 꺼낸다\n # serializing작업을 해준다 (ModelForm 작성과 비슷)\n serializer = MusicSerializer(musics, many=True)\n # (내가 받고 싶은 data, many=T/F (default = F))\n return Response(serializer.data) # json형태로 응답을 보낼 것\n\n\n# api/vi1/musics/3/\n@api_view(['GET', 'PUT', 'DELETE'])\ndef music_detail_update_delete(request, music_pk):\n music = get_object_or_404(Music, pk=music_pk) # 음악 있는지 없는지 확인\n if request.method == 'GET':\n serializer = MusicSerializer(music) # 하나의 데이터라서 many=True 안 넣음\n return Response(serializer.data)\n elif request.method == 'PUT':\n # 수정할 때 사용자가 넘긴 데이터 / 수정이므로 기존 instance 넣어줌\n serializer = MusicSerializer(data=request.data, instance=music)\n if serializer.is_valid(raise_exception=True): # valid 하지 않을 땐 exception\n serializer.save() # 수정하고\n return Response(serializer.data) # 수정한 결과값 보여주기\n else: # DELETE\n music.delete()\n return Response({'message': 'Music has been deleted!'})\n\n\n@api_view(['GET'])\ndef artist_list(request):\n artists = Artist.objects.all()\n serializer = ArtistSerializer(artists, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef artist_detail_update_delete(request, artist_pk):\n artist = get_object_or_404(Artist, pk=artist_pk)\n if request.method == 'GET':\n serializer = ArtistDetailSerializer(artist) # 하나의 데이터라서 many=True 안 넣음\n return Response(serializer.data)\n if request.method == 'PUT':\n serializer = ArtistSerializer(data=request.data, instance=artist)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data)\n else:\n artist.delete()\n return Response({'message': 'Artist has been deleted!'})\n\n\n@api_view(['GET'])\ndef comment_list(request):\n comments = Comment.objects.all()\n serializer = CommentSerializer(comments, many=True)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef comments_create(request, music_pk):\n # 사용자가 보낸 데이터 그대로 serializer 에 담겠음\n serializer = CommentSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True): # 검증에 실패하면 400 Bad Request 오류 발생\n serializer.save(music_id=music_pk)\n return Response(serializer.data)\n\n\n@api_view(['PUT', 'DELETE'])\ndef comments_update_and_delete(request, comment_pk):\n comment = get_object_or_404(Comment, pk=comment_pk)\n if request.method == 'PUT':\n serializer = CommentSerializer(data=request.data, instance=comment)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data)\n # if request.method == 'DELETE':\n else:\n comment.delete()\n return Response({'message': 'Comment has been deleted!'})\n","repo_name":"susuminmin/django-rest-framework","sub_path":"musics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25776993524","text":"#github.com/thegrayninja\n#for use with: curl -H \"X-ApiKeys: accessKey=; secretKey=\" https://cloud.tenable.com/scanners/1/agents >> all_agents.json \n#currently, simply returns all agent IP, Hostname, OS and ID\n\nimport json\noriginalFile = raw_input(\"Please Enter JSON file name: \")\nwith open (originalFile, \"r\") as data_file:\n\tcounter = 0\n\tresults 
= \"\"\n\tdata = json.load(data_file)\n\tfor i in (data[\"agents\"]):\n\t\tagent_ip = (data[\"agents\"][counter][\"ip\"])\n\t\tagent_os = (data[\"agents\"][counter][\"platform\"])\n\t\tagent_name = (data[\"agents\"][counter][\"name\"])\n\t\tagent_id = (data[\"agents\"][counter][\"id\"])\n\t\tcounter += 1\n\t\tresults = results + \"\\n%s,%s,%s,%s\" % (agent_ip, agent_name, agent_os, agent_id)\n\n\nnewFile = open(originalFile.replace('.json', '.csv'), \"w\")\nnewFile.write(results)\nprint (\"%s has been saved into your current directory!\" % (originalFile.replace('.json', '.csv'))) \nnewFile.close()\n","repo_name":"thegrayninja/tenable.io","sub_path":"agent_list.py","file_name":"agent_list.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70280036009","text":"from argparse import ArgumentParser\nimport zipfile\n\nclass BasicSub:\n def __init__(self, arguments=list()):\n parser = ArgumentParser()\n arguments.append(('-o','--output', 'Path to output'))\n for arg in arguments:\n parser.add_argument(arg[0], arg[1], help = arg[2])\n self.args = parser.parse_args()\n\n self.resdr = self.pathify(self.args.output)\n\n def submit(self):\n raise NotImplementedError\n\n @staticmethod\n def pathify(pth: str):\n return pth.strip('/') + '/'\n","repo_name":"magisterbrown/monet","sub_path":"commands/basic_sub.py","file_name":"basic_sub.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14828911457","text":"import c4d\nfrom c4d import gui\n#Welcome to the world of Python\n\n\ndef deleteSelectedAnimation():\n doc.StartUndo()\n selObjs=doc.GetActiveObjects(c4d.GETACTIVEOBJECTFLAGS_SELECTIONORDER)\n if len(selObjs)<=0:\n c4d.StatusSetText(\"No Objects Selected!\")\n return\n\n c4d.DrawViews(c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW|c4d.DRAWFLAGS_NO_THREAD|c4d.DRAWFLAGS_STATICBREAK)\n\n\n for obj in selObjs:\n tracks=obj.GetCTracks()\n if len(tracks)>0:\n for track in tracks:\n doc.AddUndo(c4d.UNDOTYPE_DELETE, track)\n track.Remove()\n\n doc.EndUndo()\n c4d.EventAdd()\n\ndef main():\n deleteSelectedAnimation()\nif __name__=='__main__':\n main()","repo_name":"BretBays/bb-c4d-scripts","sub_path":"animationScripts/deleteSelectedAnimation.py","file_name":"deleteSelectedAnimation.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"20941352684","text":"from Tkinter import *\n\n\nclass FilenameDialog:\n \"\"\"Simple input dialog window\"\"\"\n def __init__(self, parent):\n top = self.top = Toplevel(parent)\n Label(top, text=\"Enter filename\").pack()\n self.e = Entry(top)\n self.e.pack()\n b = Button(top, text=\"OK\", command=self.ok)\n b.pack()\n self.result = \"\"\n \n def ok(self):\n self.result = self.e.get()\n self.top.destroy()\n","repo_name":"vladimir-nazarenko/spbu","sub_path":"GestureRecognition/tk.py","file_name":"tk.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72546393127","text":"#6. 
Add skin and sound effect\n\nimport pygame,time,random, sys \n\npygame.init()\n\nscreen = pygame.display.set_mode((810,630))\n\npygame.display.set_caption('Snake game by Nhóm 1')\n\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nBLACK = (0,0,0)\nBLUE = (0,0,255)\n\nRunning = True\n\nImgfood = pygame.transform.scale(pygame.image.load('food.png'),(30,30))\nImgsnake = pygame.transform.scale(pygame.image.load('snake.jpg'),(30,30))\n\nfoodx = round(random.randrange(1,25)) * 30.0\nfoody = round(random.randrange(1,19)) * 30.0\n\nsnakepos = [300,300]\n\nsnake = [[300,300]]\n \nx1_change = 0 \ny1_change = 0\n \nsound= pygame.mixer.Sound(\"sound1.mp3\")\n\n\nclock = pygame.time.Clock()\n\ndef game_over():\n\n gfont = pygame.font.SysFont('consolas',40)\n gsurf = gfont.render('Game over!',True,RED)\n screen.blit(gsurf,[300,260])\n pygame.display.flip()\n time.sleep(3)\n\nwhile Running:\n\n screen.fill(BLACK)\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n Running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n x1_change = -30\n y1_change = 0\n elif event.key == pygame.K_RIGHT:\n x1_change = 30\n y1_change = 0\n elif event.key == pygame.K_UP:\n y1_change = -30\n x1_change = 0\n elif event.key == pygame.K_DOWN:\n y1_change = 30\n x1_change = 0\n \n snakepos[0] += x1_change\n snakepos[1] += y1_change\n\n if snakepos[0] >= 800 or snakepos[0] < 0 or snakepos[1] >= 600 or snakepos[1] < 0:\n Running = False\n\n for b in snake[1:]:\n if snakepos[0] == b[0] and snakepos[1] == b[1]:\n Running = False\n\n snake.append(list(snakepos))\n\n if snakepos[0] == foodx and snakepos[1] == foody:\n sound.play()\n foodx = round(random.randrange(1,25)) * 30.0\n foody = round(random.randrange(1,19)) * 30.0\n else:\n snake.pop(0) #xóa vị trí đầu\n\n for i in snake:\n screen.blit(Imgsnake,pygame.Rect(i[0],i[1],30,30))\n\n screen.blit(Imgfood,pygame.Rect(foodx, foody, 30, 30))\n \n\n pygame.display.flip()\n \n clock.tick(10)\n\ngame_over()\n\npygame.quit()\nsys.exit()\n\n\n\n","repo_name":"Chi68P1/Lap_trinh_python","sub_path":"Snake_game/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32383942982","text":"# import jax.numpy as np\nimport numpy as np\n\n# def make_A():\n# \"\"\"This function is used to create the matrix in\n# A . a = d for our linear inverse problem\n# \"\"\"\n\n\ndef make_modes(mults):\n \"\"\"This function is used to build specific modes for synthetic tests.\n \"\"\"\n nlm_array = np.array([], dtype='int') # array containing the n,l,m values of a mode\n \n # counter to be executed only once\n is_first = True\n\n print(mults)\n \n for i, mult in enumerate(mults):\n # we restrict ourselves to m = \\pm 1,\\pm 2 only\n m_arr = np.array([-2,-1,1,2], dtype='int')\n for m in m_arr:\n # if((np.abs(m) == 1) * (mult[1] == 2)): continue\n if(np.abs(m) > mult[1]): continue\n mode = np.array([mult[0], mult[1], m], dtype='int')\n # it is okay to hardcode the shape since it won't change\n mode = np.reshape(mode,(3,1))\n \n # ensure the shape is readjusted correctly the first time\n if(is_first):\n nlm_array = np.append(nlm_array, mode)\n # reshaping to allow correct appending. 
It is okay\n # to hardcode the shape to (3,1) since that won't change\n nlm_array = np.reshape(nlm_array, (3,1))\n # turning off counter since we have the correct shape of nlm_array now\n is_first = False\n \n # from the second time onwards\n else:\n nlm_array = np.append(nlm_array, mode, axis=1)\n \n print(nlm_array)\n\n return nlm_array\n","repo_name":"srijaniiserprinceton/enseisro","sub_path":"enseisro/synthetics/create_synth_modes.py","file_name":"create_synth_modes.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10898609018","text":"#Leetcode 696\nclass Solution:\n def countBinarySubstrings(self, s: str) -> int:\n groups=[1]\n for i in range(1,len(s)):\n if s[i-1] == s[i]:\n groups[-1] += 1\n else:\n groups.append(1)\n ans = 0\n for i in range(1,len(groups)):\n ans += min(groups[i-1],groups[i])\n return ans\n \n # Time = O(n), as we are visiting each element\n # space = O(n), for groups\n","repo_name":"snagari-coder/Data_Structure_Algorithms","sub_path":"strings/CountBinarySubstrings.py","file_name":"CountBinarySubstrings.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74924153769","text":"# -------------------------------------------------------------------------------------------------------------\n# -------------------------------------------------------------------------------------------------------------\n# Настройки Ethernet\n# -------------------------------------------------------------------------------------------------------------\n# Импортируем Шаблон взаимодействия\n\nfrom JSON_Backend_framework.Service.Template_Devices_Functions.Settings.DeviceSettings.Template_Interface_Ethernet_settings import TemplateInterface_EthernetUM40\n# from JSON_Backend_framework.Service.TemplateDecorator import print_log_use_GET_data # from\n# JSON_Backend_framework.FormJSON.UM40.Settings.DeviceSettings.JSON_Construct_Settings_Interface_Ethernet import\n# SettingsEthernet\n# -------------------------------------------------------------------------------------------------------------\n\n\nclass Interface_Ethernet(TemplateInterface_EthernetUM40):\n \"\"\"\n Настройки Ethernet\n\n \"\"\"\n\n # хедерс - Иногда нужен\n _headers = None\n # куки\n _cookies = None\n\n # # Общие настройки\n # Settings = None\n # # Имя поля настроек\n # _Settings_name = 'Settings'\n # # Настройки по умолчанию\n #\n # # Настройки Ethernet _Eth0 = {'iface': 'eth0', 'dhcp': False, 'ip': '192.168.0.1', 'netmask': '255.255.255.1',\n # 'gw': '', 'dns1': '', 'dns2': ''}\n #\n # _Eth1 = {'iface': 'eth1', 'dhcp': True, 'ip': '', 'netmask': '', 'gw': '', 'dns1': '', 'dns2': ''}\n\n def __init__(self, cookies=None, headers=None, ip_address=None):\n \"\"\"\n Настройки Ethernet\n\n :param cookies:\n :param headers:\n \"\"\"\n if cookies is not None:\n self._cookies = cookies\n if headers is not None:\n self._headers = headers\n\n if ip_address is not None:\n self._ip_address = ip_address\n # # Обнуляем\n # self._define_JSON()\n\n # # Настройки по умолчанию\n #\n # def Read_Settings(self):\n # \"\"\"\n # Читаем данные - GET\n # :return:\n # \"\"\"\n # # делаем запрос - получаем ответ\n # response = self._request_GET()\n #\n # return response\n #\n # def Write_Settings(self, data=None):\n # \"\"\"\n # Добавляем на запись данные - POST\n #\n # :param data:\n # :return:\n # \"\"\"\n #\n # if data is None:\n # data_settings = 
self._getting_settings()\n # data = {self._Settings_name: data_settings}\n #\n # # Запаковываем\n # data = self._coding(data=data)\n #\n # # делаем запрос - получаем ответ\n # response = self._request_POST(JSON=data)\n #\n # return response\n #\n # def Rewrite_Settings(self, data=None):\n # \"\"\"\n # Перезаписываем данные - PUT\n # :param data:\n # :return:\n # \"\"\"\n # if data is None:\n # data_settings = self._getting_settings()\n # data = {self._Settings_name: data_settings}\n #\n # # Запаковываем\n # data = self._coding(data=data)\n #\n # # делаем запрос - получаем ответ\n # response = self._request_PUT(JSON=data)\n #\n # return response\n #\n # def Delete_Settings(self, data=None):\n # \"\"\"\n # Удаляем данные - DELETE\n # :param data:\n # :return:\n # \"\"\"\n # # Запаковываем\n # if data is not None:\n # data = self._coding(data=data)\n #\n # # делаем запрос - получаем ответ\n # response = self._request_DELETE(JSON=data)\n # else:\n # # делаем запрос - получаем ответ\n # response = self._request_DELETE()\n #\n # return response\n #\n # # Здесь расположим сервисные функции\n # # Первое - Получаем настройки что уже есть\n #\n # def _getting_settings(self):\n #\n # \"\"\"\n # В Классе шаблоне метод получения настроек отвечает за вставку GET запроса\n # \"\"\"\n # data = self._request_setting()\n # return data\n #\n # # Запрос настроек\n # @print_log_use_GET_data\n # def _request_setting(self):\n # \"\"\"\n # Здесь запрашиваем нужные нам настройки\n #\n # \"\"\"\n # data = []\n # try:\n # # делаем запрос - получаем ответ\n # response = self.Read_Settings()\n # # Теперь вытаскиваем нужное\n # if response.get('code') == int(200):\n # answer_setting = response.get('data')\n # # Теперь заполянем наши переменные\n # if answer_setting is not None:\n # Settings = answer_setting.get(self._Settings_name)\n # if Settings is not None:\n # data = Settings\n # except Exception as e:\n #\n # print(\"При считывании параметров возникла ошибка - \" + str(e))\n #\n # return data\n # def _define_JSON(self):\n # \"\"\"\n # Здесь Сбрасываем настройки\n # \"\"\"\n # # Сбрасываем настройки\n # self.Settings = SettingsEthernet()\n #\n # def _getting_settings(self):\n #\n # \"\"\"\n # В Классе шаблоне метод получения настроек отвечает за вставку GET запроса -\n # Здесь переопределяем\n #\n # \"\"\"\n # data = self._getting_settings_Ethernet()\n # data = {self._Settings_name: data}\n # return data\n #\n # def _getting_settings_Ethernet(self):\n #\n # \"\"\"\n # Получение настроек что задали\n # \"\"\"\n #\n # # Пункт первый - читаем какие настройки у нас есть\n # settings_Ethernet = self.Settings.get_settings_Ethernet()\n #\n # Ethernet1 = None\n # Ethernet2 = None\n # Ethernet = settings_Ethernet.get(self._Settings_name)\n #\n #\n #\n # for i in Ethernet:\n # if i.get('iface') == 'eth0':\n # Ethernet1 = i\n # if i.get('iface') == 'eth1':\n # Ethernet2 = i\n #\n # # ТЕПЕРЬ, если у нас оба сейтинга не заданы , запрашиваем :\n # if (Ethernet1 is None) or (Ethernet2 is None):\n # _Ethernet1, _Ethernet2 = self._request_setting()\n # # Теперь смотрим точно что необходимо переназначить\n # if Ethernet1 is None:\n # # Теперь смотрим что считали\n # if _Ethernet1 is None:\n # Ethernet1 = self._Eth0\n # else:\n # Ethernet1 = _Ethernet1\n # if Ethernet2 is None:\n # # Теперь смотрим что считали\n # if _Ethernet2 is None:\n # Ethernet2 = self._Eth1\n # else:\n # Ethernet2 = _Ethernet2\n #\n # # Обнуляем\n # self._define_JSON()\n # # Теперь формируем нужный JSON\n # JSON = [Ethernet1, Ethernet2]\n # return JSON\n #\n # 
# Запрос настроек\n # @print_log_use_GET_data\n # def _request_setting(self):\n # \"\"\"\n # Здесь запрашиваем нужные нам настройки\n #\n # \"\"\"\n # _Eth0 = None\n # _Eth1 = None\n # try:\n # # делаем запрос - получаем ответ\n # response = self.Read_Settings()\n # # Теперь вытаскиваем нужное\n # if response.get('code') == int(200):\n # sim_setting = response.get('data')\n # # Теперь заполянем наши переменные\n # if sim_setting is not None:\n #\n # # print(sim_setting)\n # Settings = sim_setting[self._Settings_name]\n # # Теперь перебираем все это\n # for idx in Settings:\n # if idx.get('iface') == 'eth0':\n # _Eth0 = idx\n # if idx.get('iface') == 'eth1':\n # _Eth1 = idx\n #\n # except Exception as e:\n #\n # print(\"При считывании параметров возникла ошибка - \" + str(e))\n #\n # return _Eth0, _Eth1\n\n\n# -------------------------------------------------------------------------------------------------------------\n# ПРИМЕР JSON\n# -------------------------------------------------------------------------------------------------------------\n# data = {'Settings': [\n# {'iface': 'eth0', 'dhcp': False, 'ip': '', 'netmask': '', 'gw': '', 'dns1': '', 'dns2': ''},\n# {'iface': 'eth1', 'dhcp': True, 'ip': '', 'netmask': '', 'gw': '', 'dns1': '', 'dns2': ''}\n# ]}\n# -------------------------------------------------------------------------------------------------------------\n","repo_name":"TR1GUN/json_backend_framework","sub_path":"JSON_Backend_framework/Devices_USPD/UM40/Functional/Settings/DeviceSettings/Interface_Ethernet.py","file_name":"Interface_Ethernet.py","file_ext":"py","file_size_in_byte":9935,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21453353885","text":"import heapq\nimport collections\n\nfrom typing import List, Tuple\n\nclass Element:\n def __init__(self, word, count):\n self.word: str = word\n self.count: int = count\n\n def __lt__(self, other):\n if self.count == other.count:\n # when retreiving the answer we need to make sure the word \n # which has smaller lexicographical get popped first\n # as we'll reverse the ans at the end \n return self.word > other.word\n return self.count < other.count\n\n def __eq__(self, other):\n return self.count == other.count and self.word == other.word\n\nclass Solution:\n def topKFrequent(self, words: List[str], k: int) -> List[str]:\n # use Counter to provide counts for each words\n counter = collections.Counter(words)\n # here we maintain a 'min' heap\n pq: List[Element] = []\n heapq.heapify(pq)\n for word, count in counter.items():\n element = Element(word, count)\n heapq.heappush(pq, element)\n # when pq's len > k, we pop the 1st element from the pq \n # as pq is a min heap, the smallest word will always \n # get popped\n if len(pq) > k:\n heapq.heappop(pq)\n ans: List[str] = []\n while pq:\n ans.append(heapq.heappop(pq).word)\n return ans[::-1]\n\n\n\n","repo_name":"jerrt2003/leetcode-in-python","sub_path":"692_Top_K_Frequent_Words/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44976179885","text":"#!/usr/bin/env python\n\nimport gym\ntry:\n import torch\nexcept ImportError:\n import metaschool as ms\n torch = ms.utils._ImportRaiser('torch', 'pip install torch')\n\n\nclass DefaultColors:\n white = 1.0\n gray = 0.5\n black = 0.0\n\n\nclass MSRJumpEnv(gym.Env):\n\n \"\"\"\n [Source]\n\n ## Description\n\n Bare bone 
re-implementation of the MSR Montreal Jumping task.\n\n ## References\n\n 1. Tachet des Combes et al. 2018. \"Learning Invariances for Policy Generalization\" \\\n ICLR 2018 Workshop Track\n\n ## Example\n\n ~~~python\n env = MSRJumpEnv(\n screen_width=64,\n screen_height=64,\n obstacle_position=25,\n agent_speed=2,\n jumping_height=15,\n obstacle_size=(10, 10),\n agent_size=(10, 10),\n )\n env.reset()\n while True:\n action = env.get_optimal_action()\n s, r, d, _ = env.step(action)\n if d: break\n print('Max achievable reward:', env.reward_range[1])\n ~~~\n\n \"\"\"\n\n def __init__(\n self,\n floor_height=10,\n obstacle_position=20,\n obstacle_size=(9, 10),\n vision_observations=True,\n screen_width=84,\n screen_height=84,\n jumping_height=15,\n agent_speed=1,\n agent_size=(5, 10),\n colors=None,\n device=None,\n num_envs=1,\n max_episode_steps=600,\n ):\n \"\"\"\n ## Arguments\n\n * `floor_height` (int, *optional*, default=10) - Height of the floor.\n * `obstacle_position` (int, *optional*, default=20) - X-position of the obstacle.\n * `obstacle_size` (tuple, *optional*, default=(9, 10)) - Dimensions of obstacle.\n * `vision_observations` (bool, *optional*, default=True) - Use vision observations or physical state.\n * `screen_width` (int, *optional*, default=84) - Witdth of the screen.\n * `screen_height` (int, *optional*, default=84) - Height of the screen\n * `jumping_height` (int, *optional*, default=15) - Height of agent's jump.\n * `agent_speed` (int, *optional*, default=1 - Speed of agent.\n * `agent_size` (tuple, *optional*, default=(5, 10)) - Dimensions of agent.\n * `colors` (class, *optional*, default=None) - Color values for visual observations.\n * `device` (torch.device, *optional*, default=None) - Device for observation tensors.\n * `num_envs` (int, *optional*, default=1) - Number of parallel environments (unsupported).\n * `max_episode_steps` (int, *optional*, default=600) - Horizon length.\n \"\"\"\n super(MSRJumpEnv, self).__init__()\n assert jumping_height+1 >= agent_size[0] + obstacle_size[0], \\\n 'Task unsolvable: increase jumping height.'\n assert num_envs == 1, 'Multiple envs not supported, yet.'\n\n # dynamics\n self.vision_observations = vision_observations\n self.floor_height = floor_height\n self.obstacle_position = obstacle_position\n self.obstacle_size = obstacle_size\n self.colors = DefaultColors() if colors is None else colors\n self.screen_width = screen_width\n self.screen_height = screen_height\n self.max_screen_size = max(self.screen_height, self.screen_width)\n\n # agent\n self.agent_size = agent_size\n self.agent_speed = agent_speed\n self.jumping_height = jumping_height\n self.device = device\n\n # properties\n self.num_envs = num_envs\n self.reward_range = (-1, self.screen_width + 2 - agent_size[0])\n self.action_space = gym.spaces.Discrete(2)\n if vision_observations:\n self.observation_space = gym.spaces.Box(\n low=0.0,\n high=1.0,\n shape=(num_envs, screen_height, screen_width),\n )\n else:\n self.observation_space = gym.spaces.Box(\n low=0.0,\n high=1.0,\n shape=(num_envs, 10),\n )\n self._max_episode_steps = max_episode_steps\n\n # reset\n self.reset()\n\n def reset(self):\n self.agent_position = [0, self.floor_height]\n self.jumping = [False, 0] # [mid-air, delta_y]\n self.done = False\n self._steps = 0\n self._base_observation = self._get_base_observation()\n return self.get_observation()\n\n def _step_dynamics(self, action):\n\n if not self.jumping[0] and action == 1: # jump!\n self.jumping = [True, 1]\n\n # update x position\n 
self.agent_position[0] += self.agent_speed\n\n if self.jumping[0]:\n # update jump direction\n if self.agent_position[1] > self.floor_height + self.jumping_height:\n self.jumping[1] = -1\n\n # update y position\n self.agent_position[1] += self.jumping[1] * self.agent_speed\n\n # stop jump\n if self.agent_position[1] == self.floor_height:\n self.jumping = [False, 0]\n\n def step(self, action):\n # step dynamics\n self._step_dynamics(action)\n\n # decide termination\n timelimit = self._steps > self._max_episode_steps\n collision = \\\n self.obstacle_position + self.obstacle_size[0] > self.agent_position[0] and \\\n self.obstacle_position < self.agent_position[0] + self.agent_size[0] and \\\n self.floor_height + self.obstacle_size[1] > self.agent_position[1] and \\\n self.floor_height < self.agent_position[1] + self.agent_size[1]\n exited = self.screen_width < self.agent_position[0] + self.agent_size[0]\n done = timelimit or collision or exited\n\n # compute rewards\n if collision:\n reward = -1.0\n elif exited:\n reward = self.agent_speed + 1.0\n else:\n reward = self.agent_speed\n\n # get observation\n observation = self.get_observation()\n\n self._steps += 1\n return observation, reward, done, None\n\n def render(self, mode='human'):\n if mode == 'text':\n raise NotImplementedError()\n elif mode == 'human':\n raise NotImplementedError()\n elif mode == 'rgb_array':\n return self.get_observation(True) * 255.0\n\n def _get_base_observation(self, vision=None):\n if vision is None:\n vision = self.vision_observations\n if vision:\n # draw background\n frame = torch.ones(\n size=(self.screen_height, self.screen_width),\n dtype=torch.float32,\n device=self.device,\n ) * self.colors.black\n\n # draw obstacle\n frame[\n self.obstacle_position:self.obstacle_position+self.obstacle_size[0],\n self.floor_height:self.floor_height+self.obstacle_size[1],\n ].fill_(self.colors.gray)\n\n # draw screen outline\n frame[0:self.screen_height, 0].fill_(self.colors.white)\n frame[0:self.screen_height, self.screen_width-1].fill_(self.colors.white)\n frame[0, 0:self.screen_width].fill_(self.colors.white)\n frame[self.screen_height-1, 0:self.screen_width].fill_(self.colors.white)\n\n # draw floor\n frame[0:self.screen_width, self.floor_height].fill_(self.colors.white)\n return frame\n else:\n return torch.tensor([\n self.agent_position[0],\n self.agent_position[1],\n self.agent_size[0],\n self.agent_size[1],\n self.agent_speed,\n self.jumping_height,\n self.floor_height,\n self.obstacle_position,\n self.obstacle_size[0],\n self.obstacle_size[1],\n ], device=self.device) / self.max_screen_size\n\n def get_observation(self, vision=None):\n if vision is None:\n vision = self.vision_observations\n\n if self.vision_observations == vision:\n obs = self._base_observation.clone()\n else:\n obs = self._get_base_observation(vision)\n\n if vision:\n obs[\n self.agent_position[0]:self.agent_position[0]+self.agent_size[0],\n self.agent_position[1]:self.agent_position[1]+self.agent_size[1],\n ].fill_(self.colors.white)\n return obs.t().unsqueeze(0)\n else:\n obs[0] = self.agent_position[0] / self.max_screen_size\n obs[1] = self.agent_position[1] / self.max_screen_size\n return obs\n\n def get_optimal_action(self):\n \"\"\"\n Returns the optimal action given current state of the world.\n \"\"\"\n dist_to_obstacle = abs(self.obstacle_position - self.agent_position[0])\n jump_margin = self.agent_size[0] + self.obstacle_size[1]\n return int(dist_to_obstacle <= jump_margin - 1)\n\n\nif __name__ == \"__main__\":\n\n # Test optimal 
action\n env = MSRJumpEnv(\n screen_width=64,\n screen_height=64,\n obstacle_position=25,\n agent_speed=2,\n # jumping_height=15,\n # obstacle_size=(10, 10),\n # agent_size=(10, 10),\n )\n\n random_rewards = 0.0\n env.reset()\n while True:\n _, r, d, _ = env.step(env.action_space.sample())\n random_rewards += r\n if d:\n break\n print('Random rewards:', random_rewards)\n\n optimal_rewards = 0.0\n env.reset()\n while True:\n action = env.get_optimal_action()\n _, r, d, _ = env.step(action)\n # print(action, env.agent_position)\n optimal_rewards += r\n if d:\n break\n print('Optimal rewards:', optimal_rewards)\n print('Max rewards:', env.reward_range[1])\n","repo_name":"learnables/metaschool","sub_path":"metaschool/envs/msr_jumping.py","file_name":"msr_jumping.py","file_ext":"py","file_size_in_byte":9802,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"1960992555","text":"\"\"\"empty message\n\nRevision ID: 954140d6b159\nRevises: 069a4549b999\nCreate Date: 2021-01-29 15:21:26.679711\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '954140d6b159'\ndown_revision = '069a4549b999'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('MosInstance', sa.Column('question', sa.Text(), nullable=True))\n op.add_column('MosInstance', sa.Column('utterance_idx', sa.Integer(), nullable=True))\n op.add_column('MosInstance', sa.Column('voice_idx', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('MosInstance', 'voice_idx')\n op.drop_column('MosInstance', 'utterance_idx')\n op.drop_column('MosInstance', 'question')\n # ### end Alembic commands ###\n","repo_name":"cadia-lvl/LOBE","sub_path":"migrations/versions/954140d6b159_.py","file_name":"954140d6b159_.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"27017008247","text":"#!/usr/bin/env python3\n\nimport sys\nsys.path.append('/cephfs/users/mbrown/PIPELINES/DNAseq/')\nimport json\nfrom utility.date_time import date_time\nfrom subprocess import call\nfrom utility.log import log\nfrom analysis.dustmask_filter import filter_indel\n\n\ndef parse_config(config_file):\n config_data = json.loads(open(config_file, 'r').read())\n return config_data['tools']['scalpel'], config_data['tools']['bedtools'], config_data['refs']['capture'], \\\n config_data['refs']['fa_ordered'], config_data['params']['threads'], config_data['params']['dustmask_flag'],\\\n config_data['refs']['dustmask'], config_data['params']['wg_flag'], config_data['refs']['project_dir'], \\\n config_data['refs']['project'], config_data['refs']['align_dir']\n\n\ndef wg_mode(scalpel, tumor_bam, normal_bam, fasta, cpus, pair, config_file):\n config_data = json.loads(open(config_file, 'r').read())\n exome = config_data['refs']['exome']\n loc = 'LOGS/' + pair + '_' + pair + '.genome_as_exome.scalpel.log'\n cmd = scalpel + ' --somatic --logs --numprocs ' + cpus + ' --tumor ' + tumor_bam + ' --normal ' \\\n + normal_bam + ' --window 600 --two-pass --bed ' + exome + ' --ref ' + fasta + ' 2> ' + loc\n log(loc, date_time() + cmd + '\\n')\n check = call(cmd, shell=True)\n if check != 0:\n return 1, pair\n return 0, pair\n\n\ndef scalpel_indel(tumor_id, normal_id, log_dir, 
config_file):\n (scalpel, bedtools, bed, fasta, cpus, dustmask_flag, dustmask_bed, wg, project_dir, project, align) \\\n = parse_config(config_file)\n \n sample_pair = tumor_id + '_' + normal_id\n loc = log_dir + sample_pair + '.scalpel.log'\n bam_dir = project_dir + project + '/' + align\n tumor_bam = bam_dir + '/' + tumor_id + '/BAM/' + tumor_id + '.merged.final.bam'\n normal_bam = bam_dir + '/' + normal_id + '/BAM/' + normal_id + '.merged.final.bam'\n if wg == 'n':\n scalpel_cmd = scalpel + ' --somatic --logs --numprocs ' + cpus + ' --tumor ' + tumor_bam + ' --normal ' \\\n + normal_bam + ' --bed ' + bed + ' --ref ' + fasta + ' 2>> ' + loc\n sys.stderr.write(date_time() + 'Starting indel calls for ' + sample_pair + '\\n')\n log(loc, date_time() + 'Starting indel calls for ' + sample_pair + ' in capture mode with command:\\n'\n + scalpel_cmd + '\\n')\n check = call(scalpel_cmd, shell=True)\n if check != 0:\n sys.stderr.write(date_time() + 'Indel calling failed for pair ' + sample_pair + ' with command:\\n' +\n scalpel_cmd + '\\n')\n exit(1)\n else:\n check = wg_mode(scalpel, tumor_bam, normal_bam, fasta, cpus, sample_pair, config_file)\n if check[0] != 0:\n sys.stderr.write('Scalpel failed for ' + normal_id + ' at ' + tumor_id + '\\n')\n exit(1)\n log(loc, date_time() + 'Indel calling complete for pair ' + sample_pair + ' moving output files\\n')\n mv_cmd = 'mv outdir/main/* .; rmdir outdir/main;'\n log(loc, date_time() + mv_cmd + '\\n')\n call(mv_cmd, shell=True)\n sys.stderr.write(date_time() + 'Completed indel calls for ' + sample_pair + '\\n')\n if dustmask_flag == 'Y':\n log(loc, date_time() + 'Filter dustmask flag given\\n')\n check = filter_indel(bedtools, dustmask_bed, sample_pair, loc)\n if check != 0:\n sys.stderr.write(date_time() + 'Dustmask failed for ' + sample_pair + '\\n')\n exit(1)\n else:\n log(loc, date_time() + 'Dustmask complete for ' + sample_pair + '\\n')\n sys.stderr.write(date_time() + 'Indel call completed\\n')\n return 0\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Scalpel indel caller wrapper script. 
Samples must have been aligned and bams merged '\n 'ahead of time')\n parser.add_argument('-t', '--tumor', action='store', dest='tumor', help='Tumor sample id')\n parser.add_argument('-n', '--normal', action='store', dest='normal', help='Normal sample id')\n parser.add_argument('-j', '--json', action='store', dest='config_file',\n help='JSON config file with tool and ref locations')\n parser.add_argument('-l', '--log', action='store', dest='log_dir', help='LOG directory location')\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n inputs = parser.parse_args()\n (tumor_id, normal_id, log_dir, config_file) = (inputs.tumor, inputs.normal, inputs.log_dir, inputs.config_file)\n scalpel_indel(tumor_id, normal_id, log_dir, config_file)\n","repo_name":"WhiteLab/alignment","sub_path":"analysis/scalpel_indel.py","file_name":"scalpel_indel.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70889797928","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Created on 2023-09-11 10:50:39\n# @Author: zzm\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as checkpoint\n\nfrom transformers.modeling_utils import CrossEntropyLoss\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2Attention, GPT2MLP, GPT2PreTrainedModel\n\nMAX_LENGTH = 77\nEFFNET_OUT = 512\n\nclass Block(nn.Module):\n def __init__(self, n_ctx, config, scale=False, layer_idx=None):\n super().__init__()\n nx = config.n_embd\n self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)\n self.attn = GPT2Attention(config, layer_idx=layer_idx)#torch.nn.MultiheadAttention(embed_dim = nx, num_heads = 1, dropout = 0.5) \n # self.attn = Attention(nx, n_ctx, config, scale)\n self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)\n self.mlp = GPT2MLP(4 * nx, config)\n \n def forward(self, x, layer_past=None, attention_mask=None, head_mask=None, use_cache=False):\n output_attn = self.attn(\n self.ln_1(x),\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n )\n a = output_attn[0] # output_attn: a, present, (attentions)\n\n x = x + a\n # print(self.ln_2(x).shape)\n m = self.mlp(self.ln_2(x))\n x = x + m\n \n # print(\"[x]:\", type([x]))\n # print(\"output_attn[1:]:\", type(output_attn[1:]))\n outputs = [x] + list(output_attn[1:])\n return outputs[0], outputs[1]\n\nclass GPT2Model(GPT2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.output_hidden_states = config.output_hidden_states\n self.output_attentions = config.output_attentions\n\n self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n self.wpe = nn.Embedding(config.n_positions, config.n_embd)\n self.drop = nn.Dropout(config.embd_pdrop)\n self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])\n self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.wte\n\n\n def set_input_embeddings(self, new_embeddings):\n self.wte = new_embeddings\n\n\n def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.h[layer].attn.prune_heads(heads)\n \n def run_block(self, block, layer_past, attention_mask, head_mask, use_cache):\n def custom_forward(*inputs):\n x, present = block(\n inputs[0],\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n )\n return x, 
present\n return custom_forward\n\n def forward(\n self,\n input_ids=None,\n past=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n use_cache=True,\n ):\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n if position_ids is not None:\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if past is None:\n past_length = 0\n past = [None] * len(self.h)\n else:\n past_length = past[0][0].size(-2)\n if position_ids is None:\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n # Attention mask.\n if attention_mask is not None:\n assert batch_size > 0, \"batch_size has to be defined and > 0\"\n attention_mask = attention_mask.view(batch_size, -1)\n attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n attention_mask = (1.0 - attention_mask) * -10000.0\n\n head_mask = self.get_head_mask(head_mask, self.config.n_layer)\n\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n position_embeds = self.wpe(position_ids)\n if token_type_ids is not None:\n token_type_embeds = self.wte(token_type_ids)\n else:\n token_type_embeds = 0\n hidden_states = inputs_embeds + position_embeds + token_type_embeds\n hidden_states = self.drop(hidden_states)\n\n output_shape = input_shape + (hidden_states.size(-1),)\n\n presents = ()\n all_attentions = []\n all_hidden_states = ()\n for i, (block, layer_past) in enumerate(zip(self.h, past)):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)\n \n hidden_states, present = checkpoint.checkpoint(\n self.run_block(block, layer_past, attention_mask, head_mask[i], use_cache),\n hidden_states\n )\n \n \n '''\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask[i],\n use_cache=use_cache,\n )\n hidden_states, present = outputs[:2]\n '''\n \n if use_cache is True:\n presents = presents + (present,)\n\n if self.output_attentions:\n all_attentions.append(outputs[2])\n\n hidden_states = self.ln_f(hidden_states)\n\n hidden_states = hidden_states.view(*output_shape)\n # Add last hidden state\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if use_cache is True:\n outputs = outputs + (presents,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n # let the number of heads free (-1) so we can extract attention even after head pruning\n attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]\n all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)\n outputs = outputs + 
(all_attentions,)\n return outputs # last hidden state, (presents), (all hidden_states), (attentions)\n \nclass GPT2LMHeadModel(GPT2PreTrainedModel):\n def __init__(self, config, hidden1=384, hidden2=256):\n super().__init__(config)\n self.transformer = GPT2Model(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n self.init_weights()\n \n #self.efficient = EfficientNet.from_pretrained(efficient, advprop=False)\n # self.efficient = EfficientNetCheck.from_pretrained(efficient, advprop=True)\n # self.efficient = clipmodel\n \n # MLP\n self.encoder1 = nn.Linear(config.n_embd + EFFNET_OUT, hidden1)\n self.decoder1 = nn.Linear(hidden1, config.n_embd)\n self.encoder2 = nn.Linear(config.n_embd, hidden2)\n self.decoder2 = nn.Linear(hidden2, config.n_embd)\n \n self.relu = nn.ReLU()\n \n nn.init.xavier_normal_(self.encoder1.weight, gain=0.1)\n nn.init.xavier_normal_(self.decoder1.weight, gain=0.1)\n nn.init.xavier_normal_(self.encoder2.weight, gain=0.1)\n nn.init.xavier_normal_(self.decoder2.weight, gain=0.1)\n \n def get_output_embeddings(self):\n return self.lm_head\n\n def prepare_inputs_for_generation(self, input_ids, past, **kwargs):\n # only last token for inputs_ids if past is defined in kwargs\n if past:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n\n return {\"input_ids\": input_ids, \"image\": kwargs[\"image\"], \"past\": past, \"use_cache\": kwargs[\"use_cache\"]}\n\n def forward(self, input_ids=None, fused_feature=None, past=None, attention_mask=None, token_type_ids=None,\n position_ids=None, head_mask=None, inputs_embeds=None, labels=None, use_cache=True):\n \n transformer_outputs = self.transformer(input_ids, past=past, attention_mask=attention_mask, \n token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, \n inputs_embeds=inputs_embeds, use_cache=use_cache)\n # print(\"transformer_outputs:\", transformer_outputs[0].shape)\n transformer = transformer_outputs[0]\n # with torch.no_grad():\n # image_feature = clipmodel.encode_image(image)\n # sketch_feature = clipmodel.encode_sketch(sketch)\n # image_feature = image_feature / image_feature.norm(dim=-1, keepdim=True)\n # sketch_feature = sketch_feature / sketch_feature.norm(dim=-1, keepdim=True)\n # fused_feature = clipmodel.feature_fuse(image_feature, sketch_feature)\n if MAX_LENGTH:\n efficient = fused_feature.unsqueeze(1).repeat(1, MAX_LENGTH, 1)\n else:\n efficient = fused_feature.unsqueeze(1).repeat(transformer.shape[0], transformer.shape[1], 1)\n \n # print(\"efficient:\", efficient.shape)\n # print(\"transformer:\", transformer.shape)\n latent = torch.cat((efficient, transformer), 2)\n \n encoded = self.relu(self.encoder1(latent))\n hidden_states = self.decoder1(encoded) + transformer\n \n encoded = self.relu(self.encoder2(hidden_states))\n hidden_states = self.decoder2(encoded) + hidden_states\n \n lm_logits = self.lm_head(hidden_states)\n\n outputs = (lm_logits,) + transformer_outputs[1:]\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), lm_logits, presents, (all hidden_states), 
(attentions)\n","repo_name":"zzmshinnosuke/MyTSBIR","sub_path":"code/gpt/gpt.py","file_name":"gpt.py","file_ext":"py","file_size_in_byte":11139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33519305903","text":"# (c) 2014 Amplify Education, Inc. All rights reserved, subject to the license\n# below.\n#\n# Education agencies that are members of the Smarter Balanced Assessment\n# Consortium as of August 1, 2014 are granted a worldwide, non-exclusive, fully\n# paid-up, royalty-free, perpetual license, to access, use, execute, reproduce,\n# display, distribute, perform and create derivative works of the software\n# included in the Reporting Platform, including the source code to such software.\n# This license includes the right to grant sublicenses by such consortium members\n# to third party vendors solely for the purpose of performing services on behalf\n# of such consortium member educational agencies.\n\n\"\"\"\nCreated on Feb 4, 2013\n\n@author: dip, nparoha\n\"\"\"\nimport os\nimport shutil\n\nimport allure\n\nfrom edware_testing_automation.edapi_tests.api_helper import ApiHelper\nfrom edware_testing_automation.frontend_tests.common_session_share_steps import SessionShareHelper\nfrom edware_testing_automation.frontend_tests.extracts_helper import ExtractsHelper\nfrom edware_testing_automation.pytest_webdriver_adaptor.pytest_webdriver_adaptor import browser\nfrom edware_testing_automation.utils.test_base import DOWNLOADS\n\nDOWNLOAD_DIRECTORY = DOWNLOADS + '/'\nUNZIPPED_FILE_PATH = '/tmp/item_level/'\nUNZIPPED_XML_FILE = '/tmp/raw_data'\nUNZIPPED_PDF_PATH = '/tmp/bulk_pdf/'\nUNZIPPED_STU_REG_FILE_PATH = '/tmp/student_reg/'\n\n\n@allure.feature('Smarter: Integration with Extract services')\n@allure.story('PDF report')\nclass TestServicesPdfAPI(ApiHelper, SessionShareHelper, ExtractsHelper):\n def __init__(self, *args, **kwargs):\n ApiHelper.__init__(self, *args, **kwargs)\n\n def setUp(self):\n self.helper_setup(UNZIPPED_FILE_PATH)\n self.helper_setup(UNZIPPED_XML_FILE)\n self.helper_setup(UNZIPPED_PDF_PATH)\n self.helper_setup(UNZIPPED_STU_REG_FILE_PATH)\n if not os.path.exists(DOWNLOAD_DIRECTORY):\n os.makedirs(DOWNLOAD_DIRECTORY)\n self.files_to_cleanup_at_end = []\n\n def tearDown(self):\n self.helper_teardown(UNZIPPED_XML_FILE)\n self.helper_teardown(UNZIPPED_FILE_PATH)\n self.helper_teardown(UNZIPPED_PDF_PATH)\n self.helper_teardown(UNZIPPED_STU_REG_FILE_PATH)\n for file_to_delete in self.files_to_cleanup_at_end:\n if os.path.exists(file_to_delete):\n os.remove(file_to_delete)\n\n def test_post_bulk_pdf_color_los(self):\n self.set_request_cookie('shall')\n self.set_request_header(\"content-type\", \"application/json\")\n self.set_payload({\"districtId\": \"229\", \"schoolId\": \"936\", \"asmtGrade\": [\"04\"], \"asmtYear\": 2016,\n \"asmtType\": \"SUMMATIVE\", \"lang\": \"en\", \"stateCode\": \"NC\", \"dateTaken\": 20160410,\n \"mode\": \"color\"})\n self.send_request(\"POST\", \"/services/pdf/indivStudentReport.html\")\n self.check_response_code(200)\n self.check_not_error_page()\n response = self._response.json()['files']\n zipfile_name = response[0]['fileName']\n down_url = response[0]['web_download_url']\n self.check_bulk_pdf_file(str(down_url), str(zipfile_name), 1)\n\n def test_post_bulk_pdf_gray_los(self):\n self.set_request_cookie('shall')\n self.set_request_header(\"content-type\", \"application/json\")\n self.set_payload({\"districtId\": \"229\", \"schoolId\": \"936\", \"asmtGrade\": [\"04\"], \"asmtYear\": 
2016,\n \"asmtType\": \"SUMMATIVE\", \"lang\": \"en\", \"stateCode\": \"NC\", \"dateTaken\": 20160410})\n self.send_request(\"POST\", \"/services/pdf/indivStudentReport.html\")\n self.check_response_code(200)\n self.check_not_error_page()\n response = self._response.json()['files']\n zipfile_name = response[0]['fileName']\n down_url = response[0]['web_download_url']\n self.check_bulk_pdf_file(str(down_url), str(zipfile_name), 1)\n\n def test_post_bulk_pdf_color_cpop_multi_grade(self):\n self.set_request_cookie('shall')\n self.set_request_header(\"content-type\", \"application/json\")\n self.set_payload(\n {\"districtId\": \"229\",\n \"schoolId\": \"936\",\n \"asmtGrade\": [\"03\", \"04\"],\n \"asmtYear\": 2015,\n \"asmtType\": \"SUMMATIVE\",\n \"lang\": \"en\",\n \"stateCode\": \"NC\",\n \"mode\": \"color\"}\n )\n self.send_request(\"POST\", \"/services/pdf/indivStudentReport.html\")\n self.check_response_code(200)\n self.check_not_error_page()\n response = self._response.json()['files']\n zipfile_name = response[0]['fileName']\n down_url = response[0]['web_download_url']\n print(zipfile_name)\n print(down_url)\n self.check_bulk_pdf_file(str(down_url), str(zipfile_name), 2)\n\n def test_post_bulk_pdf_gray_cpop_academic_yr_spanish(self):\n self.set_request_cookie('shall')\n self.set_request_header(\"content-type\", \"application/json\")\n self.set_payload({\"districtId\": \"229\", \"schoolId\": \"942\", \"asmtGrade\": [\"11\", \"12\"], \"asmtYear\": 2015,\n \"asmtType\": \"SUMMATIVE\",\n \"lang\": \"es\", \"stateCode\": \"NC\"})\n self.send_request(\"POST\", \"/services/pdf/indivStudentReport.html\")\n self.check_response_code(200)\n self.check_not_error_page()\n response = self._response.json()['files']\n zipfile_name = response[0]['fileName']\n down_url = response[0]['web_download_url']\n self.check_bulk_pdf_file(str(down_url), str(zipfile_name), 2)\n\n def helper_setup(self, filepath):\n if os.path.exists(filepath):\n shutil.rmtree(filepath)\n os.makedirs(filepath)\n\n def helper_teardown(self, filepath):\n if os.path.exists(filepath):\n shutil.rmtree(filepath)\n\n def check_bulk_pdf_file(self, url, file_name, num_files):\n self.open_requested_page_redirects_login_page(\"state_view_sds\")\n self.enter_login_credentials('shall', 'shall1234')\n self.check_redirected_requested_page(\"state_view_sds\")\n browser().get(url)\n downloaded_file = DOWNLOAD_DIRECTORY + file_name\n print(downloaded_file)\n self.files_to_cleanup_at_end.append(downloaded_file)\n self.check_downloaded_zipfile_present(file_name)\n self.unzip_file_to_directory(downloaded_file, UNZIPPED_PDF_PATH)\n pdf_file_names = self.get_pdf_file_name(UNZIPPED_PDF_PATH)\n self.assertEqual(len(pdf_file_names), num_files, 'expected number of pdf files NOT found')\n for each in pdf_file_names:\n pdf_file_path = os.path.join(UNZIPPED_PDF_PATH, each)\n pdf_file_size = os.path.getsize(pdf_file_path)\n print(\"test_post_los_bulk_pdf_color: bulk pdf file size = {s}\".format(s=pdf_file_size))\n self.assertNotEqual(0, pdf_file_size, \"Empty file\")\n\n def test_get_pdf(self):\n self.set_request_cookie('shall')\n self.set_query_params('studentId', '3fea53dd-effe-48da-8317-e1b21ff9828f')\n self.set_query_params('asmtYear', '2016')\n self.send_request(\"GET\", \"/services/pdf/indivStudentReport.html\")\n 
self.check_response_code(200)\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"tests/edware_testing_automation/edapi_tests/test_services_pdf_api.py","file_name":"test_services_pdf_api.py","file_ext":"py","file_size_in_byte":7168,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"8282253035","text":"\"\"\"worker module. Execute code and store results in database, files in the SFTP server.\n\"\"\"\n\nimport errno\nimport os\nimport signal\nimport time\nfrom datetime import datetime, timedelta\nfrom bson.objectid import ObjectId\nimport shutil\nfrom pollenisatorgui.core.components.apiclient import APIClient\nimport pollenisatorgui.core.components.utils as utils\nfrom pollenisatorgui.core.components.settings import Settings\nfrom pollenisatorgui.core.models.interval import Interval\nfrom pollenisatorgui.core.models.tool import Tool\nimport threading\nimport sys\n\nevent_obj = threading.Event()\n\n\ndef sendKill(queue):\n queue.put(\"kill\")\n\ndef executeTool(queue, queueResponse, apiclient, toolId, local=True, allowAnyCommand=False, setTimer=False, infos={}, logger_given=None, worker_name=\"\"):\n \"\"\"\n remote task\n Execute the tool with the given toolId on the given pentest name.\n Then execute the plugin corresponding.\n Any unhandled exception will result in a task-failed event in the class.\n\n Args:\n apiclient: the apiclient instance.\n toolId: the mongo Object id corresponding to the tool to execute.\n local: boolean, set the execution in a local context\n Raises:\n Terminated: if the task gets terminated\n OSError: if the output directory cannot be created (not if it already exists)\n Exception: if an exception unhandled occurs during the bash command execution.\n Exception: if a plugin considered a failure.\n \"\"\"\n import logging\n import sys\n logging.basicConfig(filename='error.log', encoding='utf-8', level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler(stream=sys.stdout)\n logger.addHandler(handler)\n\n def handle_exception(exc_type, exc_value, exc_traceback):\n logger.debug(\"Uncaught exception\", exc_info=(exc_type, exc_value, exc_traceback))\n if issubclass(exc_type, KeyboardInterrupt):\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n sys.exit(1)\n\n logger.error(\"Uncaught exception\", exc_info=(exc_type, exc_value, exc_traceback))\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n sys.exit(1)\n\n sys.excepthook = handle_exception\n #register signals\n signal.signal(signal.SIGTERM, lambda signum, sigframe: sendKill(queue))\n # Connect to given pentest\n logger.debug(\"executeTool: Execute tool locally:\" +str(local)+\" setTimer:\"+str(setTimer)+\" toolId:\"+str(toolId))\n APIClient.setInstance(apiclient)\n toolModel = Tool.fetchObject({\"_id\":ObjectId(toolId)})\n logger.debug(\"executeTool: get command for toolId:\"+str(toolId))\n command_dict = toolModel.getCommand()\n if command_dict is None and toolModel.text != \"\":\n command_dict = {\"plugin\":toolModel.plugin_used, \"timeout\":0}\n msg = \"\"\n success, data = apiclient.getCommandLine(toolId, toolModel.text)\n comm, fileext = data[\"comm_with_output\"], data[\"ext\"]\n logger.debug(\"executeTool: got command line for toolId:\"+str(toolId))\n if not success:\n print(str(comm))\n logger.debug(\"Autoscan: Execute tool locally error in getting commandLine : \"+str(toolId))\n toolModel.setStatus([\"error\"])\n sys.exit(1)\n \n outputRelDir = 
toolModel.getOutputDir(apiclient.getCurrentPentest())\n abs_path = os.path.dirname(os.path.abspath(__file__))\n toolFileName = toolModel.name+\"_\" + \\\n str(time.time()) # ext already added in command\n outputDir = os.path.join(abs_path, \"./results\", outputRelDir)\n # Create the output directory\n try:\n os.makedirs(outputDir)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(outputDir):\n pass\n else:\n print(str(exc))\n logger.debug(\"Autoscan: Execute tool locally error in creating output directory : \"+str(exc))\n toolModel.setStatus([\"error\"])\n sys.exit(1)\n outputPath = os.path.join(outputDir, toolFileName)\n comm = comm.replace(\"|outputDir|\", outputPath)\n settings = Settings()\n my_commands = settings.local_settings.get(\"my_commands\", {})\n bin_path = my_commands.get(toolModel.name)\n if bin_path is None:\n if utils.which_expand_alias(command_dict[\"bin_path\"]):\n bin_path = command_dict[\"bin_path\"]\n else:\n toolModel.setStatus([\"error\"])\n toolModel.notes = str(toolModel.name)+\" : no binary path setted\"\n logger.debug(\"Autoscan: Execute tool locally no bin path setted : \"+str(toolModel.name))\n sys.exit(1)\n comm = bin_path + \" \" + comm\n toolModel.updateInfos({\"cmdline\":comm})\n if \"timedout\" in toolModel.status:\n timeLimit = None\n # Get tool's wave time limit searching the wave intervals\n elif toolModel.wave == \"Custom commands\" or (local and not setTimer):\n timeLimit = None\n else:\n timeLimit = getWaveTimeLimit()\n # adjust timeLimit if the command has a lower timeout\n if command_dict is not None and timeLimit is not None:\n timeLimit = min(datetime.now()+timedelta(0, int(command_dict.get(\"timeout\", 0))), timeLimit)\n ##\n try:\n launchableToolId = toolModel.getId()\n name = worker_name\n toolModel.markAsRunning(name, infos)\n logger.debug(f\"Mark as running tool_iid {launchableToolId}\")\n logger.debug('Autoscan: TASK STARTED:'+toolModel.name)\n logger.debug(\"Autoscan: Will timeout at \"+str(timeLimit))\n print(('TASK STARTED:'+toolModel.name))\n print(\"Will timeout at \"+str(timeLimit))\n # Execute the command with a timeout\n returncode = utils.execute(comm, timeLimit, queue, queueResponse, cwd=outputDir)\n if returncode == -1:\n toolModel.setStatus([\"timedout\"])\n logger.debug(\"Autoscan: TOOL timedout at \"+str(timeLimit))\n sys.exit(-1)\n except Exception as e:\n print(str(e))\n toolModel.setStatus([\"error\"])\n logger.debug(\"Autoscan: TOOL error \"+str(e))\n return False, str(e)\n # Execute found plugin if there is one\n outputfile = outputPath+fileext\n plugin = \"auto-detect\" if command_dict[\"plugin\"] == \"\" else command_dict[\"plugin\"] \n msg = apiclient.importToolResult(toolId, plugin, outputfile)\n if msg != \"Success\":\n #toolModel.markAsNotDone()\n print(str(msg))\n toolModel.setStatus([\"error\"])\n logger.debug(\"Autoscan: import tool result error \"+str(msg))\n sys.exit(1)\n \n # Delay\n if command_dict is not None:\n print(msg)\n sys.exit(0)\n \ndef getWaveTimeLimit():\n \"\"\"\n Return the latest time limit in which this tool fits. 
The tool should timeout after that limit\n\n Returns:\n Return the latest time limit in which this tool fits.\n \"\"\"\n intervals = Interval.fetchObjects({})\n furthestTimeLimit = datetime.now()\n for intervalModel in intervals:\n if utils.fitNowTime(intervalModel.dated, intervalModel.datef):\n endingDate = intervalModel.getEndingDate()\n if endingDate is not None:\n if endingDate > furthestTimeLimit:\n furthestTimeLimit = endingDate\n return furthestTimeLimit\n\n\n","repo_name":"fbarre96/PollenisatorGUI","sub_path":"pollenisatorgui/autoscanworker.py","file_name":"autoscanworker.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13297136695","text":"import webbrowser\r\n\r\nclass Movie(object):\r\n \"\"\"\r\n This class provides a way to store\r\n movie-related information.\r\n \"\"\"\r\n\r\n def __init__(self, movie_title, movie_storyline, poster_image,\r\n trailer_youtube):\r\n \"\"\"\r\n Constructs an instance of the class Movie.\r\n :inputs: title, storyline, URL for the movie poster image,\r\n URL for the YouTube trailer\r\n :outputs: a new instance/object of the class Movie\r\n \"\"\"\r\n self.title = movie_title\r\n self.storyline = movie_storyline\r\n self.poster_image_url = poster_image\r\n self.trailer_youtube_url = trailer_youtube\r\n ","repo_name":"CodingMBA/movietrailers","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27621155419","text":"import dash\nfrom dash.dependencies import Input, Output\nfrom dash import dcc\nfrom dash import html\nimport pandas as pd\n\n# dashboard app\napp = dash.Dash('Naver Shopping Trend',\n external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css'])\n\n# app layout-> html 수정.\napp.layout = html.Div([\n dcc.Dropdown(\n id='my-dropdown',\n options=[\n # dropdown 메뉴 만들기!\n {'label': 'Apple', 'value': './naver_shopping(애플).xlsx'},\n {'label': 'Samsung', 'value': './naver_shopping(삼성전자).xlsx'},\n {'label': 'Xiaomi', 'value': './naver_shopping(샤오미).xlsx'}\n ],\n value='./naver_shopping(애플).xlsx' # 기본값 세팅하기\n ),\n dcc.Graph(id='my-graph')\n], style={'width': '600'})\n\n\n@app.callback(Output('my-graph', 'figure'), [Input('my-dropdown', 'value')])\n# dash가 실제로 실행하는 그래프를 update_graph 함수로 정의합니다.\ndef update_graph(selected_dropdown_value):\n # 내가 선택한 label에 해당하는 파일 이름\n df = pd.read_excel(selected_dropdown_value)\n return {\n 'data': [\n # dash가 보여줄 dashboard의 그래프를 dict 형태로 지정합니다.\n {'x': df.index,\n 'y': df[\"리뷰수\"]}\n\n ],\n 'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}}\n }\n\n\n# dash app이 실행됩니다.\n# app.run_server(debug=True, use_reloader=False)\napp.run_server(host='192.168.56.1', port=3003)","repo_name":"ella00100/data_analytics","sub_path":"naverShopping/dash_board.py","file_name":"dash_board.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31269831064","text":"from odoo import fields, models\n\n\nclass EstatePropertyTag(models.Model):\n _name = 'estate.property.tag'\n _description = 'Estate Property Type'\n # _sql_constraints = ['unique_property_tag', 'UNIQUE (name)', 'The property tag should be unique']\n\n _order = \"name\"\n\n name = fields.Char(required=True)\n color = fields.Integer(\"Color Index\", 
default=0)\n","repo_name":"sabirvirtuoso/OdooStarter","sub_path":"addons/estate/models/estate_property_tag.py","file_name":"estate_property_tag.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40225693730","text":"import sys\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom airflow import AirflowException\nfrom airflow.decorators import dag, task\nfrom airflow.providers.amazon.aws.hooks.s3 import S3Hook\n\n\nsys.path.extend([\"/home/ubuntu/airflow/dags/scrapy_crawler/\"])\nfrom scrapy_crawler import run_crawler\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': 'False',\n 'start_date': datetime(2023, 5, 29),\n 'email': ['airflow@example.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'schedule_interval': None,\n 'retry_delay': timedelta(seconds=20)\n}\n\nBUCKET = \"airflow-snowflake-data-pipeline\"\n\n@dag(default_args=default_args, catchup=False)\ndef data_crawler_dag():\n\n @task\n def start_crawler():\n file_date = run_crawler()\n\n return file_date\n\n @task\n def confirm_s3_data_loaded(file_date: str):\n\n # connect to s3\n s3_hook = S3Hook(aws_conn_id='s3_conn')\n\n # Read data\n data_status = s3_hook.check_for_key(\n key=f\"{file_date}.csv\", bucket_name=BUCKET\n )\n if data_status is True:\n print(f\"Object stored in s3: {file_date}\")\n else:\n raise AirflowException(\"Object not successfully loaded on S3\")\n\n confirm_s3_data_loaded(start_crawler())\n\ndata_crawler_dag()\n\n\n","repo_name":"priye-1/airflow_data_pipeline","sub_path":"dags/data_crawler_dag.py","file_name":"data_crawler_dag.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"34520911201","text":"\"\"\"CAVA_DATA_STACK Configs.\"\"\"\nfrom typing import Dict, Optional\n\nimport pydantic\n\n\nclass StackSettings(pydantic.BaseSettings):\n \"\"\"Application settings\"\"\"\n\n class Config:\n \"\"\"model config\"\"\"\n\n env_file = \".env\"\n env_prefix = \"CAVA_DATA_STACK_\"\n\n name: str = \"cava-data\"\n stage: str = \"production\"\n\n owner: Optional[str]\n project: str = \"CAVA\"\n\n vpc: Optional[str]\n security_group: Optional[str]\n user_role: Optional[str]\n\n # Stack environment\n region: str = \"us-west-2\"\n account_id: str = \"123556123145\"\n services_elb: str\n domain_name: Optional[str]\n certificate_arn: Optional[str]\n\n # Default options for cava-data service\n env: Dict = {\n \"DATA_QUEUE\": \"data-queue\",\n \"OOI_USERNAME\": \"XXXXXXXX\",\n \"OOI_TOKEN\": \"XXXXXXXXXXX\",\n \"REDIS_URI\": \"redis://localhost\",\n \"RABBITMQ_URI\": \"amqp://guest@localhost:5672//\",\n \"GOOGLE_SERVICE_JSON\": \"mybucket/service-json.json\"\n }\n is_sqs: bool = False\n\n\n ###########################################################################\n # AWS LAMBDA\n # The following settings only apply to AWS Lambda deployment\n # more about lambda config: https://www.sentiatechblog.com/aws-re-invent-2020-day-3-optimizing-lambda-cost-with-multi-threading\n timeout: int = 10\n memory: int = 1536\n\n # The maximum of concurrent executions you want to reserve for the function.\n # Default: - No specific limit - account limit.\n max_concurrent: Optional[int]\n\n @pydantic.root_validator\n def set_sqs(cls, values):\n env_values = values.get('env')\n rabbitmq_uri = env_values.get('RABBITMQ_URI')\n if not isinstance(rabbitmq_uri, str):\n raise 
TypeError(\"RABBITMQ_URI must be a string!\")\n \n if rabbitmq_uri.startswith('sqs://'):\n env_values.update({\n 'REGION': values.get('region'),\n })\n values.update({\n 'env': env_values,\n 'is_sqs': True\n })\n return values\n return values\n","repo_name":"cormorack/cava-data","sub_path":"resources/aws/cdk/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74515608807","text":"import functools\nimport typing\n\nimport schemata\n\n\n@functools.singledispatch\ndef get_rich(x, **k):\n if isinstance(x, schemata.Py[\"pandas.DataFrame\"]):\n return get_rich_pandas(x)\n return str(x)\n\n\n@get_rich.register\ndef get_rich_str(str: str, tree=None):\n if isinstance(str, schemata.base.ContentMediaType[\"text/markdown\"]):\n return get_rich_markdown(str)\n return str\n\n\n@get_rich.register\ndef get_rich_dict(dict: dict, tree=None):\n import rich.tree\n\n p = schemata.base.Properties.forms(dict)\n for k, v in p.items():\n if not isinstance(dict[k], p[k]):\n dict[v] = p[k](dict[k])\n\n a = schemata.base.AdditionalProperties.forms(dict)\n if a is not None:\n for k, v in dict.items():\n if k in p:\n continue\n dict[k] = a(dict[k])\n if tree is None:\n tree = rich.tree.Tree(get_rich(type(dict)))\n for key, value in dict.items():\n tree.add(rich.tree.Tree(label=key))\n tree.children[-1].add(get_rich(value))\n return tree\n\n\n@get_rich.register\ndef get_rich_list(list: list, tree=None):\n import rich.tree\n\n if all(isinstance(item, dict) for item in list):\n return get_rich_table(list)\n if tree is None:\n tree = rich.tree.Tree(str(type(list)))\n for item in list:\n tree.add(get_rich(item))\n return tree\n\n\ndef get_rich_table(list: typing.List[dict]):\n import rich.table\n\n table = rich.table.Table()\n columns: set = functools.reduce(set.union, map(set, list))\n for column in columns:\n table.add_column(column)\n\n for item in list:\n table.add_row(*map(get_rich, map(item.get, columns)))\n\n return table\n\n\ndef get_rich_markdown(x, **k):\n import rich.markdown\n\n return rich.markdown.Markdown(x)\n\n\ndef get_rich_code(str, **k):\n import rich.syntax\n\n return rich.syntax.Syntax(str, str.language, line_numbers=True)\n\n\ndef get_rich_pandas(df, table=None, **k):\n if table is None:\n table = rich.table.Table()\n indexes = df.index.names\n columns = indexes + list(df.columns)\n for column in columns:\n table.add_column(column)\n for index, row in df.iterrows():\n if not isinstance(index, tuple):\n index = (index,)\n data = dict(zip(list(indexes), map(\"[b]{}\".format, index)), **row)\n table.add_row(*map(get_rich, map(data.get, columns)))\n return table\n","repo_name":"deathbeds/schemata","sub_path":"src/schemata/compat/rich.py","file_name":"rich.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"32807644822","text":"\n# 입력값 받기\nn, m = map(int, input().split())\ncity = [list(map(int, input().split())) for _ in range (n)]\n\nchicken_list = []\nhouse_list = []\nchoose_chicken = []\nanswer = 100000\n\nfor i in range(n) :\n for j in range(n) :\n if city[i][j] == 2 :\n chicken_list.append((i, j))\n elif city[i][j] == 1 :\n house_list.append((i, j))\n\ndef dfs(depth, idx) :\n global answer\n\n if depth == m :\n sum = 0\n for i in range(house_list) :\n dist = 100000\n for j in range(choose_chicken) :\n temp = abs(i[0] - j[0]) + abs(i[1] - j[1])\n dist = min(dist, 
temp)\n sum += dist\n answer = min(answer, sum)\n\n for i in range(idx, len(chicken_list)) :\n if chicken_list[i] in choose_chicken :\n continue\n\n choose_chicken.append(chicken_list[i])\n dfs(depth + 1, idx + 1)\n choose_chicken.pop()\n\n\ndfs(0, 0)\nprint(answer)","repo_name":"uss96/CodingTest","sub_path":"삼전코테/구현/치킨 배달 (한 번 더 보기).py","file_name":"치킨 배달 (한 번 더 보기).py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27460320315","text":"#LIBRERÍAS\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport datetime\r\nimport plotly.express as px\r\nimport plotly.graph_objects as go\r\n\r\n\r\n##############################################################################\r\n#Webpage css colors: https://www.quackit.com/css/css_color_codes.cfm\r\n#Emojis HTML: https://www.w3schools.com/charsets/ref_emoji.asp\r\n\r\n#STREAMLIT CONTAINER CUSTOMIZE\r\nst.markdown(\r\n f\"\"\"\r\n\r\n\"\"\",\r\n unsafe_allow_html=True,\r\n )\r\n#BOTON 'OBTENER RESULTADOS' CUSTOMIZE\r\nst.markdown(f\"\"\"\r\n\t\r\n\t\"\"\",unsafe_allow_html=True,)\r\n\r\n#TITULO APP CUSTOMIZE\r\nst.subheader('¡Bienvenido! :earth_americas: :sunny: :wind_blowing_face:')\r\nTitle_html = \"\"\"\r\n\t\r\n\t
\r\n\t
\r\n\t RenewablesAPP.🇵🇪\r\n\t
\r\n\t
\r\n\t\"\"\"\r\nst.markdown(Title_html, unsafe_allow_html=True) #Title rendering\r\n\r\n\r\n##############################################################################\r\n#CONTAINERS\r\nheader_container = st.beta_container()\r\ndata_container = st.beta_container()\r\nuser_container = st.beta_container()\r\ntable_container = st.beta_container()\r\ngraph_container = st.beta_container()\r\n\r\n#######################################\r\nwith header_container:\r\n#CABECERA\r\n\tst.write('Aplicación web (en fase βeta) para mostrar data relevante de las centrales eólicas y solares en el Perú')\r\n\tst.image('images/renewables.jpeg', width=600, caption='Energía limpia para un futuro energético sostenible')\r\n\tst.write('')\r\n\r\n#######################################\r\nwith data_container:\r\n#CARGANDO DATOS DESDE .CSV\r\n\t\r\n\t@st.cache\r\n\tdef load_coes_data():\r\n\t\tdf = pd.read_csv('data/databasecoesrer.csv', parse_dates=[0], index_col=0, dayfirst=1)\r\n\t\treturn df\r\n\r\n\tdata_total = load_coes_data()\r\n\r\n\t# #PRUEBA CONVERTIR FORMATO DATE!!!!\r\n\t# data_total.reset_index(inplace=True)\r\n\t# st.write(data_total.head())\r\n\r\n\t# data_total['Fecha'] = pd.to_datetime(data_total['Fecha']).dt.strftime('%Y-%m-%dT%H:%M%:%SZ')\r\n\t# data_total['Fecha'] = pd.to_datetime(data_total['Fecha'])\r\n\t# data_total.set_index(\"Fecha\", inplace = True)\r\n\t# st.write(type(data_total.index))\r\n\t# st.write(data_total.head())\r\n\r\n\tdata_empty = pd.DataFrame(columns=data_total.columns) #Para la tabla por defecto\r\n\tdata_selected = data_empty\r\n\r\n\t#st.write(data_total.style.apply())\r\n\r\n\t# data_total.format(formatter=lambda x: x.strftime('%Y:%m:%d %H:%M'))\r\n#######################################\r\n\tdef header(url):\r\n\t st.markdown(f'

{url}

', unsafe_allow_html=True)\r\n\r\n\theader('1. Seleccione el tipo de tecnología renovable y un rango de fechas')\r\n\r\nwith user_container:\r\n\r\n\tst.write('Ingresa tus requerimientos en los siguientes campos y dar click al botón: ⚡GO RER⚡')\r\n\r\n\tcol1, col2 =st.beta_columns(2)\r\n\r\n#SELECTCBOX: SELECCIONAR TIPO DE TECNOLOGÍA\r\n\r\n\tcol1.subheader('1.1. Tipo de tecnología renovable')\r\n\r\n\tchart_visual = col1.selectbox('Seleccione el tipo de tecnología o una central RER individual:', \r\n\t\t('', 'Central RER individual', 'Centrales eólicas', 'Centrales solares', \r\n\t\t'Centrales solares y centrales eólicas (ambas)'))\r\n\r\n\tcount = 1\r\n\tchecker = False\r\n\tif chart_visual == 'Central RER individual':\r\n\t\tchart_visual = col1.selectbox('Escoja la central renovable:', [''] + data_total.columns.tolist())\r\n\t\tif chart_visual == '':\r\n\t\t\tcol1.warning(\"Por favor, seleccione una central renovable.\")\r\n\t\tfor i in data_total.columns:\r\n\t\t\tif chart_visual == i:\r\n\t\t\t\tdata_selected = data_total.copy()[i]\r\n\t\t\t\tcol1.success(\"¡Correcto!:heavy_check_mark:\")\r\n\t\t\t\tchecker = True\r\n\r\n\telif chart_visual == 'Centrales eólicas':\r\n\t\tdata_selected = data_total.copy().iloc[:, :7]\r\n\t\tfor i in data_selected.columns:\r\n\t\t\tcol1.text('{0}. {1}'.format(count, i))\r\n\t\t\tcount+=1\r\n\t\tcol1.success(\"¡Correcto!:heavy_check_mark:\")\r\n\t\tchecker = True\r\n\r\n\telif chart_visual == 'Centrales solares':\r\n\t\tdata_selected = data_total.copy().iloc[:, 7:]\r\n\t\tfor i in data_selected.columns:\r\n\t\t\tcol1.text('{0}. {1}'.format(count, i))\r\n\t\t\tcount+=1\r\n\t\tcol1.success(\"¡Correcto!:heavy_check_mark:\")\r\n\t\tchecker = True\r\n\r\n\telif chart_visual == 'Centrales solares y centrales eólicas (ambas)':\r\n\t\tdata_selected = data_total.copy()\r\n\t\tfor i in data_selected.columns:\r\n\t\t\tcol1.text('{0}. 
{1}'.format(count, i))\r\n\t\t\tcount+=1\r\n\t\tcol1.success(\"¡Correcto!:heavy_check_mark:\")\r\n\t\tchecker = True\r\n\r\n\telif chart_visual == '':\r\n\t\tcol1.warning(\"Por favor, seleccione el 'tipo de tecnología'\")\r\n\t\tchecker = False\r\n\r\n\r\n#CALENDARIO: SELECCIONAR RANGO DE FECHAS\r\n\tstart_date = datetime.datetime(2011, 1, 2) #start_date declarado arbitrariamente para no lanzar error en el calendario.\r\n\tend_date = datetime.datetime(2011, 1, 1) #end_date declarado arbitrariamente para no lanzar error en el calendario\r\n\r\n\tcol2.subheader(\"1.2 Rango de fechas\")\r\n\ttry:\r\n\t\tstart_date, end_date = col2.date_input('Seleccione una fecha inicial y una fecha final:', \r\n\t\t\tvalue=(datetime.datetime(2020, 12, 31), datetime.datetime(2020, 12, 31)),\r\n\t\t\tmin_value=datetime.datetime(2012, 7, 6), max_value=datetime.datetime.now())\r\n\t\t\r\n\t\tif start_date=end_date and checker==False:\r\n\t\t\tst.error(\"Error: Por favor, seleccione correctamente el 'tipo de tecnología' y el 'rango de fechas'.\")\r\n\t\t\trepeat()\r\n\r\n\t\telif press_button==True and start_date=end_date and checker==True:\r\n\t\t\tst.error(\"Error: Por favor, seleccione correctamente el 'rango de fechas'.\")\r\n\t\t\trepeat()\r\n\r\n\t\telse:\r\n\t\t\trepeat()\r\n\r\n\texcept:\r\n\t\t\tpass\r\n","repo_name":"KevinAQM/renewables-peru-app","sub_path":"coes_streamlit.py","file_name":"coes_streamlit.py","file_ext":"py","file_size_in_byte":8585,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27994519349","text":"import tensorflow as tf\nfrom unittest import TestCase\n\nfrom ctgan.utils import get_test_variables\nfrom ctgan.losses import gradient_penalty\n\n\nclass TestGradientPenalty(TestCase):\n def setUp(self):\n self._vars = get_test_variables()\n\n def tearDown(self):\n del self._vars\n\n def test_gradient_penalty(self):\n tf.random.set_seed(0)\n real = tf.random.uniform(\n [self._vars['batch_size'], self._vars['input_dim']])\n fake = tf.random.uniform(\n [self._vars['batch_size'], self._vars['input_dim']])\n\n gp = gradient_penalty(\n lambda x: x**2, real, fake,\n pac=self._vars['pac'], gp_lambda=self._vars['gp_lambda'])\n expected_output = tf.constant(1002.7697, dtype=tf.float32)\n tf.assert_equal(gp, expected_output)\n","repo_name":"ljk423/ctgan-tf","sub_path":"ctgan/losses/tests/test_gradient_penalty.py","file_name":"test_gradient_penalty.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"10607295357","text":"#!/usr/bin/env python3\n#-*- coding:utf-8 -*-\n\nimport smtplib\nimport argparse\n\nargparser = argparse.ArgumentParser(description=\"\")\nargparser.add_argument('--host', '-s', dest='host', help=\"SMTP host\", default='mail.protonmail.ch')\nargparser.add_argument('--port', '-p', dest='port', help=\"SMTP port\", default=25)\nargparser.add_argument('--from', '-f', dest='fromAddr', help=\"From\", default=False)\nargparser.add_argument('--append', '-a', dest='append', help=\"Append\", default=False)\nargparser.add_argument('--to', '-t', dest='toAddr', help=\"To\", default=False)\nargparser.add_argument('file', help=\"File with message\")\nargparser.add_argument('--debug', dest='debug', help=\"Print message\", default=False, action='store_true')\nargs = argparser.parse_args()\n\nwith open(args.file, \"rb\") as f:\n msg = f.read().decode('utf-8')\n\n if args.append:\n if args.toAddr:\n msg = (\"To: 
{}\\r\\n{}\".format(args.toAddr, msg))\n if args.fromAddr:\n msg = (\"From: {}\\r\\n{}\".format(args.fromAddr, msg))\n \n if args.debug:\n print(msg)\n\n server = smtplib.SMTP(args.host, args.port)\n server.starttls()\n\n if args.debug:\n \tserver.set_debuglevel(1)\n\n server.sendmail(args.fromAddr, args.toAddr, msg.encode('utf-8'))\n server.quit()\n","repo_name":"exander77/mailtool.py","sub_path":"protondeliver.py","file_name":"protondeliver.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"gu","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25254334643","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom . import views\n\nrouter = DefaultRouter()\n\n# router\nrouter.register(r'presentprojects', views.PresentProjectViewSet, basename='presentprojects')\nrouter.register(r'crudprojects', views.CRUDProjectViewSet, basename='crudprojects')\nrouter.register(r'presenttodos', views.PresentToDoViewSet, basename='presenttodos')\nrouter.register(r'crudtodos', views.CRUDToDoViewSet, basename='crudtodos')\n\nurlpatterns = [\n # API\n path('api/v1/', include(router.urls)),\n]\n","repo_name":"FidelSol/DRFGeekBrains","sub_path":"DRF/ToDo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34036813488","text":"import json\nfrom google.cloud import firestore\nimport pprint\n\n\ndef get_filtered_user(request):\n\n request_json = request.get_json()\n if request.args and 'name' in request.args:\n request_name = request.args.get('name')\n elif request_json and 'name' in request_json:\n request_name = request_json['name']\n else:\n request_name = ''\n\n db = firestore.Client()\n\n query = db.collection('user').where('name', '==', request_name)\n docs = query.get()\n users_list = []\n for doc in docs:\n users_list.append(doc.to_dict())\n return_json = json.dumps({\"users\": users_list}, ensure_ascii=False)\n\n return return_json\n\n\nif __name__ == \"__main__\":\n query = {}\n request = {}\n request = {\"name\": \"Alice\"}\n obj = DevRequest(query=query, params=request,method='POST')\n r = get_user(obj)\n pprint.pprint(r)\n","repo_name":"ny7760/simple-cf-firestore-api","sub_path":"functions/getFilteredUser.py","file_name":"getFilteredUser.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6653108339","text":"from time import strftime\n\nfrom boardgamegeek import BoardGameGeekAPIError\n\n\ndef retry_if_attribute_error(exception):\n \"\"\"Return True if we should retry (in this case when it's an IOError), False otherwise\"\"\"\n print(\" {} - URL timed out, so I'm going to retry.\"\n .format(strftime('%d %b, %H:%M:%S')))\n if isinstance(exception, AttributeError) or isinstance(exception, BoardGameGeekAPIError):\n return True\n else:\n print(\" Encountered a new error: {}\".format(exception))\n return False\n","repo_name":"tijlk/boardgamegeek-scraper","sub_path":"src/data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21898928175","text":"import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\nfrom datetime import date, timedelta\nfrom tensorflow.keras.layers import Dense, Input, LSTM\nfrom tensorflow.keras.layers 
import TimeDistributed, Conv1D\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom capm import stock_prices\nimport matplotlib.pyplot as plt\nfrom pkg_resources import resource_filename\n\nn_step = 300\nn_step_ahead = 56\nbatch_size = 32\n\n\ndef last_friday(day: str) -> str:\n d = date.fromisoformat(day)\n shift = (d.weekday() - 4) % 7\n shift = shift + 7 if shift < 0 else shift\n return (d - timedelta(days=shift)).isoformat()\n\n\ndef start_day(end_day, n_days):\n return (\n date.fromisoformat(end_day)\n - timedelta(days=(n_days + n_step + n_step_ahead))\n ).isoformat()\n\n\ndef scalar_train_set(\n prices: pd.Series, n_12weeks: int,\n to_day: str) -> tuple:\n n_day = n_12weeks * batch_size\n from_day = start_day(to_day, n_12weeks)\n price_arr = (prices[from_day:to_day]\n .values.astype(np.float64))\n\n # scale\n sc = MinMaxScaler(feature_range=(0, 1))\n price_arr_scaled = sc.fit_transform(price_arr)\n\n X_train = []\n y_train = []\n for i in range(n_step, n_day + n_step):\n X_train.append(price_arr_scaled[i-n_step:i, 0])\n y_train.append(price_arr_scaled[i:i+n_step_ahead, 0])\n X_train = (np.array(X_train)\n .reshape((n_day, n_step, 1)))\n y_train = (np.array(y_train)\n .reshape((n_day, n_step_ahead, 1)))\n return X_train, y_train, sc\n\n\ndef vector_train_set(\n prices: pd.Series, n_12weeks: int,\n to_day: str) -> tuple:\n n_day = n_12weeks * batch_size\n from_day = start_day(to_day, n_day)\n price_arr = (prices[from_day:to_day]\n .values.astype(np.float64))\n\n # scale\n sc = MinMaxScaler(feature_range=(0, 1))\n price_arr_scaled = sc.fit_transform(price_arr)\n\n X_train = np.empty((n_day, n_step, 1))\n y_train = np.empty((n_day, n_step, n_step_ahead))\n for i in range(0, n_day):\n X_train[i, :] = price_arr_scaled[i:i + n_step]\n for step_ahead in range(1, n_step_ahead + 1):\n y_train[i, :, step_ahead-1] = (\n price_arr_scaled[\n i+step_ahead:i+step_ahead+n_step, 0\n ]\n )\n\n return X_train, y_train, sc\n\n\ndef train_dataset(\n prices: pd.Series, n_12weeks: int,\n to_day: str) -> tuple:\n\n n_day = n_12weeks * batch_size\n from_day = start_day(to_day, n_12weeks)\n price_arr = (prices[from_day:to_day]\n .values.astype(np.float64))\n\n # scale\n sc = MinMaxScaler(feature_range=(0, 1))\n price_arr_scaled = sc.fit_transform(price_arr)\n\n dataset = tf.data.Dataset.from_tensor_slices(price_arr_scaled)\n window_length = n_step + n_step_ahead\n dataset = dataset.window(window_length, shift=1,\n drop_remainder=True)\n dataset = dataset.flat_map(lambda window: window.batch(window_length))\n dataset = dataset.batch(batch_size)\n\n dataset = dataset.map(lambda windows:\n (windows[:, :-n_step_ahead],\n windows[:, -n_step_ahead:]))\n dataset = dataset.prefetch(1)\n\n return dataset, sc\n\n\ndef lstm_vector() -> keras.models.Model:\n inputs = keras.layers.Input(shape=(None, 1))\n lstm_1 = keras.layers.LSTM(20, return_sequences=True)(inputs)\n lstm_2 = keras.layers.LSTM(20, return_sequences=True)(lstm_1)\n outputs = keras.layers.TimeDistributed(\n Dense(n_step_ahead))(lstm_2)\n\n return keras.models.Model(inputs=inputs, outputs=outputs)\n\n\ndef lstm_vector_conv() -> keras.models.Model:\n inputs = keras.layers.Input(shape=(None, 1))\n conv_1 = keras.layers.Conv1D(filters=20, kernel_size=4, strides=2,\n padding='valid')(inputs)\n lstm_1 = keras.layers.LSTM(\n 20, return_sequences=True,\n dropout=0.2,\n recurrent_dropout=0.2\n )(conv_1)\n lstm_2 = keras.layers.LSTM(\n 20, return_sequences=True,\n 
dropout=0.2,\n recurrent_dropout=0.2\n )(lstm_1)\n outputs = keras.layers.TimeDistributed(\n Dense(n_step_ahead)\n )(lstm_2)\n\n return keras.models.Model(inputs=inputs, outputs=outputs)\n\n\nif __name__ == '__main__':\n to_day = '2021-04-15'\n friday = last_friday(to_day)\n n_12weeks = 6\n from_day = start_day(friday, n_12weeks)\n prices = stock_prices(['SPY'])\n prices = prices.rolling(window=7).mean()\n prices.loc[:, 'SPY'] = (\n np.sin(np.arange(len(prices)) / (2*np.pi))\n + np.sin(np.arange(len(prices)) / (5*np.pi))\n + np.sin(np.arange(len(prices)) / (20*np.pi))\n )\n\n X_train, y_train, sc = vector_train_set(prices, n_12weeks, friday)\n y_train = y_train[:, 3::2, :]\n model = lstm_vector_conv()\n print(model.summary())\n\n optimizer = Adam()\n model.compile(optimizer=optimizer, loss='mae')\n model.fit(X_train, y_train, epochs=200, batch_size=batch_size)\n\n fname = resource_filename('resources', 'rnn_lstm.h5')\n model.save(fname, save_format='h5')\n\n X_test = prices.values[-n_step-n_step_ahead:-n_step_ahead]\n y_test = prices.values[-n_step_ahead:]\n X_test_scaled = sc.transform(X_test)\n X_test_scaled = X_test_scaled.reshape(1, -1, 1)\n y_test_scaled = sc.transform(y_test)\n y_hat = model.predict(X_test_scaled)\n\n y_test = y_test[3::2]\n\n price_test = np.hstack((X_test_scaled[0, :, 0], y_test_scaled[:, 0]))\n price_preds = [np.hstack((X_test_scaled[0, :, 0], y_hat[0, i, :]))\n for i in range(y_hat.shape[1])]\n\n plt.plot(price_test, label='test')\n plt.plot(price_preds[-1], label='pred')\n plt.legend()\n plt.show()\n\n","repo_name":"afeborgeaud/equityboard","sub_path":"equityboard/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24069933453","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 1 13:38:42 2015\n\n@author: lc585\n\nPlot spectrum. 
Can deal with both linear and log-linear wavelength scales.\n\nPlots emission lines, and if linear plots the 2D spectrum on top.\n\nIf want rest-frame spectrum then rest=True\n\nThis is un-tested and is almost definitely broken\n\nTo do:\nAdd way to mask bits out\nAdd way to give dictionary of line names and wavelengths to plot\nFix 2D spectrum\nRemove spectrum specific values\n\n\"\"\"\n\nfrom SpectraTools.get_wavelength import get_wavelength\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\n# import seaborn as sns\nfrom rebin import rebin\nimport os \nfrom SpectraTools.range_from_zscale import range_from_zscale\n\ndef plot_spectrum(wav, \n flux,\n er = None,\n file2d = None,\n z = 0.,\n lheight = 1.0,\n rest = False,\n fout = None,\n maskout = False,\n rebin = 1,\n weightedrebin = False,\n sigmaclip = False):\n\n fig = plt.figure()\n sns.set_style(\"ticks\")\n\n box1d = [0.1, 0.1, 0.8, 0.7]\n ax1d = fig.add_axes(box1d, xlabel = r'Wavelength $(\\AA)$')\n\n if rest:\n wav = wav / (1 + z)\n\n wav, flux, er = rebin(wav,\n flux,\n er=er,\n n=rebin,\n weighted=weightedrebin)\n\n\n i1 = np.argmin( np.abs( wavelength - 15087.0 ))\n i2 = np.argmin( np.abs( wavelength - 17975.0 ))\n i3 = np.argmin( np.abs( wavelength - 19672.0 ))\n\n if sigmaclip:\n #######################################################################\n \"\"\"\n Use a median filter to smooth out single-pixel deviations.\n Then use sigma-clipping to remove large variations between the\n actual and smoothed image.\n\n What about y_sigma?\n \"\"\"\n\n spectrum_sm = medfilt(spectrum, 5)\n sigma = np.median(er)\n bad = np.abs(spectrum - spectrum_sm) / sigma > 8.0\n spectrum_cr = spectrum.copy()\n spectrum_cr[bad] = spectrum_sm[bad] # replace bad pixels with median values\n\n else:\n spectrum_cr = spectrum\n\n\n # Need to fix this to de-emphasise masked out regions\n\n# ax1d.plot(wavelength[i1:i2], spectrum_cr[i1:i2], color='black', lw=1)\n# ax1d.plot(wavelength[i3:], spectrum_cr[i3:], color='black', lw=1)\n# ax1d.plot(wavelength[:i1], spectrum_cr[:i1], lw=1, color= sns.xkcd_rgb[\"light grey\"])\n# ax1d.plot(wavelength[i2:i3], spectrum_cr[i2:i3], lw=1, color= sns.xkcd_rgb[\"light grey\"])\n\n\n linenames = [r'Ly$\\alpha$',\n r'CIV',\n r'CIII',\n r'H$\\beta$',\n r'OIII',\n r'H$\\alpha$',\n '']\n\n lines = [1216.0,\n 1549.0,\n 1909.0,\n 4861.0,\n 5008.24,\n 6562.8,\n 4960.295]\n\n for i in range(len(lines)):\n # positions of spectral lines in the plot\n linewav = (1 + z) * lines[i]\n\n ax1d.axvline(linewav, color = 'grey', ls = '--')\n\n # annotate names:\n if nameSDSS is not None:\n s = fluxSDSS\n else:\n s = np.concatenate( (spectrum_cr[i1:i2],spectrum_cr[i3:]) )\n\n height = lheight * max(s) + np.median(s) * (1. 
+ (-1) ** i / 4.)\n ax1d.annotate(linenames[i], xy = (linewav + 50, height), color = 'r')\n\n if nameSDSS is not None:\n ax1d.set_xlim(min(wavelengthSDSS)-50,max(wavelength)+50)\n else:\n ax1d.set_xlim(min(wavelength)-50,max(wavelength)+50)\n\n\n if file2d is not None:\n\n # 2D spectrum\n\n hdulist = fits.open(file2d)\n img = hdulist[0].data\n hdulist.close()\n\n img = img[325:475,:]\n z1, z2, iteration = range_from_zscale(img)\n\n if hdr['CRVAL1'] < 10.0:\n\n newimg = np.zeros((np.shape(img)[0],len(wavelength)))\n wav2d = np.linspace(wavelength.min(),wavelength.max(),1024)\n\n for j in range(np.shape(img)[0]):\n f = interp1d( wav2d, img[j,:])\n newimg[j,:] = f(wavelength)\n\n height = 0.001 * img.shape[0]\n box2d = [0.1, 0.9 - height / 2, 0.8, height]\n ax2d = fig.add_axes(box2d, yticks = [])\n ax2d.imshow(img, aspect = 'auto', vmin = z1, vmax = z2, cmap = cm.Greys_r)\n setp( ax2d.get_xticklabels(), visible=False)\n\n\n ax1d.set_ylim( np.min( np.concatenate( (spectrum_cr[i1:i2],spectrum_cr[i3:]) )) - 1.0 * np.std( np.concatenate( (spectrum_cr[i1:i2],spectrum_cr[i3:]) ) ),\n np.max( np.concatenate( (spectrum_cr[i1:i2],spectrum_cr[i3:]) )) + 1.5 * np.std( np.concatenate( (spectrum_cr[i1:i2],spectrum_cr[i3:]) ) ))\n\n if maskout == True:\n ax1d.add_patch(Rectangle((wavelength.min(), ax1d.get_ylim()[0]), 15087.0 - wavelength.min() , ax1d.get_ylim()[1] - ax1d.get_ylim()[0], facecolor='grey',edgecolor='None'))\n ax1d.add_patch(Rectangle((17975.0, ax1d.get_ylim()[0]), 19672.0 - 17975.0, ax1d.get_ylim()[1] - ax1d.get_ylim()[0], facecolor='grey',edgecolor='None'))\n\n\n\n if fout is not None:\n plt.savefig(fout)\n else:\n plt.show()\n\n plt.close()\n\n return None\n\ndef plot_spectrum_2d(name):\n\n fig, ax = plt.subplots()\n\n # fname = os.path.join('/data/lc585/WHT_20150331/spec_150311/',name+'_LR','imcomb.fit')\n fname = name\n\n if os.path.exists(fname):\n hdulist = fits.open(fname)\n data = hdulist[0].data\n hdulist.close()\n\n z1, z2, iteration = range_from_zscale(data)\n\n ax.imshow(data,cmap='gray',vmin=z1,vmax=z2)\n\n fig.gca().invert_yaxis()\n fig.tight_layout()\n\n else:\n\n ax.text(0.4,0.5,'No Spectrum')\n\n # fig.savefig( fname.replace('imcombHR.fit', '2D_HR.png') )\n # plt.close()\n\n return None\n\nif __name__ == '__main__':\n plot_spectrum_2d('/data/lc585/NTT_Hennawi_Survey/Redux/Sept2011/H/Science/0059-4110_0/sci-SOFI.2011-09-23T06:23:27.151-SOFI.2011-09-23T06:44:47.765.fits')\n plt.savefig('/data/lc585/NTT_Coatman/Redux/example1.png')\n","repo_name":"liamcoatman/SpectraTools","sub_path":"plot_spectrum.py","file_name":"plot_spectrum.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"3492060083","text":"from __future__ import annotations\n\nimport polars as pl\nimport pandas as pd\nfrom data import WallboxesColumns\n\nfrom statsmodels.tsa.arima.model import ARIMA\n\n\ndef forecast_n_days(\n accumulated: pl.DataFrame, model: dict, DAYS_IN_FUTURE: int, dates: list\n):\n model_name = list(model.keys())[0]\n best_fit = list(model.values())[0]\n curr_model = ARIMA(\n accumulated.select(model_name).to_numpy(),\n order=best_fit,\n ).fit()\n\n forecast = curr_model.predict(\n start=len(accumulated.select(model_name).to_numpy()),\n end=len(accumulated.select(model_name).to_numpy()) + DAYS_IN_FUTURE,\n )\n\n print(f\"Forecast for {model_name}:\")\n print(forecast)\n\n\ndef forecast_total_power(df: pl.DataFrame) -> None:\n DAYS_IN_FUTURE = 7\n\n accumulated = df.with_columns(\n pl.fold(\n 
acc=pl.lit(0),\n function=lambda acc, col: acc + col,\n exprs=[\n WallboxesColumns.KEBA_ONE,\n WallboxesColumns.KEBA_THREE,\n WallboxesColumns.KEBA_TWO,\n WallboxesColumns.LADEBOX_ONE,\n WallboxesColumns.LADEBOX_TWO,\n WallboxesColumns.LADEBOX_THREE,\n WallboxesColumns.DELTA,\n WallboxesColumns.RAPTION,\n ],\n ).alias(\"Total Power\"),\n ).select(\n [\n WallboxesColumns.TIMESTAMP,\n \"Total Power\",\n WallboxesColumns.KEBA_ONE,\n WallboxesColumns.KEBA_THREE,\n WallboxesColumns.KEBA_TWO,\n WallboxesColumns.LADEBOX_ONE,\n WallboxesColumns.LADEBOX_TWO,\n WallboxesColumns.LADEBOX_THREE,\n WallboxesColumns.DELTA,\n WallboxesColumns.RAPTION,\n ]\n )\n\n best_arima_models = [\n {\"Total Power\": (3, 1, 3)},\n {WallboxesColumns.KEBA_ONE: (1, 1, 0)},\n {WallboxesColumns.KEBA_THREE: (1, 1, 2)},\n {WallboxesColumns.KEBA_TWO: (4, 1, 4)},\n {WallboxesColumns.LADEBOX_ONE: (0, 1, 0)},\n {WallboxesColumns.LADEBOX_TWO: (0, 1, 1)},\n {WallboxesColumns.LADEBOX_THREE: (0, 1, 1)},\n {WallboxesColumns.DELTA: (0, 1, 0)},\n {WallboxesColumns.RAPTION: (2, 0, 4)},\n ]\n\n last_date = accumulated.select(\"Timestamp\").to_numpy()\n last_date = last_date[-1][0]\n dates = pd.date_range(last_date, periods=DAYS_IN_FUTURE + 1)\n\n for model in best_arima_models:\n forecast_n_days(accumulated, model, DAYS_IN_FUTURE, dates)\n","repo_name":"BertoldVinczeIMC/ML-Capstone-E-Mobility","sub_path":"discarded_approaches/ARIMA.py","file_name":"ARIMA.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12913766396","text":"import os\nimport numpy as np\nimport zipfile\n#import PIL.Image\nimport json\nimport torch\nimport utils.dnnlib as dnnlib\nimport random\nimport pandas as pd\nimport glob\nimport soundfile as sf\n\n#try:\n# import pyspng\n#except ImportError:\n# pyspng = None\n\n#----------------------------------------------------------------------------\n# Dataset subclass that loads images recursively from the specified directory\n# or ZIP file.\nclass AudioFolderDataset(torch.utils.data.IterableDataset):\n def __init__(self,\n dset_args,\n fs=44100,\n seg_len=131072,\n overfit=False,\n seed=42 ):\n self.overfit=overfit\n\n super().__init__()\n random.seed(seed)\n np.random.seed(seed)\n path=dset_args.path\n\n filelist=glob.glob(os.path.join(path,\"*.wav\"))\n assert len(filelist)>0 , \"error in dataloading: empty or nonexistent folder\"\n\n self.train_samples=filelist\n \n self.seg_len=int(seg_len)\n self.fs=fs\n if self.overfit:\n file=self.train_samples[0]\n data, samplerate = sf.read(file)\n if len(data.shape)>1 :\n data=np.mean(data,axis=1)\n self.overfit_sample=data[10*samplerate:60*samplerate] #use only 50s\n\n def __iter__(self):\n if self.overfit:\n data_clean=self.overfit_sample\n while True:\n if not self.overfit:\n num=random.randint(0,len(self.train_samples)-1)\n #for file in self.train_samples: \n file=self.train_samples[num]\n data, samplerate = sf.read(file)\n assert(samplerate==self.fs, \"wrong sampling rate\")\n data_clean=data\n #Stereo to mono\n if len(data.shape)>1 :\n data_clean=np.mean(data_clean,axis=1)\n \n #normalize\n #no normalization!!\n #data_clean=data_clean/np.max(np.abs(data_clean))\n \n #framify data clean files\n num_frames=np.floor(len(data_clean)/self.seg_len) \n \n #if num_frames>4:\n for i in range(8):\n #get 8 random batches to be a bit faster\n if not self.overfit:\n idx=np.random.randint(0,len(data_clean)-self.seg_len)\n else:\n idx=0\n segment=data_clean[idx:idx+self.seg_len]\n 
segment=segment.astype('float32')\n #b=np.mean(np.abs(segment))\n #segment= (10/(b*np.sqrt(2)))*segment #default rms of 0.1. Is this scaling correct??\n \n #let's make this shit a bit robust to input scale\n #scale=np.random.uniform(1.75,2.25)\n #this way I estimage sigma_data (after pre_emph) to be around 1\n \n #segment=10.0**(scale) *segment\n yield segment\n #else:\n # pass\n\n\n","repo_name":"eloimoliner/audio-inpainting-diffusion","sub_path":"datasets/audiofolder.py","file_name":"audiofolder.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"66"} +{"seq_id":"6301461251","text":"\"\"\"\nCore module for class Car and Car Controller\n\"\"\"\nimport numpy as np\nfrom numpy.linalg import norm\nfrom traffic.vecops import is_opposite, is_same_direction\nclass Car(object):\n \"\"\"\n Car object. Does self updating of state, position, velocity.\n \"\"\"\n def __init__(self, position=np.array([0, 0], 'float'), \\\n velocity=np.array([0, 0], 'float'), \\\n acceleration=np.array([0, 0], 'float')):\n self.position = position\n self.velocity = velocity\n self.acceleration = acceleration\n\n def update(self, delta_t=10):\n \"\"\"\n Updates state and pos, vel vectors\n \"\"\"\n state = self.state\n if state == 'STOP':\n self.acceleration = np.array([0, 0], 'float')\n self.velocity = np.array([0, 0], 'float')\n return #no point in running more calculations for this cycle.\n \n delta_t = delta_t/1000.0 #from Milliseconds to Seconds\n delta_v = self.acceleration * delta_t\n self.velocity += delta_v\n delta_p = self.velocity * delta_t\n self.position += delta_p\n ret = np.append(self.position, self.velocity)\n ret = np.append(ret, self.acceleration)\n return ret\n\n\n def __unicode__(self):\n return \"Car: %s, %s, %s\" % (self.position, \\\n self.velocity, \\\n self.acceleration)\n \n def __str__(self):\n return self.__unicode__()\n \n @property\n def state(self):\n \"\"\"\n Gets the state of the Car object. \n Possible values are: \n 'STOP' (car stopped), \n 'SLOWDOW' (car is velocity decreasing),\n 'CRUISE' (car at constant velocity) and \n 'SPEEDUP' (car velocity increasing)\n \"\"\"\n m_vel = norm(self.velocity)\n m_acc = norm(self.acceleration)\n if m_vel <= 0 and self.acceleration <= 0: #we never go backwards\n return 'STOP'\n \n if self.is_decelerating():\n return 'SLOWDOWN'\n \n if m_acc == 0:\n return 'CRUISE'\n \n if self.is_accelerating():\n return 'SPEEDUP'\n\n raise Exception(\"Bad state in Car! 
IS_ACCCEL: %s, \\\n IS_DECEL:%s, \\\n STATE:%s\" % (self.is_accelerating(), \\\n self.is_decelerating(), \\\n self))\n\n def turn(self, direction):\n \"\"\"\n Turn the car in one of the cardinal directions.\n `direction` values can be one of:\n 'NORTH',\n 'SOUTH',\n 'EAST' or\n 'WEST'\n \"\"\"\n vel_mag = norm(self.velocity)\n acc_mag = norm(self.acceleration)\n direction_map = {\n 'NORTH': np.array([0, 1], 'float'),\n 'SOUTH': np.array([0, -1], 'float'),\n 'EAST' : np.array([1, 0], 'float'),\n 'WEST' : np.array([-1, 0], 'float')\n }\n direction_vector = None\n if direction not in direction_map:\n direction_vector = direction # assume it's a \n # 2d vector that was passed in\n else:\n direction_vector = direction_map[direction]\n\n self.velocity = direction_vector * vel_mag\n self.acceleration = direction_vector * acc_mag\n\n return (self.velocity, self.acceleration)\n\n\n def is_decelerating(self):\n \"\"\"\n Returns true when acceleration vector is pointing in antiparalell \n direction of velocity vector\n \"\"\"\n return is_opposite(self.velocity, self.acceleration)\n\n def is_accelerating(self):\n \"\"\"\n Returns true when acceleration vector is pointing in paralell \n direction of velocity vector, and acceleration is positive.\n \"\"\"\n m_acc = norm(self.acceleration)\n return m_acc > 0 and is_same_direction(self.velocity, self.acceleration)\n\n","repo_name":"adewinter/traffic","sub_path":"traffic/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"33603080870","text":"import logging\n\nfrom disnake import Emoji, Guild\nfrom disnake.ext import commands, tasks\n\nimport logsnake\nfrom disco_snake import LOG_FORMAT, LOGDIR_PATH, DATADIR_PATH\nfrom disco_snake.bot import DiscoSnake\n\nCOG_UID = \"emojus\"\n\n\nlogger = logsnake.setup_logger(\n name=COG_UID,\n level=logging.DEBUG,\n isRootLogger=False,\n formatter=logsnake.LogFormatter(fmt=LOG_FORMAT, datefmt=\"%Y-%m-%d %H:%M:%S\"),\n logfile=LOGDIR_PATH.joinpath(f\"{COG_UID}.log\"),\n fileLoglevel=logging.DEBUG,\n maxBytes=1 * (2**20),\n backupCount=1,\n propagate=True,\n)\n\n\nclass Emojus(commands.Cog, name=COG_UID):\n def __init__(self, bot: DiscoSnake):\n self.bot: DiscoSnake = bot\n self.save_dir = DATADIR_PATH.joinpath(COG_UID)\n self.save_dir.mkdir(parents=True, exist_ok=True)\n\n async def cog_load(self) -> None:\n logger.info(\"idk seems pretty sus\")\n self.acquire_emoji_task.start()\n return await super().cog_load()\n\n def cog_unload(self) -> None:\n logger.info(\"now somewhat less sus\")\n self.acquire_emoji_task.cancel()\n return super().cog_unload()\n\n async def on_ready(self) -> None:\n \"\"\"\n The code in this even is executed when the bot is ready\n \"\"\"\n logger.info(\"emojus is ready to be sus\")\n if self.acquire_emoji_task.is_running() is not True:\n logger.info(\"starting emoji acquisition task\")\n self.acquire_emoji_task.start()\n logger.info(f\"task will next run at: {self.acquire_emoji_task.next_iteration}\")\n return await super().on_ready()\n\n @tasks.loop(seconds=900.0, count=1)\n async def acquire_emoji_task(self) -> None:\n guild: Guild\n async for guild in self.bot.fetch_guilds():\n try:\n logger.info(f\"acquiring emoji from {guild.name}\")\n guild_dir = self.save_dir.joinpath(f\"{guild.id}\")\n guild_dir.mkdir(parents=True, exist_ok=True)\n\n emoji: Emoji\n acquired = 0\n for emoji in await guild.fetch_emojis():\n emoji_extn = \"gif\" if emoji.animated else \"png\"\n 
emoji_path = guild_dir.joinpath(f\"{emoji.name}.{emoji.id}.{emoji_extn}\")\n if not emoji_path.exists():\n logger.debug(f\"emoji {emoji.name} has not been saved, acquiring\")\n await emoji.save(emoji_path)\n acquired += 1\n except Exception:\n logger.exception(f\"failed to acquire emoji from {guild.name}\")\n logger.debug(\"done acquiring emoji, sleeping for 1 hour\")\n\n @acquire_emoji_task.before_loop\n async def before_status_task(self) -> None:\n logger.info(\"waiting for ready... just to be sure\")\n await self.bot.wait_until_ready()\n logger.info(\"ready!\")\n\n\ndef setup(bot):\n bot.add_cog(Emojus(bot))\n","repo_name":"neggles/disco-snake","sub_path":"src/cogs/emojus.py","file_name":"emojus.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"66"} +{"seq_id":"5887607446","text":"def armstrong (n):\r\n if not (n//100>0 and n//100<10):\r\n return (print(\"Broj {:.0f} nije troznamenkast!\".format(n)))\r\n #if n//100<1 or n//100>9 #2.način\r\n #if len(str(n))==3 #3.način\r\n j = n%10\r\n d = n//10%10\r\n s = n//10//10\r\n if n==s**3+d**3+j**3:\r\n print(\"Broj {:.0f} je Armstrongov\".format(n))\r\n else:\r\n print(\"Broj {:.0f} nije Armstrongov\".format(n))\r\n\r\narmstrong (153)\r\narmstrong (370)\r\narmstrong (371)\r\narmstrong (407)\r\narmstrong (100)\r\narmstrong (555)\r\narmstrong (907)\r\narmstrong (670)\r\narmstrong (3704)\r\narmstrong (58)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"jberbic/2A","sub_path":"08_Armstrong_h.py","file_name":"08_Armstrong_h.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10947580761","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.utils.random import sample_without_replacement\nfrom sklearn.model_selection import KFold\nfrom tqdm import tqdm\nfrom learners import *\n\nnum_split = 10\n\nt_all = pd.read_csv('./datasets/TWINS/twin_pairs_T_3years_samesex.csv')\nx_all = pd.read_csv('./datasets/TWINS/twin_pairs_X_3years_samesex.csv')\ny_all = pd.read_csv('./datasets/TWINS/twin_pairs_Y_3years_samesex.csv')\n\nt_all = t_all.drop(t_all.columns[0], axis = 1)\n\nx_all = x_all.drop(x_all.columns[:2],axis = 1)\nx_all = x_all.drop(['infant_id_0', 'infant_id_1', 'bord_0', 'bord_1'], axis = 1)\n\ny_all = y_all.drop(y_all.columns[0], axis = 1)\n\nt_filter = t_all[(t_all<2000).all(axis=1)]\nx_filter = x_all[(t_all<2000).all(axis=1)]\nx_filter_na = x_filter.fillna(x_filter.mean())\ny_filter = y_all[(t_all<2000).all(axis=1)]\n\nnum = len(t_filter)\n\ntreats_all = np.random.binomial(1, 0.5, num)\nX_all = x_filter_na.values\noutcomes_all = np.array([y_filter.values[i, treats_all[i]] for i in range(num)])\n\ncates_all = np.array([y_filter.values[i, 1] - y_filter.values[i, 0] for i in range(num)])\n\nkf = KFold(n_splits = num_split)\nmse_s_cv = np.zeros(num_split)\nmse_t_cv = np.zeros(num_split)\nmse_r_cv = np.zeros(num_split)\nmse_dr_cv = np.zeros(num_split)\n\ns_r2 = np.zeros(num_split)\nt_r2 = np.zeros(num_split)\nr_r2 = np.zeros(num_split)\ndr_r2 = np.zeros(num_split)\n\nj = 0\n\nfor train_idxs, test_idxs in tqdm(kf.split(X_all)):\n\n xs = X_all[train_idxs]\n treats = treats_all[train_idxs]\n outcomes = outcomes_all[train_idxs]\n xs_test = X_all[test_idxs]\n\n cates_test = cates_all[test_idxs]\n\n mse_s_cv[j], s_r2[j] = s_learner(xs, treats, outcomes, xs_test, cates_test)\n mse_t_cv[j], t_r2[j]= t_learner(xs, treats, outcomes, xs_test, cates_test)\n 
mse_r_cv[j], r_r2[j] = r_learner(xs, treats, outcomes, xs_test, cates_test)\n mse_dr_cv[j], dr_r2[j] = dr_learner(xs, treats, outcomes, xs_test, cates_test)\n j+=1\n\n\nprint(np.mean(mse_s_cv), np.std(mse_s_cv)*1.96/np.sqrt(num_split))\nprint(np.mean(mse_t_cv), np.std(mse_t_cv)*1.96/np.sqrt(num_split))\nprint(np.mean(mse_r_cv), np.std(mse_r_cv)*1.96/np.sqrt(num_split))\nprint(np.mean(mse_dr_cv), np.std(mse_dr_cv)*1.96/np.sqrt(num_split))\n\n\nprint(np.mean(s_r2), np.std(s_r2)*1.96/np.sqrt(num_split))\nprint(np.mean(t_r2), np.std(t_r2)*1.96/np.sqrt(num_split))\nprint(np.mean(r_r2), np.std(r_r2)*1.96/np.sqrt(num_split))\nprint(np.mean(dr_r2), np.std(dr_r2)*1.96/np.sqrt(num_split))\n\n# train_idxs = sample_without_replacement(num, num//2)\n# n1 = len(train_idxs)\n# test_idxs = np.setdiff1d(np.arange(num), train_idxs, True)\n# n2 = len(test_idxs)\n\n# xs, treats, outcomes = X_all[train_idxs], treats_all[train_idxs], outcomes_all[train_idxs]\n\n# xs_test = X_all[test_idxs]\n\n# cates_test = cates_all[test_idxs]","repo_name":"samstan/ORIE6746Project","sub_path":"twins.py","file_name":"twins.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5259099556","text":"class Solution:\n def longestConsecutive(self, nums):\n sett = set(nums)\n count = 0\n for i in nums:\n if i-1 in sett:\n continue\n else:\n localCount = 1\n currentNum = i\n while (currentNum+1 in sett):\n localCount += 1\n currentNum += 1\n if localCount > count:\n count = localCount\n return count\n\n\nsol = Solution()\nprint(sol.longestConsecutive([0,3,7,2,5,8,4,6,0,1]))\n\n","repo_name":"holmista/algorithms","sub_path":"leetcode questions/Longest Consecutive Sequence/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"34652062436","text":"class FeistelFunction :\n\n def __init__(self) :\n\n pass\n\n def __rowConfusion(self,block, right_key) :\n\n for i in range(0,len(block)) :\n block[i] = (block[i] + right_key[i])%256\n \n stride = right_key[i]%8\n mask = 0xFF >> (8-stride)\n temp = (block[i]&mask) << (8-stride)\n block[i] = (block[i] >> stride) + temp\n\n def __columnConfusion(self,block, left_key) :\n\n for col in range(0,1):\n\n # Get column value\n col_value = 0\n mask = 0x1<<(7-col)\n for row in range(0,len(block)) :\n XORed_value = mask&block[row]\n temp_value = XORed_value>>(7-col)\n col_value = col_value + (temp_value<<(7-row))\n\n # Addition\n\n col_value = (col_value + left_key[col])%256\n\n\n # wrapping shift\n stride = left_key[col]%8\n mask = 0xFF >> (8-stride)\n temp = (col_value&mask) << (8-stride)\n col_value = (col_value >> stride) + temp\n\n\n # Redistribute\n\n # Lower 1s\n mask = 0xFF>>(col+1)\n\n # Upper 1s\n mask += (0xFF << (len(block)-col))&0xFF\n\n for row in range(0,len(block)) :\n block[row] = block[row]&mask\n bit_value = ((col_value<>col\n block[row] = block[row] + (bit_value)\n\n def __cascadingXOR(self,block) :\n for row in range(1,len(block)) :\n block[row] = block[row-1]^block[row]\n\n def __rowShiftUp(self,block) :\n temp = block[0]\n for i in range(1,len(block)):\n block[i-1] = block[i]\n block[len(block)-1] = temp\n\n def feistelFunc(self, input_block, left_key, right_key):\n\n block = bytearray(input_block)\n\n for _ in range(len(block)):\n\n self.__cascadingXOR(block)\n self.__rowConfusion(block, right_key)\n self.__columnConfusion(block, left_key)\n 
self.__rowShiftUp(block)\n\n return block\n\n\n","repo_name":"nandorusrin/IF4020-HAD-Cipher","sub_path":"feistelFunction.py","file_name":"feistelFunction.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9440395350","text":"# encoding =utf-8\nimport random\n\n\ndef bayes(data_list, label_name, search, lamda):\n label_value_list = search_type(data_list, label_name)\n p = {}\n\n for label_value in label_value_list:\n pxy = 1\n for key in range(len(search)):\n label_num = search_number(data_list, label_name, label_value) + lamda * len(search_type(data_list, key))\n\n pxy *= (search_number(data_list, key, search[key], label_value=label_value) + lamda) / label_num\n print(pxy, label_num)\n\n pk = (search_number(data_list, label_name, label_value) + lamda) / (\n len(data_list) + len(search_type(data_list, label_name)) * lamda)\n\n p[label_value] = pxy * pk\n\n print(p)\n\n\ndef search_type(data_list, type):\n columns = [\"颜色\", \"容量\", \"品牌\", \"价格\", \"能不能买\"]\n type_list = []\n for item in data_list:\n if len(type_list) == 0:\n type_list.append(item[type])\n else:\n flag = False\n for i in type_list:\n if item[type] == i:\n flag = True\n\n if not flag:\n type_list.append(item[type])\n\n # print(columns[type], len(type_list))\n return type_list\n\n\ndef search_number(data_list, type, value, label_value=None):\n columns = [\"颜色\", \"容量\", \"品牌\", \"价格\", \"能不能买\"]\n count = 0\n for item in data_list:\n if label_value is not None:\n print(item[-1], label_value, item[type], value)\n if (item[-1] == label_value) and (item[type] == value):\n # print(item[-1], label_value, item[type], value)\n count += 1\n else:\n if item[type] == value:\n count += 1\n if type != -1:\n print(len(data_list), columns[type], value, label_value, count)\n return count\n\n\ndef train():\n yanse = [\"红色\", \"蓝色\", \"白色\", \"紫色\"]\n rongliang = [\"1\", \"2\", \"3\", \"4\"]\n pinpai = [\"Starbucks\", \"RoyalCopenhagen\", \"UCC\", \"Maxwell\", \"Nescafe\"]\n jiage = [100, 50, 230, 21, 420]\n\n list = []\n\n for i in range(1000):\n data = {\"颜色\": yanse[random.randint(0, len(yanse) - 1)], \"容量\": rongliang[random.randint(0, len(rongliang) - 1)],\n \"品牌\": pinpai[random.randint(0, len(pinpai) - 1)], \"价格\": jiage[random.randint(0, len(jiage) - 1)],\n \"能不能买\": random.randint(0, 1)}\n list.append(data)\n\n for item in list:\n print(item)\n\n df = pd.DataFrame(list, columns=[\"颜色\", \"容量\", \"品牌\", \"价格\", \"能不能买\"])\n\n df.to_csv(\"./train_data.csv\", columns=[\"颜色\", \"容量\", \"品牌\", \"价格\", \"能不能买\"], index=True)\n","repo_name":"zhangqian666/model_project","sub_path":"statistics/native_bayes/bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"13054679287","text":"# Faça um Programa que leia 2 números e em seguida pergunte ao usuário qual operação ele deseja realizar.\n# O resultado da operação deve ser acompanhado de uma frase que diga se o número é:\n# par ou ímpar;\n# positivo ou negativo;\n# inteiro ou decimal.\n\nimport math\n\nnum1 = float(input('Informe um número: '))\nnum2 = float(input('Informe outro número: '))\noper = input('Informe uma operação matemática (+ - * /): ')\n\nif oper == '+':\n res = num1 + num2\nelif oper == '-':\n res = num1 - num2\nelif oper == '*':\n res = num1 * num2\nelif oper == '/':\n res = num1 / num2\n\nif res % 2 == 0:\n par_impar = 'é par'\nelse:\n par_impar = 
'é impar'\n\nif res >= 0:\n pos_neg = 'é positivo'\nelse:\n pos_neg = 'é negativo'\n\nif res == math.floor(res):\n int_dec = 'é inteiro'\n\nelse:\n int_dec = 'é decimal'\n\nprint(f'''\n{res}\n{par_impar}\n{pos_neg}\n{int_dec}\n ''')\n","repo_name":"WillLume/ListaDeExercicios","sub_path":"EstruturaDecisao/exercicio24.py","file_name":"exercicio24.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72328902930","text":"import numpy as np\nfrom collections import namedtuple\n\n\nclass TFTree(object):\n def __init__(self):\n self.nodes = {}\n\n def to_dict(self):\n return [v.to_dict() for k, v in self.nodes.items()]\n\n @classmethod\n def from_dict(cls, inlist):\n out = cls()\n for v in inlist:\n n = TFNode.from_dict(v)\n if not n.parent: # root encountered\n continue\n out.add_transform(n.parent.name, n.name, n.transform)\n return out\n\n def add_transform(self, parent, child, xform):\n if parent not in self.nodes:\n self.nodes[parent] = TFNode(parent, None, None)\n\n if child not in self.nodes:\n self.nodes[child] = TFNode(child, self.nodes[parent], xform)\n else:\n node = self.nodes[child]\n node.parent = self.nodes[parent]\n node.transform = xform\n\n def get_parent(self):\n parent_nodes = []\n\n for name, node in self.nodes.items():\n if not node.parent:\n parent_nodes.append(node)\n\n if len(parent_nodes) > 1:\n raise Exception(\n \"More than one tree found, this case is unsupported\")\n if len(parent_nodes) == 0:\n raise Exception(\n \"No parent node found, there are probably cycles in the tree\")\n\n return parent_nodes[0]\n\n def lookup_transform(self, frame, target):\n if target not in self.nodes:\n raise Exception(\"target is not part of the tf tree\")\n if frame not in self.nodes:\n raise Exception(\"frame is not part of the tf tree\")\n if target == frame:\n return np.eye(4)\n\n parent_node = self.get_parent()\n\n frame_path_to_parent = self._get_path_to_parent(parent_node, target)\n target_path_to_parent = self._get_path_to_parent(parent_node, frame)\n\n while True and len(frame_path_to_parent) > 0 and len(target_path_to_parent) > 0:\n if frame_path_to_parent[-1] == target_path_to_parent[-1]:\n frame_path_to_parent.pop()\n target_path_to_parent.pop()\n else:\n break\n\n # Note: I do not understand why the part below works\n\n def get_inverse_xform_for_path(path):\n transform_to_parent = np.identity(4)\n for node in path:\n transform_to_parent = np.dot(\n node.transformation_matrix, transform_to_parent)\n return transform_to_parent\n\n frame_transform_to_common_parent = get_inverse_xform_for_path(\n frame_path_to_parent)\n target_transform_to_common_parent = get_inverse_xform_for_path(\n target_path_to_parent)\n\n final_xform = np.dot(np.linalg.inv(\n target_transform_to_common_parent), frame_transform_to_common_parent)\n\n return np.linalg.inv(final_xform)\n\n def _get_path_to_parent(self, parent_node, node_name):\n if node_name == parent_node.name:\n return []\n\n node = self.nodes[node_name]\n traversed_nodes = {}\n path = []\n while True:\n if node in traversed_nodes:\n raise Exception(\"Cycle detected, this case is unsupported\")\n traversed_nodes[node] = True\n path.append(node)\n node = node.parent\n if node == parent_node:\n break\n\n return path\n\n def transform_point(self, x, y, z, target, base):\n t = self.lookup_transform(base, target)\n return np.dot(t, np.array([x, y, z, 1]))[0:3]\n\n\nclass TFNode(object):\n def __init__(self, name, parent, transform):\n 
self.parent = parent\n self.name = name\n self.transform = transform\n\n def to_dict(self):\n return {'parent': self.parent.name if self.parent else self.parent,\n 'name': self.name,\n 'transform': self.transform if hasattr(self.transform, 'shape') else None}\n\n @classmethod\n def from_dict(cls, indict):\n parent, xform = indict['parent'], indict['transform']\n parent = TFNode(parent, None, None) if parent else None\n # xform = Transform.from_dict(xform) if xform else None\n return cls(indict['name'], parent, xform)\n\n @property\n def transformation_matrix(self):\n if self.transform is None:\n return np.identity(4)\n return self.transform\n","repo_name":"eroniki/build_daq","sub_path":"tf/tf.py","file_name":"tf.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"72807265170","text":"from config import Configuration, patient\n\n\ndef medical_main():\n while True:\n print(f\"\"\" \n Welcome to the Medical History Menu\n {Configuration.medium_dashes}\n * Enter [1] to check for Medical History\n * Enter [2] to go back\n {Configuration.medium_dashes}\n \"\"\")\n\n choice = int(input(\"> \"))\n if choice not in Configuration.medical_history_number_of_choices:\n print(f\"{Configuration.small_dashes}\")\n print(\"Please enter a valid choice!\")\n print(f\"{Configuration.small_dashes}\")\n\n else:\n if choice == 1:\n patient_medical_records: dict = patient.get_patient_records()\n for date, diagnosis in patient_medical_records.items():\n print(date)\n print(diagnosis)\n print(f\"{Configuration.small_dashes}\")\n\n\n\n\n elif choice == 2:\n print(\"Going back...\")\n print(f\"{Configuration.medium_dashes}\")\n break\n","repo_name":"clickykeyboard/Medicode","sub_path":"medical_history/medical_history.py","file_name":"medical_history.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"7232645032","text":"import torch\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM\n\nimport constants\nimport processing\n\nKEYWORDMODEL = \"./keyword-generator-t5-small-30-3-4\"\n\n\n# KEYWORDMODEL = \"t5-small\"\n\n\ndef get_keywords_from_model(text, tokenizer, model):\n tokenized = tokenizer(text, padding=\"max_length\", return_tensors='pt')\n logits = model.generate(tokenized['input_ids'])[0]\n return tokenizer.decode(logits, skip_special_tokens=True)\n\n\nif __name__ == '__main__':\n print(\"Hello, World\")\n\n print(\"load tokenizers\")\n keyword_tokenizer = AutoTokenizer.from_pretrained(KEYWORDMODEL)\n keyword_tokenizer.max_length = constants.KEYWORDTOKENIZER_SOURCE_LENGTH\n\n print(\"load models\")\n keyword_model = AutoModelForSeq2SeqLM.from_pretrained(KEYWORDMODEL)\n\n dataset = torch.load(\"scripts/keyword-generator-4.pickle\")\n size = int(len(dataset) * 0.5)\n split_at = int(size * 0.8)\n print(\"split at \", split_at, \"/\", size)\n train_data = dataset[:split_at]\n eval_data = dataset[split_at:size]\n\n results = []\n\n for sid, pid, sentences, next_keywords in train_data[:1000]:\n text = processing.process_sentences_for_keyword_generation(sentences)\n keywords = get_keywords_from_model(text, keyword_tokenizer, keyword_model).split()\n results.append((sid, pid, sentences, next_keywords, keywords))\n print(\"%s - %s\" % (keywords, next_keywords))\n if pid == 4:\n print()\n\n torch.save(results, \"scripts/keywords-result-train-4.pickle\")\n\n results = []\n\n for sid, pid, sentences, 
next_keywords in eval_data[:1000]:\n text = processing.process_sentences_for_keyword_generation(sentences)\n keywords = get_keywords_from_model(text, keyword_tokenizer, keyword_model).split()\n results.append((sid, pid, sentences, next_keywords, keywords))\n print(\"%s - %s\" % (keywords, next_keywords))\n if pid == 4:\n print()\n\n torch.save(results, \"scripts/keywords-result-eval-4.pickle\")\n","repo_name":"fstiewitz/fwk","sub_path":"test-keyword-generator.py","file_name":"test-keyword-generator.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27992913781","text":"import random\nfrom cachingalgo.full_observation.single_cache import LRU\n\n\n# LRUm algorithm consists of v virtual caches, f-v caches of total size m.\nclass LRUm(LRU):\n def __init__(self, size, f, v, L):\n \"\"\"\n size: vector of length f which specifies the length of each list\n If length = 1 and f > 1, all the lists have same length\n f: Total no. of lists used in the algorithm\n v: No. of lists used as virtual Caches.\n L: Library size\n \"\"\"\n\n self.f = f\n # No. of lists used as Cache\n self.cnum = f-v\n self.L = L\n\n # Uses LRU if LRUm uses one list\n if f == 1:\n super().__init__(cache_size=size, L=L)\n else:\n if isinstance(size, list) and len(size) == f:\n self.size = size\n else:\n self.size = [size]*f\n\n # self.part is to make sure that at the start, each of lists are unique\n self.part = self.L//self.f\n self.collection = []\n\n #Randomly initializes the f lists\n for itr, length in enumerate(self.size):\n start = int(itr*self.part)\n end = int((itr+1)*self.part)\n self.collection.append(random.sample(range(start, end), int(length)))\n\n def index_finder(self, req, full_search = False):\n \"\"\"\n Finds the index of request in the f lists\n req: Request\n full_search: True - searches all list or False - searches only cache excluding virtual caches\n Returns request's position in list and it's corresponding list index. 
returns -1 if not found\n \"\"\"\n if full_search:\n part = self.cnum\n else:\n part = self.f\n index = -1\n listnum = -1\n\n # Searches from the last list\n for i, list_ in enumerate(self.collection[-part:]):\n if req in list_:\n index = list_.index(req)\n listnum = i\n return index, listnum\n\n def update(self, req):\n \"\"\"\n Updates the f lists\n req: Request\n \"\"\"\n\n # If the algorithm has more lists\n if self.f != 1:\n # Gives the index, list number of the req\n index, listnum = self.index_finder(req)\n\n if index != -1:\n if listnum < self.f-1:\n # moves the req to the index 0 of listnum + 1 and listnum+1 last element\n # is moved to the index 0 of listnum\n self.collection[listnum].remove(req)\n temp = self.collection[listnum+1].pop()\n self.collection[listnum] = [temp] + self.collection[listnum]\n self.collection[listnum+1] = [req] + self.collection[listnum+1]\n\n else:\n # moves the req to the index 0\n temp = self.collection[listnum].pop(index)\n self.collection[listnum] = [temp] + self.collection[listnum]\n\n # adds the ele at index 0 of list 1 and discards the last element of that list.\n else:\n self.collection[0].pop()\n self.collection[0] = [req] + self.collection[0]\n\n else:\n super().update(req)\n\n def __contains__(self, req):\n \"\"\"\n Magic method to use \"in\" keyword\n req: request\n \"\"\"\n if self.f != 1:\n index, _ = self.index_finder(req, True)\n if index != -1:\n return True\n else:\n return False\n\n else:\n if req in self.cache:\n return True\n else:\n return False\n\n def currcache(self):\n \"\"\"\n Returns the current cache\n \"\"\"\n\n if self.f != 1:\n return self.collection[-self.cnum:]\n else:\n return self.cache\n\n# fLRU algorithm consists of f-1 virtual caches and 1 cache\nclass fLRU:\n def __init__(self, f, size, L):\n \"\"\"\n f: total no. 
of caches\n size: vector of length f which specifies the length of each list\n If length = 1 and f > 1, all the lists have same length\n L: Library Size\n \"\"\"\n self.f = f\n self.L = L\n if isinstance(size, list) and len(size) == f:\n self.size = size\n else:\n self.size = [size]*f\n\n self.collection = []\n\n #Randomly intialiases the f lists\n for length in self.size:\n self.collection.append(random.sample(range(1, L), length))\n\n def update(self, req):\n \"\"\"\n Updates the f lists\n req: Request\n \"\"\"\n found = False\n for i in range(self.f-1, -1, -1):\n currlist = self.collection[i]\n prevlist = self.collection[i-1] if i > 0 else None\n\n # if the req is present in the ith list, move it to the first position\n if req in currlist:\n currlist.remove(req)\n self.collection[i] = [req] + currlist\n found = True\n\n # if the req is present in the i-1th list but not in ith list, move it\n # to the first position of the ith list and discard the last item of the ith list.\n elif prevlist is not None and req in prevlist:\n _ = currlist.pop()\n self.collection[i] = [req] + currlist\n found = True\n\n # if the req is not in the collection of lists then insert it in the first list.\n if not found:\n _ = self.collection[0].pop()\n self.collection[0] = [req] + self.collection[0]\n\n def __contains__(self, req):\n \"\"\"\n Magic method to use \"in\" keyword\n req: request\n \"\"\"\n if req in self.collection[-1]:\n return True\n else:\n return False\n\n def currcache(self):\n \"\"\"\n Returns the current cache\n \"\"\"\n return self.collection[-1]\n","repo_name":"SuryaKrishna02/caching-algorithms","sub_path":"cachingalgo/full_observation/multiple_cache.py","file_name":"multiple_cache.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"2158898767","text":"# coding: utf-8\nfrom flask import Flask, request, render_template, url_for, session, make_response, jsonify\nimport psycopg2\nimport matplotlib.pyplot as plt\nimport japanize_matplotlib\nimport datetime\nfrom io import BytesIO\nimport urllib\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom matplotlib.figure import Figure\nimport math\n\n\ndef cal_urgency(deadline, fday):\n fsec = 24 * 60\n td = deadline - datetime.datetime.now()\n urgency = int((math.exp(-1 * td.total_seconds() / (fsec * 60)) * 1000))\n return urgency\n\n\ndef draw_figure(task_list, day_int):\n x_list = []\n fig = plt.figure(figsize=(6, 4), dpi=100)\n for i in range(len(task_list)):\n plt.scatter(task_list[i][4], task_list[i][3], label = task_list[i][1])\n x_list.append(task_list[i][4])\n\n xmax, xmin = max(x_list) + 100, min(min(x_list) - 100, 0)\n \n plt.hlines([50],xmin, xmax, linestyle = 'solid')\n plt.vlines([(xmin + xmax) / 2], 0, 100,linestyle = 'solid')\n plt.xlabel(\"緊急度\")\n plt.ylabel(\"重要度\")\n plt.ylim(0, 100)\n plt.xlim(xmax, xmin)\n plt.title(str(day_int) + \"日間タスク一覧\")\n plt.legend(bbox_to_anchor=(1.05, 0.5, 0.5, .1), loc='upper left', borderaxespad=0)\n plt.subplots_adjust(left=0.1, right=0.52, bottom=0.24, top=0.82)\n fig.patch.set_alpha(0)\n\n # canvasにプロットした画像を出力\n canvas = FigureCanvasAgg(fig)\n png_output = BytesIO()\n canvas.print_png(png_output)\n data = png_output.getvalue()\n # HTML側に渡すレスポンスを生成する\n response = make_response(data)\n response.headers['Content-Type'] = 'image/png'\n response.headers['Content-Length'] = len(data)\n return response\n\n\n\n\n 
","repo_name":"Moriomori/To_do_web","sub_path":"Taskdoapp_Web/to_do_web.py","file_name":"to_do_web.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2165241933","text":"# Read the temperature from two distinct MOD-TC-MK2-31855\n# The modules must have different I2C addres.\n#\nfrom machine import I2C\nfrom modtc_mk2 import MODTC_MK2\nfrom time import sleep\n\n# The address of the two modules\nMK2_ADDR_1 = 0x23\nMK2_ADDR_2 = 0x25\n\n# PYBOARD-UNO-R3 & UEXT for Pyboard. SCL=Y9, SDA=Y10\ni2c = I2C(2)\nmk_dic = { 'temp 1' : MODTC_MK2( i2c, address=MK2_ADDR_1 ),\n\t\t 'temp 2' : MODTC_MK2( i2c, address=MK2_ADDR_2 ) }\n\nwhile True:\n\tprint( \"-\"*40 )\n\tfor name, mk2 in mk_dic.items():\n\t\ttemp = mk2.temperatures[1]\n\t\tprint( \"%s : %5.2f C\" % (name, temp) )\n\tsleep(1)\n","repo_name":"mchobby/esp8266-upy","sub_path":"modtc-mk2/examples/test_dual.py","file_name":"test_dual.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"66"} +{"seq_id":"19404246666","text":"from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom callbacks import *\n\ndef words_inline(words):\n\tbuttons = []\n\tfor i, (word, _) in enumerate(words.items()):\n\t\tbuttons.append(InlineKeyboardButton(\n text=f'{i+1}',\n callback_data=show_translate_callback.new(word_id=word.word_id)))\n\t\n\twords_kb = InlineKeyboardMarkup(inline_keyboard=[[buttons[0],buttons[1]], [buttons[2], buttons[3]], [buttons[4]]])\n\t\n\treturn words_kb","repo_name":"markneonin/EngLearningBot","sub_path":"keyboards.py","file_name":"keyboards.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"30497371033","text":"# -*- coding: utf-8 -*-\n\nfrom email.policy import default\nfrom odoo import api, fields, models\n\n\nclass SuiviEpiEpi(models.Model):\n _name = 'suivi.epi.epi'\n _description = \"Permet de Lister les EPI\"\n\n \n name = fields.Char(\n string='Nom EPI',\n required=True,\n )\n\n duree = fields.Integer(\n string='Durée de vie(mois)',\n required=True,\n default=0,\n )\n \n\n \n\n \n \n ","repo_name":"orkojr/erp-bric","sub_path":"suivi_epi/models/epi.py","file_name":"epi.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41706627863","text":"# -*- coding: utf-8 -*-\nimport cv2\nfrom imutils import paths\nfrom keras.preprocessing.image import img_to_array\nimport os\nimport numpy as np\nimport matplotlib\n\ndata = []\nlabels = []\nIMAGE_DIMS = (96, 96, 3)\nprint(\"[INFO] loading images...\")\nimagePaths = sorted(list(paths.list_images(\"./testdata\")))\nfor imagePath in imagePaths:\n\t# load the image, pre-process it, and store it in the data list\n\timage = cv2.imread(imagePath)\n\timage = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0]))\n\timage = img_to_array(image)\n\tdata.append(image)\n \n\t# extract the class label from the image path and update the\n\t# labels list\n\tlabel = imagePath.split(os.path.sep)[-2]\n\tlabels.append(label)\n\n \ndata = np.array(data, dtype=\"float\") / 255.0\nprint(data)\nlabels = np.array(labels)\nprint(\"[INFO] data matrix: {:.2f}MB\".format(\n\tdata.nbytes / (1024 * 
1000.0)))","repo_name":"abhilash01393/ASL","sub_path":"imageRead.py","file_name":"imageRead.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"10858557973","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File: Ampel-core/ampel/config/alter/ResolveRunTimeAliases.py\n# License: BSD-3-Clause\n# Author: valery brinnel \n# Date: 05.04.2023\n# Last Modified Date: 05.04.2023\n# Last Modified By: valery brinnel \n\nfrom typing import Any\nfrom ampel.log.AmpelLogger import AmpelLogger\nfrom ampel.core.AmpelContext import AmpelContext\nfrom ampel.abstract.AbsConfigUpdater import AbsConfigUpdater\nfrom ampel.util.recursion import walk_and_process_dict\n\n\nclass ResolveRunTimeAliases(AbsConfigUpdater):\n\n\tdef alter(self, context: AmpelContext, content: dict[str, Any], logger: AmpelLogger) -> dict[str, Any]:\n\n\t\t# Run-time aliases generated at T4 for T0/T1 processes\n\t\tif context.run_time_aliases:\n\t\t\twalk_and_process_dict(\n\t\t\t\targ = content,\n\t\t\t\tcallback = self._gather_run_time_aliases_callback,\n\t\t\t\trun_time_aliases = context.run_time_aliases,\n\t\t\t\tlogger = logger\n\t\t\t)\n\n\t\treturn content\n\n\tdef _gather_run_time_aliases_callback(self, path, current_key, current_d, **kwargs) -> None:\n\t\t\"\"\" Used by walk_and_process_dict(...) from morph(...) \"\"\"\n\t\t# print(f\"# path: {path}\\n# d: {d}\\n\")\n\t\tfor k, v in current_d.items():\n\t\t\tif isinstance(v, str) and v[0] == '%' == v[1]:\n\t\t\t\tfor rt_key, rt_val in kwargs['run_time_aliases'].items():\n\t\t\t\t\tif v == rt_key:\n\t\t\t\t\t\tif kwargs['logger'].verbose:\n\t\t\t\t\t\t\tkwargs['logger'].info(\n\t\t\t\t\t\t\t\tf\"Setting value for run time alias {rt_key} with path {path}.{k}\"\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\tcurrent_d[k] = kwargs['run_time_aliases'][rt_key]\n","repo_name":"AmpelProject/Ampel-core","sub_path":"ampel/config/alter/ResolveRunTimeAliases.py","file_name":"ResolveRunTimeAliases.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"66"} +{"seq_id":"15171047613","text":"class Solution:\n def minRemoveToMakeValid(self, s: str) -> str:\n pos=[]\n stack=[]\n s=list(s)\n for i in range(len(s)):\n if s[i]==\"(\":\n stack.append(s[i])\n pos.append(i)\n if s[i]==\")\":\n stack.append(s[i])\n pos.append(i)\n if len(stack)>=2:\n if stack[-1]==\")\" and stack[-2]==\"(\":\n stack.pop()\n stack.pop()\n pos.pop()\n pos.pop()\n for i in reversed(range(len(pos))):#倒序删除\n #print(pos[i])\n del s[pos[i]]\n return \"\".join(s)\n","repo_name":"ziqinXU/Leetcode","sub_path":"Medium_Python/1249_Minimum_remove_to_make_valid_parentheses.py","file_name":"1249_Minimum_remove_to_make_valid_parentheses.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"70788143252","text":"from codecs import open\nfrom os import path\nfrom setuptools import setup\nfrom pip.download import PipSession\nfrom pip.req import parse_requirements\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nlista_objetos_dependencias = parse_requirements('requirements.txt', session=PipSession())\nlista_dependencias = [str(objeto.req) for objeto in lista_objetos_dependencias]\n\nsetup(\n name='toggl-client',\n packages=[\"client\"],\n version='0.1.1',\n 
description='Cliente para a toggl API',\n long_description=long_description,\n url='https://github.com/lramosduarte/toggl-client',\n author='Leonardo Ramos Duarte',\n author_email='lramosduarte@gmail.com',\n license='MIT',\n\n entry_points={\n 'console_scripts': [\n 'tgc = client.main:main',\n ]\n },\n\n data_files=['requirements.txt'],\n\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Intended Audience :: Developers',\n\n 'License :: OSI Approved :: MIT License',\n\n 'Programming Language :: Python :: 3',\n ],\n\n python_requires='>=3',\n\n keywords='toggl toggl-api toggl-client',\n\n install_requires=lista_dependencias,\n\n extras_require={\n 'dev': ['check-manifest'],\n 'test': ['coverage'],\n },\n\n)\n","repo_name":"lramosduarte/toggl-client","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"23886822389","text":"'''\n05 - How's our data integrity?\n\nNew data has been merged into the banking DataFrame that contains details on \nhow investments in the inv_amount column are allocated across four different \nfunds A, B, C and D.\n\nFurthermore, the age and birthdays of customers are now stored in the age and \nbirth_date columns respectively.\n\nYou want to understand how customers of different age groups invest. However, \nyou want to first make sure the data you're analyzing is correct. You will do \nso by cross field checking values of inv_amount and age against the amount \ninvested in different funds and customers' birthdays. Both pandas and datetime \nhave been imported as pd and dt respectively.\n-----------------------------------------------------------------------------------------\nInstructions 1/2\n-----------------------------------------------------------------------------------------\n- Find the rows where the sum of all rows of the `fund_columns` in banking are equal \n to the `inv_amount` column.\n- Store the values of banking with consistent inv_amount in consistent_inv, and those\n with inconsistent ones in inconsistent_inv.\n'''\n# importing the libraries\nimport datetime as dt\nimport pandas as pd\n\n# Store fund columns to sum against\nfund_columns = ['fund_A', 'fund_B', 'fund_C', 'fund_D']\n\n# Find rows where fund_columns row sum == inv_amount\ninv_equ = banking[fund_columns].sum(axis=1) == banking['inv_amount']\n\n# Store consistent and inconsistent data\nconsistent_inv = banking[inv_equ]\ninconsistent_inv = banking[~inv_equ]\n\n# Store consistent and inconsistent data\nprint(\"Number of inconsistent investments: \", inconsistent_inv.shape[0])\n\n# OUTPUT:- Number of inconsistent investments: 8\n'''\n-----------------------------------------------------------------------------------------\nInstructions 2/2\n-----------------------------------------------------------------------------------------\n- Store today's date into today, and manually calculate customers' ages and store them \n in ages_manual.\n- Find all rows of banking where the age column is equal to ages_manual and then filter \n banking into consistent_ages and inconsistent_ages.\n'''\n# Store today's date and find ages\ntoday = dt.date.today()\nages_manual = today.year - banking['birth_date'].dt.year\n\n# Find rows where age column == ages_manual\nage_equ = banking['age'] == ages_manual\n\n# Store consistent and inconsistent data\nconsistent_ages = banking[age_equ]\ninconsistent_ages = banking[~age_equ]\n\n# Store consistent and inconsistent 
data\nprint(\"Number of inconsistent ages: \", inconsistent_ages.shape[0])\n\n# OUTPUT: Number of inconsistent ages: 4\n","repo_name":"mohd-faizy/CAREER-TRACK-Data-Scientist-with-Python","sub_path":"16_Cleaning Data in Python [Part - 1]/03_Advanced data problems/05_How's our data integrity.py","file_name":"05_How's our data integrity.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"14444986953","text":"#두 정수 A와 B를 입력받은 다음, A+B를 출력하는 프로그램을 작성하시오.\n#입력은 여러 개의 테스트 케이스로 이루어져 있다.\n#각 테스트 케이스는 한 줄로 이루어져 있으며, 각 줄에 A와 B가 주어진다. (0 < A, B < 10)\n#각 테스트 케이스마다 A+B를 출력한다. \n\n# 두 정수 입력-출력,입력-출력이란 말 같음\n# sys로 입력 받는 방법\nimport sys\nfor input in sys.stdin:\n a, b = map(int, input.split())\n print(a+b)","repo_name":"yuntaekOhO/python_space","sub_path":"python_bj/3.반복문/10951_A더하기B-4.py","file_name":"10951_A더하기B-4.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12600343660","text":"from tqdm import tqdm\n\n\ndef write_trec_run(results, outfn, tag=\"neural_ir\"):\n with open(outfn, \"wt\") as outf:\n qids = sorted(results.keys())\n for qid in tqdm(qids, desc=f\"Writing run file to {outfn}\"):\n rank = 1\n for docid, score in sorted(results[qid], key=lambda x: x[1], reverse=True):\n print(f\"{qid} Q0 {docid} {rank} {score} {tag}\", file=outf)\n rank += 1\n","repo_name":"abhinav-neil/neural-ir","sub_path":"neural_ir/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41730714050","text":"import time\n\nfrom settings import pause_key\nfrom win32_utils.getkeys import key_check\n\n\ndef CountDown(tik):\n for i in range(tik)[::-1]:\n print(i+1)\n time.sleep(1)\n\n\ndef pause(sec=0.2):\n time.sleep(0.05)\n while True:\n if pause_key in key_check():\n break\n time.sleep(sec)\n\n time.sleep(0.05)\n","repo_name":"CharmsGraker/pySekiro","sub_path":"utils/count_down.py","file_name":"count_down.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"21444794660","text":"\"\"\"\nGiven an integer array nums, move all 0's to the end of it while maintaining\nthe relative order of the non-zero elements.\nNote that you must do this in-place without making a copy of the array.\n\nExample 1:\n Input: nums = [0,1,0,3,12]\n Output: [1,3,12,0,0]\n\nExample 2:\n Input: nums = [0]\n Output: [0]\n\nConstraints:\n- 1 <= nums.length <= 104\n- -231 <= nums[i] <= 231 - 1\n\nFollow up: Could you minimize the total number of operations done?\n\"\"\"\nfrom typing import List\n\n\ndef solution(nums: List[int]) -> None:\n insert_index = 0\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[insert_index] = nums[i]\n insert_index += 1\n for i in range(insert_index, len(nums)):\n nums[i] = 0\n","repo_name":"jtprogru/interview-task","sub_path":"tasks/task0014.py","file_name":"task0014.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"66"} +{"seq_id":"9573770786","text":"from os.path import realpath\nfrom mininet.util import errFail, errRun\nfrom mininet.log import debug, info\nimport sys\n\n# Utility functions for unmounting a tree\n# Real path of OSHI's dir\nMNRUNDIR = realpath( '/var/run/mn' )\n\n\n# Take 
the mounted points of the root machine\ndef mountPoints():\n \"Return list of mounted file systems\"\n mtab, _err, _ret = errFail( 'cat /proc/mounts' )\n lines = mtab.split( '\\n' )\n mounts = []\n for line in lines:\n if not line:\n continue\n fields = line.split( ' ')\n mount = fields[ 1 ]\n mounts.append( mount )\n return mounts\n\n \n# Utility Function for unmount all the dirs\ndef unmountAll( rootdir=MNRUNDIR ):\n \"Unmount all mounts under a directory tree\"\n rootdir = realpath( rootdir )\n # Find all mounts below rootdir\n # This is subtle because /foo is not\n # a parent of /foot\n dirslash = rootdir + '/'\n mounts = [ m for m in mountPoints()\n if m == dir or m.find( dirslash ) == 0 ]\n # Unmount them from bottom to top\n mounts.sort( reverse=True )\n for mount in mounts:\n debug( 'Unmounting', mount, '\\n' )\n _out, err, code = errRun( 'umount', mount )\n if code != 0:\n info( '*** Warning: failed to umount', mount, '\\n' )\n info( err )\n","repo_name":"netgroup/Dreamer-Management-Scripts","sub_path":"oshi/vs/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29744033775","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nnames = ['Cecilia', 'Lise', 'Marie']\nletters = [len(n) for n in names]\n\nmax_letters = 0\nfor name, count in zip(names, letters):\n if count > max_letters:\n longest_name = name\n max_letters = count\n\n# In Python 2.0 zip has some memory issue,\n# using itertools.izip or itertools.izip_longest is better.\n\n# In Python 3.0 zip_longest is under itertools.\n","repo_name":"zenanswer/python-practice","sub_path":"src/py/iterator_generator/ziptest.py","file_name":"ziptest.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"988027994","text":"import math\nimport pickle\n\nfrom preprocessing import preprocessing\n\n\ndef build_dataset(text, dataset_name):\n with open(\"%s.txt\" % dataset_name, 'w') as f:\n f.write(text)\n\n\ndef build_docs_set(docset, docset_name):\n result = [[None, None]] * len(docset)\n for i in range(len(docset)):\n result[i] = docset[i]\n with open(\"%s.bin\" % docset_name, 'wb') as f:\n pickle.dump(result, f)\n\n\ndef training(model_name, dataset, dim=100, ws=4):\n import fasttext\n model = fasttext.train_unsupervised(dataset, model='skipgram', dim=dim, epoch=250, loss='ns', ws=ws)\n model.save_model(model_name)\n return model\n\n\ndef vector_len(v):\n return math.sqrt(sum([x * x for x in v]))\n\n\ndef dot_product(v1, v2):\n return sum([x * y for (x, y) in zip(v1, v2)])\n\n\ndef cosine_similarity(v1, v2):\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))\n\n\ndef similarity(query, modelname, docset_name):\n from gensim.models import FastText\n model = FastText.load_fasttext_format(modelname)\n q, _ = preprocessing(query)\n with open(\"%s.bin\" % docset_name, 'rb') as f:\n docset = pickle.load(f)\n result = []\n for [_id, preprocessed] in docset:\n result.append((_id, model.wv.n_similarity(q, preprocessed)))\n result = sorted(result, key=lambda x: x[1], reverse=True)\n return result\n\n\ndef most_similar(model, x, top=10):\n v1 = model.get_word_vector(x)\n all_word = []\n for word in model.words:\n if word != x:\n v2 = model.get_word_vector(word)\n all_word.append((cosine_similarity(v1, v2), word))\n all_word = sorted(all_word, key=lambda item: item[0], reverse=True)\n return 
all_word[:top]\n\n\ndef load_model(model):\n import fasttext\n return fasttext.load_model(model)\n","repo_name":"reyzeal/comparing-tfidf-fasttext","sub_path":"methods/_fasttext/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"17882887655","text":"# coding=utf-8\n\"\"\"\nTools for running BDD frameworks in python.\nYou probably need to extend BddRunner (see its doc).\n\nYou may also need \"get_what_to_run_by_env\" that gets folder (current or passed as first argument)\n\"\"\"\nimport os\nimport time\nimport abc\nimport tcmessages\nfrom _jb_utils import VersionAgnosticUtils\n\n__author__ = 'Ilya.Kazakevich'\n\n\ndef fix_win_drive(feature_path):\n \"\"\"\n Workaround to fix issues like http://bugs.python.org/issue7195 on windows.\n Pass feature dir or file path as argument.\n This function does nothing on non-windows platforms, so it could be run safely.\n\n :param feature_path: path to feature (c:/fe.feature or /my/features)\n \"\"\"\n current_disk = (os.path.splitdrive(os.getcwd()))[0]\n feature_disk = (os.path.splitdrive(feature_path))[0]\n if current_disk and feature_disk and current_disk != feature_disk:\n os.chdir(feature_disk)\n\n\ndef get_what_to_run_by_env(environment):\n \"\"\"\n :type environment dict\n :param environment: os.environment (files and folders should be separated with | and passed to PY_STUFF_TO_RUN).\n Scenarios optionally could be passed as SCENARIOS (names or order numbers, depends on runner)\n :return: tuple (base_dir, scenarios[], what_to_run(list of feature files or folders))) where dir is current or first argument from env, checking it exists\n :rtype tuple of (str, iterable)\n \"\"\"\n if \"PY_STUFF_TO_RUN\" not in environment:\n what_to_run = [\".\"]\n else:\n what_to_run = str(environment[\"PY_STUFF_TO_RUN\"]).split(\"|\")\n\n scenarios = []\n if \"SCENARIOS\" in environment:\n scenarios = str(environment[\"SCENARIOS\"]).split(\"|\")\n\n if not what_to_run:\n what_to_run = [\".\"]\n\n for path in what_to_run:\n assert os.path.exists(path), \"{0} does not exist\".format(path)\n\n base_dir = what_to_run[0]\n if os.path.isfile(what_to_run[0]):\n base_dir = os.path.dirname(what_to_run[0]) # User may point to the file directly\n return base_dir, scenarios, what_to_run\n\n\ndef get_location(base_dir, location_file, location_line):\n \"\"\"\n Generates location that PyCharm resolves to file\n :param base_dir: base directory to resolve relative path against\n :param location_file: path to file\n :param location_line: line number\n \"\"\"\n my_file = str(location_file).lstrip(\"/\\\\\")\n return \"file:///{0}:{1}\".format(os.path.normpath(os.path.join(base_dir, my_file)), location_line)\n\n\nclass BddRunner(object):\n \"\"\"\n Extends this class, implement abstract methods and use its API to implement new BDD frameworks.\n Call \"run()\" to launch it.\n This class does the following:\n * Gets features to run (using \"_get_features_to_run()\") and calculates steps in it\n * Reports steps to Intellij or TC\n * Calls \"_run_tests()\" where *you* should install all hooks you need into your BDD and use \"self._\" functions\n to report tests and features. 
It actually wraps tcmessages but adds some stuff like duration count etc\n :param base_dir:\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, base_dir):\n \"\"\"\n :type base_dir str\n :param base_dir base directory of your project\n \"\"\"\n super(BddRunner, self).__init__()\n self.tc_messages = tcmessages.TeamcityServiceMessages()\n \"\"\"\n tcmessages TeamCity/Intellij test API. See TeamcityServiceMessages\n \"\"\"\n self.__base_dir = base_dir\n self.__last_test_start_time = None # TODO: Doc when use\n self.__last_test_name = None\n\n def run(self):\n \"\"\"\"\n Runs runner. To be called right after constructor.\n \"\"\"\n number_of_tests = self._get_number_of_tests()\n self.tc_messages.testCount(number_of_tests)\n self.tc_messages.testMatrixEntered()\n if number_of_tests == 0: # Nothing to run, so no need to report even feature/scenario start. (See PY-13623)\n return\n self._run_tests()\n\n def __gen_location(self, location):\n \"\"\"\n Generates location in format, supported by tcmessages\n :param location object with \"file\" (relative to base_dir) and \"line\" fields.\n :return: location in format file:line (as supported in tcmessages)\n \"\"\"\n return get_location(self.__base_dir, location.file, location.line)\n\n def _test_undefined(self, test_name, location):\n \"\"\"\n Mark test as undefined\n :param test_name: name of test\n :type test_name str\n :param location its location\n\n \"\"\"\n if test_name != self.__last_test_name:\n self._test_started(test_name, location)\n self._test_failed(test_name, message=\"Test undefined\", details=\"Please define test\")\n\n def _test_skipped(self, test_name, reason, location):\n \"\"\"\n Mark test as skipped\n :param test_name: name of test\n :param reason: why test was skipped\n :type reason str\n :type test_name str\n :param location its location\n\n \"\"\"\n if test_name != self.__last_test_name:\n self._test_started(test_name, location)\n self.tc_messages.testIgnored(test_name, \"Skipped: {0}\".format(reason))\n self.__last_test_name = None\n pass\n\n\n def _test_failed(self, name, message, details, duration=None):\n \"\"\"\n Report test failure\n :param name: test name\n :type name str\n :param message: failure message\n :type message basestring\n :param details: failure details (probably stacktrace)\n :type details str\n :param duration how long test took\n :type duration int\n \"\"\"\n self.tc_messages.testFailed(name,\n message=VersionAgnosticUtils().to_unicode(message),\n details=details,\n duration=duration)\n self.__last_test_name = None\n\n def _test_passed(self, name, duration=None):\n \"\"\"\n Reports test passed\n :param name: test name\n :type name str\n :param duration: time (in seconds) test took. 
Pass None if you do not know (we'll try to calculate it)\n :type duration int\n :return:\n \"\"\"\n duration_to_report = duration\n if self.__last_test_start_time and not duration: # And not provided\n duration_to_report = int(time.time() - self.__last_test_start_time)\n self.tc_messages.testFinished(name, duration=int(duration_to_report))\n self.__last_test_start_time = None\n self.__last_test_name = None\n\n def _test_started(self, name, location):\n \"\"\"\n Reports test launched\n :param name: test name\n :param location object with \"file\" (relative to base_dir) and \"line\" fields.\n :type name str\n \"\"\"\n self.__last_test_start_time = time.time()\n self.__last_test_name = name\n self.tc_messages.testStarted(name, self.__gen_location(location))\n\n def _feature_or_scenario(self, is_started, name, location):\n \"\"\"\n Reports feature or scenario launched or stopped\n :param is_started: started or finished?\n :type is_started bool\n :param name: scenario or feature name\n :param location object with \"file\" (relative to base_dir) and \"line\" fields.\n \"\"\"\n if is_started:\n self.tc_messages.testSuiteStarted(name, self.__gen_location(location))\n else:\n self.tc_messages.testSuiteFinished(name)\n\n def _background(self, is_started, location):\n \"\"\"\n Reports background or stopped\n :param is_started: started or finished?\n :type is_started bool\n :param location object with \"file\" (relative to base_dir) and \"line\" fields.\n \"\"\"\n self._feature_or_scenario(is_started, \"Background\", location)\n\n def _get_number_of_tests(self):\n \"\"\"\"\n Gets number of tests using \"_get_features_to_run()\" to obtain number of features to calculate.\n Supports backgrounds as well.\n :return number of steps\n :rtype int\n \"\"\"\n num_of_steps = 0\n for feature in self._get_features_to_run():\n if feature.background:\n num_of_steps += len(list(feature.background.steps)) * len(list(feature.scenarios))\n for scenario in feature.scenarios:\n num_of_steps += len(list(scenario.steps))\n return num_of_steps\n\n @abc.abstractmethod\n def _get_features_to_run(self):\n \"\"\"\n Implement it! Return list of features to run. Each \"feature\" should have \"scenarios\".\n Each \"scenario\" should have \"steps\". Each \"feature\" may have \"background\" and each \"background\" should have\n \"steps\". Duck typing.\n :rtype list\n :returns list of features\n \"\"\"\n return []\n\n @abc.abstractmethod\n def _run_tests(self):\n \"\"\"\n Implement it! It should launch tests using your BDD. Use \"self._\" functions to report results.\n \"\"\"\n pass\n","repo_name":"JetBrains/intellij-community","sub_path":"python/helpers/pycharm/_bdd_utils.py","file_name":"_bdd_utils.py","file_ext":"py","file_size_in_byte":9038,"program_lang":"python","lang":"en","doc_type":"code","stars":16005,"dataset":"github-code","pt":"66"} +{"seq_id":"43237054946","text":"class Portfolio(object):\r\n def __init__(self, min_trade_period=0, max_trade_period=1, denom=0.01, cost=3):\r\n self.min_trade_period = min_trade_period\r\n self.max_trade_period = max_trade_period\r\n self.trading_cost = cost\r\n self.reward_normalizer = 1. 
/ denom\r\n self.open_trade = False\r\n\r\n def reset(self, prices, stock):\r\n # Store list of Open price and Close price to manage reward calculation\r\n self._open = prices['Open'].values\r\n self._close = prices['Close'].values\r\n self._index = prices['Date']\r\n self._stock = stock\r\n\r\n self.total_reward = 0\r\n self.total_trades = 0\r\n self.average_profit_per_trade = 0\r\n self.count_open_trades = 0\r\n self.journal = []\r\n self.current_time = 1\r\n self._reset_trade()\r\n self.open_trade = False\r\n\r\n def _reset_trade(self):\r\n self.curr_trade = {'Entry Price': 0, 'Exit Price': 0, 'Entry Time': None, 'Exit Time': None, 'Profit': 0,\r\n 'Trade Duration': 0, 'Type': None, 'reward': 0, 'Stock': self._stock}\r\n\r\n def close_trade(self, curr_close_price, curr_time):\r\n reward = 0\r\n if self.curr_trade['Type'] == 'SELL':\r\n self.count_open_trades -= 1\r\n\r\n # Update remaining keys in curr_trade dict\r\n self.curr_trade['Exit Price'] = curr_close_price\r\n self.curr_trade['Exit Time'] = curr_time\r\n reward = -1 * (curr_close_price - self.curr_trade['Entry Price']) * self.reward_normalizer - self.trading_cost\r\n self.curr_trade['Profit'] = reward\r\n self.curr_trade['reward'] = reward\r\n\r\n if self.curr_trade['Type'] == 'BUY':\r\n self.count_open_trades -= 1\r\n\r\n # Update remaining keys in curr_trade dict\r\n self.curr_trade['Exit Price'] = curr_close_price\r\n self.curr_trade['Exit Time'] = curr_time\r\n reward = (curr_close_price - self.curr_trade['Entry Price']) * self.reward_normalizer - self.trading_cost\r\n self.curr_trade['Profit'] = reward\r\n self.curr_trade['reward'] = reward\r\n\r\n # Add curr_trade to journal, then reset curr_trade\r\n self.journal.append(self.curr_trade)\r\n\r\n self._reset_trade()\r\n self.open_trade = False\r\n\r\n return reward\r\n\r\n def _holding_trade(self, curr_close_price, prev_close_price):\r\n self.curr_trade['Trade Duration'] += 1\r\n return 0\r\n\r\n def step(self, action):\r\n curr_open_price = self._open[self.current_time]\r\n curr_close_price = self._close[self.current_time]\r\n curr_time = self._index.iloc[self.current_time]\r\n prev_close_price = self._close[self.current_time - 1]\r\n reward = 0\r\n\r\n if action == 3 or self.curr_trade['Trade Duration'] >= self.max_trade_period:\r\n # Closing trade or trade duration is reached\r\n if self.curr_trade['Trade Duration'] >= self.min_trade_period:\r\n reward = self.close_trade(curr_close_price, curr_time)\r\n else:\r\n reward = self._holding_trade(curr_close_price, prev_close_price)\r\n\r\n elif action == 1:\r\n if not self.open_trade:\r\n # BUYING\r\n self.curr_trade['Entry Price'] = curr_open_price\r\n self.curr_trade['Type'] = \"BUY\"\r\n self.curr_trade['Entry Time'] = curr_time\r\n self.curr_trade['Trade Duration'] += 1\r\n reward = 0 # (curr_close_price - curr_open_price) * self.reward_normalizer - self.trading_cost\r\n self.total_trades += 1\r\n self.open_trade = True\r\n self.count_open_trades += 1\r\n else:\r\n reward = self._holding_trade(curr_close_price, prev_close_price)\r\n\r\n elif action == 2:\r\n if not self.open_trade:\r\n # SELLING\r\n self.curr_trade['Entry Price'] = curr_open_price\r\n self.curr_trade['Type'] = \"SELL\"\r\n self.curr_trade['Entry Time'] = curr_time\r\n self.curr_trade['Trade Duration'] += 1\r\n reward = 0 # -1 * (curr_close_price - curr_open_price) * self.reward_normalizer - self.trading_cost\r\n self.total_trades += 1\r\n self.open_trade = True\r\n self.count_open_trades += 1\r\n else:\r\n reward = self._holding_trade(curr_close_price, 
prev_close_price)\r\n\r\n elif action == 0:\r\n # Holding trade\r\n if self.open_trade:\r\n reward = self._holding_trade(curr_close_price, prev_close_price)\r\n else:\r\n pass\r\n\r\n self.total_reward += reward\r\n\r\n if self.total_trades > 0:\r\n self.average_profit_per_trade = self.total_reward / self.total_trades\r\n\r\n self.current_time += 1\r\n\r\n info = {'Average reward per trade': self.average_profit_per_trade,\r\n 'Reward for this trade': reward,\r\n 'Total reward': self.total_reward}\r\n\r\n return reward, info\r\n","repo_name":"visoloviev/Trend-Detection","sub_path":"RL/gym_trading/gym_trading/portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12020815443","text":"import random \r\nnumber = (random.randint(0,10))\r\n\r\nplayer_name = input(\"enter your name\")\r\nnumber_of_guess = 0\r\nprint('let\\'s play '+ player_name+ ' I am Guessing a number between 1 and 10:')\r\n\r\nwhile number_of_guess < 3:\r\n guess = int(input())\r\n number_of_guess += 1\r\n if guess < number:\r\n print ('Your number is too low')\r\n if guess > number:\r\n print ('Your number is too high')\r\n if guess == number:\r\n break\r\n if guess == number:\r\n print('You guess the number in a number of' + \" \" + str(number_of_guess) + \" \" + 'tries')\r\n else:\r\n print('You didn\\'t guess the number in a number of' + \" \" + str(number_of_guess) + \" \" + 'tries')\r\n\r\n\r\n","repo_name":"Bea305/Guess-the-number-","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"18835064332","text":"#!/usr/bin/env python3\nfrom states.state import State\nfrom bs4 import BeautifulSoup\nimport requests\n\n\nclass get_xkcd(State):\n def get_other(self, url_ending, next_prev):\n url = \"http://xkcd.com\"+url_ending\n r = requests.get(url)\n parsed_page = BeautifulSoup(r.content, \"html.parser\")\n try:\n previous_url = parsed_page.find(\"a\", {\"rel\":next_prev})[\"href\"]\n except KeyError:\n previous_url = url_ending\n return previous_url\n\n def get_current(self, url):\n r = requests.get(url)\n parsed_page = BeautifulSoup(r.content, \"html.parser\")\n comic_block = parsed_page.find(\"div\", {\"id\": \"comic\"})\n image_tag = comic_block.find(\"img\")\n image_url = image_tag['src']\n image_title = image_tag['title']\n return image_url, image_title\n\n def execute(self, request_data) -> dict:\n context = request_data.get('context')\n old_response = request_data.get('response', False)\n url_ending = context.get('current_comic_xkcd')\n action = self.properties.get('action')\n if action == \"get_previous\":\n url_ending = self.get_other(url_ending, \"prev\")\n url = \"http://xkcd.com/\"+url_ending\n elif action == \"get_next\":\n url_ending = self.get_other(url_ending, \"next\")\n url = \"http://xkcd.com/\"+url_ending\n elif action == \"get_random\":\n url_ending = \"\"\n url = \"http://c.xkcd.com/random/comic\" \n elif action == \"get_current\":\n url = \"http://xkcd.com/\"+url_ending\n image_url, image_title = self.get_current(url)\n context.update({\"current_comic_xkcd\": url_ending})\n tag = \"
{}
\".format(image_url, image_title)\n # add response of this state to list of responses\n image = {'type': 'text', 'payload': {'text': tag}, 'delay': self.properties['delay']}\n if old_response:\n old_response.append(image)\n else:\n old_response = [image]\n # make dictionary with responses and name of next state of dialogue\n request_data.update({'response': old_response, 'next_state': self.transitions.get('next_state', False) })\n return request_data\n\n","repo_name":"AlquistManager/alquist","sub_path":"bots/comics/states/get_xkcd.py","file_name":"get_xkcd.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"66"} +{"seq_id":"42458480103","text":"from Key import Key\nimport pygame\n\nclass Board:\n\n # Creating an empty list for the background and keys.\n background = []\n keys = []\n\n def __init__(self, screen) -> None:\n \"\"\"\n It loads the images for the background and creates the keys.\n \n :param screen: The screen that the game is being played on\n \"\"\"\n # Setting the screen to the screen that the game is being played on.\n self.screen = screen\n\n # Loading the images for the background and appending them to the background list.\n for element in [ \"background\", \"grid\", \"base\"]:\n temp_image = pygame.image.load(\"assets/base/\" + element + \".png\").convert_alpha()\n self.background.append(temp_image)\n\n # Creating a key for each color and adding it to the list of keys.\n for color in [ \"yellow\", \"blue\", \"red\", \"green\" ]:\n self.keys.append(Key(color))\n\n ## Values (in pixels) extracted from Photoshop \n # Yellow Key has an x of 119 and a y of 129\n # Blue Key has an x of 561 and a y of 129\n # Red Key has an x of 119 and a y of 551\n # Green Key has an x of 561 and a y of 551\n\n # Setting the position of the keys.\n count = 0\n for y in [129, 551]:\n for x in [119, 561]:\n self.keys[count].set_position(x, y)\n count += 1\n\n def display(self):\n \"\"\"\n It draws the background and the keys to the screen\n \"\"\"\n # Drawing the background to the screen.\n for element in self.background:\n self.screen.blit(element, (0,0))\n \n # Drawing the keys to the screen.\n for key in self.keys:\n self.screen.blit(key.sprite, (0,0))\n if key.debug:\n pygame.draw.rect(self.screen, (255,0,0), pygame.Rect(key.x - 1, key.y - 1, key.width + 1, key.height + 1), 2)\n\n # It updates the screen.\n pygame.display.flip()","repo_name":"Kori-San/simon","sub_path":"src/Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"24728614922","text":"import numpy as np\nimport tf2onnx\nimport onnxruntime as rt\nimport cv2\n\ndef predict(img, model_path = \"face_liveness.onnx\"):\n if img.shape != (112, 112, 3):\n return -1\n\n dummy_face = np.expand_dims(np.array(img, dtype=np.float32), axis = 0) / 255.\n\n providers = ['CPUExecutionProvider']\n m = rt.InferenceSession(model_path, providers=providers)\n onnx_pred = m.run(['activation_5'], {\"input\": dummy_face})\n print(onnx_pred)\n liveness_score = list(onnx_pred[0][0])[1]\n\n return liveness_score\n\nfake_face_1 = cv2.resize(cv2.imread('ronaldo.png'), (112, 112)) \nfake_face_2 = cv2.resize(cv2.imread('Print_1.png'), (112, 112)) \n\nlive_face_1 = cv2.resize(cv2.imread('facecam.jpg'), (112, 112)) \nlive_face_2 = cv2.resize(cv2.imread('blur.png'), (112, 112))\n\nff1s = predict(fake_face_1)\nff2s = predict(fake_face_2)\n\nlf1s = 
predict(live_face_1)\nlf2s = predict(live_face_2)\n\nprint(\"fake scores:\")\nprint(ff1s)\nprint(ff2s)\n\nprint(\"--------------------\")\n\nprint(\"live scores:\")\nprint(lf1s)\nprint(lf2s)\n\n\n\n","repo_name":"zabir-nabil/onnx-face-liveness","sub_path":"onnx_pred.py","file_name":"onnx_pred.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"5371256421","text":"import SinGAN.functions as functions\nimport SinGAN.models as models\nimport os\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\nimport math\nimport matplotlib.pyplot as plt\nfrom SinGAN.imresize import imresize\n\n\ndef train(options, generator_list, noise_maps_list, real_patch_list, noise_amps_list):\n real_ = functions.read_image(options)\n # print(\"real_ ====\", real_.shape)\n in_s = 0\n scale_num = 0\n real = imresize(real_, options.scale1, options)\n # print(\"real 1 ===\", real.shape)\n\n real_patch_list = functions.creat_reals_pyramid(real, real_patch_list, options)\n\n nfc_prev = 0\n\n while scale_num < options.stop_scale + 1:\n\n options.nfc = min(options.nfc_init * pow(2, math.floor(scale_num / 4)), 128)\n options.min_nfc = min(options.min_nfc_init * pow(2, math.floor(scale_num / 4)), 128)\n\n options.out_ = functions.generate_dir2save(options)\n options.outf = '%s/%d' % (options.out_, scale_num)\n try:\n os.makedirs(options.outf)\n except OSError:\n pass\n\n # plt.imsave('%s/in.png' % (opt.out_), functions.convert_image_np(real), vmin=0, vmax=1)\n # plt.imsave('%s/original.png' % (opt.out_), functions.convert_image_np(real_), vmin=0, vmax=1)\n plt.imsave('%s/real_scale.png' % options.outf, functions.convert_image_np(real_patch_list[scale_num]), vmin=0,\n vmax=1)\n\n current_discriminator, current_generator = init_models(options)\n\n if nfc_prev == options.nfc:\n current_generator.load_state_dict(torch.load('%s/%d/netG.pth' % (options.out_, scale_num - 1)))\n current_discriminator.load_state_dict(torch.load('%s/%d/netD.pth' % (options.out_, scale_num - 1)))\n\n\n z_curr, in_s, current_generator = train_single_scale(current_discriminator,\n current_generator,\n real_patch_list,\n generator_list,\n noise_maps_list,\n in_s,\n noise_amps_list,\n options)\n\n current_generator = functions.reset_grads(current_generator, False)\n current_generator.eval()\n current_discriminator = functions.reset_grads(current_discriminator, False)\n current_discriminator.eval()\n\n generator_list.append(current_generator)\n\n # We append the current noise map to the noise map list\n noise_maps_list.append(z_curr)\n noise_amps_list.append(options.noise_amp)\n\n torch.save(noise_maps_list, '%s/Zs.pth' % (options.out_))\n torch.save(generator_list, '%s/Gs.pth' % (options.out_))\n torch.save(real_patch_list, '%s/reals.pth' % (options.out_))\n torch.save(noise_amps_list, '%s/NoiseAmp.pth' % (options.out_))\n\n scale_num += 1\n nfc_prev = options.nfc\n del current_discriminator, current_generator\n return\n\n\ndef train_single_scale(curr_discriminator, curr_generator, real_patch_pyramid, curr_generator_list, noise_patch_list,\n in_s, noise_amps_list, opt, centers=None):\n '''\n From the SinGAN paper: The generation of an image sample starts at the coarsest\n # scale and sequentially passes through all generators up to\n # the finest scale, with noise injected at every scale. 
All the\n # generators and discriminators have the same receptive field\n # and thus capture structures of decreasing size as we go up\n # the generation process. At the coarsest scale, the generation\n # is purely generative, i.e. GN maps spatial white Gaussian\n # noise zN to an image sample $\\tilde{x}_N$,\n\n Parameters\n ----------\n curr_discriminator\n curr_generator\n real_patch_pyramid\n curr_generator_list\n noise_patch_list\n in_s\n noise_amps_list\n opt\n centers\n\n Returns\n -------\n\n '''\n\n # We take one specific path of the pyramid.\n current_real_patch = real_patch_pyramid[len(curr_generator_list)]\n # print(\"real shape===\", real.shape)\n # opt.ker_size -> number of kernels (globally fixed)\n # opt.num_layer -> number of layers (globally fixed)\n\n # These two fellas are the ones that change when we change the learning scale\n # They indicate the height and width of the current scale path.\n opt.nzx = current_real_patch.shape[2] # +(opt.ker_size-1)*(opt.num_layer)\n opt.nzy = current_real_patch.shape[3] # +(opt.ker_size-1)*(opt.num_layer)\n\n # receptive field = fixed_kernel_size + (( fixed_kernel_size -1 ) * fixed_number_of_layers * fixed_stride )\n # The receptive field si fixed for all scales... the paddings of noise and image patches also!!\n opt.receptive_field = opt.ker_size + ((opt.ker_size - 1) * (opt.num_layer - 1)) * opt.stride\n pad_noise = int(((opt.ker_size - 1) * opt.num_layer) / 2)\n pad_image = int(((opt.ker_size - 1) * opt.num_layer) / 2)\n\n if opt.mode == 'animation_train':\n opt.nzx = current_real_patch.shape[2] + (opt.ker_size - 1) * (opt.num_layer)\n opt.nzy = current_real_patch.shape[3] + (opt.ker_size - 1) * (opt.num_layer)\n pad_noise = 0\n\n # These torch objects serve as pad adders for whatever tensor they are feed with.\n noise_padder_layer = nn.ZeroPad2d(int(pad_noise))\n image_padder_layer = nn.ZeroPad2d(int(pad_image))\n\n alpha = opt.alpha\n\n # The following noise vector are varying for each scale training, because they are noise maps that share\n # the dimension with each scale-specific image-patch.\n fixed_noise = functions.generate_noise([opt.nc_z, opt.nzx, opt.nzy], device=opt.device)\n z_opt = torch.full(fixed_noise.shape, 0, device=opt.device, dtype=torch.bool)\n z_opt = noise_padder_layer(z_opt)\n\n # setup optimizers\n optimizerD = optim.Adam(curr_discriminator.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))\n optimizerG = optim.Adam(curr_generator.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999))\n schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[1600], gamma=opt.gamma)\n schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[1600], gamma=opt.gamma)\n\n errD2plot = []\n errG2plot = []\n D_real2plot = []\n D_fake2plot = []\n z_opt2plot = []\n\n for epoch in range(opt.niter):\n\n '''\n THIS BLOCK GENERATES A NOISE TENSOR that is going to be used to feed the generator and train the discriminator, \n (however, it is going to be refined later in the code before being fed to the generator...)\n '''\n # guess SR_train stands for Super-resolution training, need to check SinGAN paper...\n if (curr_generator_list == []) & (opt.mode != 'SR_train'):\n # If we are in the first scale, and we are not in the SR_train setting, then we create 2 noise maps.\n\n # Notice that, if we are in the case of the coarsest scale,\n # z_opt is overwritten by a noise map that repeats the noise signal over 4 channels...\n # By doing so, SinGAN-seg authors condition the generative 
machinery on a 4-channel repeated noise patch\n z_opt = functions.generate_noise([1, opt.nzx, opt.nzy], device=opt.device)\n # The expand operation is a kind of shape transformation, where a broadcast operation\n # is done to fill the expanded dimensions.\n # At this point z_opt is a (1,1,x,y) tensor, and\n # with the expand operation we turn it to a (1,4,x,y) tensor, so we\n # repeat the whole noise map for 4 channels.\n expanded_z_opt = z_opt.expand(1, opt.nc_z, opt.nzx, opt.nzy)\n # Notice that SinGAN-seg changes the second parameter from 3 to opt.nc_z\n # in the previous line (w.r.t the SinGAN paper)\n z_opt = noise_padder_layer(expanded_z_opt)\n\n # Now the noise_ tensor is going to be the noise map for learning purposes...\n # We only need this one when we are not at the coarsest scale\n noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], device=opt.device)\n noise_ = noise_padder_layer(noise_.expand(1, opt.nc_z, opt.nzx, opt.nzy))\n # Notice that SinGAN-seg changes the second parameter from 3 to opt.nc_z\n # in the previous line (w.r.t the SinGAN paper)\n else:\n noise_ = functions.generate_noise([opt.nc_z, opt.nzx, opt.nzy], device=opt.device)\n noise_ = noise_padder_layer(noise_)\n '''\n END OF BLOCK (EOB) \n '''\n\n ##\n # (1) Update D network:\n # maximize D(x) - D(G(z))\n # Or, equivalently:\n # minimize D(G(z)) - D(x)\n ##\n\n # We repeat this process Dsteps times... (Dsteps is the discriminator inner steps)\n for j in range(opt.Dsteps):\n\n # train with real\n curr_discriminator.zero_grad()\n # pass the real patch through the discriminator.\n output = curr_discriminator(current_real_patch).to(opt.device)\n\n # We want to maximize this output, or, equivalently,\n # minimize the negative of this output ;)\n # The torch optimizer (Adam) will do a gradient descent step, so we\n # can compute the gradient w.r.t. a loss function to MINIMIZE\n err_discriminator_real = - output.mean()\n err_discriminator_real.backward(retain_graph=True)\n\n # Now, this maximization True positive score, is saved just for plotting purposes ;)\n discr_output_real = - err_discriminator_real.item()\n\n # train with fake\n # the first thing to do is generate the fake sample.\n # The fake sample is generated feeding the generator with a noise tensor,\n # and an up-sampled version of the previous fake sample\n\n '''\n The following block generates:\n - The up-sampled version of the previous fake patch and calls is \"prev_random_patch\"\n - the \"spatial noise\" tensor as referred in the SinGAN paper\n calls it \"noise\". 
(it refines the noise_ tensor previously generated).\n '''\n\n # First we generate the up-sampled version of the previous fake patch and calls is \"prev_random_patch\"\n # We have to distinguish two cases: case A and case B.\n # case A\n if (j == 0) & (epoch == 0):\n # THE FOLLOWING is done ONLY IN THE FIRST INNER STEP\n # OF THE FIRST EPOCH OF EVERY SCALE-specific training.\n\n # case A.1\n if (curr_generator_list == []) & (opt.mode != 'SR_train'):\n # If we are in the first scale and NOT in super-resolution downstream task.\n # In this case, our \"previous\" scale real-patch is a zeros tensor: (in this code, the authors of\n # SinGAN-Seg have transformed it to a \"falses\" tensor, with the boolean-type specification.\n\n # Notice that the dimensions of this tensor match with the dimension of the current patches,\n # so if it has to do (for now, only in theory) with the \"previous patch\", then it should be used\n # to represent an up-sampled version of it.\n\n # in the first scale, the up-sampling is done creating directly this zero boolean tensor\n prev_random_patch = torch.full([1, opt.nc_z, opt.nzx, opt.nzy], 0, device=opt.device, dtype=torch.bool)\n # up to this point, in_s was a 0 integer :/,\n in_s = prev_random_patch\n prev_random_patch = image_padder_layer(prev_random_patch)\n\n # Same story holds for the prev_reconstructed_patch (used for the reconstruction error)\n prev_reconstructed_patch = torch.full([1, opt.nc_z, opt.nzx, opt.nzy], 0, device=opt.device, dtype=torch.bool)\n prev_reconstructed_patch = noise_padder_layer(prev_reconstructed_patch)\n\n opt.noise_amp = 1\n\n # case A.2\n elif opt.mode == 'SR_train':\n # if we are in the SR task (irrespective of the scale), need further study: (but remember,\n # we are in the first epoch and in the first inner step\n # of the current scale training, i.e., we are in case A)\n prev_reconstructed_patch = in_s\n criterion = nn.MSELoss()\n RMSE = torch.sqrt(criterion(current_real_patch, prev_reconstructed_patch))\n opt.noise_amp = opt.noise_amp_init * RMSE\n prev_reconstructed_patch = image_padder_layer(prev_reconstructed_patch)\n prev_random_patch = prev_reconstructed_patch\n\n # case A.3\n else:\n # not first scale, nor SR task: (but remember, we are in the first epoch and in the first inner step\n # of the current scale training, i.e., we are in case A) note that now in_s contains the previous\n # scale image patch because we have passed through case A.1 necessarily\n # (and also through case B at least 2 times)\n mode_param = 'rand'\n # setting the mode_param variable to \"rand\" we are now generating the up-sampled version of the\n # previous fake patch using new random noise vectors.\n prev_random_patch = draw_concat(curr_generator_list, noise_patch_list, real_patch_pyramid, noise_amps_list,\n in_s, mode_param, noise_padder_layer, image_padder_layer, opt)\n prev_random_patch = image_padder_layer(prev_random_patch)\n\n mode_param = 'rec'\n # In the paper $\\tilde{x}^{rec}_{n+1}$ is \"the generated image at the nth scale when using these\n # noise maps.\" When it says \"these noise maps, it refers to a fixed set of noise maps, here\n # referenced by noise_patch_list. 
We use them to generate $\\tilde{x}^{rec}_{n+1}$ when we invoke\n # draw_concat with \"rec\" as the value of the mode parameter\n prev_reconstructed_patch = draw_concat(curr_generator_list, noise_patch_list, real_patch_pyramid,\n noise_amps_list, in_s, mode_param, noise_padder_layer,\n image_padder_layer, opt)\n\n criterion = nn.MSELoss()\n RMSE = torch.sqrt(criterion(current_real_patch, prev_reconstructed_patch))\n opt.noise_amp = opt.noise_amp_init * RMSE\n\n prev_reconstructed_patch = image_padder_layer(prev_reconstructed_patch)\n\n # case B\n else:\n # the following is done in the second and third inner steps of the first epoch and in every inner step\n # of the rest of the 1999 epochs notice it doesn't maters which scale we are in.\n # We have already passed through case A.1 necessarily, so in_s contains a tensor of the same shape of\n # the current image patch. If we are in the coarsest scale, we know is a \"falses\" tensor (a zeros\n # boolean tensor) but we could be in other scales, in such a case it is the previous fake image patch.\n # ($\\hat{x}_{n+1}$ in the paper).\n\n # The following function creates the up-sampled version of this previous fake image patch:\n # Notice that, at every iteration, we inject different spatial noise in the process of up-sampling\n # through the mode parameter, which is, in this case, set to \"rand\". see the following function.\n prev_random_patch = draw_concat(curr_generator_list, noise_patch_list, real_patch_pyramid, noise_amps_list, in_s,\n 'rand', noise_padder_layer, image_padder_layer, opt)\n prev_random_patch = image_padder_layer(prev_random_patch)\n\n ##\n # don't know what this is... will study some day...\n ##\n if opt.mode == 'paint_train':\n prev_random_patch = functions.quant2centers(prev_random_patch, centers)\n plt.imsave('%s/prev_random_patch.png' % (opt.outf), functions.convert_image_np(prev_random_patch), vmin=0, vmax=1)\n\n ##\n # FINAL STEP TO GENERATE THE NOISE:\n ##\n # Now we refine the noise tensor previously generated (i.e. noise_) and call it \"noise\":\n if (curr_generator_list == []) & (opt.mode != 'SR_train'):\n noise = noise_\n else:\n # if we are not in the first scale,\n # or if we are in the SR downstream task, we could also be in the first scale...\n\n # \"Specifically, the noise zn is added to the [upsampled] image [path]\n # prior to being fed into a sequence of convolutional layers. This ensures that the GAN does not\n # disregard the noise, as often happens in conditional schemes involving randomness\"\n # (from the SinGAN paper). 
That explains the following line of code:\n noise = opt.noise_amp * noise_ + prev_random_patch\n\n '''\n EOB\n '''\n\n ##\n # Now we generate our fake sample :)\n # From the SinGAN paper: The input to Gn is a random noise image zn, and the generated image from the\n # previous scale $\\tilde{x}_n$, up-sampled to the current resolution (except for\n # the coarsest level which is purely generative).\n # Also, in other parts: Thus, in addition\n # to spatial noise zn, each generator Gn accepts an up-sampled\n # version of the [generated] image from the coarser scale\n fake = curr_generator(noise.detach(), prev_random_patch)\n\n # We want to minimize the output of the discriminator when fed with a fake sample...\n output = curr_discriminator(fake.detach())\n\n err_discriminator_fake = output.mean()\n err_discriminator_fake.backward(retain_graph=True)\n\n discr_output_fake = output.mean().item()\n\n # This kind of regularization term is what completes the formation of the WGAN-GP loss that we are using.\n # as mentioned in the SinGAN paper. Very elegant ;)\n gradient_penalty = functions.calc_gradient_penalty(curr_discriminator, current_real_patch, fake, opt.lambda_grad,\n opt.device)\n gradient_penalty.backward()\n\n # Notice this stuff is only for reporting and does not interfere with the gradient descent.\n discriminator_error = err_discriminator_real + err_discriminator_fake + gradient_penalty\n\n # We have already computed all the gradients that we are interested in, so we now just perform the gradient\n # descent. We have not given weights to each one of the loss terms... maybe we can experiment on that\n # I don't remember if the paper mentions something about that,\n # neither I have studied completely the WGAN-GP paper...\n optimizerD.step()\n\n # house-made reporting... 
what about using W&B??\n errD2plot.append(discriminator_error.detach())\n\n\n ############################\n # (2) Update G network: maximize D(G(z))\n ###########################\n\n for j in range(opt.Gsteps):\n curr_generator.zero_grad()\n output = curr_discriminator(fake)\n # we want to maximize this output (this time, using the generator's parameters).\n # it is equivalent to minimize its negative:\n # (our optimizers minimizes by default, so we do this second thing).\n generator_error = -output.mean()\n generator_error.backward(retain_graph=True)\n\n if alpha != 0:\n # this means the reconstruction loss weight is not zero.\n # so we need to compute the reconstruction loss.\n loss = nn.MSELoss()\n if opt.mode == 'paint_train':\n prev_reconstructed_patch = functions.quant2centers(prev_reconstructed_patch, centers)\n plt.imsave('%s/prev_reconstructed_patch.png' % (opt.outf), functions.convert_image_np(prev_reconstructed_patch), vmin=0, vmax=1)\n final_noise_map = opt.noise_amp * z_opt + prev_reconstructed_patch\n reconstructed_image = curr_generator(final_noise_map.detach(), prev_reconstructed_patch)\n reconstruction_loss = alpha * loss(reconstructed_image, current_real_patch)\n reconstruction_loss.backward(retain_graph=True)\n reconstruction_loss = reconstruction_loss.detach()\n else:\n final_noise_map = z_opt\n reconstruction_loss = 0\n # print(\"Error is here...!\")\n # generator_error.backward(retain_graph=True)\n optimizerG.step()\n\n errG2plot.append(generator_error.detach() + reconstruction_loss)\n D_real2plot.append(discr_output_real)\n D_fake2plot.append(discr_output_fake)\n z_opt2plot.append(reconstruction_loss)\n\n if epoch % 25 == 0 or epoch == (opt.niter - 1):\n print('scale %d:[%d/%d]' % (len(curr_generator_list), epoch, opt.niter))\n\n if epoch % 500 == 0 or epoch == (opt.niter - 1):\n plt.imsave('%s/fake_sample.png' % (opt.outf), functions.convert_image_np(fake.detach()), vmin=0, vmax=1)\n plt.imsave('%s/G(z_opt).png' % (opt.outf),\n functions.convert_image_np(curr_generator(final_noise_map.detach(),\n prev_reconstructed_patch).detach()),\n vmin=0, vmax=1)\n # plt.imsave('%s/D_fake.png' % (opt.outf), functions.convert_image_np(D_fake_map))\n # plt.imsave('%s/D_real.png' % (opt.outf), functions.convert_image_np(D_real_map))\n # plt.imsave('%s/z_opt.png' % (opt.outf), functions.convert_image_np(z_opt.detach()), vmin=0, vmax=1)\n # plt.imsave('%s/prev_random_patch.png' % (opt.outf), functions.convert_image_np(prev_random_patch), vmin=0, vmax=1)\n # plt.imsave('%s/noise.png' % (opt.outf), functions.convert_image_np(noise), vmin=0, vmax=1)\n # plt.imsave('%s/prev_reconstructed_patch.png' % (opt.outf), functions.convert_image_np(prev_reconstructed_patch), vmin=0, vmax=1)\n\n torch.save(z_opt, '%s/z_opt.pth' % (opt.outf))\n\n schedulerD.step()\n schedulerG.step()\n\n functions.save_networks(curr_generator, curr_discriminator, z_opt, opt)\n\n return z_opt, in_s, curr_generator\n\n\ndef draw_concat(list_of_generators, noise_patch_list, real_patches_pyramid,\n noise_amps_list, in_s, mode, noise_padder_layer, image_padder_layer, opt):\n '''\n Generates an up-sampled version of a previous lowest scale image patch THROUGH A GENERATIVE PROCESS.\n To do that, it uses the generator network and the set of possible lowest scale patches\n of the current scale patch. This is the reason whe it receives in input a list of generator models.\n From the SinGAN paper: The generation process at level n involves all generators $ \\{ G_{N} . . . G_{n} \\} $ and\n all noise maps $ \\{ z_N, . . . 
, z_n \\} $ up to this level.\n\n Parameters\n ----------\n list_of_generators\n noise_patch_list: if mode is set to \"rand\", then this fella is only used in this function to determine\n the shape of the current scale noise vectors. if mode is set to \"rec\" instead, the whole list of saved noise\n maps is used to feed the generators alongside with patches of the provided image to generate an up-sampled version of\n the current patch using the generators.\n\n tensors that are generated...\n real_patches_pyramid\n noise_amps_list\n in_s:\n mode: if \"rand\", then the image is generated with the usage of new pseudo random noise maps and patches of the ù\n provided image. If set to \"rec\", instead, the up-sampling process will use the provided list of saved noise maps to\n produce the up-sampled output.\n noise_padder_layer\n image_padder_layer\n opt\n\n Returns The generated up-sampled image patch.\n -------\n\n '''\n\n fake_image_patch = in_s\n\n if len(list_of_generators) > 0:\n\n count = 0\n z = 0\n # the following pad_noise variable is defined in the train_single_scale function, with the same exact value.\n # Notice it is scale independent.\n pad_noise = int(((opt.ker_size - 1) * opt.num_layer) / 2)\n\n if opt.mode == 'animation_train':\n pad_noise = 0\n\n # we now iterate inside the provided lists of generators and noise maps:\n for current_generator, curr_noise_patch, real_curr, real_next, noise_amp \\\n in zip(list_of_generators, noise_patch_list, real_patches_pyramid, real_patches_pyramid[1:],\n noise_amps_list):\n\n if mode == 'rand':\n # We are generating new noise patches for each scale in this inner loop\n # we now manually remove the padding overhead from the current noise patch\n noise_width = curr_noise_patch.shape[2] - 2 * pad_noise\n noise_height = curr_noise_patch.shape[3] - 2 * pad_noise\n\n if count == 0:\n # count==0 means we are considering the coarsest scale generator, noise and image patch...\n # In the coarsest scale, our noise map must be a repetition of a 2-d noise map over the four\n # channels:\n z = functions.generate_noise([1, noise_width, noise_height], device=opt.device)\n z = z.expand(1, opt.nc_z, z.shape[2], z.shape[3])\n # Notice that SinGAN-seg changes the second parameter from 3 to opt.nc_z\n # in the previous line (w.r.t the SinGAN paper)\n else:\n # If we are not in the coarsest scale, we generate a 4d noise map, each channel independent.\n z = functions.generate_noise([opt.nc_z, noise_width, noise_height], device=opt.device)\n\n z = noise_padder_layer(z)\n\n if mode == 'rec':\n # 'rec' stands for reconstruction. So we are not generating noise but using the saved\n # noise maps to produce the output we want to reconstruct.\n z = curr_noise_patch\n\n # fake_image_patch is G(z), i.e. a fake batch, in particular, the previous fake batch\n # (with respect to the corresponding scale training.)\n # Now, in this inner \"for\" we are again looping through all scales (real_curr),\n # having this fake_image_patch fixed. So it could be big, (as big as the semi-last finer scale),\n # and thus, real_curr could be smaller. 
That is why this line of code takes only a part of\n # fake_image_patch of the same dimension of real_curr.\n fake_image_patch = fake_image_patch[:, :, 0:real_curr.shape[2], 0:real_curr.shape[3]]\n # we have an image patch, we just need to add the padding\n fake_image_patch = image_padder_layer(fake_image_patch)\n # we do the adding strategy not to induce the generator disregarding noise...\n z_in = noise_amp * z + fake_image_patch\n # and now we feed our generator with this \"spatial noise\" and the previous patch\n fake_image_patch = current_generator(z_in.detach(), fake_image_patch)\n # we have generated a fake batch using the previous patch. NOW WE DO THE UP-SAMPLING:\n # we are resizing to a scale which is > 1, thus, up-sampling...\n # notice we overwrite fake_image_patch\n fake_image_patch = imresize(fake_image_patch, 1 / opt.scale_factor, opt)\n # Once we have done the up-sampling, we ensure that our generated patch has the dimensions\n # of the generated pyramid. (the upsampling function uses a real scale factor that could produce\n # some excess of pixels in the resulting image...\n fake_image_patch = fake_image_patch[:, :, 0:real_next.shape[2], 0:real_next.shape[3]]\n count += 1\n\n return fake_image_patch\n\n\ndef train_paint(opt, Gs, Zs, reals, noise_amps_list, centers, paint_inject_scale):\n in_s = torch.full(reals[0].shape, 0, device=opt.device, dtype=torch.bool)\n scale_num = 0\n nfc_prev = 0\n\n while scale_num < opt.stop_scale + 1:\n if scale_num != paint_inject_scale:\n scale_num += 1\n nfc_prev = opt.nfc\n continue\n else:\n opt.nfc = min(opt.nfc_init * pow(2, math.floor(scale_num / 4)), 128)\n opt.min_nfc = min(opt.min_nfc_init * pow(2, math.floor(scale_num / 4)), 128)\n\n opt.out_ = functions.generate_dir2save(opt)\n opt.outf = '%s/%d' % (opt.out_, scale_num)\n try:\n os.makedirs(opt.outf)\n except OSError:\n pass\n\n # plt.imsave('%s/in.png' % (opt.out_), functions.convert_image_np(real), vmin=0, vmax=1)\n # plt.imsave('%s/original.png' % (opt.out_), functions.convert_image_np(real_), vmin=0, vmax=1)\n plt.imsave('%s/in_scale.png' % (opt.outf), functions.convert_image_np(reals[scale_num]), vmin=0, vmax=1)\n\n D_curr, G_curr = init_models(opt)\n\n z_curr, in_s, G_curr = train_single_scale(D_curr, G_curr, reals[:scale_num + 1], Gs[:scale_num],\n Zs[:scale_num], in_s, noise_amps_list[:scale_num], opt,\n centers=centers)\n\n G_curr = functions.reset_grads(G_curr, False)\n G_curr.eval()\n D_curr = functions.reset_grads(D_curr, False)\n D_curr.eval()\n\n Gs[scale_num] = G_curr\n Zs[scale_num] = z_curr\n noise_amps_list[scale_num] = opt.noise_amp\n\n torch.save(Zs, '%s/Zs.pth' % (opt.out_))\n torch.save(Gs, '%s/Gs.pth' % (opt.out_))\n torch.save(reals, '%s/reals.pth' % (opt.out_))\n torch.save(noise_amps_list, '%s/NoiseAmp.pth' % (opt.out_))\n\n scale_num += 1\n nfc_prev = opt.nfc\n del D_curr, G_curr\n return\n\n\ndef init_models(opt):\n # generator initialization:\n netG = models.GeneratorConcatSkip2CleanAdd(opt).to(opt.device)\n netG.apply(models.weights_init)\n if opt.netG != '':\n netG.load_state_dict(torch.load(opt.netG))\n print(netG)\n\n # discriminator initialization:\n netD = models.WDiscriminator(opt).to(opt.device)\n netD.apply(models.weights_init)\n if opt.netD != '':\n netD.load_state_dict(torch.load(opt.netD))\n print(netD)\n\n return netD, 
netG\n","repo_name":"QwertyJacob/singan-seg-commented","sub_path":"SinGAN/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":31429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"70271254612","text":"class Solution:\n def addBinary(self, a: str, b: str) -> str:\n result = []\n str_result = ''\n i = len(a)-1\n j = len(b)-1\n\n carry = 0\n while (i>=0 or j >= 0):\n digitA = int(a[i]) if i>=0 else 0\n digitB = int(b[j]) if j>=0 else 0\n i -= 1\n j -= 1\n sum = digitA + digitB + carry\n carry = sum // 2\n result.append(sum%2)\n\n if carry == 1:\n result.append(1)\n\n for i in range(len(result)-1,-1,-1):\n str_result += str(result[i])\n return str_result\n\nif __name__ == \"__main__\":\n s = Solution()\n result = s.addBinary(\"1\",\"1\")\n print(result)","repo_name":"ChengFengGu/offer","sub_path":"二进制/二进制加法.py","file_name":"二进制加法.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"29838065339","text":"# Видалення символів: Реалізуйте генератор для видалення певного символу з рядка.\ndef remove_char(string, char_to_remove):\n for char in string:\n if char != char_to_remove:\n yield char\n\n\nstring = 'Hello, World!'\nchar_to_remove = 'l'\n\nremove_char_generator = remove_char(string, char_to_remove)\n\nnew_string = ''.join([char for char in remove_char_generator])\n\nprint(new_string)\n","repo_name":"ikramarenko1/pythonMainAcademy","sub_path":"lesson_17/generator/remove_char.py","file_name":"remove_char.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36096568229","text":"from arrow import get as get_ts\n\nfrom sqlalchemy import Column, Integer, DateTime, UnicodeText, Text\nfrom sqlalchemy.sql.expression import func\n\nfrom csirtg_fm.archiver.constants import BASE\n\n\nclass Indicator(BASE):\n __tablename__ = \"indicators\"\n\n id = Column(Integer, primary_key=True)\n indicator = Column(UnicodeText, index=True)\n group = Column(Text)\n provider = Column(Text)\n first_at = Column(DateTime)\n last_at = Column(DateTime)\n tags = Column(Text)\n created_at = Column(DateTime, default=func.now())\n\n def __init__(self, indicator=None, group='everyone', provider=None,\n first_at=None, last_at=None, tags=None):\n\n self.indicator = indicator\n self.group = group\n self.provider = provider\n self.first_at = first_at\n self.last_at = last_at\n self.tags = tags\n\n if isinstance(group, list):\n self.group = group[0]\n\n if isinstance(self.tags, list):\n self.tags.sort()\n self.tags = ','.join(self.tags)\n\n if self.last_at and isinstance(self.last_at, (str, bytes)):\n self.last_at = get_ts(self.last_at).datetime\n\n if self.first_at and isinstance(self.first_at, (str, bytes)):\n self.first_at = get_ts(self.first_at).datetime\n","repo_name":"csirtgadgets/csirtg-fm-v2","sub_path":"csirtg_fm/archiver/indicator.py","file_name":"indicator.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12321646281","text":"import requests\nimport json\nimport unittest\nfrom NewPad.PubNew import TestLogin\n\n\nclass LiveBack(TestLogin):\n\n data = {\n \"token\":TestLogin.ld[\"token\"],\n \"pageIndex\":\"\",\n \"pageSize\":\"\",\n \"lastTime\":\"\",\n \"status\":\"1\",\n \"state\":\"1\",\n \"videoBack\":\"\"\n }\n\n def test_Back_case(self):\n 
code = requests.request(\"POST\",TestLogin.livevack,data=self.data)\n self.assertEquals(code.status_code,200)","repo_name":"Hanlen520/YoushiInterface","sub_path":"NewPad/test_livesback.py","file_name":"test_livesback.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36732199161","text":"from django import forms as django_forms\nimport floppyforms.__future__ as forms\nfrom django.forms.utils import ValidationError\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import get_user_model\n\nfrom core.models import Faculty, Course, Topic\nfrom lecturer.models import Lecturer\nfrom student.models import Tma, Exam, TmaQuestion, ExamQuestion\n\nfrom student.choices import GENDER, TYPES, TMA\n\nUser = get_user_model()\n\n\nclass LecturerSignUpForm(UserCreationForm):\n # Declare user option fields to form\n first_name = forms.CharField(required=True, widget=forms.TextInput(\n attrs={'placeholder': 'First name'}))\n other_name = forms.CharField(required=True, widget=forms.TextInput(\n attrs={'placeholder': 'Other name'}))\n last_name = forms.CharField(required=True, widget=forms.TextInput(\n attrs={'placeholder': 'Surname '}))\n birth_place = forms.ChoiceField(required=True)\n sex = forms.ChoiceField(required=True, choices=GENDER)\n birth_date = forms.DateField(required=True, widget=forms.DateInput)\n email = forms.EmailField(widget=forms.EmailInput(\n attrs={'placeholder': 'Enter email address'}), required=True)\n phone = forms.CharField(required=True, widget=forms.PhoneNumberInput(\n attrs={'placeholder': 'Mobile Number'}))\n address = forms.CharField(required=False, widget=forms.TextInput(attrs={'placeholder': 'House/Street/City/Town '}),\n max_length=100)\n faculty = forms.ModelChoiceField(\n queryset=Faculty.objects.all(), required=False)\n\n class Meta:\n model = User\n fields = ('first_name', 'other_name', 'last_name', 'sex', 'birth_place', 'address',\n 'phone', 'email', 'faculty', 'birth_date', 'username',)\n\n # Add placeholder to UserCreationForm fields\n def __init__(self, *args, **kwargs):\n super(LecturerSignUpForm, self).__init__(*args, **kwargs)\n self.fields['username'].widget.attrs.update(\n {'placeholder': 'Choose A Unique Username'})\n self.fields['password1'].widget.attrs.update(\n {'placeholder': 'Choose A Password'})\n self.fields['password2'].widget.attrs.update(\n {'placeholder': 'Verify Password'})\n\n # Check if inputted email has not been used by another user\n def clean_email(self):\n email = self.cleaned_data['email']\n check = User.objects.values('email')\n if email in check:\n msg = 'this email has been used!'\n self.add_error('email', msg)\n return email\n\n def save(self, commit=True):\n user = super().save(commit=False)\n user.is_lecturer = True\n if commit:\n user.save()\n # Create lecturer object with user id\n Lecturer.objects.create(user=user)\n\n return user\n\n\nclass PassportForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ('photo',)\n\n\nclass TmaForm(forms.ModelForm):\n title = forms.ChoiceField(choices=TMA, required=True)\n\n class Meta:\n model = Tma\n fields = ['title', 'course']\n\n # Filter Tma courses list for appointed lecturer\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop(\"request\")\n super().__init__(*args, **kwargs)\n self.fields[\"course\"].queryset = Course.objects.filter(\n lecturer=self.request.user.lecturer)\n\n\nclass QuestionForm(forms.ModelForm):\n text = 
forms.CharField(required=True,\n widget=forms.Textarea(\n attrs={'rows': 2, 'placeholder': 'Enter your Question.'}),\n max_length=4000,\n help_text='The max length of the question is 4000.')\n\n class Meta:\n model = TmaQuestion\n fields = ('text',)\n\n\nclass ExamQuestionForm(forms.ModelForm):\n type = forms.ChoiceField(required=True, choices=TYPES, widget=forms.RadioSelect)\n text = forms.CharField(required=True,\n widget=forms.Textarea(\n attrs={'rows': 2, 'placeholder': 'Enter your Question.'}),\n max_length=4000,\n help_text='The max length of the question is 4000.')\n\n class Meta:\n model = ExamQuestion\n fields = ('text', 'type')\n\n\nclass ExamQuestionUpdateForm(forms.ModelForm):\n text = forms.CharField(required=True,\n widget=forms.Textarea(\n attrs={'rows': 2, 'placeholder': 'Enter your Question.'}),\n max_length=4000,\n help_text='The max length of the question is 4000.')\n\n class Meta:\n model = ExamQuestion\n fields = ('text',)\n\n\nclass BaseAnswerInlineFormSet(forms.BaseInlineFormSet):\n\n def clean(self):\n super().clean()\n # Check that an answer is selected for a question\n has_one_correct_answer = False\n for form in self.forms:\n if not form.cleaned_data.get('DELETE', False):\n if form.cleaned_data.get('is_correct', False):\n has_one_correct_answer = True\n break\n if not has_one_correct_answer:\n raise ValidationError(\n 'Mark at least one answer as correct.', code='no_correct_answer')\n\n\nclass ExamForm(forms.ModelForm):\n class Meta:\n model = Exam\n fields = ['course', ]\n\n # Filter Exam courses list for appointed lecturer\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop(\"request\")\n super().__init__(*args, **kwargs)\n self.fields[\"course\"].queryset = Course.objects.filter(\n lecturer=self.request.user.lecturer)\n\n\nclass NewTopicForm(forms.ModelForm):\n subject = forms.CharField(widget=forms.TextInput(\n attrs={'placeholder': 'Add Subject'}))\n files = forms.FileField(required=False)\n\n class Meta:\n model = Topic\n fields = ['subject', 'course', 'message', 'files']\n\n # Filter topic courses list for appointed lecturer\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop(\"request\")\n super().__init__(*args, **kwargs)\n self.fields[\"course\"].queryset = Course.objects.filter(\n lecturer=self.request.user.lecturer)\n\n\nclass UpdateTopicForm(forms.ModelForm):\n class Meta:\n model = Topic\n fields = ('subject', 'message', 'files')\n","repo_name":"certified-dev/mini-lms","sub_path":"lecturer/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12278302174","text":"import os\nimport sys\nimport json\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.backends import cudnn\nimport torch.nn as nn\nimport torchvision\nfrom PIL import Image\nfrom utils.func import *\nfrom utils.vis import *\nfrom utils.IoU import *\nfrom utils.augment import *\nimport argparse\nfrom resnet import model\nfrom skimage import measure\n\nparser = argparse.ArgumentParser(description='Parameters for PSOL evaluation')\nparser.add_argument('--loc-model', metavar='locarg', type=str, default='resnet50',dest='locmodel')\nparser.add_argument('--cls-model', metavar='clsarg', type=str, 
default='vgg16',dest='clsmodel')\nparser.add_argument('--input_size',default=256,dest='input_size')\nparser.add_argument('--crop_size',default=224,dest='crop_size')\nparser.add_argument('--num_classes',default=1000)\nparser.add_argument('--tencrop', default=True)\nparser.add_argument('--gpu',help='which gpu to use',default='0',dest='gpu')\nparser.add_argument('--data',metavar='DIR',default='./',help='path to imagenet dataset')\nparser.add_argument('--threshold', type=float, default=0.2)\nargs = parser.parse_args()\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\ndef normalize_map(atten_map,w,h):\n min_val = np.min(atten_map)\n max_val = np.max(atten_map)\n atten_norm = (atten_map - min_val)/(max_val - min_val)\n atten_norm = cv2.resize(atten_norm, dsize=(w,h))\n return atten_norm\ndef to_data(x):\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data \n#os.environ['OMP_NUM_THREADS'] = \"4\"\n#os.environ['MKL_NUM_THREADS'] = \"4\"\ncudnn.benchmark = True\nTEN_CROP = args.tencrop\nnormalize = transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\ntransform = transforms.Compose([\n transforms.Resize((args.crop_size,args.crop_size)),\n # transforms.CenterCrop(args.crop_size),\n transforms.ToTensor(),\n normalize\n])\ncls_transform = transforms.Compose([\n transforms.Resize((args.crop_size,args.crop_size)),\n # transforms.CenterCrop(args.crop_size),\n transforms.ToTensor(),\n normalize\n])\nten_crop_aug = transforms.Compose([\n transforms.Resize((args.input_size, args.input_size)),\n transforms.TenCrop(args.crop_size),\n transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])),\n])\nmodel = model(args)\nmodel.load_state_dict(torch.load('logs/VGG0_15.pth.tar'))\n#print(model)\nmodel = model.to(0)\nmodel.eval()\ncls_model = model\nclsname = 'vgg'\nroot = args.data\nval_imagedir = os.path.join(root, 'test')\n\nanno_root = os.path.join(root,'bbox')\nval_annodir = os.path.join(root, 'val_gt.txt')\nval_list_path = os.path.join(root, 'val_list.txt')\n\nclasses = os.listdir(val_imagedir)\nclasses.sort()\ntemp_softmax = nn.Softmax()\n#print(classes[0])\n\n\nclass_to_idx = {classes[i]:i for i in range(len(classes))}\n\nresult = {}\n\naccs = []\naccs_top5 = []\nloc_accs = []\ncls_accs = []\nfinal_cls = []\nfinal_loc = []\nfinal_clsloc = []\nfinal_clsloctop5 = []\nbbox_f = open(val_annodir, 'r')\nbbox_list = []\nfor line in bbox_f:\n part_1, part_2 = line.strip('\\n').split(';')\n _, w, h, _ = part_1.split(' ')\n part_2 = part_2[1:]\n bbox = part_2.split(' ')\n bbox = np.array(bbox, dtype=np.float32)\n box_num = len(bbox) // 4\n w, h = np.float32(w),np.float32(h)\n for i in range(box_num):\n bbox[4*i], bbox[4*i+1], bbox[4*i+2], bbox[4*i+3] = bbox[4*i]/w, bbox[4*i+1]/h, bbox[4*i+2]/w , bbox[4*i+3]/h\n bbox_list.append(bbox) ## gt\ncur_num = 0\nbbox_f.close()\n\nfiles = [[] for i in range(1000)] ##[类别][路径]\n\nwith open(val_list_path, 'r') as f:\n for line in f:\n test_img_path, img_class = line.strip(\"\\n\").split(';')\n files[int(img_class)].append(test_img_path)\n\nfor k in range(1000):\n cls = classes[k]\n\n total = 0\n IoUSet = []\n IoUSetTop5 = []\n LocSet = []\n ClsSet = []\n\n\n #files = os.listdir(os.path.join(val_imagedir, cls))\n #files.sort()\n\n for (i, name) in enumerate(files[k]):\n # raw_img = cv2.imread(os.path.join(imagedir, cls, name))\n\n gt_boxes = bbox_list[cur_num]\n cur_num += 1\n if len(gt_boxes)==0:\n continue\n\n raw_img = 
Image.open(os.path.join(val_imagedir, name)).convert('RGB')\n w, h = args.crop_size, args.crop_size\n\n with torch.no_grad():\n img = transform(raw_img)\n img = torch.unsqueeze(img, 0)\n img = img.to(0)\n reg_outputs = model(img)\n #bbox = to_data(reg_outputs)\n #bbox = torch.squeeze(bbox)\n #bbox = bbox.numpy()\n # cam = model.get_fused_cam5()[0].data.cpu()\n cam = model.x_2_saliency[0][0].data.cpu()\n cam = normalize_map(np.array(cam),w,h)\n \n ## 制作bbox\n highlight = np.zeros(cam.shape)\n highlight[cam > args.threshold] = 1\n # max component\n all_labels = measure.label(highlight)\n highlight = np.zeros(highlight.shape)\n highlight[all_labels == count_max(all_labels.tolist())] = 1\n highlight = np.round(highlight * 255)\n highlight_big = cv2.resize(highlight, (w, h), interpolation=cv2.INTER_NEAREST)\n CAMs = copy.deepcopy(highlight_big)\n props = measure.regionprops(highlight_big.astype(int))\n\n if len(props) == 0:\n bbox = [0, 0, w, h]\n else:\n temp = props[0]['bbox']\n bbox = [temp[1], temp[0], temp[3], temp[2]]\n\n if TEN_CROP:\n img = ten_crop_aug(raw_img)\n img = img.to(0)\n vgg16_out = cls_model(img)\n vgg16_out = nn.Softmax()(vgg16_out)\n vgg16_out = torch.mean(vgg16_out,dim=0,keepdim=True)\n vgg16_out = torch.topk(vgg16_out, 5, 1)[1]\n else:\n img = cls_transform(raw_img)\n img = torch.unsqueeze(img, 0)\n img = img.to(0)\n vgg16_out,_,_,_ = cls_model(img)\n vgg16_out = torch.topk(vgg16_out, 5, 1)[1]\n vgg16_out = to_data(vgg16_out)\n vgg16_out = torch.squeeze(vgg16_out)\n vgg16_out = vgg16_out.numpy()\n out = vgg16_out\n ClsSet.append(out[0]==class_to_idx[cls])\n\n #handle resize and centercrop for gt_boxes\n\n gt_bbox_i = list(gt_boxes)\n raw_img_i = raw_img\n # raw_img_i, gt_bbox_i = ResizedBBoxCrop((256,256))(raw_img, temp_list)\n # raw_img_i, gt_bbox_i = CenterBBoxCrop((224))(raw_img_i, gt_bbox_i)\n # w, h = raw_img_i.size\n gt_bbox_i[0] = gt_bbox_i[0] * w\n gt_bbox_i[2] = gt_bbox_i[2] * w\n gt_bbox_i[1] = gt_bbox_i[1] * h\n gt_bbox_i[3] = gt_bbox_i[3] * h\n\n gt_boxes = gt_bbox_i\n\n w, h = raw_img_i.size\n\n bbox[0] = bbox[0] #0和1左上角的点,原本的2和3是框的大小\n bbox[2] = bbox[2] \n bbox[1] = bbox[1] \n bbox[3] = bbox[3] \n #print(gt_bbox_i, bbox)\n max_iou = -1\n iou = IoU(bbox, gt_boxes)\n if iou > max_iou:\n max_iou = iou\n\n LocSet.append(max_iou)\n temp_loc_iou = max_iou\n if out[0] != class_to_idx[cls]:\n max_iou = 0\n\n #print(max_iou, name)\n result[os.path.join(cls, name)] = bbox #max_iou\n IoUSet.append(max_iou)\n #cal top5 IoU\n max_iou = 0\n for i in range(5):\n if out[i] == class_to_idx[cls]:\n max_iou = temp_loc_iou\n IoUSetTop5.append(max_iou)\n #visualization code\n cls_loc_acc = np.sum(np.array(IoUSet) > 0.5) / len(IoUSet)\n final_clsloc.extend(IoUSet)\n cls_loc_acc_top5 = np.sum(np.array(IoUSetTop5) > 0.5) / len(IoUSetTop5)\n final_clsloctop5.extend(IoUSetTop5)\n loc_acc = np.sum(np.array(LocSet) > 0.5) / len(LocSet)\n final_loc.extend(LocSet)\n cls_acc = np.sum(np.array(ClsSet))/len(ClsSet)\n final_cls.extend(ClsSet)\n print('{} cls-loc acc is {}, loc acc is {}, vgg16 cls acc is {}'.format(cls, cls_loc_acc, loc_acc, cls_acc))\n with open('inference_CorLoc.txt', 'a+') as corloc_f:\n corloc_f.write('{} {}\\n'.format(cls, loc_acc))\n accs.append(cls_loc_acc)\n accs_top5.append(cls_loc_acc_top5)\n loc_accs.append(loc_acc)\n cls_accs.append(cls_acc)\n if (k+1) %100==0:\n print(k)\n\n\nprint(accs)\nprint('Cls-Loc acc {}'.format(np.mean(accs)))\nprint('Cls-Loc acc Top 5 {}'.format(np.mean(accs_top5)))\n\nprint('GT Loc acc {}'.format(np.mean(loc_accs)))\nprint('{} cls acc 
{}'.format(clsname, np.mean(cls_accs)))\nwith open('Corloc_result.txt', 'w') as f:\n for k in sorted(result.keys()):\n f.write('{} {}\\n'.format(k, str(result[k])))\n","repo_name":"illusory-hymn/resnet_test","sub_path":"resnet/PSOL_inference-test.py","file_name":"PSOL_inference-test.py","file_ext":"py","file_size_in_byte":8793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33676200452","text":"import json\nimport os\nimport stat\nfrom time import time\n\nimport mock\nfrom passport.backend.tvm_keyring import settings\nfrom passport.backend.tvm_keyring.exceptions import TVMPermanentError\nfrom passport.backend.tvm_keyring.test.base import BaseTestCase\nfrom passport.backend.tvm_keyring.test.base_test_data import (\n TEST_CONFIG_1_CONTENTS,\n TEST_CONFIG_1_NAME,\n TEST_CONFIG_2_CONTENTS,\n TEST_CONFIG_2_NAME,\n TEST_JUNK_RESULT_FILENAME,\n TEST_JUNK_RESULT_NAME,\n TEST_KEYS_FILENAME,\n TEST_RESULT_1,\n TEST_RESULT_1_FILENAME,\n TEST_RESULT_2,\n TEST_RESULT_2_FILENAME,\n TEST_TVM_KEYS,\n)\nfrom passport.backend.tvm_keyring.test.fake_tvm import tvm_ticket_response\nfrom passport.backend.tvm_keyring.update import update\n\n\nclass UpdateTestCase(BaseTestCase):\n def setUp(self):\n super(UpdateTestCase, self).setUp()\n self.fake_fs.create_dir(settings.RESULT_PATH)\n self.fake_fs.create_file(\n os.path.join(settings.CONFIG_PATH, TEST_CONFIG_1_NAME),\n contents=json.dumps(TEST_CONFIG_1_CONTENTS),\n )\n self.fake_fs.create_file(\n os.path.join(settings.CONFIG_PATH, TEST_CONFIG_2_NAME),\n contents=json.dumps(TEST_CONFIG_2_CONTENTS),\n )\n self.fake_fs.create_file(\n TEST_JUNK_RESULT_FILENAME,\n contents='{}',\n )\n\n self.fake_tvm.set_response_side_effect([\n TEST_TVM_KEYS,\n tvm_ticket_response({'2': 'ticket2'}),\n tvm_ticket_response({'1': 'ticket1', '3': 'ticket3', '4': 'ticket4'}),\n ])\n\n def _assert_result_filenames_equal(self, filenames):\n assert set(os.listdir(settings.RESULT_PATH)) == set(filenames)\n\n def _check_permissions(self, filename, expected_permissions):\n # по 3 бита на стандартные юниксовые права all, group и other\n # итого 9 бит, или 0o777, что одно и то же\n file_permissions = os.stat(filename)[stat.ST_MODE] & 0o777\n assert oct(file_permissions) == oct(expected_permissions)\n\n def _check_change_file_owner_call(self, owner, group, call_index=0):\n assert self.getpwnam_mock.call_args_list[call_index][0] == (owner, )\n assert self.getgrnam_mock.call_args_list[call_index][0] == (group, )\n\n def check_ok(self, keys_content=TEST_TVM_KEYS.decode('utf-8'), result_1_content=TEST_RESULT_1, result_2_content=TEST_RESULT_2,\n junk_left=False, result_owner=None, result_group=None, result_permissions=None, tvm_call_count=3,\n change_file_owner_call_count=3):\n expected_filenames = []\n if keys_content is not None:\n expected_filenames.append('tvm.keys')\n if result_1_content is not None:\n expected_filenames.append('%s.tickets' % TEST_CONFIG_1_NAME)\n if result_2_content is not None:\n expected_filenames.append('%s.tickets' % TEST_CONFIG_2_NAME)\n if junk_left:\n expected_filenames.append(TEST_JUNK_RESULT_NAME)\n if result_owner is None:\n result_owner = settings.RESULT_TICKETS_DEFAULT_OWNER\n if result_group is None:\n result_group = settings.RESULT_TICKETS_DEFAULT_GROUP\n if result_permissions is None:\n result_permissions = settings.RESULT_TICKETS_DEFAULT_PERMISSIONS\n\n self._assert_result_filenames_equal(expected_filenames)\n\n if keys_content is not None:\n with open(TEST_KEYS_FILENAME) as f:\n assert f.read() == 
keys_content\n self._check_permissions(\n TEST_KEYS_FILENAME,\n expected_permissions=settings.RESULT_KEYS_PERMISSIONS,\n )\n if change_file_owner_call_count >= 1:\n self._check_change_file_owner_call(\n owner=settings.RESULT_KEYS_OWNER,\n group=settings.RESULT_KEYS_GROUP,\n call_index=0,\n )\n else:\n assert not os.path.exists(TEST_KEYS_FILENAME)\n\n if result_1_content is not None:\n with open(TEST_RESULT_1_FILENAME) as f:\n assert f.read() == result_1_content\n self._check_permissions(\n TEST_RESULT_1_FILENAME,\n expected_permissions=result_permissions,\n )\n if change_file_owner_call_count >= 2:\n self._check_change_file_owner_call(\n owner=result_owner,\n group=result_group,\n call_index=1,\n )\n else:\n assert not os.path.exists(TEST_RESULT_1_FILENAME)\n\n if result_2_content is not None:\n with open(TEST_RESULT_2_FILENAME) as f:\n assert f.read() == result_2_content\n self._check_permissions(\n TEST_RESULT_2_FILENAME,\n expected_permissions=result_permissions,\n )\n call_index = 2 if result_1_content else 1\n if change_file_owner_call_count >= call_index + 1:\n self._check_change_file_owner_call(\n owner=result_owner,\n group=result_group,\n call_index=call_index,\n )\n else:\n assert not os.path.exists(TEST_RESULT_2_FILENAME)\n\n assert self.fake_tvm._mock.call_count == tvm_call_count\n\n def test_ok(self):\n assert update()\n self.check_ok()\n\n def test_results_already_actual(self):\n self.fake_fs.create_file(\n TEST_KEYS_FILENAME,\n contents='keys',\n st_mode=settings.RESULT_KEYS_PERMISSIONS,\n )\n self.fake_fs.create_file(\n TEST_RESULT_1_FILENAME,\n contents='1',\n st_mode=settings.RESULT_TICKETS_DEFAULT_PERMISSIONS,\n )\n self.fake_fs.create_file(\n TEST_RESULT_2_FILENAME,\n contents='2',\n st_mode=settings.RESULT_TICKETS_DEFAULT_PERMISSIONS,\n )\n\n assert update()\n\n self.check_ok(\n keys_content='keys',\n result_1_content='1',\n result_2_content='2',\n tvm_call_count=0,\n change_file_owner_call_count=0,\n )\n\n def test_forced_update(self):\n self.fake_fs.create_file(\n TEST_KEYS_FILENAME,\n contents=TEST_TVM_KEYS,\n st_mode=settings.RESULT_KEYS_PERMISSIONS,\n )\n self.fake_fs.create_file(\n TEST_RESULT_1_FILENAME,\n contents='1',\n st_mode=settings.RESULT_TICKETS_DEFAULT_PERMISSIONS,\n )\n self.fake_fs.create_file(\n TEST_RESULT_2_FILENAME,\n contents='2',\n st_mode=settings.RESULT_TICKETS_DEFAULT_PERMISSIONS,\n )\n self.fake_tvm.set_response_side_effect([\n tvm_ticket_response({'2': 'ticket2'}),\n tvm_ticket_response({'1': 'ticket1', '3': 'ticket3', '4': 'ticket4'}),\n ])\n\n assert update(force=True)\n\n self.check_ok(tvm_call_count=2, change_file_owner_call_count=2)\n\n def test_configs_changed_after_creating_results(self):\n self.fake_fs.create_file(\n TEST_KEYS_FILENAME,\n contents=TEST_TVM_KEYS,\n st_mode=settings.RESULT_KEYS_PERMISSIONS,\n )\n self.fake_fs.create_file(\n TEST_RESULT_1_FILENAME,\n contents='1',\n st_mode=settings.RESULT_TICKETS_DEFAULT_PERMISSIONS,\n )\n self.fake_fs.create_file(\n TEST_RESULT_2_FILENAME,\n contents='2',\n st_mode=settings.RESULT_TICKETS_DEFAULT_PERMISSIONS,\n )\n os.utime(os.path.join(settings.CONFIG_PATH, TEST_CONFIG_2_NAME), (0, time() + 1))\n\n self.fake_tvm.set_response_side_effect([\n tvm_ticket_response({'1': 'ticket1', '3': 'ticket3', '4': 'ticket4'}),\n ])\n\n assert update()\n\n self.check_ok(\n keys_content=TEST_TVM_KEYS.decode('utf-8'),\n result_1_content='1',\n tvm_call_count=1,\n change_file_owner_call_count=1,\n )\n\n def test_no_destinations_ok(self):\n self.fake_fs.remove_object(os.path.join(settings.CONFIG_PATH, 
TEST_CONFIG_1_NAME))\n with open(os.path.join(settings.CONFIG_PATH, TEST_CONFIG_2_NAME), 'w') as f:\n json.dump(dict(TEST_CONFIG_2_CONTENTS.items(), destinations=[]), f)\n\n assert update()\n\n expected_result = json.dumps(\n dict(json.loads(TEST_RESULT_2).items(), tickets={}),\n indent=2,\n )\n self.check_ok(\n result_1_content=None,\n result_2_content=expected_result,\n tvm_call_count=1,\n change_file_owner_call_count=1,\n )\n\n def test_no_secret_ok(self):\n self.fake_fs.remove_object(os.path.join(settings.CONFIG_PATH, TEST_CONFIG_1_NAME))\n with open(os.path.join(settings.CONFIG_PATH, TEST_CONFIG_2_NAME), 'w') as f:\n json.dump({'client_id': 2}, f)\n\n assert update()\n\n expected_result = json.dumps(\n dict(client_id=2, client_secret=None, tickets={}),\n indent=2,\n )\n self.check_ok(\n result_1_content=None,\n result_2_content=expected_result,\n tvm_call_count=1,\n change_file_owner_call_count=1,\n )\n\n def test_custom_result_properties_ok(self):\n self.fake_fs.remove_object(os.path.join(settings.CONFIG_PATH, TEST_CONFIG_2_NAME))\n with open(os.path.join(settings.CONFIG_PATH, TEST_CONFIG_1_NAME), 'w') as f:\n json.dump(\n dict(\n TEST_CONFIG_1_CONTENTS.items(),\n result={'owner': 'root', 'group': 'root', 'permissions': '777'},\n ),\n f,\n )\n\n with mock.patch('passport.backend.tvm_keyring.update.get_current_user', mock.Mock(return_value='root')):\n assert update()\n\n self.check_ok(\n result_1_content=TEST_RESULT_1,\n result_2_content=None,\n tvm_call_count=2,\n change_file_owner_call_count=2,\n result_owner='root',\n result_group='root',\n result_permissions=0o777,\n )\n\n def test_failed_to_get_keys(self):\n self.fake_tvm.set_response_side_effect(TVMPermanentError())\n\n assert not update()\n\n self.check_ok(\n keys_content=None,\n result_1_content=None,\n result_2_content=None,\n junk_left=True,\n tvm_call_count=1,\n change_file_owner_call_count=1,\n )\n\n def test_got_invalid_keys(self):\n self.fake_tvm.set_response_side_effect(['keys'])\n\n assert not update()\n\n self.check_ok(\n keys_content=None,\n result_1_content=None,\n result_2_content=None,\n junk_left=True,\n tvm_call_count=1,\n change_file_owner_call_count=1,\n )\n\n def test_failed_to_get_keys_but_old_present(self):\n self.fake_tvm.set_response_side_effect([\n TVMPermanentError(),\n tvm_ticket_response({'2': 'ticket2'}),\n tvm_ticket_response({'1': 'ticket1', '3': 'ticket3', '4': 'ticket4'}),\n ])\n self.fake_fs.create_file(\n TEST_KEYS_FILENAME,\n contents=TEST_TVM_KEYS,\n st_mode=settings.RESULT_KEYS_PERMISSIONS,\n )\n os.utime(TEST_KEYS_FILENAME, (0, time() - settings.KEYS_UPDATE_INTERVAL - 10))\n\n assert update()\n\n self.check_ok(change_file_owner_call_count=0)\n\n def test_got_invalid_keys_but_old_present(self):\n self.fake_tvm.set_response_side_effect([\n b'keys',\n tvm_ticket_response({'2': 'ticket2'}),\n tvm_ticket_response({'1': 'ticket1', '3': 'ticket3', '4': 'ticket4'}),\n ])\n self.fake_fs.create_file(\n TEST_KEYS_FILENAME,\n contents=TEST_TVM_KEYS,\n st_mode=settings.RESULT_KEYS_PERMISSIONS,\n )\n os.utime(TEST_KEYS_FILENAME, (0, time() - settings.KEYS_UPDATE_INTERVAL - 10))\n\n assert update()\n\n self.check_ok(change_file_owner_call_count=0)\n\n def test_failed_to_get_some_tickets(self):\n self.fake_tvm.set_response_side_effect([\n TEST_TVM_KEYS,\n TVMPermanentError(),\n tvm_ticket_response({'1': 'ticket1', '3': 'ticket3', '4': 'ticket4'}),\n ])\n\n assert update()\n\n self.check_ok(result_1_content=None)\n\n def test_some_configs_invalid(self):\n with open(os.path.join(settings.CONFIG_PATH, 
TEST_CONFIG_2_NAME), 'w') as f:\n f.write('corrupt json')\n\n assert update()\n\n self.check_ok(result_2_content=None, tvm_call_count=2, change_file_owner_call_count=2)\n\n def test_failed_to_delete_junk(self):\n # Кидаем OSError только для TEST_JUNK_RESULT_FILENAME\n # Остальные файлы удаляются корректно\n from os import remove as original_os_remove\n with mock.patch.object(os, 'remove') as os_remove_mock:\n def broken_remove(filename):\n if filename == TEST_JUNK_RESULT_FILENAME:\n raise OSError()\n else:\n original_os_remove(filename)\n os_remove_mock.side_effect = broken_remove\n\n assert update()\n\n self.check_ok(junk_left=True)\n\n def test_unhandled_error(self):\n self.fake_tvm.set_response_side_effect(NameError)\n\n assert not update()\n\n self.check_ok(\n keys_content=None,\n result_1_content=None,\n result_2_content=None,\n junk_left=True,\n tvm_call_count=1,\n change_file_owner_call_count=1,\n )\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"passport/tests/test_update.py","file_name":"test_update.py","file_ext":"py","file_size_in_byte":13809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"37689303782","text":"# echo-client.py\n# https://realpython.com/python-sockets/\n\nimport socket\n\nHOST = \"127.0.0.1\"\nPORT = 15213\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n with open(\"logs/2023-01-16.csv\", \"rb\") as file:\n s.connect((HOST, PORT))\n data = file.read(1024)\n while(data):\n s.send(data)\n data = file.read(1024)\n\nprint(f\"Sent data :(\")","repo_name":"daneengman/FeeshMonitor","sub_path":"testing/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11624293299","text":"\"\"\"\nUse lldb Python API to test base class resolution for ObjC classes\n\"\"\"\n\n\nimport lldb\nfrom lldbsuite.test.decorators import *\nfrom lldbsuite.test.lldbtest import *\nfrom lldbsuite.test import lldbutil\n\n\nclass ObjCDynamicValueTestCase(TestBase):\n def setUp(self):\n # Call super's setUp().\n TestBase.setUp(self)\n\n self.line = line_number(\"main.m\", \"// Set breakpoint here.\")\n\n @add_test_categories([\"pyapi\"])\n def test_get_baseclass(self):\n \"\"\"Test fetching ObjC dynamic values.\"\"\"\n if self.getArchitecture() == \"i386\":\n # rdar://problem/9946499\n self.skipTest(\"Dynamic types for ObjC V1 runtime not implemented\")\n\n self.build()\n exe = self.getBuildArtifact(\"a.out\")\n\n # Create a target from the debugger.\n\n target = self.dbg.CreateTarget(exe)\n self.assertTrue(target, VALID_TARGET)\n\n # Set up our breakpoints:\n\n target.BreakpointCreateByLocation(\"main.m\", self.line)\n process = target.LaunchSimple(None, None, self.get_process_working_directory())\n\n self.assertState(process.GetState(), lldb.eStateStopped, PROCESS_STOPPED)\n\n var = self.frame().FindVariable(\"foo\")\n var_ptr_type = var.GetType()\n var_pte_type = var_ptr_type.GetPointeeType()\n self.assertEqual(\n var_ptr_type.GetNumberOfDirectBaseClasses(), 1, \"Foo * has one base class\"\n )\n self.assertEqual(\n var_pte_type.GetNumberOfDirectBaseClasses(), 1, \"Foo has one base class\"\n )\n\n self.assertTrue(\n var_ptr_type.GetDirectBaseClassAtIndex(0).IsValid(),\n \"Foo * has a valid base class\",\n )\n self.assertTrue(\n var_pte_type.GetDirectBaseClassAtIndex(0).IsValid(),\n \"Foo * has a valid base class\",\n )\n\n self.assertEquals(\n 
var_ptr_type.GetDirectBaseClassAtIndex(0).GetName(),\n var_pte_type.GetDirectBaseClassAtIndex(0).GetName(),\n \"Foo and its pointer type don't agree on their base class\",\n )\n","repo_name":"llvm/llvm-project","sub_path":"lldb/test/API/lang/objc/objc-baseclass-sbtype/TestObjCBaseClassSBType.py","file_name":"TestObjCBaseClassSBType.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":22888,"dataset":"github-code","pt":"66"} +{"seq_id":"39288686316","text":"from collections import defaultdict\nimport math\nimport random\nfrom numpy import random as random_np\n\nfrom colorama import Fore\n\nfrom lib import logger\nfrom lib.utils import all_dijkstra_tree, get_lower_bound, get_lm_distances\nfrom lm_picker import LMPicker\n\n\nLOG = logger.getLogger()\n\n\nclass MaxcoverPicker(LMPicker):\n def get_avoid_landmarks(self, lms=None, lm_dists=None, lm_dists_rev=None,\n lm_num=10):\n lms = lms or []\n lm_dists = lm_dists or {}\n lm_dists_rev = lm_dists_rev or {}\n\n # And now real picking begins...\n for i in range(len(lms), lm_num):\n LOG.info('Calculating landmark ' + Fore.RED + '%d' + Fore.RESET +\n '...', i)\n\n r = random.choice(self.G.keys())\n LOG.debug(' Choosen r=%d.', r)\n r_dists, r_tree, r_order = all_dijkstra_tree(r, self.G)\n\n # First calculate \"weights\".\n LOG.info(' Calculating weights...')\n weights = {}\n for v in self.G.keys():\n weights[v] = r_dists[v] - get_lower_bound(lm_dists,\n lm_dists_rev, r, v)\n\n LOG.info(' Calculating sizes...')\n # Then \"sizes\" dependent on \"weights\"\n sizes = defaultdict(lambda: 0)\n w = None # That's the node of max size\n\n # We're processing the vertices in reversed order of Dijkstra\n # algorithm. Basically we're starting on the leaves and going up.\n for v in reversed(r_order):\n # Traverse subtree of r_tree rooted at v using DFS\n Q = [v]\n while Q:\n u = Q.pop()\n\n # It's possible we already have size of the subtree\n if u in sizes:\n if sizes[u] == 0:\n sizes[v] = 0\n break\n else:\n sizes[v] += sizes[u]\n continue\n\n # If subtree has landmark then size is 0\n if u in lms:\n sizes[v] = 0\n break\n else:\n sizes[v] += weights[u]\n\n for x in r_tree[u]:\n Q.append(x)\n\n if w is None or sizes[w] < sizes[v]:\n w = v\n\n LOG.info(' Calculating landmark...')\n # We have all the sizes calculated, and max one (w). Now we travese\n # subtree of r_tree rooted in w. 
We always choose branch of highest\n # size.\n while r_tree[w]:\n w = max(r_tree[w], key=lambda x: sizes[x])\n\n # Adding leaf as a new landmark\n LOG.info(' Calculated node ' + Fore.RED + '%d' + Fore.RESET +\n ' as landmark.', w)\n lms.append(w)\n\n # Calculate distances for new landmark\n LOG.info(' Calculating distances for this landmark...')\n lm_dists[w] = get_lm_distances(self.G, [w])[w]\n lm_dists_rev[w] = get_lm_distances(self.G_reversed, [w])[w]\n\n LOG.info(Fore.RED + 'Choosen landmarks: %s' + Fore.RESET, str(lms))\n return lms, lm_dists, lm_dists_rev\n\n def calculate_cost(self, lms, lm_dists):\n cost = 0\n\n # Iterate over all edges.\n for v in self.G.keys():\n for w in self.G[v].keys():\n # Iterate over all landmarks\n for lm in lms:\n if self.G[v][w] - lm_dists[lm][w] + lm_dists[lm][v]:\n cost += 1\n break\n\n LOG.info('Cost of current solution: %d', cost)\n return cost\n\n def get_landmarks(self, lm_num=10):\n k = lm_num # For compatibility with description in Goldberg's article.\n C = set()\n C_dists = {}\n C_dists_rev = {}\n # Start with getting k landmarks by avoid, add all as candidates\n lms, lm_dists, lm_dists_rev = self.get_avoid_landmarks(lm_num=k)\n C.update(lms)\n C_dists.update(lm_dists)\n C_dists_rev.update(lm_dists_rev)\n\n # Repeat until avoid is called 5k times or we have 4k landmarks\n i = 0 # Number of calls to Avoid.\n # TODO: Too much candidates are generated (4 landmarks - 21 candidates)\n # I should probably split that to use avoid one-by-one.\n while len(C) < (4 * k) and i < (5 * k):\n # With probability 1/2 remove each landmark from the solution\n for lm in lms:\n if random.randint(0, 1):\n lms.remove(lm)\n del lm_dists[lm]\n del lm_dists_rev[lm]\n\n i += k - len(lms) # Avoid will be called that many times.\n\n # Generate new landmarks\n lms, lm_dists, lm_dists_rev = self.get_avoid_landmarks(lms,\n lm_dists,\n lm_dists_rev,\n k)\n\n # Add new ones to C and repeat everything\n C.update(lms)\n C_dists.update(lm_dists)\n C_dists_rev.update(lm_dists_rev)\n\n LOG.info('Got %d candidates in C.', len(C))\n\n # Multistart heuristic with local search - swapping\n solutions = []\n costs = []\n for i in xrange(int(math.log(k + 1, 2))):\n LOG.info('Calculating %d for %d sets', i + 1,\n int(math.log(k + 1, 2)))\n S = set(random.sample(C, k)) # Get k lms from C at random\n T = C - S # Rest of candidates\n\n current_cost = self.calculate_cost(S, C_dists)\n\n while True:\n profits = {}\n for s in S:\n for t in T:\n LOG.info('Trying out swap %d-%d.', s, t)\n new_S = S.copy()\n new_S.remove(s)\n new_S.add(t)\n new_cost = self.calculate_cost(new_S, C_dists)\n profit = new_cost - current_cost\n if profit > 0:\n LOG.info('Swap %d-%d profitable with %d profit.',\n s, t, profit)\n profits['%d-%d' % (s, t)] = profit\n\n # If no improvement can be made stop.\n if not profits:\n break\n\n # Otherwise choose swap at random with profit weights\n p = profits.values()\n p_sum = float(sum(p))\n p = [x / p_sum for x in p] # Normalize weigths to sum to 1.0\n swap = random_np.choice(profits.keys(), p=p)\n LOG.info('Swap %s chosen.', swap)\n s, t = swap.split('-')\n s, t = int(s), int(t)\n S.remove(s)\n S.add(t)\n current_cost += profits[swap]\n\n LOG.info('No improvements could be made - solution found: %s (%d).',\n str(S), current_cost)\n solutions.append(S)\n costs.append(current_cost)\n\n solution = solutions[max(xrange(len(costs)), key=costs.__getitem__)]\n LOG.info('Best solution chosen: %s.', str(solution))\n return (list(solution),\n {k: v for k, v in C_dists.items() if k in 
solution},\n {k: v for k, v in C_dists_rev.items() if k in solution})\n","repo_name":"dulek/alt-tester","sub_path":"lm_pickers/maxcover.py","file_name":"maxcover.py","file_ext":"py","file_size_in_byte":7637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36927159527","text":"import pprint\n\npp = pprint.PrettyPrinter()\n\ndef charToHex(c):\n hexVal = int(c, 16)\n bitArr = [0, 0, 0, 0]\n i = 3\n while hexVal > 0:\n bitArr[i] = hexVal % 2\n i = i - 1\n hexVal //= 2\n return bitArr\n\n# lo is inclusive, hi is exclusive\ndef bitsToNum(bits, lo, hi):\n num = 0\n for i in range(lo, hi):\n num <<= 1\n num += bits[i]\n return num\n\n# returns (val, num bits processed)\ndef processLiteral(bits, start):\n numBits = 0\n val = 0\n while bits[start] != 0:\n val += bitsToNum(bits, start + 1, start + 5)\n val <<= 4\n start += 5\n numBits += 5\n val += bitsToNum(bits, start + 1, start + 5)\n return (val, numBits + 5)\n\n# lo is inclusive, hi is exclusive\n# Returns (value, numProcessed)\ndef processBits(bits, lo):\n typeId = bitsToNum(bits, lo + 3, lo + 6)\n if typeId == 4:\n (val, numProc) = processLiteral(bits, lo + 6)\n return (val, numProc + 6)\n else:\n val = None\n lenId = bitsToNum(bits, lo + 6, lo + 7)\n numProc = 0\n packets = []\n if lenId: # equals 1\n numPackets = bitsToNum(bits, lo + 7, lo + 18)\n offset = 18\n while numPackets > 0:\n (nextVal, nextProc) = processBits(bits, lo + 18 + numProc)\n numProc += nextProc\n numPackets -= 1\n packets.append(nextVal)\n else:\n lenPackets = bitsToNum(bits, lo + 7, lo + 22)\n offset = 22\n while numProc < lenPackets:\n (nextVal, nextProc) = processBits(bits, lo + 22 + numProc)\n numProc += nextProc\n packets.append(nextVal)\n if typeId == 0:\n val = 0\n for packet in packets:\n val += packet\n elif typeId == 1:\n val = 1\n for packet in packets:\n val *= packet\n elif typeId == 2:\n for packet in packets:\n val = packet if val is None or packet < val else val\n elif typeId == 3:\n for packet in packets:\n val = packet if val is None or packet > val else val\n elif typeId == 5:\n val = int(packets[0] > packets[1])\n elif typeId == 6:\n val = int(packets[0] < packets[1])\n elif typeId == 7:\n val = int(packets[0] == packets[1])\n return (val, numProc + offset)\n\n\ndef main():\n f = open(\"packet.txt\")\n\n line = f.readline().rstrip()\n bits = []\n for c in line:\n bits.extend(charToHex(c))\n\n ans = processBits(bits, 0)\n\n print(\"Answer: \" + str(ans[0]))\n f.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"qtgeo1248/AdventOfCode","sub_path":"2021/16/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"10787700251","text":"from db.run_sql import run_sql\nfrom models.post import Post\nfrom models.user import User\nfrom models.thread import Thread\n\ndef create_thread(thread):\n sql = \"INSERT INTO threads (title, creator, locked) VALUES (%s, %s, %s) RETURNING id\"\n values = [thread.title, thread.creator, thread.locked]\n results = run_sql(sql, values)\n thread.thread_id = results[0]['id']\n return thread\n\ndef select(id):\n thread = None\n sql = \"SELECT * FROM threads WHERE id = %s\"\n values = [id]\n result = run_sql(sql, values)[0]\n\n if result is not None:\n thread = Thread(result['title'], result['creator'], result['locked'], result['id'] )\n return thread\n\ndef select_all():\n threads = []\n sql = \"SELECT * FROM threads\"\n results = 
run_sql(sql)\n\n for row in results:\n thread = Thread(row['title'], row['creator'], row['locked'], row['id'])\n threads.append(thread)\n\n return threads\n\ndef delete_thread(id):\n sql = \"DELETE FROM threads WHERE id = %s\"\n values = [id]\n run_sql(sql, values)\n\ndef delete_all_threads():\n sql = \"DELETE FROM threads\"\n run_sql(sql)\n\ndef lock_thread(id):\n sql = \"UPDATE threads SET locked = True WHERE id = %s\"\n values = [id]\n run_sql(sql, values)\n\ndef users(thread):\n users = []\n sql = \"SELECT users.* FROM users INNER JOIN posts ON posts.user_id = users.id WHERE thread_id = %s\"\n values = [thread.id]\n results = run_sql(sql, values)\n\n for row in results:\n user = User(row['user_name'], row['sig'], row['avatar_id'], row['account_banned'], row['admin_status'], row['id'])\n users.append(user)\n \n return users\n\ndef get_posts(id):\n posts = []\n sql = \"SELECT * FROM posts WHERE thread_id = %s ORDER BY id ASC\"\n values = [id]\n results = run_sql(sql, values)\n\n for row in results:\n post = Post(row['post_content'], row['user_id'], row['thread_id'], row['id'])\n posts.append(post)\n\n return posts","repo_name":"robdrawspictures/forum_ipsem","sub_path":"forum_ipsem/repositories/thread_repository.py","file_name":"thread_repository.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12866935751","text":"import requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nclass CryptoCurrencies:\n\n \"\"\"This class contains functions to track and analyze the value of cryptocurrencies\n across various markets. It uses the Alpha Vantage API and requires an API key that\n can be retrieved here: https://www.alphavantage.co/support/#api-key.\n \"\"\"\n\n _API_URL = \"https://www.alphavantage.co/query?\"\n\n def __init__(self, api_key):\n\n \"\"\" Initialize the class\n\n Keyword Arguments:\n api_key: Alpha Vantage api key\n \"\"\"\n\n self._api_key = api_key\n\n def get_timeseries_daily(self, symbol, market):\n\n \"\"\"Return a Pandas object with the daily timeseries of\n the cryptocurrency and market specified.\n\n Keyword Arguments:\n symbol: symbol of CryptoCurrency to be tracked.\n market: symbol of exchange market to be tracked.\n \"\"\"\n\n function = 'DIGITAL_CURRENCY_DAILY'\n\n url = '{}function={}&symbol={}&market={}&apikey={}' \\\n .format(self._API_URL,\n function,\n symbol,\n market,\n self._api_key)\n\n cc_json = self._api_call(url)['Time Series (Digital Currency Daily)']\n return pd.DataFrame(cc_json).T\n\n def get_processed_timeseries_daily(self, symbol, market):\n\n \"\"\"Return a Pandas object with daily timeseries of closing value of\n cryptocurrency at specified market, 3 day rolling average and 7 day\n rolling average, both calculated with a centered window.\n\n Keyword Arguments:\n symbol: symbol of CryptoCurrency to be tracked.\n market: symbol of exchange market to be tracked.\n \"\"\"\n closing_col_name = '4a. 
close ({})'.format(market)\n\n cr_df = self.get_timeseries_daily(symbol, market)[[closing_col_name]]\n cr_df[closing_col_name] = cr_df[closing_col_name].astype(float)\n cr_df['3day RA ({})'.format(market)] = cr_df[closing_col_name]\\\n .rolling(window=3, center=True).mean()\n cr_df['7day RA ({})'.format(market)] = cr_df[closing_col_name]\\\n .rolling(window=7, center=True).mean()\n\n cr_df = cr_df.rename(columns={closing_col_name: '{} closing value ({})'\n .format(symbol, market)})\n return cr_df\n\n @staticmethod\n def plot_timeseries(df):\n\n \"\"\"Plot given dataframe.\n\n Keyword Arguments:\n df: timeseries dataframe with data to be plotted.\n \"\"\"\n df = df.sort_index()\n df.plot()\n plt.tight_layout()\n plt.grid()\n plt.show()\n\n @staticmethod\n def _api_call(url):\n\n \"\"\" Makes the api call and handles the response.\n Raises ValueError on problems.\n\n Keyword Arguments:\n url: The url of the service API\n \"\"\"\n\n response = requests.get(url)\n json_response = response.json()\n\n if not json_response:\n raise ValueError('API is non responsive.')\n elif \"Error Message\" in json_response:\n raise ValueError(json_response[\"Error Message\"])\n elif \"Information\" in json_response:\n raise ValueError(json_response[\"Information\"])\n elif \"Note\" in json_response:\n raise ValueError(json_response[\"Note\"])\n\n return json_response\n\n\nif __name__ == '__main__':\n\n cc = CryptoCurrencies('6ML9EF1OECFZHJDG')\n btc_data = cc.get_processed_timeseries_daily('BTC', 'USD')\n\n print('\\n\\nBitcoin value in USD (last 10 days): \\n\\n', btc_data.head(10))\n cc.plot_timeseries(btc_data)\n","repo_name":"drb162/Donuts","sub_path":"crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21647574531","text":"import time\nimport numpy as np\nfrom logging import getLogger\nfrom libcity.utils import get_evaluator\nfrom libcity.executor.abstract_executor import AbstractExecutor\nfrom libcity.model.utils import *\n\n\nclass HOMEExecutor(AbstractExecutor):\n def __init__(self, config, model, data_feature):\n self._logger = getLogger()\n self.evaluator = get_evaluator(config, data_feature)\n self.config = config\n self.exp_id = self.config.get('exp_id', None)\n self.device = self.config.get('device', torch.device('cpu'))\n self.epochs = self.config.get('max_epoch', 100)\n self.model_name = self.config.get('model', '')\n self.dataset = self.config.get('dataset', '')\n self.seed = self.config.get('seed', '')\n\n super().__init__(config, model, data_feature)\n self.road_cl_ratio = config['road_cl_ratio']\n self.region_cl_ratio = config['region_cl_ratio']\n self.road_region_ratio = config['road_region_ratio']\n self.region_city_ratio = config['region_city_ratio']\n\n self.output_dim = config['output_dim']\n\n self.road_cache_file = './libcity/cache/{}/evaluate_cache/road_embedding_{}_{}_{}.npy'. \\\n format(self.exp_id, self.model_name, self.dataset, self.output_dim)\n self.region_cache_file = './libcity/cache/{}/evaluate_cache/region_embedding_{}_{}_{}.npy'. 
\\\n format(self.exp_id, self.model_name, self.dataset, self.output_dim)\n\n def get_emb(self, data):\n road_emb_to_save, region_emb_to_save = self.model.encode_road_region(\n data['road_node'].x, data['region_node'].x, data.edge_index_dict, emb=True)\n road_emb_to_save = road_emb_to_save.cpu().detach().numpy()\n region_emb_to_save = region_emb_to_save.cpu().detach().numpy()\n return road_emb_to_save, region_emb_to_save\n\n def evaluate(self, data):\n self.model.eval()\n road_emb_to_save, region_emb_to_save = self.get_emb(data)\n np.save(self.road_cache_file, road_emb_to_save)\n np.save(self.region_cache_file, region_emb_to_save)\n self.evaluator.clear()\n self.evaluator.evaluate()\n\n def train(self, data, _, __=None):\n self._logger.info('Start training ...')\n min_val_loss = float('inf')\n wait = 0\n best_epoch = -1\n train_time = []\n eval_time = []\n\n for epoch_idx in range(self.epochs):\n avg_loss, ti = self._train_epoch(data, epoch_idx)\n train_time.append(ti)\n\n t2 = time.time()\n end_time = time.time()\n eval_time.append(end_time - t2)\n self._logger.info('Val Epoch {}/{} complete, avg_loss={:4f}, time={:3f}s'.format(epoch_idx, self.epochs, avg_loss, end_time - t2))\n\n if avg_loss < min_val_loss:\n wait = 0\n if self.saved:\n model_file_name = self.save_model_with_epoch(epoch_idx)\n self._logger.info('Val loss decrease from {:.4f} to {:.4f},'\n 'saving to {}'.format(min_val_loss, avg_loss, model_file_name))\n min_val_loss = avg_loss\n best_epoch = epoch_idx\n else:\n wait += 1\n if wait == self.patience and self.use_early_stop:\n self._logger.warning('Early stopping at epoch: %d' % epoch_idx)\n break\n\n if self.load_best_epoch:\n self.load_model_with_epoch(best_epoch)\n\n return min_val_loss\n\n def _train_epoch(self, data, epoch_idx):\n start_time = time.time()\n\n self.model.train()\n self.optimizer.zero_grad()\n\n road_z1, region_z1, road_z2, region_z2, pos_poi_emb_list, \\\n neg_poi_emb_list, region_emb, neg_region_emb, city_emb = self.model(data)\n\n road_region_loss = self.model.road_region_loss(pos_poi_emb_list, neg_poi_emb_list, region_emb)\n region_city_loss = self.model.region_city_loss(region_emb, neg_region_emb, city_emb)\n\n road_loss = self.model.road_loss(road_z1, road_z2, batch_size=512 if self.dataset == 'bj' else None)\n region_loss = self.model.region_loss(region_z1, region_z2, batch_size=512 if self.dataset == 'bj' else None)\n\n loss = self.road_cl_ratio * road_loss + self.region_cl_ratio * region_loss \\\n + self.road_region_ratio * road_region_loss + self.region_city_ratio * region_city_loss\n loss.backward()\n if self.clip_grad_norm:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)\n self.optimizer.step()\n\n train_time = time.time() - start_time\n self._logger.info('Epoch {}/{} complete, road_cl_loss={:6f}, region_cl_loss={:6f}, '\n 'road_region_loss={:6f}, region_city_loss={:6f}, '\n 'loss={:6f}, time={:3f}s'.format(epoch_idx, self.epochs, road_loss.item(),\n region_loss.item(), road_region_loss.item(),\n region_city_loss.item(), loss.item(), train_time))\n return loss.item(), train_time\n","repo_name":"aptx1231/HOME-GCL","sub_path":"libcity/executor/home_executor.py","file_name":"home_executor.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8141205578","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n 
dependencies = [\n ('events', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='event',\n name='latitude',\n field=models.FloatField(null=True, verbose_name='Latitude', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='event',\n name='location_description',\n field=models.CharField(max_length=50, null=True, verbose_name='Location', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='event',\n name='longitude',\n field=models.FloatField(null=True, verbose_name='Longitude', blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='event',\n name='date_end',\n field=models.DateTimeField(verbose_name='End date/time'),\n ),\n migrations.AlterField(\n model_name='event',\n name='date_start',\n field=models.DateTimeField(verbose_name='Start date/time'),\n ),\n migrations.AlterField(\n model_name='event',\n name='name',\n field=models.CharField(help_text=b'', max_length=64, verbose_name='Event name'),\n ),\n migrations.AlterField(\n model_name='event',\n name='series_number',\n field=models.CharField(help_text=b'Example: Year or yyyy-mm-dd', max_length=32, verbose_name='Series number', blank=True),\n ),\n migrations.AlterField(\n model_name='event',\n name='website',\n field=models.URLField(verbose_name='Website'),\n ),\n migrations.AlterField(\n model_name='series',\n name='name',\n field=models.CharField(help_text=b'eg. DjangoCon Australia', max_length=64, verbose_name='Event name'),\n ),\n migrations.AlterField(\n model_name='series',\n name='website',\n field=models.URLField(verbose_name='Website'),\n ),\n ]\n","repo_name":"elena/django-history","sub_path":"events/migrations/0002_auto_20140913_0943.py","file_name":"0002_auto_20140913_0943.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70373213289","text":"\"\"\"docmuni URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom .views import certificadoedit, certificadolist, certificadonew, \\\n expedienteedit, expedientelist, expedientenew, obralist, obranew, \\\n obraedit\n\n\nurlpatterns = [\n path('certificadoedit/', certificadoedit),\n path('certificadolist', certificadolist),\n path('certificadonew', certificadonew),\n path('expedienteedit/', expedienteedit),\n path('expedientelist', expedientelist),\n path('expedientenew', expedientenew),\n path('obraedit/', obraedit),\n path('obralist', obralist),\n path('obranew', obranew),\n]\n","repo_name":"baroam0/docmuni","sub_path":"expediente/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35210492701","text":"import argparse\nimport pickle\nfrom collections import Counter\n\nimport ijson\nfrom tqdm import tqdm\n\n\ndef journal_stats(data_path):\n f = open(data_path, encoding=\"utf8\")\n objects = ijson.items(f, 'articles.item')\n\n label_id = []\n journals = []\n for i, obj in enumerate(tqdm(objects)):\n try:\n journal = obj['journal']\n mesh_id = obj['meshID']\n label_id.append(mesh_id)\n journals.append(journal)\n except AttributeError:\n print(obj[\"pmid\"].strip())\n\n journal_dict = {}\n mesh_counts = {}\n for i, journal in enumerate(journals):\n if journal in journal_dict:\n journal_dict[journal]['counts'] = journal_dict[journal]['counts'] + 1\n mesh_counts[journal].append(label_id[i])\n else:\n journal_dict[journal] = dict.fromkeys(['counts', 'mesh_counts'])\n journal_dict[journal]['counts'] = 1\n mesh_counts[journal] = [label_id[i]]\n\n for i, ids in enumerate(list(mesh_counts.values())):\n flat_list = [item for sublist in ids for item in sublist]\n occurrences = dict(Counter(flat_list))\n journal_name = list(mesh_counts.keys())[i]\n if journal_name in journal_dict:\n sorted_occurrence = dict(sorted(occurrences.items(), key=lambda item: item[1], reverse=True))\n journal_dict[journal_name]['mesh_counts'] = sorted_occurrence\n else:\n print(journal_name, 'is not in the list')\n\n sorted_journal = dict(sorted(journal_dict.items(), key=lambda item: item[1]['counts'], reverse=True))\n return sorted_journal\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--data')\n parser.add_argument('--save')\n\n args = parser.parse_args()\n\n journal_info = journal_stats(args.data)\n with open(args.save, 'wb') as f:\n pickle.dump(journal_info, f, pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"xdwang0726/KenMeSH","sub_path":"journal_info.py","file_name":"journal_info.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"18143099015","text":"#!/usr/bin/env python3\n\nimport sympy\n\n\nM = 10 ** 6\n\n\ndef main():\n total = 0\n for k in range(1, M + 1):\n if k % 10_000 == 0:\n print(f\"progress: {k}\")\n for d in sympy.divisors(k)[:-1]:\n total += (2 * M - 2 * k + 3 * d + 1) * d // 2\n print(total)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zmwangx/Project-Euler","sub_path":"555/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19236839566","text":"import gspread\nfrom openpyxl import 
Workbook\n\n\ndef get_data_from_gsheets():\n if __name__ == '__main__':\n filename = 'service_credentials.json'\n else:\n filename = 'google_sheets/service_credentials.json'\n service_account = gspread.service_account(filename=filename)\n spreadsheet = service_account.open('AdsKajetan')\n worksheet = spreadsheet.worksheet('accounts')\n\n headers = worksheet.get('A1:G1')[0]\n accounts_data = worksheet.get('A3:G52')\n\n return headers, accounts_data\n\n\ndef update_local_excel_file(data):\n path = '/home/kajetan/Documents/pryzmat/accounts.xlsx'\n wb = Workbook()\n ws = wb.active\n\n headers, accounts_data = data\n ws.append(headers)\n ws.append([])\n for account_data in accounts_data:\n ws.append(account_data)\n\n wb.save(path)\n\n\ndef update_accounts_list():\n update_local_excel_file(get_data_from_gsheets())\n","repo_name":"katek1094/allegroAds","sub_path":"src/google_sheets/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37608996229","text":"## @file\r\n# Standardized Error Handling infrastructures.\r\n#\r\n# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.
\r\n#\r\n# SPDX-License-Identifier: BSD-2-Clause-Patent\r\n#\r\n\r\n'''\r\nToolError\r\n'''\r\n\r\nimport Logger.StringTable as ST\r\n\r\nFILE_OPEN_FAILURE = 1\r\nFILE_WRITE_FAILURE = 2\r\nFILE_PARSE_FAILURE = 3\r\nFILE_READ_FAILURE = 4\r\nFILE_CREATE_FAILURE = 5\r\nFILE_CHECKSUM_FAILURE = 6\r\nFILE_COMPRESS_FAILURE = 7\r\nFILE_DECOMPRESS_FAILURE = 8\r\nFILE_MOVE_FAILURE = 9\r\nFILE_DELETE_FAILURE = 10\r\nFILE_COPY_FAILURE = 11\r\nFILE_POSITIONING_FAILURE = 12\r\nFILE_ALREADY_EXIST = 13\r\nFILE_NOT_FOUND = 14\r\nFILE_TYPE_MISMATCH = 15\r\nFILE_CASE_MISMATCH = 16\r\nFILE_DUPLICATED = 17\r\nFILE_UNKNOWN_ERROR = 0x0FFF\r\n\r\nOPTION_UNKNOWN = 0x1000\r\nOPTION_MISSING = 0x1001\r\nOPTION_CONFLICT = 0x1002\r\nOPTION_VALUE_INVALID = 0x1003\r\nOPTION_DEPRECATED = 0x1004\r\nOPTION_NOT_SUPPORTED = 0x1005\r\nOPTION_UNKNOWN_ERROR = 0x1FFF\r\n\r\nPARAMETER_INVALID = 0x2000\r\nPARAMETER_MISSING = 0x2001\r\nPARAMETER_UNKNOWN_ERROR = 0x2FFF\r\n\r\nFORMAT_INVALID = 0x3000\r\nFORMAT_NOT_SUPPORTED = 0x3001\r\nFORMAT_UNKNOWN = 0x3002\r\nFORMAT_UNKNOWN_ERROR = 0x3FFF\r\n\r\nRESOURCE_NOT_AVAILABLE = 0x4000\r\nRESOURCE_ALLOCATE_FAILURE = 0x4001\r\nRESOURCE_FULL = 0x4002\r\nRESOURCE_OVERFLOW = 0x4003\r\nRESOURCE_UNDERRUN = 0x4004\r\nRESOURCE_UNKNOWN_ERROR = 0x4FFF\r\n\r\nATTRIBUTE_NOT_AVAILABLE = 0x5000\r\nATTRIBUTE_GET_FAILURE = 0x5001\r\nATTRIBUTE_SET_FAILURE = 0x5002\r\nATTRIBUTE_UPDATE_FAILURE = 0x5003\r\nATTRIBUTE_ACCESS_DENIED = 0x5004\r\nATTRIBUTE_RETRIEVE_FAILURE = 0x5005\r\nATTRIBUTE_UNKNOWN_ERROR = 0x5FFF\r\nATTRIBUTE_RETRIEVE_FAILURE = 0x5F00\r\n\r\nIO_NOT_READY = 0x6000\r\nIO_BUSY = 0x6001\r\nIO_TIMEOUT = 0x6002\r\nIO_UNKNOWN_ERROR = 0x6FFF\r\n\r\nCOMMAND_FAILURE = 0x7000\r\n\r\nCODE_ERROR = 0xC0DE\r\n\r\nAUTOGEN_ERROR = 0xF000\r\nPARSER_ERROR = 0xF001\r\nBUILD_ERROR = 0xF002\r\nGENFDS_ERROR = 0xF003\r\nECC_ERROR = 0xF004\r\nEOT_ERROR = 0xF005\r\nDDC_ERROR = 0xF009\r\nWARNING_AS_ERROR = 0xF006\r\nMIGRATION_ERROR = 0xF010\r\nEDK1_INF_ERROR = 0xF011\r\nABORT_ERROR = 0xFFFE\r\nUNKNOWN_ERROR = 0xFFFF\r\n\r\nUPT_ALREADY_INSTALLED_ERROR = 0xD000\r\nUPT_ENVIRON_MISSING_ERROR = 0xD001\r\nUPT_REPKG_ERROR = 0xD002\r\nUPT_ALREADY_RUNNING_ERROR = 0xD003\r\nUPT_MUL_DEC_ERROR = 0xD004\r\nUPT_DB_UPDATE_ERROR = 0xD005\r\nUPT_INI_PARSE_ERROR = 0xE000\r\n\r\n## Error message of each error code\r\n#\r\ngERROR_MESSAGE = {\r\n FILE_NOT_FOUND : ST.ERR_FILE_NOT_FOUND,\r\n FILE_OPEN_FAILURE : ST.ERR_FILE_OPEN_FAILURE,\r\n FILE_WRITE_FAILURE : ST.ERR_FILE_WRITE_FAILURE,\r\n FILE_PARSE_FAILURE : ST.ERR_FILE_PARSE_FAILURE,\r\n FILE_READ_FAILURE : ST.ERR_FILE_READ_FAILURE,\r\n FILE_CREATE_FAILURE : ST.ERR_FILE_CREATE_FAILURE,\r\n FILE_CHECKSUM_FAILURE : ST.ERR_FILE_CHECKSUM_FAILURE,\r\n FILE_COMPRESS_FAILURE : ST.ERR_FILE_COMPRESS_FAILURE,\r\n FILE_DECOMPRESS_FAILURE : ST.ERR_FILE_DECOMPRESS_FAILURE,\r\n FILE_MOVE_FAILURE : ST.ERR_FILE_MOVE_FAILURE,\r\n FILE_DELETE_FAILURE : ST.ERR_FILE_DELETE_FAILURE,\r\n FILE_COPY_FAILURE : ST.ERR_FILE_COPY_FAILURE,\r\n FILE_POSITIONING_FAILURE: ST.ERR_FILE_POSITIONING_FAILURE,\r\n FILE_ALREADY_EXIST : ST.ERR_FILE_ALREADY_EXIST,\r\n FILE_TYPE_MISMATCH : ST.ERR_FILE_TYPE_MISMATCH ,\r\n FILE_CASE_MISMATCH : ST.ERR_FILE_CASE_MISMATCH,\r\n FILE_DUPLICATED : ST.ERR_FILE_DUPLICATED,\r\n FILE_UNKNOWN_ERROR : ST.ERR_FILE_UNKNOWN_ERROR,\r\n\r\n OPTION_UNKNOWN : ST.ERR_OPTION_UNKNOWN,\r\n OPTION_MISSING : ST.ERR_OPTION_MISSING,\r\n OPTION_CONFLICT : ST.ERR_OPTION_CONFLICT,\r\n OPTION_VALUE_INVALID : ST.ERR_OPTION_VALUE_INVALID,\r\n OPTION_DEPRECATED : ST.ERR_OPTION_DEPRECATED,\r\n OPTION_NOT_SUPPORTED : 
ST.ERR_OPTION_NOT_SUPPORTED,\r\n OPTION_UNKNOWN_ERROR : ST.ERR_OPTION_UNKNOWN_ERROR,\r\n\r\n PARAMETER_INVALID : ST.ERR_PARAMETER_INVALID,\r\n PARAMETER_MISSING : ST.ERR_PARAMETER_MISSING,\r\n PARAMETER_UNKNOWN_ERROR : ST.ERR_PARAMETER_UNKNOWN_ERROR,\r\n\r\n FORMAT_INVALID : ST.ERR_FORMAT_INVALID,\r\n FORMAT_NOT_SUPPORTED : ST.ERR_FORMAT_NOT_SUPPORTED,\r\n FORMAT_UNKNOWN : ST.ERR_FORMAT_UNKNOWN,\r\n FORMAT_UNKNOWN_ERROR : ST.ERR_FORMAT_UNKNOWN_ERROR,\r\n\r\n RESOURCE_NOT_AVAILABLE : ST.ERR_RESOURCE_NOT_AVAILABLE,\r\n RESOURCE_ALLOCATE_FAILURE : ST.ERR_RESOURCE_ALLOCATE_FAILURE,\r\n RESOURCE_FULL : ST.ERR_RESOURCE_FULL,\r\n RESOURCE_OVERFLOW : ST.ERR_RESOURCE_OVERFLOW,\r\n RESOURCE_UNDERRUN : ST.ERR_RESOURCE_UNDERRUN,\r\n RESOURCE_UNKNOWN_ERROR : ST.ERR_RESOURCE_UNKNOWN_ERROR,\r\n\r\n ATTRIBUTE_NOT_AVAILABLE : ST.ERR_ATTRIBUTE_NOT_AVAILABLE,\r\n ATTRIBUTE_RETRIEVE_FAILURE : ST.ERR_ATTRIBUTE_RETRIEVE_FAILURE,\r\n ATTRIBUTE_SET_FAILURE : ST.ERR_ATTRIBUTE_SET_FAILURE,\r\n ATTRIBUTE_UPDATE_FAILURE: ST.ERR_ATTRIBUTE_UPDATE_FAILURE,\r\n ATTRIBUTE_ACCESS_DENIED : ST.ERR_ATTRIBUTE_ACCESS_DENIED,\r\n ATTRIBUTE_UNKNOWN_ERROR : ST.ERR_ATTRIBUTE_UNKNOWN_ERROR,\r\n\r\n COMMAND_FAILURE : ST.ERR_COMMAND_FAILURE,\r\n\r\n IO_NOT_READY : ST.ERR_IO_NOT_READY,\r\n IO_BUSY : ST.ERR_IO_BUSY,\r\n IO_TIMEOUT : ST.ERR_IO_TIMEOUT,\r\n IO_UNKNOWN_ERROR : ST.ERR_IO_UNKNOWN_ERROR,\r\n\r\n UNKNOWN_ERROR : ST.ERR_UNKNOWN_ERROR,\r\n\r\n UPT_ALREADY_INSTALLED_ERROR : ST.ERR_UPT_ALREADY_INSTALLED_ERROR,\r\n UPT_ENVIRON_MISSING_ERROR : ST.ERR_UPT_ENVIRON_MISSING_ERROR,\r\n UPT_REPKG_ERROR : ST.ERR_UPT_REPKG_ERROR,\r\n UPT_ALREADY_RUNNING_ERROR : ST.ERR_UPT_ALREADY_RUNNING_ERROR,\r\n UPT_MUL_DEC_ERROR : ST.ERR_MUL_DEC_ERROR,\r\n UPT_INI_PARSE_ERROR : ST.ERR_UPT_INI_PARSE_ERROR,\r\n}\r\n\r\n## Exception indicating a fatal error\r\n#\r\nclass FatalError(Exception):\r\n pass\r\n\r\n","repo_name":"CloverHackyColor/CloverBootloader","sub_path":"BaseTools/Source/Python/UPT/Logger/ToolError.py","file_name":"ToolError.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","stars":4186,"dataset":"github-code","pt":"53"} +{"seq_id":"31809119470","text":"import pyaudio\nimport sys\nimport time\nimport msgpack\nsys.path.append('../..')\nimport numpy as np\nimport re\nfrom shared import create_zmq_server, MessageQueue\nimport sys\nimport wave\nimport datetime\n\nif len(sys.argv) != 2:\n exit('please only supply sound card name')\ndevice_names_string = sys.argv[1]\n\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\nCHUNK = 2000\n\nzmq_socket_1, zmq_server_addr_1 = create_zmq_server()\nzmq_socket_2, zmq_server_addr_2 = create_zmq_server()\n\nmq = MessageQueue('microphone-sensor')\n\np = pyaudio.PyAudio()\ndevice_index = None\nfor i in range(p.get_device_count()):\n device = p.get_device_info_by_index(i)\n if device['name'].startswith('[{}]'.format(device_names_string)):\n device_index = i\n\nif not device_index:\n exit('please connect a proper soundcard')\n\ndevice_names = device_names_string.split(',')\n\nmq.publish(\n exchange='sensors',\n routing_key='microphone.new_sensor.{}'.format(device_names[0]),\n body={'address': zmq_server_addr_1, 'file_type': 'audio'}\n)\nmq.publish(\n exchange='sensors',\n routing_key='microphone.new_sensor.{}'.format(device_names[1]),\n body={'address': zmq_server_addr_2, 'file_type': 'audio'}\n)\n\nsession_name = datetime.datetime.now().isoformat().replace('.', '_').replace(':', '_') + device_names_string\n\n# Let's be on the safe side and recording this to the 
computer...\nwaveFile = wave.open('{}.wav'.format(session_name), 'wb')\nwaveFile.setnchannels(CHANNELS)\nwaveFile.setsampwidth(p.get_sample_size(FORMAT))\nwaveFile.setframerate(RATE)\n\ndef callback(in_data, frame_count, time_info, status):\n result = np.fromstring(in_data, dtype=np.uint16)\n result = np.reshape(result, (frame_count, 2))\n the_time = mq.get_shifted_time()\n zmq_socket_1.send(msgpack.packb((result[:, 0].tobytes(), the_time)))\n zmq_socket_2.send(msgpack.packb((result[:, 1].tobytes(), the_time)))\n waveFile.writeframes(in_data)\n return None, pyaudio.paContinue\n\n\nstream = p.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input_device_index=device_index,\n input=True,\n frames_per_buffer=CHUNK,\n stream_callback=callback\n)\ntry:\n input('[*] Serving at {} and {}. To exit press enter'.format(zmq_server_addr_1, zmq_server_addr_2))\nfinally:\n waveFile.close()\n stream.stop_stream()\n stream.close()\n zmq_socket_1.send(b'CLOSE')\n zmq_socket_2.send(b'CLOSE')\n zmq_socket_1.close()\n zmq_socket_2.close()\n","repo_name":"kontogiorgos/enterface17","sub_path":"src/main/python/sensors/microphone/microphone.py","file_name":"microphone.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3236162846","text":"from django.shortcuts import render\nfrom openpyxl import Workbook, load_workbook\nfrom .models import Sales, Predictions\nfrom django.http import HttpResponse\nfrom django.db.models import Sum\n\n# Create your views here.\ndef home(request):\n\n # Historical Data - Region wise Sales\n\n models = [\"Maruti Suzuki Alto 800\",\"Maruti Suzuki Alto K10\",\"Maruti Suzuki S-Presso\",\"Maruti Suzuki Eeco\",\"Maruti Suzuki Celerio\",\"Maruti Suzuki Swift\",\"Maruti Suzuki Grand Vitara\",\"Maruti Suzuki XL6\",\"Maruti Suzuki Brezza\",\"Maruti Suzuki Dzire\"]\n regions = ['Mumbai','Pune','Nagpur','Nashik']\n region_wise = dict()\n for r in regions:\n region_wise_query = Sales.objects.filter(region = r).values('model').annotate(total_sales=Sum('sales'))\n sv = []\n for i in range(len(region_wise_query)):\n sv.append(region_wise_query[i]['total_sales'])\n region_wise[r] = sv\n print(region_wise)\n\n # Historical Data - Color wise Sales\n\n color_wise_query = Sales.objects.values('color').annotate(total_sales=Sum('sales'))\n color_wise = []\n for i in range(0,3):\n color_wise.append(color_wise_query[i][\"total_sales\"])\n print(color_wise)\n\n # Historical Data - Yearly Sales 2013 -2021\n\n yearly_query = Sales.objects.values('month').annotate(total_sales=Sum('sales'))\n yearly_dict = dict()\n for i in range(len(yearly_query)):\n if yearly_query[i]['month'][-4:] not in yearly_dict:\n yearly_dict[yearly_query[i]['month'][-4:]] = yearly_query[i]['total_sales']\n else:\n yearly_dict[yearly_query[i]['month'][-4:]] += yearly_query[i]['total_sales']\n year_wise = list(yearly_dict.values())\n print(year_wise)\n \n data = {\n 'region_wise' : region_wise,\n 'color_wise' : color_wise,\n 'year_wise' : year_wise,\n }\n context = {\n \"data\":data\n }\n return render(request,'home.html', context)\n \n \ndef forecast(request):\n if request.method == \"POST\":\n color = request.POST.get('color')\n region = request.POST.get('region')\n model = request.POST.get('model')\n sales_query = Predictions.objects.filter(color = color, region = region, model = model).all()\n sales = []\n for s in sales_query:\n sales.append(s.prediction)\n print(sales)\n data = {\n 'sales' : sales,\n }\n context = {\n 
\"data\":data\n }\n return render(request,'filter.html', context)\n return render(request,'filter.html')\n\n\ndef modelview(request):\n return render(request,\"model.html\")\n\ndef modelpage(request, model):\n sales = Sales.objects.filter(model = model).values('model').annotate(total_sales=Sum('sales'))\n total_sales = sales[0]['total_sales']\n data = {\n \"model\":model,\n \"total_sales\":total_sales\n }\n context= {\n \"data\":data\n }\n return render(request,\"alto.html\", context)\n\ndef addhistorydata(request):\n # Load the entire workbook.\n wb = load_workbook(\"data/monthly-car-sales-v3.xlsx\", data_only=True)\n # Load one worksheet.\n ws = wb['Worksheet']\n all_rows = list(ws.rows)\n\n # Pull information from specific cells.\n for row in all_rows[1:]:\n model = row[0].value\n month = row[1].value\n sales = row[2].value\n color = row[3].value\n region = row[4].value\n date = row[5].value\n db = Sales(model = model, month = month, sales = sales, color= color, region = region, date = date)\n db.save()\n print(\"Sales Data Added\")\n return HttpResponse(\"Sales Data Added\")\n\n\ndef addpredictions(request):\n # Load the entire workbook.\n wb = load_workbook(\"data/Predictions-2022.xlsx\", data_only=True)\n # Load one worksheet.\n ws = wb['Worksheet']\n all_rows = list(ws.rows)\n\n # Pull information from specific cells.\n for row in all_rows[1:]:\n model = row[0].value\n color = row[1].value\n region = row[2].value\n month = row[3].value\n prediction = row[4].value\n db = Predictions(model = model, month = month, prediction = prediction, color= color, region = region)\n db.save()\n print(\"Prediction Data Added\")\n return HttpResponse(\"Prediction Data Added\")\n\n\ndef modelinsights(request, model):\n # Maruti Suzuki Alto 800 - Color-wise\n\n color_wise_query = Sales.objects.filter(model = model).values('color').annotate(total_sales=Sum('sales'))\n color_wise = []\n for i in range(0,3):\n color_wise.append(color_wise_query[i][\"total_sales\"])\n print(color_wise)\n\n # Maruti Suzuki Alto 800 - Year-wise\n\n yearly_query = Sales.objects.filter(model = model).values('month').annotate(total_sales=Sum('sales'))\n yearly_dict = dict()\n for i in range(len(yearly_query)):\n if yearly_query[i]['month'][-4:] not in yearly_dict:\n yearly_dict[yearly_query[i]['month'][-4:]] = yearly_query[i]['total_sales']\n else:\n yearly_dict[yearly_query[i]['month'][-4:]] += yearly_query[i]['total_sales']\n year_wise = list(yearly_dict.values())\n print(year_wise)\n\n # Maruti Suzuki Alto 800 Yearly Region-wise\n\n regions = ['Mumbai','Pune','Nagpur','Nashik']\n region_wise = dict()\n for r in regions:\n region_wise_query = Sales.objects.filter(model = model, region = r).values('month').annotate(total_sales=Sum('sales'))\n yearly_dict = dict()\n for i in range(len(region_wise_query)):\n if region_wise_query[i]['month'][-4:] not in yearly_dict:\n yearly_dict[region_wise_query[i]['month'][-4:]] = region_wise_query[i]['total_sales']\n else:\n yearly_dict[region_wise_query[i]['month'][-4:]] += region_wise_query[i]['total_sales']\n region_wise[r] = list(yearly_dict.values())\n print(region_wise)\n\n data = {\n \"region_wise\":region_wise,\n \"color_wise\": color_wise,\n \"year_wise\":year_wise,\n \"model\":model\n }\n context = {\n \"data\":data\n }\n return render(request, 'alto-insights.html', context)\n\n\ndef modelforecast(request):\n return render(request, 'alto-forecast.html')\n\n\ndef groupingdata(request): \n\n # Prediction Data Month-wise\n\n prediction_data_month_wise_query = 
Predictions.objects.values('month').annotate(total_sales=Sum('prediction'))\n prediction_data_month_wise = []\n for i in range(0,12):\n prediction_data_month_wise.append(prediction_data_month_wise_query[i][\"total_sales\"])\n print(prediction_data_month_wise)\n\n # Prediction Data Color-wise\n\n prediction_data_color_wise_query = Predictions.objects.values('color').annotate(total_sales=Sum('prediction'))\n prediction_data_color_wise = []\n for i in range(0,3):\n prediction_data_color_wise.append(prediction_data_color_wise_query[i][\"total_sales\"])\n print(prediction_data_color_wise)\n\n # Prediction Data Region-wise\n\n prediction_data_region_wise_query = Predictions.objects.values('region').annotate(total_sales=Sum('prediction'))\n prediction_data_region_wise = []\n for i in range(0,4):\n prediction_data_region_wise.append(prediction_data_region_wise_query[i][\"total_sales\"])\n print(prediction_data_region_wise)\n\n # Prediction Data Model-wise Color-wise Region-wise Month-wise\n\n m = 0\n c = 0\n r = 0\n models = [\"Maruti Suzuki Alto 800\",\"Maruti Suzuki Alto K10\",\"Maruti Suzuki S-Presso\",\"Maruti Suzuki Eeco\",\"Maruti Suzuki Celerio\",\"Maruti Suzuki Swift\",\"Maruti Suzuki Grand Vitara\",\"Maruti Suzuki XL6\",\"Maruti Suzuki Brezza\",\"Maruti Suzuki Dzire\"]\n regions = ['Mumbai','Pune','Nagpur','Nashik']\n colors = [\"Black\",\"Grey\",\"White\"]\n prediction_query = Predictions.objects.filter(model = models[m], color = colors[c], region = regions[r]).all()\n prediction = []\n for i in range(0,12):\n prediction.append(prediction_query[i].prediction)\n print(prediction)\n\n # Prediction Data Region-wise Month-wise\n\n prediction_region_month_wise = []\n for r in regions:\n prediction_region_month_wise_query = Predictions.objects.filter(region = r).values('month').annotate(total_sales=Sum('prediction'))\n p = []\n for i in range(0,12):\n p.append(prediction_region_month_wise_query[i][\"total_sales\"])\n prediction_region_month_wise.append(p)\n print(prediction_region_month_wise)\n\n # Prediction Data Color-wise Month-wise\n\n prediction_color_month_wise = []\n for c in colors:\n prediction_color_month_wise_query = Predictions.objects.filter(color = c).values('month').annotate(total_sales=Sum('prediction'))\n p = []\n for i in range(0,12):\n p.append(prediction_color_month_wise_query[i][\"total_sales\"])\n prediction_color_month_wise.append(p)\n print(prediction_color_month_wise)\n\n # Historical Data - Region wise Sales\n\n region_wise_query = Sales.objects.values('region').annotate(total_sales=Sum('sales'))\n region_wise = []\n for i in range(0,4):\n region_wise.append(region_wise_query[i][\"total_sales\"])\n print(region_wise)\n\n # Historical Data - Color wise Sales\n\n color_wise_query = Sales.objects.values('color').annotate(total_sales=Sum('sales'))\n color_wise = []\n for i in range(0,3):\n color_wise.append(color_wise_query[i][\"total_sales\"])\n print(color_wise)\n\n # Historical Data - Yearly Sales 2013 -2021\n\n yearly_query = Sales.objects.values('month').annotate(total_sales=Sum('sales'))\n yearly_dict = dict()\n for i in range(len(yearly_query)):\n if yearly_query[i]['month'][-4:] not in yearly_dict:\n yearly_dict[yearly_query[i]['month'][-4:]] = yearly_query[i]['total_sales']\n else:\n yearly_dict[yearly_query[i]['month'][-4:]] += yearly_query[i]['total_sales']\n year_wise = list(yearly_dict.values())\n print(year_wise)\n\n return HttpResponse(\"Done\")\n\n# last_year_data_ungroup = Sales.objects.filter(date__iregex=r'2021$').all()\n # lyd_group_date = 
last_year_data_ungroup.values('date').annotate(total_sales=Sum('sales'))\n # sales_2021 = []\n # for i in range(0,12):\n # sales_2021.append(lyd_group_date[i][\"total_sales\"])\n # total_sales_2021 = sum(sales_2021)\n # current_year_data_ungroup = Predictions.objects.all()\n # cyd_group_date = current_year_data_ungroup.values('month').annotate(total_sales=Sum('prediction'))\n # sales_2022 = []\n # for i in range(0,12):\n # sales_2022.append(cyd_group_date[i][\"total_sales\"])","repo_name":"Deep-De-coder/Tantravihar2023","sub_path":"mysite/salesPrediction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21067247422","text":"#!/usr/bin/env python3\n\"\"\"\n Script for parsing tabbed files\n\"\"\"\nfrom collections import OrderedDict\nimport re\nimport glob\nimport argparse\nimport json\n\n\n\"\"\"\"\n Script Configurations\n\"\"\"\n\nDELIMINER = \"\\t\" # Specify separator\nIGNORE_FIRST_LINE = False # Ignore first line if its a header\n\n# List of Variables in order with their respective types\nVARIABLENAMES = OrderedDict({\n 'key_child': 'boolean',\n 'other_children': 'boolean',\n 'number_of_other_children': 'number',\n 'mother': 'bool',\n 'other_female_adults': 'bool',\n 'number_of_other_female_adults': 'number',\n 'other_male_adults': 'bool',\n 'number_of_other_male_adults': 'number',\n 'who_were_they_talking_to': 'text',\n 'actions': 'text',\n 'comments': 'text',\n 'other_researchers_and_students': 'bool',\n 'research_participants': 'bool',\n 'anyone': 'bool'\n})\n\nFILENAME_PATTERN = \"*_ac.txt\" #  Pattern of the file names\n\n# Pattern of the files with rule to extract name\nEXTRACT_FILENAME_PATTERN = r\"(.*)_ac.txt\"\n\nINPUT_FILETYPE = \".wav\" # The types of files to describe\n\n\"\"\"\n Data Types & Convertion\n\"\"\"\n\n\ndef strToText(text):\n return text\n\n\ndef strToBoolean(boolean):\n if \"true\" in boolean.lower():\n return True\n elif \"1\" in boolean.lower():\n return True\n else:\n return False\n\n\ndef strToNumber(number):\n try:\n if \".\" in number:\n return float(number)\n else:\n return int(number)\n except ValueError:\n return 0\n\n\ndef convertToData(s, type):\n if \"text\" in type:\n return strToText(s)\n elif \"number\" in type:\n return strToNumber(s)\n elif \"bool\" in type:\n return strToBoolean(s)\n\n\n\"\"\"\n Parsor\n\"\"\"\n\n\ndef tokenise(items):\n \"\"\" Tokenise a list of items from the Global Variable array\"\"\"\n result = {}\n counter = 0\n if len(VARIABLENAMES.keys()) == len(items):\n for entryName, entryType in VARIABLENAMES.items():\n result[entryName] = convertToData(items[counter], entryType)\n counter += 1\n return result\n else:\n print(\"File format does not correspond to GRAMAR\")\n return {}\n\n\ndef fileToEntries(filename):\n \"\"\" Create a list of Entrie from a tabbed File \"\"\"\n found = re.search(EXTRACT_FILENAME_PATTERN, filename)\n if found:\n wavFilename = found.group(1) + INPUT_FILETYPE\n resultEntry = []\n with open(filename) as fd:\n for line in fd:\n items = line.split(DELIMINER)\n resultEntry.append(tokenise(items))\n return wavFilename, resultEntry\n else:\n print(\"Filename {\" + filename +\n \"} does not match pattern {\" + FILENAME_PATTERN + \"}\")\n return '', {}\n\n\n\"\"\"\n Append all info files into one dictionairy sorted by filename\n\"\"\"\n\n\ndef appendMultipleTabFiles(folder):\n result = {}\n location = \"{}/{}\".format(folder, FILENAME_PATTERN)\n for filename in 
glob.iglob(location):\n wavFilename, Entries = fileToEntries(filename)\n result[wavFilename] = Entries\n return result\n\n\n\"\"\"\n Main\n\"\"\"\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Image Downloader')\n parser.add_argument('folder', type=str, help='Input folder')\n parser.add_argument('output', type=str, help='Output file')\n args = parser.parse_args()\n\n if args.folder and args.output:\n result = appendMultipleTabFiles(args.folder)\n with open(\"{}.json\".format(args.output), 'w') as fd:\n json.dump(result, fd)\n","repo_name":"nhamilakis/erec","sub_path":"praat_parsor/tab2JSON.py","file_name":"tab2JSON.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1652072849","text":"#\n# requirement 1: serial library must be installed in the system\n# requirement 2: modbus-tk can be downloaded from the link below,\n# http://code.google.com/p/modbus-tk/downloads/detail?name=modbus-tk-0.4.2.zip\n# once downloaded it must be installed\n#\n# This python script expects 3 STRING arguments passed-in as inputs:\n# 1) Number of modbuses in use e.g 1 or 2\n# 2) COM PORT 'A' e.g. COM24 as recognized by the system\n# 3) COM PORT 'B' e.g. COM21 as recognized by the system\n#\n# How to call the script from the command line example,\n# C:\\python> modbus_slave_DAS4_prod_test.py 1 COM24 COM21 True or\n# C:\\python> modbus_slave_DAS4_prod_test.py 2 COM24 COM21 False or\n#\n'''\nThis program starts two modbus slaves for the DAQ code to query.\n'''\n\n#import packages\n#import easygui\nimport time\nimport os\nimport sys\nimport serial\nimport serial.tools.list_ports\nimport pdb\nimport argparse\n\n#---------------------------------------------------------------------------#\n# modbus-tk\nimport modbus_tk\nimport modbus_tk.defines as modbus_defines\nimport modbus_tk.modbus_rtu as modbus_rtu\nimport modbus_tk.modbus_tcp as modbus_tcp\nimport threading\n#---------------------------------------------------------------------------#\n\n#---------------------------------------------------------------------------#\n# logging\nimport logging\n#---------------------------------------------------------------------------#\n\n#---------------------------------------------------------------------------#\ndef parseARGS(argv):\n\n\n return (args.num, args.ports, args.verbose, args.level, args.filename)\n#---------------------------------------------------------------------------#\n\n\n#---------------------------------------------------------------------------#\ndef setupLogger(loglevel, logfilename):\n # assuming loglevel is bound to the string value obtained from the\n # command line argument. 
Convert to upper case to allow the user to\n # specify --log=DEBUG or --log=debug\n numeric_level = getattr(logging, loglevel.upper(), None)\n if not isinstance(numeric_level, int):\n raise ValueError('Invalid log level: %s' % loglevel)\n\n # configure logger\n # logger.setLevel(loglevel.upper())\n # handler_stream_formatter = logging.Formatter('%(levelname)s: %(message)s')\n # handler_stream = logging.StreamHandler()\n # handler_stream.setFormatter(handler_stream_formatter)\n # handler_stream.setLevel(loglevel.upper())\n # logger.addHandler(handler_stream)\n\n if logfilename != '':\n handler_file = logging.FileHandler('c:\\\\temp\\\\' + logfilename)\n # handler_file = logging.FileHandler('/home/jeastman/logs/' + logfilename)\n handler_file_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')\n handler_file.setFormatter(handler_file_formatter)\n handler_file.setLevel(loglevel.upper())\n logger.addHandler(handler_file)\n\n logger.info('Log Level is: %s' % (loglevel))\n logger.info('Log Filename is: %s' % (logfilename))\n#---------------------------------------------------------------------------#\n\n\n#---------------------------------------------------------------------------#\ndef startModbusSlave(modbus_ports_and_slaves_per_port, COM_PORTS, verbose_enabled):\n\n num_modbus_ports = int(modbus_ports_and_slaves_per_port[0])\n num_slaves_per_port = int(modbus_ports_and_slaves_per_port[1])\n\n # modbus-tk\n logger.info('Number of Modbus Slave Ports is %d' % num_modbus_ports)\n logger.info('Number of Modbus Slaves per Port is %d' % num_slaves_per_port)\n\n for x in xrange(0, num_modbus_ports):\n logger.info('COM Port #%d is: %s' % (x + 1, COM_PORTS[x]))\n\n try:\n for modbus_ports in xrange(0, num_modbus_ports):\n exec('server_%d = modbus_rtu.RtuServer(serial.Serial(port=\"%s\", baudrate=9600, bytesize=8, parity=\"N\", stopbits=1, xonxoff=0))' % (modbus_ports, str(COM_PORTS[modbus_ports])))\n\n if verbose_enabled == 'True':\n logger.info('Modbus-tk verbose enabled')\n exec(\"server_%d.set_verbose(True)\" % (modbus_ports))\n else:\n logger.info('Modbus-tk verbose disabled')\n exec(\"server_%d.set_verbose(False)\" % (modbus_ports))\n\n\n exec('server_%d.start()' % (modbus_ports))\n\n for modbus_slaves in xrange(0,num_slaves_per_port):\n exec(\"server_%d_slave_%d = server_%d.add_slave(11 + %d)\" % (modbus_ports, modbus_slaves, modbus_ports, modbus_slaves))\n exec(\"server_%d_slave_%d.add_block('0', modbus_defines.HOLDING_REGISTERS, 100, 10)\" % (modbus_ports, modbus_slaves))\n\n holding_register_values = range(10 + (10 * modbus_slaves), 20 + (10 * modbus_slaves))\n\n exec(\"server_%d_slave_%d.set_values('0', 100, [%s])\" % \\\n (modbus_ports, modbus_slaves, ','.join(map(str, holding_register_values))))\n\n #Connect to the slave\n logger.info('All slave(s) running...')\n logger.info('enter \"quit\" to close the server(s)')\n\n while True:\n cmd = sys.stdin.readline()\n args = cmd.split(' ')\n\n if ((cmd.find('quit')==0) or (cmd.find('q')==0)):\n sys.stdout.write('bye-bye\\n')\n break\n\n finally:\n for modbus_ports in xrange(0, num_modbus_ports):\n exec('server_%d.stop()' % (modbus_ports))\n#---------------------------------------------------------------------------#\n\n\n#---------------------------------------------------------------------------#\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('num', help='number of modbus slave ports and number of slaves per port (format is 1:1)')\n parser.add_argument('ports', help='ports to use for 
the modbus slaves (format is COM1:COM2:COM3)')\n parser.add_argument('-v','--verbose', help='enables/disable modbus-tk verbose mode', default='False')\n parser.add_argument('-l','--level', help='defines the log level to be dispayed to the screen', default='info')\n parser.add_argument('-f','--filename', help='defines the filename of the debugs log', default='')\n args = parser.parse_args()\n\n # start-up logger\n logger = modbus_tk.utils.create_logger(name='console', level=args.level.upper() ,record_format=\"%(levelname)s: %(message)s\")\n # logger = logging.getLogger('modbus_log')\n\n # setupLogger('debug', 'modbus_log.log')\n setupLogger(args.level, args.filename)\n\n logger.info('Script started on: %s' % time.asctime(time.localtime(time.time())))\n\n startModbusSlave(args.num.split(':'), args.ports.split(':'), args.verbose)\n\ndef dependencies_for_freeezing():\n import modbus_tk\n import socket","repo_name":"englianhu/python_bucket","sub_path":"scripts/modbus_slave_modbus-tk.py","file_name":"modbus_slave_modbus-tk.py","file_ext":"py","file_size_in_byte":6717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"13805277648","text":"# Write your test here\nimport pytest\nfrom challenge03 import Tree\ndef test_sortedArrayToBST(): \n \n tree=Tree()\n tree.root=tree.sortedArrayToBST([-10,-3,0,5,9])\n array=tree.BFS()\n\n expected=[0,-3,9,-10,5]\n actual=array\n assert expected==actual","repo_name":"islamalghoul/Code-Challenges-and-Algorithms","sub_path":"python/code_challenges/tree/challenge03/test_challenge03.py","file_name":"test_challenge03.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30873897931","text":"def hangman():\n \n #Functions\n def display():\n print('\\n HANGMAN GAME')\n print(f\" _____ \\n | ||\\n {hang[6]} ||\\n {hang[3]}{hang[5]}{hang[4]} ||\\n {hang[1]} {hang[2]} ||\")\n\n def shuffle():\n import random\n words=['word', 'love', 'mail', 'sun', 'moon', 'tree', 'beauty', 'sick', 'normal', 'deal', 'bill', 'smile', 'sail', 'delight', 'weird', 'warrior', 'barrier', 'single', 'enthusiasm', 'rain', 'mystery']\n random.shuffle(words)\n return(words[0])\n\n def check(inp, word, out, similar):\n if inp in word:\n out=list(out)\n for i in range(len(word)):\n if inp==word[i]:\n out[i]=word[i]\n similar=1\n return(''.join(out), similar)\n\n def win(out):\n if '_' not in out:\n print(f'YOU WON! THE WORD WAS {out}.')\n return(1)\n else:\n return(0)\n \n #Variables\n win_stat=0\n hang={1:'/', 2:\"\\\\\", 3:\"/\", 4:\"\\\\\", 5:\"|\", 6:\"O\"}\n x=1\n\n #Hangman game\n word=shuffle()\n out=word[0]+'_'*(len(word)-2)+word[-1]\n\n while x<7:\n similar=0\n if win_stat==0:\n display()\n print(f\"\\n{' '.join(list(out))}\\n\")\n\n #The interactive part\n win_stat=win(out)\n if win_stat!=1:\n inp=input('Guess a letter: ')\n out, similar=check(inp, word, out, similar)\n \n if similar==0:\n hang[x]=' '\n x+=1\n\n else:\n break\n\n else:\n print(f'\\nGAME OVER! 
THE WORD WAS {word}.') \n\nif __name__=='__main__':\n hangman()","repo_name":"RimRaider639/Python-Scripts","sub_path":"play_terminal_games/all_games/hangman_v1.py","file_name":"hangman_v1.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22370770378","text":"\"\"\"!\nTest exporting the MESH namelist and all its variations\nfrom Blender files to fds, and compare the result with a reference.\n\"\"\"\n\nimport os\nfrom lib.bl_io import blend_tree_to_fds\nfrom lib import config\n\nBL_PATH = \"./bl/\"\nEXCLUDE_DIRS = None\nEXCLUDE_FILES = None\nREF_PATH = \"./bl_ref/\"\n\n\ndef run():\n current_path = os.path.dirname(os.path.abspath(__file__))\n return blend_tree_to_fds(\n package=__package__,\n path=os.path.join(current_path, BL_PATH),\n exclude_dirs=EXCLUDE_DIRS,\n exclude_files=EXCLUDE_FILES,\n ref_path=os.path.join(current_path, REF_PATH),\n run_fds=config.RUN_FDS,\n set_ref=config.SET_REF,\n )\n","repo_name":"firetools/blenderfds.verification","sub_path":"tests/MESH_export/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7863194075","text":"import pygame.font #for text\r\nfrom pygame.sprite import Group\r\n\r\nfrom mermaid import Mermaid\r\n\r\nclass Scoreboard():\r\n \"\"\"A class to report scoring information\"\"\"\r\n\r\n def __init__(self, ai_settings, screen, stats): #report values it's tracking\r\n \"\"\"Initialize scorekeeping attributes\"\"\"\r\n self.screen = screen\r\n self.screen_rect = screen.get_rect()\r\n self.ai_settings = ai_settings\r\n self.stats = stats\r\n\r\n #Font settings for scoring information\r\n self.text_colour = (30, 30, 30)\r\n self.font = pygame.font.SysFont(None, 48)\r\n\r\n #Prepare initial score image\r\n self.prep_score()\r\n self.prep_level()\r\n self.prep_mermaids()\r\n\r\n def prep_mermaids(self):\r\n \"\"\"Show how many lives are left\"\"\"\r\n self.mermaids = Group()\r\n for mermaid_number in range (self.stats.mermaids_left):\r\n mermaid = Mermaid(self.ai_settings, self.screen)\r\n mermaid.rect.x = 10 + mermaid_number * mermaid.rect.width\r\n mermaid.rect.y = 10\r\n self.mermaids.add(mermaid)\r\n\r\n def prep_score(self):\r\n \"\"\"Turn score into rendered image\"\"\"\r\n rounded_score = round(self.stats.score, -1) #round to nearest 10, 100, 1000, etc\r\n score_str = \"{:,}\".format(rounded_score) #string formating = insert commas into numbers\r\n self.score_image = self.font.render(score_str, True, self.text_colour, self.ai_settings.bg_colour)\r\n\r\n #Display score at top right of screen\r\n self.score_rect = self.score_image.get_rect()\r\n self.score_rect.right = self.screen_rect.right - 20 #20 px from right screen edge\r\n self.score_rect.top = 20 #20 px from top edge\r\n\r\n def prep_level(self):\r\n \"\"\"Turn the level into rendered image\"\"\"\r\n self.level_image = self.font.render(str(self.stats.level), True, self.text_colour, self.ai_settings.bg_colour)\r\n\r\n #Position level below score\r\n self.level_rect = self.level_image.get_rect()\r\n self.level_rect.right = self.score_rect.right\r\n self.level_rect.top = self.score_rect.bottom + 10 #position below score + 10 pixels\r\n\r\n def show_score(self):\r\n \"\"\"Draw score to the screen\"\"\"\r\n self.screen.blit(self.score_image, self.score_rect)\r\n self.screen.blit(self.level_image, self.level_rect)\r\n\r\n #Draw mermaids\r\n 
self.mermaids.draw(self.screen)\r\n","repo_name":"sheenagarcia/ITP-final-project","sub_path":"Attempt 2/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36921601660","text":"# A program to export user data as a CSV file.\n\n# define function to export data to a csv\ndef export(f_path, num_bots):\n print(f\"Exporting...\")\n for count in range(0, num_bots):\n print(f\"Please enter the bot id:\")\n bot_id = str(input())\n print(f\"Please enter the bot name:\")\n bot_name = str(input())\n print(f\"Please enter the bot paint:\")\n bot_paint = str(input())\n user_data = bot_id + \",\" + bot_name + \",\" + bot_paint + \"\\n\"\n print(f\"{user_data}\\n\")\n with open(f_path, \"a\") as file:\n file.write(user_data)\n print(f\"Done!\")\n\n\n\n\n\n\n\n#define a function to run\ndef run():\n export(\"exported_bots.csv\", 2)\n\n# run program if not called\nif __name__ == \"__main__\":\n run()","repo_name":"5Dawe/com411","sub_path":"data/files/csv/export_csv.py","file_name":"export_csv.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21081154720","text":"class Solution(object):\n def trap(self, height):\n \"\"\"[0,1,0,2,1,0,1,3,2,1,2,1]\n [0,1,1,2,2,2,2,3,3,3,3,3]\n [3,3,3,3,3,3,3,3,2,2,2,1]\n 0+0+1+0+1+2+1+0+0+1+0+0\n :type height: List[int]\n :rtype: int\n \"\"\"\n water=0\n n=len(height)\n if n<2:\n return 0 \n l=[0]*n\n r=[0]*n\n l[0]=height[0]\n r[n-1]=height[n-1]\n for i in xrange(1,n):\n l[i]=max(l[i-1],height[i])\n for i in xrange(n-2,-1,-1):\n r[i]=max(r[i+1],height[i])\n for i, val in enumerate(height):\n m=min(l[i],r[i])\n if m> val:\n water+=m-val\n \n return water\n \n","repo_name":"melvin0008/leetcode","sub_path":"python/trap_rain.py","file_name":"trap_rain.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39174404136","text":"from flask import Flask, render_template, flash, redirect, render_template, request\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom models import db, connect_db, User, Employee, Department\n\nfrom forms import AddSnackForm\nfrom forms import UserForm\nfrom forms import NewEmployeeForm\napp = Flask(__name__)\napp.app_context().push()\napp.config[\"SECRET_KEY\"] = \"oh-so-secret\"\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"postgresql:///flask_wtforms\"\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\ndebug = DebugToolbarExtension(app)\n\nconnect_db(app)\n\n\n@app.route(\"/\")\ndef homepage():\n \"\"\"Show homepage links.\"\"\"\n # raise\n\n return render_template(\"index.html\")\n\n@app.route('/phones')\ndef list_phones():\n \"\"\"Renders directory of employees and phone numbers (from dept)\"\"\"\n emps = Employee.query.all()\n return render_template('phones.html', emps=emps)\n\n\n@app.route(\"/add\", methods=[\"GET\", \"POST\"])\ndef add_snack():\n \"\"\"Snack add form; handle adding.\"\"\"\n print(request.form)\n form = AddSnackForm()\n # raise\n\n if form.validate_on_submit():\n print(form.name.data)\n print(form.price.data)\n name = form.name.data\n price = form.price.data\n quantity=form.quantity.data\n flash(f\"Added {name} at {price} and quantity of {quantity}\")\n return redirect(\"/\")\n\n else:\n return render_template(\n 
\"snack_add_form.html\", form=form)\n\n\n@app.route(\"/users//edit\", methods=[\"GET\", \"POST\"])\ndef edit_user(uid):\n \"\"\"Show user edit form and handle edit.\"\"\"\n\n user = User.query.get_or_404(uid)\n form = UserForm(obj=user)\n\n if form.validate_on_submit():\n user.name = form.name.data\n user.email = form.email.data\n db.session.commit()\n flash(f\"User {uid} updated!\")\n return redirect(f\"/users/{uid}/edit\")\n\n else:\n return render_template(\"user_form.html\", form=form)\n\n\n@app.route('/employees/new', methods=[\"GET\", \"POST\"])\ndef show_results():\n form= NewEmployeeForm()\n depts = db.session.query(Department.dept_code, Department.dept_name)\n form.dept_code.choices = depts\n if form.validate_on_submit():\n name= form.name.data\n state= form.state.data\n dept_code= form.dept_code.data\n emp = Employee(name=name, state=state, dept_code=dept_code)\n db.session.add(emp)\n db.session.commit()\n return redirect('/phones')\n else:\n return render_template('add_employee_form.html', form=form)","repo_name":"conor-ried/flaskexercises","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25264978971","text":"import random\nimport datetime\nimport codecs, json, sys\n\ninit_lng = 126.923953\ninit_lat = 35.455391\nprint(init_lat,\",\",init_lng)\ncur_lng = init_lng\ncur_lat = init_lat\ncur_time = datetime.datetime.now()\n\n#output_file1 = \"data_1.json\"\noutput_file1 = \"data_2.json\"\n\nfor i in range(80000):\n\tprev_lng = cur_lng\n\tprev_lat = cur_lat\n\tadd_lng = random.uniform(0, 0.000098)\n\tadd_lat = random.uniform(0, 0.000098)\n\tcond = random.randrange(0, 10)\n\ttcond = random.randrange(0, 4)\n\n\tif(tcond == 0):\n\t\taddtime = random.uniform(0, 10)\n\telif(tcond == 1):\n\t\taddtime = random.uniform(0, 30)\n\telif(tcond == 2):\n\t\taddtime = random.uniform(0, 20)\n\telse:\n\t\taddtime = random.uniform(0, 45)\n\t\n\tcur_time = cur_time + datetime.timedelta(seconds = addtime)\n\t\n\n\n\tif(cond == 0 or cond == 3 or cond == 7 or cond == 8):\n\t\tcur_lng = prev_lng + add_lng\n\t\tcur_lat = prev_lat\n\telif(cond == 1 or cond == 2 or cond == 4 or cond == 5 or cond == 9):\n\t\tcur_lng = prev_lng\n\t\tcur_lat = prev_lat + add_lat\n\telse:\n\t\tcur_lng = prev_lng + add_lng\n\t\tcur_lat = prev_lat + add_lat\n\t\n\t#data = \"{location : [\" + str(cur_lng) + \",\" + str(cur_lat) +\"], time: \\\"\"+ str(cur_time.isoformat()).split('.')[0] + \".\" + str(cur_time.isoformat()).split('.')[1][:3]+\"Z\\\", values: {TEMP:\" + str(random.randrange(0,30)) + \"}}\" \n\tdata = \"{location : [\" + str(cur_lng) + \",\" + str(cur_lat) +\"], time: \\\"\"+ str(cur_time.isoformat()).split('.')[0] + \".\" + str(cur_time.isoformat()).split('.')[1][:3]+\"Z\\\", values: {HUM:\" + str(random.randrange(0,55)) + \"}}\" \n\n\twith open(output_file1, 'a+') as file:\n\t\tfile.write(data+'\\n')\n\t\t","repo_name":"minkky/2018-KISTI","sub_path":"2018-KISTI(BaseLine)/public/python/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13969149804","text":"from typing import List, Tuple, Dict\n\n\nclass Graph:\n def __init__(self, vertices:List[int]) -> None:\n self.vertices = vertices\n self.edges: List[Tuple[int]]= []\n\n def add_edge(self, source, dest, weight):\n self.edges.append((source, dest, weight))\n\n def bellman_fords_algo(self, 
source, destination, k):\n previous:Dict[int, int] = [float('inf') for node in self.vertices]\n previous[source] = 0\n current = [float('inf') for node in self.vertices]\n\n for _ in range(k+1):\n current[source] = 0\n for src, dest, weight in self.edges:\n # print(f\"Current val: i = {_}, {src}, {dest}, {weight}\")\n if previous[src] != float('inf'):\n # if dest == destination:\n # print(f\"\\tchanging from {previous[dest]} to {previous[src]+weight}\")\n current[dest] = min(current[dest], previous[src] + weight)\n print(f\"previous:{previous}\\ncurrent:{current}\")\n previous = current.copy()\n\n return previous[destination] if previous[destination] != float(\"inf\") else -1\n","repo_name":"alisha017/Computer-Science-Fundamentals","sub_path":"InterviewCamp/Graphs/single_source_shortest_distance/bellman_ford.py","file_name":"bellman_ford.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13293125899","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 21 23:58:46 2020\n\n@author: tobijegede\n\"\"\"\n\n#importing the modules \nimport pandas as pd\n#import tkinter\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\n\n#To do:\n# look at the HTML code for the Heinz Course Catalog Website \n# import tkinter (think about creating a GUI?)\n# look up information about GitHub\n# Include code that checks whehter one of the desired fields is empty (!!!)\n\n\ndef getHeinzCourseCatalog(course_num):\n #Trial Course Numbers to Use: \n #'90-717' (Writing for Public Policy)\n #'90-819' (Intermediate Programming with Python)\n #'94-806' (Privacy in the Digital Age) \n \n #Is the course number provided valid?\n pattern = r'^([0-9][0-9])\\-([0-9]{3})$'\n if re.search(pattern, course_num) == None: \n print('Error - Please enter a valid format for a course number')\n return \n \n #Go to the url \n course_website = 'https://api.heinz.cmu.edu/courses_api/course_detail/' + course_num\n page = requests.get(course_website)\n #Scraping:\n \n #Parse the page\n soup = BeautifulSoup(page.content, 'html.parser')\n \n # Pulls the entire html file for the specific course number \n course_page = soup.find(id=\"container-fluid\")\n \n #prints the entire html file\n #print(course_page)\n\n #Checks if the course number is a real course number\n if course_page == None: \n print(\"Error - Course Number Does Not Exist\")\n return \n \n #Pulls the section header html code for Units, Description, Learning Outcomes, Prereqs and Syllabus\n course_elements= course_page.find_all(\"p\")\n course_features = []\n for element in course_elements:\n header = element.get_text()\n course_features.append(header)\n # print(course_features)\n \n #Assigning the list to variable names\n class_name = course_features[0]\n #print(class_name)\n #removing class name from course_features\n course_features = course_features[1:]\n #print(course_features)\n #for feature in course_features:\n # class_name = feature\n desc_pattern = r'^(Description:)'\n course_units = ''\n course_loutcomes = 'None'\n course_prereqs = 'None'\n course_syl = 'None'\n #mult_links = [] #no longer need this since syllabus column will be dictionaries\n syllabi = {} #empty dictionary for courses with syllabi\n pattern = '\\([-0-9a-zA-z\\s]*' #pattern for finding Professor names\n for feature in course_features:\n if 'Units' in feature:\n course_units = feature[-2:]\n course_units = int(course_units)\n elif re.search(desc_pattern,feature) 
!= None:\n course_desc = feature[13:]\n #print(course_desc)\n elif 'Learning Outcomes:' in feature:\n course_loutcomes = feature[19:]\n # print(course_loutcomes)\n elif 'Prerequisites Description:' in feature:\n course_prereqs = feature[26:]\n #print(course_prereqs)\n \n else:\n course_syl = feature\n # print(course_syl)\n #Course Syllabus Header & Text\n \n #finds, prints, and formats the information contained about syllabus on the course_page\n syllabus = course_page.select('a')\n #print(syllabus)\n \n #Is there a syllabus?\n if len(syllabus) == 0:\n #mult_links.append('None') #no longer need this since syllabus column will be dictionaries\n #print(\"There is no syllabus available for this class.\")\n syllabi[None] = 'None'\n if len(syllabus) == 1:\n syllabus_link = 'https://api.heinz.cmu.edu' + syllabus[0].get('href')\n str_syllabus = str(syllabus)\n syllabus_prof = re.findall(pattern, str_syllabus)\n #print(syllabus_prof)\n string = syllabus_prof[0]\n string = string[1:-1]\n string = string.split(' ')\n string = string[1] + ', ' + string[0]\n #print(string)\n syllabi[string] = syllabus_link\n #mult_links.append(syllabus_link)\n #print('There is %d syllabus available for this class. \\nHere is the link to the syllabus: %s '\n #% (len(syllabus), syllabus_link))\n #Is there more than one syllabus?\n if len(syllabus) > 1: \n for syl in syllabus:\n syllabus_link = 'https://api.heinz.cmu.edu' + syl.get('href')\n str_syl = str(syl)\n syllabus_prof = re.findall(pattern, str_syl)\n #print(syllabus_prof)\n string = syllabus_prof[0]\n string = string[1:-1]\n string = string.split(' ')\n string = string[1] + ', ' + string[0]\n #print(string)\n syllabi[string] = syllabus_link\n #mult_links.append(syllabus_link)\n #print('There are %d syllabi available for this class. \\nHere are the links to the syllabi: %s '\n #% (len(syllabus), mult_links))\n \n #If syllabus, for what semester? & for what professor?\n \n #Return to the main function to ask for another course number \n # main()\n \n course_details = [course_num, class_name, course_units, course_desc, course_loutcomes, course_prereqs, course_syl, syllabi]\n return course_details\n\n#getHeinzCourseCatalog('90-717')\n\n'''Creating a DataFrame of a list of courses'''\ndata = [] #will be a list of lists that's used to append course object returned from function\ncourses = pd.DataFrame(columns = ['Course Nubmer', 'Name', 'Units', 'Description', 'Outcomes', 'Prerequisites', 'Syllabus', 'Link']) #empty data frame with named columns\ncourse_num = ['90-717', '90-819', '94-806'] #used as a trial list of courses\nfor num in course_num: #loop to append course information to data object\n course = getHeinzCourseCatalog(num)\n data.append(course)\nfor obj in data: #loop to add rows to dataframe; each index is named by the course number\n courses.loc[obj[0]] = obj\n \n\n\n# Potential new function\n'''def whichCourseisBetter(course1, course2):\n course1_rating = \n course2_rating = \n if df[course1].averagerating > df[course2].averagerating:\n print('%s is a higher-rated course than %s \\n', \n %(course1, course2))'''\n\n\n\n\ndef main():\n course_num = input('Please type in a course number (XX-XXX). Enter \"quit\" to stop searching: ')\n if course_num.lower() != 'quit':\n course_info = getHeinzCourseCatalog(course_num)\n # print(course_info)\n if course_info == None or len(course_info) == 0:\n print(\"There is no such class\")\n \n else: \n for i in course_info:\n print(i)\n\n else:\n print(\"Enjoy your day! 
Thanks for stopping by!\")\n #Do I need a list of valid course numbers --- before the retrevial process?\n \n \n \n \nif __name__ == '__main__': \n main() \n \n \n ","repo_name":"sdutchie/heinzcoursesearch","sub_path":"finalproj_webscraping.py","file_name":"finalproj_webscraping.py","file_ext":"py","file_size_in_byte":6863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22973717673","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 23 22:38:53 2018\n\n@author: ecology\n\"\"\"\n\n\n#plot confusion matrix\ndef plot_confusion_matrix(cm,target_names,title='Confusion matrix',cmap=None,normalize=True,\\\n figsize=(8, 6),fontsize=15,labelsize=15,savedir=\"\"):\n \"\"\"\n given a sklearn confusion matrix (cm), make a nice plot\n\n Arguments\n ---------\n cm: confusion matrix from sklearn.metrics.confusion_matrix\n\n target_names: given classification classes such as [0, 1, 2]\n the class names, for example: ['high', 'medium', 'low']\n\n title: the text to display at the top of the matrix\n\n cmap: the gradient of the values displayed from matplotlib.pyplot.cm\n see http://matplotlib.org/examples/color/colormaps_reference.html\n plt.get_cmap('jet') or plt.cm.Blues\n\n normalize: If False, plot the raw numbers\n If True, plot the proportions\n\n Usage\n -----\n plot_confusion_matrix(cm = cm, # confusion matrix created by\n # sklearn.metrics.confusion_matrix\n normalize = True, # show proportions\n target_names = y_labels_vals, # list of names of the classes\n title = best_estimator_name) # title of graph\n\n Citiation\n ---------\n http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n import itertools\n\n accuracy = np.trace(cm) / float(np.sum(cm))\n misclass = 1 - accuracy\n\n if cmap is None:\n cmap = plt.get_cmap('YlGnBu')\n\n cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n plt.figure(figsize=figsize)\n plt.imshow(cm_norm, interpolation='nearest', cmap=cmap,vmin=0, vmax=1)\n plt.title(title,fontsize=labelsize)\n plt.colorbar()\n if target_names is not None:\n tick_marks = np.arange(len(target_names))\n plt.xticks(tick_marks, target_names, rotation=45, size=labelsize)\n plt.yticks(tick_marks, target_names, size=labelsize)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n\n thresh = cm_norm.max()/2\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),fontsize=fontsize,\n horizontalalignment=\"center\",\n color=\"white\" if cm_norm[i, j] > thresh else \"black\")\n else:\n plt.text(j, i, \"{:,}\".format(cm[i, j]),fontsize=fontsize,\n horizontalalignment=\"center\",\n color=\"white\" if cm_norm[i, j] > thresh else \"black\")\n\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label\\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))\n if not savedir==\"\":\n plt.savefig(savedir,dpi=600)\n plt.show()\n","repo_name":"ZH-pku/xgb_vegetation_mapping","sub_path":"modeling_and_mapping/codes/src/ploting.py","file_name":"ploting.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"2340201752","text":"import boto3\nimport uuid\n\nfrom django.views import View\nfrom django.http import JsonResponse\nfrom django.db import transaction\nfrom django.db import 
IntegrityError\nfrom django.db.models import Count\n\nfrom core.utils import signin_decorator \nfrom hosts.models import Host\nfrom rooms.models import Category, Room, Facility, RoomFacility, RoomType, Image\n\nfrom my_settings import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, IMAGE_URL, AWS_BUCKET_NAME\n\nclass HostingRoomView(View):\n \n @signin_decorator\n def post(self, request):\n \n data = request.POST \n\n try:\n host = Host.objects.get(user=request.user) \n \n name = data['name']\n address = data['address']\n detail_address = data['detail_address']\n price = data['price']\n description = data['description']\n latitude = data['latitude']\n longitude = data['longitude']\n maximum_occupancy = data['maximum_occupancy']\n bedroom = data['bedroom']\n bathroom = data['bathroom']\n bed = data['bed']\n host = data['host_id'] \n category = data['category_id']\n room_type = data['room_type_id']\n facility_ids = data.getlist('facility_id') \n\n files = request.FILES.getlist('files')\n\n if not Category.objects.filter(id=category).exists():\n return JsonResponse({'message':'CATEGORY_DOES_NOT_EXIST'}, status=404)\n \n if not RoomType.objects.filter(id=room_type).exists():\n return JsonResponse({'message':'ROOM_TYPE_DOES_NOT_EXIST'}, status=404)\n \n \n if not Facility.objects.filter(id__in=facility_ids)\\\n .aggregate(count=Count('id'))\\\n .get('count') == len(facility_ids):\n return JsonResponse({'message':'FACILITY_DOES_NOT_EXIST'}, status=404)\n \n with transaction.atomic():\n room, created = Room.objects.get_or_create(\n name = name,\n defaults = {\n \"address\" : address,\n \"detail_address\" : detail_address,\n \"price\" : price,\n \"description\" : description,\n \"latitude\" : latitude,\n \"longitude\" : longitude,\n \"maximum_occupancy\" : maximum_occupancy,\n \"bedroom\" : bedroom,\n \"bathroom\" : bathroom,\n \"bed\" : bed,\n \"host\" : Host.objects.get(id=host),\n \"category\" : Category.objects.get(id=category),\n \"room_type\" : RoomType.objects.get(id=room_type)\n }\n )\n \n if not created:\n return JsonResponse({'message':'ROOM_NAME_ALREADY_EXIST'}, status=400)\n \n created_room_id = Room.objects.latest('id').id \n \n RoomFacility.objects.bulk_create([\n RoomFacility(\n room_id = Room.objects.latest('id').id, \n room_facility_id = facility_id\n ) for facility_id in facility_ids ])\n\n for file in files:\n file._set_name(str(uuid.uuid4()))\n s3r = boto3.resource('s3', aws_access_key_id = AWS_ACCESS_KEY_ID, \n aws_secret_access_key = AWS_SECRET_ACCESS_KEY)\n s3r.Bucket(AWS_BUCKET_NAME).put_object(Key = host + '/%s'%(file), \n Body=file, \n ContentType='jpg')\n Image.objects.create(\n url = IMAGE_URL + '/' + f'{host}/{file}',\n room_id = created_room_id\n )\n \n images = Image.objects.filter(room_id = created_room_id).all()\n\n data = {\n \"id\" : room.id,\n \"name\" : room.name,\n \"room_images\" : [image.url for image in images]\n }\n \n return JsonResponse({'message':'SUCCESS', 'room':data}, status=201)\n\n except Host.DoesNotExist:\n return JsonResponse({'message':'HOST_DOES_NOT_EXIST'}, status=404)\n \n except KeyError:\n return JsonResponse({'message':'KEY_ERROR'}, status=400)\n \n except IntegrityError:\n return JsonResponse({'message':'UNKNOWN_DATA'}, status=400)","repo_name":"wecode-bootcamp-korea/35-2nd-WnB-backend","sub_path":"hosts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14561434063","text":"import os\nimport wget\nimport 
time\nimport yaml\nimport glob\nimport torch\nimport random\nimport inspect\nimport datetime\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport seaborn as sns\nfrom numpy import linalg as la\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\n\n\n\n#########################################\n############ Basic functions ############\n#########################################\n\ndef check_folder(path):\n \"\"\"Create adequate folders if necessary.\"\"\"\n try:\n if not os.path.isdir(path):\n check_folder(os.path.dirname(path))\n os.mkdir(path)\n except:\n pass\n \ndef read_yaml(yaml_path):\n \"\"\"Open and read safely a yaml file.\"\"\"\n with open(yaml_path, 'r') as stream:\n try:\n parameters = yaml.safe_load(stream)\n except :\n print(\"Couldn't load yaml file: {}.\".format(yaml_path))\n return parameters\n\ndef save_yaml(data, yaml_path):\n \"\"\"Open and write safely in a yaml file.\n Arguments:\n - data: list/dict/str/int/float\n -yaml_path: str\n \"\"\"\n with open(yaml_path, 'w') as outfile:\n yaml.dump(data, outfile, default_flow_style=False)\n\ndef filter_args(func, d):\n \"\"\" Filter dictionary keys to match the function arguments.\n Arguments:\n - func: function\n - d: dict\n Returns:\n - args: dict\n \"\"\"\n keys = inspect.getfullargspec(func).args\n args = {key: d[key] for key in keys if ((key!='self') and (key in d.keys()))}\n return args\n\ndef get_device(device_number=0, local_rank=-1):\n \"\"\" Get the device to use for computations.\n \"\"\"\n if local_rank == -1:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n print('There are %d GPU(s) available.' % torch.cuda.device_count())\n if torch.cuda.is_available():\n print('We will use the GPU:', torch.cuda.get_device_name(device_number))\n else:\n print('No GPU available, using the CPU instead.')\n else:\n torch.cuda.set_device(local_rank)\n device = torch.device(\"cuda\", local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n return device\n\ndef format_time(elapsed):\n \"\"\"\n Takes a time in seconds and returns a string hh:mm:ss\n \"\"\"\n # Round to the nearest second.\n elapsed_rounded = int(round((elapsed)))\n # Format as hh:mm:ss\n return str(datetime.timedelta(seconds=elapsed_rounded))\n\ndef set_seed(value=1111):\n \"\"\" Set all seeds to a given value for reproductibility.\"\"\"\n random.seed(value)\n np.random.seed(value)\n torch.manual_seed(value)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(value)\n\n\n\n#########################################\n########### Special functions ###########\n#########################################\n\n#def neighborhood_density(model, iterator, method='mean', threshold=0.7, param=None):\n# columns_activations = ['neighborhood_density']\n# activations = []\n# # computing metric\n# result = np.zeros(len(model))\n# tmp = np.zeros((len(model), len(model)))\n# for i in range(len(model) - 1):\n# for j in range(i + 1, len(model)):\n# tmp[i,j] = cosine_similarity(model.vectors[i], model.vectors[j])\n# vector = tmp[0,1:] if i==0 else (concat(tmp[i,i+1:], tmp[:i,i]))\n# if method == 'mean':\n# result[i] = np.mean(vector)\n# elif method == 'threshold':\n# vector[vector < threshold] = 0\n# result[i] = np.count_nonzero(vector)\n# # generating prediction\n# for item in tqdm(iterator):\n# if item in words2add.keys():\n# for word in 
words2add[item][0]:\n# activations.append(result[model[word].index])\n# skip = words2add[item][1]\n# elif skip ==0:\n# activations.append(result[model[item].index])\n# else:\n# skip -= 1\n# return pd.DataFrame(np.vstack(activations), columns=columns_activations)\n\n\ndef embeddings(model, iterator, embedding_size):\n columns_activations = ['embedding-{}'.format(i) for i in range(1, 1 + embedding_size)]\n activations = []\n for item in tqdm(iterator):\n if item not in model.keys():\n item = ''\n activations.append(model[item])\n return pd.DataFrame(np.vstack(activations), columns=columns_activations)\n\ndef embeddings_past_context(model, iterator, embedding_size, context_size, decreasing_factor, normalize=False, weighted_sum=False):\n columns_activations = ['embedding-{}'.format(i) for i in range(1, 1 + embedding_size)]\n activations = []\n for index, item in tqdm(enumerate(iterator)):\n activation = np.zeros(embedding_size)\n if item not in model.keys():\n item = ''\n tmp = model[item]/la.norm(model[item], ord=2) if normalize else model[item]\n activation += tmp\n for i, item_context in enumerate(iterator[max(0, index-context_size+1):index]): # +1 because context_size==1 is the current word\n if item_context not in model.keys():\n item_context = ''\n tmp = model[item_context]/la.norm(model[item_context], ord=2) if normalize else model[item_context]\n activation += tmp * (decreasing_factor ** (len(iterator[max(0, index-context_size+1):index]) - i))\n if weighted_sum:\n tmp = activation * (1 - decreasing_factor) / (1-decreasing_factor**len(iterator[max(0, index-context_size+1):index+1]))\n else:\n tmp = activation / la.norm(activation, ord=2)\n activations.append(tmp)\n return pd.DataFrame(np.vstack(activations), columns=columns_activations)\n\ndef embeddings_future_context(model, iterator, embedding_size, context_size, decreasing_factor, normalize=False, weighted_sum=False):\n columns_activations = ['embedding-{}'.format(i) for i in range(1, 1 + embedding_size)]\n activations = []\n for index, item in tqdm(enumerate(iterator)):\n activation = np.zeros(embedding_size)\n if item not in model.keys():\n item = ''\n tmp = model[item]/la.norm(model[item], ord=2) if normalize else model[item]\n activation += tmp\n for i, item_context in enumerate(iterator[min(index+1, len(iterator)): min(index+1 + context_size, len(iterator))]): # +1 because context_size==1 for future is the current word + the next word\n if item_context not in model.keys():\n item_context = ''\n tmp = model[item_context]/la.norm(model[item_context], ord=2) if normalize else model[item_context]\n activation += tmp * (decreasing_factor ** (i+1))\n if weighted_sum:\n tmp = activation * (1 - decreasing_factor) / (1-decreasing_factor**len(iterator[min(index, len(iterator)): min(index+1 + context_size, len(iterator))]))\n else:\n tmp = activation / la.norm(activation, ord=2)\n activations.append(tmp)\n return pd.DataFrame(np.vstack(activations), columns=columns_activations)","repo_name":"AlexandrePsq/NLP_models","sub_path":"GLOVE/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37316122482","text":"\"\"\"\n剑指 Offer 33. 
二叉搜索树的后序遍历序列\n输入一个整数数组,判断该数组是不是某二叉搜索树的后序遍历结果。\n如果是则返回 true,否则返回 false。\n假设输入的数组的任意两个数字都互不相同。\n\n参考以下这颗二叉搜索树:\n 5\n / \\\n 2 6\n / \\\n 1 3\n\n示例 1:\n输入: [1,6,3,2,5]\n输出: false\n\n示例 2:\n输入: [1,3,2,6,5]\n输出: true\n\n提示:数组长度 <= 1000\n\ndate : 1-21-2021\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def verifyPostorder(self, postorder: List[int]) -> bool:\n \"\"\"\n\n :param postorder:\n :return:\n \"\"\"\n\n def recur(i, j):\n if i >= j:\n return True\n p = i\n while postorder[p] < postorder[j]:\n p += 1\n m = p\n while postorder[p] > postorder[j]:\n p += 1\n return p == j and recur(i, m - 1) and recur(m, j-1)\n\n return recur(0, len(postorder) - 1)\n","repo_name":"Aiooon/MyLeetcode","sub_path":"python/offer_33_verifyPostorder.py","file_name":"offer_33_verifyPostorder.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12127056629","text":"import cv2 as cv\nimport numpy as np\n\n\n\nimg = cv.imread(\"files/opencv/brick.jpg\")\n\nif img is None :\n print(\"something went wromg!!\")\nelse :\n cvt_img = cv.cvtColor(img,cv.COLOR_BGR2RGB)\n\n # blue histogram BGR ,Blue = 0\n bhistogram =cv.calcHist([img],channels=[0],mask=None,histSize=[256],ranges=[0,256])\n print(bhistogram.shape)\n cv.imshow(\"blue histogram\",bhistogram)\n\n\n\ncv.waitKey(0)\ncv.destroyAllWindows()","repo_name":"yogeshdhameliya6013/python","sub_path":"files/opencv/basics/imagehistogram.py","file_name":"imagehistogram.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73763807849","text":"import logging\n# IMPORTING LOCAL \"database.py\" FILE AS MODULE\nfrom constants import *\nimport database\nimport time\n\n# <====================== VARIABLES ======================>\nyears = [1,19,20,21,22,23,24,25]\nadmin = [1387682420,862799552,1339656586,1417818674]\n#mahesh chat id - 1490833723\n# admin=[]\nteachers=[]\n# for y in database.results.list_collection_names():\n# # collectio name is \"R19 Results\" and we're slicing it like [1:3] ==> \"19\" \n# # and then conveting to integer and appending that year to \"years\" list\n# years.append(int(y[1:3]))\n\nstudent_registrations = {}\nfor y in years:\n # this dictionary cotains YEAR as a Key and its respective COLLECTION as a Value \n # which is used to seggregate the registrations data YEAR-WISE\n student_registrations[y] = database.stu_registration[f'R{y} Student Registrations']\n# <================================================================>\n\n# <================== AUTHENTICATE USER ====================>\n\n# This fuction is used to check whether the student is registered or not \ndef authenticate_user(message):\n # print(\"Authenticating\")\n registered = None\n # we'll check in every year's collection whether the student's chat-id\n # is registered or not. If we found the chat-id then we will break the loop \n # and return \"registered\" variable as \"True\" else we will return \"None\"\n for y in years:\n if student_registrations[y].find_one({'_id':message.chat.id}) == registered:\n continue\n else:\n registered = True\n break\n return registered\n\n# <=================================================================>\n\ndef results_authenticate_user(message):\n # print(\"Authenticating\")\n registered = None\n # we'll check in every year's collection whether the student's chat-id\n # is registered or not. 
If we found the chat-id then we will break the loop \n # and return \"registered\" variable as \"True\" else we will return \"None\"\n for y in years:\n user_dict = student_registrations[y].find_one({'_id':message.chat.id})\n if user_dict == registered:\n continue\n else:\n registered = True\n break\n return user_dict\n\n# <=================================================================>\n\n# <================== VERIFY SECURITY CODE AND REGISTER THE STUDENT ====================>\n\ndef verify_otp(message):\n if(message.text == otp_dict[message.chat.id][0]):\n mail_id = otp_dict.pop(message.chat.id)[1]\n roll_num = mail_id[0:-9].upper()\n current_ids.append({\n \"_id\":message.chat.id,\n \"ROLL_NUM\":roll_num,\n \"TIME\": time.asctime(time.localtime())\n })\n if(mail_id[4:6].lower() == '5a'):\n year = str(int(mail_id[0:2]) - 1)\n elif(mail_id[4:6].lower()=='1a'):\n year = mail_id[0:2]\n else:\n year=1\n # Inserting the student details into the database\n student_registrations[int(year)].insert_many(current_ids)\n current_ids.clear()\n return True\n return False\n# <=================================================================>\n\n# <================== ADMIN AUTHENTICATION ====================>\n\ndef admin_authentication(message):\n if authenticate_user(message) and message.chat.id in admin:\n return True\n return False\n\n# <=================================================================>\n\n \n# <================== DELETE/ DEREGISTER THE USER(STUDENT) ====================>\n\ndef deleteUser(message):\n roll_num = message.upper()\n if(message[4:6].lower() == '5a'):\n year = str(int(message[0:2]) - 1)\n elif(message[4:6].lower()=='1a'):\n year = message[0:2]\n else:\n year=1\n\n deleted_user = student_registrations[int(year)].find_one({\"ROLL_NUM\":roll_num})\n if deleted_user != None:\n student_registrations[int(year)].delete_one({\"ROLL_NUM\":roll_num})\n return deleted_user\n\n# <=================================================================>\n\n# <================== TOTAL REGISTRATION DATA ====================>\n\ndef total_registrations():\n no_of_registrations = \"\"\n for y in years:\n if student_registrations[y].find():\n registrations = list(student_registrations[y].find())\n no_of_registrations += f\"{y}-Batch registrations: {len(registrations)}\\n\"\n return no_of_registrations\n \n# <=================================================================>\n\n\n# ====================== USER DETAILS =====================>\n\ndef user_details(message):\n details = \"\"\n roll_num = message.upper()\n if(message[4:6].lower() == '5a'):\n year = str(int(message[0:2]) - 1)\n elif(message[4:6].lower()=='1a'):\n year = message[0:2]\n else:\n year=1\n details = student_registrations[int(year)].find_one({\"ROLL_NUM\":roll_num})\n if details != None:\n return dict(details)\n return details\n\n# ====================================================>","repo_name":"Dasari-Bhovan/ResTele-Bot-1","sub_path":"register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72241695208","text":"# type 'cp' or '' (VQA-CP or VQA)\ntype = ''\n# dataset version (v1 or v2)\nversion = 'v2'\n# train or trainval\nmode = 'train'\nassert not (type == 'cp' and mode == 'trainval')\n\n# import settings\ntask = 'OpenEnded'\ndataset = 'mscoco'\nmax_question_len = 14\nvisual_glimpses = 1\ntest_split = 'test2015' # either 'test-dev2015' or 'test2015'\n\n# directory containing all 
images\ndataroot = '/home/share/liuyibing/vqa/'\n# dataroot = '/home/caiyuqi/liuyibing/vqa/'\nif type == 'cp':\n image_path = dataroot + 'mscoco-cp'\nelse:\n image_path = dataroot + 'mscoco' \n\n# directory containing the question and annotation jsons\nif version == 'v1':\n qa_path = dataroot + 'vqa-{}1.0/qa_path/'.format(type)\nelif version == 'v2':\n qa_path = dataroot + 'vqa-{}2.0/qa_path/'.format(type)\n\nglove_path = dataroot + 'word_embed/glove/'\n\n# dataroot and proceed_data path\nmain_path = '../data/'\ncache_root = main_path + (type + version)\nrcnn_path = main_path + '../rcnn-data/'\n# rcnn_path = dataroot + 'rcnn-data/'\n\noutput_features = 2048\nrcnn_output_size = 36 # max number of object proposals per image\nbottom_up_trainval_path = dataroot + 'rcnn-data/tsv/trainval_{}'.format(rcnn_output_size) # directory containing the .tsv file(s) with bottom up features\nbottom_up_test_path = dataroot + 'rcnn-data/tsv/test2015_{}'.format(rcnn_output_size) # directory containing the .tsv file(s) with bottom up features\nrcnn_trainval_path = rcnn_path + 'trainval_{}.h5'.format(rcnn_output_size) # path where preprocessed features from the trainval split are saved to and loaded from\nrcnn_test_path = rcnn_path + 'test_{}.h5'.format(rcnn_output_size) # path where preprocessed features from the test split are saved to and loaded from\n\nhid_dim = 1024\nworkers = 4\n\nuse_debias = False\nuse_rubi = False\n\nuse_rho = False\n\nuse_hint = True\nuse_all = True\n\nhint_type = 'qa' # ['qa', 'vqx']\noptimize_type = 'not_focus_objs' # ['all', 'not_focus_objs', 'att', 'overfit', 'none']\nfusion_type = 'mul' # ['cat', 'mul']\n\n# hard mask\nmasks = 7\nnum_sub = 4\n\natt_norm = True","repo_name":"BierOne/VQA-AttReg","sub_path":"utilities/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"15545809978","text":"import numpy as np\nimport pytest\n\nfrom hmlf.environments import DummyHybrid\nfrom hmlf.spaces import SimpleHybrid\n\nN_MAX_STEPS = 50\n\n\ndef test_type_assertions():\n with pytest.raises(AssertionError):\n DummyHybrid(\"hello\")\n with pytest.raises(AssertionError):\n DummyHybrid([1, 2, 3.4])\n with pytest.raises(AssertionError):\n DummyHybrid([1, 2], 2.3)\n with pytest.raises(ValueError):\n DummyHybrid([-1, 2], 2)\n with pytest.raises(AssertionError):\n DummyHybrid([1, 2], -2)\n\n\ndef test_empty_list():\n with pytest.raises(ValueError):\n DummyHybrid([])\n\n\n@pytest.mark.parametrize(\n \"parameter_dimensions\",\n [\n [1],\n [1, 3, 4],\n [5, 6, 7, 1, 2],\n ],\n)\ndef test__dimensions(parameter_dimensions):\n env = DummyHybrid(parameter_dimensions)\n assert env.n_parameter_spaces == len(parameter_dimensions)\n assert type(env.action_space) is SimpleHybrid\n assert env.action_space.get_dimension() == int(1 + np.sum(parameter_dimensions))\n\n\n@pytest.mark.parametrize(\n \"observation_dimension\",\n [\n 1,\n 10,\n 434,\n ],\n)\ndef test_observation_dimensions(observation_dimension):\n env = DummyHybrid([1], observation_dimension)\n assert env.observation_space.shape[0] == observation_dimension\n\n\ndef test_step():\n observation_dimension = 10\n env = DummyHybrid([1, 3, 2], observation_dimension)\n for i in range(10):\n observation, reward, is_done, info = env.step(env.action_space.sample())\n assert type(observation) is np.ndarray\n assert len(observation) == observation_dimension\n assert type(reward) is float\n assert type(is_done) is bool\n assert type(info) 
is dict\n assert info == {}\n assert env.n_steps == i + 1\n\n\ndef test_reset():\n observation_dimension = 10\n env = DummyHybrid([1, 3, 2], observation_dimension)\n for _ in range(10):\n env.step(env.action_space.sample())\n observation = env.reset()\n assert type(observation) is np.ndarray\n assert len(observation) == observation_dimension\n assert env.n_steps == 0\n\n\n@pytest.mark.parametrize(\n \"parameters\",\n [\n [-1, 1],\n [2, 2],\n [0, 0],\n [-1e-4, 0],\n ],\n)\ndef test_reward_not_positive(parameters):\n env = DummyHybrid([2])\n _, reward, _, _ = env.step((0, parameters))\n assert reward <= 0\n\n\n@pytest.mark.parametrize(\n \"steps\",\n [\n 2,\n N_MAX_STEPS - 1,\n N_MAX_STEPS,\n N_MAX_STEPS + 1,\n N_MAX_STEPS + 10,\n ],\n)\ndef test_is_done(steps):\n env = DummyHybrid([2])\n random_value = np.random.random(size=0)\n _, _, is_done, _ = env.step((0, [random_value, -random_value]))\n assert is_done\n env.reset()\n\n for _ in range(steps):\n _, _, is_done, _ = env.step((env.action_space.sample()))\n\n assert is_done == (steps >= N_MAX_STEPS)\n","repo_name":"lorenzob123/HMLF","sub_path":"tests/test_dummy_hybrid.py","file_name":"test_dummy_hybrid.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"23702780369","text":"import time\n\nstart = time.time()\n\nfrom math import factorial\n\nn = factorial(100)\n\nsolution = 0\n\nwhile n:\n solution += n % 10\n n //= 10\n\nprint(solution)\n\nend = time.time()\n\n# Executes in 0.0 seconds\nprint(end - start)\n","repo_name":"Cikguseven/Project-Euler","sub_path":"Solutions/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74847384489","text":"#!/usr/bin/python3\n\nclass Node:\n def __init__(self, data, lchild=None, rchild=None):\n self.data = data\n self.lchild = lchild\n self.rchild = rchild\n\ndef pre_order(root):\n if root != None:\n print(root.data, end=' ')\n pre_order(root.lchild)\n pre_order(root.rchild)\n\ndef in_order(root):\n if root != None:\n in_order(root.lchild)\n print(root.data, end=' ')\n in_order(root.rchild)\n\ndef post_order(root):\n if root != None:\n post_order(root.lchild)\n post_order(root.rchild)\n print(root.data, end=' ')\n\ndef layor_order(root):\n if root == None:\n return\n\n q = []\n p = None\n q.append(root)\n while len(q) > 0:\n p = q.pop(0)\n print(p.data, end=' ')\n\n if p.lchild != None:\n q.append(p.lchild)\n if p.rchild != None:\n q.append(p.rchild)\n\n print()\n\ndef height(root):\n if root == None:\n return 0\n\n left_height = height(root.lchild)\n right_height = height(root.rchild)\n\n if left_height > right_height:\n return left_height + 1\n else:\n return right_height + 1\n\nif __name__ == \"__main__\":\n a = Node('A', Node('B', Node('D', None, Node('F')), None), Node('C', None, Node('E')))\n print(\"PreOrder:\")\n pre_order(a)\n print()\n\n print(\"InOder:\")\n in_order(a)\n print()\n\n print(\"PostOrder:\")\n post_order(a)\n print()\n\n print('LayorOrder:')\n layor_order(a)\n\n print(\"Tree height:\", height(a))\n","repo_name":"erqidiy/data-structure-python","sub_path":"tree/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38146522747","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import 
render\nimport requests\nimport math\nfrom .models import WikipediaArticle\n\n\ndef haversine(lat1, lon1, lat2, lon2):\n # Radius of the Earth in kilometers\n R = 6371\n\n # Convert latitude and longitude from degrees to radians\n lat1 = math.radians(lat1)\n lon1 = math.radians(lon1)\n lat2 = math.radians(lat2)\n lon2 = math.radians(lon2)\n\n # Haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n distance = R * c\n\n return distance\n\ndef get_nearby_wikipedia(request):\n if request.method == 'POST':\n latitude = request.POST.get('latitude', '')\n longitude = request.POST.get('longitude', '')\n\n if latitude and longitude:\n S = requests.Session()\n\n URL = \"https://en.wikipedia.org/w/api.php\"\n\n PARAMS = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"generator\": \"geosearch\",\n \"ggsprimary\": \"all\",\n \"ggsradius\": 10000, # You can adjust the radius as needed\n \"ggscoord\": f\"{latitude}|{longitude}\",\n \"prop\": \"coordinates|pageimages\"\n }\n\n R = S.get(url=URL, params=PARAMS)\n DATA = R.json()\n print(\"hello\")\n print(DATA)\n # Check if 'query' key exists in the response\n if 'query' in DATA:\n PLACES = DATA['query']['pages']\n\n results = []\n for k, v in PLACES.items():\n title = v.get('title', '')\n thumbnail = v['thumbnail']['source'] if 'thumbnail' in v else ''\n if 'coordinates' in v:\n article_lat = v['coordinates'][0]['lat']\n article_lon = v['coordinates'][0]['lon']\n distance = haversine(float(latitude), float(longitude), article_lat, article_lon)\n\n # Safely access the 'pageimage' key with a default value of None\n pageimage = v.get('pageimage', None)\n\n article, created = WikipediaArticle.objects.get_or_create(\n pageid=k,\n defaults={\n 'title': title,\n 'index': v['index'],\n 'coordinates_lat': article_lat,\n 'coordinates_lon': article_lon,\n }\n )\n\n if not created:\n # Update the existing article if needed\n article.title = title\n article.index = v['index']\n article.coordinates_lat = article_lat\n article.coordinates_lon = article_lon\n article.save()\n\n\n\n results.append(\n {'title': title, 'thumbnail': thumbnail, 'distance': distance, 'latitude': article_lat,\n 'longitude': article_lon})\n else:\n # Handle the case where 'coordinates' are missing\n results.append({'title': title, 'thumbnail': thumbnail, 'distance': 'N/A', 'latitude': None,\n 'longitude': None})\n\n context = {\n 'results': results,\n 'latitude': latitude,\n 'longitude': longitude,\n }\n return render(request, 'nearby_wikipedia.html', context)\n else:\n return render(request, 'error.html', {'message': 'No data found from Wikipedia API'})\n\n return render(request, 'get_location.html')\n\n\ndef display_nearby_wikipedia(request):\n articles = WikipediaArticle.objects.all()\n return render(request, 'nearby_wikipedia.html', {'articles': articles})","repo_name":"conan0h/awm_project","sub_path":"wiki/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41429853465","text":"import random\npl_pos = 1\ncom_pos = 1\ndef board():\n print(\"•\" * (pl_pos - 1) + \"P\" + \"•\" * (30 - pl_pos) + \"Goal\")\n print(\"•\" * (com_pos - 1) + \"C\" + \"•\" * (30 - com_pos) + \"Goal\")\n\nboard()\nprint(\"주사위 게임, 스타트!\")\nwhile True:\n input(\"Enter를 누르면 여러분의 말이 움직입니다\")\n pl_pos = pl_pos + random.randint(1, 6)\n if pl_pos 
> 30:\n pl_pos = 30\n board()\n if pl_pos == 30:\n print(\"당신이 승리했습니다!\")\n break\n input(\"Enter를 누르면 컴퓨터의 말이 움직입니다\")\n com_pos = com_pos + random.randint(1, 6)\n if com_pos > 30:\n com_pos = 30\n board()\n if com_pos == 30:\n print(\"컴퓨터가 승리했습니다!\")\n break\n","repo_name":"Jpub/PythonGame_1","sub_path":"Chapter5/list0503_5.py","file_name":"list0503_5.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"ko","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"16270902060","text":"import datetime\r\nfrom tinydb import TinyDB, Query\r\nimport json\r\nimport os\r\n\r\n#databases and logging handler\r\nclass DataLogger():\r\n atmosLog = None\r\n mqttClient = None\r\n def __init__(self, mqttClient=None):\r\n self.atmosLog = TinyDB(os.getcwd() + '/atmos_db.json')\r\n self.mqttClient = mqttClient\r\n\r\n #log temperature and humidity\r\n def logEnvironment(self, temp, hum):\r\n\r\n log = {\r\n \"timestamp\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\r\n \"temp\": temp, \r\n \"hum\": hum\r\n }\r\n\r\n self.atmosLog.insert(log)\r\n if self.mqttClient: self.mqttClient.publish( \"SENSORS\", json.dumps(log))\r\n\r\n #log irrigation status\r\n def logIrrigation(self, rainwater):\r\n\r\n log = {\r\n \"timestamp\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\r\n \"rainwater\": rainwater\r\n }\r\n\r\n self.atmosLog.insert(log)\r\n if self.mqttClient: self.mqttClient.publish( \"SENSORS\", json.dumps(log))","repo_name":"mxx-lxg/spacebase","sub_path":"system/DataLogger.py","file_name":"DataLogger.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22872796938","text":"from __future__ import annotations\nimport copy\nimport numpy as np\nimport time\nfrom multiprocess import Pool\nimport math\n\nfrom typing import Type, Optional, List, Tuple\n\nclass Node:\n val: int\n\n left_edges: int\n right_edges: int\n \n left: Optional[Node]\n right: Optional[Node]\n\n traverse_index: int = 0\n \n def __init__(self, val: int):\n self.left = None\n self.right = None\n self.left_edges = 0\n self.right_edges = 0\n self.traverse_index = 0\n self.val = val\n\n def split_given_edge(self, src: Node, dst: Node, val: int) -> None:\n node = Node(-1)\n leaf = Node(val)\n\n if src.left == dst:\n src.left = node\n src.left_edges += 2\n else:\n src.right = node\n src.right_edges += 2\n\n node.left = dst\n node.right = leaf\n node.left_edges = dst.left_edges + dst.right_edges + 1\n node.right_edges = 1\n \n def split_edge(self, edge: int, val: int) -> None:\n if edge < self.left_edges:\n if edge == self.left_edges - 1:\n self.split_given_edge(self, self.left, val)\n else:\n self.left_edges += 2\n self.left.split_edge(edge, val)\n else:\n if edge == self.left_edges + self.right_edges - 1:\n self.split_given_edge(self, self.right, val)\n else:\n self.right_edges += 2\n self.right.split_edge(edge - self.left_edges, val)\n\n def traverse(self, result: List[int]) -> None:\n if self.left is not None:\n self.left.traverse(result)\n\n if self.val != -1:\n result.append(self.val)\n \n if self.right is not None:\n self.right.traverse(result)\n\n def permutate(self, src: List[int], dst: List[int]) -> None:\n Node.traverse_index = 0\n self.permutation_apply(src, dst)\n\n def permutation_apply(self, src: List[int], dst: List[int]) -> None:\n if self.left is not None:\n self.left.permutation_apply(src, dst)\n\n if self.val != -1:\n if src[Node.traverse_index] != 
dst[Node.traverse_index]:\n self.val = dst[Node.traverse_index]\n Node.traverse_index += 1\n \n if self.right is not None:\n self.right.permutation_apply(src, dst)\n\n def align(self, size: int) -> List[List[int]]:\n Node.traverse_index = size\n result: List[List[int]] = []\n self.align_do(result)\n return result\n\n def align_do(self, result: List[List[int]]) -> int:\n if self.left is None and self.right is None:\n return self.val\n \n if self.left is not None and self.right is not None:\n cur = Node.traverse_index\n \n Node.traverse_index += 1\n left = self.left.align_do(result)\n right = self.right.align_do(result)\n \n result.append([left, cur])\n result.append([right, cur])\n return cur\n\n return -2\n\n def display(self):\n lines, *_ = self._display_aux()\n for line in lines:\n print(line)\n print()\n\n def _display_aux(self):\n \"\"\"Returns list of strings, width, height, and horizontal coordinate of the root.\"\"\"\n # No child.\n if self.right is None and self.left is None:\n line = '%s' % self.val\n width = len(line)\n height = 1\n middle = width // 2\n return [line], width, height, middle\n\n # Only left child.\n if self.right is None:\n lines, n, p, x = self.left._display_aux()\n s = '%s' % self.val\n u = len(s)\n first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s\n second_line = x * ' ' + '/' + (n - x - 1 + u) * ' '\n shifted_lines = [line + u * ' ' for line in lines]\n return [first_line, second_line] + shifted_lines, n + u, p + 2, n + u // 2\n\n # Only right child.\n if self.left is None:\n lines, n, p, x = self.right._display_aux()\n s = '%s' % self.val\n u = len(s)\n first_line = s + x * '_' + (n - x) * ' '\n second_line = (u + x) * ' ' + '\\\\' + (n - x - 1) * ' '\n shifted_lines = [u * ' ' + line for line in lines]\n return [first_line, second_line] + shifted_lines, n + u, p + 2, u // 2\n\n # Two children.\n left, n, p, x = self.left._display_aux()\n right, m, q, y = self.right._display_aux()\n s = '%s' % self.val\n u = len(s)\n first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s + y * '_' + (m - y) * ' '\n second_line = x * ' ' + '/' + (n - x - 1 + u + y) * ' ' + '\\\\' + (m - y - 1) * ' '\n if p < q:\n left += [n * ' '] * (q - p)\n elif q < p:\n right += [m * ' '] * (p - q)\n zipped_lines = zip(left, right)\n lines = [first_line, second_line] + [a + u * ' ' + b for a, b in zipped_lines]\n return lines, n + m + u, max(p, q) + 2, n + u // 2\n \n\nclass Tree:\n size: int\n score: Optional[float]\n root: Optional[Node]\n \n def __init__(self, size, perm):\n self.score = None\n self.size = size\n self.root = None\n\n if perm:\n self.from_perm(perm)\n return\n for i in range(0, size):\n self.insert(i)\n \n\n def insert(self, val: int) -> None:\n if self.root is None:\n self.root = Node(val)\n return\n\n edges = self.root.left_edges + self.root.right_edges\n roll = np.random.randint(0, edges + 1)\n if roll == edges:\n new_root = Node(-1)\n\n new_root.left = self.root\n new_root.right = Node(val)\n new_root.left_edges = edges + 1\n new_root.right_edges = 1\n self.root = new_root\n return\n\n self.root.split_edge(roll, val)\n \n def from_perm(self, perm: List[int]):\n if self.root is None:\n self.root = Node(0)\n \n for i, roll in enumerate(perm[1:]):\n edges = self.root.left_edges + self.root.right_edges\n if roll == edges:\n new_root = Node(-1)\n\n new_root.left = self.root\n new_root.right = Node(i+1)\n new_root.left_edges = edges + 1\n new_root.right_edges = 1\n self.root = new_root\n continue\n self.root.split_edge(roll, i+1)\n \n def mutate(self) -> None:\n src = 
self.traverse()\n dst = list(src)\n\n a, b = random_pair(self.size)\n dst[a], dst[b] = src[b], src[a]\n \n self.score = None\n self.root.permutate(src, dst)\n\n def align_order(self) -> List[List[int]]:\n order = self.root.align(self.size)\n # order.sort(key=lambda t: t[1])\n return order\n\n @staticmethod\n def crossover(a: Tree, b: Tree) -> Tuple[Tree, Tree]:\n child_a: Tree = copy.deepcopy(a)\n child_b: Tree = copy.deepcopy(b)\n\n child_a.score = None\n child_b.score = None\n \n src_a = a.traverse()\n src_b = b.traverse()\n \n dst_a, dst_b = pmx(src_a, src_b)\n child_a.root.permutate(src_a, dst_a)\n child_b.root.permutate(src_b, dst_b)\n \n return (child_a, child_b)\n\n def traverse(self) -> List[int]:\n ret: List[int] = []\n self.root.traverse(ret)\n return ret\n\n def display(self):\n print(f\"({self.score})\")\n self.root.display()\n\nclass Ga:\n n: int\n size: int\n\n max_score: float\n avg_score: float\n best: Optional[Tree]\n \n lim_time: int\n lim_iter: int\n lim_same: int\n\n elite: int\n convergence_threshold: float\n \n population: List[Tree]\n \n def __init__(self, n: int, score, score_ctx,\n lim_time: int = 0, lim_iter: int = 0, lim_same: int = 0,\n pop_size: int = 0,\n elite: int = 10,\n convergence_threshold: float = 0.05):\n\n if pop_size != 0:\n self.n = pop_size\n else:\n self.n = self.population_size(n)\n \n self.size = n\n self.score = score\n self.score_ctx = score_ctx\n \n self.lim_time = lim_time\n self.lim_iter = lim_iter\n self.lim_same = lim_same\n \n self.elite = elite\n self.convergence_threshold = convergence_threshold\n\n self.max_score = 0\n self.avg_score = 0\n self.best = None\n\n self.population = [Tree(n, None) for i in range(self.n)]\n\n def population_size(self, size: int) -> int:\n if size > 300:\n return 2 * size\n \n x1 = 1.0\n y1 = 10.0\n x2 = 300.0\n y2 = 2\n\n y = ((y2 - y1) * size + x2 * y1 - x1 * y2) / (x2 - x1);\n return math.ceil(y * size)\n\n def selection(self):\n self.population.sort(key=lambda t: t.score, reverse=True)\n \n offset = self.size // self.elite\n if offset == 0:\n offset = 1\n \n ret = self.population[:offset]\n \n candidates = self.population[offset:]\n while len(ret) < self.n:\n if len(candidates) == 1:\n ret.append(candidates[0])\n break\n \n perm = np.random.permutation(len(candidates))\n loosers: List[Tree] = []\n for i in range(len(candidates) // 2):\n a = candidates[2 * i]\n b = candidates[2 * i + 1]\n if (a.score / (a.score + b.score)) < np.random.uniform():\n ret.append(a)\n loosers.append(b)\n else:\n ret.append(b)\n loosers.append(a)\n \n\n if len(ret) == self.n:\n break\n\n if len(candidates) % 2 == 1:\n loosers.append(candidates[-1])\n candidates = loosers\n \n self.population = ret\n self.stats()\n\n def fitness(self):\n tasks = []\n for i in range(len(self.population)):\n v = self.population[i]\n if v.score is None:\n tasks.append((np.array(v.align_order()), *self.score_ctx))\n else:\n tasks.append((None, *self.score_ctx))\n\n with Pool(processes=8) as pool:\n scores = pool.starmap(self.score, tasks)\n \n for i in range(len(self.population)):\n if self.population[i].score is None:\n self.population[i].score = scores[i]\n \n self.stats()\n\n def converged(self) -> bool:\n if self.max_score == 0:\n return True\n return ((self.max_score - self.avg_score)/ self.max_score) < self.convergence_threshold\n\n def iteration(self, iteration: int):\n if iteration > 0 and self.converged():\n for i in range(self.n):\n self.population.append(Tree(self.size, None))\n\n if iteration == 0 or self.converged():\n 
self.fitness()\n\n self.selection()\n self.crossover()\n self.fitness()\n self.mutation()\n self.fitness()\n\n \n def stats(self):\n max_score: float = 0\n sum_score: float = 0\n\n cur_best: float = 0\n cur_best_idx = -1\n if self.best is not None:\n cur_best = self.best.score\n \n for i in range(len(self.population)):\n v = self.population[i].score\n max_score = max(max_score, v)\n sum_score += v\n if cur_best < v:\n cur_best_idx = i\n cur_best = v\n\n if cur_best_idx >= 0:\n self.best = copy.deepcopy(self.population[i])\n \n self.max_score = max_score\n self.avg_score = sum_score / len(self.population)\n \n \n def mutation(self):\n for v in self.population:\n if np.random.uniform() < self.mutation_prob(v.score):\n v.mutate()\n\n def crossover(self):\n perm = np.random.permutation(self.n)\n for i in range(self.n // 2):\n a = self.population[perm[2 * i]]\n b = self.population[perm[2 * i + 1]]\n if np.random.uniform() >= self.crossover_prob(a.score, b.score):\n continue\n\n child_a, child_b = Tree.crossover(a, b)\n self.population.append(child_a)\n self.population.append(child_b)\n \n def mutation_prob(self, score: float) -> float:\n if self.max_score == 0:\n return 1\n\n if self.max_score == self.avg_score:\n return 0.01\n \n if score < self.avg_score:\n return 0.5\n \n p = 0.5 * (self.max_score - score) / (self.max_score - self.avg_score)\n if p < 0.01:\n p = 0.01\n return p\n\n def crossover_prob(self, a_score: float, b_score: float) -> float:\n if self.max_score == 0:\n return 1\n\n if self.max_score == self.avg_score:\n return 0.5\n \n m = (a_score + b_score) / 2;\n if m < self.avg_score:\n return 0.05\n\n return (m - self.avg_score) / (self.max_score - self.avg_score)\n\n def terminate(self, start, n_iter: int, unchanged: int) -> bool:\n if self.lim_time != 0 and time.time() - start >= self.lim_time:\n return True\n \n if self.lim_iter != 0 and n_iter >= self.lim_iter:\n return True\n\n if self.lim_same != 0 and unchanged >= self.lim_same:\n return True\n\n return False\n\n\n def run(self):\n n_iter: int = 0\n unchanged: int = 0\n start = time.time()\n best = self.best\n while not self.terminate(start, n_iter, unchanged):\n self.iteration(n_iter)\n n_iter += 1\n if best == self.best:\n unchanged += 1\n else:\n unchanged = 0\n best = self.best\n if best is not None:\n best.display()\n print(f\"iteration {n_iter}: elapsed {time.time() - start}, avg:{self.avg_score} max:{self.max_score}\")\n return best.align_order()\n \ndef random_pair(n: int) -> Tuple[int, int]:\n a = np.random.randint(n)\n b = np.random.randint(n)\n while a == b:\n b = np.random.randint(n)\n if a > b:\n a, b = b, a\n return (a, b)\n\ndef pmx_do(a: List[int], b: List[int], m: int, n: int) -> List[int]:\n ret = [0] * len(a)\n \n s = {}\n mapping = {}\n\n for i in range(m, n):\n mapping[b[i]] = i\n s[b[i]] = True\n ret[i] = b[i]\n\n for l, r in [(0, m), (n, len(a))]:\n for i in range(l, r):\n if a[i] not in s:\n ret[i] = a[i]\n else:\n v = a[mapping[a[i]]]\n while v in s:\n v = a[mapping[v]]\n ret[i] = v\n return ret\n\ndef pmx(a: List[int], b: List[int]) -> Tuple[List[int], List[int]]:\n m, n = random_pair(len(a))\n return (pmx_do(a, b, m, n), pmx_do(b, a, m, n))\n\n\n\n\n","repo_name":"n-canter/gamaps","sub_path":"gaCaretta/caretta/ga_tree.py","file_name":"ga_tree.py","file_ext":"py","file_size_in_byte":15268,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70274726570","text":"from flask import Flask, request, abort\nimport net\nimport game\nfrom const 
import player_1, player_2\nimport uuid\n\napp = Flask(__name__)\n\nmatch_queue = []\ngame_player_move_map = {}\n\n@app.route('/session//current_game')\ndef get_game(session_id):\n session_id = uuid.UUID(session_id)\n for pool in [Session.session_map, Session.ended_session_map]:\n if session_id in pool:\n session = pool[session_id]\n server_game = session.current_game()\n return net.pack_command(\n net.GameCommand(\n server_game.game,\n str(server_game.game_id),\n server_game.status,\n server_game.player_name_map))\n abort(404)\n\n@app.route('/session//rollback', methods=['POST'])\ndef rollback_game(session_id):\n session_id = uuid.UUID(session_id)\n for pool in [Session.session_map, Session.ended_session_map]:\n if session_id in pool:\n session = pool[session_id]\n session.rollback()\n return 'done'\n abort(404)\n\n@app.route('/session//current_game_id')\ndef get_game_id(session_id):\n session_id = uuid.UUID(session_id)\n for pool in [Session.session_map, Session.ended_session_map]:\n if session_id in pool:\n return str(pool[session_id].current_game_id())\n return ''\n\n@app.route('/game//move', methods=['POST'])\ndef submit_player_move(game_id):\n game_id = uuid.UUID(game_id)\n command = net.unpack_command(request.get_json())\n assert(type(command) is net.PlayerMoveCommand)\n player = command.player_move.player\n print(f\"Received player {player} move: {command.player_move}.\")\n if game_id not in game_player_move_map:\n game_player_move_map[game_id] = ServerGamePlayerMove()\n game_player_move_map[game_id].update(player, command.player_move)\n\n Session.process_sessions()\n return 'done'\n\n@app.route('/match/', methods=['POST'])\ndef new_game(player_name):\n if len(match_queue) > 0:\n session_id, waiting_player_name = match_queue.pop(0)\n if waiting_player_name != player_name:\n player_name_map = {\n player_1: waiting_player_name,\n player_2: player_name\n }\n Session(session_id, player_name_map)\n return str(session_id)\n session_id = uuid.uuid4()\n match_queue.append((session_id, player_name))\n return str(session_id)\n\nclass ServerGame:\n server_game_map = {}\n\n def __init__(self, player_name_map, game_=None):\n if game_ is None:\n self.game = game.Game()\n else:\n self.game = game_\n self.game_id = uuid.uuid4()\n self.status = self.game.get_status()\n self.player_name_map = player_name_map\n ServerGame.server_game_map[self.game_id] = self\n\n def next(self, player_move):\n try:\n next_game = self.game.make_move(player_move.as_list())\n except Exception as e:\n print(e)\n print(player_move)\n return None\n return ServerGame(self.player_name_map, next_game)\n\nclass ServerGamePlayerMove:\n def __init__(self):\n self.reset()\n\n def all_players_moved(self):\n return self.as_list().count(None) == 0\n\n def update(self, player, player_move):\n self.player_move_map[player] = player_move\n\n def as_list(self):\n return list(self.player_move_map.values())\n\n def reset(self):\n self.player_move_map = { \n player_1: None, \n player_2: None \n }\n\n def __repr__(self):\n return ','.join([str(pm) for pm in self.as_list()])\n\nclass Session:\n session_map = {}\n ended_session_map = {}\n\n def __init__(self, session_id, player_name_map):\n self.session_id = session_id\n server_game = ServerGame(player_name_map)\n self.game_id_list = [server_game.game_id]\n Session.session_map[self.session_id] = self\n\n def update(self, game_id):\n self.game_id_list.append(game_id)\n\n def current_game_id(self):\n return self.game_id_list[-1]\n\n def current_game(self):\n return 
ServerGame.server_game_map[self.current_game_id()]\n\n def is_ended(self):\n return self.current_game().status != game.GameStatus.Ongoing\n\n def rollback(self):\n if len(self.game_id_list) > 1:\n self.game_id_list.pop()\n\n @classmethod\n def process_sessions(cls):\n ended_sessions = []\n for session in cls.session_map.values():\n if session.current_game_id() not in game_player_move_map:\n continue\n\n sg_player_move = game_player_move_map[session.current_game_id()]\n if not sg_player_move.all_players_moved():\n continue\n \n server_game = session.current_game()\n next_server_game = server_game.next(sg_player_move)\n\n if next_server_game is None:\n # this should never happen\n sg_player_move.reset()\n \n session.update(next_server_game.game_id)\n\n if session.is_ended():\n ended_sessions.append(session)\n\n for session in ended_sessions:\n cls.ended_session_map[session.session_id] = session\n del cls.session_map[session.session_id]\n\ndef start():\n app.run(debug=True, host='0.0.0.0')\n\nif __name__ == '__main__':\n start()","repo_name":"cottyard/Lancer","sub_path":"deprecated/command_prototype/Lancer Sametime/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22130192918","text":"# DEBUG build with debug\n#\n# USE_SYSTEM_ONECCL=0\n# disables use of system-wide oneCCL (we will use our submoduled\n# copy in third_party/oneCCL)\n\nimport os\nimport sys\nimport pathlib\nimport shutil\nfrom subprocess import check_call, check_output\n\nimport torch\nfrom torch.utils.cpp_extension import BuildExtension, CppExtension, library_paths\nfrom setuptools import setup\nfrom distutils.command.clean import clean\nfrom tools.setup.cmake import CMakeExtension\nfrom tools.setup.env import get_compiler\n\n# Constant known variables used throughout this file\nCWD = os.path.dirname(os.path.abspath(__file__))\nONECCL_BINDINGS_FOR_PYTORCH_PATH = os.path.join(CWD, \"oneccl_bindings_for_pytorch\")\n\n\ndef _check_env_flag(name, default=''):\n return os.getenv(name, default).upper() in ['ON', '1', 'YES', 'TRUE', 'Y']\n\n\ndef check_file(f):\n if not os.path.exists(f):\n print(\"Could not find {}\".format(f))\n print(\"Did you run 'git submodule update --init --recursive'?\")\n sys.exit(1)\n\n\n# all the work we need to do _before_ setup runs\ndef create_version():\n \"\"\"Create the version string for torch-ccl\"\"\"\n package_name = os.getenv('CCL_PACKAGE_NAME', 'oneccl-bind-pt')\n version = open('version.txt', 'r').read().strip()\n sha = 'Unknown'\n\n try:\n sha = check_output(['git', 'rev-parse', 'HEAD'], cwd=CWD).decode('ascii').strip()\n except Exception:\n pass\n\n if os.getenv('CCL_SHA_VERSION', False):\n if sha != 'Unknown':\n version += '+' + sha[:7]\n\n if os.environ.get(\"COMPUTE_BACKEND\") == \"dpcpp\":\n backend = \"gpu\"\n else:\n backend = os.environ.get(\"ONECCL_BINDINGS_FOR_PYTORCH_BACKEND\", \"cpu\")\n\n if \"+\" not in version:\n version += '+' + backend\n\n print(\"Building {}-{}\".format(package_name, version))\n\n version_path = os.path.join(CWD, 'oneccl_bindings_for_pytorch', 'version.py')\n with open(version_path, 'w') as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n\n return version, package_name\n\n\nclass BuildCMakeExt(BuildExtension):\n \"\"\"\n Builds using cmake instead of the python setuptools implicit build\n \"\"\"\n\n def run(self):\n \"\"\"\n Perform build_cmake before doing the 
'normal' stuff\n \"\"\"\n cmake_extensions = [ext for ext in self.extensions if isinstance(ext, CMakeExtension)]\n for ext in cmake_extensions:\n self.build_cmake(ext)\n\n self.extensions = [ext for ext in self.extensions if not isinstance(ext, CMakeExtension)]\n super(BuildCMakeExt, self).run()\n build_py = self.get_finalized_command('build_py')\n build_py.data_files = build_py._get_data_files()\n build_py.run()\n\n def build_cmake(self, extension: CMakeExtension):\n \"\"\"\n The steps required to build the extension\n \"\"\"\n build_dir = pathlib.Path('.'.join([self.build_temp, extension.name]))\n\n build_dir.mkdir(parents=True, exist_ok=True)\n install_dir = ONECCL_BINDINGS_FOR_PYTORCH_PATH\n\n # Now that the necessary directories are created, build\n my_env = os.environ.copy()\n my_env[\"CMAKE_DISABLE_FIND_PACKAGE_MKL\"] = \"TRUE\"\n build_type = 'Release'\n\n if _check_env_flag('DEBUG'):\n build_type = 'Debug'\n\n build_options = {\n 'CMAKE_BUILD_TYPE': build_type,\n # The value cannot be easily obtained in CMakeLists.txt.\n 'CMAKE_PREFIX_PATH': torch.utils.cmake_prefix_path,\n # skip the example and test code in oneCCL\n 'BUILD_EXAMPLES': 'OFF',\n 'BUILD_CONFIG': 'OFF',\n 'BUILD_FT': 'OFF'\n }\n\n runtime = 'gcc'\n if 'COMPUTE_BACKEND' in os.environ:\n if os.environ['COMPUTE_BACKEND'] == 'dpcpp':\n runtime = 'dpcpp'\n build_options['COMPUTE_BACKEND'] = os.environ['COMPUTE_BACKEND']\n import intel_extension_for_pytorch\n build_options['CMAKE_PREFIX_PATH'] += \";\" + intel_extension_for_pytorch.cmake_prefix_path\n\n cc, cxx = get_compiler(runtime)\n build_options['CMAKE_C_COMPILER'] = cc\n build_options['CMAKE_CXX_COMPILER'] = cxx\n\n extension.generate(build_options, my_env, build_dir, install_dir)\n\n build_args = ['-j', str(os.cpu_count())]\n check_call(['make', 'oneccl_bindings_for_pytorch'] + build_args, cwd=str(build_dir))\n if 'COMPUTE_BACKEND' in os.environ:\n if os.environ['COMPUTE_BACKEND'] == 'dpcpp':\n check_call(['make', 'oneccl_bindings_for_pytorch_xpu'] + build_args, cwd=str(build_dir))\n check_call(['make', 'install'], cwd=str(build_dir))\n\n\nclass Clean(clean):\n def run(self):\n import glob\n import re\n\n with open('.gitignore', 'r') as f:\n ignores = f.read()\n pat = re.compile(r'^#( BEGIN NOT-CLEAN-FILES )?')\n for wildcard in filter(None, ignores.split('\\n')):\n match = pat.match(wildcard)\n if match:\n if match.group(1):\n # Marker is found and stop reading .gitignore.\n break\n # Ignore lines which begin with '#'.\n else:\n for filename in glob.glob(wildcard):\n try:\n os.remove(filename)\n except OSError:\n shutil.rmtree(filename, ignore_errors=True)\n\n clean.run(self)\n\n\ndef get_python_c_module():\n main_compile_args = []\n main_libraries = ['oneccl_bindings_for_pytorch']\n main_link_args = []\n main_sources = [\"oneccl_bindings_for_pytorch/csrc/_C.cpp\", \"oneccl_bindings_for_pytorch/csrc/init.cpp\"]\n lib_path = os.path.join(ONECCL_BINDINGS_FOR_PYTORCH_PATH, \"lib\")\n library_dirs = [lib_path]\n include_path = os.path.join(CWD, \"src\")\n include_dirs = [include_path]\n extra_link_args = []\n extra_compile_args = [\n '-Wall',\n '-Wextra',\n '-Wno-strict-overflow',\n '-Wno-unused-parameter',\n '-Wno-missing-field-initializers',\n '-Wno-write-strings',\n '-Wno-unknown-pragmas',\n # This is required for Python 2 declarations that are deprecated in 3.\n '-Wno-deprecated-declarations',\n # Python 2.6 requires -fno-strict-aliasing, see\n # http://legacy.python.org/dev/peps/pep-3123/\n # We also depend on it in our code (even Python 3).\n 
'-fno-strict-aliasing',\n # Clang has an unfixed bug leading to spurious missing\n # braces warnings, see\n # https://bugs.llvm.org/show_bug.cgi?id=21629\n '-Wno-missing-braces',\n ]\n\n def make_relative_rpath(path):\n return '-Wl,-rpath,$ORIGIN/' + path\n\n _c_module = CppExtension(\"oneccl_bindings_for_pytorch._C\",\n libraries=main_libraries,\n sources=main_sources,\n language='c',\n extra_compile_args=main_compile_args + extra_compile_args,\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n extra_link_args=extra_link_args + main_link_args + [make_relative_rpath('lib')])\n\n return _c_module\n\n\nif __name__ == '__main__':\n version, package_name = create_version()\n c_module = get_python_c_module()\n cmake_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"CMakeLists.txt\")\n modules = [CMakeExtension(\"liboneccl_bindings_for_pytorch\", cmake_file), c_module]\n setup(\n name=package_name,\n version=version,\n ext_modules=modules,\n packages=['oneccl_bindings_for_pytorch'],\n package_data={\n 'oneccl_bindings_for_pytorch': [\n '*.py',\n '*/*.h',\n '*/*.hpp',\n 'lib/*.so*',\n 'bin/*',\n 'env/*',\n 'etc/*',\n 'examples/*',\n 'include/native_device_api/*.h*',\n 'include/native_device_api/l0/*.h*',\n 'include/*.h*',\n 'lib/lib*',\n 'lib/prov/lib*',\n 'lib/kernels/*',\n 'licensing/*',\n 'modulefiles/*',\n ]},\n cmdclass={\n 'build_ext': BuildCMakeExt,\n 'clean': Clean,\n }\n )\n","repo_name":"intel/torch-ccl","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":8475,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"} +{"seq_id":"36134053152","text":"from joblib import dump, load\nimport pandas as pd\nimport numpy as np\n\n\nmodel = load('gradient_booster.model')\nlabels = np.array([\"Important\", \"VP_Hours\", 'Job_Advertisemnts'])\n\n\nvp_counts = ['experiment', 'study', 'studie', 'amazon', 'vp', 'stunde', 'hour','numbers']\n\ndef get_vp_score(text):\n\n \n\n text = text.lower()\n\n counts = [text.count(w) for w in vp_counts[:-1]]\n\n\n number_count = 0\n\n pos = text.find('vp')\n\n while pos != -1:\n\n pos = text.find('vp',pos + 1)\n\n window_before = text[max(0,pos - 10): pos]\n for c in window_before:\n number_count += c.isnumeric()\n\n counts.append(number_count)\n\n return counts\n\n\njob_count_words = ['hiwi','job','praktikum','intern','internship','praktikant','program','phd',\n 'thesis','projects', 'position','offer',\n 'doctoral','tutors','hilfskr','stelle','ausschreibung']\ndef get_job_score(text):\n\n text = text.lower()\n\n \n\n\n return [text.count(w) for w in job_count_words]\n\n\ndef data_pipe_line(texts):\n\n return np.array([get_job_score(t) + get_vp_score(t) for t in texts])\n\n\n\n\ndef get_email_predictions(email_texts):\n\n\n vals = data_pipe_line(email_texts)\n\n\n prediction = model.predict(vals) \n\n\n return labels[prediction]\n\n ","repo_name":"MockaWolke/cogsci_email_sorter","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"29276336521","text":"# author: Coppo Federico from Edx IBM course \"Machine Learning with Python\" \r\n# 17/11/2019 \r\n\r\n# readme:\r\n# waiting for 3.2.0 install the following release candidate\r\n# $ pip install matplotlib==3.2.0rc1\r\n\r\n# this example show non-linear regression usage\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom scipy.optimize 
import curve_fit\r\n\r\n#calibration \r\nenablePlotExample = False\r\nsklearnInstalled = False \t\t# disable if want to execute code without sklearn package\r\n\r\nif sklearnInstalled == True:\r\n\tfrom sklearn.metrics import r2_score\r\n\r\n# NON LINEAR FUNCTION EXAMPLE \r\n\r\nx = np.arange(-5.0, 5.0, 0.1)\r\n\r\n# example of linear funtion \r\ny = 1.1*(x) + 7\r\ny_noise = 2 * np.random.normal(size=x.size)\r\nydata = y + y_noise\r\nplt.plot(x, ydata, 'bo')\r\nplt.plot(x,y, 'g') \r\nplt.ylabel('Y')\r\nplt.xlabel('X')\r\nplt.show()\r\n\r\n# example of non linear funtion: cubic function (y = ax^3 + bx + c)\r\ny = 1*(x**3) + 1*(x**2) + 1*x + 3\r\ny_noise = 20 * np.random.normal(size=x.size)\r\nydata = y + y_noise\r\nplt.plot(x, ydata, 'bo')\r\nplt.plot(x,y, 'r') \r\nplt.ylabel('Y = ax^3 + bx + c')\r\nplt.xlabel('X')\r\nplt.show()\r\n\r\n# example of non linear funtion: quadratic function (y =x^2)\r\ny = np.power(x,2)\r\ny_noise = 2 * np.random.normal(size=x.size)\r\nydata = y + y_noise\r\nplt.plot(x, ydata, 'bo')\r\nplt.plot(x,y, 'c') \r\nplt.ylabel('Y = x^2')\r\nplt.xlabel('X')\r\nplt.show()\r\n\r\n# example of non linear funtion: exponential function (y =a+ b*c^x)\r\ny= np.exp(x)\r\nydata = y + y_noise\r\nplt.plot(x, ydata, 'bo')\r\nplt.plot(x,y, 'y') \r\nplt.ylabel('Y = a +b*c^x')\r\nplt.xlabel('X')\r\nplt.show()\r\n\r\n# example of non linear funtion: log function (y = log(x))\r\n# indep variable should start > zero\r\nx = np.arange(0.1, 5.0, 0.1) \r\ny = np.log(x)\r\n#noise should be re-calculated\r\ny_noise = np.random.normal(size=x.size) \r\nydata = y + y_noise\r\nplt.plot(x, ydata, 'bo')\r\nplt.plot(x,y, 'x') \r\nplt.ylabel('Y = log(X)')\r\nplt.xlabel('X')\r\nplt.show()\r\n\r\n# example of non linear funtion: sigmodal, logistic (y = a + b/(1 + c^(x-d))\r\nx = np.arange(-5.0, 5.0, 0.1)\r\ny = 1-4/(1+np.power(3, x-2))\r\ny_noise = np.random.normal(size=x.size) \r\nydata = y + y_noise\r\nplt.plot(x, ydata, 'bo')\r\nplt.plot(x,y, 'p') \r\nplt.ylabel('Y = a + b/(1 + c^(X-d)')\r\nplt.xlabel('X')\r\nplt.show()\r\n\r\n# NON LINEAR REGRESSION EXAMPLE \r\n\r\n# dataset\r\ndf = pd.read_csv(\"china_gdp.csv\")\r\n\r\n#show dataset\r\nprint (\"dataset: China's corresponding annual gross domestic income in US dollars for that year:\\n\")\r\nprint (df)\r\n\r\n#plot dataset\r\nplt.figure(figsize=(8,5))\r\nx_data, y_data = (df[\"Year\"].values, df[\"Value\"].values)\r\nplt.plot(x_data, y_data, 'ro')\r\nplt.ylabel('GDP')\r\nplt.xlabel('Year')\r\nplt.show()\r\n\r\n# CHOOSE THE MODEL -> logistic function\r\n# Y^ = 1/(1 + e^(𝛽1-x*𝛽2)) where 𝛽1 controls the curve's steepness and 𝛽2 slides the curve on the x-axis.\r\nif enablePlotExample == True:\r\n #example 1\r\n y = 1 / (1 + np.exp(-1*(x-5)))\r\n plt.plot(x,y, 'p') \r\n plt.ylabel('Y = 1/(1+ e(-(x-5))))')\r\n plt.xlabel('X')\r\n plt.show()\r\n\r\n #example 2\r\n y = 1 / (1 + np.exp(-3*(x-8)))\r\n plt.plot(x,y, 'p') \r\n plt.ylabel('Y = 1/(1+ e(-3(x-8))))')\r\n plt.xlabel('X')\r\n plt.show()\r\n\r\n# BUILD THE MODEL\r\n\r\n# routine define\r\ndef sigmoid_f(x, Beta_1, Beta_2):\r\n y = 1 / (1 + np.exp(-Beta_1*(x-Beta_2)))\r\n return y\r\n\r\n# find the parameter that fit the data\r\nbeta_1 = 0.10\r\nbeta_2 = 1990.0\r\nY_pred = sigmoid_f(x_data, beta_1 , beta_2)\r\n\r\n#plot initial prediction against datapoints\r\nplt.plot(x_data, Y_pred*15000000000000.)\r\nplt.plot(x_data, y_data, 'ro')\r\n\r\n# Lets first normalize our x and y (to find the best parameters for our model).\r\nxdata =x_data/max(x_data)\r\nydata =y_data/max(y_data)\r\n\r\n# find the best 
parameter\r\n# curve_fit routine uses non-linear least squares.\r\n# optimal values -> the sum of the squared residuals of y^ - ydata is minimized.\r\npopt, pcov = curve_fit(sigmoid_f, xdata, ydata)\r\nprint(\" beta_1 = %f, beta_2 = %f\" % (popt[0], popt[1]))\r\n\r\n#verify the model\r\nx = np.linspace(1960, 2015, 55)\r\nx = x/max(x)\r\nplt.figure(figsize=(8,5))\r\ny = sigmoid_f(x, popt[0], popt[1] )\r\nplt.plot(xdata, ydata, 'ro', label='data')\r\nplt.plot(x,y, linewidth=3.0, label='fit')\r\nplt.legend(loc='best')\r\nplt.ylabel('GDP')\r\nplt.xlabel('Year')\r\nplt.show()\r\n\r\n#model accuracy\r\n# split data (xdata, ydata are from dataset) into train/test\r\nmsk = np.random.rand(len(df)) < 0.8\r\ntrain_x = xdata[msk]\r\ntest_x = xdata[~msk]\r\ntrain_y = ydata[msk]\r\ntest_y = ydata[~msk]\r\n\r\n# build the model using train set\r\npopt, pcov = curve_fit(sigmoid_f, train_x, train_y)\r\n\r\n# predict using test set (x) and model with parameter calculated for train model\r\ny_predicted = sigmoid_f(test_x, *popt)\r\n\r\n# model accuracy\r\nprint(\"Mean absolute error: %.2f\" % np.mean(np.absolute(y_predicted - test_y))) #1/n sum (y -y_)\r\nprint(\"Residual sum of squares (MSE): %.4f\" % np.mean((y_predicted - test_y) ** 2))#1/n sum (y -y_)^2\r\nif sklearnInstalled == True:\r\n\tprint(\"R2-score: %.2f\" % r2_score(y_predicted , test_y) ) # RMSE 1.0 is good 0.0 is bad \r\n","repo_name":"FedericoCoppo/MachineLearning","sub_path":"NonLinearRegression/nonLinearRegression.py","file_name":"nonLinearRegression.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27643162309","text":"import requests\r\nimport re\r\nimport zipfile\r\nimport io\r\nimport json\r\nimport csv\r\n\r\n#Open JSON File\r\nwith open(\".\\\\nvd\\\\nvdcve-1.1-2017.json\") as f_json:\r\n r = requests.get('https://nvd.nist.gov/vuln/data-feeds#JSON_FEED')\r\n\r\n# Create CSV file\r\nwith open('output.csv', 'w', newline='') as f_output:\r\n# Write output to CSV file\r\n csv_output = csv.writer(f_output)\r\n# Write headings\r\n csv_output.writerow(['ID','attackVector','attackComplexity','privilegesRequired','userInteraction','scope','confidentialityImpact','integrityImpact','availabilityImpact','availabilityImpact','baseScore','exploitabilityScore','impactScore'])\r\n\r\n# Download NVD files\r\n for filename in re.findall(\"nvdcve-1.1-[0-9]*\\.json\\.zip\", r.text):\r\n# Display message \r\n print(\"Downloading {}\".format(filename))\r\n r_zip_file = requests.get(\"https://static.nvd.nist.gov/feeds/json/cve/1.1/\" + filename, stream=True)\r\n zip_file_bytes = io.BytesIO()\r\n\r\n for chunk in r_zip_file:\r\n zip_file_bytes.write(chunk)\r\n\r\n zip_file = zipfile.ZipFile(zip_file_bytes)\r\n\r\n for json_filename in zip_file.namelist():\r\n# Extract Zip file\r\n print(\"Extracting {}\".format(json_filename))\r\n# Read JSON file \r\n json_raw = zip_file.read(json_filename).decode('utf-8')\r\n json_data = json.loads(json_raw)\r\n# Filter JSON file\r\n for entry in json_data['CVE_Items']:\r\n\r\n try:\r\n id = entry['cve']['CVE_data_meta']['ID']\r\n except IndexError:\r\n id = ''\r\n \r\n try:\r\n av = entry['impact']['baseMetricV3']['cvssV3']['attackVector']\r\n except KeyError:\r\n av = 'Key Error'\r\n \r\n try:\r\n ac = entry['impact']['baseMetricV3']['cvssV3']['attackComplexity']\r\n except KeyError:\r\n ac = 'Key Error'\r\n \r\n try:\r\n pr = entry['impact']['baseMetricV3']['cvssV3']['privilegesRequired']\r\n except KeyError:\r\n pr 
= 'Key Error'\r\n \r\n try:\r\n ui = entry['impact']['baseMetricV3']['cvssV3']['userInteraction']\r\n except KeyError:\r\n ui = 'Key Error'\r\n\r\n try:\r\n s = entry['impact']['baseMetricV3']['cvssV3']['scope']\r\n except KeyError:\r\n s = 'Key Error'\r\n\r\n try:\r\n ci = entry['impact']['baseMetricV3']['cvssV3']['confidentialityImpact']\r\n except KeyError:\r\n ci = 'Key Error'\r\n \r\n try:\r\n ii = entry['impact']['baseMetricV3']['cvssV3']['integrityImpact']\r\n except KeyError:\r\n ii = 'Key Error'\r\n\r\n try:\r\n ai = entry['impact']['baseMetricV3']['cvssV3']['availabilityImpact']\r\n except KeyError:\r\n ai = 'Key Error'\r\n try:\r\n bs = entry['impact']['baseMetricV3']['cvssV3']['baseScore']\r\n except KeyError:\r\n bs = 'Key Error'\r\n\r\n try:\r\n es = entry['impact']['baseMetricV3']['exploitabilityScore']\r\n except KeyError:\r\n es = 'Key Error'\r\n\r\n try:\r\n imps = entry['impact']['baseMetricV3']['impactScore']\r\n except KeyError:\r\n imps = 'Key Error'\r\n \r\n# Write variables to CSV file\r\n csv_output.writerow([\r\n id,\r\n av,\r\n ac,\r\n pr,\r\n ui,\r\n s,\r\n ci,\r\n ii,\r\n ai,\r\n bs,\r\n es,\r\n imps])\r\n","repo_name":"woadjon83/cves","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20049927080","text":"from gensim.models import Word2Vec\nfrom gensim.models.keyedvectors import WordEmbeddingSimilarityIndex\nfrom numpy.lib.shape_base import column_stack\nimport pandas as pd\n\nclass Topic():\n def __init__(self, topicName):\n self.topicName = topicName\n self.keyWords = dict()\n self.totalWeight = 0\n self.model = None\n self.relatedWordsDf = None\n \n def compTotalWeight(self):\n self.totalWeight = 0\n for key in self.keyWords:\n self.totalWeight += self.keyWords[key]\n\n def addKeyWord(self, keyword, weight):\n self.keyWords[keyword] = weight\n\n def deleteKeyWord(self, keyword):\n self.keyWords.pop(keyword, None)\n\n def emptySelf(self):\n self.keyWords = dict()\n\n def loadModel(self, modelPath):\n self.model = Word2Vec.load(modelPath)\n\n def relatedWords(self):\n view = pd.DataFrame(columns= ['word', 'similarity'])\n for key in self.keyWords:\n temp = pd.DataFrame(columns=['word', 'similarity'])\n if self.keyWords[key] > 0:\n temp = pd.DataFrame(self.model.wv.most_similar(key, topn=5), columns=['word', 'similarity'])\n temp['similarity'] *= self.keyWords[key]\n temp['belongTo'] = key\n temp = temp.append(pd.DataFrame([[key, 1 * self.keyWords[key]]], columns = ['word', 'similarity']))\n view = view.append(temp)\n\n for key in self.keyWords:\n if self.keyWords[key] <= 0:\n view.loc[view['word'] == key, 'similarity']*= self.keyWords[key]\n\n view2 = pd.DataFrame(view.groupby(by=['word']).max()).sort_values(by= 'similarity', ascending= False)\n self.relatedWordsDf = view2[view2['similarity'] > 0]\n\n def topicRelation(self, sentence):\n scoreDict = self.relatedWordsDf.to_dict()['similarity']\n score = 0\n for word in sentence:\n try:\n score += scoreDict[word]\n except KeyError:\n continue\n if score == 0:\n return 0\n else:\n return score / len(sentence)\n\n","repo_name":"jian5753/NTU2020Q1_twitter","sub_path":"app/main/topic.py","file_name":"topic.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32320722542","text":"import matplotlib.pyplot as plt\nimport numpy as 
np\nmy_list=[1,2,3,4,5,6]\nx=np.array(my_list)\ny=x**2\nplt.plot(x,y)\n# plt.xlabel('X Label')\n# plt.ylabel('Y Label')\n# plt.title('graph')\n# plt.show()\n#oo methods\nfig=plt.figure()\naxes=fig.add_axes([1,2,3,4])\naxes.plot()\n","repo_name":"abhigun1234/jsstolearnpython","sub_path":"datascience/matplotlibd.py","file_name":"matplotlibd.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19951204081","text":"import json\nimport re\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nfrom anon_browser import *\n\n\nclass ReconPerson:\n def __init__(self, handle):\n self.handle = handle\n self.tweets = self.get_tweets()\n\n def get_tweets(self):\n query = urllib.parse.quote_plus(f'from:{self.handle} since:2009-01-01 '\n f'include:retweets')\n tweets = []\n browser = AnonBrowser()\n browser.anonymize()\n response = browser.open(f'http://search.twitter.com/'\n f'search.json?q={query}')\n json_objects = json.load(response)\n\n for result in json_objects['results']:\n new_result = {\n 'from_user': result['from_user_name'],\n 'geo': result['geo'],\n 'tweet': result['text']\n }\n tweets.append(new_result)\n return tweets\n\n def find_interests(self):\n interests = {\n 'links': [],\n 'users': [],\n 'hashtags': []\n }\n\n for tweet in self.tweets:\n text = tweet['tweet']\n link = ''\n links = re.compile(r'(http.*?)\\Z|(http.*?)').findall(text)\n\n for link in links:\n if link[0]:\n link = link[0]\n elif link[1]:\n link = link[1]\n else:\n continue\n try:\n response = urllib.request.urlopen(link)\n full_link = response._url\n interests['links'].append(full_link)\n except Exception as e:\n print(f'[-] Exception: {e.__class__.__name__}')\n pass\n\n interests['users'] += re.compile(r'(@\\w+)').findall(text)\n interests['hashtags'] += re.compile(r'(#\\w+)').findall(text)\n\n interests['users'].sort()\n interests['hashtags'].sort()\n interests['links'].sort()\n\n return interests\n\n def twitter_locate(self, city_file):\n cities = []\n if city_file:\n for line in open(city_file).readlines():\n city = line.strip('\\n').strip('\\r').lower()\n cities.append(city)\n\n locations = []\n loc_cnt = 0\n city_cnt = 0\n tweets_text = ''\n\n for tweet in self.tweets:\n if tweet['geo']:\n locations.append(tweet['geo'])\n loc_cnt += 1\n tweets_text += tweet['tweet'].lower()\n\n for city in cities:\n if city in tweets_text:\n locations.append(city)\n city_cnt += 1\n\n return locations\n","repo_name":"EONRaider/violent-python3","sub_path":"chapter06/twitter_class.py","file_name":"twitter_class.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":922,"dataset":"github-code","pt":"53"} +{"seq_id":"20188636934","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom rules import (\n Rules\n)\n\nfrom utils import (\n Utils\n)\n\nfrom initialize import (\n Init\n)\n\nfrom database import (\n Database\n)\n\nfrom user import (\n User\n)\n\nfrom app import (\n App\n)\n\nfrom role import (\n Role\n)\n\nfrom uid_openid_mapping import (\n UidOpenidMapping\n)\n\nfrom role_app_mapping import (\n RoleAppMapping\n)\n\nfrom filter import (\n FilterFieldType,\n Filter\n)\n\n\n__author__ = 'James Iter'\n__date__ = '16/6/8'\n__contact__ = 'james.iter.cn@gmail.com'\n__copyright__ = '(c) 2016 by James Iter.'\n\n\n__all__ = [\n 'Rules', 'Utils', 'Init', 'Database', 'User', 'App', 'Role', 'UidOpenidMapping', 'RoleAppMapping',\n 'FilterFieldType', 
'Filter'\n]\n","repo_name":"jamesiter/jimid","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4513710993","text":"import os\nimport sys\nimport time\n\nimport grpc\nfrom chirpstack_api.as_pb.external import api\n\n# Configuration.\n\n# This must point to the API interface.\nserver = \"138.68.97.249:8080\"\n\n# The DevEUI for which you want to enqueue the downlink.\ndev_eui = bytes([0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x03, 0x07])\n\n# The API token (retrieved using the web-interface).\napi_token = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhcGlfa2V5X2lkIjoiNDU2MWY1M2EtYTBkMy00NzQyLWJjYjItMWYzYjRmZDFjMGM5IiwiYXVkIjoiYXMiLCJpc3MiOiJhcyIsIm5iZiI6MTYyMTIzNjYxMSwic3ViIjoiYXBpX2tleSJ9.lNXzswwZtVsyOPGwiHS03X_pTUvBlmi0pokij1gsIOg\"\n\ndef send_msg(dev_eui, client, auth_token, msg):\n req = api.EnqueueDeviceQueueItemRequest()\n req.device_queue_item.confirmed = False\n req.device_queue_item.data = bytes(msg, \"utf-8\")\n req.device_queue_item.dev_eui = dev_eui.hex()\n req.device_queue_item.f_port = 201\n resp = client.Enqueue(req, metadata=auth_token)\n # Print the downlink frame-counter value.\n print(resp.f_cnt)\n\nif __name__ == \"__main__\":\n # Connect without using TLS.\n channel = grpc.insecure_channel(server)\n\n # Device-queue API client.\n client = api.DeviceQueueServiceStub(channel)\n\n # Define the API key meta-data.\n auth_token = [(\"authorization\", \"Bearer %s\" % api_token)]\n\n for i in range(0, 10):\n update_string = \"UpdateData\" + str(i).rjust(4, '0') + \"Garbage1337\" + \"Garbage1337\" + \"Garbage1337\"\n send_msg(dev_eui, client, auth_token, update_string)\n time.sleep(4)\n\n ","repo_name":"madsthom/fuota-app","sub_path":"chirp-api.py","file_name":"chirp-api.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74191482408","text":"n = int(input())\ntriangle = []\nfor _ in range(n):\n triangle.append(list(map(int, input().split())))\n\ndp = []\nfor i in range(n):\n dp.append([0] * (i + 1))\ndp[0][0] = triangle[0][0]\n\nfor i in range(1, n):\n for j in range(i + 1):\n if j == 0:\n dp[i][0] = dp[i - 1][0] + triangle[i][0]\n continue\n if j == i:\n dp[i][i] = dp[i - 1][i - 1] + triangle[i][i]\n continue\n for h in range(j - 1, j + 1):\n dp[i][j] = max(dp[i][j], dp[i - 1][h] + triangle[i][j])\n\nprint(max(dp[-1]))","repo_name":"GangHub1970/Algorithm","sub_path":"Python/boj1932.py","file_name":"boj1932.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20586388405","text":"def on_overlap_tile(sprite, location):\n tiles.set_tile_at(location, sprites.castle.tile_path5)\n info.change_score_by(1)\n effects.confetti.start_screen_effect(1000)\nscene.on_overlap_tile(SpriteKind.player,\n assets.tile(\"\"\"\n myTile0\n \"\"\"),\n on_overlap_tile)\n\ndef on_overlap_tile2(sprite, location):\n game.over(True, effects.confetti)\nscene.on_overlap_tile(SpriteKind.player,\n sprites.castle.tile_grass3,\n on_overlap_tile2)\n\ndef on_overlap_tile3(sprite, location):\n game.over(False, effects.bubbles)\nscene.on_overlap_tile(SpriteKind.player,\n sprites.dungeon.collectible_red_crystal,\n on_overlap_tile3)\n\ntiles.set_tilemap(tilemap(\"\"\"\n level1\n\"\"\"))\nmySprite = sprites.create(img(\"\"\"\n . . . . . . . . . . . . \n . . . 
f f f f f f . . . \n . f f f e e e e f f f . \n f f f e e e e e e f f f \n f f f f 4 e e e f f f f \n f f f 4 4 4 e e f f f f \n f f f 4 4 4 4 e e f f f \n f 4 e 4 4 4 4 4 4 e 4 f \n f 4 4 f f 4 4 f f 4 4 f \n f e 4 d d d d d d 4 e f \n . f e d d b b d 4 e f e \n f f f e 4 4 4 4 d d 4 e \n e 4 f b 1 1 1 e d d e . \n . . f 6 6 6 6 f e e . . \n . . f f f f f f f . . . \n . . f f f . . . . . . .\n \"\"\"),\n SpriteKind.player)\nmySprite.set_stay_in_screen(True)\ncontroller.move_sprite(mySprite)\nscene.camera_follow_sprite(mySprite)\nmySprite.set_velocity(50, 50)\nmySprite.set_position(0, 120)\ninfo.set_life(1)\n\ndef on_update_interval():\n pass\ngame.on_update_interval(500, on_update_interval)\n","repo_name":"mianazeemdaula/teasure-hunt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16299370325","text":"import discord #from discord.py\r\nimport requests\r\nimport magic #from python-magic, specifically python-magic-bin\r\nimport io\r\nimport sys\r\nimport os\r\n#import pickle\r\nimport tokenStore\r\n\r\nglobalVars = {}\r\nglobalVars['currentDir'] = ''\r\n\r\n#TODO, the bot cant write data to itself in heroku.\r\n# instead ima have to save the config info in a discord channel of sorts.\r\n# actually I wonder if I can write this info into the notes field. on the bot itself\r\n# note's field can hold up to 256 chars.\r\ndef writeChanges(obj,dir):\r\n #f = open(dir,'wb')\r\n #pickle.dump(obj,f)\r\n #print(obj)\r\n #f.close()\r\n #pickle.dumps(obj).hex()\r\n \r\n print(tokenStore.setVal(dir,obj))\r\n \r\n \r\ndef loadFromFile(dir):\r\n #f = open(dir,'rb')\r\n #temp = pickle.load(f)\r\n #f.close()\r\n #print(temp)\r\n #pickle.loads(bytes.fromhex(aHex))\r\n temp = tokenStore.getVal(dir)\r\n if(temp is None):\r\n return {}\r\n print(temp)\r\n return temp\r\n \r\n\r\ndef processFileFromURL(url,fileName):\r\n ext = '.bin'\r\n response = requests.get(url)\r\n if(response.status_code == 200):\r\n #actually check if the file is something we recongize.\r\n test = magic.from_buffer(response.content).split(',')[0]\r\n if(test == 'JPEG image data'):\r\n ext = '.jpg'\r\n elif(test == 'GIF image data'):\r\n ext = '.gif'\r\n elif(test == 'PNG image data'):\r\n ext = '.png'\r\n elif(test == 'WebM'):\r\n ext = '.webm'\r\n elif(test == 'ASCII text'): # this means it just paintext, so we could just msg the whole thing loal.\r\n ext = '.txt'\r\n \r\n # need a case for jfif files. turns out they can work as jpg files?\r\n \r\n return discord.File(io.BytesIO(response.content),filename=fileName+ext)\r\n else:\r\n return discord.File(io.BytesIO(b''),filename=fileName+ext)\r\n \r\ndef extEquals(x,y):\r\n return x.lower().split('.')[-1] == y.lower().split('.')[-1]\r\n\r\ndisClient = discord.Client()\r\nbotSettings = {}\r\n\r\ntry:\r\n botSettings = loadFromFile('dat')\r\nexcept:\r\n pass\r\n\r\n@disClient.event\r\nasync def on_ready():\r\n print('We have logged in as {0.user}'.format(disClient))\r\n b = await disClient.application_info()\r\n print(b.owner)\r\n a = await disClient.fetch_user(b.owner.id)\r\n #await a.send(\"bot active.\")\r\n\r\n@disClient.event\r\nasync def on_message(message):\r\n print('===MSGBODY===')\r\n print(message.content)\r\n print('====FROM=====')\r\n print(message.author) #very obvious msg spy thingy. 
you may wana remove this.\r\n print(message.author.id)\r\n print('====GUILD====')\r\n if message.author == disClient.user:\r\n return\r\n \r\n isDM = False\r\n try:\r\n print(message.guild.id) #message.guild.id is None when its a DM or other.\r\n except KeyError:\r\n botSettings[message.guild.id] = {}\r\n botSettings[message.guild.id]['targetChannel'] = None\r\n botSettings[message.guild.id]['commandExt'] = '$$$'\r\n botSettings[message.guild.id]['whitelistedRoles'] = set() #SAVE\r\n writeChanges(botSettings,'dat')\r\n except AttributeError:\r\n print('DM')\r\n isDM = True\r\n print('=============')\r\n scanCommand = False\r\n try:\r\n if(not message.author.guild_permissions.administrator): #mystery error, but sometimes the author is a none type. no idea wtf it is.\r\n compare = set()\r\n for i in message.author.roles: compare.add(i.id)\r\n scanCommand = len(botSettings[message.guild.id]['whitelistedRoles'].intersection(compare))>0\r\n else:\r\n scanCommand = True\r\n except AttributeError:\r\n pass\r\n \r\n isChannel = False\r\n try:\r\n isChannel = message.channel.id == botSettings[message.guild.id]['targetChannel']\r\n except AttributeError:\r\n pass\r\n \r\n if(isDM or isChannel): #target channel\r\n gatheredFiles = []\r\n for i in message.attachments:\r\n print(i.url)\r\n # download file and analyze.\r\n iFile = processFileFromURL(i.url,str(len(gatheredFiles)))\r\n if not extEquals(i.url,iFile.filename):\r\n gatheredFiles.append(iFile)\r\n if(len(gatheredFiles)>0):\r\n await message.channel.send('processed files.',files=gatheredFiles)\r\n \r\n if(not isDM):\r\n if message.content.startswith(botSettings[message.guild.id]['commandExt']+'targetChannelHere') and scanCommand:\r\n botSettings[message.guild.id]['targetChannel'] = message.channel.id #SAVE\r\n writeChanges(botSettings,'dat')\r\n await message.channel.send('channel set to '+ str(message.channel.id))\r\n \r\n elif message.content.startswith(botSettings[message.guild.id]['commandExt']+'targetChannel') and scanCommand:\r\n targ = message.content.split(' ')[1]\r\n didFail = False\r\n try:\r\n botSettings[message.guild.id]['targetChannel'] = int(targ) #SAVE\r\n writeChanges(botSettings,'dat')\r\n except:\r\n didFail = True\r\n if(didFail):\r\n await message.channel.send('not a valid integer')\r\n else:\r\n await message.channel.send('channel set to '+targ)\r\n \r\n elif message.content.startswith(botSettings[message.guild.id]['commandExt']+'setCommandPrefix') and scanCommand:\r\n targ = message.content.split(' ')[1]\r\n didFail = False\r\n try:\r\n botSettings[message.guild.id]['commandExt'] = targ #SAVE\r\n writeChanges(botSettings,'dat')\r\n except:\r\n didFail = True\r\n if(didFail):\r\n await message.channel.send('cant set that ext')\r\n else:\r\n await message.channel.send('cmdExt set to '+targ)\r\n \r\n elif message.content.startswith(botSettings[message.guild.id]['commandExt']+'setWhitelistedRoles') and scanCommand:\r\n targ = message.content.split(' ')[1:]\r\n updatedRoles = set()\r\n didFail = False\r\n try:\r\n for i in targ:\r\n updatedRoles.add(int(i))\r\n except:\r\n didFail = True\r\n if(didFail):\r\n await message.channel.send('not a valid set of role IDs')\r\n else:\r\n botSettings[message.guild.id]['whitelistedRoles'] = updatedRoles #SAVE\r\n writeChanges(botSettings,'dat')\r\n await message.channel.send('updated roles')\r\n\r\nlogToken = None\r\ntry:\r\n logToken = sys.argv[1]\r\nexcept IndexError:\r\n try:\r\n logToken = os.environ['BOT_TOKEN']\r\n except KeyError:\r\n pass\r\n\r\ndisClient.run(logToken, 
bot=True)\r\n# btw, this is NOT the client secret, ya dumb dumb. goto the bot page not the OAuth page.","repo_name":"Snerfoil/jpgLargeFixerDiscord","sub_path":"discordBot.py","file_name":"discordBot.py","file_ext":"py","file_size_in_byte":6836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43439729944","text":"\"\"\"MCTS Tree Node\"\"\"\nfrom collections import Counter\nfrom copy import deepcopy\nimport math\n\nimport numpy as np\n\nfrom game import action\nfrom mcts.turn import TurnGenerator\n\n\nclass Node(object):\n NODE_ID_COUNTER = 0\n\n def __init__(self, state, parent):\n self.id = Node.NODE_ID_COUNTER\n Node.NODE_ID_COUNTER += 1\n\n self.state = state\n self.parent = parent\n self.children = []\n\n self.visited = 0\n self.reward = 0\n\n self.depth = 0 if parent is None else parent.depth + 1\n\n from mcts.stats import get_instance\n stats = get_instance()\n stats.push_node(self)\n\n def is_terminal(self):\n pass\n\n def is_fully_expanded(self):\n pass\n\n def expand(self):\n pass\n\n def get_best_child(self, coeff):\n pass\n\n def __repr__(self):\n return \"{}(id={}, children={}, visited={}, reward={})\".format(\n self.__class__.__name__,\n self.id,\n [child.id if child else \"None\" for child in self.children],\n self.visited,\n self.reward\n )\n\n\nclass DecisionTurnNode(Node):\n def __init__(self, state, parent, turns):\n #print('Creating DecisionTurnNode!')\n super(DecisionTurnNode, self).__init__(state, parent)\n\n self.turns = turns\n\n def is_terminal(self):\n return (not self.turns and not self.children) or self.state.is_terminal_state() # not self.children\n\n def is_fully_expanded(self):\n return not self.turns\n\n def expand(self):\n chosen_turn = self.turns.pop()\n\n new_child = DrawCardNode(state=chosen_turn.game_state,\n parent=self,\n turn=chosen_turn)\n self.children.append(new_child)\n return new_child.expand()\n\n def get_best_child(self, coeff):\n best_child = None\n best_child_score = -99999\n\n for child in self.children:\n score = child.reward / child.visited + \\\n coeff * math.sqrt(2 * math.log(self.visited)/child.visited)\n\n if score > best_child_score:\n best_child_score = score\n best_child = child\n\n return best_child\n\n\nclass DrawCardNode(Node):\n def __init__(self, state, parent, turn):\n #print('Creating DrawCardNode!')\n super(DrawCardNode, self).__init__(state, parent)\n\n self.turn = turn\n\n self.possible_cards = None\n self.probs = None\n self.children = None\n\n self._get_all_card_draws(state)\n\n def is_terminal(self):\n return False\n\n def is_fully_expanded(self):\n return self.children.count(None) == 0 or (self.probs == [1.0] and self.possible_cards == [None])\n\n def expand(self, idx=None):\n if self.probs == [1.0] and self.possible_cards == [None]: # Is fully expanded!\n return self.children[0]\n\n if idx is None:\n not_expanded_idx = -1\n for idx, elem in enumerate(self.children):\n if elem is None:\n not_expanded_idx = idx\n break\n\n assert not_expanded_idx != -1\n else:\n not_expanded_idx = idx\n\n self.children[not_expanded_idx] = self._create_child(not_expanded_idx)\n\n if isinstance(self.children[not_expanded_idx], DrawCardNode):\n return self.children[not_expanded_idx].expand()\n\n return self.children[not_expanded_idx]\n\n def get_best_child(self, coeff):\n selected_card_idx = np.random.choice(\n len(self.possible_cards),\n p=self.probs\n )\n\n if self.children[selected_card_idx] is None:\n self.expand(selected_card_idx)\n\n return 
self.children[selected_card_idx]\n\n def _get_all_card_draws(self, game_state):\n player, _ = game_state.get_players()\n\n if player.deck.is_empty():\n # If deck is empty, there is only one possible child,\n # which is created by reducing the players health\n self.possible_cards = [None]\n self.probs = [1.0]\n\n game_state_cpy = deepcopy(game_state)\n player, _ = game_state_cpy.get_players()\n player.deck.no_attempt_pop_when_empty += 1\n player.health -= player.deck.no_attempt_pop_when_empty\n\n turns = TurnGenerator().generate_all_turns(game_state_cpy)\n self.children = [DecisionTurnNode(state=game_state_cpy,\n parent=self,\n turns=turns)]\n return\n\n nb_cards_in_deck = len(player.deck.cards)\n possible_cards = []\n probabilities = []\n\n for card, nb_card in Counter(player.deck.cards).items():\n possible_cards.append(card)\n probabilities.append(nb_card / nb_cards_in_deck)\n\n self.possible_cards = possible_cards\n self.probs = probabilities\n self.children = [None] * len(self.possible_cards)\n\n def _create_child(self, card_idx):\n game_state_cpy = deepcopy(self.state)\n card_name = self.possible_cards[card_idx].name\n\n # Get card from deck\n player, _ = game_state_cpy.get_players()\n card = None\n\n for c in player.deck.cards:\n if c.name == card_name:\n card = c\n break\n\n assert card is not None\n\n # Put into cards (hand)\n player.cards.append(card)\n\n # Remove from deck\n player.deck.cards.remove(card)\n\n # Check if any turns are possible\n turns = TurnGenerator().generate_all_turns(game_state_cpy)\n if not turns:\n game_state_cpy.curr_step += 1\n player, _ = game_state_cpy.get_players()\n action.increment_mana(player)\n player.already_used_mana = 0\n return DrawCardNode(state=game_state_cpy, parent=self, turn=None)\n\n return DecisionTurnNode(state=game_state_cpy, parent=self, turns=turns)\n","repo_name":"pbielak/hearthstone-mcts","sub_path":"mcts/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"19223150267","text":"\nimport random\n\n\ndef decimal_a_romano(num_decimal):\n \"\"\"\n Convertir de decimal a numeros romanos.\n \"\"\"\n mapa_numeros_romanos = [(\"M\", 1000), (\"CM\", 900), (\"D\", 500), (\"CD\", 400),\n (\"C\", 100), (\"XC\", 90), (\"L\", 50), (\"XL\", 40),\n (\"X\", 10), (\"IX\", 9), (\"V\", 5), (\"IV\", 4), (\"I\", 1)]\n numero_romano = ''\n while num_decimal > 0:\n for numeros, value in mapa_numeros_romanos:\n while num_decimal >= value:\n numero_romano += numeros\n num_decimal -= value\n return numero_romano\n# llamado de la funcion definida anteriormente\n#numero_romano = decimal_a_romano(num_decimal)\n\ncodigo_morse = {\n 'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',\n 'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',\n 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',\n 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',\n 'Y': '-.--', 'Z': '--..', '0': '-----', '1': '.----', '2': '..---',\n '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',\n '8': '---..', '9': '----.', '.': '.-.-.-', ',': '--..--', '?': '..--..',\n \"'\": '.----.', '!': '-.-.--', '/': '-..-.', '(': '-.--.', ')': '-.--.-',\n '&': '.-...', ':': '---...', ';': '-.-.-.', '=': '-...-', '+': '.-.-.',\n '-': '-....-', '_': '..--.-', '\"': '.-..-.', '$': '...-..-', '@': '.--.-.',\n ' ': ' '\n}\n\ndef codificar_codigo(text):\n \"\"\"\n Convertir texto a 
codigo morse.\n \"\"\"\n morse_text = ''\n for char in text:\n morse_char = codigo_morse.get(char.upper(), None)\n if morse_char is not None:\n morse_text += morse_char + ' '\n return morse_text.strip()\n\n\n# codificar texto en morse\n#codificar_texto = codificar_codigo(text)\n\n# Inpresion del codigo morse\n#print(\"codigo morse:\", codificar_texto)\n\n\ndef decimal_a_binario(num_decimal):\n \"\"\"\n Convertir de decimal a binario.\n \"\"\"\n num_binario = bin(num_decimal)\n return num_binario\n\ndef decimal_a_hexa(num_decimal):\n \"\"\"\n Convertir de decimal a hexadecimal.\n \"\"\"\n num_hexa = hex(num_decimal)\n return num_hexa\n\ndef decimal_a_octa(num_decimal):\n \"\"\"\n Convertir de decimal a octal.\n \"\"\"\n num_octal = oct(num_decimal)\n return num_octal\n\n\n# peticion del numero al usuario\nnum_decimal = int(input(\"Ingrese el numero que desea convertir \"))\n\n# variable funcion que se encargara de dar la funcion aleatorea \nfuncion_conversion = random.choice([decimal_a_binario, decimal_a_hexa, decimal_a_octa, decimal_a_romano, codificar_codigo])\n\n\nresultado = funcion_conversion(num_decimal)\n\n# Imprimir resultado\nprint(f\"La {funcion_conversion.__name__.replace('_', ' ')} es la representacion {num_decimal} es: {resultado}\")\n\n","repo_name":"NoheliaHdz/Proyecto_Convertidor","sub_path":"Convertidor_Aleatorio.py","file_name":"Convertidor_Aleatorio.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22664004477","text":"from display import MyScreen\nfrom turtle import Turtle\n\n# Declare variable and CONSTANT\nLEVEL_SCORE = 3\nDISTANCE_FOR_SCORE = 60\nGAME_OVER_IMAGE = \"./image/snake.png\"\nFONT = (\"Courier\", 15, \"bold\")\nALIGN = \"center\"\nfile_score = \"score_file.txt\"\n\n\n# Class Scoreboard\nclass Scoreboard(Turtle):\n \"\"\"\n Class Scoreboard\n Instance:\n score, level, high_score, level_speed\n Method:\n increase_score, increase_level,\n update_high_score, refresh_score\n \"\"\"\n def __init__(self):\n super().__init__()\n self.score = 0\n self.level = 0\n self.high_score = 0\n self.level_speed = 0.1\n self.color(\"#FFFFFF\")\n self.penup()\n self.hideturtle()\n self.goto(0, MyScreen().y_coord - DISTANCE_FOR_SCORE)\n self.read_scoreboard_file()\n self.refresh_score()\n\n# def game_over(self):\n# self.goto(0, 0)\n# MyScreen().this_window.bgpic(GAME_OVER_IMAGE)\n# self.write(f\"GAME OVER\", align=ALIGN, font=FONT)\n\n # Method read high score from file\n def read_scoreboard_file(self):\n \"\"\"# Method read high score from file\"\"\"\n with open(file_score, \"r\") as high_score_file:\n self.high_score = int(high_score_file.read())\n\n def increase_score(self):\n self.score += 1\n self.refresh_score()\n\n def increase_level(self):\n if self.score % LEVEL_SCORE == 0:\n self.level += 1\n self.level_speed *= 0.9\n\n # Method update high score and wtite to file\n def update_high_score(self):\n \"\"\"# Method update high score and wtite to file\"\"\"\n if self.score > self.high_score:\n self.high_score = self.score\n with open(file_score, \"w\") as high_score_file:\n high_score_file.write(str(self.score))\n self.score = 0\n self.level = 0\n self.level_speed = 0.1\n\n def refresh_score(self):\n self.clear()\n self.write(f\"Level: {self.level} Score: {self.score}\\n High Score: {self.high_score}\", align=ALIGN, 
font=FONT)\n","repo_name":"resole79/snake_game","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9457422582","text":"import time\n\nfrom road import Road\n\n\ndef step(road: Road, time_increment: float) -> None:\n \"\"\"\n Plays one step of the simulation with the given road and time increment.\n\n :param road: Road object where the vehicles are stored\n :param time_increment: Time increment (dt) for the simulation. For the current model, anything below 0.5s leads to a static solution (no traffic jam)\n :return: Nothing\n :author: Clément Vellu\n\n \"\"\"\n vehicles = road.vehicles\n for index in range(len(vehicles) - 1):\n vehicles[index].update_speed(vehicles[index+1], time_increment)\n\n vehicles[len(vehicles) - 1].update_speed(vehicles[0], time_increment)\n road.update_pos()\n\n\nif __name__ == '__main__':\n\n road_obj = Road(200, 1, 10)\n print(road_obj)\n\n nb_step = 1000\n dt = 0.5 # Due to the model, any time step below 0.5s leads to a static solution (with no more evolution)\n\n for i in range(nb_step):\n step(road_obj, dt)\n time.sleep(0.01)\n print(road_obj)\n\n input(\"Press any key to continue\")\n","repo_name":"Clem103/Projet_info_embouteillage","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20160999872","text":"from django.urls import path, include\nfrom .views import *\nfrom rest_framework.routers import DefaultRouter\n\nrouter = DefaultRouter()\nrouter.register(\"skill-set\", SkillSetView)\n\n\nurlpatterns = [\n path('', include(router.urls)),\n path('setting', SettingView.as_view()),\n path('login', UserLogin.as_view(), name=\"User_Login\"),\n path('register', Register.as_view(), name=\"User_Register\")\n]\n","repo_name":"adefemi/adefemigreat_ws_backend","sub_path":"user_controller/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22758783982","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.utils as vutils\n\nENTROPY_THRESHOLD = 20\nN_VALIDATE = 1000\nN_CLASSES = 10\n\nclass Trainer(object):\n\n def __init__(self, model, trainloader, testloader, n_epoch, n_sampling, lr):\n self.model = model\n self.trainloader = trainloader\n self.testloader = testloader\n self.n_epoch = n_epoch\n self.n_sampling = n_sampling\n self.optim = optim.SGD(self.model.parameters(), lr=lr)\n\n def train(self):\n for epoch in range(self.n_epoch):\n self.model.train()\n for i, (images, labels) in enumerate(self.trainloader):\n self.optim.zero_grad()\n preds = self.model(images)\n loss = F.nll_loss(torch.log(preds), labels)\n\n loss.backward()\n\n self.optim.step()\n print(\"Epoch: {} Loss: {}\".format(epoch, loss))\n\n def validate(self):\n self.model.train()\n entropy = torch.zeros(N_VALIDATE)\n preds = torch.zeros(self.n_sampling, N_CLASSES)\n images = []\n for i, (image, labels) in enumerate(self.testloader):\n if (i + 1) > N_VALIDATE:\n break\n for sampling in range(self.n_sampling):\n preds[sampling, :] = self.model(image).squeeze()\n\n pred = preds.mean(dim=0)\n entropy[i] = torch.sum(- pred * torch.log(pred), dim=0)\n images.append(image.squeeze().unsqueeze(0))\n\n images = 
torch.stack(images)\n _, sort_idx = entropy.sort()\n high_entropy_images = images[sort_idx][:ENTROPY_THRESHOLD]\n vutils.save_image(high_entropy_images, \"high_entropy.jpg\", nrow=4)\n low_entropy_images = images[sort_idx][-ENTROPY_THRESHOLD:]\n vutils.save_image(low_entropy_images, \"low_entropy.jpg\", nrow=4)\n","repo_name":"yamad07/DropoutAsBayes","sub_path":"src/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37860519297","text":"class Solution:\n def decompressRLElist(self, nums: List[int]) -> List[int]:\n a=[]\n i=0\n j=1\n while(i0):\n a.append(nums[j])\n x=x-1\n i=i+2\n j=j+2\n return a\n \n ","repo_name":"AmitIITP23/LeetCode","sub_path":"decompress-run-length-encoded-list/decompress-run-length-encoded-list.py","file_name":"decompress-run-length-encoded-list.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38337208824","text":"#!/usr/bin/env python3\n\n\"\"\" This module is used as client for the control channel of detours. \"\"\"\n\nimport sys\nfrom pydetours.comm import DefaultChannel\n\n\nkeep_going = True\n\n\ndef status(channel):\n header = {'action': 'status'}\n channel.send([header])\n try:\n response = channel.recv(20*1000) # 5 secs form timeout\n except Exception as e:\n print(\"Unknown status: {}\".format(str(e)))\n else:\n response_header = response[0]\n status_dict = response_header['return']\n for elm, status in status_dict.items():\n print(\"### {} --> {}\".format(elm, status))\n\n\ndef terminate(channel):\n header = {'action': 'terminate'}\n channel.send([header])\n try:\n response = channel.recv(5*1000) # 5 secs form timeout\n except Exception as e:\n print(\"Unreached server: {}\".format(str(e)))\n else:\n response_header = response[0]\n print(\"{}\\n\".format(response_header['return']))\n\n\ndef quit(channel):\n print(\"Leaving client...\\n\")\n global keep_going\n keep_going = False\n sys.exit(0)\n\n\ndef unkown_command():\n print(\"Unknown command. 
Valid commands are: %s\\n\".format(commands.keys))\n\n\ncommands = {'status': status,\n 'terminate': terminate,\n 'quit': quit\n }\n\n\ndef start_service(endpoint):\n control = DefaultChannel(endpoint)\n control.connect()\n\n global keep_going\n while keep_going:\n command = input('> ')\n command_ftn = commands.get(command, unkown_command)\n command_ftn(control)\n print()\n\n\nif __name__ == '__main__':\n try:\n endpoint = sys.argv[1]\n except Exception:\n print(\"Usage: control_service \")\n sys.exit(2)\n else:\n\n start_service(endpoint)\n","repo_name":"michelav/cloud-detours","sub_path":"pydetours/control_service.py","file_name":"control_service.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"2335760561","text":"import telebot\nimport re\nfrom prod_settings import *\n\nbot = telebot.TeleBot(TGBOT_TOKEN)\n\nbot_keyboard = telebot.types.ReplyKeyboardMarkup(True, True)\nbot_keyboard.row('Информация о сайте', 'Связь с администрацией')\n\nHAY = re.compile(r'^(.*?)(how are you|как дела)(.*?)$') # Регулярка для ответа\n\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n bot.send_message(message.chat.id, 'Привет, Вы запустили бота сайта PostBlog, что бы запустить меню: введите /menu')\n\n\n@bot.message_handler(commands=['menu'])\ndef show_menu(message):\n bot.send_message(message.chat.id, 'Меню', reply_markup=bot_keyboard)\n\n\n@bot.message_handler(commands=['commands'])\ndef show_command(message):\n bot.send_message(message.chat.id, '/start\\n/menu')\n\n\n@bot.message_handler(content_types=['text'])\ndef answer_on_message(message):\n if HAY.search(message.text.lower()):\n bot.send_message(message.chat.id, 'Great')\n elif message.text == 'Информация о сайте':\n bot.send_message(message.chat.id, 'Сайт BlogPost создан с применением Python и фреймворка Django. '\n 'Т��к же в создании используются множества других языков и фреймворков. 
'\n ' Что бы ознакомится с исходным кодом, перейдите на GitHub проекта '\n '- https://github.com/97Dmitry')\n elif message.text == 'Связь с администрацией':\n bot.send_message(message.chat.id, 'Для связи используйте https://t.me/PM_White')\n\n else:\n bot.send_message(message.chat.id, 'Таких слов я не понимаю(')\n bot.send_sticker(message.chat.id, 'CAACAgUAAxkBAAECFAABYFSW6VKZFVl2VOCgZmk_v7MK3AcAAngBAAIeQaUI9QcDyQvOvfMeBA')\n bot.send_message(message.chat.id, 'Вы можете узнать список доступных команд, написав /commands')\n\n\nbot.polling()\n","repo_name":"97Dmitry/my-first-blog","sub_path":"TGBot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39725673840","text":"from vue.abstract_vue import AbstractVue\nfrom service.compte_service import CompteService\nimport conf.properties\nimport socket\nimport threading\nimport pickle\nimport sys\n\nstate = {}\n\n\ndef serverListen(serverSocket):\n while True:\n msg = serverSocket.recv(1024).decode(\"utf-8\")\n if msg == \"/MesDemandesMatch\":\n serverSocket.send(bytes(\".\", \"utf-8\"))\n response = serverSocket.recv(1024).decode(\"utf-8\")\n if response == \"/EnvoyerDonner\":\n serverSocket.send(b\"/readyForData\")\n data = pickle.loads(serverSocket.recv(1024))\n if data == set():\n print(\"Vous n'avez pas de \")\n else:\n print(\"Pending Requests:\")\n for element in data:\n print(element)\n else:\n print(response)\n elif msg == \"/approveRequest\":\n serverSocket.send(bytes(\".\", \"utf-8\"))\n response = serverSocket.recv(1024).decode(\"utf-8\")\n if response == \"/proceed\":\n state[\"inputMessage\"] = False\n print(\"Veuillez entrer le nom pour liker: \")\n with state[\"inputCondition\"]:\n state[\"inputCondition\"].wait()\n state[\"inputMessage\"] = True\n serverSocket.send(bytes(state[\"userInput\"], \"utf-8\"))\n print(serverSocket.recv(1024).decode(\"utf-8\"))\n else:\n print(response)\n\n elif msg == \"/choisirInterlocuteur\":\n serverSocket.send(bytes(\".\", \"utf-8\"))\n response = serverSocket.recv(1024).decode(\"utf-8\")\n if response == \"/proceed\":\n state[\"inputMessage\"] = False\n print(\"Veuillez ecrire le nom de l'interlocuteur: \")\n with state[\"inputCondition\"]:\n state[\"inputCondition\"].wait()\n state[\"inputMessage\"] = True\n serverSocket.send(bytes(state[\"userInput\"], \"utf-8\"))\n print(serverSocket.recv(1024).decode(\"utf-8\"))\n state[\"inputMessage\"] = True\n else:\n print(response)\n\n elif msg == \"/messageNonLu\":\n serverSocket.send(bytes(\".\", \"utf-8\"))\n response = serverSocket.recv(1024).decode(\"utf-8\")\n if response == \"/proceed\":\n state[\"inputMessage\"] = False\n print(\"Veuillez ecrire le nom de l'interlocuteur: \")\n with state[\"inputCondition\"]:\n state[\"inputCondition\"].wait()\n state[\"inputMessage\"] = True\n serverSocket.send(bytes(state[\"userInput\"], \"utf-8\"))\n print(serverSocket.recv(1024).decode(\"utf-8\"))\n state[\"inputMessage\"] = True\n else:\n print(response)\n\n\n elif msg == \"/disconnect\":\n serverSocket.send(bytes(\".\", \"utf-8\"))\n state[\"alive\"] = False\n break\n elif msg == \"/messageSend\":\n serverSocket.send(bytes(state[\"userInput\"], \"utf-8\"))\n state[\"sendMessageLock\"].release()\n elif msg == \"/allMembers\":\n serverSocket.send(bytes(\".\", \"utf-8\"))\n data = pickle.loads(serverSocket.recv(1024))\n print(\"All Group Members:\")\n for element in data:\n print(element)\n elif msg == \"/onlineMembers\":\n 
serverSocket.send(bytes(\".\", \"utf-8\"))\n data = pickle.loads(serverSocket.recv(1024))\n print(\"Online Group Members:\")\n for element in data:\n print(element)\n elif msg == \"/changeAdmin\":\n serverSocket.send(bytes(\".\", \"utf-8\"))\n response = serverSocket.recv(1024).decode(\"utf-8\")\n if response == \"/proceed\":\n state[\"inputMessage\"] = False\n print(\"Please enter the username of the new admin: \")\n with state[\"inputCondition\"]:\n state[\"inputCondition\"].wait()\n state[\"inputMessage\"] = True\n serverSocket.send(bytes(state[\"userInput\"], \"utf-8\"))\n print(serverSocket.recv(1024).decode(\"utf-8\"))\n else:\n print(response)\n elif msg == \"/whoAdmin\":\n serverSocket.send(bytes(state[\"groupname\"], \"utf-8\"))\n print(serverSocket.recv(1024).decode(\"utf-8\"))\n elif msg == \"/kickMember\":\n serverSocket.send(bytes(\".\", \"utf-8\"))\n response = serverSocket.recv(1024).decode(\"utf-8\")\n if response == \"/proceed\":\n state[\"inputMessage\"] = False\n print(\"Please enter the username to kick: \")\n with state[\"inputCondition\"]:\n state[\"inputCondition\"].wait()\n state[\"inputMessage\"] = True\n serverSocket.send(bytes(state[\"userInput\"], \"utf-8\"))\n print(serverSocket.recv(1024).decode(\"utf-8\"))\n else:\n print(response)\n elif msg == \"/kicked\":\n state[\"alive\"] = False\n state[\"inputMessage\"] = False\n print(\"Vous avez été retiré\")\n break\n elif msg == \"/fileTransfer\":\n state[\"inputMessage\"] = False\n print(\"Please enter the filename: \")\n with state[\"inputCondition\"]:\n state[\"inputCondition\"].wait()\n state[\"inputMessage\"] = True\n filename = state[\"userInput\"]\n try:\n f = open(filename, 'rb')\n f.close()\n except FileNotFoundError:\n print(\"The requested file does not exist.\")\n serverSocket.send(bytes(\"~error~\", \"utf-8\"))\n continue\n serverSocket.send(bytes(filename, \"utf-8\"))\n serverSocket.recv(1024)\n print(\"Uploading file to server...\")\n with open(filename, 'rb') as f:\n data = f.read()\n dataLen = len(data)\n serverSocket.send(dataLen.to_bytes(4, 'big'))\n serverSocket.send(data)\n print(serverSocket.recv(1024).decode(\"utf-8\"))\n elif msg == \"/receiveFile\":\n print(\"Receiving shared group file...\")\n serverSocket.send(b\"/sendFilename\")\n filename = serverSocket.recv(1024).decode(\"utf-8\")\n serverSocket.send(b\"/sendFile\")\n remaining = int.from_bytes(serverSocket.recv(4), 'big')\n f = open(filename, \"wb\")\n while remaining:\n data = serverSocket.recv(min(remaining, 4096))\n remaining -= len(data)\n f.write(data)\n f.close()\n print(\"Received file saved as\", filename)\n else:\n print(msg)\n\n\ndef userInput(serverSocket):\n while state[\"alive\"]:\n state[\"sendMessageLock\"].acquire()\n state[\"userInput\"] = input()\n state[\"sendMessageLock\"].release()\n with state[\"inputCondition\"]:\n state[\"inputCondition\"].notify()\n if state[\"userInput\"] == \"/1\":\n serverSocket.send(b\"/MesDemandesMatch\")\n elif state[\"userInput\"] == \"/2\":\n serverSocket.send(b\"/approveRequest\")\n elif state[\"userInput\"] == \"/12\":\n serverSocket.send(b\"/choisirInterlocuteur\")\n elif state[\"userInput\"] == \"/13\":\n serverSocket.send(b\"/messageNonLu\")\n elif state[\"userInput\"] == \"/3\":\n serverSocket.send(b\"/disconnect\")\n break\n elif state[\"userInput\"] == \"/4\":\n serverSocket.send(b\"/allMembers\")\n elif state[\"userInput\"] == \"/5\":\n serverSocket.send(b\"/onlineMembers\")\n elif state[\"userInput\"] == \"/6\":\n serverSocket.send(b\"/changeAdmin\")\n elif 
state[\"userInput\"] == \"/7\":\n serverSocket.send(b\"/whoAdmin\")\n elif state[\"userInput\"] == \"/8\":\n serverSocket.send(b\"/kickMember\")\n elif state[\"userInput\"] == \"/9\":\n serverSocket.send(b\"/fileTransfer\")\n elif state[\"inputMessage\"]:\n state[\"sendMessageLock\"].acquire()\n serverSocket.send(b\"/messageSend\")\n\n\ndef waitServerListen(serverSocket):\n while not state[\"alive\"]:\n msg = serverSocket.recv(1024).decode(\"utf-8\")\n if msg == \"/accepted\":\n state[\"alive\"] = True\n print(\"Your join request has been approved. Press any key to begin chatting.\")\n break\n elif msg == \"/waitDisconnect\":\n state[\"joinDisconnect\"] = True\n break\n\n\ndef waitUserInput(serverSocket):\n while not state[\"alive\"]:\n state[\"userInput\"] = input()\n if state[\"userInput\"] == \"/1\" and not state[\"alive\"]:\n serverSocket.send(b\"/waitDisconnect\")\n break\n\n\ndef main():\n serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n serverSocket.connect((conf.properties.HOST, conf.properties.PORT))\n except socket.error:\n print(\"La tentative de connexion à l'adresse choisie a échoué.\")\n sys.exit()\n state[\"inputCondition\"] = threading.Condition()# on met en veille ce thread\n state[\"sendMessageLock\"] = threading.Lock()# on le bloque jusqu'à la réactivation\n state[\"username\"] = AbstractVue.session.user.pseudo\n # Effectuer un like\n state[\"groupname\"] = input(\"Acceder à la messagerie de ? (Entrez le nom): \")\n\n compte_service = CompteService()\n if compte_service.pseudo_disponible(state[\"groupname\"]):\n\n print('L\\'utilisateur {} n\\'existe pas, merci d\\'en choisir un autre ;) '.format(state[\"groupname\"]))\n\n else:\n print(\"L\\'utilisateur existe\")\n\n\n\n\n state[\"alive\"] = False\n state[\"joinDisconnect\"] = False\n state[\"inputMessage\"] = True\n serverSocket.send(bytes(state[\"username\"], \"utf-8\"))\n serverSocket.recv(1024)\n serverSocket.send(bytes(state[\"groupname\"], \"utf-8\"))\n response = serverSocket.recv(1024).decode(\"utf-8\")\n if response == \"/adminReady\":\n print(\"Vous êtes dans vontre messagerie \", state[\"groupname\"], \"vous en êtes l'administrateur.\")\n state[\"alive\"] = True\n elif response == \"/ready\":\n print(\"Vous avez rejoins la messagerie de \", state[\"groupname\"])\n state[\"alive\"] = True\n elif response == \"/wait\":\n print(\" Votre like a été envoyé avec succès.\")\n print(\"Pour vous deconnecter , tapez: /1 \")\n waitUserInputThread = threading.Thread(target=waitUserInput, args=(serverSocket,))\n waitServerListenThread = threading.Thread(target=waitServerListen, args=(serverSocket,))\n userInputThread = threading.Thread(target=userInput, args=(serverSocket,))\n serverListenThread = threading.Thread(target=serverListen, args=(serverSocket,))\n waitUserInputThread.start()\n waitServerListenThread.start()\n while True:\n if state[\"alive\"] or state[\"joinDisconnect\"]:\n break\n if state[\"alive\"]:\n print(\"Menu Messagerie:\\n/1 -> Voir mes likes (Admins)\\n/2 -> Accepter like (Admin)\\n/3 -> Se deconnecter\\n/4 -> Voir mes correspondants\\n/5 -> Qui est en ligne ?\\n/6 -> Proposez un rdv(à faire) \\n/7 -> Chatbot(à faire) \\n/8 -> Bloquer un utilisateur\\n/9 -> Envoyer un fichier \\n \\n Ecrivez pour envoyer de message\")\n waitUserInputThread.join()\n waitServerListenThread.join()\n userInputThread.start()\n serverListenThread.start()\n while True:\n if state[\"joinDisconnect\"]:\n serverSocket.shutdown(socket.SHUT_RDWR)\n serverSocket.close()\n waitUserInputThread.join()\n 
waitServerListenThread.join()\n print(\"Deconnecté de l'application.\")\n break\n elif not state[\"alive\"]:\n serverSocket.shutdown(socket.SHUT_RDWR)\n serverSocket.close()\n userInputThread.join()\n serverListenThread.join()\n print(\"Deconnecté de l'application.\")\n break\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Carloss1998-lab/Application_de-_rencontre2A","sub_path":"Messagerie/codebrut.py","file_name":"codebrut.py","file_ext":"py","file_size_in_byte":12058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69964115690","text":"import numpy as np\nimport torch\nimport torchvision.transforms as T\n\nfrom fed_distill.cifar10.constants import CIFAR10_MEAN, CIFAR10_STD\n\n\ndef prepare_to_visualize(img: torch.Tensor) -> np.ndarray:\n \"\"\"\n Given an image generated from deep inversion (cifar10),\n perform the necessary operations in order to visualize it.\n In particular:\n . The image need to be denormalized (inverse of whitened) according the cifar10 mean and std\n . It is brought back into the [0..1] range\n . Axis 0 and 2 are transposed \n . The image is transformed into a numpy array\n\n Args:\n img (torch.Tensor): tensor representing an image (shape: (3, 32, 32))\n\n Returns:\n np.ndarray: prepared image (shape: (32, 32, 3))\n \"\"\"\n mean, std = np.array(CIFAR10_MEAN), np.array(CIFAR10_STD)\n denormalize = T.Normalize(tuple(-mean / std), tuple(1.0 / std))\n transform = T.Compose(\n [\n denormalize,\n lambda x: (x - x.min()) / (x.max() - x.min()),\n lambda x: x.cpu().numpy().transpose(1, 2, 0),\n ]\n )\n return transform(img)\n","repo_name":"Ahmedjjj/Fed_distill","sub_path":"fed_distill/cifar10/viz.py","file_name":"viz.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"34380738056","text":"import logging\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nfrom overrides import overrides\n\nfrom kernel.tools.fs import Dir\nfrom kernel.tools.paths import resolve_home\nfrom kernel.tools.rng import get_new_uuid\n\nLOG = logging.getLogger(__name__)\n\n\nclass FolderNameFactory(ABC):\n \"\"\"Base class for factories that generate random names for directories.\n \"\"\"\n\n @abstractmethod\n def new_name(self) -> str:\n \"\"\"Creates a new random name for a directory. Calling this multiple\n times will never produce the same name.\n\n :return: Not a path, but just the directory's name.\n \"\"\"\n raise NotImplementedError\n\n\nclass UUIDFactory(FolderNameFactory):\n \"\"\"Factory that generates directories by naming them after UUIDs.\n \"\"\"\n\n @overrides\n def new_name(self) -> str:\n return get_new_uuid()\n\n\nclass WorkspaceFactory:\n \"\"\"Factory that eases the creation of directories where SCM operations,\n such as cloning, can be performed in.\n \"\"\"\n DEFAULT_ROOT = Dir('~/.cibyl', resolve_home)\n \"\"\"Default location where the factory will 'mkdir' in.\"\"\"\n\n @dataclass\n class Tools:\n \"\"\"Tools this uses to perform its task.\n \"\"\"\n folders: FolderNameFactory = field(\n default_factory=lambda *_: UUIDFactory()\n )\n \"\"\"Generates names for the workspaces built by this.\"\"\"\n\n def __init__(\n self,\n root: Optional[Dir] = None,\n tools: Optional[Tools] = None\n ):\n \"\"\"Constructor.\n\n :param root: Directory where all built workspaces will hang from.\n This will never create files on the root, just directories. 
'None'\n to go with the default one.\n :param tools: Tools this uses to perform its task. 'None' to let\n this build its own.\n \"\"\"\n if root is None:\n root = WorkspaceFactory.DEFAULT_ROOT\n\n LOG.debug(\n \"Root for workspaces not provided, defaulted to: '%s'.\",\n root\n )\n\n if tools is None:\n tools = WorkspaceFactory.Tools()\n\n self._root = root\n self._tools = tools\n\n @property\n def root(self) -> Dir:\n \"\"\"\n :return: Directory where all built workspaces will hang from.\n \"\"\"\n return self._root\n\n @property\n def tools(self) -> Tools:\n \"\"\"\n :return: Tools this uses to do its task.\n \"\"\"\n return self._tools\n\n def new_workspace(self) -> Dir:\n \"\"\"Creates a new, empty workspace without conditionals. Two\n workspaces created by this will never have the same name,\n so no need to worry whether the workspace has something inside or not.\n\n :return: Path to the created workspace.\n \"\"\"\n workspace = self.root.cd(path=self.tools.folders.new_name())\n workspace.mkdir(recursive=True)\n\n LOG.debug(\"Created new workspace at: '%s'.\", workspace)\n\n return workspace\n","repo_name":"RedHatCRE/cibyl","sub_path":"kernel/scm/tools/fs.py","file_name":"fs.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"5652492902","text":"\"\"\"\nA module containing different learning rules for use with the SGD training\nalgorithm.\n\"\"\"\nimport numpy as np\nimport warnings\n\nfrom theano.compat import six\nfrom theano import config\nfrom theano import tensor as T\n\nfrom pylearn2.compat import OrderedDict\nfrom pylearn2.space import NullSpace\nfrom pylearn2.train_extensions import TrainExtension\nfrom pylearn2.utils import sharedX\nfrom pylearn2.utils import wraps\nfrom pylearn2.monitor import Monitor\n\n\nclass LearningRule():\n \"\"\"\n A pylearn2 learning rule is an object which computes new parameter values\n given (1) a learning rate (2) current parameter values and (3) the current\n estimated gradient.\n \"\"\"\n\n def add_channels_to_monitor(self, monitor, monitoring_dataset):\n \"\"\"\n Method called by the training algorithm, which allows LearningRules to\n add monitoring channels.\n\n Parameters\n ----------\n monitor : pylearn2.monitor.Monitor\n Monitor object, to which the rule should register additional\n monitoring channels.\n monitoring_dataset : pylearn2.datasets.dataset.Dataset or dict\n Dataset instance or dictionary whose values are Dataset objects.\n \"\"\"\n pass\n\n def get_updates(self, learning_rate, grads, lr_scalers=None):\n \"\"\"\n Provides the symbolic (theano) description of the updates needed to\n perform this learning rule.\n\n Parameters\n ----------\n learning_rate : float\n Learning rate coefficient.\n grads : dict\n A dictionary mapping from the model's parameters to their\n gradients.\n lr_scalers : dict\n A dictionary mapping from the model's parameters to a learning\n rate multiplier.\n\n Returns\n -------\n updates : OrderdDict\n A dictionary mapping from the old model parameters, to their new\n values after a single iteration of the learning rule.\n\n Notes\n -----\n e.g. for standard SGD, one would return `sgd_rule_updates` defined\n below. Note that such a `LearningRule` object is not implemented, as\n these updates are implemented by default when the `learning_rule`\n parameter of sgd.SGD.__init__ is None.\n\n .. 
code-block:: python\n\n sgd_rule_updates = OrderedDict()\n for (param, grad) in grads.iteritems():\n sgd_rule_updates[k] = (param - learning_rate *\n lr_scalers.get(param, 1.) * grad)\n \"\"\"\n raise NotImplementedError(str(type(self)) + \" does not implement \"\n \"get_updates.\")\n\n\nclass Momentum(LearningRule):\n \"\"\"\n Implements momentum as described in Section 9 of\n \"A Practical Guide to Training Restricted Boltzmann Machines\",\n Geoffrey Hinton.\n\n Parameters are updated by the formula:\n inc := momentum * inc - learning_rate * d cost / d param\n param := param + inc\n\n Parameters\n ----------\n init_momentum : float\n Initial value for the momentum coefficient. It remains fixed during\n training unless used with a `MomentumAdjustor`\n extension.\n nesterov_momentum: bool\n Use the accelerated momentum technique described in:\n \"Advances in Optimizing Recurrent Networks\", Yoshua Bengio, et al.\n\n \"\"\"\n\n def __init__(self, init_momentum, nesterov_momentum=False):\n assert init_momentum >= 0.\n assert init_momentum < 1.\n self.momentum = sharedX(init_momentum, 'momentum')\n self.nesterov_momentum = nesterov_momentum\n\n def add_channels_to_monitor(self, monitor, monitoring_dataset):\n \"\"\"\n Activates monitoring of the momentum.\n\n Parameters\n ----------\n monitor : pylearn2.monitor.Monitor\n Monitor object, to which the rule should register additional\n monitoring channels.\n monitoring_dataset : pylearn2.datasets.dataset.Dataset or dict\n Dataset instance or dictionary whose values are Dataset objects.\n \"\"\"\n monitor.add_channel(\n name='momentum',\n ipt=None,\n val=self.momentum,\n data_specs=(NullSpace(), ''),\n dataset=monitoring_dataset)\n\n def get_updates(self, learning_rate, grads, lr_scalers=None):\n \"\"\"\n Provides the updates for learning with gradient descent + momentum.\n\n Parameters\n ----------\n learning_rate : float\n Learning rate coefficient.\n grads : dict\n A dictionary mapping from the model's parameters to their\n gradients.\n lr_scalers : dict\n A dictionary mapping from the model's parameters to a learning\n rate multiplier.\n \"\"\"\n\n updates = OrderedDict()\n\n for (param, grad) in six.iteritems(grads):\n vel = sharedX(param.get_value() * 0.)\n assert param.dtype == vel.dtype\n assert grad.dtype == param.dtype\n if param.name is not None:\n vel.name = 'vel_' + param.name\n\n scaled_lr = learning_rate * lr_scalers.get(param, 1.)\n updates[vel] = self.momentum * vel - scaled_lr * grad\n\n inc = updates[vel]\n if self.nesterov_momentum:\n inc = self.momentum * inc - scaled_lr * grad\n\n assert inc.dtype == vel.dtype\n updates[param] = param + inc\n\n return updates\n\n\nclass MomentumAdjustor(TrainExtension):\n \"\"\"\n A TrainExtension that implements a linear momentum schedule.\n\n Parameters\n ----------\n final_momentum : float\n The momentum coefficient to use at the end of learning.\n start : int\n The epoch on which to start growing the momentum coefficient.\n saturate : int\n The epoch on which the moment should reach its final value.\n \"\"\"\n def __init__(self, final_momentum, start, saturate):\n if saturate < start:\n raise TypeError(\"Momentum can't saturate at its maximum value \" +\n \"before it starts increasing.\")\n\n self.__dict__.update(locals())\n del self.self\n self._initialized = False\n self._count = 0\n\n def setup(self, model, dataset, algorithm):\n \"\"\"\n Initializes the momentum schedule based on epochs_seen.\n\n Parameters\n ----------\n model : pylearn2.models.Model\n The model to which the 
training algorithm is applied.\n dataset : pylearn2.datasets.Dataset\n The dataset to which the model is applied.\n algorithm : pylearn2.training_algorithms.TrainingAlgorithm\n Describes how gradients should be updated.\n \"\"\"\n monitor = Monitor.get_monitor(model)\n self._count = monitor.get_epochs_seen()\n self._apply_momentum(algorithm)\n\n def on_monitor(self, model, dataset, algorithm):\n \"\"\"\n Updates the momentum according to the linear schedule.\n\n Parameters\n ----------\n model : pylearn2.models.Model\n The model to which the training algorithm is applied.\n dataset : pylearn2.datasets.Dataset\n The dataset to which the model is applied.\n algorithm : pylearn2.training_algorithms.TrainingAlgorithm\n Describes how gradients should be updated.\n \"\"\"\n self._count += 1\n self._apply_momentum(algorithm)\n\n def _apply_momentum(self, algorithm):\n \"\"\"Updates the momentum on algorithm based on the epochs elapsed.\"\"\"\n if not hasattr(algorithm, 'learning_rule'):\n raise ValueError(\n 'For MomentumAdjustor to work, you need to use a '\n 'TrainingAlgorithm that supports learning rules '\n '(for instance, SGD), and specify a learning_rule '\n '(for instance, Momentum) for that training algorithm.')\n\n momentum = algorithm.learning_rule.momentum\n\n if not self._initialized:\n self._init_momentum = momentum.get_value()\n self._initialized = True\n\n momentum.set_value(np.cast[config.floatX](self.current_momentum()))\n\n def current_momentum(self):\n \"\"\"Returns the momentum currently desired by the schedule.\"\"\"\n w = self.saturate - self.start\n\n if w == 0:\n # saturate=start, so just jump straight to final momentum\n if self._count >= self.start:\n return self.final_momentum\n return self._init_momentum\n\n alpha = float(self._count - self.start) / float(w)\n if alpha < 0.:\n alpha = 0.\n if alpha > 1.:\n alpha = 1.\n return self._init_momentum * (1 - alpha) + alpha * self.final_momentum\n\n\nclass AdaDelta(LearningRule):\n \"\"\"\n Implements the AdaDelta learning rule as described in:\n \"AdaDelta: An Adaptive Learning Rate Method\", Matthew D. Zeiler.\n\n Parameters\n ----------\n decay : float, optional\n Decay rate :math:`\\\\rho` in Algorithm 1 of the aforementioned\n paper.\n \"\"\"\n\n def __init__(self, decay=0.95):\n assert decay >= 0.\n assert decay < 1.\n self.decay = decay\n\n def get_updates(self, learning_rate, grads, lr_scalers=None):\n \"\"\"\n Compute the AdaDelta updates\n\n Parameters\n ----------\n learning_rate : float\n Learning rate coefficient.\n grads : dict\n A dictionary mapping from the model's parameters to their\n gradients.\n lr_scalers : dict\n A dictionary mapping from the model's parameters to a learning\n rate multiplier.\n \"\"\"\n updates = OrderedDict()\n for param in grads.keys():\n\n # mean_squared_grad := E[g^2]_{t-1}\n mean_square_grad = sharedX(param.get_value() * 0.)\n # mean_square_dx := E[(\\Delta x)^2]_{t-1}\n mean_square_dx = sharedX(param.get_value() * 0.)\n\n if param.name is not None:\n mean_square_grad.name = 'mean_square_grad_' + param.name\n mean_square_dx.name = 'mean_square_dx_' + param.name\n\n # Accumulate gradient\n new_mean_squared_grad = (\n self.decay * mean_square_grad +\n (1 - self.decay) * T.sqr(grads[param])\n )\n\n # Compute update\n epsilon = lr_scalers.get(param, 1.) 
* learning_rate\n rms_dx_tm1 = T.sqrt(mean_square_dx + epsilon)\n rms_grad_t = T.sqrt(new_mean_squared_grad + epsilon)\n delta_x_t = - rms_dx_tm1 / rms_grad_t * grads[param]\n\n # Accumulate updates\n new_mean_square_dx = (\n self.decay * mean_square_dx +\n (1 - self.decay) * T.sqr(delta_x_t)\n )\n\n # Apply update\n updates[mean_square_grad] = new_mean_squared_grad\n updates[mean_square_dx] = new_mean_square_dx\n updates[param] = param + delta_x_t\n\n return updates\n\n\nclass AdaGrad(LearningRule):\n \"\"\"\n Implements the AdaGrad learning rule as described in:\n \"Adaptive subgradient methods for online learning and\n stochastic optimization\", Duchi J, Hazan E, Singer Y.\n\n Parameters\n ----------\n max_scaling: float, optional\n Restrict the gradient scaling coefficient to values\n below `max_scaling`. This prevents corner cases (like all-zero weights)\n to generate NaNs (see #1496).\n \"\"\"\n def __init__(self, max_scaling=1e5):\n assert max_scaling > 0\n self.eps = 1. / max_scaling\n\n def get_updates(self, learning_rate, grads, lr_scalers=None):\n \"\"\"\n Compute the AdaGrad updates\n\n Parameters\n ----------\n learning_rate : float\n Learning rate coefficient.\n grads : dict\n A dictionary mapping from the model's parameters to their\n gradients.\n lr_scalers : dict\n A dictionary mapping from the model's parameters to a learning\n rate multiplier.\n \"\"\"\n updates = OrderedDict()\n for param in grads.keys():\n\n # sum_square_grad := \\sum g^2\n sum_square_grad = sharedX(param.get_value() * 0.)\n\n if param.name is not None:\n sum_square_grad.name = 'sum_square_grad_' + param.name\n\n # Accumulate gradient\n new_sum_squared_grad = (\n sum_square_grad + T.sqr(grads[param])\n )\n\n # Compute update\n epsilon = lr_scalers.get(param, 1.) * learning_rate\n scale = T.maximum(self.eps, T.sqrt(new_sum_squared_grad))\n delta_x_t = (-epsilon / scale * grads[param])\n\n # Apply update\n updates[sum_square_grad] = new_sum_squared_grad\n updates[param] = param + delta_x_t\n\n return updates\n\n\nclass RMSProp(LearningRule):\n \"\"\"\n Implements the RMSProp learning rule.\n\n The RMSProp learning rule is described by Hinton in `lecture 6\n `\n of the Coursera Neural Networks for Machine Learning course.\n\n In short, Hinton suggests \"[the] magnitude of the gradient can be very\n different for different weights and can change during learning. This\n makes it hard to choose a global learning rate.\" RMSProp solves this\n problem by \"[dividing] the learning rate for a weight by a running\n average of the magnitudes of recent gradients for that weight.\"\n\n\n Parameters\n ----------\n decay : float, optional\n Decay constant similar to that used in AdaDelta and Momentum methods.\n max_scaling: float, optional\n Restrict the RMSProp gradient scaling coefficient to values\n below `max_scaling`.\n\n Notes\n -----\n An instance of this LearningRule should only be used with one\n TrainingAlgorithm, and its get_updates method should be called\n only once. This is required in order to make the monitoring\n channels correctly report the moving averages.\n \"\"\"\n\n def __init__(self, decay=0.9, max_scaling=1e5):\n assert 0. <= decay < 1.\n assert max_scaling > 0\n self.decay = sharedX(decay, 'decay')\n self.epsilon = 1. 
/ max_scaling\n self.mean_square_grads = OrderedDict()\n\n @wraps(LearningRule.add_channels_to_monitor)\n def add_channels_to_monitor(self, monitor, monitoring_dataset):\n \"\"\"\n The channels added are the min, mean, and max of the\n mean_square_grad of each parameter.\n \"\"\"\n\n channel_mapping = {\n '_min': T.min,\n '_max': T.max,\n '_mean': T.mean\n }\n\n for mean_square_grad in self.mean_square_grads.values():\n for suffix, op in channel_mapping.items():\n monitor.add_channel(\n name=(mean_square_grad.name + suffix),\n ipt=None,\n val=op(mean_square_grad),\n data_specs=(NullSpace(), ''),\n dataset=monitoring_dataset)\n return\n\n def get_updates(self, learning_rate, grads, lr_scalers=None):\n \"\"\"\n Provides the symbolic (theano) description of the updates needed to\n perform this learning rule. See Notes for side-effects.\n\n Parameters\n ----------\n learning_rate : float\n Learning rate coefficient.\n grads : dict\n A dictionary mapping from the model's parameters to their\n gradients.\n lr_scalers : dict\n A dictionary mapping from the model's parameters to a learning\n rate multiplier.\n\n Returns\n -------\n updates : OrderdDict\n A dictionary mapping from the old model parameters, to their new\n values after a single iteration of the learning rule.\n\n Notes\n -----\n This method has the side effect of storing the moving average\n of the square gradient in `self.mean_square_grads`. This is\n necessary in order for the monitoring channels to be able\n to track the value of these moving averages.\n Therefore, this method should only get called once for each\n instance of RMSProp.\n \"\"\"\n\n updates = OrderedDict()\n for param in grads:\n\n # mean_squared_grad := E[g^2]_{t-1}\n mean_square_grad = sharedX(param.get_value() * 0.)\n\n if param.name is None:\n raise ValueError(\"Model parameters must be named.\")\n mean_square_grad.name = 'mean_square_grad_' + param.name\n\n if param.name in self.mean_square_grads:\n warnings.warn(\"Calling get_updates more than once on the \"\n \"gradients of `%s` may make monitored values \"\n \"incorrect.\" % param.name)\n # Store variable in self.mean_square_grads for monitoring.\n self.mean_square_grads[param.name] = mean_square_grad\n\n # Accumulate gradient\n new_mean_squared_grad = (self.decay * mean_square_grad +\n (1 - self.decay) * T.sqr(grads[param]))\n\n # Compute update\n scaled_lr = lr_scalers.get(param, 1.) 
* learning_rate\n rms_grad_t = T.sqrt(new_mean_squared_grad)\n rms_grad_t = T.maximum(rms_grad_t, self.epsilon)\n delta_x_t = - scaled_lr * grads[param] / rms_grad_t\n\n # Apply update\n updates[mean_square_grad] = new_mean_squared_grad\n updates[param] = param + delta_x_t\n\n return updates\n","repo_name":"lisa-lab/pylearn2","sub_path":"pylearn2/training_algorithms/learning_rule.py","file_name":"learning_rule.py","file_ext":"py","file_size_in_byte":17648,"program_lang":"python","lang":"en","doc_type":"code","stars":2743,"dataset":"github-code","pt":"53"} +{"seq_id":"5863577306","text":"\"\"\"Profiling and testing (through random actions) of the model\nimplementation\"\"\"\nimport time\nimport numpy as np\nfrom pycallgraph import PyCallGraph, Config, GlobbingFilter\nfrom pycallgraph.output import GraphvizOutput\nfrom generator import DefaultGenerator, get_random_action\n\n\ndef main(rand, pcg):\n \"\"\"Keep randomly exploring embeddings\"\"\"\n while True:\n action_list = []\n before = time.time()\n embedding = DefaultGenerator().random_embedding(rand)\n while True:\n action = get_random_action(embedding, rand=rand)\n if action is None:\n break\n action_list.append(action)\n pcg.start(reset=False)\n embedding.take_action(*action)\n pcg.stop()\n elapsed_ms = round((time.time() - before) * 1000, 2)\n if elapsed_ms > 10000:\n pcg.done()\n if action is None:\n break\n actions = len(action_list)\n per_action = round(elapsed_ms / actions, 2)\n print(f\"{elapsed_ms}ms ({actions}, {per_action}ms)\")\n # import sys\n # sys.exit(1)\n\n\ndef profile(rand=np.random):\n \"\"\"Profile the embedding\"\"\"\n config = Config()\n config.trace_filter = GlobbingFilter(exclude=[\"pycallgraph.*\"])\n graphviz = GraphvizOutput(output_file=f\"pc.png\")\n pcg = PyCallGraph(output=graphviz, config=config)\n main(rand, pcg)\n\n\nif __name__ == \"__main__\":\n profile(rand=np.random.RandomState(42))\n","repo_name":"timokau/wsn-embedding-rl","sub_path":"profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18288429002","text":"import os\nimport sys\n\nsys.path.append(\n os.path.normpath(\n os.path.join(os.path.abspath(__file__), \"..\", \"..\", \"..\", \"common\")\n )\n)\nfrom env_indigo import *\n\nindigo = Indigo()\n\nprint(\"*** Rxn to CML ***\")\n\nrxn = indigo.loadReactionFromFile(\n joinPathPy(\"molecules/reaction_for_cml.rxn\", __file__)\n)\nprint(rxn.cml())\n","repo_name":"epam/Indigo","sub_path":"api/tests/integration/tests/formats/rxn_to_cml.py","file_name":"rxn_to_cml.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":257,"dataset":"github-code","pt":"53"} +{"seq_id":"31076853046","text":"import logging\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom decimal import Decimal\n\nimport celery\nfrom django.conf import settings\nfrom django.utils.timezone import now as utcnow\n\nfrom fleio.billing.client_operations_summary import ClientOperationsSummary\nfrom fleio.billing.estimated_usage import EstimatedUsage\nfrom fleio.billing.models import Service\nfrom fleio.billing.models import TaxRule\nfrom fleio.billing.models.calcelation_request import CancellationTypes\nfrom fleio.billing.models.service import ServiceStatus\nfrom fleio.billing.modules.factory import module_factory\nfrom fleio.billing.services import tasks as service_tasks\nfrom fleio.billing.settings import ServiceSuspendType\nfrom 
fleio.billing.settlement_manager import SettlementManager\nfrom fleio.billing.usage_settings import UsageSettings\nfrom fleio.billing.utils import cdecimal\nfrom fleio.core.models import Client\nfrom fleio.core.models import ClientStatus\nfrom fleio.notifications import notifier\nfrom fleio.notifications.models import Notification\nfrom fleio.notifications.utils import reset_current_notification\nfrom fleio.reseller.utils import reseller_suspend_instead_of_terminate\n\nLOG = logging.getLogger(__name__)\n\n\nclass ClientUsage(object):\n def __init__(self, unpaid_usage: Decimal = Decimal(0), estimated_usage: EstimatedUsage = None):\n self.unpaid_usage = unpaid_usage\n self.estimated_usage = estimated_usage if estimated_usage is not None else EstimatedUsage()\n\n def get_remaining_hours(self, available_credit: Decimal, reference_datetime: datetime):\n return self.estimated_usage.get_hours_left(available_credit, reference_datetime=reference_datetime)\n\n\nclass ClientOperations(object):\n def __init__(self, client: Client, reference_datetime: datetime = None):\n self.client = client\n self.billing_settings = self.client.billing_settings\n self.has_billing_agreement = client.has_billing_agreement\n if self.has_billing_agreement:\n credit_limit = self.billing_settings.credit_limit_with_agreement\n if self.client.billing_settings.credit_notifications_enabled:\n self.credit_notifications_enabled = self.billing_settings.credit_notifications_when_agreement_enabled\n else:\n self.credit_notifications_enabled = False\n else:\n credit_limit = self.client.billing_settings.credit_limit\n self.credit_notifications_enabled = self.billing_settings.credit_notifications_enabled\n self.client_credit_limit = credit_limit\n self.reference_datetime = reference_datetime if reference_datetime is not None else utcnow()\n\n self.__client_usage = None\n self.summary = ClientOperationsSummary(self.client, self.client.get_uptodate_credit())\n\n def __compute_client_usage(self) -> ClientUsage:\n # compute total unpaid usage for all services associated with the client\n LOG.debug('Computing total usage for client {}'.format(self.client))\n total_unpaid_usage = Decimal(0)\n total_estimated_usage = EstimatedUsage()\n usage_settings = UsageSettings(billing_settings=self.billing_settings)\n for service in self.client.services.all():\n billing_module = module_factory.get_module_instance(service=service)\n\n # get unpaid usage from billing module\n unpaid_usage = billing_module.get_unpaid_usage(service)\n total_unpaid_usage += unpaid_usage.total_cost\n\n if service.status == ServiceStatus.active:\n # get unpaid usage from service\n if service.next_due_date is not None and service.next_due_date < self.reference_datetime:\n total_unpaid_usage += service.get_fixed_price()\n\n # see if we need to get estimated usage from billing module\n if not service.is_price_overridden:\n # get estimated usage from billing module\n estimated_usage = billing_module.get_estimated_usage(service, usage_settings=usage_settings)\n total_estimated_usage += estimated_usage\n\n # add service static price if needed\n if service.get_fixed_price() > 0:\n service_fixed_usage = EstimatedUsage.create_for_fixed_price(\n fixed_price=service.get_fixed_price(),\n cycle_end_date=service.next_due_date,\n get_next_end_date=lambda previous_end_date, s=service: s.get_next_due_date(previous_end_date),\n usage_settings=usage_settings\n )\n\n total_estimated_usage += service_fixed_usage\n\n return ClientUsage(total_unpaid_usage, total_estimated_usage)\n\n def 
update_usage(self, skip_collecting: bool = False, skip_compute_current: bool = False):\n if not skip_collecting:\n usage_settings = UsageSettings(billing_settings=self.billing_settings)\n\n # compute total unpaid usage for all services associated with the client\n LOG.debug('Updating usage for client {}'.format(self.client))\n for service in self.client.services.all():\n billing_module = module_factory.get_module_instance(service=service)\n billing_module.collect_usage(service=service, usage_settings=usage_settings)\n\n if not skip_compute_current:\n # compute current client usage\n self.__client_usage = self.__compute_client_usage()\n\n # update uptodate credit for client\n uptodate_credit = cdecimal(\n self.client.get_remaining_credit(self.client_usage.unpaid_usage, self.client.currency.code)\n )\n self.client.set_uptodate_credit(uptodate_credit=uptodate_credit)\n\n self.update_outofcredit_status()\n\n # log to summary\n self.summary.update_uptodate_credit(self.uptodate_credit)\n\n def reset_usage(self):\n # call reset usage for all services\n for service in self.client.services.all():\n billing_module = module_factory.get_module_instance(service=service)\n billing_module.reset_usage(service=service)\n\n # recalculate usage after reset\n self.update_usage()\n\n @property\n def client_usage(self) -> ClientUsage:\n if self.__client_usage is None:\n self.update_usage(skip_collecting=True)\n\n return self.__client_usage\n\n @client_usage.setter\n def client_usage(self, value: ClientUsage):\n self.__client_usage = value\n\n @property\n def uptodate_credit(self):\n if not self.client.has_uptodate_credit:\n self.update_usage(skip_collecting=True)\n\n return self.client.get_uptodate_credit()\n\n def get_add_credit_url(self):\n \"\"\"\n :return: URL where customers can add credit to their accounts\n \"\"\"\n if not settings.ADD_CREDIT_URLS:\n if self.client.reseller_resources:\n return self.client.reseller_resources.enduser_panel_url\n else:\n # if it's empty setting\n return settings.FRONTEND_URL\n\n if len(settings.ADD_CREDIT_URLS) == 1 or self.client.groups.all().count() <= 1:\n # just one item in dictionary, return it\n return list(settings.ADD_CREDIT_URLS.values())[0]\n\n group = self.client.groups.all()[0]\n if group.name in settings.ADD_CREDIT_URLS:\n return settings.ADD_CREDIT_URLS[group.name]\n\n return list(settings.ADD_CREDIT_URLS.values())[0]\n\n def evaluate_and_send_low_credit_notifications(self):\n LOG.debug('Evaluating and sending low credit notifications for client {}...'.format(self.client))\n\n remaining_credit = self.uptodate_credit - self.client_credit_limit\n remaining_hours = self.client_usage.get_remaining_hours(remaining_credit, self.reference_datetime)\n\n # reset the is_current field on out of credit notifications in order to\n # send again notifications in the future\n if remaining_hours > self.billing_settings.third_credit_remaining_hours:\n reset_current_notification(\n client=self.client,\n notification_name=self.billing_settings.third_credit_notification_template,\n priority=Notification.PRIORITY_CRITICAL # reset third nt which always has critical priority\n )\n if remaining_hours > self.billing_settings.second_credit_remaining_hours:\n reset_current_notification(\n client=self.client,\n notification_name=self.billing_settings.second_credit_notification_template,\n priority=Notification.PRIORITY_HIGH, # reset second nt which always has high priority\n )\n if remaining_hours > self.billing_settings.first_credit_remaining_hours:\n reset_current_notification(\n 
client=self.client,\n notification_name=self.billing_settings.first_credit_notification_template,\n priority=Notification.PRIORITY_LOW, # reset third nt which always has low priority\n )\n\n self.summary.update_remaining_hours(remaining_hours)\n\n if not self.credit_notifications_enabled:\n LOG.debug('Credit notifications not enabled, skipping')\n return\n\n if not self.client.status == ClientStatus.active:\n LOG.warning('Evaluate and send low credit notifications called for inactive client, skipping')\n return\n\n variables = {\n 'add_credit_url': self.get_add_credit_url(),\n 'credit_hours_left': remaining_hours,\n 'currency': str(self.client.currency),\n 'credit': '{0:.2f}'.format(remaining_credit)\n }\n\n if (self.billing_settings.third_credit_notification_template and\n (self.billing_settings.third_credit_remaining_hours >= remaining_hours)):\n LOG.debug('Sending third low credit notification')\n notifier.send(client=self.client,\n name=self.billing_settings.third_credit_notification_template,\n priority=notifier.Notification.PRIORITY_CRITICAL,\n variables=variables,\n is_current=True,\n check_if_already_notified=True,\n is_current_verification=True, )\n elif (self.billing_settings.second_credit_notification_template and\n (self.billing_settings.second_credit_remaining_hours >= remaining_hours)):\n LOG.debug('Sending second low credit notification')\n notifier.send(client=self.client,\n name=self.billing_settings.second_credit_notification_template,\n priority=notifier.Notification.PRIORITY_HIGH,\n variables=variables,\n is_current=True,\n check_if_already_notified=True,\n is_current_verification=True, )\n elif (self.billing_settings.first_credit_notification_template and\n (self.billing_settings.first_credit_remaining_hours >= remaining_hours)):\n LOG.debug('Sending first low credit notification')\n notifier.send(client=self.client,\n name=self.billing_settings.first_credit_notification_template,\n priority=notifier.Notification.PRIORITY_LOW,\n variables=variables,\n is_current=True,\n check_if_already_notified=True,\n is_current_verification=True, )\n\n def suspend(self, reason: str = ServiceSuspendType.SUSPEND_REASON_UNSPECIFIED, suspend_type: str = None) -> bool:\n LOG.debug('Suspend called for client {} with suspend type {} and reason {}'.format(\n self.client, suspend_type, reason\n ))\n\n if self.client.status == ClientStatus.suspended:\n LOG.warning('Suspend called for already suspended client, skipping')\n return False\n\n LOG.debug('Suspending client')\n suspend_service_tasks = list()\n for service in self.client.services.active():\n if suspend_type == ServiceSuspendType.overdue and service.is_suspend_overridden():\n # do not suspend services with suspend overridden\n continue\n suspend_service_tasks.append(service_tasks.suspend_service.s(\n service.id, reason,\n suspend_type=suspend_type\n ))\n\n celery.group(suspend_service_tasks).apply_async()\n\n self.client.status = ClientStatus.suspended\n self.client.suspend_reason = suspend_type\n self.client.save(update_fields=['status', 'suspend_reason'])\n LOG.debug('Client suspended')\n\n return True\n\n def resume(self, suspend_type: str = None) -> bool:\n LOG.debug('Resume called for client {} with suspend type {}'.format(self.client, suspend_type))\n\n if self.client.status == ClientStatus.active:\n LOG.warning('Resume called for already active client, skipping')\n return False\n\n if not self.client.suspend_reason == suspend_type:\n LOG.debug(\n 'Resume called for suspended client with suspend reason other than {}, 
skipping'.format(\n self.client.suspend_reason\n )\n )\n return False\n\n LOG.debug('Resuming client')\n resume_service_tasks = list()\n for service in self.client.services.suspended(suspend_type=suspend_type):\n resume_service_tasks.append(service_tasks.resume_service.s(service.id))\n\n celery.group(resume_service_tasks).apply_async()\n\n self.client.status = ClientStatus.active\n self.client.suspend_reason = ClientStatus.active\n self.client.save(update_fields=['status', 'suspend_reason'])\n LOG.debug('Client resumed')\n\n return True\n\n def update_outofcredit_status(self):\n if self.uptodate_credit < self.client_credit_limit:\n self.client.set_outofcredit(self.reference_datetime)\n else:\n self.client.clear_outofcredit()\n\n self.summary.update_outofcredit_status()\n\n def evaluate_and_suspend_if_overdue(self) -> bool:\n LOG.debug('Evaluate and suspend called for client {}'.format(self.client))\n\n if self.client.status == ClientStatus.suspended:\n LOG.warning('Client already suspended, skipping')\n return False\n\n self.update_outofcredit_status()\n\n if not self.client.is_outofcredit:\n LOG.debug('Client is not overdue, skipping')\n return False\n\n LOG.debug('Client is overdue, check for suspend delays')\n overdue_hours = self.client.get_hours_since_outofcredit(self.reference_datetime)\n overdue_credit = -(self.uptodate_credit - self.client_credit_limit)\n can_suspend = not (\n self.billing_settings.auto_suspend_delay_hours_enabled or\n self.billing_settings.auto_suspend_delay_credit_enabled\n )\n\n if can_suspend:\n LOG.debug('Client can be suspended because no suspension delay is active')\n else:\n if self.billing_settings.auto_suspend_delay_hours_enabled:\n if overdue_hours >= self.billing_settings.auto_suspend_delay_hours:\n LOG.debug('Client can be suspended because hours delay was reached')\n can_suspend = True\n else:\n self.summary.update_overdue_hours(\n overdue_hours,\n self.billing_settings.auto_suspend_delay_hours\n )\n if not can_suspend and self.billing_settings.auto_suspend_delay_credit_enabled:\n if overdue_credit >= self.billing_settings.auto_suspend_delay_credit:\n LOG.debug('Client can be suspended because credit delay was reached')\n can_suspend = True\n else:\n self.summary.update_overdue_credit(\n overdue_credit,\n self.billing_settings.auto_suspend_delay_credit\n )\n\n self.summary.update_suspend_status(can_suspend, self.billing_settings.auto_suspend)\n\n if not self.billing_settings.auto_suspend: # Auto suspend disabled\n LOG.debug('Auto suspend not active, skipping')\n return False\n\n if can_suspend:\n LOG.debug('Suspending client')\n self.suspend(reason=ServiceSuspendType.SUSPEND_REASON_OVERDUE, suspend_type=ServiceSuspendType.overdue)\n if self.billing_settings.auto_suspend_notification_template:\n variables = {'add_credit_url': self.get_add_credit_url()}\n notifier.critical(client=self.client,\n name=self.billing_settings.auto_suspend_notification_template,\n variables=variables)\n self.summary.update_status(self.client.status)\n return True\n else:\n LOG.debug('Client is overdue, but suspension is delayed, will not suspend')\n return False\n\n def evaluate_and_resume_if_enough_credit(self) -> bool:\n LOG.debug('Evaluate and resume called for client {}'.format(self.client))\n\n if self.client.status == ClientStatus.active:\n LOG.warning('Evaluate and resume called for active client, skipping')\n return False\n\n self.update_outofcredit_status()\n\n if self.client.is_outofcredit:\n LOG.debug('Client is overdue, skipping')\n return False\n else:\n 
LOG.debug('Client over limit, resuming')\n self.resume(suspend_type=ServiceSuspendType.overdue)\n LOG.debug('Client resumed')\n self.summary.update_status(self.client.status)\n\n return True\n\n def process_services_with_suspend_override(self):\n # only active services for suspended clients should be services with suspend overridden\n # so it should be safe to select only those\n services_to_suspend = Service.objects.filter(\n client=self.client,\n client__status=ClientStatus.suspended,\n status=ServiceStatus.active\n )\n\n for service in services_to_suspend:\n if not service.is_suspend_overridden():\n service_tasks.suspend_service.delay(\n service.id, ServiceSuspendType.SUSPEND_REASON_OVERDUE,\n suspend_type=ServiceSuspendType.overdue\n )\n self.summary.update_suspend_overridden_service_count(1)\n\n def process_client_suspended_services(self):\n # will set auto terminate date and terminate suspended services that exceeded that date\n if not (self.billing_settings.suspend_instead_of_terminate or reseller_suspend_instead_of_terminate(\n client=self.client,\n )):\n LOG.info('Processing client suspended services')\n terminate_service_tasks = list()\n suspended_services = Service.objects.filter(client=self.client, status=ServiceStatus.suspended)\n now = utcnow()\n termination_date = now + timedelta(hours=self.billing_settings.auto_terminate_delay_hours)\n for service in suspended_services:\n if not service.auto_terminate_date:\n service.auto_terminate_date = termination_date\n service.save()\n else:\n if service.auto_terminate_date < now:\n terminate_service_tasks.append(service_tasks.terminate_service.s(service.id))\n if self.billing_settings.auto_terminate_notification_template:\n variables = {\n 'terminated_service_id': service.id,\n 'terminated_service_display_name': service.display_name,\n }\n notifier.critical(client=self.client,\n name=self.billing_settings.auto_terminate_notification_template,\n variables=variables)\n\n if len(terminate_service_tasks):\n self.summary.update_auto_terminated_services_count(count=len(terminate_service_tasks))\n celery.group(terminate_service_tasks).apply_async()\n\n def process_client_services(self):\n LOG.info('Processing client services')\n client_tax_rules = None\n if self.billing_settings.generate_invoices or self.billing_settings.auto_settle_usage:\n client_tax_rules = TaxRule.for_country_and_state(\n country=self.client.country_name,\n state=self.client.state\n )\n\n if self.billing_settings.generate_invoices:\n \"\"\"Creates an invoice with all client services that are in their due date.\"\"\"\n active_services_in_invoice_due = self.client.services.active().recurring().in_invoice_due()\n active_services_in_invoice_due = active_services_in_invoice_due.filter(cancellation_request__isnull=True)\n\n if active_services_in_invoice_due:\n if SettlementManager.process_services(self.client, active_services_in_invoice_due, client_tax_rules):\n self.summary.update_invoiced_services_count(active_services_in_invoice_due.count())\n else:\n if self.billing_settings.auto_settle_usage:\n SettlementManager.settle_services_usage_from_client_credit(\n self.client,\n self.client.services.active().in_due(),\n client_tax_rules\n )\n\n def process_cancellation_requests(self):\n svc_filter = self.client.services.active().non_free()\n cancellable_services = svc_filter.filter(\n cancellation_request__cancellation_type=CancellationTypes.END_OF_CYCLE,\n next_due_date__lte=utcnow()\n )\n\n for s in cancellable_services:\n if self.billing_settings.suspend_instead_of_terminate or 
reseller_suspend_instead_of_terminate(\n client=self.client,\n ):\n service_tasks.suspend_service.delay(\n s.pk,\n reason=ServiceSuspendType.SUSPEND_REASON_UNSPECIFIED,\n suspend_type=ServiceSuspendType.staff\n )\n else:\n service_tasks.terminate_service.delay(s.pk, cancellation_request_id=s.cancellation_request.pk)\n self.summary.update_cancelled_services_count(1)\n","repo_name":"pizzhub/backendfleio-test","sub_path":"project/fleio/billing/client_operations.py","file_name":"client_operations.py","file_ext":"py","file_size_in_byte":22826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21053340083","text":"# -*- encoding: utf-8 -*-\n\n\nfrom os import name\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom django import template\nfrom django.shortcuts import render\nfrom plotly.offline import plot\nimport plotly.graph_objs as go\nfrom plotly.graph_objs import Scatter\nimport pandas as pd\nfrom django.contrib.auth.models import User\nfrom todolist.models import Task\nfrom calendarapp.models import Event\n\n# Libraries supports for Machine Learning part\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom math import sqrt\nfrom statsmodels.compat import lzip\nimport numpy as np\nimport statsmodels.api as sm\nimport pandas as pd\n\n\n# Loads Dashboard Page\n\n@login_required(login_url=\"/login/\")\n\ndef index(request):\n \n context = {}\n context['segment'] = 'index'\n\n html_template = loader.get_template( 'index.html' )\n return HttpResponse(html_template.render(context, request))\n\ndef index(request):\n df = pd.read_csv('User_schedule_update.xls')\n context = {}\n context['graph'] = 'plot_div'\n x_data = df['Hour'].head(24)\n y_data = df['Make_Schedule_count_byweek']\n\n plot_div = plot({\n 'data': [Scatter(x=x_data, y=y_data,\n mode='lines', name='test',\n opacity=0.5, marker_color='green')],\n 'layout': {'title': 'Schedule', 'xaxis': {'title': 'Hours'}, 'yaxis': {'title': 'Booking Counts '}}\n }, output_type='div')\n\n \n return render(request, \"index.html\", context={'plot_div': plot_div})\n\n\n\n # Generate the task list and event list\n task_list = Task.objects.filter(user=request.user)\n event_list = Event.objects.filter(user=request.user)\n context['tasks'] = task_list\n context['events'] = event_list\n\n graph = [50, 40, 300, 220, 500, 250, 400, 230, 500]\n context['data_set'] = graph\n \n\n html_template = loader.get_template( 'index.html')\n return HttpResponse(html_template.render(context, request))\n\n# Loads Landing Page\n@login_required\ndef mainpage(request):\n context = {}\n context['segment'] = 'index'\n html_template = loader.get_template( 'mainpage.html' )\n return HttpResponse(html_template.render(context, request))\n\n@login_required\ndef pages(request):\n context = {}\n # All resource paths end in .html.\n # Pick out the html file name from the url. 
And load that template.\n try:\n \n load_template = request.path.split('/')[-1]\n context['segment'] = load_template\n \n html_template = loader.get_template( load_template )\n return HttpResponse(html_template.render(context, request))\n \n except template.TemplateDoesNotExist:\n\n html_template = loader.get_template( 'page-404.html' )\n return HttpResponse(html_template.render(context, request))\n\n except:\n \n html_template = loader.get_template( 'page-500.html' )\n return HttpResponse(html_template.render(context, request))\n\ndef result(request):\n\n df_schedule = pd.read_csv('User_schedule.csv')\n df_schedule.drop(['Day', 'Month', 'Year', 'Time'], axis=1, inplace=True)\n # display(df_schedule.head(20))\n df_schedule['Make_Schedule_count_byweek'] = df_schedule.groupby(['week_number', 'Hour'])['Make_Schedule'].transform(\n 'sum')\n df_schedule = df_schedule.drop_duplicates(subset=['week_number', 'Hour']).drop('Make_Schedule', 1)\n df_schedule['datetime'] = df_schedule['week_number'].map(str) + '_' + df_schedule['Hour'].map(str)\n\n df_schedule['index'] = df_schedule.index\n\n def f(x):\n if (x['Hour'] >= 1) and (x['Hour'] <= 6):\n return \"Sleep Time\"\n elif x['Hour'] >= 7 and x['Hour'] <= 12:\n return \"Morning Time\"\n elif x['Hour'] >= 13 and x['Hour'] <= 18:\n return \"Afternoon Time\"\n else:\n return \"Night Time\"\n\n df_schedule['Time_Region'] = df_schedule.apply(f, axis=1)\n\n df_schedule['Avg. Make_Schedule_count_byweek'] = df_schedule.groupby(['week_number', 'Time_Region'])[\n 'Make_Schedule_count_byweek'].transform('mean')\n\n df_schedule2 = df_schedule.drop(\n ['Hour', 'week_number', 'datetime', 'Time_Region', 'Avg. Make_Schedule_count_byweek'], 1)\n df_schedule2 = df_schedule2.drop('index', 1)\n X = df_schedule2.values\n\n # split into train and test sets\n\n size = int(len(X)) - 24\n train, test = X[0:size], X[size:len(X)]\n history = [x for x in train]\n predictions = list()\n\n # evaluate an ARIMA model using a walk-forward validation\n\n for t in range(len(test)):\n model = ARIMA(history, order=(5, 1, 0))\n model_fit = model.fit()\n output = model_fit.forecast()\n yhat = output[0]\n predictions.append(yhat)\n obs = test[t]\n history.append(obs)\n\n df_update = pd.DataFrame(predictions, columns=['Prediction Number of Booking'])\n df_update['Time'] = df_update.index\n df_update.to_csv('PreditionTable.csv')\n\n\n max_value = max(predictions)\n max_index = predictions.index(max_value)\n\n mx = max(predictions[0], predictions[1])\n second_max_value = min(predictions[0], predictions[1])\n n = len(predictions)\n for i in range(2, n):\n if predictions[i] > mx:\n second_max_value = mx\n mx = predictions[i]\n elif (predictions[i] > second_max_value) and (mx != predictions[i]):\n second_max_value = predictions[i]\n\n second_max_index = predictions.index(second_max_value)\n result_expected_1st = 'The first priority hour: '+str(max_index)+':00'\n result_expected_2nd = 'The second priority hour: ' +str(second_max_index)+':00' \n \n # Graph\n df = pd.read_csv('User_schedule_update.xls')\n context = {}\n context['graph'] = 'plot_div'\n x_data = df['Hour'].head(24)\n y_data = df['Make_Schedule_count_byweek']\n plot_div = plot({\n 'data': [Scatter(x=x_data, y=y_data,\n mode='lines', name='test',\n opacity=0.5, marker_color='green')],\n 'layout': {'title': 'Schedule', 'xaxis': {'title': 'Hours'}, 'yaxis': {'title': 'Booking Counts '}}\n }, output_type='div')\n\n return render(request,'index.html',{'result_1st': result_expected_1st,'result_2nd': result_expected_2nd, 'plot_div': 
plot_div})\n\n","repo_name":"JaeZhou/Mindful-Planner","sub_path":"AppCode/MindfulPlanner/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9750231773","text":"import cv2\nimport time\nimport numpy as np\nfrom skimage.feature import hog\nimport multiprocessing as mp\nfrom threading import RLock, Lock\n\n# ############################\n# CONFIGURATION\n# ############################\n\n# HOG Parameters\norient = 9\npix_per_cell = (8, 8)\ncell_per_block = (2, 2)\nhog_channel = 'ALL' # Can be 0, 1 ,2 or 'ALL'\ntransform_sqrt = False\n\n# Color Histogram Parameters:\nnbins = 32\nbin_range = (0.0, 1.)\n# Spatial Bin Parameters:\nspatial_size = (32, 32)\ncolor_space = 'YCrCb'\n\nclahe = cv2.createCLAHE(clipLimit=2.0)\n\n\ndef get_feature(images, workers=4):\n pool = mp.Pool(processes=workers)\n results = []\n t = time.time()\n\n for img in images:\n image = cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2RGB)\n image = adaptive_equalize_image(image)\n results.append(image)\n\n avg, features = zip(*pool.map(process_img, results))\n print(\"Total time: {} seconds\".format(time.time() - t))\n print(\"Average time / feature : {} seconds\".format(np.average(avg)))\n test = cv2.cvtColor(cv2.imread(images[0]), cv2.COLOR_BGR2RGB)/255\n print(\"Max Value {} Min Value {}\\n\".format(np.max(test), np.min(test)))\n\n return features\n\n\ndef process_img(img):\n t = time.time()\n feature = np.concatenate(extract_feature(img))\n avg = (time.time() - t)\n return avg, feature\n\n\ndef extract_feature(img):\n feature_img = None\n if color_space != 'RGB':\n if color_space is 'HSV':\n feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif color_space is 'HLS':\n feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif color_space is 'LUV':\n feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif color_space is 'YUV':\n feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif color_space is 'YCrCb':\n feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n else:\n feature_img = np.copy(img)\n\n if (255 - np.max(feature_img)) > 100:\n feature_img = feature_img/255\n\n feature = []\n\n bin_feat = bin_spatial(feature_img)\n feature.append(bin_feat)\n\n color_feat = color_hist(feature_img)\n feature.append(color_feat)\n\n hog_feat = get_hog_features(feature_img)\n feature.append(hog_feat)\n\n return feature\n\n\ndef adaptive_equalize_image(img, level=2.0):\n \"\"\"\n Equalize an image - Increase contrast for the image\n # http://docs.opencv.org/3.1.0/d5/daf/tutorial_py_histogram_equalization.html\n\n :param img: an image\n :param level: clipLevel\n :return: a equalized image\n \"\"\"\n\n if img.shape[2] == 3:\n lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)\n l, a, b = cv2.split(lab)\n cl = clahe.apply(l)\n result = cv2.merge((cl, a, b))\n result = cv2.cvtColor(result, cv2.COLOR_LAB2RGB)\n else:\n result = clahe.apply(img)\n return result\n\n\ndef convert_color(img, conv='RGB2YCrCb'):\n if conv == 'RGB2YCrCb':\n return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n if conv == 'BGR2YCrCb':\n return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)\n if conv == 'RGB2LUV':\n return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n\n\ndef get_hog_features(img, ch='ALL', orient=9, pix_per_cell=8, cell_per_block=2, vis=False, feature_vec=True):\n if ch == 'ALL':\n hog_features = []\n for c in range(img.shape[2]):\n if vis is True:\n hog_feature, hog_img = hog(img[:, :, c], orientations=orient,\n 
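                                       # skimage.feature.hog parameters: `orientations` is the number of
                                       # gradient-orientation bins per cell histogram, `pixels_per_cell`
                                       # sets the cell size in pixels, `cells_per_block` sets how many
                                       # cells share one block-level normalisation, and `transform_sqrt`
                                       # applies power-law (gamma) compression before the gradients are
                                       # computed. Note the `visualise` flag used below is spelled
                                       # `visualize` in newer scikit-image releases.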
pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block),\n transform_sqrt=transform_sqrt,\n visualise=True, feature_vector=feature_vec)\n else:\n hog_feature = hog(img[:, :, c], orientations=orient,\n pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block),\n transform_sqrt=transform_sqrt,\n visualise=False, feature_vector=feature_vec)\n\n hog_features.append(hog_feature)\n\n hog_features = np.ravel(hog_features)\n else:\n if vis is True:\n hog_features, hog_img = hog(img[:, :, ch], orientations=orient,\n pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block),\n transform_sqrt=transform_sqrt,\n visualise=True, feature_vector=feature_vec)\n else:\n hog_features = hog(img[:, :, ch], orientations=orient,\n pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block),\n transform_sqrt=transform_sqrt,\n visualise=False, feature_vector=feature_vec)\n if vis is True:\n return hog_features, hog_img\n else:\n return hog_features\n\n\ndef bin_spatial(img, size=(32, 32)):\n color1 = cv2.resize(img[:, :, 0], size).ravel()\n color2 = cv2.resize(img[:, :, 1], size).ravel()\n color3 = cv2.resize(img[:, :, 2], size).ravel()\n return np.hstack((color1, color2, color3))\n\n\ndef color_hist(img, nbins=32): # bins_range=(0, 256)\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:, :, 0], bins=nbins)\n channel2_hist = np.histogram(img[:, :, 1], bins=nbins)\n channel3_hist = np.histogram(img[:, :, 2], bins=nbins)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features\n","repo_name":"datlife/vehicle-tracker","sub_path":"utils/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"25869026503","text":"from django.conf import settings as django_settings\nfrom django.utils.importlib import import_module\n\nsettings = {}\n\n\ndef register_setting(namespace, module, name, global_name, default, exists=False, description=u'', hidden=False):\n # Create namespace if it doesn't exists\n settings.setdefault(namespace, [])\n\n # If passed a string and not a module, import it\n if isinstance(module, basestring):\n module = import_module(module)\n\n setting = {\n 'module': module,\n 'name': name,\n 'global_name': global_name,\n 'exists': exists,\n 'description': description,\n 'default': default,\n 'hidden': hidden,\n }\n\n # Avoid multiple appends\n if setting not in settings[namespace]:\n settings[namespace].append(setting)\n\n # Get the global value\n value = getattr(django_settings, global_name, default)\n\n # Create the local entity\n setattr(module, name, value)\n return value\n\n\ndef register_settings(namespace, module, settings):\n for setting in settings:\n register_setting(\n namespace,\n module,\n setting['name'],\n setting['global_name'],\n setting['default'],\n setting.get('exists', False),\n setting.get('description', u''),\n setting.get('hidden', False),\n 
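            # exists, description and hidden are optional per-setting keys;
            # .get() supplies their defaults, so callers only need to provide
            # name, global_name and default in each settings dict.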
)\n","repo_name":"ranjithtenz/cujo","sub_path":"apps/smart_settings/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74062762087","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\nimport os\nimport pandas\nimport glob\nimport sys\nimport matplotlib.pyplot as plt\nfrom utils import find_column_indices, parse_date, MAPE\nfrom beta_static import beta_static_lookup\n\ndata_file = sys.argv[1]\ndfs = pandas.read_excel(f'data/{data_file}.xlsx', sheet_name=None,\n skiprows=1)\nsheetnames = list(dfs.keys())\n\n# In\n\nif not os.path.isdir(\"data\"):\n os.makedirs(\"data\")\nbaselines = dfs['Baseline Schedule'][['ID', 'Duration', 'Total Cost']].values\nbaselines[:, 1] = [parse_date(x) for x in baselines[:, 1]]\n# planned duration\nBAC = baselines[0, 2]\ntracking_periods = [x for x in sheetnames if \"TP\" in x]\nn_tracking_periods = baselines[0, 1] / (20*60)\nprint(\"BAC:\", BAC)\nprint(\"Number of tracking periods:\", n_tracking_periods)\n\n\ndef cost_forecasting():\n # init trend\n Ts_AC = [BAC/n_tracking_periods]\n Ts_EV = [BAC/n_tracking_periods]\n print(\"T0_AC = T0_EV: \", Ts_AC[0])\n\n # Col 0 = ID, col 12 = Duration\n beta = beta_static_lookup[data_file]\n ACs = [0] # init AT0 = 0\n t = 1\n EVs = [0]\n EAC_costs = [] # predict project duration\n start_test = False\n for period in tracking_periods:\n print(\"Tracking periods:\", period)\n cols = find_column_indices(dfs[period].values[1], [\n \"ID\", \"Actual Cost\", \"Earned Value (EV)\", \"Planned Value (PV)\"])\n data_period = dfs[period].values[2:, cols]\n assert (baselines[:, 0] == data_period[:, 0]).sum() == len(\n baselines), \"Wrong permutation!\"\n\n # current trend\n cur_AC = data_period[0, 1]\n ACs.append(cur_AC)\n T_AC = beta*(ACs[t] - ACs[t-1]) + (1-beta)*Ts_AC[t-1]\n Ts_AC.append(T_AC)\n\n EV = data_period[0, 2]\n PV = data_period[0, 3]\n EVs.append(EV)\n T_EV = beta*(EVs[t] - EVs[t-1]) + (1-beta)*Ts_EV[t-1]\n Ts_EV.append(T_EV)\n\n # if EV < PV and not start_test:\n # start_test = True\n # if start_test:\n # if t >= (len(tracking_periods)*1/2) and T_EV > 0:\n # if T_EV > 0:\n if t >= (len(tracking_periods)*2/3) and T_EV > 0:\n # if T_EV > 0:\n k = (BAC-EVs[t]) / T_EV\n EAC = ACs[t] + k * T_AC\n EAC_costs.append(EAC)\n print(\"Predict EAC:\", EAC)\n # end calculate\n t += 1\n print(\"Project actual costs: \", data_period[0, 1])\n mape, error = MAPE([ACs[-1]]*len(EAC_costs[:-1]), EAC_costs[:-1])\n print(\"MAPE: \", mape)\n return error, mape\n\n\nif __name__ == '__main__':\n if not os.path.isdir(\"figures\"):\n os.makedirs(\"figures\")\n fp = open(f\"logs/costs/{data_file}.log\", \"w+\")\n fp.write(f\"Dataset\\tDynamic\\n\")\n error_static, mape = cost_forecasting()\n fp.write(f\"{data_file}\\t{mape:.2f}\\n\")\n","repo_name":"LeDinhPhuc/XSM_09","sub_path":"costs.py","file_name":"costs.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"432155156","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.tree import export_text\n# from sklearn.externals.six import StringIO\nfrom sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier\nfrom sklearn.model_selection import train_test_split, KFold, cross_val_score # Import train_test_split function\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn 
import metrics,tree # Import scikit-learn metrics module for accuracy calculation\nimport graphviz\n# from sklearn.tree import export_graphviz\n# from IPython.display import Image\n# import pydotplus\n#\n\ndef faz_one():\n pol_fake = pd.read_csv(\"pol-fake.csv\")\n pol_real = pd.read_csv(\"pol-real.csv\")\n pol_real[\"real\"] = 1\n pol_fake[\"real\"] = 0\n\n result = pd.concat([pol_real, pol_fake])\n\n shuffled_resutl = result.iloc[np.random.permutation(len(result))].reset_index(drop=True)\n\n lable_name = list(shuffled_resutl.columns)\n\n feature_cols = lable_name[:-1]\n\n # split dataset in features and target variable\n x = shuffled_resutl[feature_cols] # Features\n y = shuffled_resutl.real # Target variable\n\n # Split dataset into training set and test set\n X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2,\n random_state=1) # 80% training and 20% test\n\n\n return [X_train, X_test, y_train, y_test, feature_cols], shuffled_resutl\n\n\ndef faz_two(input_list):\n print(\" faze two \")\n\n X_train, X_test, y_train, y_true, feature_cols = input_list\n\n # Train Decision Tree Classifer\n clf = DecisionTreeClassifier()\n\n # Train Decision Tree Classifer\n clf.fit(X_train, y_train)\n\n # Predict the response for test dataset\n y_pred = clf.predict(X_test)\n\n accuracy = metrics.accuracy_score(y_true, y_pred)\n print(\"Accuracy: \", accuracy)\n\n print(\"confusion matrix: \\n\", metrics.confusion_matrix(y_true, y_pred))\n\n print(\"precision: \", metrics.precision_score(y_true, y_pred))\n\n print(\"recall: \", metrics.recall_score(y_true, y_pred))\n\n print(\"F1: \", metrics.f1_score(y_true, y_pred))\n\n print(\"********************************\")\n \n dot_data = tree.export_graphviz(clf,out_file=\"graph1.dot\",\n feature_names=feature_cols,\n class_names=\"real\",\n filled=True, rounded=True,\n special_characters=True)\n\n graph = graphviz.Source(dot_data)\n \n try:\n graph.render(\"fazeTwoGraph\") \n \n except:\n print(export_text(clf,feature_names=feature_cols))\n \n\n\n\n\n print(\"_______________________________________________________________________________\")\n\n\n\n return accuracy\n\n\ndef faz_three(input_list):\n print(\" faze three \")\n\n X_train, X_test, y_train, y_true, feature_cols = input_list\n\n # Train Decision Tree Classifer\n clf = DecisionTreeClassifier(criterion=\"entropy\")\n\n # Train Decision Tree Classifer\n clf.fit(X_train, y_train)\n\n # Predict the response for test dataset\n y_pred = clf.predict(X_test)\n\n accuracy = metrics.accuracy_score(y_true, y_pred)\n print(\"Accuracy: \", metrics.accuracy_score(y_true, y_pred))\n\n print(\"confusion matrix: \\n\", metrics.confusion_matrix(y_true, y_pred))\n\n print(\"precision: \", metrics.precision_score(y_true, y_pred))\n\n print(\"recall: \", metrics.recall_score(y_true, y_pred))\n\n print(\"F1: \", metrics.f1_score(y_true, y_pred))\n\n \n print(\"********************************\")\n \n dot_data = tree.export_graphviz(clf,out_file=\"graph2.dot\",\n feature_names=feature_cols,\n class_names=\"real\",\n filled=True, rounded=True,\n special_characters=True)\n\n graph = graphviz.Source(dot_data)\n \n try:\n graph.render(\"fazeThreegraph\") \n \n except:\n print(export_text(clf,feature_names=feature_cols))\n \n \n \n print(\"_________________________________________________________________________________\")\n\n return accuracy\n\n\ndef faz_four(input_list):\n print(\" faze four \")\n\n kf = KFold(n_splits=10, shuffle=False, random_state=None)\n lable_name = list(input_list.columns)\n feature_cols 
= lable_name[:-1]\n X = input_list[feature_cols] # Features\n y = input_list.real # Target variable\n\n best_acuracy = 0\n best_depth = 0\n list_of_depth = []\n list_of_acurrcy = []\n for val in range(5, 21):\n score = cross_val_score(DecisionTreeClassifier(max_depth=val, random_state=None), X, y, cv=kf,\n scoring=\"accuracy\")\n\n list_of_depth.append(str(val))\n list_of_acurrcy.append(score.mean())\n if score.mean() > best_acuracy:\n best_acuracy = score.mean()\n best_depth = val\n\n print(\"best depth :\" + str(best_depth))\n print(\"it's acuracy :\" + str(best_acuracy))\n print(\"_____________________________________________________________________\")\n\n make_ser = pd.Series(list_of_acurrcy, index=list_of_depth)\n sns.barplot(x=make_ser, y=make_ser.index)\n # Add labels to your graph\n plt.xlabel('Tree depth')\n plt.ylabel('Accuracy')\n plt.title(\"Accuracy per decision tree depth on training data\")\n plt.legend()\n plt.show()\n\n return best_acuracy, best_depth\n\n\ndef faz_final(shuffled_resutl):\n lable_name = list(shuffled_resutl.columns)\n feature_cols = lable_name[:-1]\n\n # split dataset in features and target variable\n x = shuffled_resutl[feature_cols] # Features\n y = shuffled_resutl.real # Target variable\n\n # Split dataset into training set and test set\n X_train, X_test, y_train, y_true = train_test_split(x, y, test_size=0.2,\n random_state=1) # 80% training and 20% test\n\n # Create a Gaussian Classifier\n clf = RandomForestClassifier(n_estimators=100)\n\n # Train the model using the training sets y_pred=clf.predict(X_test)\n clf.fit(X_train, y_train)\n\n y_pred = clf.predict(X_test)\n\n accurecy = metrics.accuracy_score(y_true, y_pred)\n # print(\"Accuracy:\",accurecy )\n\n feature_imp = pd.Series(clf.feature_importances_, index=feature_cols).sort_values(ascending=False)\n\n sns.barplot(x=feature_imp, y=feature_imp.index)\n # Add labels to your graph\n plt.xlabel('Feature Importance Score')\n plt.ylabel('Features')\n plt.title(\"Visualizing Important Features\")\n plt.legend()\n plt.show()\n\n new_feature_cols = []\n number = 0\n\n feature_imp_dict = feature_imp.to_dict()\n\n for item in feature_imp_dict:\n\n if feature_imp_dict[item] >= 0.009:\n new_feature_cols.append(item)\n\n x = shuffled_resutl[new_feature_cols] # Features\n\n X_train, X_test, y_train, y_true = train_test_split(x, y, test_size=0.2,\n random_state=1) # 80% training and 20% test\n\n clf = RandomForestClassifier(n_estimators=100)\n clf.fit(X_train, y_train)\n\n y_pred = clf.predict(X_test)\n\n accurecy2 = metrics.accuracy_score(y_true, y_pred)\n # print(\"new Accuracy:\", accurecy2)\n\n return max(accurecy, accurecy2)\n\n\ndef main():\n result_of_faz, list_of_combine_two_csv = faz_one()\n\n accuracy_1 = faz_two(result_of_faz)\n #\n accuracy_2 = faz_three(result_of_faz)\n\n accuracy_3, depth = faz_four(list_of_combine_two_csv)\n\n acuracy_4 = faz_final(list_of_combine_two_csv)\n\n # compare the accuracy of Random Forest model with the accuracy of three former tasks\n\n acuracy_series = pd.Series([accuracy_1, accuracy_2, accuracy_3, acuracy_4],\n index=[\"decision tree Gini\", \"decision tree gain \",\n \"10-Fold with best depth \" + str(depth), \"Random Forest\"])\n\n sns.barplot(x=acuracy_series, y=acuracy_series.index)\n # Add labels to your graph\n plt.xlabel('Faze of project ')\n plt.ylabel('name of Faze')\n plt.title(\"compare the accuracy of Random Forest model with the accuracy of three former tasks\")\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n\n 
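# --- Editor's sketch: the depth-selection idea used in faz_four, shown standalone ---
# Illustrative only and not part of the original script; the data is synthetic
# (make_classification), so the snippet is self-contained and runnable as-is.
def _cv_depth_demo():
    from sklearn.datasets import make_classification
    from sklearn.model_selection import KFold, cross_val_score
    from sklearn.tree import DecisionTreeClassifier

    X, y = make_classification(n_samples=300, n_features=10, random_state=0)
    kf = KFold(n_splits=10, shuffle=False)

    best_depth, best_accuracy = None, 0.0
    for depth in range(5, 21):
        # Mean 10-fold accuracy for a tree capped at this depth
        scores = cross_val_score(DecisionTreeClassifier(max_depth=depth, random_state=0),
                                 X, y, cv=kf, scoring="accuracy")
        if scores.mean() > best_accuracy:
            best_depth, best_accuracy = depth, scores.mean()
    return best_depth, best_accuracy

# Calling _cv_depth_demo() returns (best max_depth, its mean cross-validated accuracy).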
","repo_name":"mehdiFeghhi/AI_Project_Find_Spam_Email","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6794631928","text":"import matplotlib\nmatplotlib.rcParams['pdf.fonttype'] = 42\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport numpy as np\n\nconfig = {\n 'a': {\n 'legends': ['MOB-TCP', 'Cellular-TCP', 'MOB-UDP', 'Cellular-UDP'],\n 'filename': 'tcp_vs_udp_dl.pdf'\n },\n 'b': {\n 'legends': ['RM', 'MOB'],\n 'filename': 'rm_vs_mob_udp_dl.pdf'\n },\n 'c': {\n 'legends': ['Uplink', 'Downlink'],\n 'filename': 'mob_udp_ul_vs_dl.pdf'\n }\n}\n\ndef throughputcdf(plot_type):\n folder_path_base = os.getcwd()\n all_throughputs = []\n\n cmap20 = plt.cm.tab20\n\n if plot_type == 'a':\n mob_tcp = pd.read_csv(os.path.join(folder_path_base, 'TCP', 'Downlink', 'mob_all_area.csv'))['throughput']\n cellular_tcp_files = [\n pd.read_csv(os.path.join(folder_path_base, 'TCP', 'Downlink', f))['throughput'] \n for f in ['att_all_area.csv', 'tm_all_area.csv', 'vz_all_area.csv']\n ]\n cellular_tcp = pd.concat(cellular_tcp_files, ignore_index=True)\n \n mob_udp = pd.read_csv(os.path.join(folder_path_base, 'UDP', 'Downlink', 'mob_all.csv'))['throughput']\n cellular_udp_files = [\n pd.read_csv(os.path.join(folder_path_base, 'UDP', 'Downlink', f))['throughput']\n for f in ['att_all.csv', 'tm_all.csv', 'vz_all.csv']\n ]\n cellular_udp = pd.concat(cellular_udp_files, ignore_index=True)\n \n all_throughputs.extend([mob_tcp, cellular_tcp, mob_udp, cellular_udp])\n colors = [cmap20(0), cmap20(4), cmap20(0), cmap20(4)]\n linestyles = ['--', '--', '-', '-']\n \n elif plot_type == 'b':\n rm_udp = pd.read_csv(os.path.join(folder_path_base, 'UDP', 'Downlink', 'rm_all.csv'))['throughput']\n mob_udp = pd.read_csv(os.path.join(folder_path_base, 'UDP', 'Downlink', 'mob_all.csv'))['throughput']\n \n all_throughputs.extend([rm_udp, mob_udp])\n colors = [cmap20(18), cmap20(0)]\n linestyles = ['dashdot', '-']\n \n elif plot_type == 'c':\n mob_udp_down = pd.read_csv(os.path.join(folder_path_base, 'UDP', 'Downlink', 'mob_all.csv'))['throughput']\n mob_udp_up = pd.read_csv(os.path.join(folder_path_base, 'UDP', 'Uplink', 'mob_all_area.csv'))['throughput']\n \n all_throughputs.extend([mob_udp_up, mob_udp_down])\n colors = [cmap20(2), cmap20(8)]\n linestyles = ['-', '-']\n\n\n fig, ax = plt.subplots(figsize=(6.4, 4.8))\n plt.xlim(0, 600)\n plt.ylim(0, 1.02)\n\n for idx, data in enumerate(all_throughputs):\n sorted_data = np.sort(data)\n count, bins_count = np.histogram(sorted_data, bins=np.unique(sorted_data).shape[0])\n cdf = np.cumsum(count) / len(sorted_data)\n plt.plot(bins_count[1:], cdf, label=config[plot_type]['legends'][idx], color=colors[idx], linestyle=linestyles[idx], linewidth=4)\n\n fzsize = 22\n ax.tick_params(axis='y', labelsize=fzsize)\n ax.tick_params(axis='x', labelsize=fzsize)\n ax.set_xlabel('Throughput (Mbps)', fontsize=fzsize)\n ax.set_ylabel('CDF', fontsize=fzsize)\n legend = ax.legend(prop={'size': 20})\n ax.set_xticks([0, 100, 200, 300, 400, 500, 600])\n ax.set_xticklabels(['0', '100', '200', '300', '400', '500', '600'])\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(os.path.join(folder_path_base, config[plot_type]['filename']))\n\nif __name__ == '__main__':\n throughputcdf('a')\n throughputcdf('b')\n 
throughputcdf('c')\n","repo_name":"Starlink-Project/Satellite-vs-Cellular","sub_path":"3-Thrpt-Comp/Fig3.py","file_name":"Fig3.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"11600922282","text":"import argparse\nfrom tkinter import BOTH, BOTTOM, TOP, Button, Canvas, Frame, Tk\n\nBOARDS = ['debug', 'n00b', 'l33t', 'error']\nMARGIN = 20\nSIDE = 50\nWIDTH = HEIGHT = MARGIN * 2 + SIDE * 9\n\n\nclass SudokuError(Exception):\n '''An application-specific error.'''\n pass\n\n\nclass SudokuBoard(object):\n '''Sudoku Board representation'''\n\n def __init__(self, board_file):\n self.board = __create_board(board_file)\n\n def __create_board(self, board_file):\n board = []\n for line in board_file:\n line = line.strip()\n if len(line) != 9:\n board = []\n raise SudokuError(\n \"Each line in the Sudoku board must be 9 chars long.\"\n )\n board.append([])\n for c in line:\n if not c.isdigit():\n raise SudokuError(\n \"Valid characters for a Sudoku puzzle must be in 0-9.\"\n )\n board[-1].append(int(c))\n if len(board) != 9:\n raise SudokuError(\n \"Each Sudoku puzzle must be 9 lines long.\"\n )\n return board\n\n\nclass SudokuGame(object):\n '''A Sudoku game, in charge of storying the state of the board and\n checking whether the puzzle is completed.'''\n\n def __init__(self, board_file):\n self.board_file = board_file\n self.start_puzzle = SudokuBoard(board_file).board\n\n def start(self):\n self.game_over = False\n self.puzzle = []\n for i in xrange(9):\n self.puzzle.append([])\n for j in xrange(9):\n self.puzzle[i].append(self.start_puzzle[i][j])\n\n def check_win(self):\n for row in xrange(9):\n if not self.__check_row(row):\n return False\n for column in xrange(9):\n if not self.__check_column(column):\n return False\n for row in xrange(3):\n for column in xrange(3):\n if not self.__check_square(row, column):\n return False\n self.game_over = True\n return True\n\n def __check_block(self, block):\n return set(block) == set(range(1, 10))\n\n def __check_row(self, row):\n return self.__check_block(self.puzzle[row])\n\n def __check_column(self, column):\n return self.__check_block(\n [self.puzzle[row][column] for row in xrange(9)]\n )\n\n def __check_square(self, row, column):\n return self.__check_block(\n [self.puzzle[r][c]\n for r in xrange(row * 3, (row + 1) * 3)\n for c in xrange(column * 3, (column + 1) * 3)\n ]\n )\n\n\nclass SudokuUI(Frame):\n '''tkinter ui, responsible for drawing the board and taking user input'''\n def __init__(self, parent, game):\n self.game = game\n self.parent = parent\n Frame.__init__(self, parent)\n self.row, self.col = 0, 0\n self.__initUI()\n\n def __initUI(self):\n self.parent.title(\"Sudoku\")\n self.pack(fill=BOTH, expand=1)\n self.canvas = Canvas(self, width=WIDTH, height=HEIGHT)\n self.canvas.pack(fill=BOTH, side=TOP)\n clear_button = Button(self, text='Clear answers',\n command=self.__clear_answers)\n clear_button.pack(fill=BOTH, side=BOTTOM)\n self.__draw_grid()\n self.__draw_puzzle()\n self.canvas.bind(\"\", self.__cell_clicked)\n self.canvas.bind(\"\", self.__key_pressed)\n\n def __draw_grid(self):\n '''Draws grid divided with blue lines into 3x3 squares'''\n for i in xrange(10):\n color = 'blue' if i % 3 == 0 else 'gray'\n x0 = MARGIN + i * SIDE\n y0 = MARGIN\n x1 = MARGIN + i * SIDE\n y1 = HEIGHT - 
MARGIN\n","repo_name":"katelouie/newcoder-projects","sub_path":"gui/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22345691509","text":"salary = 1800\nsalary -= 1\nif salary == 1800:\n print(\"笑哈哈\")\nelse:\n print(\"呜呜呜\")\n\nsalary = int(input(\"输入工资:\"))\nif salary < 1800:\n print(\"笑哈哈\")\n if salary < 100:\n print(\"爬\")\nelif salary < 10000:\n print(\"呜呜呜\")\nelse:\n print(\"死了\")\n\nimport random\nnum = random.randint(1, 10)\ninput1 = int(input(\"猜:\"))\nif input1 == num:\n print('第一次猜对了')\nelse:\n print('第一次猜错了')\n if input1 < num:\n print(\"小了\")\n else:\n print(\"大了\")\n input2 = int(input(\"猜第二次:\"))\n if input2 == num:\n print(\"第二猜对了\")\n else:\n print(\"第二次猜错了\")\n if input2 < num:\n print(\"小了\")\n else:\n print(\"大了\")\n input3 = int(input(\"猜第三次:\"))\n if input3 == num:\n print(\"第三次猜对了\")\n else:\n print(f\"傻逼,是{num}\")","repo_name":"lsy719/python-learn","sub_path":"03_Python判断/02_if.py","file_name":"02_if.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15305568656","text":"# -*- coding: utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport os\nfrom image_data_enum import ColumnName\n\n\nclass Utils:\n\n image_format = '.jpg'\n start_of_image_name = 'digikala-products/'\n download_folder = 'download'\n\n def __init__(self, url_categorylist):\n \"\"\"\n Args:\n url_categorylist (str/url) product list page url, help to craete corresponding directory\n \"\"\"\n self.page_list = []\n self.base_url = 'https://www.digistyle.com'\n self.full_links = []\n self.images = []\n\n \n\n self.category = self.find_name_between_this(url_categorylist, 'www.digistyle.com/', '/?pageno=', False)\n\n self.download_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.download_folder, self.category)\n if not os.path.exists(self.download_path):\n os.makedirs(self.download_path)\n\n self.columns = [ColumnName.NAME.value,\n ColumnName.IMAGE_RELETIVE.value, ColumnName.IMAGE_ABSOLUTE.value, ColumnName.IMAGE_URL.value,\n ColumnName.PRODUCT_ID.value, ColumnName.PRODUCT_URL.value]\n self.image_data = pd.DataFrame(columns=self.columns)\n \n def product_list_urls(self,url_category_list,max_page_number):\n '''\n you should input base url that you want to search and navigate hover on pages number\n \n NOTE:\n Don't input page number of products whene you want to initialize base_url it should be automated\n input number end page of product list \n '''\n self.page_list =[]\n \n for i in range(1,max_page_number):\n url = url_category_list+str(i)\n self.page_list.append(url)\n return self.page_list\n \n def page_reader(self,page_url):\n '''\n this two function give page url address and make soup for dinner.\n \n NOTE:\n be careful that maybe your soup is hot !!!\n '''\n page = requests.get(page_url)\n soup = BeautifulSoup(page.content, 'html.parser')\n return soup\n \n def product_extractor(self,page_url):\n '''\n this madule can extract every product segments on list page of some category\n '''\n soup = self.page_reader(page_url)\n product_paths = soup.findAll('a', {'class': 'c-product-item__image-wrapper'},href=True)\n print(\"number of products in this page are\", len(product_paths))\n return product_paths\n \n def product_link_extractor(self,a):\n '''\n this madule can extract links of products to navigate programe 
to product page\n '''\n #self.full_links = []\n product_link = a['href']\n full_link = self.base_url+product_link\n print('full_link', full_link)\n self.full_links.append(full_link)\n print(\"len(self.full_links)\", len(self.full_links))\n return full_link\n \n def product_page_images(self, product_page):\n '''\n this madule can extract images of product that stored on product single page\n '''\n #self.images = []\n # for product_page in self.full_links:\n print('[READING PAGE]\\n'+product_page)\n soup = self.page_reader(product_page)\n print('\\nok') \n # this div is the left side div with all the images thumbnail\n # and the name of the original image is the same as the thumbnail image\n div_image = soup.find('div',{'class':'c-swiper-quick-view-gallery__thumbs-container c-swiper-quick-view-gallery__thumbs-container--pdp'})\n # iterate over images in this div and change images url to a high quality image\n for image in div_image.findAll('img', {'class': 'c-product-item__image'}):\n src = image.get('src')\n # dl_image_src = src[:src.rfind('resize')] + \"resize,w_1600/quality,q_80\"\n dl_image_src = src[:src.rfind('resize')] + \"resize,h_500,w_500/quality,q_80\"\n self.images.append(dl_image_src)\n self.add_image_to_data_frame(product_page, dl_image_src)\n\n print(\"images number \", len(self.images))\n return self.images\n \n \n \n\n def add_image_to_data_frame(self, product_page, image_url):\n \"\"\"Add all the data about the current image to a dataFrame.\n data include in [self.columns]\n self.columns=['name', 'image_path', 'image_absolute', 'image_url', 'product_id', 'prdouct_url'].\n\n Args:\n product_page (str): current product being extraxted\n image_url (str): one of the image from the product_page\n\n \"\"\"\n\n\n \n # Find image name.\n # start of the name is in image url strat after digikala-product \n # adding the second part len(start_of_image) is necessary, it cause the image name start\n # after the variable [start_of_iamge_name] ends\n # end if image is the image format which is .jpg\n # find image name based on the start and end of the image name in url\n # image_name_start_index = image_url.find(start_of_image_name) + len(start_of_image_name)\n # image_name_end_index = image_url.find(image_format) + len(image_format)\n # image_name = image_url[image_name_start_index:image_name_end_index]\n image_name = self.find_name_between_this(image_url, self.start_of_image_name, self.image_format, True)\n\n # base on [image_name]\n absolute_path = os.path.join(self.download_path, image_name)\n reletive_path = os.path.join(self.download_folder, self.category, image_name)\n\n\n # product ID appeared in product_url,\n # this ID works only on digistyle url style\n product_page_product = 'product/'\n # prodcut_id_start = product_page.find() + len(product_page_product)\n # product_id_end = product_page.find('-')\n # prodcut_id = product_page[prodcut_id_start:product_id_end]\n prodcut_id = self.find_name_between_this(product_page, product_page_product, '-', False)\n\n # create a pd.Series from the data to add to the [self.image_data]\n sample = pd.Series(data={ColumnName.NAME.value: image_name, ColumnName.IMAGE_RELETIVE.value : reletive_path,\n ColumnName.IMAGE_ABSOLUTE.value: absolute_path, ColumnName.IMAGE_URL.value: image_url,\n ColumnName.PRODUCT_ID.value: prodcut_id, ColumnName.PRODUCT_URL.value: product_page})\n\n self.image_data = self.image_data.append(sample, ignore_index=True)\n print(\"imagedata len\", len(self.image_data))\n\n\n def find_name_between_this(self, string, start, end, 
inclue_end=False):\n \"\"\"From the given string find the world in between start and end. \n\n Parameters\n ---------- \n string : str \n string to performe slicing on it.\n start : str \n specify the start of slicing.\n end : str \n specify where the slicing should end.\n include_end : bool, default=False\n if end string should be included in the sliced substring.\n\n Returns\n -------\n str\n A sub string start from @start and end in @end.\n \"\"\"\n start_index = string.find(start) + len(start)\n end_index = string.find(end) \n if inclue_end == True:\n # add this cause slicing to add end string \n end_index += len(end)\n return string[start_index:end_index]","repo_name":"mahdisharifloo/crawlers","sub_path":"crawler/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7533,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"36730555276","text":"\r\n#The purpose of this profram is to create fitness app used to record the number of calories burned during different kinds of exercise.\r\n#Author: Prang Kongthongluck\r\n#Version: 2.0\r\n#Date: 06/12/2019\r\nage=()\r\nweight=()\r\n\r\nmets=()\r\nactivities=[]\r\nenergy=()\r\nenergylist=[]\r\ndurationlist=[]\r\n\r\ndef user():\r\n global weight\r\n global name\r\n global age\r\n #The porpuse of this loop is to allow user to confirm their datails\r\n while True:\r\n \r\n name=input(\"Please enter your name: \")\r\n #make user naem start with alphabet\r\n name=name.capitalize()\r\n age=input(\"Please enter your age: \")\r\n #make sure user input interger\r\n while True:\r\n try:\r\n weight=int(input(\"Please enter your weight: \"))\r\n break\r\n except ValueError:\r\n print(\"Please input weight as integer\")\r\n \r\n\r\n\r\n #purpose of this loop is to make sure user input collect input to weightUnit\r\n while True:\r\n weightUnit=input(\"Please choose unite of your weight you entered (P=pound/KG): \")\r\n weightUnit= weightUnit.upper()\r\n \r\n #convert weight from pounds in kg when user input weight in pounds)\r\n if (weightUnit==\"P\"):\r\n weight=weight*0.45\r\n print(\"your weight in kg: \",weight)\r\n break\r\n elif (weightUnit==\"KG\"):\r\n\r\n break\r\n else:\r\n print(\"Invalid input.PLease try again\")\r\n \r\n print (\"Costumer name: \",name)\r\n print(\"Costumer age: \",age,\"years\")\r\n print (\"Costumer weight: \",weight,\"kg\")\r\n\r\n confirmDetails=input(\"Confirm your information (Y/N)\")\r\n confirmDetails=confirmDetails.upper()\r\n \r\n if confirmDetails==\"Y\":\r\n print(\"your informations are confirmed\")\r\n break\r\n elif confirmDetails==\"N\":\r\n print(\"Please re-enter your details\") \r\n\r\nuser()\r\n\r\n\r\ndef MET():\r\n global selected\r\n global act\r\n global energy\r\n global duration\r\n \r\n try:\r\n file= open (\"mettable.txt\")\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n \r\n \r\n #use file to print out list of acttivites\r\n print(file.read())\r\n file.close()\r\n \r\n anothergo=\"Y\"\r\n #the purpose of this loop is to allow user to add more avtivity\r\n while (anothergo==\"Y\"): \r\n check=\"Y\"\r\n \r\n #check whether costumer enter right number for activity\r\n while check==\"Y\":\r\n #make sure user input interger\r\n while True:\r\n try:\r\n userselect=int(input(\"Please select your activity by input number located in front of met and activity name: \"))\r\n break\r\n except ValueError:\r\n print (\"Please input with integer\")\r\n \r\n #split each column in file\r\n with 
open(\"mettable.txt\") as file:\r\n for i in file:\r\n column=i.split(\",\")\r\n selected=column[0]\r\n met=column[1]\r\n act=column[2]\r\n if str(userselect)==selected:\r\n print(\"Your activity:\",act)\r\n activities.append(act)\r\n #make sure user input interger\r\n while True:\r\n try:\r\n duration=int(input(\"How long you do the activity(in minute): \"))\r\n break\r\n except ValueError:\r\n print (\"Please input with integer\")\r\n \r\n durationlist.append(duration)\r\n \r\n #Energy expenditure calculation\r\n energy=float(0.0175*(float(met))* weight)\r\n \r\n #add energy in to energy list to help calculate total energ at the end\r\n energylist.append(energy)\r\n\r\n print(\"Your energy expenditure for\",act,\"is \",energy,\"cal/min\")\r\n \r\n \r\n \r\n if (userselect <0) or (userselect>72):\r\n print(\"Incorrect input, please try again\")\r\n elif (userselect >0) or (userselect<=72):\r\n break\r\n else:\r\n print(\"Incorrect input, please try again\")\r\n\r\n\r\n\r\n anothergo=input(\"Do you want to add more activities(Y/N): \")\r\n anothergo=anothergo.upper()\r\n if (anothergo==\"Y\"):\r\n print(\"Add activities\")\r\n elif (anothergo==\"N\"):\r\n break\r\n else:\r\n print(\"Invalid input. Please try again\")\r\n \r\n\r\n\r\n \r\nMET()\r\n\r\n\r\n\r\n\r\n#this function will help print out activity summary\r\ndef activity(a,b,c):\r\n x=0\r\n num=1\r\n for i in range(len(activities)):\r\n print(\"Activity \",num,\": \",a[x])\r\n print(\"Duration: \",b[x])\r\n print(\"Energy expenditure: \",c[x]) \r\n x +=1\r\n num +=1\r\n\r\n\r\n\r\ndef summary():\r\n print(\"\")\r\n print(\"\")\r\n print(\"Your record\")\r\n \r\n\r\n print (\"Costumer name: \",name)\r\n print(\"Costumer age: \",age,\"years\")\r\n print (\"Costumer weight: \",weight,\"kg\")\r\n #call function activity\r\n activity(activities,durationlist,energylist) \r\n \r\n \r\n totalcal=sum(energylist)\r\n print(\"\")\r\n print(\"Your total cal burn: \",totalcal,\"cal/min\")\r\n \r\n #max() print out the maximum value in the list\r\n print(\"Your maximum engergy expenditure: \",max(energylist),\"cal/min\")\r\n\r\n\r\nsummary()\r\n","repo_name":"Challip/Food-Delivery","sub_path":"programing assignment/Program 2/Python program/PythonApplication1/PythonApplication1.py","file_name":"PythonApplication1.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36617776915","text":"import numpy as np\nfrom power_planner.utils.utils import bresenham_line\n\n\nclass ConstraintUtils():\n\n @staticmethod\n def shift_surface_old(costs, shift):\n \"\"\"\n Shifts a numpy array and pads with zeros\n :param costs: 2-dim numpy array\n :param shift: tuple of shift in x and y direction\n (negative value for left / up shift)\n :returns shifted array of same size\n \"\"\"\n if shift[0] < 0:\n tup1 = (0, -shift[0])\n else:\n tup1 = (shift[0], 0)\n if shift[1] < 0:\n tup2 = (0, -shift[1])\n else:\n tup2 = (shift[1], 0)\n\n costs_shifted = np.pad(costs, (tup1, tup2), mode='constant')\n\n if shift[0] > 0 and shift[1] > 0:\n costs_shifted = costs_shifted[:-shift[0], :-shift[1]]\n elif shift[0] > 0 and shift[1] <= 0:\n costs_shifted = costs_shifted[:-shift[0], -shift[1]:]\n elif shift[0] <= 0 and shift[1] > 0:\n costs_shifted = costs_shifted[-shift[0]:, :-shift[1]]\n elif shift[0] <= 0 and shift[1] <= 0:\n costs_shifted = costs_shifted[-shift[0]:, -shift[1]:]\n\n return costs_shifted\n\n @staticmethod\n def shift_surface(costs, shift, fill_val=0):\n \"\"\"\n 
Shifts a numpy array and pads with zeros\n :param costs: 2-dim numpy array\n :param shift: tuple of shift in x and y direction\n BUT: ONLY WORKS FOR (+,+) or (+,-) shift tuples\n :returns shifted array of same size\n \"\"\"\n rolled_costs = np.roll(costs, shift, axis=(0, 1))\n if shift[0] >= 0:\n rolled_costs[:shift[0], :] = fill_val\n else:\n rolled_costs[shift[0]:, :] = fill_val\n if shift[1] >= 0:\n rolled_costs[:, :shift[1]] = fill_val\n else:\n rolled_costs[:, shift[1]:] = fill_val\n return rolled_costs\n\n @staticmethod\n def get_kernel(shifts, shift_vals):\n \"\"\"\n Get all kernels describing the path of the edges in a discrete raster\n :param shifts: possible circle points\n :returns kernel: all possible kernels\n shape: (number of circle points x upper x upper)\n :returns posneg: a list indicating whether it is a path to the left =1\n or to the right =0\n \"\"\"\n upper = np.amax(np.absolute(shifts)) + 1\n posneg = []\n kernel = np.zeros((len(shifts), upper, upper))\n\n max_val = np.max(shift_vals)\n\n for i, shift in enumerate(shifts):\n if shift[1] < 0:\n posneg.append(1)\n line = bresenham_line(\n 0, upper - 1, shift[0], upper - 1 + shift[1]\n )\n else:\n posneg.append(0)\n line = bresenham_line(0, 0, shift[0], shift[1])\n # add points of line to the kernel\n normed_val = shift_vals[i] / (len(line) * max_val)\n for (j, k) in line:\n kernel[i, j, k] = normed_val\n return kernel, posneg\n\n @staticmethod\n def convolve_faster(img, kernel, neg):\n \"\"\"\n Convolve a 2d img with a kernel, storing the output in the cell\n corresponding the the left or right upper corner\n :param img: 2d numpy array\n :param kernel: kernel (must have equal size and width)\n :param neg: if neg=0, store in upper left corner, if neg=1,\n store in upper right corner\n :return convolved image of same size\n \"\"\"\n k_size = len(kernel)\n # a = np.pad(img, ((0, k_size-1), (0, k_size-1)))\n if neg:\n padded = np.pad(img, ((0, k_size - 1), (k_size - 1, 0)))\n else:\n padded = np.pad(img, ((0, k_size - 1), (0, k_size - 1)))\n\n s = kernel.shape + tuple(np.subtract(padded.shape, kernel.shape) + 1)\n strd = np.lib.stride_tricks.as_strided\n subM = strd(padded, shape=s, strides=padded.strides * 2)\n return np.einsum('ij,ijkl->kl', kernel, subM)\n\n @staticmethod\n def convolve(img, kernel, neg=0):\n \"\"\"\n Convolve a 2d img with a kernel, storing the output in the cell\n corresponding the the left or right upper corner\n :param img: 2d numpy array\n :param kernel: kernel (must have equal size and width)\n :param neg: if neg=0, store in upper left corner, if neg=1,\n store in upper right corner\n :return convolved image of same size\n \"\"\"\n k_size = len(kernel)\n if neg:\n padded = np.pad(img, ((0, k_size - 1), (k_size - 1, 0)))\n else:\n padded = np.pad(img, ((0, k_size), (0, k_size)))\n # print(padded.shape)\n convolved = np.zeros(img.shape)\n w, h = img.shape\n for i in range(0, w):\n for j in range(0, h):\n patch = padded[i:i + k_size, j:j + k_size]\n convolved[i, j] = np.sum(patch * kernel)\n return convolved\n","repo_name":"NinaWie/PowerPlanner","sub_path":"power_planner/utils/utils_constraints.py","file_name":"utils_constraints.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"33066153779","text":"import viewPer\nimport model\n#import view_aeronave\n#import viewTorre\nfrom PIL import Image\nimport streamlit as st\nimport time\n\nclass viewGeneral:\n def __init__(self):\n self.persona = 
viewPer.viewPer()\n #self.nave = view_aeronave\n #self.torre = viewTorre\n\n\n def cambio(self, tri):\n data = []\n for num in tri:\n t = tri[num]\n data.append(t.id)\n Id = st.selectbox('Seleccione el ID de la tarea a completar:', data)\n boton= st.button(\"Marcar como asignada\", type=\"primary\")\n if boton:\n return Id\n def menu(self):\n option= st.sidebar.selectbox(\"Selecciona una opcion\",[\"selecciona\",\"Reservar\",\"Registro como Pasajero\",\"Registro como Tripulante\",\"Ver info pasajeros\",\"Ver info tripulación\",\"crear aeronave\",\"ver info de aeronaves\",\"Asignar tripulacion\"])\n return option\n\n def registrarPasajero(self):\n p = self.persona.datosPasajeros()\n return p\n\n\n def registrarTripulante(self):\n t = self.persona.datosTripulacion()\n return t\n\n\n def iniciar_sesion(self,liPas,liTri):\n sel = st.selectbox(\"Selecciona uno: \",['Pasajero','Tripulación'])\n if(sel == 'Pasajero'):\n ingresa = st.number_input(\"Ingresa el ID usado: \")\n with st.spinner(\"Comprobando...\"):\n if liPas[ingresa]:\n time.sleep(3)\n st.success('Entraste!')\n else:\n st.error(\"No se encontro en la base, debe registrarse \")\n else:\n ingresa = st.number_input(\"Ingresa el ID usado: \")\n with st.spinner(\"Comprobando...\"):\n if liTri[ingresa]:\n time.sleep(3)\n st.success('Entraste!')\n else:\n st.error(\"No se encontro en la base, debe registrarse \")\n\n\n\n def verInfoPasajeros(self, lp):\n st.header(\"Ver info pasajeros\")\n st.write(\"Informacion primordial del pasajero\")\n informacion = []\n for info in lp:\n pasa = lp[info]\n informacion.append(\n {\"ID\": pasa.id,\"Nombre \": pasa.name,\"Telefono \": pasa.telefono,\"Maletas\": pasa.maleta, \"Estado\": pasa.state})\n if informacion:\n st.table(informacion)\n else:\n st.info(\"Todavia no hay pasajeros registrados\")\n\n def verInfoTripulacion(self, lt,asi,cnt):\n st.header(\"Ver info tripulantes\")\n st.write(\"Información primordial del tripulante\")\n informacion = []\n cntl=0\n for info in lt:\n if cnt is not None and cnt-1 == cntl:\n tripu = lt[info]\n tripu.state = asi\n informacion.append(\n {\"ID\": tripu.id, \"Nombre \": tripu.name, \"Telefono \": tripu.telefono, \"Puesto\": tripu.puesto,\n \"Estado\": tripu.state})\n\n else:\n tripu = lt[info]\n informacion.append(\n {\"ID\": tripu.id, \"Nombre \": tripu.name, \"Telefono \": tripu.telefono, \"Puesto\": tripu.puesto,\n \"Estado\": tripu.state})\n cntl += 1\n\n\n if informacion:\n st.table(informacion)\n\n def pedirId(self):\n id = st.number_input(\"Ingresa tu id\",min_value= 1 ,step=1)\n return id\n\n def pedirId2(self):\n number = st.slider(\"Ingresa la posición de la nave que desee\", min_value=1, max_value=10)\n return number\n\n\n def cualNave(self):\n sel= st.selectbox(\"En cúal nave deseas reservar:\", [\"Avión\", \"Helicóptero\", \"Jet\"])\n return sel\n\n def error(self):\n #st.info(\"Por el momento no hay aeronaves disponibles\")\n st.info(\"Por el momento no se puede reservar\")\n\n def fondo(self):\n st.header(\"Aeropuerto Internacional Alfonso Bonilla Aragón\")\n st.write(\"Una experiencia para contar :helicopter: :airplane_departure:\")\n img = Image.open('aeropuertoAlfonso.jpg')\n st.image(img,caption=\"Aeropuerto Internacional Alfonso Bonilla Aragón\",width=735)\n st.link_button(\"Ver historia y la actualidad\",\"https://www.i-torrestrella.com/aeropuerto-alfonso-bonilla-aragon-de-cali-historia-y-situacion-actual/\")\n\n def seleccionaCategoria(self,nave):\n if(nave == \"Avión\"):\n sele = st.selectbox(\"Selecciona categoria de 
Avión\",[\"Carga\",\"Transporte\",\"Militar\"])\n return sele\n elif(nave == \"Helicóptero\"):\n sele = st.selectbox(\"Selecciona categoria de Helicóptero\",[\"Rescate\", \"Turismo\", \"Transporte\",\"Fuerza policial\", \"Ambulancia\" ])\n return sele\n\n def marca(self):\n mar = st.selectbox(\"Selecciona la marca de la aeronave\",[\"ArgueFlight\",\"GIAL Air\",\"Latem\",\"AC Air\",\"AFGM\"])\n return mar\n\n def nave(self):\n nav = st.selectbox(\"Selecciona la nave que se desee crear\",[\"Avión\",\"Helicóptero\",\"Jet\"])\n return nav\n\n def verInfoNaves(self, nave):\n st.header(\"Ver informacion de aeronaves\")\n st.write(\"Información primordial de la aeronave\")\n informacion = []\n for info in nave:\n n = nave[info]\n informacion.append({\"ID\": n.id,\"Tipo de nave\": n.tipo,\"Año de viejo\": n.yearFab,\"Estado\": n.estado,\"Marca\": n.marca,\"Modelo\": n.modelo,\"Destino\": n.destino,\"Sillas\": n.per,\"Categoria\": n.categoria,\"Propietario\": n.propietario })\n if informacion:\n st.table(informacion)\n \n def elemTorre(self):\n sel = st.selectbox(\"Que deseas hacer en torre\", ['generarPuertas','ubicarPuerta','despegarAeronave','borrarAeronave','agregarHist'])\n return sel\n \n def im(self):\n st.info(\"Se ha generado la puerta\")\n \n def propi(self):\n nombre= st.text_input(\"Ingresa nombre de propietario\")\n if not nombre:\n st.error(\"Debes ingreasar un nombre\")\n else:\n return nombre\n","repo_name":"isabel-olivero/proyecto-2","sub_path":"avances/viewGeneral.py","file_name":"viewGeneral.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43163504561","text":"#!/usr/bin/env python3\n\nimport os\nimport glob\nfrom file_manage import *\n\ndef samDict(sam_file):\n\n\tif \"R1\" in sam_file[0]:\n\t\topen_file=sam_file[0]\n\telse:\t\n\t\topen_file=sam_file[1]\n\n\tsam_file=open(open_file,\"r\")\n\tsam_lines=sam_file.readlines()\n\tsam_lines_number=len(sam_lines)\n\tline_index=0\n\tsam_dic={}\n\twhile line_index < sam_lines_number:\n\t\tsam_dic[line_index]=[sam_lines[line_index],\n\t\t\t\t\t\t\tsam_lines[line_index+1],\n\t\t\t\t\t\t\tsam_lines[line_index+2],\n\t\t\t\t\t\t\tsam_lines[line_index+3]\n\t\t\t\t\t\t\t]\n\t\tline_index=4+line_index\n\tsam_file.close()\n\treturn sam_dic\n\ndef fullDemux(partialDemux_path,sam_R1_dic,sam_R2_dic,out_path):\n\t\n\t# Remuevo los archivos del directorio de salida con formato fastq\n\tremoveTypeFile(\"fastq\",out_path)\t\n\n\t# Leo los archivos fastq de PartialDemux\n\tfastq_partialDemux_files=(glob.glob(partialDemux_path+\"/*.fastq\"))\n\n\t# Creo los full capture files\n\tfor fastq in fastq_partialDemux_files:\n\t\n\t\tfastq_file=open(fastq,\"r\")\n\t\tfastq_lines=fastq_file.readlines()\n\t\tfastq_file.close()\n\t\tfastq_number_of_lines=len(fastq_lines)\n\n\t\t\n\t\tfastq_name=fastq.split(\"/\")[-1]\n\t\tfastq_name=fastq_name[:-24]\t\t\t\n\t\t\n\t\tfullcapture_R1_file=open(out_path+fastq_name+\"_R1\"+\".fastq\",\"a\")\n\t\tfullcapture_R2_file=open(out_path+fastq_name+\"_R2\"+\".fastq\",\"a\")\n\t\t\n\t\tline_index=2\t\n\t\twhile line_index < 
fastq_number_of_lines:\n\t\t\t\n\t\t\tread_index=fastq_lines[line_index].rstrip(\"\\n\")\n\t\t\tcapture_R1=sam_R1_dic[int(read_index)]\n\t\t\t\t\t\t\n\t\t\tfullcapture_R1_file.write(capture_R1[0])\n\t\t\tfullcapture_R1_file.write(capture_R1[1][28:])\n\t\t\tfullcapture_R1_file.write(capture_R1[2])\n\t\t\tfullcapture_R1_file.write(capture_R1[3][28:])\n\t\t\t\n\t\t\tcapture_R2=sam_R2_dic[int(read_index)]\n\t\t\t\n\t\t\tfullcapture_R2_file.write(capture_R2[0])\n\t\t\tfullcapture_R2_file.write(capture_R2[1][28:])\n\t\t\tfullcapture_R2_file.write(capture_R2[2])\n\t\t\tfullcapture_R2_file.write(capture_R2[3][28:])\n\t\t\n\t\t\tline_index=line_index+3\t\t\n\t\t\n\t\t\t\n\t\tfullcapture_R1_file.close()\n\t\tfullcapture_R2_file.close()\n\n\treturn 0\n","repo_name":"AgustinPardo/demultiplex","sub_path":"full_capture.py","file_name":"full_capture.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42439980742","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sys\nimport os\nfrom glob import glob\n\ndef get_energy_of_one_prediction(pdbqt):\n with open(pdbqt,'r') as f:\n data = [line for line in f if \"REMARK INTER + INTRA\" in line]\n \n num_of_predictions = len(data)\n energies = np.zeros((num_of_predictions, 1))\n for i in range(num_of_predictions):\n line = data[i]\n energies[i] = float(line.split()[4])\n \n return energies\n\ndef plot_all_energies(all_energies):\n prediction_index = np.array([])\n counter = 0\n for energy in all_energies:\n energy_len = len(energy)\n prediction_index = np.append(prediction_index, np.repeat(counter, energy_len))\n counter += 1\n all_energies = np.concatenate(all_energies)\n plt.figure()\n plt.title(\"AutoDock Vina Energies Per Site\")\n plt.ylabel(\"Energy [kcal/mol]\")\n plt.ylim([-3, 3])\n plt.xlabel(\"Docking site number\")\n plt.scatter(prediction_index, all_energies)\n plt.show()\n\nif __name__ == \"__main__\":\n prediction_path = sys.argv[1]\n prediction_files = sorted(glob(prediction_path + \"/point_*.pdbqt\"), key=os.path.getmtime)\n all_energies = []\n for pdbqt in prediction_files:\n one_energy = get_energy_of_one_prediction(pdbqt)\n all_energies.append(one_energy)\n\n plot_all_energies(all_energies)\n","repo_name":"wang-py/dowser-caver-project","sub_path":"plot_vina_energy.py","file_name":"plot_vina_energy.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39055944543","text":"#!/usr/bin/python3\n# 100-weight_average.py\n\ndef weight_average(my_list=[]):\n \"\"\"calculate the weighted average of all integers\"\"\"\n if not my_list:\n return 0\n if all([len(x) == 2 for x in my_list]):\n total = 0\n n = 0\n for i, j in dict(my_list).items():\n total += i * j\n n += j\n return total / n\n return 0\n","repo_name":"stephenoba/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/100-weight_average.py","file_name":"100-weight_average.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9904495241","text":"from bs4 import BeautifulSoup\nimport re\n\nsoup = BeautifulSoup(open('findme.txt'), \"html.parser\")\n\n#for email in soup.find_all('table'):\n# print email\n\n\n\ndef get_table_data():\n data = []\n table = soup.find('table')\n table_body = table.find('tbody')\n\n rows = 
table_body.find_all('tr')\n\n for row in rows:\n cols = row.find_all('td')\n tmp = []\n for col in cols:\n if col.find('a'):\n stuff = col.find('a')['href']\n try:\n result = re.match('^mailto:(.*)', stuff)\n tmp.append(result.group(1).strip('\\n'))\n continue\n except:\n tmp.append(stuff.strip('\\n'))\n continue\n else:\n tmp.append(col.text.strip('\\n'))\n\n \n data.append(tmp)\n\n return data\n\ndef write_table_data(table_list):\n f = open('creators.csv', 'a')\n for row in table_list:\n #a row is a record, remove newlines and write to csv\n tmp = [x.replace('\\n', '').encode('utf-8') for x in row]\n line = \"|\".join(tmp)\n f.write(line + '\\n')\n\n \n\n\ndata = get_table_data()\nwrite_table_data(data)\n","repo_name":"ivanwakeup/pycrawl","sub_path":"crawler/findme.py","file_name":"findme.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41494362352","text":"__author__ = 'Sumit Sharma'\n__copyright__ = 'Copyright 2022, Luna2 Project'\n__license__ = 'GPL'\n__version__ = '2.0'\n__maintainer__ = 'Sumit Sharma'\n__email__ = 'sumit.sharma@clustervision.com'\n__status__ = 'Development'\n\nfrom utils.database import Database\nfrom utils.log import Log\nfrom utils.helper import Helper\nfrom utils.config import Config\nfrom utils.service import Service\nfrom utils.model import Model\n\n\nclass OtherDev():\n \"\"\"\n This class is responsible for all operations for other devices.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n This constructor will initialize all required variables here.\n \"\"\"\n self.logger = Log.get_logger()\n self.table = 'otherdevices'\n self.table_cap = 'Other Device'\n\n def get_all_otherdev(self):\n \"\"\"\n This method will return all the other devices in detailed format.\n \"\"\"\n status, response = Model().get_record(\n table = self.table,\n table_cap = self.table_cap,\n ip_check = True,\n new_table = 'otherdev'\n )\n return status, response\n\n\n def get_otherdev(self, name=None):\n \"\"\"\n This method will return requested other device in detailed format.\n \"\"\"\n status, response = Model().get_record(\n name = name,\n table = self.table,\n table_cap = self.table_cap,\n ip_check = True,\n new_table = 'otherdev'\n )\n return status, response\n\n\n def update_otherdev(self, name=None, request_data=None):\n \"\"\"\n This method will create or update a other device.\n \"\"\"\n status=False\n data, response = {}, \"\"\n create, update = False, False\n if request_data:\n data = request_data['config']['otherdev'][name]\n data['name'] = name\n device = Database().get_record(table=self.table, where=f' WHERE `name` = \"{name}\"')\n if device:\n device_id = device[0]['id']\n if 'newotherdevname' in request_data['config']['otherdev'][name]:\n data['name'] = data['newotherdevname']\n del data['newotherdevname']\n update = True\n else:\n create = True\n device_columns = Database().get_columns(self.table)\n ipaddress, network = None, None\n if 'ipaddress' in data.keys():\n ipaddress = data['ipaddress']\n del data['ipaddress']\n if 'network' in data.keys():\n network = data['network']\n del data['network']\n column_check = Helper().compare_list(data, device_columns)\n data = Helper().check_ip_exist(data)\n if data:\n row = Helper().make_rows(data)\n if column_check:\n if create:\n device_id = Database().insert(self.table, row)\n response = f'Device {name} created successfully'\n status=True\n if update:\n where = [{\"column\": \"id\", \"value\": device_id}]\n 
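                        # `row` (built by make_rows) carries the column/value pairs, and
                        # the WHERE clause pins the UPDATE to the matched device id; the
                        # create branch above instead receives the new id from insert().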
Database().update(self.table, row, where)\n response = f'Device {name} updated successfully'\n status=True\n else:\n status=False\n return status, 'Invalid request: Columns are incorrect'\n # Antoine --->> interface(s) update/create -------------\n if ipaddress or network:\n result, message = Config().device_ipaddress_config(\n device_id,\n self.table,\n ipaddress,\n network\n )\n if result is False:\n response = f'{message}'\n status=False\n else:\n Service().queue('dhcp','restart')\n Service().queue('dns','restart')\n return status, response\n else:\n response = 'Invalid request: Did not receive data'\n status=False\n return status, response\n\n\n def clone_otherdev(self, name=None, request_data=None):\n \"\"\"\n This method will clone a other device.\n \"\"\"\n status=False\n data, response = {}, \"\"\n create = False\n ipaddress, networkname = None, None\n if request_data:\n data = request_data['config']['otherdev'][name]\n if 'newotherdevname' in data:\n data['name'] = data['newotherdevname']\n newotherdevname = data['newotherdevname']\n del data['newotherdevname']\n else:\n status=False\n return status, 'Invalid request: New device name not provided'\n where = f' WHERE `name` = \"{newotherdevname}\"'\n device = Database().get_record(table=self.table, where=where)\n if device:\n status=False\n return status, f'{newotherdevname} already present in database'\n else:\n create = True\n ipaddress, network = None, None\n if 'ipaddress' in data:\n ipaddress = data['ipaddress']\n del data['ipaddress']\n if 'network' in data:\n networkname=data['network']\n del data['network']\n device_columns = Database().get_columns(self.table)\n column_check = Helper().compare_list(data, device_columns)\n if data:\n if column_check:\n if create:\n where=f' WHERE `name` = \"{name}\"'\n device = Database().get_record(table=self.table, where=where)\n if not device:\n status = False\n return status, f\"Source device {name} does not exist\"\n del device[0]['id']\n for key in device[0]:\n if key not in data:\n data[key] = device[0][key]\n\n row = Helper().make_rows(data)\n device_id = Database().insert(self.table, row)\n if not device_id:\n status=False\n return status, 'Device not cloned due to clashing config'\n status=True\n network = None\n if networkname:\n network = Database().get_record_join(\n [\n 'ipaddress.ipaddress',\n 'ipaddress.networkid as networkid',\n 'network.network',\n 'network.subnet'\n ],\n ['network.id=ipaddress.networkid'],\n [f\"network.name='{networkname}'\"]\n )\n else:\n network = Database().get_record_join(\n [\n 'ipaddress.ipaddress',\n 'ipaddress.networkid as networkid',\n 'network.name as networkname',\n 'network.network',\n 'network.subnet'\n ],\n [\n 'network.id=ipaddress.networkid',\n 'ipaddress.tablerefid=otherdevices.id'\n ],\n [f'otherdevices.name=\"{name}\"', 'ipaddress.tableref=\"otherdevices\"']\n )\n if network:\n networkname = network[0]['networkname']\n if not ipaddress:\n if not network:\n where = f' WHERE `name` = \"{networkname}\"'\n network = Database().get_record(None, 'network', where)\n if network:\n networkname = network[0]['networkname']\n if network:\n ips = Config().get_all_occupied_ips_from_network(networkname)\n ret, avail = 0, None\n max_count = 10\n # we try to ping for 10 ips, if none of these are free, something\n # else is going on (read: rogue devices)....\n while(max_count > 0 and ret != 1):\n avail = Helper().get_available_ip(\n network[0]['network'],\n network[0]['subnet'],\n ips\n )\n ips.append(avail)\n _, ret = Helper().runcommand(f\"ping -w1 -c1 
{avail}\", True, 3)\n max_count -= 1\n if avail:\n ipaddress = avail\n else:\n status=False\n return status, 'Invalid request: Network and ipaddress not provided'\n result, message = Config().device_ipaddress_config(\n device_id,\n self.table,\n ipaddress,\n networkname\n )\n if result is False:\n where = [{\"column\": \"id\", \"value\": device_id}]\n Database().delete_row(self.table, where)\n # roll back\n status=False\n response = f'{message}'\n else:\n Service().queue('dhcp', 'restart')\n Service().queue('dns', 'restart')\n response = 'Device created'\n else:\n response = 'Invalid request: Columns are incorrect'\n status=False\n else:\n response = 'Invalid request: Not enough details to create the device'\n status=False\n else:\n response = 'Invalid request: Did not receive data'\n status=False\n return status, response\n\n\n def delete_otherdev(self, name=None):\n \"\"\"\n This method will delete a other device.\n \"\"\"\n status, response = Model().delete_record(\n name = name,\n table = self.table,\n table_cap = self.table_cap,\n ip_check = True\n )\n return status, response\n","repo_name":"clustervision/luna2-daemon","sub_path":"daemon/base/otherdev.py","file_name":"otherdev.py","file_ext":"py","file_size_in_byte":11203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36828802145","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef nothing(x):\n pass\n\n\ncv2.namedWindow('control')\nkernel = np.ones((4,4), np.uint8)\n\n# Hue\nlh = 23\nhh = 88\ncv2.createTrackbar('lh','control',0,255,nothing)\ncv2.setTrackbarPos('lh', 'control', lh)\ncv2.createTrackbar('hh','control',0,255,nothing)\ncv2.setTrackbarPos('hh', 'control', hh)\n\n#Saturation \nls = 36\nhs = 255\ncv2.createTrackbar('ls','control',0,255,nothing)\ncv2.setTrackbarPos('ls', 'control', ls)\ncv2.createTrackbar('hs','control',0,255,nothing)\ncv2.setTrackbarPos('hs', 'control', hs)\n\nlv = 202\nhv = 255\ncv2.createTrackbar('lv','control',0,255,nothing)\ncv2.setTrackbarPos('lv', 'control', lv)\ncv2.createTrackbar('hv','control',0,255,nothing)\ncv2.setTrackbarPos('hv', 'control', hv)\n\ncap = cv2.VideoCapture(0)\nwhile(1):\n\tret, frame = cap.read()\n\thsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\t# get all positions\n\tlowH=cv2.getTrackbarPos('lh', 'control')\n\thighH=cv2.getTrackbarPos('hh', 'control')\n\tlowS=cv2.getTrackbarPos('ls', 'control')\n\thighS=cv2.getTrackbarPos('hs', 'control')\n\tlowV=cv2.getTrackbarPos('lv', 'control')\n\thighV=cv2.getTrackbarPos('hv', 'control')\n\tlower = np.array([lowH,lowS,lowV])\n\tupper = np.array([highH,highS,highV])\n\tth = cv2.inRange(hsv, lower, upper)\n\topening = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)\n\tclosing = cv2.morphologyEx(opening, cv2.MORPH_OPEN, kernel)\n\t\n\t#calculate moments\n\t#contours, hierarchy = cv2.findContours(closing, 1, 2)\n\tres, contours, hierarchy = cv2.findContours(closing, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\tif contours:\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcX = int(M[\"m10\"] / M[\"m00\"])\n\t\tcY = int(M[\"m01\"] / M[\"m00\"])\n\t\tcv2.circle(frame, (cX, cY),10, (0,0,255), 2)\n\tcv2.imshow('video',frame)\n\tcv2.imshow('detector', closing)\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\tbreak\n\t\t\n\t\t\n# Release everything if job is 
finished\ncap.release()\n\ncv2.destroyAllWindows()\n\n","repo_name":"jorgelopezrivas/opencv-learning","sub_path":"color-detect.py","file_name":"color-detect.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20871426182","text":"from csv import DictWriter\nimport cPickle as pickle\n\n\npfields = ['id', 'title', 'nickname', 'fname', 'mname', 'lname', 'suffix']\npgmfields = ['person_id', 'program']\ndivfields = ['person_id', 'division']\n\npfile = open('people.csv', 'w')\npwriter = DictWriter(pfile, fieldnames=pfields, extrasaction='ignore')\n\npgmfile = open('people-in-programs.csv', 'w')\npgmwriter = DictWriter(pgmfile, fieldnames=pgmfields, extrasaction='ignore')\n\ndivfile = open('people-in-divisions.csv', 'w')\ndivwriter = DictWriter(divfile, fieldnames=divfields, extrasaction='ignore')\n\nwriters = [pwriter, pgmwriter, divwriter]\nfor writer in writers:\n writer.writeheader()\n\n\ndef gen_person(people):\n for person in people:\n person.__dict__['person_id'] = person.id\n yield person.__dict__\n\ndef people_to_csv(people):\n for person in gen_person(people):\n pwriter.writerow(person)\n divwriter.writerow(person)\n for pgm in person['programs']:\n pgmwriter.writerow({'person_id': person['id'], 'program': pgm})\n\n\nif __name__ == \"__main__\":\n with open('people.pickle', 'r') as f:\n people = pickle.load(f)\n people_to_csv(people)\n","repo_name":"macks22/nsf-award-data","sub_path":"people_to_csv.py","file_name":"people_to_csv.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"13312835493","text":"# THIS SCRIPT IS SUPPOSED TO RUN IN A JUPYTER NOTEBOOK (WE USED VS CODE)\n\n# %%\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sb\n\n\n# %%\ndef distPlot(data, var, title, xlab, ylab, bins=100):\n hplot = sb.displot(data[var], kde=False, bins=bins)\n plt.title(title, fontsize=18)\n plt.xlabel(xlab, fontsize=16)\n plt.ylabel(ylab, fontsize=16)\n \n return hplot\n\ndef scatterPlot(data, varx, vary, title, xlab, ylab):\n hplot = sb.scatterplot(varx, vary, data=data)\n plt.title(title, fontsize=18)\n plt.xlabel(xlab, fontsize=16)\n plt.ylabel(ylab, fontsize=16)\n \n return hplot\n\n\n\n# %%\ndataset_url = 'http://bit.ly/gdp-life-expect-data'\n\ndf = pd.read_csv(dataset_url)\ndf.head()\n\n# %%\ndistPlot(data=df, var='lifeExp', title='Life Expectancy',\n xlab='Life Expectancy (years)', ylab='Frequency')\n# In case you're not using a Jupyter notebook run also the following:\n# plt.show()\n \n# %%\ndistPlot(data=df, var='gdpPercap', title='GDP / capita',\n xlab='GDP / capita ($)', ylab='Frequency')\n# In case you're not using a Jupyter notebook run also the following:\n# plt.show()\n\n# %%\nscatterPlot(data=df, varx='lifeExp', vary='gdpPercap',\n title='Life Expectancy vs GDP/Capita', xlab='lifeExp', ylab='gdpPercap')\n# In case you're not using a Jupyter notebook run also the following:\n# plt.show()\n\n# %%\ndf[['lifeExp','gdpPercap']].corr(method='pearson')\n\n# %%\ndf[['lifeExp','gdpPercap']].corr(method='spearman')\n\n# %%\ndf[['lifeExp','gdpPercap']].corr(method='kendall')\n\n# %%\ncorr_df = df[['lifeExp','gdpPercap']].corr(method='spearman')\ncorr_df\n\n#%%\ncorr_df.index.name = 'rowname'\ncorr_df.reset_index(inplace=True)\ncorr_df\n\n# 
%%\n","repo_name":"PacktPublishing/Extending-Power-BI-with-Python-and-R","sub_path":"Chapter11/Python/01-gdp-life-expectancy-analysis-in-python.py","file_name":"01-gdp-life-expectancy-analysis-in-python.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"53"} +{"seq_id":"20760380248","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as ec\r\nfrom time import sleep\r\nimport xlsxwriter\r\n# Abre o Imoview\r\nworkbook = xlsxwriter.Workbook('Captações por Corretor.xlsx')\r\nworksheet = workbook.add_worksheet()\r\nworksheet.write('A1', 'Nome do Captador')\r\ndriver = webdriver.Chrome()\r\ndriver.get('https://app.imoview.com.br/Login/LogOn?ReturnUrl=%2f')\r\n\r\n# Espera fazer Login\r\nWebDriverWait(driver, 100).until(ec.element_to_be_clickable((By. XPATH, '//*[@id=\"mainnav-menu\"]/li[5]')))\r\n\r\n# Botão Iniciar\r\nstart = input('Digite qualquer coisa + \"Enter\" para começar\\n')\r\n\r\n# Pesquisa os Imóveis do último mês\r\ndriver.find_element(By. XPATH, '//*[@id=\"mainnav-menu\"]/li[5]').click()\r\nWebDriverWait(driver, 100).until(ec.element_to_be_clickable((By. XPATH, '//*[@id=\"SituacaoField\"]'))).click()\r\ndriver.find_element(By. XPATH, '//*[@id=\"painelFiltros\"]/div/div[1]/div/button').click()\r\ndriver.find_element(By. XPATH, '//*[@id=\"SituacaoField\"]/option[7]').click()\r\nWebDriverWait(driver, 100).until(ec.element_to_be_clickable((By. XPATH, '//*[@id=\"periodoCadastro\"]'))).click()\r\ndriver.find_element(By. XPATH, '//*[@id=\"periodoCadastro\"]/option[3]').click()\r\ndriver.find_element(By. XPATH, '//*[@id=\"PesquisarImoveis\"]').click()\r\n\r\n# Clica nos imóveis no modo lista\r\nWebDriverWait(driver, 100).until(ec.element_to_be_clickable((By. XPATH, '//*[@id=\"ListaPadraoBtn\"]'))).click()\r\nsleep(5)\r\n\r\n# Vê a quantidade de imóveis\r\nqtd_imoveis = WebDriverWait(driver, 20).until(ec.element_to_be_clickable((By.XPATH, '//*[@id=\"totalRegistros\"]'))).text\r\nqtd = int([i for i in qtd_imoveis.split() if i.isnumeric()][-1])\r\nint(qtd)\r\n# Seta a lista de imóveis, de páginas e de divs nas páginas\r\nlista = 1\r\ndiv = 1\r\nplan = 2\r\nwhile div < ((qtd/20) + 1):\r\n while lista < 21:\r\n # Clica nos imóveis\r\n sleep(1)\r\n try:\r\n WebDriverWait(driver, 100).until(ec.element_to_be_clickable((By. XPATH, '//*[@id=\"resultadoLista\"]/div[3]/div/div/table'\r\n f'/tbody/tr[{lista}]/td[2]/a'))).click()\r\n except Exception:\r\n print('Ocorreu um Erro ou o Programa acabou')\r\n workbook.close()\r\n # Muda pra segunda aba\r\n driver.switch_to.window(driver.window_handles[1])\r\n\r\n # Lógica para ver os captadores\r\n WebDriverWait(driver, 100).until(ec.element_to_be_clickable((By. XPATH, '//*[@id=\"page-content\"]/div/div[1]/ul/li[9]/a'))).click()\r\n sleep(2)\r\n auditoria = WebDriverWait(driver, 100).until(ec.element_to_be_clickable((By.XPATH, '//*[@id=\"totalRegistros\"]'))).text\r\n ultimo_numero = int([i for i in auditoria.split() if i.isnumeric()][-1])\r\n int(ultimo_numero)\r\n if ultimo_numero > 20:\r\n driver.find_element(By. XPATH, '//*[@id=\"painelHistorico\"]/div/div[2]/ul/li[4]/a').click()\r\n ultimo_numero2 = ultimo_numero - 20\r\n captador2 = WebDriverWait(driver, 100).until(ec.element_to_be_clickable((By. 
XPATH, f'//*[@id=\"painelHistorico\"]/div/div[2]/table/tbody/tr[{ultimo_numero2}]/td[3]')))\r\n print(captador2.text)\r\n worksheet.write(f'A{plan}', captador2.text)\r\n else:\r\n captador = WebDriverWait(driver, 100).until(ec.element_to_be_clickable((By. XPATH, f'//*[@id=\"painelHistorico\"]/div/div[2]/table/tbody/tr[{ultimo_numero}]/td[3]')))\r\n print(captador.text)\r\n worksheet.write(f'A{plan}', captador.text)\r\n plan = plan + 1\r\n driver.close()\r\n driver.switch_to.window(driver.window_handles[0])\r\n lista = lista + 1\r\n lista = 1\r\n if div == 1:\r\n driver.find_element(By.XPATH, '//*[@id=\"resultadoLista\"]/div[3]/div/div/ul/li[3]/a').click()\r\n if div == 2:\r\n driver.find_element(By.XPATH, f'//*[@id=\"resultadoLista\"]/div[3]/div/div/ul/li[4]/a').click()\r\n if div == 3:\r\n driver.find_element(By.XPATH, f'//*[@id=\"resultadoLista\"]/div[3]/div/div/ul/li[5]/a').click()\r\n if div >= 4:\r\n driver.find_element(By.XPATH, f'//*[@id=\"resultadoLista\"]/div[3]/div/div/ul/li[6]/a').click()\r\n div = div + 1\r\n sleep(5)\r\nworkbook.close()","repo_name":"sirjoaorodrigues/atena","sub_path":"ReceberImóveis.py","file_name":"ReceberImóveis.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10159060663","text":"import numpy as np\nfrom numpy.linalg import norm\nimport pickle\nfrom tqdm import tqdm, tqdm_notebook\nimport os\nimport time\nimport tensorflow as tf\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nimport PIL\nfrom PIL import Image\n\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nfrom sklearn.neighbors import NearestNeighbors\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport random\nmatplotlib.rcParams['savefig.dpi'] = 160\nmatplotlib.rcParams['figure.dpi'] = 160\n# thay doi kich thuoc anh theo ResNet-50\nmodel = ResNet50(weights='imagenet', include_top=False,\n input_shape=(224, 224, 3))\ndef extract_features(img_path, model):\n input_shape = (224, 224, 3)\n img = image.load_img(img_path, target_size=(\n input_shape[0], input_shape[1]))\n img_array = image.img_to_array(img)\n expanded_img_array = np.expand_dims(img_array, axis=0)\n preprocessed_img = preprocess_input(expanded_img_array)\n features = model.predict(preprocessed_img)\n flattened_features = features.flatten()\n normalized_features = flattened_features / norm(flattened_features)\n return normalized_features\n\n\n\n# test data\nfeatures = extract_features('cat.jpg', model)\nprint('out data %d',len(features))\n\n\nextensions = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG']\ndef get_file_list(root_dir):\n file_list = []\n counter = 1\n for root, directories, filenames in os.walk(root_dir):\n for filename in filenames:\n if any(ext in filename for ext in extensions):\n file_list.append(os.path.join(root, filename))\n counter += 1\n return file_list\n\n# path to the datasets\nroot_dir = './dataset/'\nfilenames = sorted(get_file_list(root_dir))\n\n\nfeature_list = []\n#for i in tqdm(range(len(filenames))):\n# feature_list.append(extract_features(filenames[i], model))\n\nbatch_size = 64\ndatagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n \ngenerator = datagen.flow_from_directory(root_dir,\n 
target_size=(224, 224),\n batch_size=batch_size,\n class_mode=None,\n shuffle=False)\n#store the result\n#pickle.dump(feature_list, open('./data/features-caltech101-resnet.pickle', 'wb'))\n#pickle.dump(filenames, open('./data/filenames-caltech101.pickle','wb'))\n#pickle.dump(generator.classes, open('./data/class_ids-caltech101.pickle','wb'))\n#load data \n\nfilenames = pickle.load(open('./data/filenames-caltech101.pickle', 'rb'))\nfeature_list = pickle.load(open('./data/features-caltech101-resnet.pickle', 'rb'))\nclass_ids = pickle.load(open('./data/class_ids-caltech101.pickle', 'rb'))\n\n\nprint(\"Number of images = \", len(generator.filenames))\nnum_images = len(filenames)\nnum_features_per_image = len(feature_list[0])\nprint(\"Number of images = \", num_images)\nprint(\"Number of features per image = \", num_features_per_image)\n\n\n\n\n# nearest-neighbor model using the brute-force algorithm \nneighbors = NearestNeighbors(n_neighbors=5, algorithm='brute',\nmetric='euclidean').fit(feature_list)\ndistances, indices = neighbors.kneighbors([feature_list[0]])\n# the nearest image is itself lol\nplt.imshow(mpimg.imread(filenames[0]))\n\n# show k-neighbor index value of 5 nearest image \nfor i in range(5):\n print(distances[0][i])\n\n# Helper function to get the classname\ndef classname(str):\n return str.split('/')[-2]\n \n \n# Helper function to get the classname and filename\ndef classname_filename(str):\n return str.split('/')[-2] + '/' + str.split('/')[-1]\n\n# Helper functions to plot the nearest images given a query image\ndef plot_images(filenames, distances):\n images = []\n for filename in filenames:\n images.append(mpimg.imread(filename))\n plt.figure(figsize=(20, 10))\n columns = 4\n for i, image in enumerate(images):\n ax = plt.subplot(len(images) / columns + 1, columns, i + 1)\n if i == 0:\n ax.set_title(\"Query Image\\n\" + classname_filename(filenames[i]))\n else:\n ax.set_title(\"Similar Image\\n\" + classname_filename(filenames[i]) +\n \"\\nDistance: \" +\n str(float(\"{0:.2f}\".format(distances[i]))))\n plt.imshow(image)\n \n\n\nfor i in range(6):\n random_image_index = random.randint(0,num_images)\n distances, indices = neighbors.kneighbors([feature_list[random_image_index]])\n # ignore first nearest image\n similar_image_paths = [filenames[random_image_index]] + [filenames[indices[0][i]] for i in range(1,4)]\n plot_images(similar_image_paths, distances[0])\n\n\n # Perform PCA over the features\nnum_feature_dimensions=100 # Set the number of features\npca = PCA(n_components = num_feature_dimensions)\npca.fit(feature_list)\nfeature_list_compressed = pca.transform(feature_list)\n\n# For speed and clarity, we'll analyze about first half of the dataset.\nselected_features = feature_list_compressed[:4000]\nselected_class_ids = class_ids[:4000]\nselected_filenames = filenames[:4000]\n\ntsne_results =TSNE(n_components=2,verbose=1,metric='euclidean').fit_transform(selected_features)\n\n# Plot a scatter plot from the generated t-SNE results\ncolormap = plt.cm.get_cmap('coolwarm')\nscatter_plot = plt.scatter(tsne_results[:,0],tsne_results[:,1], c =\n selected_class_ids, cmap=colormap)\nplt.colorbar(scatter_plot)\nplt.show()\n\n\n\n\n\n","repo_name":"NguyenHuynhSang/computer-vision-research","sub_path":"tensorflow/Reverse_Image_Search/Reverse_Image_Search.py","file_name":"Reverse_Image_Search.py","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"6911768026","text":"#https://practice.geeksforgeeks.org/problems/mirror-tree/1\n\ndef mirror(root):\n if root is None:\n return 0\n else:\n if(root.left is None and root.right is None):\n return \n else:\n mirror(root.left)\n mirror(root.right)\n l = root.right\n root.right = root.left \n root.left = l\n","repo_name":"Ankurvish06/DS-Algorithms","sub_path":"TreesStandard/mirrorTree.py","file_name":"mirrorTree.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42807039313","text":"import os\n\nNAME = 'judge'\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nDATA_DIR = os.path.join('/var/lib/', NAME, 'data')\nVERSION = '0.12.5'\n\nSECRET_KEY = 'somestrongdjangokey'\n\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\n\nINSTALLED_APPS = [\n 'judge.ui',\n 'judge.api',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_rq',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'judge.api.middleware.LastActivityMiddleware',\n]\n\n\nROOT_URLCONF = 'judge.urls'\n\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = 'judge.wsgi.application'\n\n\nif DEBUG:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(DATA_DIR, 'db.sqlite3'),\n }\n }\n\n REDIS_HOST = 'localhost'\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'postgres',\n 'USER': 'postgres',\n 'HOST': 'postgres',\n 'PORT': '5432',\n },\n }\n\n REDIS_HOST = 'redis'\n\n\nREDIS_PORT = 6379\nREDIS_DB = 0\n\n\nRQ_QUEUES = {\n 'default': {\n 'HOST': REDIS_HOST,\n 'PORT': REDIS_PORT,\n 'DB': REDIS_DB,\n }\n}\n\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Moscow'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nSTATIC_URL = '/static/'\n\nLOGIN_URL = '/auth/login'\n\nSOURCE_DIR = os.path.join(DATA_DIR, 'user_sources')\n\nTEST_GENERATORS_DIR = os.path.join(DATA_DIR, 'test_generators')\n\nTEST_CHECKERS_DIR = os.path.join(DATA_DIR, 'test_checkers')\n\nTEST_ERRORS = [\n 'Полное решение',\n 'Ошибка компиляции программы',\n 'Неправильный ответ',\n 'Превышено ограничение по времени'\n]\n\nLOGS_DIR = os.path.join(DATA_DIR, 'logs')\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 
'verbose': {\n 'format': '%(asctime)s %(filename)s:'\n '%(funcName)s:%(lineno)s '\n '%(levelname)s: %(message)s'\n },\n 'simple': {\n 'format': '%(asctime)s %(message)s'\n },\n },\n 'handlers': {\n 'main': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(LOGS_DIR, 'main.log'),\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'main': {\n 'handlers': ['main'],\n 'level': 'DEBUG',\n 'propagate': True\n }\n },\n}\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'PAGE_SIZE': 10\n}\n\nSTATIC_ROOT = os.path.join(DATA_DIR, 'static')\n\nIM_REDIS_PREFIX = 'judge:im'\n\n# How many seconds IM message stores in redis\nIM_REDIS_EX = 300 # 5 minutes\n\nTEST_INPUT_MAX_LEN = 500\n\nUI_DATETIME_FORMAT = '%d.%m.%Y @ %H:%M:%S'\n","repo_name":"Sapunov/edujudge","sub_path":"app/judge/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10725841015","text":"# Libraries\nfrom haversine import haversine\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n# bibliotecas necessárias\nimport folium\nimport pandas as pd\nimport streamlit as st\nfrom PIL import Image\nfrom streamlit_folium import folium_static\nimport re\nimport inflection\nimport locale\nfrom forex_python.converter import CurrencyRates\nfrom currency_converter import CurrencyConverter\n\n\nst.set_page_config( page_title='Visão Cidades', page_icon='🏙️', layout='wide')\n\n# =====================================================================================================\n# Funções\n#======================================================================================================\ndef top_restaurant( df1 ):\n \"\"\" Esta função tem a responsabilidade de plotar um gráfico de barras\n Tipos de ações:\n 1. Dataframe - Top 10 Cidades com mais restaurantes cadastrados\n 2. Filtra as colunas 'country_name', 'city'\n 4. Agrupa por 'country_name' e 'city'\n 5. Contar as cidades, renomeie para 'Quantidade de Restaurantes'\n 6. Classificar pelas colunas 'Quantidade de Restaurantes' e 'country_name'\n 7. Agrupa por cidades e define para mostrar 10 cidades\n 8. Desenhar e plotar um gráfico de barras\n 9. 
Personalizar a fonte do eixo 'x' e 'y'\n \n Input: Dataframe\n Output: Gráfico de barras \n \"\"\"\n #Top 10 Cidades com mais restaurantes cadastrados\n top_cities = df1.groupby(['country_name', 'city']).size().reset_index(name='Quantidade de Restaurantes')\n top_cities = top_cities.sort_values(['Quantidade de Restaurantes', 'country_name'], ascending=[False, True]).head(10)\n top_cities = top_cities.groupby('city').head(10)\n \n # Plota o gráfico de barras\n fig = px.bar(top_cities, x='city', y='Quantidade de Restaurantes', text='Quantidade de Restaurantes', color='country_name')\n fig.update_traces(textfont_size=12)\n\n fig.update_layout(\n title='Top 10 Cidades com mais restaurantes cadastrados',\n title_x=0.3,\n title_font=dict(size=14, family='Arial', color='white'),\n)\n fig.update_layout(xaxis_title='Cidade', yaxis_title='Quantidade de Restaurantes', showlegend=True,\n legend_title_text='País')\n\n # Personalize a fonte do eixo x\n fig.update_xaxes(\n tickfont=dict(size=12, family='Arial', color='white'),\n showgrid=False,\n title_font=dict(size=14, family='Arial', color='white'),\n)\n\n # Personalize a fonte do eixo y\n fig.update_yaxes(\n tickfont=dict(size=12, family='Arial', color='white'),\n showgrid=False,\n showticklabels=False,\n title_font=dict(size=14, family='Arial', color='white'),\n)\n\n return fig\n\n\ndef top7_cities_aval4( df1 ):\n \"\"\" Esta função tem a responsabilidade de plotar um gráfico de barras\n Tipos de ações:\n 1. Dataframe - Top 7 cidades com mais restaurantes com média de avaliação acima de 4\n 2. Filtra as linhas da coluna 'aggregate_rating' com avaliação maio que 4\n 3. Filtra as colunas 'restaurant_id', 'city' e 'country_name'\n 4. Agrupa por 'country_name' e 'city'\n 5. Contar as cidades, renomeie para 'Quantidade de Restaurantes'\n 6. Classificar pelas colunas 'Quantidade de Restaurantes' e 'country_name'\n 7. Agrupa por cidades e define para mostrar 7 cidades\n 8. Desenhar e plotar um gráfico de barras\n 9. Personalizar a fonte do eixo 'x' e 'y'\n \n Input: Dataframe\n Output: Gráfico de barras \n \"\"\"\n \n #Top 7 cidades com mais restaurantes com média de avaliação acima de 4'\n top_cities = df1.loc[df1['aggregate_rating'] > 4, ['restaurant_id', 'city', 'country_name']].groupby(['country_name', 'city']).size().reset_index(name='Quantidade de Restaurantes')\n top_cities = top_cities.sort_values(['Quantidade de Restaurantes', 'country_name'], ascending=[False, True]).head(7)\n top_cities = top_cities.groupby('city').head(7)\n \n # Plota o gráfico de barras\n fig = px.bar(top_cities, x='city', y='Quantidade de Restaurantes', text='Quantidade de Restaurantes', color='country_name')\n fig.update_traces(textfont_size=12) \n fig.update_layout(xaxis_title='Cidade', yaxis_title='Quantidade de Restaurantes', showlegend=True,\n legend_title_text='País')\n\n # Personalize a fonte do eixo x\n fig.update_xaxes(\n tickfont=dict(size=12, family='Arial', color='white'),\n showgrid=False,\n title_font=dict(size=14, family='Arial', color='white'),\n)\n\n # Personalize a fonte do eixo y\n fig.update_yaxes(\n tickfont=dict(size=12, family='Arial', color='white'),\n showgrid=False,\n showticklabels=False,\n title_font=dict(size=14, family='Arial', color='white'),\n)\n\n return fig\n\n\ndef top7_cities_aval25( df1 ):\n \"\"\" Esta função tem a responsabilidade de plotar um gráfico de barras\n Tipos de ações:\n 1. Dataframe - Top 7 cidades com mais restaurantes com média de avaliação abaixo de 2.5\n 2. 
Filtra as linhas da coluna 'aggregate_rating' com avaliação menor que 2.5\n 3. Filtra as colunas 'restaurant_id', 'city' e 'country_name'\n 4. Agrupa por 'country_name' e 'city'\n 5. Contar as cidades, renomeie para 'Quantidade de Restaurantes'\n 6. Classificar pelas colunas 'Quantidade de Restaurantes' e 'country_name'\n 7. Agrupa por cidades e define para mostrar 7 cidades\n 8. Desenhar e plotar um gráfico de barras\n 9. Personalizar a fonte do eixo 'x' e 'y'\n \n Input: Dataframe\n Output: Gráfico de barras \n \"\"\"\n#Top 7 cidades com mais restaurantes com média de avaliação abaixo de 2.5'\n\n top_cities = df1.loc[df1['aggregate_rating'] < 2.5, ['restaurant_id', 'city', 'country_name']].groupby(['country_name', 'city']).size().reset_index(name='Quantidade de Restaurantes')\n top_cities = top_cities.sort_values(['Quantidade de Restaurantes', 'country_name'], ascending=[False, True]).head(7)\n top_cities = top_cities.groupby('city').head(7)\n \n # Plota o gráfico de barras\n fig = px.bar(top_cities, x='city', y='Quantidade de Restaurantes', text='Quantidade de Restaurantes', color='country_name')\n fig.update_traces(textfont_size=12)\n fig.update_layout(xaxis_title='Cidade', yaxis_title='Quantidade de Restaurantes', showlegend=True,\n legend_title_text='País')\n\n # Personalize a fonte do eixo x\n fig.update_xaxes(\n tickfont=dict(size=12, family='Arial', color='white'),\n showgrid=False,\n title_font=dict(size=14, family='Arial', color='white'),\n)\n\n # Personalize a fonte do eixo y\n fig.update_yaxes(\n tickfont=dict(size=12, family='Arial', color='white'),\n showgrid=False,\n showticklabels=False,\n title_font=dict(size=14, family='Arial', color='white'),\n)\n\n return fig\n\n\ndef top10_cities_culinaria_unica( df1 ):\n \"\"\" Esta função tem a responsabilidade de plotar um gráfico de barras\n Tipos de ações:\n 1. Dataframe - 10 cidades com mais restaurantes com tipo de culinária única\n 2. Filtra as colunas 'cuisines', 'city' e 'country_name'\n 3. Agrupa por 'country_name' e 'city'\n 4. Contas as cidades únicas\n 5. Classificar pelas colunas 'cuisines' e 'country_name'\n 6. Agrupa por cidades e define para mostrar 10 cidades\n 7. Desenhar e plotar um gráfico de barras\n 8. 
Personalizar a fonte do eixo 'x' e 'y'\n \n Input: Dataframe\n Output: Gráfico de barras \n \"\"\"\n #Top 10 cidades com mais restaurantes com tipo de culinária única\n top_cities = df1.loc[:,['cuisines', 'city', 'country_name']].groupby(['country_name', 'city']).nunique().reset_index()\n top_cities = top_cities.sort_values(['cuisines', 'country_name'], ascending=[False, True]).head(10)\n top_cities = top_cities.groupby('city').head(10)\n \n # Plota o gráfico de barras\n fig = px.bar(top_cities, x='city', y='cuisines', text='cuisines', color='country_name')\n fig.update_traces(textfont_size=12)\n\n fig.update_layout(\n title='Top 10 Cidades com mais restaurantes com tipo de culinária única',\n title_x=0.3,\n title_font=dict(size=14, family='Arial', color='white'),\n)\n fig.update_layout(xaxis_title='Cidade', yaxis_title='Quantidade de Restaurantes', showlegend=True,\n legend_title_text='País')\n\n # Personalize a fonte do eixo x\n fig.update_xaxes(\n tickfont=dict(size=12, family='Arial', color='white'),\n showgrid=False,\n title_font=dict(size=14, family='Arial', color='white'),\n)\n\n # Personalize a fonte do eixo y\n fig.update_yaxes(\n tickfont=dict(size=12, family='Arial', color='white'),\n showgrid=False,\n showticklabels=False,\n title_font=dict(size=14, family='Arial', color='white'),\n)\n\n return fig\n\n\n\ndef clean_code( df1 ):\n \"\"\" Esta função tem a responsabilidade de limpar o dataframe\n Tipos de limpeza:\n 1. Remoção dos dados NaN\n 2. Mudança do tipo da coluna de dados\n 3. Remoção dos espaços da variáveis de texto\n 4. Renomear as colunas do Dataframe\n 5. Crias novas colunas\n 6. Categorizar colunas\n 7. Conversão de valores de coluna\n\n Input: Dataframe\n Output: Dataframe \n \"\"\"\n \n # 1. Removendo os espacos dentro de strings/texto/object\n df1.loc[:, 'Restaurant Name'] = df1.loc[:, 'Restaurant Name'].str.strip()\n df1.loc[:, 'City'] = df1.loc[:, 'City'].str.strip()\n df1.loc[:, 'Address'] = df1.loc[:, 'Address'].str.strip()\n df1.loc[:, 'Locality'] = df1.loc[:, 'Locality'].str.strip()\n df1.loc[:, 'Locality Verbose'] = df1.loc[:, 'Locality Verbose'].str.strip()\n df1.loc[:, 'Cuisines'] = df1.loc[:, 'Cuisines'].str.strip()\n df1.loc[:, 'Currency'] = df1.loc[:, 'Currency'].str.strip()\n df1.loc[:, 'Rating color'] = df1.loc[:, 'Rating color'].str.strip()\n df1.loc[:, 'Rating text'] = df1.loc[:, 'Rating text'].str.strip()\n \n # 2. Renomear as colunas do DataFrame\n title = lambda x: inflection.titleize(x)\n snakecase = lambda x: inflection.underscore(x)\n spaces = lambda x: x.replace(\" \", \"_\")\n cols_old = list(df1.columns)\n cols_old = list(map(title, cols_old))\n cols_old = list(map(spaces, cols_old))\n cols_new = list(map(snakecase, cols_old))\n df1.columns = cols_new\n\n # 3. Categorizar, todos os restaurantes somente por um tipo de culinária\n df1[\"cuisines\"] = df1.loc[:, \"cuisines\"].astype(str).apply(lambda x: x.split(\",\")[0])\n\n # 4 . 
Exluir dados duplicados na coluna Restaurants ID.\n df1['restaurant_id'] = df1['restaurant_id'].drop_duplicates()\n df1 = df1.dropna(subset=['restaurant_id'])\n df1['restaurant_id'] = df1['restaurant_id'].astype('int64')\n\n #limpando linhas 'nan' da coluna identificada\n linhas_vazias = df1['cuisines'] != 'nan'\n df1 = df1.loc[linhas_vazias, :]\n\n #Excluir as linhas com 'Mineira' na coluna 'cuisines'\n linhas_vazias = df1['cuisines'] != 'Mineira'\n df1 = df1.loc[linhas_vazias, :]\n\n #Excluir as linhas com 'Drinks Only' na coluna 'cuisines'\n linhas_vazias = df1['cuisines'] != 'Drinks Only'\n df1 = df1.loc[linhas_vazias, :]\n \n # 5. Excluir a linha com base no valor do Restaurant ID (nesse caso, 16608070)\n restaurant_id_to_delete = 16608070\n df1 = df1[df1['restaurant_id'] != restaurant_id_to_delete]\n\n # 6. Redefinir os índices do dataframe após a exclusão da linha:\n df1.reset_index(drop=True, inplace=True)\n\n return df1\n\n#Criar coluna de categoria com base no range de valores\ndef create_price_type(df1):\n def get_price_type(price_range):\n if price_range == 1:\n return 'cheap'\n elif price_range == 2:\n return 'normal'\n elif price_range == 3:\n return 'expensive'\n else:\n return 'gourmet'\n\n df1['price_type'] = df1['price_range'].apply(get_price_type)\n \n return df1\n\n\n#Substituir a coluna com o ID dos paises pelo nome do país\nCOUNTRIES = {\n 1: \"India\",\n 14: \"Australia\",\n 30: \"Brazil\",\n 37: \"Canada\",\n 94: \"Indonesia\",\n 148: \"New Zeland\",\n 162: \"Philippines\",\n 166: \"Qatar\",\n 184: \"Singapure\",\n 189: \"South Africa\",\n 191: \"Sri Lanka\",\n 208: \"Turkey\",\n 214: \"United Arab Emirates\",\n 215: \"England\",\n 216: \"United States of America\",\n}\n\ndef country_name(country_code):\n return COUNTRIES.get(country_code, \"Unknown\")\n\n# Renomear a coluna 'country_code' por 'country_name'\ndef rename_country( df1 ):\n df1['country_code'] = df1['country_code'].apply(country_name)\n df1 = df1.rename(columns={'country_code': 'country_name'})\n return df1\n\n\n# Criar coluna com o nome das cores com base nos códigos de cores\nCOLORS = {\n \"3F7E00\": \"darkgreen\",\n \"5BA829\": \"green\",\n \"9ACD32\": \"lightgreen\",\n \"CDD614\": \"orange\",\n \"FFBA00\": \"red\",\n \"CBCBC8\": \"darkred\",\n \"FF7800\": \"darkred\",\n}\n\ndef color_name(rating_color):\n return COLORS.get(rating_color)\n\ndef color_rename_name ( df1 ):\n df1['color_name'] = df1['rating_color'].apply(color_name)\n\n return df1\n\n\n# Dicionário de taxas de câmbio para conversão para dólar americano\nexchange_rates = {\n 'Botswana Pula(P)': 0.0907, # Taxa de câmbio dia 14/07/23 para Botswana Pula para USD\n 'Brazilian Real(R$)': 0.1922, # Taxa de câmbio dia 14/07/23 para Real brasileiro para USD\n 'Dollar($)': 1.0, # Taxa de câmbio dia 14/07/23 para Dólar para USD\n 'Emirati Diram(AED)': 0.2723, # Taxa de câmbio dia 14/07/23 para Emirati Dirham para USD\n 'Indian Rupees(Rs.)': 0.0134, # Taxa de câmbio dia 14/07/23 para Rúpia indiana para USD\n 'Indonesian Rupiah(IDR)': 7.1e-5, # Taxa de câmbio dia 14/07/23 para Rupia indonésia para USD\n 'NewZealand($)': 0.7033, # Taxa de câmbio dia 14/07/23 para Dólar da Nova Zelândia para USD\n 'Pounds(£)': 1.3804, # Taxa de câmbio dia 14/07/23 para Libra esterlina para USD\n 'Qatari Rial(QR)': 0.2747, # Taxa de câmbio dia 14/07/23 para Rial do Qatar para USD\n 'Rand(R)': 0.0675, # Taxa de câmbio dia 14/07/23 para Rand sul-africano para USD\n 'Sri Lankan Rupee(LKR)': 0.005, # Taxa de câmbio dia 14/07/23 para Rupia do Sri Lanka para USD\n 'Turkish 
Lira(TL)': 0.1147 # Taxa de câmbio dia 14/07/23 para Lira turca para USD\n}\n\n# Função para converter o valor para dólar americano\ndef convert_to_usd( row ):\n currency = row['currency']\n average_cost = row['average_cost_for_two']\n \n if currency in exchange_rates:\n exchange_rate = exchange_rates[currency]\n converted_cost = average_cost * exchange_rate\n return converted_cost\n else:\n return average_cost\n\n# Criar a nova coluna 'average_cost_usd' com os valores convertidos\ndef average_cost_usd ( df1 ):\n df1['average_cost_usd'] = round(df1.apply(convert_to_usd, axis=1), 2)\n\n return df1\n \n\n# ====================================Inicio da estrutura lógica do código==============================\n \n# ======================================\n# Import dataset\n# ======================================\ndf = pd.read_csv( 'dataset/zomato.csv' )\n\n# ======================================\n#Limpando os dados\n# ======================================\ndf1 = clean_code( df )\n# ======================================\n#Criar coluna de categoria com base no range de valores\ndf1 = create_price_type(df1)\n# ======================================\n#Renomear coluna Country_Code\ndf1 = rename_country( df1 )\n# ======================================\n# Criar coluna com o nome das cores com base nos códigos de cores\ndf1 = color_rename_name ( df1 )\n# ======================================\n# Função para converter o valor para dólar americano\ndf1 = average_cost_usd ( df1 )\n# ======================================\n\n# =======================================\n# Barra Lateral\n# =======================================\nst.title( '🏙️' 'Fome Zero - Visão Cidades' )\n\n#image_path = 'pngwing.com.png'\nimage = Image.open( 'pngwing.com.png' )\nst.sidebar.image( image, width=230 )\n\nst.sidebar.markdown( '# Fome Zero Company' )\nst.sidebar.markdown( '#### Restaurant Management Platform in Countries and Town' )\nst.sidebar.markdown( \"\"\"---\"\"\" )\n\nst.sidebar.markdown( '# Filtro' )\n\ncountries = st.sidebar.multiselect( \n 'Escolha os países que deseja visualizar as informações',\n df1.loc[:, 'country_name'].unique(), \n default=['Australia', 'Brazil', 'Canada', 'England', 'India', 'Indonesia', 'New Zeland', 'Philippines', 'Qatar', 'Singapure', 'South Africa', 'Sri Lanka', 'Turkey', 'United Arab Emirates', 'United States of America'] )\n\nst.sidebar.markdown( \"\"\"---\"\"\" )\nst.sidebar.markdown( '### Powered by Wagner Sobrinho' )\n\n# Filtro de país\nlinhas_selecionadas = df1['country_name'].isin( countries )\ndf1 = df1.loc[linhas_selecionadas, :]\n\n\n# =======================================\n# Layout no Streamlit\n# =======================================\nwith st.container():\n fig = top_restaurant( df1 )\n st.plotly_chart(fig,use_container_width=True)\n \nwith st.container():\n st.markdown(\"\"\"---\"\"\")\n col1, col2 = st.columns( 2 )\n \n with col1:\n st.markdown( '###### Top 7 cidades com mais restaurantes com média de avaliação acima de 4' )\n fig = top7_cities_aval4( df1 )\n st.plotly_chart( fig, use_container_width=True ) \n \n with col2:\n st.markdown( '###### Top 7 cidades com mais restaurantes com média de avaliação abaixo de 2.5' )\n fig = top7_cities_aval25( df1 )\n st.plotly_chart( fig, use_container_width=True )\n\nwith st.container():\n st.markdown(\"\"\"---\"\"\")\n fig = top10_cities_culinaria_unica( df1 )\n st.plotly_chart(fig,use_container_width=True) 
\n","repo_name":"WagnerSobrinho/Fome-Zero-Company-Project","sub_path":"pages/3_🏙️_Visão_Cidades.py","file_name":"3_🏙️_Visão_Cidades.py","file_ext":"py","file_size_in_byte":17859,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22680690591","text":"import argparse\nimport itertools\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\nfrom PIL import Image\n\nimport code.archs as archs\nfrom code.utils.cluster.data import cluster_twohead_create_dataloaders\nfrom code.utils.cluster.transforms import sobel_process\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--model_ind\", type=int, required=True)\nparser.add_argument(\"--num_imgs\", type=int, default=200)\nparser.add_argument(\"--out_root\", type=str,\n default=\"/scratch/shared/slow/xuji/iid_private\")\n\ngiven_config = parser.parse_args()\n\ngiven_config.out_dir = os.path.join(given_config.out_root,\n str(given_config.model_ind))\n\nreloaded_config_path = os.path.join(given_config.out_dir, \"config.pickle\")\nprint(\"Loading restarting config from: %s\" % reloaded_config_path)\nwith open(reloaded_config_path, \"rb\") as config_f:\n config = pickle.load(config_f)\nassert (config.model_ind == given_config.model_ind)\n\nnet = archs.__dict__[config.arch](config)\nmodel_path = os.path.join(config.out_dir, \"best_net.pytorch\")\nnet.load_state_dict(\n torch.load(model_path, map_location=lambda storage, loc: storage))\n\nnet.cuda()\nnet.eval()\n\nnet = torch.nn.DataParallel(net)\n\n# model dataloader\n_, _, _, dataloader = cluster_twohead_create_dataloaders(config)\n\n# render dataloader\nold_value = config.include_rgb\nconfig.include_rgb = True\n_, _, _, render_dataloader = cluster_twohead_create_dataloaders(config)\nconfig.include_rgb = old_value\n\nif \"MNIST\" in config.dataset:\n sobel = False\nelse:\n sobel = True\n\nusing_IR = False # not segmentation\n\n# from first batch\nimg_inds = np.random.choice(config.batch_sz, size=given_config.num_imgs,\n replace=False)\n\n# already know the best head (and one-to-one mapping, but not used)\nbest_i = np.argmax(np.array(config.epoch_acc))\nstats_dict = config.epoch_stats[best_i]\n\nprint(stats_dict)\nif \"best_train_sub_head\" in stats_dict:\n best_head = stats_dict[\"best_train_sub_head\"]\n print(\"best_train_sub_head: %d\" % best_head)\n best_match = stats_dict[\"best_train_sub_head_match\"] # pred -> target\n\nif \"best_head\" in stats_dict:\n best_head = stats_dict[\"best_head\"]\n print(\"best_head: %d\" % best_head)\n best_match = stats_dict[\"best_head_match\"]\n\nassert (not (\"best_train_sub_head\" in stats_dict and \"best_head\" in stats_dict))\n\nbest_match_dict = {}\nfor pred_i, target_i in best_match:\n best_match_dict[pred_i] = target_i\n\nrender_out_dir = os.path.join(config.out_dir, \"print_examples\")\nif not os.path.exists(render_out_dir):\n os.makedirs(render_out_dir)\n\nresults_f = os.path.join(render_out_dir, \"results.txt\")\n\niterators = (d for d in [dataloader, render_dataloader])\n\nfor tup in itertools.izip(*iterators):\n train_batch = tup[0]\n render_batch = tup[1]\n\n imgs = train_batch[0].cuda()\n orig_imgs = render_batch[0]\n\n if sobel:\n imgs = sobel_process(imgs, config.include_rgb, using_IR=using_IR)\n\n flat_targets = train_batch[1]\n\n with torch.no_grad():\n x_outs = net(imgs)\n\n assert (x_outs[0].shape[1] == config.output_k)\n assert (len(x_outs[0].shape) == 2)\n\n x_outs_curr = x_outs[best_head]\n flat_preds_curr = torch.argmax(x_outs_curr, dim=1) # along output_k\n\n with 
open(results_f, \"w\") as f:\n for i, img_i in enumerate(img_inds):\n img = orig_imgs[img_i].numpy()\n img = img[:3]\n img = img.transpose((1, 2, 0)) # channels last\n img *= 255.\n\n print(img.shape)\n print(img.max())\n print(img.min())\n\n img = Image.fromarray(img.astype(np.uint8))\n img.save(os.path.join(render_out_dir, \"%d.png\" % i))\n\n f.write(\"(%d) %d %d %d\\n\" % (i,\n best_match_dict[\n flat_preds_curr[img_i].item()],\n flat_targets[img_i].item(),\n flat_preds_curr[img_i].item()))\n\n break\n\nprint(\"finished rendering to: %s\" % render_out_dir)\n","repo_name":"xu-ji/IIC","sub_path":"code/scripts/cluster/analysis/print_examples.py","file_name":"print_examples.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","stars":828,"dataset":"github-code","pt":"53"} +{"seq_id":"31624058226","text":"from flask import Flask, jsonify, request\n\nfrom api.services.get_result_service import GetResultService\nfrom api.store_processor import main as store_processor\n\napi = Flask(__name__)\n\n\n@api.route('/objects/', methods=['POST'])\ndef get_objects_api():\n if not request.files or not request.files.get('video'):\n return jsonify({'error': 'Video was not provided.'}), 400\n\n service = GetResultService(request.files.get('video'))\n success, response, filename = service.save()\n\n if not success:\n return jsonify({'error': response}), 400\n\n results = store_processor(filename)\n\n return jsonify({'results': results}), 200\n","repo_name":"DastanB/FlaskAppTest","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25605578662","text":"#! /usr/bin/env python3\n\nimport sys\nimport re\nimport requests\n\ndef main():\n zip_code = sys.argv[1] if len(sys.argv) != 1 else \"\"\n\n if re.match('^[0-9]{7}$', zip_code):\n params = { 'zipcode': {int(zip_code)} }\n else:\n print(f'Usage: {sys.argv[0]} \\n')\n sys.exit()\n\n url = 'https://zipcloud.ibsnet.co.jp/api/search'\n res = requests.get(url, params)\n\n print(res.status_code)\n # print(res.text)\n res_json = res.json()\n\n if res_json['results'] is not None:\n results = res_json['results'][0]\n address = results['address1'] + results['address2'] + results['address3']\n print(address)\n else:\n print(f'zip code error({zip_code}: {res_json[\"results\"]})')\n\nif __name__ == '__main__':\n main()\n","repo_name":"nakazt/test","sub_path":"requests_test.py","file_name":"requests_test.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27440051163","text":"\n\nimport requests \nimport datetime\n\nimport pytz\nfrom dotenv import load_dotenv\nimport os\nload_dotenv(\".env\")\n\n\n\n\ndef test_market_options(**kwargs):\n\n key = os.environ[\"API_KEY_AMERITRADE\"]\n params = {}\n params.update({'apikey':key})\n for arg in kwargs:\n parameter = {arg: kwargs.get(arg)}\n params.update(parameter)\n url =\"https://api.tdameritrade.com/v1/marketdata/chains?apikey={}&symbol={}&contractType={}&toDate={}\".format(\n \n params['apikey'],\n params['symbol'],\n params['contractType'],\n params['toDate']\n\n )\n response =requests.get(url).json()\n\n list_of_contracts = []\n\n dict_of_contract = {\n \"strike\":\"\",\n \"ask\":\"\",\n \"bid\":\"\",\n \"expiration_date\":\"\" \n }\n\n\n if params['contractType'] == \"CALL\":\n contract_type = \"callExpDateMap\"\n elif 
params['contractType'] == \"PUT\":\n contract_type = \"putExpDateMap\"\n\n\n tz_new_york = pytz.timezone(\"America/New_York\") \n for expiration_date_data in response[contract_type]:\n for data in response[contract_type][expiration_date_data]:\n for contract in response[contract_type][expiration_date_data][data]:\n ms = contract['expirationDate']\n expiration_date=datetime.datetime.fromtimestamp(ms/1000.0, tz_new_york)\n\n if contract[\"ask\"] >= params['low_rank'] and contract[\"ask\"] <= params['high_rank'] :\n list_of_contracts.append((data, contract['ask'], contract[\"bid\"] ,str(expiration_date)))\n\n\n ask_prices = []\n if len(list_of_contracts) >0:\n for contract in list_of_contracts:\n ask_prices.append(contract[1])\n \n max_value_ask = max(ask_prices)\n\n max_value_ask_index = ask_prices.index(max_value_ask)\n dict_of_contract[\"strike\"] =list_of_contracts[max_value_ask_index][0] \n dict_of_contract[\"ask\"] =list_of_contracts[max_value_ask_index][1]\n dict_of_contract[\"bid\"] = list_of_contracts[max_value_ask_index][2]\n dict_of_contract[\"expiration_date\"] =list_of_contracts[max_value_ask_index][3]\n print (dict_of_contract)\n return dict_of_contract\n\n return None\n\n \n\n\n\n\n","repo_name":"webclinic017/ameritrade_public","sub_path":"test_market_options.py","file_name":"test_market_options.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35512022829","text":"# printing N to ! using recursion\n# using IBH , induction , base , hypothesis\n\ndef n_to_one(N):\n if N==1:\n print(1)\n return\n print(N)\n n_to_one(N-1)\n \n\nN = 5\nprint(n_to_one(N))\n\n\n# printing factorial of n using recursion\n# using IBH , induction , base , hypothesis\n\ndef factorial(N):\n if N==1 or N== 0:\n return 1\n return N * factorial(N-1)\n \n\nN = 10\nprint(factorial(N))\n","repo_name":"AbhinavSingh111/HackerRank-DS","sub_path":"N_TO_1_using_recursion.py","file_name":"N_TO_1_using_recursion.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42282093887","text":"import hid\r\n\r\nVENDORID\t=0x1F0A\r\nPIDRUN\t\t=0x0088\r\n#VENDORID\t=0x136e\r\n#PIDRUN\t\t=0x1088\r\n\r\n#general status/action values\r\nSLEEP\t\t=0x7f\t\t\t#device in sleep mode\r\nRUN\t\t =0x0f\t\t\t#device running\r\n\r\nDOORCLSD\t=0x01\t\t\t#door closed\r\nDOOROPEN\t=0x02\t\t\t#door open\r\n\r\nDSKPOS0 =0x00 #disk out of beam path, wide field\r\nDSKPOS1\t\t=0x01\t\t\t#disk pos 1, low sectioning\r\nDSKPOS2\t\t=0x02\t\t\t#disk pos 2, mid sectioning\r\nDSKPOS3 =0x03 #disk pos 3, high sectioning\r\nDSKERR\t\t=0xff\t\t\t#An error has occurred in setting slide position (end stops not detected)\r\nDSKMID\t\t=0x10\t\t\t#slide is moving between positions\r\n\r\nFLTPOS1\t\t=0x01\t\t\t#Filter in position 1\r\nFLTPOS2\t\t=0x02\t\t\t#Filter in position 2\r\nFLTPOS3\t\t=0x03\t\t\t#Filter in position 3\r\nFLTPOS4\t\t=0x04\t\t\t#Filter in position 4\r\nFLTERR\t\t=0xff\t\t\t#An error has been detected in the filter drive (eg filters not present)\r\nFLTMID\t\t=0x10\t\t\t#Filter in mid position\r\n\r\nCALON\t\t=0x01\t\t\t#CALibration led power on\r\nCALOFF\t\t=0x02\t\t\t#CALibration led power off\r\n\r\n#common commands\r\n\r\n# Common commands consist of 1 byte of command immediately followed by any data\r\n# Total record length is expected to be 16 bytes for RUNSTATE\r\n\r\nGETVERSION\t=0x00\t\t\t#No data out, returns 3 byte version 
byte1.byte2.byte3\r\nCMDERROR\t=0xff\t\t\t#Reply to sent command that was not understood\r\n\r\n#Run state status commands\r\n\r\n# Run State commands are 16 byte records consisting of a single command byte imediately followed by any data\r\n# Response has same format \r\n\r\nGETONOFF\t=0x12\t\t\t#No data out, returns 1 byte on/off status\r\nGETDOOR \t=0x13\t\t\t#No data out, returns 1 byte shutter status, or SLEEP if device sleeping\r\nGETDISK \t=0x14\t\t\t#No data out, returns 1 byte disk-slide status, or SLEEP if device sleeping\r\nGETFILT\t\t=0x15\t\t\t#No data out, returns 1 byte filter position, or SLEEP if device sleeping\r\nGETCAL\t\t=0x16\t\t\t#No data out, returns 1 byte CAL led status, or SLEEP if device sleeping\r\nGETSERIAL\t=0x19\t\t\t#No data out, returns 4 byte BCD serial number (little endian)\r\nFULLSTAT\t=0x1f\t\t\t#No data, Returns 10 bytes VERSION[3],ONOFF,DOOR,DISK,FILT,CAL,??,??\r\n\r\n#run state action commands\r\nSETONOFF\t=0x21\t\t\t#1 byte out on/off status, echoes command or SLEEP\r\nSETDISK \t=0x23\t\t\t#1 byte out disk position, echoes command or SLEEP\r\nSETFILT\t\t=0x24\t\t\t#1 byte out filter position, echoes command or SLEEP\r\nSETCAL\t\t=0x25\t\t\t#1 byte out CAL led status, echoes command or SLEEP\r\n\r\n#run state service mode commands - not for general user usage, stops the disk spinning for alignment purposes\r\n\r\nSETSVCMODE1\t=0xe0\t\t\t#1 byte for service mode (SLEEP activates service mode and RUN, returns unit to normal run state), echoes command\r\n\r\nclass Controller:\r\n\r\n hiddevice = hid.device()\r\n\r\n def __init__(self):\r\n try:\r\n self.hiddevice.open(vendor_id=VENDORID, product_id=PIDRUN)\r\n self.hiddevice.set_nonblocking(0)\r\n self.isOpen = True # hid device open\r\n except (IOError, ex):\r\n print(ex)\r\n self.hiddevice.close()\r\n\r\n def close(self):\r\n # close HID device\r\n if (self.isOpen):\r\n self.hiddevice.close()\r\n self.isOpen = False\r\n\r\n ## Send command to HID device using cython-hidapi, all transactions are 2 way - write then read\r\n def sendCommand(self, command, param = 0, maxLength = 16, timeoutMs = 100):\r\n if (self.isOpen):\r\n if ((command==SETONOFF)|(command==SETDISK)|(command==SETFILT)|(command==SETCAL)|\r\n (command == GETONOFF) |(command==GETDISK)|(command==GETFILT)|(command==GETCAL)|\r\n (command == GETDOOR) |(command==GETSERIAL)|(command==FULLSTAT)) :\r\n buffer = [0x00] * maxLength\r\n buffer[1] = command\r\n buffer[2] = param\r\n result = self.hiddevice.write(buffer)\r\n answer = self.hiddevice.read(maxLength, timeoutMs)\r\n return answer\r\n return [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\r\n\r\n ## Switch on Clarity\r\n def switchOn(self):\r\n res = self.sendCommand(SETONOFF, RUN)\r\n return res[0]\r\n\r\n ## Switch off Clarity\r\n def switchOff(self):\r\n res = self.sendCommand(SETONOFF, SLEEP)\r\n return res[0]\r\n\r\n ## Get of/off status\r\n def getOnOff(self):\r\n res = self.sendCommand(GETONOFF)\r\n return res[1]\r\n\r\n # Set Clarity's disk position\r\n def setDiskPosition(self, newDiskPosition):\r\n if (newDiskPosition >= DSKPOS0) & (newDiskPosition <= DSKPOS3):\r\n res = self.sendCommand(SETDISK, newDiskPosition)\r\n return res[0]\r\n return DSKERR\r\n\r\n # Get Clarity's disk position\r\n def getDiskPosition(self):\r\n res = self.sendCommand(GETDISK)\r\n return res[1]\r\n\r\n # Set Clarity's filter position\r\n def setFilterPosition(self, filterPosition) :\r\n if (filterPosition >= FLTPOS1) & (filterPosition <= FLTPOS4):\r\n res = self.sendCommand(SETFILT, filterPosition)\r\n return 
res[0]\r\n return FLTERR\r\n\r\n # Get Clarity's filter position\r\n def getFilterPosition(self):\r\n res = self.sendCommand(GETFILT)\r\n return res[1]\r\n\r\n # Set Clarity's calibration LED on or off\r\n def setCalibrationLED(self, calLED):\r\n if (calLED != CALOFF) & (calLED != CALON):\r\n print(calLED)\r\n return -1\r\n res = self.sendCommand(SETCAL, calLED)\r\n return res[0]\r\n\r\n # Get Clarity's calibration LED status\r\n def getCalibrationLED(self):\r\n res = self.sendCommand(GETCAL)\r\n return res[1]\r\n\r\n # Get Clarity's door status\r\n def getDoor(self):\r\n res = self.sendCommand(GETDOOR)\r\n return res[1]\r\n\r\n # Get Clarity's serial number\r\n def getSerialNumber(self):\r\n res = self.sendCommand(GETSERIAL)\r\n return ((res[4]//16)*10000000+(res[4]%16)*1000000+(res[3]//16)*100000+(res[3]%16)*10000+\r\n (res[2]//16)*1000+(res[2]%16)*100+(res[1]//16)*10+(res[1]%16))\r\n\r\n # Returns 10 bytes Firmware VERSION[3], ONOFF, DOOR, DISK, FILT, CAL\r\n def getFullStat(self):\r\n res = self.sendCommand(FULLSTAT)\r\n return [(res[1],res[2],res[3]),res[4],res[5],res[6],res[7],res[8]]","repo_name":"pptman/aurox_clarity","sub_path":"aurox_clarity/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":6158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9223231225","text":"\"\"\"\nA library to display things on screen.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport math\nimport sys\n\n# define some colors\n# ----------------------------------------------------------------------------------------------------------------------\nBLACK = '\\033[30m'\nRED = '\\033[31m'\nGREEN = '\\033[32m'\nYELLOW = '\\033[33m'\nBLUE = '\\033[34m'\nMAGENTA = '\\033[35m'\nCYAN = '\\033[36m'\nWHITE = '\\033[37m'\nBRIGHT_RED = '\\033[91m'\nBRIGHT_GREEN = '\\033[92m'\nBRIGHT_YELLOW = '\\033[93m'\nBRIGHT_BLUE = '\\033[94m'\nBRIGHT_MAGENTA = '\\033[95m'\nBRIGHT_CYAN = '\\033[96m'\nBRIGHT_WHITE = '\\033[97m'\nENDC = '\\033[0m'\nBG_RED = \"\\u001b[41m\"\nBLINK = \"\\033[5m\"\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef display_progress(count,\n total,\n old_percent,\n width=50,\n completed_char=\"#\",\n empty_char=\".\",\n postpend_str=\"\"):\n \"\"\"\n Draws and updates ASCII progress bar on the stdout.\n\n :param count:\n The current count for our progress bar.\n :param total:\n The count at 100%.\n :param old_percent:\n The previous percent. Necessary to prevent updates if the percentage has not changed since the last call.\n :param width:\n How wide to draw the progress bar in characters. 
If given an odd number, it will be rounded down to the\n nearest even value.\n :param completed_char:\n The character to display for a completed chunk.\n :param empty_char:\n The character to display for an as-yet uncompleted chunk.\n :param postpend_str:\n An arbitrary (and optional) string to append to the end of the progress bar.\n\n :return: The percent value for the current state.\n \"\"\"\n\n # only allow even numbered widths\n if width % 2 != 0:\n width -= 1\n\n # calculate the percent\n percent = round((count * 1.0) / total * 100, 1)\n\n # only update the display if the percentage has changed\n if percent == old_percent and percent != 0:\n return percent\n\n # build the completed and uncompleted portions of the progress bar\n done_str = \"{0}\".format(completed_char * (int(round(percent / (100 / width), 0))))\n empty_str = \"{0}\".format(empty_char * (width - (int(round(percent / (100 / width))))))\n\n # build the X out of Y text\n count_str = \" (\" + BRIGHT_WHITE + str(count) + ENDC + \" of \" + BRIGHT_WHITE + str(total) + ENDC + \")\"\n\n # build the percent string\n percent_str = \"{0}\".format(\" \" * (4 - len(str(int(math.floor(percent)))))) + str(percent) + \"%\" + \" \"\n\n # build the complete string, and insert the percent\n progress_bar_str = \"[\" + done_str + empty_str + \"]\"\n progress_left = progress_bar_str[:int((len(progress_bar_str) / 2) - math.floor(len(percent_str) / 2)) + 2]\n progress_right = progress_bar_str[int((len(progress_bar_str) / 2) + math.ceil(len(percent_str) / 2)) + 2:]\n progress_bar_str = progress_left\n progress_bar_str += BRIGHT_YELLOW + percent_str + ENDC\n progress_bar_str += progress_right\n\n # append the count string\n progress_bar_str += count_str\n\n # append the postpend string\n progress_bar_str += postpend_str\n\n # show it\n sys.stdout.write(progress_bar_str)\n sys.stdout.flush()\n sys.stdout.write(\"\\b\" * (len(progress_bar_str))) # return to start of line\n\n # return the percent (so that we only update the percentage when it changes)\n return percent\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef display_error(*msgs):\n \"\"\"\n Given any number of args, converts those args to strings, concatenates them, and prints to stdErr.\n\n :return: Nothing.\n \"\"\"\n\n output = \"\"\n for msg in msgs:\n output += \" \" + str(msg)\n print(output.lstrip(\" \"), file=sys.stderr)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef format_string(msg_str):\n \"\"\"\n Given a string (msg) this will format it with colors based on the {{COLOR}} tags. (example {{COLOR_RED}}). 
It will\n also convert literal \\n character string into a proper newline.\n\n :param msg_str:\n The string to format.\n\n :return: The formatted string.\n \"\"\"\n\n output = msg_str.replace(r\"\\n\", \"\\n\")\n output = output.replace(\"{{\", \"{\")\n output = output.replace(\"}}\", \"}\")\n\n try:\n output = output.format(\n BLACK=BLACK,\n RED=RED,\n GREEN=GREEN,\n YELLOW=YELLOW,\n BLUE=BLUE,\n MAGENTA=MAGENTA,\n CYAN=CYAN,\n WHITE=WHITE,\n BRIGHT_RED=BRIGHT_RED,\n BRIGHT_GREEN=BRIGHT_GREEN,\n BRIGHT_YELLOW=BRIGHT_YELLOW,\n BRIGHT_BLUE=BRIGHT_BLUE,\n BRIGHT_MAGENTA=BRIGHT_MAGENTA,\n BRIGHT_CYAN=BRIGHT_CYAN,\n BRIGHT_WHITE=BRIGHT_WHITE,\n COLOR_NONE=ENDC,\n BG_RED=BG_RED,\n BLINK=BLINK,\n )\n except KeyError:\n pass\n except ValueError:\n pass\n\n output += ENDC\n\n return output\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef msg(*msgs: object):\n \"\"\"\n Given any number of args, converts those args to strings, concatenates them, and prints to stdOut.\n\n :return: Nothing.\n \"\"\"\n\n msg = \" \".join([str(item) for item in msgs])\n msg = format_string(msg)\n print(msg)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef multiple_choice_user_input(*msgs,\n legal_answers,\n alternate_legal_answers=None,\n default=None,\n blank_lines=0):\n \"\"\"\n Get a user input.\n\n :param msgs:\n The prompt to display to the user. This may be a series of strings.\n :param legal_answers:\n A list of legal answers to the prompt. These will be presented in all upper case to the user. Note, the legal\n answers must include some sort of quit option since this function will keep looping until a legal answer is\n received.\n :param alternate_legal_answers:\n An optional dictionary of alternate legal answers that will be accepted, but not displayed to the user.\n The key is the alternate legal answer and the value is the actual legal answer that this alternate maps tp.\n Primarily used to provide alternates that the user might type in but are not necessary to display. Example:\n the user may be presented with the legal answers \"YES\" and \"NO\", but additional accepted values may be \"Y\"\n and \"N\". In this case, alternate_legal_answers would be {\"Y\": \"YES\", \"N\": \"NO\"}\n :param default:\n Which of the legal answers is the default answer. If None, there is no default. Defaults to None.\n :param blank_lines:\n How many blank lines to display before the first instance of the prompt. 
Defaults to 0.\n\n :return: The legal answer that was returned.\n \"\"\"\n\n assert type(legal_answers) is list\n assert alternate_legal_answers is None or type(alternate_legal_answers) is dict\n assert default is None or type(default) is str\n assert type(blank_lines) is int\n\n options = [item.upper() for item in legal_answers]\n\n if alternate_legal_answers is not None:\n for key, value in alternate_legal_answers.items():\n assert value.upper() in options\n\n if default is not None:\n assert default.upper() in options\n\n alternate_options = list()\n if alternate_legal_answers is not None:\n alternate_options = [item.upper() for item in alternate_legal_answers.keys()]\n\n alternate_legal_answers_upper = dict()\n if alternate_legal_answers is not None:\n for key in alternate_legal_answers.keys():\n alternate_legal_answers_upper[key.upper()] = alternate_legal_answers[key]\n\n options_str = f\"({','.join(options)})\"\n\n if blank_lines > 0:\n msg(\"\\n\" * blank_lines)\n\n result = \"\"\n while result.upper() not in legal_answers and result.upper() not in alternate_options:\n msg(*msgs, options_str)\n\n if default is None:\n prompt = \"> \"\n else:\n prompt = format_string(f\"({{BRIGHT_YELLOW}}{default.upper()}{{COLOR_NONE}}) > \")\n\n try:\n result = input(prompt)\n except KeyboardInterrupt:\n sys.exit()\n\n if result == \"\" and default is not None:\n result = default.upper()\n\n if result.upper() in alternate_options:\n result = alternate_legal_answers_upper[result.upper()]\n\n return result.upper()\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef format_boolean(value,\n colorize=True,\n invert_color=False):\n \"\"\"\n Converts a boolean value into a Yes or No. If colorize is True, then Yes will be returned as green, and no will be\n returned as red.\n\n :param value:\n A boolean either passed as a boolean or as a string.\n :param colorize:\n If True, then the returned string will be formatted green for True, or red for False. Defaults to True.\n :param invert_color:\n If True (and if colorize) is True, then True will be red, and False will be green. Defaults to False.\n\n\n :return: A string containing either \"Yes\" or \"No\" depending on the original boolean value.\n \"\"\"\n\n assert type(value) is bool or (type(value) is str and value.upper() in (\"TRUE\", \"FALSE\"))\n\n if str(value).upper() == \"TRUE\":\n color = \"BRIGHT_GREEN\"\n return_value = \"Yes\"\n else:\n color = \"BRIGHT_RED\"\n return_value = \"No\"\n\n if colorize:\n return_value = format_string(\"{{\" + color + \"}}\" + return_value)\n\n return return_value\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef display_refreshable_message(*msgs):\n \"\"\"\n Given any number of args, converts those args to strings, concatenates them, and prints to stdOut. Then resets the\n output to be back at the beginning f the line ready for the next string to overwrite the just printed string. NOTE:\n THIS ONLY WORKS FOR STRINGS THAT STAY THE SAME LENGTH OR GROW IN LENGTH. If the string shrinks in length, part of\n the previous message will be left behind. 
To counter this (if you have potentially shrinking strings), it may be\n necessary to pad your strings with spaces at the end to a known length.\n\n :return: Nothing.\n \"\"\"\n\n # Print the message, flush buffer, and move back to the beginning of the line.\n message = \" \".join([str(item) for item in msgs])\n message = format_string(message)\n sys.stdout.write(message)\n sys.stdout.flush()\n sys.stdout.write(\"\\b\" * (len(message)))\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef flush_refreshable_message(length=80):\n \"\"\"\n Call to clear out the current line. Used primarily when a line has been half printed and needs to be removed. This\n does NOT move the cursor to a new line, it just clears out the current line and leaves the cursor at the beginning.\n\n :param length: How many spaces to flush. Defaults to 80.\n\n :return: Nothing.\n \"\"\"\n\n sys.stdout.write(\"\\b\" * length)\n sys.stdout.write(\" \" * length)\n sys.stdout.write(\"\\b\" * length)\n sys.stdout.flush()\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef finish_refreshable_message():\n \"\"\"\n Called when the refreshable message is no longer needed (prevents the next printed statement from overwriting the\n last version of the refreshed message).\n\n :return: Nothing.\n \"\"\"\n\n print()\n","repo_name":"bvz2000/bvzdisplaylib","sub_path":"bvzdisplaylib/displaylib.py","file_name":"displaylib.py","file_ext":"py","file_size_in_byte":12122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42389368392","text":"import discord\nfrom discord.ext import commands\n\n\nclass Moderation(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.command(aliases=[\"purge\"])\n @commands.has_role('Moderators')\n async def clear(self, ctx, amount=5):\n await ctx.channel.purge(limit=amount + 1)\n\n @commands.Cog.listener()\n async def spam_protection(self):\n return\n\n\ndef setup(client):\n client.add_cog(Moderation(client))\n","repo_name":"JustCasuallyJames/Scarlett-Discord-Bot","sub_path":"cogs/Moderation.py","file_name":"Moderation.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"920139989","text":"from flask import Flask, render_template, request, json\n#from flaskext.mysql import MySQL\n\n#initializations\napp = Flask(__name__)\n#mysql = MySQL()\n \n# MySQL configurations\n# app.config['MYSQL_DATABASE_USER'] = 'root'\n# app.config['MYSQL_DATABASE_PASSWORD'] = 'nettuts'\n# app.config['MYSQL_DATABASE_DB'] = 'testDB'\n# app.config['MYSQL_DATABASE_HOST'] = 'localhost'\n\n# mysql.init_app(app)\n# conn = mysql.connect()\n# cursor = conn.cursor()\n\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef main():\n drugs = []\n\n # d1=('Makena','http://www.makena.com/resources','hydroxyprogesterone caproate','AMAG Pharmaceuticals')\n # d2=('Humira','https://www.humira.com/humira-complete/injection','adalimumab','Abbvie')\n # d3=('Enbrel','/index','etanercept','Amgen')\n # d4=('Stelara','/index','ustekinumab','Janssen')\n # d5=('Cimzia','/index','certoluzimab pegol','UCB')\n # d6=('Rasuvo','http://www.rasuvo.com/using-rasuvo/how-to-use-rasuvo/','methotrexate','Medac Pharma')\n # d7=('Epi Pen','/index','','Mylan')\n # d8=('Epi Pen Junior','/index','','Mylan')\n # 
d9=('Levemir','/index','detemir','Novo Nordisk')\n # d10=('Lantus','/index','glargine','Sanofi')\n # d11=('Basaglar','/index','glargine','Lilly')\n # d12=('Toujeo','/index','glargine','Sanofi')\n # d13=('Tresiba','/index','degludec','Novo Nordisk')\n # d14=('Novolog','/index','aspart','NovoNordisk')\n # d15=('Humalog','/index','lispro','Lilly')\n # d16=('GLP1s','/index','','')\n # d17=('Trulicity','/index','dulaglutide','Lilly')\n # d18=('Taltz','/index','','Lilly')\n # d19=('Lovenox','/index','','Sanofi')\n\n d1=('Lovenox','https://www.lovenox.com/patient-self-injection-video','','Sanofi')\n d2=('Makena','http://www.makena.com/resources','hydroxyprogesterone caproate','AMAG Pharmaceuticals')\n d3=('Humira','https://www.humira.com/humira-complete/injection','adalimumab','Abbvie')\n d4=('Enbrel','https://www.enbrel.com/support/how-to-take-enbrel?isipaid=true&utm_term=how%20to%20take%20enbrel&gclid=EAIaIQobChMI49aYxPjR3AIVCNRkCh2bHA8NEAAYASACEgI7fvD_BwE&gclsrc=aw.ds&dclid=CPW0tMb40dwCFcybZAodQL4Fzw','etanercept','Amgen')\n d5=('Stelara (Psoriasis)','https://www.stelarainfo.com/stelara-injection-support','ustekinumab','Janssen')\n d29=('Stelara (Crohns)','https://www.stelarainfo.com/crohns-disease/patient-support/injection-infusion-support','ustekinumab','Janssen')\n d6=('Cimzia','https://www.cimzia.com/injection-training','certoluzimab pegol','UCB')\n d7=('Rasuvo','http://www.rasuvo.com/using-rasuvo/how-to-use-rasuvo/','methotrexate','Medac Pharma')\n d8=('Epi Pen','https://www.epipen.com/en/about-epipen-and-generic/how-to-use-epipen','epinephrine','Mylan')\n d9=('Epi Pen Junior','https://www.epipen.com/en/about-epipen-and-generic/how-to-use-epipen','epinephrine','Mylan')\n d10=('Levemir','https://www.levemir.com/starting-on-levemir.html','detemir','Novo Nordisk')\n d11=('Lantus','https://www.lantus.com/using-solostar-insulin-pen','glargine','Sanofi')\n d12=('Basaglar','https://www.basaglar.com/en/beginning-basaglar#injection-instructions','glargine','Lilly')\n d13=('Toujeo','https://www.toujeo.com/how-to-use-toujeo-insulin','glargine','Sanofi')\n d14=('Tresiba','https://www.tresiba.com/tresiba-flextouch/using-tresiba-flextouch.html','degludec','Novo Nordisk')\n d15=('Novolog','https://www.rapidactinginsulin.com/novolog/using-novolog/videos-and-downloads.html','aspart','NovoNordisk')\n d16=('Humalog','https://www.humalog.com/type-2-diabetes/how-to-use-u-100-kwikpen/','lispro','Lilly')\n d17=('Trulicity','https://www.trulicity.com/how-to-use/non-insulin-pen/','dulaglutide','Lilly')\n d18=('Taltz','https://www.taltz.com/taking-taltz/how-to-inject','','Lilly')\n d19=('Victoza','https://www.victoza.com/get-started-using-victoza-/your-first-injection.html','liraglutide','')\n d20=('Bydureon (Pen)','https://www.bydureon.com/pen/taking-bydureon/your-first-bydureon-injection.html','exenatide','')\n d28=('Bydureon (B Cise)','https://www.bydureon.com/using-bcise/how-to-use-bydureon-bcise.html','exenatide','')\n d21=('Byetta','https://www.azpicentral.com/byetta/ifu_byetta.pdf#page=1','','')\n d22=('Fiasp','https://www.rapidactinginsulin.com/fiasp/using-fiasp/fiasp-flextouch-and-vial.html','aspart','Novo Nordisk')\n d23=('Tremfya','https://www.tremfya.com/what-is-tremfya','','')\n d24=('Ozempic','https://www.ozempic.com/how-to-use/the-ozempic-pen.html','semaglutide','')\n d25=('Orencia','https://www.orencia.bmscustomerconnect.com/how-to-take-orencia/clickject-autoinjector','','')\n d26=('Kineret','https://www.kineretrx.com/ra/using-kineret','','')\n 
d27=('Actemra','https://www.actemra.com/ra/taking-actemra/taking-actemra-sc-injections.html','','')\n\n\n drugs.append(d1)\n drugs.append(d2)\n drugs.append(d3)\n drugs.append(d4)\n drugs.append(d5)\n drugs.append(d6)\n drugs.append(d7)\n drugs.append(d8)\n drugs.append(d9)\n drugs.append(d10)\n drugs.append(d11)\n drugs.append(d12)\n drugs.append(d13)\n drugs.append(d14)\n drugs.append(d15)\n drugs.append(d16)\n drugs.append(d17)\n drugs.append(d18)\n drugs.append(d19)\n drugs.append(d20)\n drugs.append(d21)\n drugs.append(d22)\n drugs.append(d23)\n drugs.append(d24)\n drugs.append(d25)\n drugs.append(d26)\n drugs.append(d27)\n drugs.append(d28)\n drugs.append(d29)\n\n drugs.sort(key=lambda x: x[0])\n\n return render_template('index.html', drugs=drugs)\n\n\n\n@app.route('/residents')\ndef residents():\n return render_template('residents.html')\n\n\n\n\n# @app.route('/signUp',methods=['POST']) \n# def signUp():\n \n# # read the posted values from the UI\n# _name = request.form['inputName']\n# _email = request.form['inputEmail']\n# _password = request.form['inputPassword']\n\n# out = \"Name: '\"+_name+\"' Email: '\"+_email+\"'\"\n# print \"CASEY\"\n# print out\n\n# # validate the received values\n# if _name and _email and _password:\n# \t\tcursor.callproc('sp_createUser',(_name,_email,_password))#not using HASH. BAD.\n# \t\tdata = cursor.fetchall()\n# \t\tif len(data) is 0:\n# \t\t\tconn.commit()\n# \t\t\tprint \"User: \"+_name+\" created successfully.\"\n# \t\t\treturn json.dumps({'message':'User created successfully !'})\n# \t\telse:\n# \t\t\tprint \"User: \"+_name+\" not created.\"\n# \t\t\treturn json.dumps({'error':str(data[0])})\n\n# else:\n# return json.dumps({'html':'Enter the required fields'})\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"caseymorris61/injectables","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13366283101","text":"import torch\nimport torch.nn as nn\n\nclass TinyModel(torch.nn.Module):\n\n def forward(self,x):\n \n return torch.reshape(x,(2, 2))\n\ntinymodel = TinyModel()\n\ninput = torch.arange(4)\noutput = torch.reshape(input, (2, 2))\n\nprint(\"This is the input:\",input)\nprint(\"----------------------------------------------------\")\nprint(\"This is the output:\", output)\nsaveOnnx=True\nloadModel=False\nsavePtModel = False\n\n\nif savePtModel :\n torch.save({'model_state_dict':model.state_dict()}, name + \".pt\")\n\nif saveOnnx:\n torch.onnx.export(\n tinymodel,\n input,\n \"Reshape\" + \".onnx\",\n export_params=True\n )","repo_name":"Neel-Shah-29/Scripts-for-ONNX-Models","sub_path":"Reshape.py","file_name":"Reshape.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3893241011","text":"def decode(num):\n num = list(num)\n n = len(num)\n p = 0\n for i in range(n-1, -1, -1):\n if not num[i].isdigit():\n if num[i] == '-':\n num[i] = -1\n elif num[i] == '=':\n num[i] = -2\n num[i] = int(num[i]) * (5 ** p)\n p += 1\n print(num)\n return sum(num)\n\ndef recode(num):\n s = 0\n i = 0\n while True:\n s += 5 ** i * 2\n if s >= num:\n break\n i += 1\n n = i + 1\n id = []\n for i in range(n-1, -1, -1):\n s = 5 ** i\n rng = [1, 2, 0, -1, -2]\n d = [abs(num - s * r) for r in rng]\n m = min(d)\n v = rng[d.index(m)] \n id.append(v)\n num = num - v * s\n for i in range(len(id)):\n if 
id[i] == -2:\n id[i] = '='\n elif id[i] == -1:\n id[i] = '-'\n else:\n id[i] = str(id[i])\n return ''.join(id)\n\ninput = \"\"\"1=-0-2\n12111\n2=0=\n21\n2=01\n111\n20012\n112\n1=-1=\n1-12\n12\n1=\n122\"\"\"\n\ninput = open('data/day25.txt').read()\ns = 0\nfor num in input.splitlines():\n s += decode(num)\nprint(recode(s))\n","repo_name":"mdequeljoe/aoc2022","sub_path":"day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39868375604","text":"import numpy as np\r\nimport pygimli as pg\r\n\r\nfileName = \"Unsorted ABEM WN.dat\"\r\ndataset = pg.physics.ert.load(fileName)\r\n\r\nx = pg.x(dataset)\r\nA = np.array(x[dataset(\"a\")])\r\nB = np.array(x[dataset(\"b\")])\r\n\r\ndepthOfInvestigations = (B-A)*0.17\r\ndepthOfInvestigations = list(depthOfInvestigations)\r\ndepthOfInvestigations.sort() \r\n\r\nresult = []\r\nfor item in depthOfInvestigations:\r\n if item not in result:\r\n result.append(item)\r\ndepthOfInvestigations = np.array(result.copy())\r\n\r\ndepthOfInvestigations = np.around(depthOfInvestigations, 2)\r\n","repo_name":"brianhavk/Python","sub_path":"pyGIMLi/Depth of Investigation for ABEM WN.py","file_name":"Depth of Investigation for ABEM WN.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"81731547","text":"#!/usr/bin/env python3\n\n\n'''\n* Team Id : HB_2359\n* Author List : Sriram Thangavel, Sree Harish, Akshith, Prasannakumar\n* Filename: Controller.py\n* Theme: \n* Functions: \n* Global Variables: \n'''\n\n\n################### IMPORT MODULES #######################\n\nimport rospy\nimport sys\nimport cv2\nfrom std_msgs.msg import String, Int32\nfrom cv_basics.msg import aruco_data\nimport socket\nimport signal\nimport math\nimport numpy as np\nimport time\n\n################### VARIABLES AND CONSTANTS #######################\nPI = 3.14\n\nxList, yList, xListFinal, yListFinal = [], [], [], []\ntheta_goals = [0]\npen_status = 0\n\n# positions and orientation\nhola_x = 0\nhola_y = 0\nhola_theta = 0\ntask_status = 0\n\npenup = 0\ntaskstart = 0\n\n\n# distance from the center to the wheel\nd = 0.17483\n\n\n# wheel angle\nwheel_angle = PI/3\n\n# kp ratio\nl_kp = 27\nr_kp = 250\n\n# kp values\nkp_x = l_kp*1\nkp_y = l_kp*1\nkp_z = r_kp*1\n\nkd_x = 42\nkd_y = 42\nkd_z = 42\n\nki_x = 0\nki_y = 0\nki_z = 0\n\nprev_error_x = 0\nprev_error_y = 0\nprev_error_z = 0\n\nip = '192.168.43.50'\n\n##################### FUNCTION DEFINITIONS #######################\n\n\ndef signal_handler(sig, frame):\n rospy.loginfo('Clean-up !')\n cleanup()\n sys.exit(0)\n\n\ndef cleanup():\n socket.close()\n rospy.loginfo(\"cleanup done\")\n\n\ndef aruco_feedback_Cb(msg):\n global hola_x, hola_y, hola_theta\n hola_x = msg.x\n hola_y = msg.y\n hola_theta = round(msg.theta, 2)\n\n\ndef thresh_img(img_path):\n img = cv2.imread(img_path)\n img = cv2.resize(img, (400, 400), interpolation=cv2.INTER_AREA)\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray_img, 100, 200)\n ret, thresh = cv2.threshold(edges, 127, 255, 0)\n return ret, thresh\n\n\ndef get_contour(thresh):\n contours, hierarchy = cv2.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours_ext, _ = cv2.findContours(\n thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n return contours, contours_ext\n\n\ndef robot_contours():\n global xList, yList, xListFinal, yListFinal\n ret, 
thresh = thresh_img('/home/prasannakumar/Desktop/taskdrawing_ws/src/cv_basics/scripts/images/robotFinal.png')\n cnts, cnts_ext = get_contour(thresh)\n\n for i in cnts_ext:\n for j in i:\n xList.append(j[0][0])\n yList.append(j[0][1])\n xListFinal.append(xList)\n yListFinal.append(yList)\n\ndef pen_coordinate(xList):\n initial_coordinate = xList[0]\n final_coordinate = xList[-1]\n return initial_coordinate,final_coordinate\n\ndef pen(xList, index):\n\n global penup\n # initial_coordinate, final_coordinate = pen_coordinate(xList)\n if 0 0:\n for i in range(len(target_word_spacy_tokens)):\n current_token = target_word_spacy_tokens[i]\n if current_token.has_vector:\n word_vec = np.add(word_vec, current_token.vector)\n word_vec = np.true_divide(word_vec,number_of_vecs) #avg of word embeddings for individual tokens.\n return word_vec","repo_name":"sheffieldnlp/cwi","sub_path":"src/features/word_emb_features.py","file_name":"word_emb_features.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"31516323767","text":"def char_range(first, second):\r\n for ch in range(ord(first), ord(second)):\r\n if ch != ord(first) and ch != ord(second):\r\n print(chr(ch), end=\" \")\r\n\r\n\r\nfirst_letter = input()\r\nsecond_letter = input()\r\n\r\nchar_range(first_letter, second_letter)\r\n","repo_name":"MihailPo91/SoftUni","sub_path":"Fundamentals/Functions/characters_in_range.py","file_name":"characters_in_range.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3941262847","text":"from collections import Counter\nclass Solution:\n def countPairs(self, deliciousness: List[int]) -> int:\n \n count = Counter(deliciousness)\n good_meal = 0\n n= 22\n \n for meal in deliciousness:\n count[meal] -= 1\n for i in range(n):\n good_meal += count.get(2**i - meal,0)\n \n return good_meal % (10**9 + 7)\n \n \n ","repo_name":"Beki4382/Competitive-Programming","sub_path":"1711-count-good-meals/1711-count-good-meals.py","file_name":"1711-count-good-meals.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19969687027","text":"from operator import invert\nfrom statistics import mode\nfrom tabnanny import verbose\nfrom django.contrib import admin\nfrom django.db.models import Q, Model\nfrom django.shortcuts import redirect, render\nfrom typing import List\n\nfrom . 
import models\nfrom app_invest.models import Investition\n\n\nadmin.site.site_header = \\\nadmin.site.site_title = \\\nadmin.site.index_title = 'Tradiary'\n\n\n@admin.register(models.Asset)\nclass AssetAdmin(admin.ModelAdmin):\n pass\n\n\n@admin.register(models.Pair)\nclass AssetAdmin(admin.ModelAdmin):\n list_display = 'base_asset', 'second_asset', 'current_quote_'\n\n def current_quote_(self, obj: models.Pair):\n return f'{float(obj.current_quote)} {obj.base_asset}' if obj.current_quote else None\n\n\n@admin.register(models.Trade)\nclass AssetAdmin(admin.ModelAdmin):\n list_display = (\n 'transaction_date',\n 'pair',\n 'side',\n 'price',\n 'amount',\n 'total'\n )\n\n\nclass InvestitionTotalsFake(Model):\n class Meta:\n managed = False\n verbose_name = 'Investition totals'\n\n\n@admin.action(description='Calculate totals for ...')\ndef calculate_totals_for(modeladmin, request, queryset: List[Investition]):\n data = []\n\n totals = {\n 'spend_base_asset': 0,\n 'current_price_in_base_asset': 0,\n 'grow_amount': 0\n }\n\n\n for investition in queryset:\n data.append(investition)\n\n if investition.sell_trade:\n sell_sum = investition.sell_trade.total\n else:\n sell_sum = investition.calculated_grow_amount + investition.buy_trade.total\n\n totals['spend_base_asset'] += investition.buy_trade.total\n totals['current_price_in_base_asset'] += sell_sum\n\n totals['grow_amount'] = totals['current_price_in_base_asset'] - totals['spend_base_asset']\n totals['grow_percentage'] = ((totals['current_price_in_base_asset'] / totals['spend_base_asset']) - 1 ) * 100\n\n return render(\n request,\n 'admin/investitions/investition_totals.html',\n {\n 'data': data,\n 'totals': totals,\n 'base_asset': investition.buy_trade.pair.base_asset,\n 'cl': {\n 'opts': InvestitionTotalsFake._meta\n },\n 'opts': InvestitionTotalsFake._meta,\n }\n )\n\n\n@admin.register(Investition)\nclass InvestitionAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'buy_trade',\n 'sell_trade',\n 'duration',\n 'fixed_grow_',\n 'current_grow_',\n 'current_investition_price_',\n )\n\n list_filter = 'buy_trade__pair',\n actions = [calculate_totals_for]\n\n def fixed_grow_(self, obj: Investition):\n result = []\n\n if obj.fixed_grow_amount:\n result.append(f'{obj.fixed_grow_amount:+} {obj.buy_trade.pair.base_asset}')\n\n if obj.fixed_grow_percentage:\n result.append(f'{obj.fixed_grow_percentage:+.2f}%')\n\n if result:\n return ' / '.join(result)\n\n def current_grow_(self, obj: Investition):\n result = []\n\n if not obj.sell_trade and obj.buy_trade.pair.current_quote:\n result.append(\n f'{float(obj.calculated_grow_amount):+} '\n f'{obj.buy_trade.pair.base_asset}'\n )\n\n result.append(f'{float(obj.calculated_grow_percentage):.2f}%')\n\n if result:\n return ' / '.join(result)\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'buy_trade':\n kwargs['queryset'] = \\\n models.Trade.objects\\\n .filter(side=models.Trade.BUY)\n elif db_field.name == 'sell_trade':\n kwargs['queryset'] = \\\n models.Trade.objects\\\n .filter(side=models.Trade.SELL)\n\n return super(InvestitionAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)\n\n def current_investition_price_(self, obj: Investition):\n if not obj.sell_trade and obj.buy_trade.pair.current_quote:\n return f'{float(obj.buy_trade.pair.current_quote * obj.buy_trade.amount)} 
{obj.buy_trade.pair.base_asset}'\n","repo_name":"DAVIDhaker/tradiary","sub_path":"app_trade/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18471994555","text":"import requests\nph=7906722499\nmsg='hello'\nurl = \"https://www.fast2sms.com/dev/bulk\"\n\nquerystring = {\"authorization\":\"pJFuR4e1ZXH7UgOsjdNkmoWwtCEqfYn5v0iS9aVGxKc6M83yThf5ZwkME37e8ODYcXiq0bNrzh4Jx2Pm\",\"sender_id\":\"SHRIRA\",\"message\":msg,\"language\":\"english\",\"route\":\"p\",\"numbers\":ph}\n\nheaders = {\n 'cache-control': \"no-cache\"\n}\n\nresponse = requests.request(\"GET\", url, headers=headers, params=querystring)\n\nprint(response.text)","repo_name":"mukeshpal15/realestatere","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1519407474","text":"\n\"\"\"\nCoadding module.\n\n.. include common links, assuming primary doc root is up one directory\n.. include:: ../include/links.rst\n\"\"\"\nimport inspect\n\nfrom IPython import embed\n\nimport numpy as np\n\nfrom astropy.io import fits\n\nfrom pypeit.spectrographs.util import load_spectrograph\nfrom pypeit.onespec import OneSpec\nfrom pypeit import sensfunc\nfrom pypeit import specobjs\nfrom pypeit import msgs\nfrom pypeit.core import coadd, flux_calib\nfrom pypeit.history import History\n\n\nclass CoAdd1D:\n\n @classmethod\n def get_instance(cls, spec1dfiles, objids, spectrograph=None, par=None, sensfile=None, debug=False, show=False):\n \"\"\"\n Superclass factory method which generates the subclass instance. See __init__ docs for arguments.\n \"\"\"\n pypeline = fits.getheader(spec1dfiles[0])['PYPELINE'] + 'CoAdd1D'\n return next(c for c in cls.__subclasses__() if c.__name__ == pypeline)(\n spec1dfiles, objids, spectrograph=spectrograph, par=par, sensfile=sensfile, debug=debug, show=show)\n\n def __init__(self, spec1dfiles, objids, spectrograph=None, par=None, sensfile=None, debug=False, show=False):\n \"\"\"\n\n Args:\n spec1dfiles (list):\n List of strings which are the spec1dfiles\n objids (list):\n List of strings which are the objids for the object in each spec1d file that you want to coadd\n spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`, optional):\n par (:class:`pypeit.par.pypeitpar.Coadd1DPar`, optional):\n Pypeit parameter set object for Coadd1D\n sensfile (str, optional):\n File holding the sensitivity function. This is required for echelle coadds only.\n debug (bool, optional)\n Debug. Default = False\n show (bool, optional):\n Debug. 
Default = True\n \"\"\"\n # Instantiate attributes\n self.spec1dfiles = spec1dfiles\n self.objids = objids\n\n # Optional\n if spectrograph is not None:\n self.spectrograph = spectrograph\n else:\n header = fits.getheader(spec1dfiles[0])\n self.spectrograph = load_spectrograph(header['PYP_SPEC'])\n if par is None:\n self.par = self.spectrograph.default_pypeit_par()['coadd1d']\n else:\n self.par = par\n #\n self.sensfile = sensfile\n self.debug = debug\n self.show = show\n self.nexp = len(self.spec1dfiles) # Number of exposures\n self.coaddfile = None\n\n def run(self):\n \"\"\"\n Runs the coadding\n \"\"\"\n\n # Load the data\n self.waves, self.fluxes, self.ivars, self.gpms, self.header = self.load_arrays()\n # Coadd the data\n self.wave_grid_mid, self.wave_coadd, self.flux_coadd, self.ivar_coadd, self.gpm_coadd = self.coadd()\n # Scale to a filter magnitude?\n if self.par['filter'] != 'none':\n scale = flux_calib.scale_in_filter(self.wave_coadd, self.flux_coadd, self.gpm_coadd, self.par)\n self.flux_coadd *= scale\n self.ivar_coadd = self.ivar_coadd / scale**2\n\n def load_arrays(self):\n \"\"\"\n Load the arrays we need for performing coadds.\n\n Returns:\n tuple:\n - waves, fluxes, ivars, gpms, header\n \"\"\"\n for iexp in range(self.nexp):\n sobjs = specobjs.SpecObjs.from_fitsfile(self.spec1dfiles[iexp], chk_version=self.par['chk_version'])\n indx = sobjs.name_indices(self.objids[iexp])\n if not np.any(indx):\n msgs.error(\"No matching objects for {:s}. Odds are you input the wrong OBJID\".format(self.objids[iexp]))\n wave_iexp, flux_iexp, ivar_iexp, gpm_iexp, meta_spec, header = \\\n sobjs[indx].unpack_object(ret_flam=self.par['flux_value'], extract_type=self.par['ex_value'])\n # Allocate arrays on first iteration\n # TODO :: We should refactor to use a list of numpy arrays, instead of a 2D numpy array.\n if iexp == 0:\n waves = np.zeros(wave_iexp.shape + (self.nexp,))\n fluxes = np.zeros_like(waves)\n ivars = np.zeros_like(waves)\n gpms = np.zeros_like(waves, dtype=bool)\n header_out = header\n if 'RA' in sobjs[indx][0].keys() and 'DEC' in sobjs[indx][0].keys():\n header_out['RA_OBJ'] = sobjs[indx][0]['RA']\n header_out['DEC_OBJ'] = sobjs[indx][0]['DEC']\n # Check if the arrays need to be padded\n # TODO :: Remove the if/elif statement below once these 2D arrays have been converted to a list of 1D arrays\n if wave_iexp.shape[0] > waves.shape[0]:\n padv = [(0, wave_iexp.shape[0]-waves.shape[0]), (0, 0)]\n waves = np.pad(waves, padv, mode='constant', constant_values=(0, 0))\n fluxes = np.pad(fluxes, padv, mode='constant', constant_values=(0, 0))\n ivars = np.pad(ivars, padv, mode='constant', constant_values=(0, 1))\n gpms = np.pad(gpms, padv, mode='constant', constant_values=(False, False))\n elif wave_iexp.shape[0] < waves.shape[0]:\n padv = [0, waves.shape[0]-wave_iexp.shape[0]]\n wave_iexp = np.pad(wave_iexp, padv, mode='constant', constant_values=(0, 0))\n flux_iexp = np.pad(flux_iexp, padv, mode='constant', constant_values=(0, 0))\n ivar_iexp = np.pad(ivar_iexp, padv, mode='constant', constant_values=(0, 1))\n gpm_iexp = np.pad(gpm_iexp, padv, mode='constant', constant_values=(False, False))\n # Store the information\n waves[...,iexp], fluxes[...,iexp], ivars[..., iexp], gpms[...,iexp] \\\n = wave_iexp, flux_iexp, ivar_iexp, gpm_iexp\n return waves, fluxes, ivars, gpms, header_out\n\n def save(self, coaddfile, telluric=None, obj_model=None, overwrite=True):\n \"\"\"\n Generate a :class:`OneSpec` object and write it to disk.\n\n Args:\n coaddfile (str):\n File to output coadded 
spectrum to.\n telluric (`numpy.ndarray`_):\n obj_model (str):\n overwrite (bool):\n Overwrite existing file?\n \"\"\"\n self.coaddfile = coaddfile\n wave_gpm = self.wave_coadd > 1.0\n # Generate the spectrum container object\n onespec = OneSpec(wave=self.wave_coadd[wave_gpm], wave_grid_mid=self.wave_grid_mid[wave_gpm], flux=self.flux_coadd[wave_gpm],\n PYP_SPEC=self.spectrograph.name, ivar=self.ivar_coadd[wave_gpm],\n mask=self.gpm_coadd[wave_gpm].astype(int),\n ext_mode=self.par['ex_value'], fluxed=self.par['flux_value'])\n\n onespec.head0 = self.header\n\n # Add history entries for coadding.\n history = History()\n history.add_coadd1d(self.spec1dfiles, self.objids)\n\n # Add on others\n if telluric is not None:\n onespec.telluric = telluric[wave_gpm]\n if obj_model is not None:\n onespec.obj_model = obj_model[wave_gpm]\n # Write\n onespec.to_file(coaddfile, history=history, overwrite=overwrite)\n\n def coadd(self):\n \"\"\"\n Dummy method overloaded by sub-classes\n\n Returns:\n :obj:`tuple`: four items\n - wave\n - flux\n - ivar\n - gpm\n\n \"\"\"\n return (None,)*4\n\n\nclass MultiSlitCoAdd1D(CoAdd1D):\n \"\"\"\n Child of CoAdd1d for Multislit and Longslit reductions\n \"\"\"\n\n def __init__(self, spec1dfiles, objids, spectrograph=None, par=None, sensfile=None, debug=False, show=False):\n \"\"\"\n See `CoAdd1D` doc string\n \"\"\"\n super().__init__(spec1dfiles, objids, spectrograph=spectrograph, par = par, sensfile = sensfile,\n debug = debug, show = show)\n\n def coadd(self):\n \"\"\"\n Perform coadd for for Multi/Longslit data using multi_combspec\n\n Returns:\n tuple\n - wave_grid_mid, wave, flux, ivar, gpm\n\n \"\"\"\n return coadd.multi_combspec(\n self.waves, self.fluxes, self.ivars, self.gpms,\n sn_smooth_npix=self.par['sn_smooth_npix'], wave_method=self.par['wave_method'],\n dv=self.par['dv'], wave_grid_min=self.par['wave_grid_min'], wave_grid_max=self.par['wave_grid_max'],\n spec_samp_fact=self.par['spec_samp_fact'], ref_percentile=self.par['ref_percentile'],\n maxiter_scale=self.par['maxiter_scale'], sigrej_scale=self.par['sigrej_scale'],\n scale_method=self.par['scale_method'], sn_min_medscale=self.par['sn_min_medscale'],\n sn_min_polyscale=self.par['sn_min_polyscale'], maxiter_reject=self.par['maxiter_reject'],\n lower=self.par['lower'], upper=self.par['upper'], maxrej=self.par['maxrej'], sn_clip=self.par['sn_clip'],\n debug=self.debug, show=self.show)\n\n\n\n\n\nclass EchelleCoAdd1D(CoAdd1D):\n \"\"\"\n Child of CoAdd1d for Echelle reductions\n \"\"\"\n\n def __init__(self, spec1dfiles, objids, spectrograph=None, par=None, sensfile=None, debug=False, show=False):\n \"\"\"\n See `CoAdd1D` doc string\n\n \"\"\"\n super().__init__(spec1dfiles, objids, spectrograph=spectrograph, par = par, sensfile = sensfile,\n debug = debug, show = show)\n\n def coadd(self):\n \"\"\"\n Perform coadd for for echelle data using ech_combspec\n\n Returns:\n tuple\n - wave_grid_mid, wave, flux, ivar, gpm\n\n \"\"\"\n weights_sens = sensfunc.SensFunc.sensfunc_weights(self.sensfile, self.waves,\n debug=self.debug)\n wave_grid_mid, (wave_coadd, flux_coadd, ivar_coadd, gpm_coadd), order_stacks \\\n = coadd.ech_combspec(self.waves, self.fluxes, self.ivars, self.gpms, weights_sens,\n nbest=self.par['nbest'],\n sn_smooth_npix=self.par['sn_smooth_npix'],\n wave_method=self.par['wave_method'],\n spec_samp_fact=self.par['spec_samp_fact'],\n ref_percentile=self.par['ref_percentile'],\n maxiter_scale=self.par['maxiter_scale'],\n sigrej_scale=self.par['sigrej_scale'],\n 
scale_method=self.par['scale_method'],\n sn_min_medscale=self.par['sn_min_medscale'],\n sn_min_polyscale=self.par['sn_min_polyscale'],\n maxiter_reject=self.par['maxiter_reject'],\n lower=self.par['lower'], upper=self.par['upper'],\n maxrej=self.par['maxrej'], sn_clip=self.par['sn_clip'],\n debug=self.debug, show=self.show)\n\n return wave_grid_mid, wave_coadd, flux_coadd, ivar_coadd, gpm_coadd\n\n\n","repo_name":"kadart/pypeit_nte_uv","sub_path":"coadd1d.py","file_name":"coadd1d.py","file_ext":"py","file_size_in_byte":11200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18354032722","text":"# pandas and numpy for data manipulation\nimport pandas as pd\nimport numpy as np\nimport sqlite3\n\nfrom bokeh.plotting import Figure\nfrom bokeh.models import (\n CategoricalColorMapper,\n HoverTool,\n ColumnDataSource,\n Panel,\n FuncTickFormatter,\n SingleIntervalTicker,\n LinearAxis,\n)\nfrom bokeh.models.widgets import (\n CheckboxGroup,\n Slider,\n RangeSlider,\n Tabs,\n CheckboxButtonGroup,\n TableColumn,\n DataTable,\n Select,\n)\nfrom bokeh.layouts import column, row, WidgetBox\n\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\n\n\nplot = \"\"\n\n\ndef mgstat_tab(db):\n def make_dataset(mgstat_list):\n newdf = mgstat[mgstat_list]\n # Convert dataframe to column data source\n return ColumnDataSource(newdf)\n\n def make_plot(src):\n # Blank plot with correct labels\n p = Figure(\n plot_width=1024,\n plot_height=768,\n x_axis_type=\"datetime\",\n title=\"mgstat\",\n output_backend=\"webgl\",\n )\n cm = plt.get_cmap(\"gist_rainbow\")\n\n numlines = len(mgstat.columns)\n mypal = [cm(1.0 * i / numlines) for i in range(numlines)]\n mypal = list(map(lambda x: colors.rgb2hex(x), mypal))\n col = 0\n for key in src.data.keys():\n if key == \"datetime\":\n continue\n l = key + \" \"\n col = col + 1\n p.line(\n mgstat.index.values,\n mgstat[key],\n line_width=1,\n alpha=0.8,\n name=key,\n legend=key,\n color=mypal[col],\n )\n p.legend.click_policy = \"hide\"\n return p\n\n def update(attr, old, new):\n print(\"update called\")\n mgstats_to_plot = [mgstat_selection.labels[i] for i in mgstat_selection.active]\n new_src = make_dataset(mgstats_to_plot)\n src.data = new_src.data\n plot = make_plot(src)\n layout.children[1] = plot\n\n # get data from DB, setup index\n mgstat = pd.read_sql_query(\"select * from mgstat\", db)\n mgstat.index = pd.to_datetime(mgstat[\"datetime\"])\n mgstat = mgstat.drop([\"datetime\"], axis=1)\n mgstat.index.name = \"datetime\"\n mgstat_selection = CheckboxGroup(labels=list(mgstat.columns), active=[0, 5])\n\n mgstat_list = [mgstat_selection.labels[i] for i in mgstat_selection.active]\n src = make_dataset(mgstat_list)\n plot = make_plot(src)\n\n mgstat_selection.on_change(\"active\", update)\n controls = WidgetBox(mgstat_selection)\n layout = row(controls, plot)\n tab = Panel(child=layout, title=\"mgstat\")\n return tab\n","repo_name":"murrayo/yape","sub_path":"yapesrv/scripts/mgstat_tab.py","file_name":"mgstat_tab.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"70500490729","text":"# PE problem 12 \n# find the value of triangle number that has over five hundred divisors\n\nimport time\nimport math\nfrom math import ceil\n\nstartTime = time.time()\n\n##def getFactors():\n## lists = [1]\n## number = 1\n## factors = []\n## counts = 0\n## n = 
0\n##\n## while counts < 250:\n## number += 1\n## lists.append(lists[-1]+number)\n## for i in lists:\n## factors = []\n## counts = 0\n## for factor in range(1,int(math.sqrt(i))+1):\n## if i % factor == 0:\n## factors.append(factor)\n## counts += 1\n## if counts > 250:\n## return i\n## break\n## print(i,factors)\n\ndef getFactors(x):\n cnt = 0\n rt = math.sqrt(x)\n if rt == int(rt):\n cnt = 1\n rt = math.ceil(rt)\n for a in range(1,rt):\n if x % a == 0:\n cnt += 2\n return cnt\n\ni = 0\nwhile True:\n if getFactors(i*(i+1)/2) > 500:\n print(i*(i+1)/2)\n break\n i += 1\n\n\nprint(\"--- %s seconds ---\" % (time.time() - startTime))\n","repo_name":"chicshin/Algorithms","sub_path":"Project_Euler/divisible_tri_num.py","file_name":"divisible_tri_num.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3820982299","text":"\"\"\"\nFunctions to read the stdout or stderr of a job.\n\"\"\"\n\nimport os\nfrom .info import SlurmJob\n\ndef read_stdout( jobid : int ) -> str:\n \"\"\"\n Read the stdout of a job.\n\n Parameters\n ----------\n jobid : int\n The job-id whose stdout to read.\n \n Returns\n -------\n stdout : str\n The stdout of the job.\n \"\"\"\n job = SlurmJob( jobid )\n if os.path.exists( job.stdout ):\n with open( job.stdout , \"r\" ) as f:\n stdout = f.read()\n else:\n print( \"The stdout file does not exist (yet).\" ) \n return\n return stdout\n\n\n\ndef read_stderr( jobid : int ) -> str:\n \"\"\"\n Read the stderr of a job.\n\n Parameters\n ----------\n jobid : int\n The job-id whose stderr to read.\n \n Returns\n -------\n stderr : str\n The stderr of the job.\n \"\"\"\n job = SlurmJob( jobid )\n if os.path.exists( job.stderr ):\n with open( job.stderr , \"r\" ) as f:\n stderr = f.read()\n else:\n print( \"The stderr file does not exist (yet).\" ) \n return\n return stderr\n","repo_name":"NoahHenrikKleinschmidt/slurmtools","sub_path":"slurmtools/func_api/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25845142034","text":"__VERSION__ = '0.01'\n\nDEBUG = False\n\nMAX_RETRIES = 20\nINITIAL_RECONNECT_DELAY = 500 # milliseconds\n\nCOLAB_DIR = ''\nPROJECT_PATH = ''\nDEFAULT_HOST = 'floobits.com'\nDEFAULT_PORT = 3448\nSECURE = True\n\nUSERNAME = ''\nSECRET = ''\n\nALERT_ON_MSG = True\n\nROOM_WINDOW = None\n\nCHAT_VIEW = None\nCHAT_VIEW_PATH = None\n\nDELETE_LOCAL_FILES = True\nSHOW_HIGHLIGHTS = True\n\nSPARSE_MODE = False\n","repo_name":"rdgmatos/dot","sub_path":"vim/bundle/floobits-vim/plugin/floo/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8547578540","text":"import json\nimport os\nimport shutil\nimport ast\n\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\n\ndef print_info(my_str):\n print(f\"{bcolors.OKBLUE}[INFO] {my_str}{bcolors.ENDC}\")\n\ndef print_warning(my_str):\n print(f\"{bcolors.WARNING}[WARNING] {my_str}{bcolors.ENDC}\")\n\ndef print_debug(my_str):\n print(f\"{bcolors.OKCYAN}[DEBUG] {my_str}{bcolors.ENDC}\")\n\n\n\ndef string_to_python(string):\n try:\n return ast.literal_eval(string)\n except:\n return string\n\n\ndef 
dict_to_json(dict, fname):\n with open(fname, 'a') as f:\n json.dump(dict, f)\n f.write('\\n')\n\n\ndef dict_list_to_json(dict_list, fname):\n with open(fname, 'a') as f:\n for dict in dict_list:\n json.dump(dict, f)\n f.write('\\n')\n\n\ndef json_to_dict_list(fname):\n dict_list = []\n epoch_set = set()\n with open(fname) as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip()\n dict = json.loads(line)\n if dict['epoch'] not in epoch_set:\n dict_list.append(dict)\n epoch_set.add(dict['epoch'])\n return dict_list\n\n\ndef dict_to_tb(dict, writer, epoch):\n for key in dict:\n writer.add_scalar(key, dict[key], epoch)\n\n\ndef dict_list_to_tb(dict_list, writer):\n for dict in dict_list:\n assert 'epoch' in dict, 'Key epoch must exist in stats dict'\n dict_to_tb(dict, writer, dict['epoch'])\n\n\ndef makedirs(dir):\n os.makedirs(dir, exist_ok=True)\n\n\ndef makedirs_rm_exist(dir):\n if os.path.isdir(dir):\n shutil.rmtree(dir)\n os.makedirs(dir, exist_ok=True)\n","repo_name":"bkoyuncu/vamoh","sub_path":"imagegym/utils/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"71588326249","text":"import discord\nimport random\nfrom Helper.Util import *\nfrom Helper.InputParser import *\nfrom Constant.Messages import *\nfrom Constant.Tokens import *\nfrom Constant.Paths import *\nfrom Constant.Values import *\nfrom Constant.KeyWords import *\nfrom Actions.CringeAction import *\nfrom Actions.DateTimeAction import *\nfrom Actions.GoogleAction import *\nfrom Actions.HelloAction import *\nfrom Actions.HelpAction import *\nfrom Actions.MagicEightBallAction import *\nfrom Actions.ManPageAction import *\nfrom Actions.PostMemeAction import *\nfrom Actions.PostPornAction import *\nfrom Actions.PostStockImageAction import *\nfrom Actions.RandomFailureAction import *\nfrom Actions.RandomNumberAction import *\nfrom Actions.WikiAction import *\nfrom Actions.InjectedTextAction import *\nfrom Actions.RandomWordsAction import *\nfrom Actions.ConstantTextAction import *\nfrom Actions.FarmAction import *\n\nasync def parseMessage(message):\n #Early exits\n if isRandomFailure():\n return randomFailureAction()\n if hasInjectText():\n return injectedTextAction()\n \n #Trim Jarvis\n text = message.content.lower()\n text = trimFirstWord(text)\n\n try:\n text = trimFillerIfPresent(text, PLEASE)\n if text.startswith(HELP):\n return helpAction()\n elif text.startswith(tuple(HELLO)):\n return helloAction()\n elif text.startswith(CANCEL):\n emojiList = await message.guild.fetch_emojis()\n return farmAction(emojiList)\n elif text.startswith(THANK):\n return cringeAction()\n elif text.startswith(WHEN):\n return dateTimeAction()\n elif text.startswith(tuple(WILL)):\n return magicEightBallAction()\n elif text.startswith(tuple(MANY)):\n return randomNumberAction()\n elif text.startswith(GOOGLE):\n return googleAction(trimFirstWord(text))\n elif text.startswith(WIKI):\n return wikiAction(trimFirstWord(text))\n elif text.startswith(MAN):\n return manPageAction(trimFirstWord(text))\n elif text.startswith(tuple(SHOW)):\n return parsePostMessage(trimFirstWord(text))\n else:\n return randomWordsAction()\n except Exception as e:\n raise e\n\ndef parsePostMessage(text):\n try:\n if text == \"\":\n return ConstantTextAction(ERROR_INVALID_USE_MESSAGE)\n text = trimFillerIfPresent(text, ME)\n if text == \"\":\n return ConstantTextAction(ERROR_INVALID_USE_MESSAGE)\n keyword = text.split()[-1]\n count = 
1\n if text.split()[0].isnumeric():\n count = int(text.split()[0])\n text = trimFirstWord(text)\n elif isAlphaToNumeric(text.split()[0]):\n count = alphaToNumeric(text.split()[0])\n text = trimFirstWord(text)\n if count > POST_LIMIT:\n return ConstantTextAction(ERROR_OVER_LIMIT_MESSAGE)\n\n if keyword.startswith(tuple(MEME)):\n return postMemeAction(count, trimLastWord(text))\n elif keyword.startswith(tuple(PORN)):\n return postPornAction(count, trimLastWord(text))\n else:\n return postStockImageAction(text)\n except Exception as e:\n raise e","repo_name":"Ericgi231/DiscordBot_Jarvis","sub_path":"Helper/InputParser.py","file_name":"InputParser.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40241602455","text":"n=int(input())\ninCounter, outCounter=0, 0\nfor i in range(0, n):\n a=int(input())\n if a>=10 and a<=20:\n inCounter+=1\n else:\n outCounter+=1\n\nprint(f\"{inCounter} in\")\nprint(f\"{outCounter} out\")\n","repo_name":"mxTuhin/URI_Python","sub_path":"1072.py","file_name":"1072.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18108898362","text":"import os\nimport json\nimport uuid\nfrom PIL import Image\nimport requests\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nplt.ioff()\nmatplotlib.rcParams.update({'figure.max_open_warning': 0})\n\n\ndef ic_auto_exclude(ica, verbose=False):\n ic_to_reject = []\n for i in range(ica.n_components_):\n plot_to_save = ica.plot_components(i, show=False)\n plot_filename = str(uuid.uuid4()) + \".png\"\n plot_to_save.savefig(plot_filename)\n\n # cropping images\n plot_to_save = Image.open(plot_filename)\n plot_to_save = plot_to_save.crop((25, 56, 205, 232))\n plot_to_save.save(plot_filename)\n\n with open(plot_filename, 'rb') as img:\n files = {'plot': (plot_filename, img, 'image/png')}\n resp = requests.post('http://icamark.herokuapp.com/label', files=files).text\n label = json.loads(resp)['label']\n\n if label == 0:\n ic_to_reject.append(i)\n\n os.remove(plot_filename)\n\n if verbose:\n print(\"ICA#{} ({})\".format(i, label))\n return ic_to_reject\n","repo_name":"Evgenius2020/icaMark","sub_path":"mne_extension/IC_auto_exclude.py","file_name":"IC_auto_exclude.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74135405608","text":"from sqlalchemy import create_engine\nfrom pyspark.sql import SparkSession\nfrom google.oauth2 import service_account\nimport os, pandas as pd, googleapiclient.discovery, googleapiclient.errors\n\nDEVELOPER_KEY=\"AIzaSyAru82hLkwVztVx2xCJIdIzy-rKDMhUeyg\"\n\ndef startup() -> service_account.Credentials:\n # service account credentials\n creds = service_account.Credentials.from_service_account_info(\n info=DEVELOPER_KEY,\n scopes=[\"https://www.googleapis.com/auth/youtube.readonly\"]\n )\n \n return creds\n\ndef load_etl(creds : service_account.Credentials):\n api_service_name = \"Youtube\"\n api_version = \"v3\"\n youtube = googleapiclient.discovery.build(\n api_service_name, api_version, developerKey = DEVELOPER_KEY)\n \n # create and run the query\n request = youtube.videos().list(\n part=\"snippet,contentDetails,statistics\",\n chart=\"mostPopular\",\n regionCode=\"US\"\n )\n r = request.execute()\n transform_etl(r)\n\ndef transform_etl(response):\n# storing popular video's metadata in dictionary\n 
popular_videos = {\n 'id':[],\n 'published_date':[],\n 'title':[],\n 'description':[],\n 'thumbnail':[],\n 'channel_name':[],\n 'tags': [],\n 'duration': [],\n 'views': [],\n 'likes': [],\n 'favorites': [],\n 'comments': [],\n }\n \n for item in response['items']:\n duration = response['items'][0]['contentDetails']['duration']\n views = response['items'][0]['statistics']['viewCount']\n likes = response['items'][0]['statistics']['likeCount']\n favorites = response['items'][0]['statistics']['favoriteCount']\n comments = response['items'][0]['statistics']['commentCount']\n video_id = response.id\n popular_videos['duration'].append(duration)\n popular_videos['views'].append(views)\n popular_videos['likes'].append(likes)\n popular_videos['favorites'].append(favorites)\n popular_videos['comments'].append(comments)\n \n \n pd.DataFrame(data=response).to_csv(\"popular_videos.csv\", index=False)\n \ndef execute_etl():\n # Initialize spark session for youtube API\n spark = SparkSession.builder.appName(\"Youtube_API_ETL\").getOrCreate()\n\n credentials = startup()\n load_etl(credentials)\n \nif __name__ == '__main__':\n execute_etl()","repo_name":"austin-vu1017/YouTube-API-Pipeline","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28644711067","text":"import time\nimport datetime\nimport dateutil.parser\n\nimport pytest\n\nfrom rana.blueprints.heartbeats import process_hb, fetch_machine\n\n\n@pytest.mark.asyncio\nasync def test_heartbeats(test_cli_user):\n \"\"\"Test heartbeat creation for the given user.\"\"\"\n resp = await test_cli_user.post(\n \"/api/v1/users/current/heartbeats\",\n json={\n \"entity\": \"/home/uwu/uwu.py\",\n \"type\": \"file\",\n \"category\": None,\n \"time\": time.time(),\n \"project\": \"awoo\",\n },\n )\n\n assert resp.status_code == 201\n rjson = await resp.json\n assert isinstance(rjson, dict)\n\n data = rjson[\"data\"]\n assert isinstance(data, dict)\n assert data[\"project\"] == \"awoo\"\n\n\nasync def do_heartbeats(test_cli_user, minutes=10, *, project=\"awoo\", start=None):\n \"\"\"Add heartbeats.\"\"\"\n start = start or datetime.datetime.now()\n end = start + datetime.timedelta(minutes=minutes)\n\n app = test_cli_user.cli.app\n user_id = test_cli_user.user[\"id\"]\n\n mach_id = await fetch_machine(user_id, \"test_machine\", app_=app)\n\n for n in range(minutes):\n hb_time = start + datetime.timedelta(minutes=n)\n\n heartbeat = await process_hb(\n user_id,\n mach_id,\n {\n \"entity\": \"/home/uwu/uwu.py\",\n \"type\": \"file\",\n \"category\": None,\n \"time\": hb_time.timestamp(),\n \"is_write\": True,\n \"project\": project,\n \"language\": \"uwulang\",\n \"branch\": None,\n \"lines\": 10,\n \"lineno\": None,\n \"cursorpos\": None,\n },\n app_=app,\n )\n\n assert heartbeat is not None\n\n return start, end\n\n\n@pytest.mark.asyncio\nasync def test_durations(test_cli_user):\n \"\"\"Test if given heartbeats generate a duration.\"\"\"\n start, end = await do_heartbeats(test_cli_user)\n\n now = datetime.datetime.now()\n now_str = f\"{now.year}-{now.month}-{now.day}\"\n resp = await test_cli_user.get(f\"/api/v1/users/current/durations?date={now_str}\")\n\n assert resp.status_code == 200\n rjson = await resp.json\n assert isinstance(rjson, dict)\n\n data = rjson[\"data\"]\n assert isinstance(data, list)\n\n try:\n duration = next(iter(data))\n except StopIteration:\n raise Exception(\"data is empty\")\n\n assert 
isinstance(duration, dict)\n assert duration[\"project\"] == \"awoo\"\n\n dstart = dateutil.parser.parse(duration[\"start\"])\n dend = dateutil.parser.parse(duration[\"end\"])\n\n assert (dstart - start) < datetime.timedelta(seconds=30)\n assert (dend - end) < datetime.timedelta(seconds=30)\n\n\n@pytest.mark.asyncio\nasync def test_summaries(test_cli_user):\n \"\"\"Test summary generation\"\"\"\n start, end = await do_heartbeats(test_cli_user, 10, project=\"awoo\")\n start2, end2 = await do_heartbeats(\n test_cli_user, 9, project=\"awoo2\", start=end + datetime.timedelta(minutes=5)\n )\n\n now = datetime.datetime.now()\n now_str = f\"{now.year}-{now.month}-{now.day}\"\n\n resp = await test_cli_user.get(\n f\"/api/v1/users/current/summaries?start={now_str}&end={now_str}\"\n )\n\n assert resp.status_code == 200\n rjson = await resp.json\n assert isinstance(rjson, dict)\n\n s_start = dateutil.parser.parse(rjson[\"start\"])\n s_end = dateutil.parser.parse(rjson[\"end\"])\n\n assert s_start.day == s_end.day == now.day\n assert s_start.month == s_end.month == now.month\n\n data = rjson[\"data\"]\n assert isinstance(data, list)\n\n try:\n data1 = next(iter(rjson[\"data\"]))\n except StopIteration:\n raise Exception(\"rjson.data is empty\")\n\n assert isinstance(data1[\"grand_total\"][\"total_seconds\"], float)\n\n assert isinstance(data1[\"languages\"], list)\n assert isinstance(data1[\"projects\"], list)\n\n projects = data1[\"projects\"]\n assert isinstance(projects, list)\n\n # the first should always be awoo, the second should be always awoo2\n # due to the amount of time spent in them\n assert projects[0][\"name\"] == \"awoo\"\n assert projects[1][\"name\"] == \"awoo2\"\n","repo_name":"lun-4/rana","sub_path":"tests/test_durations.py","file_name":"test_durations.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"11662593123","text":"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.animation import FuncAnimation\nimport sys\nfrom matplotlib import colors\nfrom IPython import embed\nfrom os import path\n\nimport time\nfrom functools import wraps\n\n# We define inverese and direct fft reversely, because the renormalisation\n# makes more sense: the fft should be an integral and thus have a factor 1/n in\n# front of the noise.\nfrom numpy.fft import fft2 as ifft\nfrom numpy.fft import ifft2 as fft\n\n\ndef timeit(func):\n @wraps(func)\n def timeit_wrapper(*args, **kwargs):\n start_time = time.perf_counter()\n result = func(*args, **kwargs)\n end_time = time.perf_counter()\n total_time = end_time - start_time\n print(f'Function {func.__name__}{args} {kwargs} Took {total_time:.4f} seconds')\n return result\n return timeit_wrapper\n\n\nclass vorticity:\n\t\"\"\"\n\tWe simulate the vorticity equation with a shear force\n\t\"\"\"\n\tdef __init__(self):\n\n\t\t# Initial state of the system in terms of Fourier coefficients:\n\t\t# This is the number of eigenfunctions we use (for the definitions\n\t\t# below to work correctly with the Numpy implementation of the fast\n\t\t# Fourier transform this number should be odd: zeroth + first half positive,\n\t\t# + second half negative modes)\n\t\tself.N = 1001\n\t\t# And we set the time discretization for our problem\n\t\tself.dt = 0.01\n\t\t# And we set the parameter a for the viscosity\n\t\tself.nu = 0.005\n\t\t\n\t\t# We define our two initial conditions.\n\t\tself.space = np.linspace(-0.5, 
0.5, self.N)\n\t\tself.X, self.Y = np.meshgrid(self.space, self.space)\n\t\t\n\t\tself.vort1 = np.zeros(shape = (self.N,self.N), dtype = float)\n\t\tself.vort2 = np.zeros(shape = (self.N,self.N), dtype = float)\n\t\n\t\tself.vort1 = np.sin(2*np.pi*self.X)\n\t\tself.vort2 = np.sin(2*np.pi*self.Y)\n\t\t\n\t\t# And we define the noise, which we immediately update\n\t\tself.noise = np.zeros(shape = (self.N,self.N), dtype = float)\n\t\tself.renoise()\n\n\t\t# And on a coarser scale the value for the picture\n\t\tself.V = 100\n\t\tself.visual1 = np.zeros(shape = (self.V,self.V), dtype = float)\n\t\tself.visual2 = np.zeros(shape = (self.V,self.V), dtype = float)\n\n\t\t# The initial condition in its Fourier coefficients \n\t\tself.vort1ft = fft(self.vort1)\n\t\tself.vort2ft = fft(self.vort2)\n\t\t\n\t\t# Placeholders for the gradients\n\t\tself.grad1_x = np.zeros(shape = (self.N,self.N))\n\t\tself.grad1_y = np.zeros(shape = (self.N,self.N))\n\t\t\n\t\tself.grad2_x = np.zeros(shape = (self.N,self.N))\n\t\tself.grad2_y = np.zeros(shape = (self.N,self.N))\n\t\t\n\t\t# Placeholders for the stream function\n\t\tself.stream1_x = np.zeros(shape = (self.N,self.N))\n\t\tself.stream1_y = np.zeros(shape = (self.N,self.N))\n\t\t\n\t\tself.stream2_x = np.zeros(shape = (self.N,self.N))\n\t\tself.stream2_y = np.zeros(shape = (self.N,self.N))\n\n\t\t# We define the inverse Laplacian as a multiplier\n\t\t# This leaves the zeroth Fourier mode unchanged\n\t\tself.invlaplace = np.zeros(shape = (self.N, self.N), dtype= complex)\n\t\tfor i in range(0,self.N):\n\t\t\tfor k in range(0, self.N):\n\t\t\t\tif i>0 or k > 0:\n\t\t\t\t\tif i < self.N//2+1 and k < self.N//2+1:\n\t\t\t\t\t\tself.invlaplace[i,k] = complex(- 1/(float(i)**2 + float(k)**2),0.0)\n\t\t\t\t\tif i < self.N//2+1 and k >= self.N//2+1:\n\t\t\t\t\t\tself.invlaplace[i,k] = complex(- 1/(float(i)**2 + (self.N-k)**2),0.0)\n\t\t\t\t\tif i >= self.N//2+1 and k < self.N//2+1:\n\t\t\t\t\t\tself.invlaplace[i,k] = complex(- 1/(float(self.N-i)**2 + float(k)**2),0.0)\n\t\t\t\t\tif i >= self.N//2+1 and k >= self.N//2+1:\n\t\t\t\t\t\tself.invlaplace[i,k] = complex(- 1/(float(self.N-i)**2 + float(self.N-k)**2),0.0)\n\t\t\t\t\t\t\n\t\t# This is the multiplier for differentiation in the x variable\n\t\tself.nabla_x = np.zeros(shape = (self.N, self.N), dtype = complex)\n\t\tfor i in range(0,self.N):\n\t\t\tfor k in range(0, self.N):\n\t\t\t\tif i>0 or k > 0:\n\t\t\t\t\tif i < self.N//2+1 and k < self.N//2+1:\n\t\t\t\t\t\tself.nabla_x[i,k] = complex(0.0, 1.0*i)\n\t\t\t\t\tif i < self.N//2+1 and k >= self.N//2+1:\n\t\t\t\t\t\tself.nabla_x[i,k] = complex(0.0, 1.0*i)\n\t\t\t\t\tif i >= self.N//2+1 and k < self.N//2+1:\n\t\t\t\t\t\tself.nabla_x[i,k] = complex(0.0, -1.0*(self.N-i))\n\t\t\t\t\tif i >= self.N//2+1 and k >= self.N//2+1:\n\t\t\t\t\t\tself.nabla_x[i,k] = complex(0.0, -1.0*(self.N-i))\n\t\t\n\t\t# This is the multiplier for differentiation in the y variable\n\t\tself.nabla_y = np.zeros(shape = (self.N, self.N), dtype = complex)\n\t\tfor i in range(0,self.N):\n\t\t\tfor k in range(0, self.N):\n\t\t\t\tif i>0 or k > 0:\n\t\t\t\t\tif i < self.N//2+1 and k < self.N//2+1:\n\t\t\t\t\t\tself.nabla_y[i,k] = complex(0.0, 1.0*k)\n\t\t\t\t\tif i < self.N//2+1 and k >= self.N//2+1:\n\t\t\t\t\t\tself.nabla_y[i,k] = complex(0.0, -1.0*(self.N-k))\n\t\t\t\t\tif i >= self.N//2+1 and k < self.N//2+1:\n\t\t\t\t\t\tself.nabla_y[i,k] = complex(0.0, 1.0*k)\n\t\t\t\t\tif i >= self.N//2+1 and k >= self.N//2+1:\n\t\t\t\t\t\tself.nabla_y[i,k] = complex(0.0, -1.0*(self.N-k))\n\t\t\t\t\t\t\n\t\t# This is the Fourier multiplier for the x derivative of the stream function\n\t\tself.invlaplace_x = np.ones(shape = (self.N), dtype = complex)\n\t\tself.invlaplace_x = np.multiply(self.invlaplace, self.nabla_x, dtype = complex)\n\t\t\n\t\t# This is the Fourier multiplier for the y derivative of the stream function\n\t\tself.invlaplace_y = np.ones(shape = (self.N), dtype = 
complex)\n\t\tself.invlaplace_y = np.multiply(self.invlaplace, self.nabla_y, dtype = complex)\n\n\t\t# We define the resolvent of the fractional Laplacian as a multiplier\n\t\t# in Fourier coordinates\n\t\tself.relap = np.zeros(shape = (self.N, self.N), dtype = complex)\n\t\tfor i in range(0,self.N):\n\t\t\tfor k in range(0, self.N):\n\t\t\t\tif i>0 or k > 0:\n\t\t\t\t\tif i < self.N//2+1 and k < self.N//2+1:\n\t\t\t\t\t\tself.relap[i,k] = complex(1.0/(1.0+(self.nu*i**2+self.nu*k**2)*self.dt), 0.0)\n\t\t\t\t\tif i < self.N//2+1 and k >= self.N//2+1:\n\t\t\t\t\t\tself.relap[i,k] = complex(1.0/(1.0+(self.nu*i**2+self.nu*(self.N -k)**2)*self.dt), 0.0)\n\t\t\t\t\tif i >= self.N//2+1 and k < self.N//2+1:\n\t\t\t\t\t\tself.relap[i,k] = complex(1.0/(1.0+(self.nu*(self.N-i)**2+ self.nu*k**2)*self.dt), 0.0)\n\t\t\t\t\tif i >= self.N//2+1 and k >= self.N//2+1:\n\t\t\t\t\t\tself.relap[i,k] = complex(1.0/(1.0+(self.nu*(self.N-i)**2+ self.nu*(self.N-k)**2)*self.dt), 0.0)\n\t\t\n\tdef force(self):\n\t\t\n\t\t# We compute the gradient\n\t\tself.grad1_x = ifft(np.multiply(self.nabla_x, self.vort1ft, dtype = complex)).real\n\t\tself.grad1_y = ifft(np.multiply(self.nabla_y, self.vort1ft, dtype = complex)).real\n\t\t\n\t\tself.grad2_x = ifft(np.multiply(self.nabla_x, self.vort2ft, dtype = complex)).real\n\t\tself.grad2_y = ifft(np.multiply(self.nabla_y, self.vort2ft, dtype = complex)).real\n\t\t\n\t\t# We compute the stream function\n\t\tself.stream1_x = ifft(np.multiply(self.invlaplace_x, self.vort1ft, dtype = complex)).real\n\t\tself.stream1_y = ifft(np.multiply(self.invlaplace_y, self.vort1ft, dtype = complex)).real\n\t\t\n\t\tself.stream2_x = ifft(np.multiply(self.invlaplace_x, self.vort2ft, dtype = complex)).real\n\t\tself.stream2_y = ifft(np.multiply(self.invlaplace_y, self.vort2ft, dtype = complex)).real\n\t\t\n\t\t# We compute the force in Fourier coordinates\n\t\tself.force1 = fft(np.multiply(self.stream1_x, self.grad1_x, dtype = complex) - np.multiply(self.stream1_y, self.grad1_y, dtype = complex))\n\t\t\n\t\tself.force2 = fft(np.multiply(self.stream2_x, self.grad2_x, dtype = complex) - np.multiply(self.stream2_y, self.grad2_y, dtype = complex))\n\t\t\n\t\t\n\tdef evaluate(self):\n\t\t# This function adjourns the value of the real state of the system.\n\t\tself.vort1 = ifft(self.vort1ft, s = (self.N, self.N)).real\n\t\tself.vort2 = ifft(self.vort2ft, s = (self.N, self.N)).real\n\n\tdef visualize(self):\n\t\t# This function adjourns the value of the visualizer\n\t\tself.visual1 = ifft(self.vort1ft, s=(self.V,self.V)).real\n\t\tself.visual2 = ifft(self.vort2ft, s=(self.V,self.V)).real\n\n\tdef renoise(self):\n\t\n\t\t# We adjourn the noise\n#\t\tself.noise = 0.5*np.random.normal(loc = 0.0, scale =1.0, size\n#\t\t\t\t\t\t\t\t=(self.N, self.N))*np.sqrt(self.N*self.dt)\n\t\tself.noise = np.sin(30*np.pi*self.X)*np.random.normal(loc = 0.0, scale = 1.0)*np.sqrt(self.dt)\n\t\n\t@timeit\n\tdef implicit_euler(self):\n\t\n\t\t# We do one more step in the implicit Euler approximation\n\n\t\t# We start by computing the nonlinearity and the noise\n\t\tself.force()\n\t\tself.renoise()\n\n\t\t# This is the step forward in the Euler scheme\n\t\tself.vort1ft = np.multiply(self.relap, self.vort1ft +\n\t\t\t\t\t\t\tself.dt*(self.force1)+ fft(self.noise), dtype = complex)\n\t\tself.vort2ft = np.multiply(self.relap, self.vort2ft +\n\t\t\t\t\t\t\tself.dt*(self.force2)+ fft(self.noise), dtype = complex)\n\t\t\t\t\t\t\t\n\t\t# We adjourn both other values\n\t\tself.evaluate()\n\t\tself.visualize()\n\ndef animate(i):\n\n\tglobal vo, ax, fig, time_text\n\n\t# Real time is:\n\tani_time = i*vo.dt\n\n\t# Set the new data\n\tim1.set_data(vo.vort1)\n\tim2.set_data(vo.vort2)\n\t\n\n\t# Set 
the new time\n\ttime_text1.set_text(\"Time = {:2.3f}\".format(ani_time))\n\ttime_text2.set_text(\"Time = {:2.3f}\".format(ani_time))\n\n\t# We print the step we are in\n\tsys.stdout.flush()\n\tsys.stdout.write(\"\\r Step = {}\".format(i))\n\n\t# And we do the next step:\n\tvo.implicit_euler()\n\t\n\treturn [im1] + [im2] + [time_text1,] +[time_text2,]\n\n# We initiate our solver\nvo = vorticity()\n\n\nfrom PIL import Image, ImageDraw\n\nimages = []\n\n# We set up the picture\nfig = plt.figure(figsize = (19, 8))\nplt.title(\"Vorticity with shear flow and low viscosity (0.1)\")\nplt.axis('off')\n\n# And the two subplots\nax1 = fig.add_subplot(1,2,1)\nax2 = fig.add_subplot(1,2,2)\n\n# Add axis limits\nax1.set_xlim(0, vo.N)\nax1.set_ylim(0, vo.N)\nax2.set_xlim(0, vo.N)\nax2.set_ylim(0, vo.N)\n\n# But we do not plot the axis\nax1.axis('off')\nax2.axis('off')\n\n# And time counter\ntime_text1 = ax1.text(0.05, 0.95,'',horizontalalignment='left',verticalalignment='top', transform=ax1.transAxes, color = 'white')\n\ntime_text2 = ax2.text(0.05, 0.95,'',horizontalalignment='left',verticalalignment='top', transform=ax2.transAxes, color = 'white')\n\n\n# Picture for the two initial conditions\nim1 = ax1.imshow(vo.vort1, cmap = plt.get_cmap('jet'), vmin = -1, vmax = 1)\nim2 = ax2.imshow(vo.vort2, cmap = plt.get_cmap('jet'), vmin = -1, vmax = 1)\n\t\n\nfor i in range(0, 200):\n\n\t# Real time is:\n\tani_time = i*vo.dt\n\n\t# Set the new data\n\tim1.set_data(vo.vort1)\n\tim2.set_data(vo.vort2)\n\t\n\t# Set the new time\n\ttime_text1.set_text(\"Time = {:2.3f}\".format(ani_time))\n\ttime_text2.set_text(\"Time = {:2.3f}\".format(ani_time))\n\n\tplt.savefig('vorticity_fig_lv'+str(i)+'.png')\n\t\n\tfor k in range(0, 28):\n\t\tvo.implicit_euler()\n\t\tprint(i,k)\n\n## We set up the picture\n#fig = plt.figure(figsize = (40, 20))\n#plt.title(\"Vorticity equation\")\n#plt.axis('off')\n#\n## And the two subplots\n#ax1 = fig.add_subplot(1,2,1)\n#ax2 = fig.add_subplot(1,2,2)\n#\n## Add axis limits\n#ax1.set_xlim(0, vo.N)\n#ax1.set_ylim(0, vo.N)\n#ax2.set_xlim(0, vo.N)\n#ax2.set_ylim(0, vo.N)\n#\n## But we do not plot the axis\n#ax1.axis('off')\n#ax2.axis('off')\n#\n## And time counter\n#time_text1 = ax1.text(0.05, 0.95,'',horizontalalignment='left',verticalalignment='top', transform=ax1.transAxes, color = 'white')\n#\n#time_text2 = ax2.text(0.05, 0.95,'',horizontalalignment='left',verticalalignment='top', transform=ax2.transAxes, color = 'white')\n#\n## Picture for the two initial conditions\n#im1 = ax1.imshow(vo.vort1, cmap = plt.get_cmap('rainbow'), vmin = -1.3, vmax = 1.3)\n#im2 = ax2.imshow(vo.vort2, cmap = plt.get_cmap('rainbow'), vmin = -1.3, vmax = 1.3)\n#\n#\n### We let the animation run.\n##ani = FuncAnimation(fig, animate, frames= 2, repeat=False)\n##mywriter = animation.PillowWriter(fps=30,bitrate=6000000)\n##ani.save('vorticity.gif',writer=mywriter)\n","repo_name":"rosati-tom/Simulation-of-some-SPDEs","sub_path":"vorticity.py","file_name":"vorticity.py","file_ext":"py","file_size_in_byte":11384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10600793733","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom layers.Embed import DataEmbedding, DataEmbedding_wo_pos\nfrom layers.AutoCorrelation import AutoCorrelation, AutoCorrelationLayer\nfrom layers.SpectralInteraction import SpectralInteraction, SpectralInteractionLayer\nfrom layers.Autoformer_EncDec import Encoder, Decoder, EncoderLayer, DecoderLayer, my_Layernorm, 
series_decomp\nimport math\nimport numpy as np\n\nfrom models.simple_linear import simple_linear\n\nfrom scipy.fftpack import fft, ifft\nimport matplotlib.pyplot as plt\nfrom matplotlib.pylab import mpl\nfrom scipy import signal\n\nfrom layers.SelfAttention_Family import FullAttention, AttentionLayer\n\n\nclass separate_encoder_layer(nn.Module):\n \"\"\"\n 功能:\n \"\"\"\n def __init__(self, input_dim, output_dim, dropout=0.1, activation=\"gelu\"):\n super(separate_encoder_layer, self).__init__()\n self.layer = simple_linear(input_dim, output_dim)\n def forward(self, layer_input):\n layer_output = self.layer(layer_input)\n return layer_output\n\nclass separate_linear(nn.Module):\n def __init__(self, input_dim, output_dim, dropout=0.01, activation='gelu'):\n \"\"\"\n :param step: 步长 即每个子序列的长度\n :param attn_layer: 注意力模块初始化\n \"\"\"\n super(separate_linear, self).__init__()\n self.window_size = 12 #第一层,二层,三层\n self.layer = 1\n self.block_num = input_dim // self.window_size\n self.encoder_layer_list = nn.ModuleList([])\n count = 0\n while(count < self.layer):\n layer_input_dim = self.window_size\n layer_output_dim = output_dim // self.block_num\n # share the core layer\n self.encoder_layer_list.append(separate_encoder_layer(layer_input_dim, layer_output_dim))\n # don't share the core layer\n # temp = nn.ModuleList([])\n # for i in range(self.block_num):\n # temp.append(separate_encoder_layer(layer_input_dim, layer_output_dim))\n # self.encoder_layer_list.append(temp)\n count = count + 1\n\n self.linear_out = nn.Linear(output_dim, output_dim, bias=False)\n\n def forward(self, input):\n batch_size, sequence_len, feature_dim = input.shape\n count = 0\n layer_output = [] #各层输出的list\n while(count < self.layer):\n #sequence由本层序列长度 cnt为本层分块数\n cnt = sequence_len//self.window_size\n #用于存储局部输出\n output = torch.tensor([]).to(input.device)\n for i in range(cnt):\n ii = i * self.window_size\n # block index\n input_ii = input[:, ii:ii + self.window_size, :]\n next_output = self.encoder_layer_list[count](input_ii)\n output = torch.cat((output, next_output), 1) # 按sequenc_len这一维度拼接\n #print(\"encoder: 第{}次离散局部输出,output:[{},{},{}]\".format(count,output.shape[0],output.shape[1],output.shape[2]))\n input = output\n sequence_len = output.shape[1]\n count = count + 1 # 层数\n #output为最终隐藏层z ,layer_output为各层输出的list\n output = self.linear_out(output.permute(0, 2, 1)).permute(0, 2, 1)\n return output\n\n\n","repo_name":"wzhSteve/MTGSR","sub_path":"models/separate_linear.py","file_name":"separate_linear.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"73933952487","text":"import json\nimport string\n\nimport re\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.utils.crypto import get_random_string\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom api.models import APIKey\nfrom api.utils import is_valid_api_key\nfrom tasks.models import Task\nfrom teams.models import Team\n\n\n@login_required\n@csrf_exempt\ndef generate_api_key(request):\n def get_unique_key():\n new_key = get_random_string(length=32, allowed_chars=string.ascii_letters + string.digits)\n\n if is_valid_api_key(new_key):\n return get_unique_key()\n else:\n return new_key\n\n key = get_unique_key()\n\n if \"app_name\" not in request.GET:\n return HttpResponse(\"{\\\"status\\\": 1}\", status=400)\n\n api_key = APIKey(key=key)\n api_key.user = request.user\n api_key.app_name = 
request.GET[\"app_name\"]\n api_key.save()\n\n content = json.dumps({\n \"status\": 0,\n \"key\": key\n })\n\n return HttpResponse(content, status=200)\n\n\n@csrf_exempt\ndef get_team(request, year, team_number):\n key = request.GET.get(\"key\", \"\")\n\n if not is_valid_api_key(key):\n return HttpResponse(\"{\\\"status\\\": 1}\", status=401)\n\n try:\n team = Team.objects.get(team_number=team_number, year=year)\n except Team.DoesNotExist:\n return HttpResponse(\"{\\\"status\\\": 1}\", status=400)\n\n tasks_obj = []\n team_tasks = team.tasks.all()\n for task in Task.objects.filter(year=year):\n tasks_obj.append({\n \"codeyear\": task.codeyear, \n \"name\": task.name,\n \"team_able\": task in team_tasks\n })\n\n content = json.dumps({\n \"status\": 0,\n \"team_number\": team.team_number,\n \"name\": team.name,\n \"tasks\": tasks_obj,\n \"auto_points\": team.auto_points,\n \"year\": team.year,\n \"favorite\": team.favorite\n })\n\n return HttpResponse(content, status=200)\n\n\n@csrf_exempt\ndef set_team(request, year, team_number):\n try:\n json_body = json.loads(request.body.decode(\"UTF-8\"))\n except json.JSONDecodeError:\n return HttpResponse(\"{\\\"status\\\": 1}\", status=400)\n\n key = json_body.get(\"key\", \"\")\n\n if not is_valid_api_key(key):\n return HttpResponse(\"{\\\"status\\\": 1}\", status=401)\n\n try:\n team = Team.objects.get(team_number=team_number, year=year)\n except Team.DoesNotExist:\n team = Team(team_number=team_number, year=year)\n\n if \"name\" in json_body:\n new_name = json_body[\"name\"]\n if isinstance(new_name, str):\n team.name = new_name\n\n if \"tasks\" in json_body:\n team.tasks.clear()\n new_tasks = json_body[\"tasks\"]\n if isinstance(new_tasks, list):\n for task in new_tasks:\n if isinstance(task[\"codeyear\"], str) and isinstance(task[\"team_able\"], bool):\n codeyear = str(task[\"codeyear\"]).lower()\n team_able = task[\"team_able\"]\n if isinstance(task, dict) \\\n and team_able \\\n and re.search(\"^\\w+-\\d+$\", codeyear):\n try:\n task_obj = Task.objects.get(codeyear=codeyear)\n team.tasks.add(task_obj)\n except Task.DoesNotExist:\n pass\n\n if \"auto_points\" in json_body:\n new_auto_points = json_body[\"auto_points\"]\n if isinstance(new_auto_points, int):\n team.auto_points = int(new_auto_points)\n\n if \"favorite\" in json_body:\n new_favorite = json_body[\"favorite\"]\n if isinstance(new_favorite, str):\n team.favorite = True if new_favorite.lower() == \"true\" else False\n\n team.save()\n\n return HttpResponse(\"{\\\"status\\\": 0}\", status=200)\n","repo_name":"thomassross/FRCScoutWeb","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11078195999","text":"import os\nimport re\nimport glob\nimport inspect\nimport pytest\nimport sys\nimport time\nimport subprocess\nimport tempfile\nimport shutil\n\n#\n# we assume everwhere our current directory is in the package\n# test area, so go ahead and cd there\n#\nos.chdir(os.path.dirname(__file__))\n\n#\n# add to path what we eed to test\n# unless we're testing installed, then use /opt/jobsub_lite/...\n#\nif os.environ.get(\"JOBSUB_TEST_INSTALLED\", \"0\") == \"1\":\n sys.path.append(\"/opt/jobsub_lite/lib\")\nelse:\n sys.path.append(\"../lib\")\n\nimport fake_ifdh\n\nif os.environ.get(\"JOBSUB_TEST_INSTALLED\", \"0\") == \"1\":\n os.environ[\"PATH\"] = \"/opt/jobsub_lite/bin:\" + os.environ[\"PATH\"]\nelse:\n os.environ[\"PATH\"] = (\n 
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n + \"/bin:\"\n + os.environ[\"PATH\"]\n )\n\n\n@pytest.fixture\ndef add_links():\n # add symlink and harlink in dagnabbit directory for tarfile tests\n os.system(\"/bin/pwd\")\n f = \"dagnabbit/jobA.sh\"\n slf = \"dagnabbit/test_symlink\"\n hlf = \"dagnabbit/test_hardlink\"\n if not os.path.exists(slf):\n os.symlink(\"jobA.sh\", slf)\n if not os.path.exists(hlf):\n os.link(f, str(hlf))\n return True\n\n\n@pytest.fixture\ndef job_envs():\n os.environ[\"IFDH_DEBUG\"] = \"1\"\n os.environ[\"IFDH_FORCE\"] = \"https\"\n os.environ[\"IFDH_VERSION\"] = \"v2_6_10,ifdhc_config v2_6_15\"\n os.environ[\"IFDH_TOKEN_ENABLE\"] = \"1\"\n os.environ[\"IFDH_PROXY_ENABLE\"] = \"0\"\n os.environ[\"IFDH_CP_MAXRETRIES\"] = \"2\"\n os.environ[\"VERSION\"] = \"v1_1\"\n if os.environ.get(\"_condor_COLLECTOR_HOST\"):\n del os.environ[\"_condor_COLLECTOR_HOST\"]\n\n\n@pytest.fixture\ndef noexp(job_envs):\n if os.environ.get(\"GROUP\", None):\n del os.environ[\"GROUP\"]\n if os.environ.get(\"EXPERIMENT\", None):\n del os.environ[\"EXPERIMENT\"]\n if os.environ.get(\"SAM_EXPERIMENT\", None):\n del os.environ[\"SAM_EXPERIMENT\"]\n if os.environ.get(\"SAM_STATION\", None):\n del os.environ[\"SAM_STATION\"]\n\n\n@pytest.fixture\ndef samdev(job_envs):\n \"\"\"fixture to run launches for samdev/fermilab\"\"\"\n os.environ[\"GROUP\"] = \"fermilab\"\n os.environ[\"EXPERIMENT\"] = \"samdev\"\n os.environ[\"SAM_EXPERIMENT\"] = \"samdev\"\n os.environ[\"SAM_STATION\"] = \"samdev\"\n\n\n@pytest.fixture\ndef nova(job_envs):\n \"\"\"fixture to run launches for dune\"\"\"\n os.environ[\"GROUP\"] = \"nova\"\n os.environ[\"EXPERIMENT\"] = \"nova\"\n os.environ[\"SAM_EXPERIMENT\"] = \"nova\"\n os.environ[\"SAM_STATION\"] = \"nova\"\n\n\n@pytest.fixture\ndef dune(job_envs):\n \"\"\"fixture to run launches for dune\"\"\"\n os.environ[\"GROUP\"] = \"dune\"\n os.environ[\"EXPERIMENT\"] = \"dune\"\n os.environ[\"SAM_EXPERIMENT\"] = \"dune\"\n os.environ[\"SAM_STATION\"] = \"dune\"\n\n\n@pytest.fixture\ndef dune_test_file(dune):\n datafile = f\"/pnfs/dune/scratch/users/{os.environ['USER']}/test_file.txt\"\n print(\"trying to generate {datafile}\")\n exists = fake_ifdh.ls(datafile)\n if exists:\n print(\"found {datafile}\")\n else:\n fake_ifdh.cp(__file__, datafile)\n print(\"copying to {datafile}\")\n return datafile\n\n\ndef get_collector():\n \"\"\"obfuscated way to find collector for dune pool\"\"\"\n cp = \"collector\"[:4]\n hn = os.environ[\"HOSTNAME\"]\n exp = os.environ[\"GROUP\"]\n dom = hn[hn.find(\".\") :]\n n = dom.find(\".\", 1) - 3\n col = f\"{exp}gp{cp}0{n}{dom}\"\n return col\n\n\n@pytest.fixture\ndef dune_gp(dune):\n \"\"\"fixture to run launches for dune global pool\"\"\"\n os.environ[\"_condor_COLLECTOR_HOST\"] = get_collector()\n\n\njoblist = []\njid2test = {}\njid2nout = {}\njid2group = {}\njid2pool = {}\noutdirs = {}\nddirs = {}\n\n\ndef run_launch(cmd, expected_out=1, get_dir=False):\n \"\"\"\n run a jobsub launch command, get jobids from output to watch\n for, etc.\n \"\"\"\n schedd = None\n jobid = None\n outdir = None\n jobsubjobid = None\n added = False\n # do not submit too fast...\n time.sleep(1)\n\n if os.environ.get(\"JOBSUB_TEST_SUBMIT_EXTRA\", \"\"):\n # add extra submit flags, if available\n cmd = cmd.replace(\n \"jobsub_submit\", \"jobsub_submit \" + os.environ[\"JOBSUB_TEST_SUBMIT_EXTRA\"]\n )\n\n pf = os.popen(cmd + \" 2>&1\")\n for l in pf.readlines():\n print(l)\n m = re.match(r\"Submission files are in: (\\S+)\", l)\n if get_dir and m:\n 
pf.close()\n return m.group(1).strip()\n m = re.match(r\"Running:.*/usr/bin/condor_submit.*-remote (\\S+)\", l)\n if m:\n print(\"Found schedd!\")\n schedd = m.group(1)\n m = re.match(r\"\\d+ job\\(s\\) submitted to cluster (\\d+).\", l)\n if m:\n print(\"Found jobid!\")\n jobid = m.group(1)\n\n m = re.match(r\"Use job id (\\S+) to retrieve output\", l)\n if m:\n jobsubjobid = m.group(1)\n print(f\"Found jobsubjobid {jobsubjobid}!\")\n\n if jobid and schedd and jobsubjobid and not added:\n added = True\n print(\"Found all three! \", jobid, schedd, jobsubjobid)\n joblist.append(\"%s.0@%s\" % (jobid, schedd))\n # note which test led to this jobid\n jid2test[\"%s.0@%s\" % (jobid, schedd)] = inspect.stack()[2][3]\n jid2nout[\"%s.0@%s\" % (jobid, schedd)] = expected_out\n jid2group[\"%s.0@%s\" % (jobid, schedd)] = os.environ.get(\"GROUP\", \"fermilab\")\n jid2pool[\"%s.0@%s\" % (jobid, schedd)] = os.environ.get(\n \"_condor_COLLECTOR_HOST\", \"\"\n )\n res = pf.close()\n\n if not added:\n raise ValueError(\n \"Did not get expected output from %s\\njobid %s schedd %s jobsubjobid %s \"\n % (cmd, jobid, schedd, jobsubjobid)\n )\n\n if not jobsubjobid == \"%s.0@%s\" % (jobid, schedd):\n raise ValueError(\"Did not get consistent output from %s \" % cmd)\n\n return res == None\n\n\ndef lookaround_launch(extra, verify_files=\"\"):\n \"\"\"Simple submit of our lookaround script\"\"\"\n assert run_launch(\n f\"jobsub_submit --mail-never --verbose=2 -e SAM_EXPERIMENT {extra} file://`pwd`/job_scripts/lookaround.sh {verify_files}\"\n )\n\n\n@pytest.mark.smoke\ndef test_launch_lookaround_samdev(samdev):\n lookaround_launch(\"\")\n\n\n@pytest.mark.integration\ndef test_launch_lookaround_samdev_dev(samdev):\n lookaround_launch(\"--devserver\")\n\n\n@pytest.mark.integration\ndef test_no_submit_condor_submit(samdev):\n dir = run_launch(\n \"jobsub_submit --verbose=1 --no-submit \"\n \"file://`pwd`/job_scripts/lookaround.sh\",\n get_dir=True,\n )\n assert run_launch(f\"cd {dir} && condor_submit --verbose=1 simple.cmd\")\n\n\n@pytest.mark.integration\ndef test_launch_lookaround_ddir(samdev):\n pid = os.getpid()\n ddir = f\"/pnfs/fermilab/users/$USER/d{pid}\"\n fake_ifdh.mkdir_p(ddir)\n lookaround_launch(f\"--devserver -d D1 {ddir}\")\n ddirs[joblist[-1]] = ddir\n\n\n@pytest.mark.integration\ndef test_launch_lookaround_dune(dune):\n lookaround_launch(\"--devserver\")\n\n\n@pytest.mark.integration\ndef test_launch_lookaround_dune_gp_poolflag(dune):\n lookaround_launch(\"--global-pool=dune\")\n\n\n@pytest.mark.integration\ndef test_launch_lookaround_dune_gp(dune_gp):\n lookaround_launch(\"\")\n\n\n@pytest.mark.integration\ndef test_maxconcurrent(samdev):\n lookaround_launch(\"--maxConcurrent 2 -N 6 \")\n\n\n@pytest.mark.integration\ndef test_dd_args(samdev):\n fife_launch(\" --dd-percentage 50 \" \" --dd-extra-dataset mwm_out_1 \")\n\n\n@pytest.mark.integration\ndef test_maxconcurrent_dataset(samdev):\n fife_launch(\"--maxConcurrent 2\")\n\n\n@pytest.mark.integration\ndef test_dash_f_plain(dune_test_file):\n lookaround_launch(\n f\"-f {dune_test_file}\",\n f\"\\\\$CONDOR_DIR_INPUT/{os.path.basename(dune_test_file)}\",\n )\n\n\n@pytest.mark.integration\ndef test_dash_f_sl6(dune_test_file):\n lookaround_launch(\n f\"-f {dune_test_file} \"\n \"--singularity=/cvmfs/singularity.opensciencegrid.org/fermilab/fnal-wn-sl6:latest\",\n f\"\\\\$CONDOR_DIR_INPUT/{os.path.basename(dune_test_file)}\",\n )\n\n\n@pytest.mark.integration\ndef test_dash_f_dropbox_cvmfs(dune):\n lookaround_launch(\n f\"-f dropbox://{__file__} 
--use-cvmfs-dropbox\",\n f\"\\\\$CONDOR_DIR_INPUT/{os.path.basename(__file__)}\",\n )\n\n\n@pytest.mark.integration\ndef test_tar_dir_cvmfs(dune, add_links):\n lookaround_launch(\n f\"--tar_file_name tardir://{os.path.dirname(__file__)}/dagnabbit --use-cvmfs-dropbox\",\n f\"\\\\$INPUT_TAR_DIR_LOCAL/ckjobA.sh\",\n )\n\n\n@pytest.mark.integration\ndef test_tar_dir_pnfs(dune, add_links):\n lookaround_launch(\n f\"--tar_file_name tardir://{os.path.dirname(__file__)}/dagnabbit --use-pnfs-dropbox\",\n f\"\\\\$INPUT_TAR_DIR_LOCAL/ckjobA.sh\",\n )\n\n\n@pytest.mark.integration\ndef test_dash_f_dropbox_pnfs(dune):\n lookaround_launch(\n f\"-f dropbox://{__file__} --use-pnfs-dropbox\",\n f\"\\\\$CONDOR_DIR_INPUT/{os.path.basename(__file__)}\",\n )\n\n\n@pytest.mark.integration\ndef test_dash_f_dropbox_pnfs_exra_slashes(dune):\n lookaround_launch(\n f\"-f dropbox:////{__file__} --use-pnfs-dropbox\",\n f\"\\\\$CONDOR_DIR_INPUT/{os.path.basename(__file__)}\",\n )\n\n\ndef dagnabbit_launch(extra, which=\"\", nout_files=5):\n os.environ[\"SUBMIT_FLAGS\"] = \"\"\n os.chdir(os.path.join(os.path.dirname(__file__), \"dagnabbit\"))\n res = run_launch(\n f\"\"\"\n jobsub_submit \\\n --mail-never \\\n --verbose=2 \\\n -e SAM_EXPERIMENT {extra} \\\n --dag file://dagTest{which} \\\n \"\"\",\n nout_files,\n )\n os.chdir(os.path.dirname(__file__))\n assert res\n\n\n@pytest.mark.integration\ndef test_launch_dagnabbit_simple(samdev):\n dagnabbit_launch(\"--devserver\", \"\")\n\n\n@pytest.mark.integration\ndef test_launch_dagnabbit_collapse(samdev):\n dagnabbit_launch(\"--devserver\", \"HS\", 12)\n\n\n@pytest.mark.integration\ndef test_launch_dagnabbit_dropbox(samdev):\n dagnabbit_launch(\"--devserver\", \"Dropbox\")\n\n\n@pytest.mark.integration\ndef test_launch_dagnabbit_complex(samdev):\n os.environ[\"JOBSUB_EXPORTS\"] = \"\"\n os.environ[\"SUBMIT_FLAGS\"] = \"\"\n\n dagnabbit_launch(\"--devserver\", \"7\", 8)\n\n\ndef fife_launch(extra):\n assert run_launch(\n \"\"\"\n jobsub_submit \\\n --mail-never \\\n --verbose=2 \\\n -e EXPERIMENT \\\n -e IFDH_DEBUG \\\n -e IFDH_FORCE \\\n -e IFDH_VERSION \\\n -e IFDH_TOKEN_ENABLE \\\n -e IFDH_PROXY_ENABLE \\\n -e SAM_EXPERIMENT \\\n -e SAM_STATION \\\n -e IFDH_CP_MAXRETRIES \\\n -e VERSION \\\n -N 5 \\\n --generate-email-summary \\\n --expected-lifetime=2h \\\n --timeout=2h \\\n --disk=100MB \\\n --memory=500MB \\\n %(extra)s \\\n --dataset-definition=gen_cfg \\\n file://///grid/fermiapp/products/common/db/../prd/fife_utils/v3_3_2/NULL/libexec/fife_wrap \\\n --find_setups \\\n --setup-unquote 'hypotcode%%20v1_1' \\\n --setup-unquote 'ifdhc%%20v2_6_10,ifdhc_config%%20v2_6_15' \\\n --prescript-unquote 'ups%%20active' \\\n --self_destruct_timer '1400' \\\n --debug \\\n --getconfig \\\n --limit '1' \\\n --schema 'https' \\\n --appvers 'v1_1' \\\n --metadata_extractor 'hypot_metadata_extractor' \\\n --addoutput 'gen.troot' \\\n --rename 'unique' \\\n --dest '/pnfs/%(exp)s/users/mengel/dropbox' \\\n --add_location \\\n --declare_metadata \\\n --addoutput1 'hist_gen.troot' \\\n --rename1 'unique' \\\n --dest1 '/pnfs/%(exp)s/users/mengel/dropbox' \\\n --add_location1 \\\n --declare_metadata1 \\\n --exe hypot.exe \\\n -- \\\n -o \\\n gen.troot \\\n -c \\\n hist_gen.troot \"\"\"\n % {\"exp\": os.environ[\"GROUP\"], \"extra\": extra},\n expected_out=5,\n )\n\n\n@pytest.mark.integration\ndef test_samdev_fife_launch(samdev):\n fife_launch(\"--devserver\")\n\n\n@pytest.mark.integration\ndef test_dune_fife_launch(dune):\n fife_launch(\"--devserver\")\n\n\n@pytest.mark.integration\ndef 
test_nova_fife_launch(nova):\n fife_launch(\"--devserver\")\n\n\n@pytest.mark.integration\ndef test_dune_gp_fife_launch(dune_gp):\n fife_launch(\"\")\n\n\ndef group_for_job(jid):\n\n group = jid2group.get(jid, \"\")\n\n if jid.find(\"dune\") > 0:\n if not group:\n group = \"dune\"\n if jid2pool.get(jid, \"\"):\n os.environ[\"_condor_COLLECTOR_HOST\"] = get_collector()\n else:\n if not group:\n group = \"fermilab\"\n if os.environ.get(\"_condor_COLLECTOR_HOST\"):\n del os.environ[\"_condor_COLLECTOR_HOST\"]\n os.environ[\"GROUP\"] = group\n return group\n\n\n# turning this test off for now; I can not seem to get it to consistently get\n# the setup of two jobs each on two schedd's... mengel\n# @pytest.mark.integration\ndef xx_test_jobsub_q_repetitions(samdev):\n # test to make sure if we do jobsub_q 1@jobsub01 2@jobsub01 3@jobsub02 4@jobsub02 we get only one repitition\n # first submit a few more jobs so we have fresh ones\n lookaround_launch(\"\")\n lookaround_launch(\"\")\n lookaround_launch(\"\")\n lookaround_launch(\"\")\n lookaround_launch(\"\")\n lookaround_launch(\"\")\n jobs_by_schedd = {}\n all_schedds = set()\n for jid in joblist:\n schedd = re.sub(r\".*@\", \"\", jid)\n all_schedds.add(schedd)\n if schedd in jobs_by_schedd:\n jobs_by_schedd[schedd].append(jid)\n else:\n jobs_by_schedd[schedd] = [jid]\n\n print(f\"jobs_by_schedd: {repr(jobs_by_schedd)}\")\n args = [\"jobsub_q\", \"-G\", \"fermilab\"]\n jcount = 0\n all_schedds_l = list(all_schedds)\n all_schedds_l.sort()\n for schedd in all_schedds_l:\n # pick the most recent 2 of jobs from each schedd\n nj = len(jobs_by_schedd[schedd])\n if nj > 1 and not schedd.find(\"dune\") == 0:\n args.append(jobs_by_schedd[schedd][-1])\n args.append(jobs_by_schedd[schedd][-2])\n jcount = jcount + 2\n if jcount == 4:\n break\n\n # now we have 4 jobs on 2 schedd's from our list\n count = 0\n cmd = \" \".join(args)\n print(\"Running: \", cmd)\n with os.popen(cmd, \"r\") as fin:\n for line in fin.readlines():\n print(\"got: \", line)\n count = count + 1\n assert count == 5\n\n\n@pytest.mark.smoke\n@pytest.mark.integration\ndef test_wait_for_jobs():\n \"\"\"Not really a test, but we have to wait for jobs to complete...\"\"\"\n count = 1\n print(\"Waiting for jobs: \", \" \".join(joblist))\n\n # put the list somewhere so we can see what the test is waiting for\n # when not running with -s or whatever...\n with open(\"/tmp/jobsub_lite_test_joblist\", \"w\") as f:\n f.write(\" \".join(joblist))\n\n repeats = 0\n while count > 0 and repeats < 3:\n if repeats == 0:\n time.sleep(20)\n count = len(joblist)\n for jid in joblist:\n group = group_for_job(jid)\n cmd = \"jobsub_q -format '%%s' JobStatus -G %s %s\" % (group, jid)\n print(\"running: \", cmd)\n pf = os.popen(cmd)\n l = pf.readlines()\n res = pf.close()\n print(\"got output: \", repr(l))\n if l:\n status = l[0][0]\n else:\n status = None\n print(\"jobid: \", jid, \" status: \", status)\n if status == \"4\" or status == \"A\" or status == None:\n # '4' is Completed.\n # 'A' is when it says 'All queues are empty' (so they're\n # all completed...)\n # None is when there's no output...\n count = count - 1\n\n # have to all look good 3 times in a row...\n if count == 0:\n repeats = repeats + 1\n else:\n repeats = 0\n\n print(\"Done.\")\n assert True\n\n\n@pytest.mark.smoke\n@pytest.mark.integration\ndef test_fetch_output():\n for jid in joblist:\n group = group_for_job(jid)\n owd = tempfile.mkdtemp()\n outdirs[jid] = owd\n subprocess.run(\n [\"jobsub_fetchlog\", \"--group\", group, \"--jobid\", jid, 
\"--destdir\", owd],\n check=True,\n )\n\n\n@pytest.mark.smoke\n@pytest.mark.integration\ndef test_check_job_output():\n res = True\n for jid, ddir in ddirs.items():\n print(f\"Checking {jid2test[jid]} {jid} -d tag {ddir}...\")\n fl = fake_ifdh.ls(ddir)\n res = res and bool(len(fl))\n\n for jid, outdir in outdirs.items():\n fl = glob.glob(\"%s/*[0-9].out\" % outdir)\n\n if len(fl) < jid2nout[jid]:\n # if not enough files, try fetching again...\n # sometimes when we look later they're all there\n print(f\"Notice: re-fetching {jid} logs...\")\n group = group_for_job(jid)\n subprocess.run(\n [\n \"jobsub_fetchlog\",\n \"--group\",\n group,\n \"--jobid\",\n jid,\n \"--destdir\",\n outdir,\n ],\n check=True,\n )\n fl = glob.glob(\"%s/*[0-9].out\" % outdir)\n\n # make sure we have enough output files\n print(\n f\"Checking out file count test {jid2test[jid]} {jid} expecting {jid2nout[jid]} actual count {len(fl)}\"\n )\n if len(fl) >= jid2nout[jid]:\n print(\"-- ok\")\n else:\n res = False\n print(\"-- bad\")\n\n for f in fl:\n print(f\"Checking {jid2test[jid]} {jid} output file {f}...\")\n fd = open(f, \"r\")\n f_ok = False\n ll = fd.readlines()\n fd.close()\n if ll[-1].endswith(\"status 0\\n\") or ll[-1].endswith(\"success!\\n\"):\n print(\"-- ok\")\n else:\n print(\"-- bad\")\n res = False\n\n shutil.rmtree(outdir)\n assert res\n\n\n@pytest.mark.integration\n@pytest.mark.parametrize(\n \"constraint_flag_and_arg\",\n [\"--constraint 'Owner==\\\"{user}\\\"'\", \"--constraint='Owner==\\\"{user}\\\"'\"],\n)\ndef test_valid_constraint(samdev, constraint_flag_and_arg):\n lookaround_launch(\"--devserver\")\n if len(joblist) == 0:\n raise AssertionError(\"No jobs submitted\")\n jid = joblist[-1]\n group = group_for_job(jid)\n user = os.environ[\"USER\"]\n cmd = f\"jobsub_q -G {group} {constraint_flag_and_arg} --jobid={jid} -format '%s' ClusterId\"\n cmd = cmd.format(user=user)\n with os.popen(cmd) as query:\n output = query.readlines()\n assert len(output) == 1 and output[0] in jid\n\n\n@pytest.mark.integration\n@pytest.mark.parametrize(\n \"constraint_flag_and_arg\",\n [\n \"--constraint 'thisisabadconstraintbutwillparse==true'\",\n \"--constraint='thisisabadconstraintbutwillparse==true'\",\n ],\n)\ndef test_invalid_constraint(samdev, constraint_flag_and_arg):\n cmd = f\"jobsub_q -G fermilab {constraint_flag_and_arg} -autoformat ClusterId\"\n query = os.popen(cmd)\n output = query.readlines()\n assert len(output) == 0\n rc = query.close()\n assert (\n rc is None\n ) # We got a 0 return code from the query even if it returned nothing\n","repo_name":"fermitools/jobsub_lite","sub_path":"tests/test_submit_wait_int.py","file_name":"test_submit_wait_int.py","file_ext":"py","file_size_in_byte":19113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24365617776","text":"from django.http import Http404\nfrom django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom weblog_about_us.models import AboutUs\nfrom weblog_content.models import Content\nfrom .models import CurrentTopics, ResearchGrants, ResearchPartners\nfrom meta.repeatable_code import my_grouper\n\n\ndef research_page(request):\n about_us=AboutUs.objects.last()\n content=Content.objects.last()\n current_topics = list(my_grouper(2, CurrentTopics.objects.active()))\n research_grants = ResearchGrants.objects.all()\n research_collaborators = ResearchPartners.objects.active()\n\n context = {\n 'about_us':about_us,\n 'current_topics': current_topics,\n 
'research_grants': research_grants,\n 'research_collaborators': research_collaborators,\n 'content':content\n }\n return render(request, 'research/research.html', context)\n\n\nclass ResearcherList(ListView):\n template_name = 'research/researcher.html'\n queryset = ResearchPartners.objects.active()\n paginate_by = 9\n\n def get_context_data(self,*args, **kwargs) :\n context= super(ResearcherList,self).get_context_data(*args,**kwargs)\n context['about_us']=AboutUs.objects.last()\n return context\n\n\nclass ResearcherDetailView(DetailView):\n queryset = ResearchPartners.objects.active()\n template_name = 'research/researcher_info.html'\n\n def get_object(self, *args, **kwargs):\n slug = self.kwargs.get('slug')\n try:\n research_info = ResearchPartners.objects.get(slug=slug)\n except ResearchPartners.DoesNotExist:\n raise Http404(\"research does not exists ...\")\n except ResearchPartners.MultipleObjectsReturned:\n qs = ResearchPartners.objects.filter(slug=slug)\n research_info = qs.first()\n\n return research_info\n\n def get_context_data(self,*args, **kwargs) :\n context=super(ResearcherDetailView,self).get_context_data(*args,**kwargs)\n context['about_us']=AboutUs.objects.last()\n return context\n","repo_name":"FarzanehGhorbani/personal-weblog-django","sub_path":"weblog_research/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35653629916","text":"#!/usr/bin/env python3\n# recognize text in audio clips\n# then guess language based on result\n# store results in json so we can process files later\n\n# speech recognition\nimport speech_recognition as sr\nimport os, json\n\nr = sr.Recognizer()\n\ninpath = \"clips/\"\n\nout = {}\ncount = 0\nfor fn in os.listdir(inpath):\n if fn.endswith('wav'):\n count += 1\n print('\\r',count, fn, ' ',end ='')\n src = sr.AudioFile(os.path.join(inpath,fn))\n with src as source:\n audio = r.record(src)\n try:\n result=r.recognize_google(audio,show_all=True)\n if len(result) > 0:\n if 'confidence' in result['alternative'][0]:\n confidence = result['alternative'][0]['confidence']\n else:\n confidence = 0\n txt = result['alternative'][0]['transcript']\n # print(f\"{fn}: {txt} ({confidence})\")\n\n else:\n # empty result\n txt = \"\"\n confidence = 0\n except sr.UnknownValueError:\n # recognizer fails\n txt = \"\"\n confidence = 0\n except sr.RequestError as e:\n # recognizer unreachable??\n txt = \"ERROR\"\n confidence = 0\n\n # guess language as tanacross if low confidence\n # unless it has the word \"number\", which for some reason\n # also has low confidence\n if confidence < .9 and 'number' not in txt:\n lang = 'tnc'\n else:\n lang = 'eng'\n\n out.update( {fn : {'text':txt, 'confidence':confidence, 'language': lang } })\n \nwith open('test.json', 'w') as convert_file:\n convert_file.write(json.dumps(out, indent=4))\nprint(\"DONE\")","repo_name":"gmholton/tnc-phrases","sub_path":"scripts/recognize.py","file_name":"recognize.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24942192422","text":"import numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.naive_bayes import GaussianNB\r\n\r\ndata=[]\r\nlabel=[]\r\n\r\nwith open('./data.txt') as f:\r\n for line in f:\r\n token=line.strip().split(' ')\r\n data.append([float(tk) for tk in token[:-1]])\r\n 
label.append(token[-1])\r\nx=np.array(data)\r\nlabel=np.array(label)\r\ny=np.zeros(label.shape)\r\n\r\ny[label=='fat']=1\r\n\r\n\r\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)\r\n\r\nmodel=GaussianNB()\r\nmodel.fit(x,y)\r\npredicted=model.predict(x_test)\r\nprint(x_test)\r\nprint(predicted)\r\n","repo_name":"yzjbryant/YZJ_MIX_Code","sub_path":"Machine_Learning_Model/Naive_Bayes.py","file_name":"Naive_Bayes.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"473323024","text":"'''Write new catalogue without FLAGS and corrected 657 filter and corrected UNC\n - data_v2.txt has corrected 657 filter and removed flags \n - data_v3.txt uses data_V2.txt to correct the mag_unc:\n - narrow filters: v2_unc/15\n - broad filters: v2_unc/10\n - data_v4.txt uses data_v2.txt to correct the mag_unc: \n - narrow filters: v2_unc/15\n - 657: v2_unc/8\n - broad filters: v2_unc/10\n \nlabel_cat(): make catalogue of label data\nmk_cat(): make new version of data_v(n).txt'''\nimport numpy as np\nfrom astropy.table import Table\nimport os\nimport os.path\n\n\ndef label_cat():\n label_titles = ['PN', 'wr']\n for i in range(0, len(label_titles)):\n data = Table.read('data_v3.txt', format='ascii.commented_header',\n guess=False)\n object_cat = Table.read('C:\\\\Users\\\\Owner\\\\Documents\\\\GitHub\\\\m83_clustering\\\\cat_data\\\\m83_ers_pubcat_by_object\\\\' + label_titles[i] + '_catalogue.txt',\n format='ascii.commented_header', guess=False)\n remove = []\n object_id = object_cat['id_']\n for r in range(0, len(data)):\n if data['id_'][r] not in object_id:\n remove.append(r)\n data.remove_rows(remove)\n label_table = Table(data=data)\n label_table.write(label_titles[i] + '_labels.txt',\n format='ascii.commented_header')\n return\n\n\ndef mk_cat():\n broad_correction = 10\n data = Table.read('data_v2.txt', format='ascii.commented_header',\n guess=False)\n broad = ['225', '336', '438', '555', '814']\n narrow = ['373', '487', '502', '657', '673']\n\n # Broad uncertanty correction\n for b in range(0, len(broad)):\n for i in range (0, len(data)):\n if data['mag05_' + broad[b]][i] != -99.0 and data['mag05_' + broad[b] + '_unc'][i] != -99.0:\n data['mag05_' + broad[b] + '_unc'][i] = data['mag05_' + broad[b] + '_unc'][i]/broad_correction\n if data['mag3_' + broad[b]][i] != -99.0 and data['mag3_' + broad[b] + '_unc'][i] != -99.0:\n data['mag3_' + broad[b] + '_unc'][i] = data['mag3_' + broad[b] + '_unc'][i]/broad_correction\n\n # Narrow uncertanty correction\n for n in range(0, len(narrow)):\n if narrow[n] == '657':\n narrow_correction = 8\n else: \n narrow_correction = 15\n\n for j in range (0, len(data)):\n if data['mag05_' + narrow[n]][j] != -99.0 and data['mag05_' + narrow[n] + '_unc'][j] != -99.0:\n data['mag05_' + narrow[n] + '_unc'][j] = data['mag05_' + narrow[n] + '_unc'][j]/narrow_correction\n if data['mag3_' + narrow[n]][j] != -99.0 and data['mag3_' + narrow[n] + '_unc'][j] != -99.0:\n data['mag3_' + narrow[n] + '_unc'][j] = data['mag3_' + narrow[n] + '_unc'][j]/narrow_correction\n Table.write(data, 'data_v4.txt', format='ascii.commented_header')\n return()\n","repo_name":"PBarmby/m83_clustering","sub_path":"Code/new_catalogue.py","file_name":"new_catalogue.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31315555617","text":"\"\"\"\nDjango settings for oxford project.\n\nFor more 
information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nPROJECT_ROOT =os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, \"static/\", *MEDIA_URL.strip(\"/\").split(\"/\"))\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'job',\n\n 'django_extensions',\n 'markitup',\n 'sorl.thumbnail',\n 'rest_framework',\n 'geopy',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'oxford.urls'\n\nWSGI_APPLICATION = 'oxford.wsgi.application'\n\n\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# PASSWORD_HASHERS = (\n# 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',\n# 'django.contrib.auth.hashers.BCryptPasswordHasher',\n# 'django.contrib.auth.hashers.PBKDF2PasswordHasher',\n# 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',\n# 'django.contrib.auth.hashers.SHA1PasswordHasher',\n# 'django.contrib.auth.hashers.MD5PasswordHasher',\n# 'django.contrib.auth.hashers.CryptPasswordHasher',\n# )\n\nLOGIN_REDIRECT_URL = 'profile'\nLOGIN_URL = 'login'\n\n\nAUTH_USER_MODEL = 'job.Worker'\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_ROOT = 'staticfiles'\nSTATIC_URL = '/job/static/'\n\n\nTEMPLATE_DIRS = [os.path.join(BASE_DIR, 'job/templates')]\n\n\ntry:\n from local_settings import *\nexcept ImportError:\n pass","repo_name":"andrewuscf/oxford","sub_path":"oxford/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71872221288","text":"import copy\r\nimport math\r\nimport os\r\nimport random\r\nimport sys\r\nimport traceback\r\nimport shlex\r\n\r\nimport modules.scripts as scripts\r\nimport gradio as gr\r\n\r\nfrom modules import sd_samplers\r\nfrom modules.processing import Processed, process_images\r\nfrom PIL import Image\r\nfrom modules.shared import opts, cmd_opts, state\r\n\r\n\r\ndef process_string_tag(tag):\r\n return tag\r\n\r\n\r\ndef process_int_tag(tag):\r\n return int(tag)\r\n\r\n\r\ndef 
process_float_tag(tag):\r\n return float(tag)\r\n\r\n\r\ndef process_boolean_tag(tag):\r\n return True if (tag == \"true\") else False\r\n\r\n\r\n\r\ndef load_prompt_file(file):\r\n if file is None:\r\n lines = []\r\n else:\r\n lines = [x.strip() for x in file.decode('utf8', errors='ignore').split(\"\\n\")]\r\n\r\n return None, \"\\n\".join(lines), gr.update(lines=7)\r\n\r\n\r\nclass Script(scripts.Script):\r\n def title(self):\r\n return \"regional prompter helper\"\r\n\r\n def ui(self, is_img2img): \r\n prompt_txt1 = gr.Textbox(label=\"Start prompt inputs\", lines=1, elem_id=self.elem_id(\"prompt_txt\"))\r\n prompt_txt2 = gr.Textbox(label=\"End prompt inputs\", lines=1, elem_id=self.elem_id(\"prompt_txt\"))\r\n image_number = gr.Number(label=\"region count\", value=20)\r\n\r\n\r\n # We start at one line. When the text changes, we jump to seven lines, or two lines if no \\n.\r\n # We don't shrink back to 1, because that causes the control to ignore [enter], and it may\r\n # be unclear to the user that shift-enter is needed.\r\n # prompt_txt.change(lambda tb: gr.update(lines=7) if (\"\\n\" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt])\r\n return [prompt_txt1, prompt_txt2, image_number]\r\n\r\n def run(self, p, prompt_txt1: str, prompt_txt2: str, image_number: int):\r\n p.do_not_save_grid = True\r\n\r\n job_count = 0\r\n jobs = []\r\n\r\n state.job_count = job_count\r\n\r\n images = []\r\n all_prompts = []\r\n infotexts = []\r\n\r\n prompt_txt1 += \" BREAK \"\r\n prompt_txt2 += \" BREAK \"\r\n\r\n result = \"\"\r\n image_number = int(image_number)\r\n for i in range(image_number):\r\n a_count = image_number - i\r\n b_count = i\r\n result += p.prompt + \" BREAK \" + prompt_txt1 * a_count + prompt_txt2 * b_count\r\n result = result.rstrip(\"BREAK \")\r\n\r\n jobs.append({\"prompt\":result})\r\n job_count += 1\r\n result = \"\"\r\n\r\n\r\n state.job_count = job_count \r\n\r\n for n, args in enumerate(jobs):\r\n state.job = f\"{state.job_no + 1} out of {state.job_count}\"\r\n\r\n copy_p = copy.copy(p)\r\n for k, v in args.items():\r\n print(v)\r\n setattr(copy_p, k, v)\r\n\r\n proc = process_images(copy_p)\r\n images += proc.images\r\n \r\n all_prompts += proc.all_prompts\r\n infotexts += proc.infotexts\r\n\r\n return Processed(p, images, p.seed, \"\", all_prompts=all_prompts, infotexts=infotexts)\r\n","repo_name":"yang0/regional_prompter_helper","sub_path":"regional_prompter_helper.py","file_name":"regional_prompter_helper.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"20298049951","text":"from datetime import datetime\nfrom collections import namedtuple\nfrom fractions import Fraction\nfrom decimal import Decimal\n\nfrom flask import Flask, jsonify\nfrom flask_jsonplus import FlaskJSONPlus\n\napp = Flask(__name__)\napp.config['JSONPLUS_EXACT'] = True\n\njsonplus = FlaskJSONPlus(app)\n\n\n@app.route('/')\ndef hello_world():\n Point = namedtuple('Point', 'x y')\n data = {\n 'third': Fraction(1, 3),\n 'dec': Decimal('0.1'),\n 'now': datetime.now(),\n 'set': set(range(3)),\n 'tuple': (3, 1, 4),\n 'namedtuple': Point(3, 4)\n }\n return jsonify(data)\n","repo_name":"randomir/jsonplus","sub_path":"python-flask/test/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"20107926239","text":"# date : 2020/11/19\r\n# 시뮬레이션에 사용하는 
utilities\r\n#\r\n# 보다 자세한 내용을 아래 tistory 참고\r\n# https://money-expert.tistory.com/34\r\n#\r\nimport json\r\nimport csv\r\n\r\n#\r\n# for read data from cvs\r\n#\r\n# row : value list\r\ndef get_new_item(keys, row) :\r\n data = {}\r\n for i in range(len(row)) :\r\n data[keys[i]] = row[i]\r\n return data\r\n\r\n# 첫 줄은 title이라고 가정, 이후에 title 값을 key로 갖는 dict로 읽기\r\ndef read_csv_to_dict(fname) :\r\n data = []\r\n keys =[]\r\n first = 1\r\n with open(fname, 'r', encoding='UTF8') as FILE :\r\n csv_reader = csv.reader(FILE, delimiter=',', quotechar='\"')\r\n for row in csv_reader :\r\n if first : # make dict keys\r\n keys = row.copy()\r\n# for key in row :\r\n# keys .append(key)\r\n first = 0\r\n else : \r\n data.append(get_new_item(keys, row))\r\n return data\r\n\r\n#\r\n# for writing data to cvs format\r\n#\r\ndef save_to_file_csv(file_name, data) :\r\n with open(file_name,'w',encoding=\"cp949\") as make_file: \r\n # title 저장\r\n vals = data[0].keys()\r\n ss = ''\r\n for val in vals:\r\n val = val.replace(',','')\r\n ss += (val + ',')\r\n ss += '\\n'\r\n make_file.write(ss)\r\n\r\n for dt in data:\r\n vals = dt.values()\r\n ss = ''\r\n for val in vals:\r\n sval = str(val) \r\n sval = sval.replace(',','')\r\n ss += (sval + ',')\r\n ss += '\\n'\r\n make_file.write(ss)\r\n make_file.close()\r\n\r\nif __name__ == '__main__':\r\n\r\n print('myutil')\r\n","repo_name":"multizone-quant/System_trading_ex","sub_path":"my_util.py","file_name":"my_util.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"9886450677","text":"import dagster._check as check\nimport graphene\nfrom dagster._core.storage.captured_log_manager import CapturedLogData\nfrom dagster._core.storage.compute_log_manager import ComputeIOType, ComputeLogFileData\n\nfrom dagster_graphql.schema.util import ResolveInfo, non_null_list\n\n\nclass GrapheneComputeIOType(graphene.Enum):\n STDOUT = \"stdout\"\n STDERR = \"stderr\"\n\n class Meta:\n name = \"ComputeIOType\"\n\n\nclass GrapheneComputeLogFile(graphene.ObjectType):\n class Meta:\n name = \"ComputeLogFile\"\n\n path = graphene.NonNull(graphene.String)\n data = graphene.Field(\n graphene.String, description=\"The data output captured from step computation at query time\"\n )\n cursor = graphene.NonNull(graphene.Int)\n size = graphene.NonNull(graphene.Int)\n download_url = graphene.Field(graphene.String)\n\n\ndef from_compute_log_file(file: ComputeLogFileData):\n check.opt_inst_param(file, \"file\", ComputeLogFileData)\n if not file:\n return None\n return GrapheneComputeLogFile(\n path=file.path,\n data=file.data,\n cursor=file.cursor,\n size=file.size,\n download_url=file.download_url,\n )\n\n\nclass GrapheneComputeLogs(graphene.ObjectType):\n runId = graphene.NonNull(graphene.String)\n stepKey = graphene.NonNull(graphene.String)\n stdout = graphene.Field(GrapheneComputeLogFile)\n stderr = graphene.Field(GrapheneComputeLogFile)\n\n class Meta:\n name = \"ComputeLogs\"\n\n def _resolve_compute_log(self, graphene_info: ResolveInfo, io_type):\n return graphene_info.context.instance.compute_log_manager.read_logs_file(\n self.runId, self.stepKey, io_type, 0\n )\n\n def resolve_stdout(self, graphene_info: ResolveInfo):\n return self._resolve_compute_log(graphene_info, ComputeIOType.STDOUT)\n\n def resolve_stderr(self, graphene_info: ResolveInfo):\n return self._resolve_compute_log(graphene_info, ComputeIOType.STDERR)\n\n\ndef from_captured_log_data(log_data: CapturedLogData):\n return 
GrapheneCapturedLogs(\n logKey=log_data.log_key,\n stdout=log_data.stdout.decode(\"utf-8\") if log_data.stdout else None,\n stderr=log_data.stderr.decode(\"utf-8\") if log_data.stderr else None,\n cursor=log_data.cursor,\n )\n\n\nclass GrapheneCapturedLogs(graphene.ObjectType):\n logKey = non_null_list(graphene.String)\n stdout = graphene.Field(graphene.String)\n stderr = graphene.Field(graphene.String)\n cursor = graphene.Field(graphene.String)\n\n class Meta:\n name = \"CapturedLogs\"\n\n\nclass GrapheneCapturedLogsMetadata(graphene.ObjectType):\n stdoutDownloadUrl = graphene.Field(graphene.String)\n stdoutLocation = graphene.Field(graphene.String)\n stderrDownloadUrl = graphene.Field(graphene.String)\n stderrLocation = graphene.Field(graphene.String)\n\n class Meta:\n name = \"CapturedLogsMetadata\"\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster-graphql/dagster_graphql/schema/logs/compute_logs.py","file_name":"compute_logs.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"38713077207","text":"import pika\n\n\n\n\ndef push_task(queue_name, routing_key, body):\n # 连接RabbitMq 默认端口5672\n connection = pika.BlockingConnection(pika.ConnectionParameters('192.168.1.53', 32072))\n\n # 订阅一个频道\n channel = connection.channel()\n # 声明一个叫hello的队列\n channel.queue_declare(queue=queue_name, durable=True)\n\n # 消息不会直接发送到队列,先发送到交换机,exchange为空,默认交换--->允许我们准确的指定到那一个队列中,routing_key表示队列名称,body代表要发送过去的消息\n channel.basic_publish(exchange='', routing_key=routing_key, body=body, properties=pika.BasicProperties(delivery_mode=2))\n\n # 刷新网络缓冲区,连接断开\n connection.close()\n\n\nif __name__ == '__main__':\n for i in range(50):\n push_task('test', 'test', f'{i}')","repo_name":"1987128073/project","sub_path":"pinyou/test/mq_sendmsg_test.py","file_name":"mq_sendmsg_test.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10783732461","text":"from tkinter import *\nfrom tkinter import ttk\nimport tkinter.messagebox\nimport mysql.connector\nfrom mysql.connector import Error\nfrom navigator import *\n\n\nclass GraphicalUserInterface:\n def __init__(self, root):\n self.root = root\n empty = \" \"\n self.root.title(200*empty+ \"AADT Tennessee, US\")\n self.root.geometry(\"1350x760+0+0\")\n\n #Variables\n self.stationId = StringVar()\n self.county = StringVar()\n self.routeNumber = StringVar()\n self.year = StringVar()\n self.street = StringVar()\n self.stnNumber = IntVar()\n self.stationLocation = StringVar()\n self.aadt2018 = IntVar()\n self.aadt2017 = IntVar()\n self.aadt2016 = IntVar()\n self.aadt2015 = IntVar()\n self.aadt2014 = IntVar()\n\n #Functions\n def closeApp():\n close = tkinter.messagebox.askyesno(\"AADT TnState\",\"Confirm exit?\")\n if close > 0:\n root.destroy()\n return\n\n def resetEnries():\n self.stationId.set(\"\")\n self.county.set(\"\")\n self.routeNumber.set(\"\")\n self.year.set(\"AADT_2018\")\n self.street.set(\"\")\n self.stationLocation.set(\"\")\n\n def printRecords():\n for row in self.trafficRecords.get_children():\n print(self.trafficRecords.item(row)[\"values\"])\n\n def addRecords():\n return\n\n def displayRecords():\n try:\n connection = mysql.connector.connect(host = \"localhost\",user=\"root\",passwd=\"root\",database=\"dtraffic\")\n cursor = connection.cursor()\n print(\"debug->Connected\")\n cursor.execute(\"\"\"SELECT * FROM dtraffic.datatraffic\"\"\")\n 
results = cursor.fetchall()\n\n if len(results) !=0:\n self.trafficRecords.delete(*self.trafficRecords.get_children())\n for row in results:\n self.trafficRecords.insert('',END,values=row)\n connection.commit()\n connection.close()\n except Error as e:\n print(f\"Error '{e}' occured\")\n \n def queryData():\n try:\n connection = mysql.connector.connect(host = \"localhost\",user=\"root\",passwd=\"root\",database=\"dtraffic\")\n cursor = connection.cursor()\n print(\"debug->Connected\")\n cursor.execute(\"SELECT STATION_ID,STN_NUMBER,COUNTY,LOCATION,RTE_NUMBER,AADT_2018,AADT_2017,AADT_2016,AADT_2015 FROM dtraffic.trafficdata where COUNTY='%s'\", self.county.get())\n results = cursor.fetchall()\n\n if len(results) !=0:\n self.trafficRecords.delete(*self.trafficRecords.get_children())\n for row in results:\n self.trafficRecords.insert('',END,values=row)\n connection.commit()\n connection.close()\n except Error as e:\n print(f\"Error '{e}' occured\")\n\n\n def tableData(env):\n viewInfo = self.trafficRecords.focus()\n record = self.trafficRecords.item(viewInfo)\n row= record['values'] \n self.stationId.set(row[0])\n self.stnNumber.set(row[1])\n self.county.set(row[2])\n self.stationLocation.set(row[3])\n self.routeNumber.set(row[4])\n self.aadt2018.set(row[5])\n self.aadt2017.set(row[6])\n self.aadt2016.set(row[7])\n self.aadt2016.set(row[8])\n\n #Frames\n self.mainFrame = Frame(self.root, bd=10, width=1350, height=700, relief=RIDGE, bg=\"cadet blue\")\n self.mainFrame.grid()\n \n\n self.topFrame = Frame(self.mainFrame, bd=5, width=1340, height=100, relief=RIDGE)#title frame\n self.topFrame.grid(row=0,column=0)\n self.topFrameA = Frame(self.mainFrame, bd=5, width=1340, height=50, relief=RIDGE)\n self.topFrameA.grid(row=2,column=0)\n self.topFrameB = Frame(self.mainFrame, bd=5, width=1340, height=450, relief=RIDGE)\n self.topFrameB.grid(row=1,column=0)\n \n self.leftFrame=Frame(self.topFrameB, bd=5, width=600, height=180,padx=2,bg=\"cadet blue\", relief=RIDGE)\n self.leftFrame.pack(side=LEFT)\n self.leftFrameA=Frame(self.leftFrame, bd=5, width=600, height=180, relief=RIDGE,padx=2,pady=4)\n self.leftFrameA.pack(side=TOP,padx=0,pady=0)\n self.leftFrameB=Frame(self.leftFrame, bd=5, width=600, height=180, relief=RIDGE)\n self.leftFrameB.pack(side=TOP,pady=4)\n self.leftFrameBLeft=Frame(self.leftFrameB, bd=5, width=300, height=170, relief=RIDGE,padx=2)\n self.leftFrameBLeft.pack(side=LEFT,pady=4)\n self.leftFrameBRight=Frame(self.leftFrameB, bd=5, width=300, height=170, relief=RIDGE,padx=2)\n self.leftFrameBRight.pack(side=RIGHT)\n\n self.rightFrame = Frame(self.topFrameB, bd=5, width=600, height=400, relief=RIDGE,padx=2, bg=\"cadet blue\")\n self.rightFrame.pack(side=RIGHT)\n self.rightFrameA = Frame(self.rightFrame, bd=5, width=540, height=300, relief=RIDGE,padx=2,pady=2)\n self.rightFrameA.pack(side=TOP)\n\n #titles\n self.labelTitle = Label(self.topFrame, bd=7, font=('new times roman',56,UNDERLINE,'bold'),text=\"AADT State of Tennessee\")\n self.labelTitle.grid(row=0,column=0, padx=132)\n\n #labels and entries\n self.stationIdLbl = Label(self.leftFrameA, bd=7, font=('new times roman',12,'bold'),text=\"Station ID\",anchor=W)\n self.stationIdLbl.grid(row=0,column=0)\n self.stationIdEntry = Entry(self.leftFrameA, bd=2, font=('new times roman',12),width=40,justify='left', textvariable=self.stationId)\n self.stationIdEntry.grid(row=0,column=1)\n\n self.countyNameLbl = Label(self.leftFrameA, bd=7, font=('new times roman',12,'bold'),text=\"County\",anchor=W)\n self.countyNameLbl.grid(row=1,column=0)\n 
self.countyNameEntry = Entry(self.leftFrameA, bd=2, font=('new times roman',12),width=40,justify='left', textvariable=self.county)\n self.countyNameEntry.grid(row=1,column=1)\n\n self.rteNumberLbl = Label(self.leftFrameA, bd=7, font=('new times roman',12,'bold'),text=\"Route Number\",anchor=W)\n self.rteNumberLbl.grid(row=2,column=0)\n self.rteNumberEntry = Entry(self.leftFrameA, bd=2, font=('new times roman',12),width=40,justify='left',textvariable=self.routeNumber)\n self.rteNumberEntry.grid(row=2,column=1)\n\n self.yearEntryLbl = Label(self.leftFrameA, bd=7, font=('new times roman',12,'bold'),text=\"Year\",anchor=W)\n self.yearEntryLbl.grid(row=3,column=0)\n self.yearEntry = ttk.Combobox(self.leftFrameA, font=('arial',12),width=38,state='readonly',textvariable=self.year)\n self.yearEntry['values'] = ('','AADT_2018','AADT_2017','AADT_2016','AADT_2015','AADT_2014',\n 'AADT_2013','AADT_2012','AADT_2011','AADT_2010','AADT_2009',\n 'AADT_2008','AADT_2007','AADT_2006','AADT_2005','AADT_2004',\n 'AADT_2003','AADT_2002','AADT_2001','AADT_2000','AADT_1999',\n 'AADT_1998','AADT_1997','AADT_1996','AADT_1995','AADT_1994',\n 'AADT_1993','AADT_1992','AADT_1991','AADT_1990','AADT_1989',\n 'AADT_1988','AADT_1987','AADT_1986','AADT_1985','AADT_1984')\n self.yearEntry.current(0)\n self.yearEntry.grid(row=3,column=1)\n\n self.streetLabel = Label(self.leftFrameA, bd=7, font=('new times roman',12,'bold'),text=\"Location\",anchor=W)\n self.streetLabel.grid(row=4,column=0)\n self.streetEntry = Entry(self.leftFrameA, bd=2, font=('new times roman',12),width=40,justify='left', textvariable=self.stationLocation)\n self.streetEntry.grid(row=4,column=1)\n\n #nvigations\n\n\n #buttons\n self.trfcStatisticsBtn = Button(self.topFrameA, pady=1,padx=24,bd=4,font=('arial',8,'bold'),width=8,text=\"Traffic Statistics\",command=self.gotoTrafficStats)\n self.trfcStatisticsBtn.grid(row=0,column=0,padx=1)\n self.customReportsBtn = Button(self.topFrameA, pady=1,padx=24,bd=4,font=('arial',8,'bold'),width=8,text=\"Custom reports\", command=self.gotoCustomReports)\n self.customReportsBtn.grid(row=0,column=1,padx=1)\n self.adminModeBtn = Button(self.topFrameA, pady=1,padx=24,bd=4,font=('arial',8,'bold'),width=8,text=\"Admin mode\",command=self.gotoAdminMode)\n self.adminModeBtn.grid(row=0,column=2,padx=1)\n self.downloadDataBtn = Button(self.topFrameA, pady=1,padx=24,bd=4,font=('arial',8,'bold'),width=8,text=\"Search\", command=self.gotoSearchMode)\n self.downloadDataBtn.grid(row=0,column=3,padx=1)\n self.settingsBtn = Button(self.topFrameA, pady=1,padx=24,bd=4,font=('arial',8,'bold'),width=8,text=\"Settings\")\n self.settingsBtn.grid(row=0,column=4,padx=1)\n self.inspectDataBtn = Button(self.topFrameA, pady=1,padx=24,bd=4,font=('arial',8,'bold'),width=8,text=\"Inspect This\",command=displayRecords)\n self.inspectDataBtn.grid(row=0,column=5,padx=1)\n self.optionsBtn = Button(self.topFrameA, pady=1,padx=24,bd=4,font=('arial',8,'bold'),width=8,text=\"Options\")\n self.optionsBtn.grid(row=0,column=6,padx=1)\n self.resetBtn = Button(self.topFrameA, pady=1,padx=24,bd=4,font=('arial',8,'bold'),width=8,text=\"Reset\",command=resetEnries)\n self.resetBtn.grid(row=0,column=7,padx=1)\n self.printBtn = Button(self.topFrameA, pady=1,padx=24,bd=4,font=('arial',8,'bold'),width=8,text=\"Print\",command=printRecords)\n self.printBtn.grid(row=0,column=8,padx=1)\n self.exitBtn = Button(self.topFrameA, pady=1,padx=24,bd=4,font=('arial',8,'bold'),width=8,text=\"Exit\",command=closeApp)\n self.exitBtn.grid(row=0,column=9,padx=1)\n \n\n\n #Scrollbars\n 
self.scrollY = Scrollbar(self.rightFrameA, orient=VERTICAL)\n self.trafficRecords = ttk.Treeview(self.rightFrameA,height=10,columns=(\"STATION_ID\",\"STN_NUMBER\",\"COUNTY\",\"LOCATION\",\"RTE_NUMBER\",\n \"AADT_2018\",\"AADT_2017\",\"AADT_2016\",\"AADT_2015\"), yscrollcommand=self.scrollY.set)\n\n self.scrollY.pack(side=RIGHT, fill=Y)\n self.trafficRecords.heading(\"STATION_ID\", text=\"Station Id\")\n self.trafficRecords.heading(\"STN_NUMBER\", text=\"Station No\")\n self.trafficRecords.heading(\"COUNTY\", text=\"County\")\n self.trafficRecords.heading(\"LOCATION\", text=\"Location\")\n self.trafficRecords.heading(\"RTE_NUMBER\", text=\"Route Number\")\n self.trafficRecords.heading(\"AADT_2018\", text=\"2018\")\n self.trafficRecords.heading(\"AADT_2017\", text=\"2017\")\n self.trafficRecords.heading(\"AADT_2016\", text=\"2016\")\n self.trafficRecords.heading(\"AADT_2015\", text=\"2015\")\n\n self.trafficRecords['show'] = 'headings'\n\n self.trafficRecords.column(\"STATION_ID\", width=40)\n self.trafficRecords.column(\"STN_NUMBER\", width=40)\n self.trafficRecords.column(\"COUNTY\", width=40)\n self.trafficRecords.column(\"LOCATION\", width=40)\n self.trafficRecords.column(\"RTE_NUMBER\", width=40)\n self.trafficRecords.column(\"AADT_2018\", width=40)\n self.trafficRecords.column(\"AADT_2017\", width=40)\n self.trafficRecords.column(\"AADT_2016\", width=40)\n self.trafficRecords.column(\"AADT_2015\", width=40)\n\n \n self.trafficRecords.pack(fill=BOTH,expand=1)\n self.trafficRecords.bind(\"\", tableData)\n displayRecords()\n\n\n \n #open statistics page\n def gotoTrafficStats(self):\n self.statsPage = Toplevel(self.root)\n self.app = TrafficStatistics(self.statsPage)\n\n def gotoCustomReports(self):\n self.statsPage = Toplevel(self.root)\n self.app = CustomReports(self.statsPage)\n\n def gotoAdminMode(self):\n self.statsPage = Toplevel(self.root)\n self.app = AdminMode(self.statsPage)\n\n def gotoSearchMode(self):\n self.statsPage = Toplevel(self.root)\n self.app = Search(self.statsPage)\n \n\n\n\n\n\nif __name__ == \"__main__\":\n root = Tk()\n gui = GraphicalUserInterface(root)\n root.mainloop()","repo_name":"Ramluk/trafficdata","sub_path":"traffic-data/Old_gui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":12126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28314532445","text":"def sequence(num):\n chk = 10\n for i in range(len(str(num))-1):\n tmp = int(str(num)[i]) - int(str(num)[i+1])\n if chk == 10 :\n chk = tmp\n elif chk == tmp:\n continue\n else:\n return False\n return True\n\nn = int(input())\nresult = 0\nfor i in range(1,n+1):\n if sequence(i):\n result += 1\n\nprint(result)","repo_name":"seunghoon2334/baekjoon","sub_path":"1065.py","file_name":"1065.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42332886467","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nhandler404 = \"notes.views.page_not_found\"\nhandler500 = \"notes.views.server_error\"\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('auth/', include(\"allauth.urls\")),\n path(\"auth/\", include(\"users.urls\")),\n path(\"auth/\", include(\"django.contrib.auth.urls\")),\n path(\"\", include(\"notes.urls\", namespace='note')),\n path(\"api/\", include(\"api.urls\", namespace='api')),\n]\n\nif settings.DEBUG:\n 
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","repo_name":"KhantsevDanil/SimbirNote","sub_path":"backend/simbir_main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34558514710","text":"def factorial(n):\n o = 1\n if n == 0:\n return 1\n if (n > 0) and (n < 13):\n for i in range(1, n + 1):\n o = o * i\n return o\n else:\n raise ValueError","repo_name":"Thomas-UA/my-kata-solutions","sub_path":"Factorial.py","file_name":"Factorial.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30421394197","text":"import sys\ninput = sys.stdin.readline\n\nsys.setrecursionlimit(1000000)\n\ndef dfs(graph,w,n,m):\n dx = [1,-1,0,0]\n dy = [0,0,1,-1]\n\n graph[w[0]][w[1]] = -1\n for i in range(4):\n x = w[0]+dx[i]\n y = w[1]+dy[i]\n if 0<=x 0):\n answer = json.loads(rec.Result())\n if answer['text']:\n yield answer['text']\n\n\nsearch_cmds = load_commands_dict(Path('search_commands.json'))\nopen_cmds = load_commands_dict(Path('open_commands.json'))\ncmds_dict = {'найди': search_cmds, 'включи': open_cmds}\n\nfor text in listen():\n print(text)\n found_command, match_percent = process.extractOne(text, list(cmds_dict.keys()))\n print(found_command, match_percent)\n\n if match_percent > 30:\n command_dict = cmds_dict[found_command]\n requested_command = text\n try:\n requested_command = text.replace(found_command, '')\n except KeyError as e:\n pass\n if command_dict == search_cmds:\n webbrowser.open('http://www.google.com/search?q=' + requested_command)\n","repo_name":"danCraw/Voice_helper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20582528207","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\ndb_dict = [{\n 'id': 1,\n 'name': 'Красный зал',\n 'film_name': 'Пятница',\n 'sessions': [{\n 'time': 12,\n 'price': 250\n }, {\n 'time': 16,\n 'price': 350\n }, {\n 'time': 20,\n 'price': 450\n }]\n}, {\n 'id': 2,\n 'name': 'Синий зал',\n 'film_name': 'Чемпионы: Быстрее. Выше. 
Сильнее',\n 'sessions': [{\n 'time': 10,\n 'price': 250\n }, {\n 'time': 13,\n 'price': 350\n }, {\n 'time': 16,\n 'price': 350\n }]\n}, {\n 'id': 3,\n 'name': 'Голубой зал',\n 'film_name': 'Пернатая банда',\n 'sessions': [{\n 'time': 10,\n 'price': 350\n }, {\n 'time': 14,\n 'price': 450\n }, {\n 'time': 18,\n 'price': 450\n }]\n}]\n\n\ndef persent_formula(price: int, percent: int) -> float:\n return (price * percent) / 100\n\n\ndef sale_calculator(tiket_price: int, tiket_count: int) -> float:\n basic_price: int = tiket_count * tiket_price\n if (tiket_count > 10):\n return basic_price - persent_formula(basic_price, 10)\n elif (tiket_count > 5):\n return basic_price - persent_formula(basic_price, 5)\n else:\n return basic_price\n\n\ndef ticket_calculator(room_id: int, session_time: int, tiket_count: int) -> float:\n price: int = 0\n for room in db_dict:\n if room['id'] == room_id:\n for session in room['sessions']:\n if session['time'] == session_time:\n price = sale_calculator(session['price'], tiket_count)\n return price\n","repo_name":"WeslyG/labs","sub_path":"src/7_7/s7_7.py","file_name":"s7_7.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18548560070","text":"\nfrom Data_Structures.hashtable.hashtable import Hashtable\nfrom code_challenges.left_join.left_join import left_join\nimport pytest\n\ndef test_left_join_1(hash_t1,hash_t2):\n assert left_join(hash_t1,hash_t2) == [['diligent', 'employed', 'idle'], ['wrath', 'anger', 'delight'], ['outift', 'garb', None], ['guide', 'usher', 'follow'], ['fond', 'enamored', 'averse']]\n\ndef test_left_join_2(hash_t1,hash_t3):\n assert left_join(hash_t1,hash_t3) == [['diligent', 'employed', 'idle'], ['wrath', 'anger', 'delight'], ['outift', 'garb', None], ['guide', 'usher', None], ['fond', 'enamored', None]]\n\ndef test_left_join_3(hash_t4,hash_t3):\n assert left_join(hash_t4,hash_t3) == [['fond', 'averse', None]]\n\n@pytest.fixture\ndef hash_t1():\n test1 = Hashtable()\n test1.add('fond', 'enamored')\n test1.add('wrath', 'anger')\n test1.add('diligent', 'employed')\n test1.add('outift', 'garb')\n test1.add('guide', 'usher')\n\n return test1\n\n@pytest.fixture\ndef hash_t2():\n test2 = Hashtable()\n test2.add('fond', 'averse')\n test2.add('wrath', 'delight')\n test2.add('diligent', 'idle')\n test2.add('guide', 'follow')\n test2.add('flow', 'jam')\n\n return test2\n\n@pytest.fixture\ndef hash_t3():\n test3 = Hashtable()\n test3.add('wrath', 'delight')\n test3.add('diligent', 'idle')\n\n return test3\n\n@pytest.fixture\ndef hash_t4():\n test4 = Hashtable()\n test4.add('fond', 'averse')\n\n return test4\n","repo_name":"mhn998/-data-structures-and-algorithms","sub_path":"python/tests/challenges/test_left_join.py","file_name":"test_left_join.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9890684737","text":"from dagster._core.host_representation import ExternalExecutionPlan\nfrom dagster._core.instance import DagsterInstance, InstanceRef\nfrom dagster._core.snap import create_execution_plan_snapshot_id, create_job_snapshot_id\nfrom dagster._utils import file_relative_path\nfrom dagster._utils.test import copy_directory\n\n\n# a change of schema in the snapshot hierarchy caused hashes to be different\n# when snapshots reloaded\ndef test_run_created_in_0_7_9_snapshot_id_change():\n src_dir = file_relative_path(__file__, 
\"snapshot_0_7_9_shapshot_id_creation_change/sqlite\")\n with copy_directory(src_dir) as test_dir:\n instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))\n # run_id = 'e297fa70-49e8-43f8-abfe-1634f02644f6'\n\n old_job_snapshot_id = \"88528edde2ed64da3c39cca0da8ba2f7586c1a5d\"\n old_execution_plan_snapshot_id = \"2246f8e5a10d21e15fbfa3773d7b2d0bc1fa9d3d\"\n\n historical_job = instance.get_historical_job(old_job_snapshot_id)\n job_snapshot = historical_job.job_snapshot\n ep_snapshot = instance.get_execution_plan_snapshot(old_execution_plan_snapshot_id)\n\n # It is the pipeline snapshot that changed\n # Verify that snapshot ids are not equal. This changed in 0.7.10\n created_snapshot_id = create_job_snapshot_id(job_snapshot)\n assert created_snapshot_id != old_job_snapshot_id\n\n # verify that both are accessible off of the historical pipeline\n assert historical_job.computed_job_snapshot_id == created_snapshot_id\n assert historical_job.identifying_job_snapshot_id == old_job_snapshot_id\n\n # We also changed execution plan schema in 0.7.11.post1\n assert create_execution_plan_snapshot_id(ep_snapshot) != old_execution_plan_snapshot_id\n\n # This previously failed with a check error\n assert ExternalExecutionPlan(ep_snapshot)\n\n\n# Scripts to create this (run against 0.7.9)\n#\n# from dagster import solid, DagsterInstance, execute_pipeline\n# from dagster._core.snap.utils import create_snapshot_id\n#\n# from dagster.serdes import serialize_pp\n#\n# @solid\n# def noop_solid(_):\n# pass\n#\n# @pipeline\n# def noop_pipeline():\n# noop_solid()\n#\n# instance = DagsterInstance.get()\n#\n# result = execute_pipeline(noop_pipeline, instance=instance)\n#\n# run_id = result.run_id\n\n# print(serialize_pp(run))\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster_tests/general_tests/compat_tests/test_change_snapshot_structure.py","file_name":"test_change_snapshot_structure.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"6336664916","text":"import uuid\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Job(models.Model):\n\n access_token = models.CharField(\n max_length=255,\n help_text=_(\n \"Used to authenticate as the user on Reddit\",\n ),\n )\n\n refresh_token = models.CharField(\n max_length=255,\n help_text=_(\n \"Used to refresh the access_token\",\n ),\n )\n\n code = models.CharField(\n max_length=255,\n help_text=_(\n \"The code given by Reddit, we use this to exchange for tokens\",\n ),\n )\n\n started = models.DateTimeField(\n auto_now_add = True,\n )\n\n last_updated = models.DateTimeField(\n auto_now = True,\n )\n\n identifier = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n max_length=255,\n help_text=_(\n \"The identifier for this task. Shown publicly.\",\n ),\n )\n\n comments_deleted = models.PositiveSmallIntegerField(\n default=0,\n )\n\n submissions_deleted = models.PositiveSmallIntegerField(\n default=0,\n )\n\n STATE_AUTHORIZE = 10\n STATE_AUTHENTICATED = 20\n STATE_RECEIVED_CODE_AND_STATE = 30\n STATE_DELETING_COMMENTS = 40\n STATE_DELETING_SUBMISSIONS = 41\n STATE_FINISHED = 50\n STATE_UNKNOWN_ERROR = 100\n STATE_ACCESS_DENIED = 101\n\n # How far along in the deletion process we are. 
Note that there are\n # large increments to begin with, this is so that if we later on\n # decide to include additional states in between, then no additional\n # migrations will required.\n STATE_CHOICES = (\n (STATE_AUTHORIZE, _('Asked user to authorize')),\n (STATE_AUTHENTICATED, _('Authenticated as user on Reddit')),\n (STATE_RECEIVED_CODE_AND_STATE, _('Received code and state')),\n (STATE_DELETING_COMMENTS, _('Deleting comments')),\n (STATE_DELETING_SUBMISSIONS, _('Deleting submissions')),\n (STATE_FINISHED, _('Finished')),\n (STATE_UNKNOWN_ERROR, _('Unknown error')),\n (STATE_ACCESS_DENIED, _('Access denied')),\n )\n\n state = models.PositiveSmallIntegerField(\n choices=STATE_CHOICES,\n default=STATE_CHOICES[0][0],\n help_text=_(u\"How far are we along in the process.\")\n )\n\n class Meta:\n ordering = ['started', ]\n","repo_name":"Damgaard/Never-Saiddit","sub_path":"never_saiddit/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42057355145","text":"# coding: utf-8\n\nimport os\n\nfrom django.db import models\nfrom django.template.defaultfilters import slugify\n\n# Create your models here.\n\n\ndef organization_logo_path(self, filename):\n return '%s/%s%s' % ('organizations', self.slug, os.path.splitext(filename)[-1])\n\n\n#########################\n# Model: Organization\n#########################\n\nclass Organization(models.Model):\n name = models.CharField(\n max_length = 75,\n verbose_name = 'Name *', # Required\n )\n\n country = models.CharField(\n max_length = 500,\n verbose_name = 'Country',\n blank = True,\n )\n\n homepage = models.URLField(\n verbose_name = 'Homepage',\n blank = True,\n null = True,\n )\n\n logo = models.ImageField(\n upload_to = organization_logo_path,\n verbose_name = 'Logo',\n blank = True,\n null = True,\n )\n\n slug = models.SlugField(\n max_length = 100,\n blank = True,\n )\n\n def __unicode__(self):\n return u'%s' % (self.name)\n\n def save(self, *args, **kwargs):\n self.slug = slugify(str(self.name))\n super(Organization, self).save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n # You have to prepare what you need before delete the model\n if self.logo:\n storage = self.logo.storage\n path = self.logo.path\n # Delete the model before the file\n super(Organization, self).delete(*args, **kwargs)\n # Delete the file after the model\n storage.delete(path)\n else:\n super(Organization, self).delete(*args, **kwargs)\n\n def update(self, *args, **kwargs):\n # You have to prepare what you need before delete the model\n storage = self.logo.storage\n\n try:\n path = self.logo.path\n os.remove(path)\n # Delete the file after the model\n storage.delete(path)\n except:\n pass\n # No previous logo\n\n class Meta:\n ordering = ['name']\n","repo_name":"OscarPDR/projects_morelab","sub_path":"organizations/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17729188721","text":"from turtle import Turtle, Screen\nimport random\n\nwho_won = False\ncolors = [\"red\", \"blue\", \"gold1\", \"purple\", \"green\", \"SkyBlue1\"]\npositions = [-250, -150, -50, 50, 150, 250]\nx_cor = -360\nrunners = []\n\ndef new_turtles():\n for n in range(0, len(colors)):\n racer = Turtle()\n racer.penup()\n racer.shape(\"turtle\")\n racer.color(colors[n])\n racer.setposition(x=x_cor, y=positions[n])\n 
runners.append(racer)\n\n\n# Screen setup configurations\nscreen = Screen()\nscreen.title(\"🐢...Run Turtle Run...🐢\")\nscreen.setup(width=800, height=600)\nuser_bet = screen.textinput(title=\"Make your bet\", prompt=\"Which turtle you think is going to win? (Red, Blue, Gold1,\"\n \" Purple, Green, Sky Blue) Enter the color\").lower()\nwinner = \"\"\n\n# set up the track\nnew_turtles()\nwhile not who_won:\n for runner in runners:\n if round(runner.xcor()) < 369:\n runner.fd(random.randint(1, 10))\n\n else:\n who_won = True\n winner = runner.pencolor()\n break\n\nif user_bet == winner:\n print(f\"You won the best. The winner is: {winner.title()}. Your prize $500\")\nelse:\n print(f\"You lost the best. The winner is: {winner.title()}. You lost $500\")\n\n\n\nscreen.exitonclick()","repo_name":"Koigor97/python","sub_path":"run_turtle_run/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31159336560","text":"\"\"\"Library for scraping metacritic movie rating info\"\"\"\n\nimport asyncio\nimport os\nimport re\nimport requests\nimport sys\nimport traceback\n\nfrom aiohttp import ClientSession\nfrom lxml import html\nfrom pathlib import Path\n\nfrom metacritic.common import debug\nfrom metacritic.common import err\nfrom metacritic.common import warning\nfrom metacritic.common import info\n\nUSER_AGENT = 'Mozilla/5.0'\nPREFIX_PATTERN = re.compile(r'''^/movie/''')\nSUFFIX_PATTERN = re.compile('/(critic|user)-reviews$')\nDEFAULT_SENTINEL = '__CONTENT_UPDATED'\n\nMOVIE_HREF_PATTERN = '//a[starts-with(@href, \"/movie/\")]'\nMETASCORE_SPAN_PATTERN = '//table[@class=\"simple_summary marg_btm1\"]//span[starts-with(@class, \"metascore_w\")]'\nREVIEW_PATTERN = '//div[starts-with(@class, \"review\")]'\nINDIVIDUAL_RATING_PATTERN = './/div[starts-with(@class, \"metascore_w\")]'\nAUTHOR_PATTERN = './/span[contains(@class, \"author\")]'\nPUBLICATION_PATTERN = './/span[contains(@class, \"source\")]//img[@title]'\n\n\ndef scrape_movie_urls(html_content_filename):\n \"\"\"Extract metacritic movie url suffixes from specified metacritic html content\"\"\"\n\n tree = html.parse(html_content_filename)\n urls = set()\n for a_node in tree.xpath(MOVIE_HREF_PATTERN):\n href = a_node.get('href')\n href = PREFIX_PATTERN.sub('', href)\n href = SUFFIX_PATTERN.sub('', href)\n urls.add(href)\n return urls\n\n\ndef get_suffixes_to_download(dir, sentinel_filename, refresh=False):\n suffixes = (s for s in os.listdir(dir) if s != sentinel_filename)\n if not refresh:\n def is_empty(filename):\n return os.stat(filename).st_size == 0\n # only fetch content if file is empty\n suffixes = (s for s in suffixes if is_empty(os.path.join(dir, s)))\n return suffixes\n\n\nasync def download_and_write_urls(dir, suffixes, concurrency):\n \"\"\"Asynchronously download urls and write to disk.\"\"\"\n async with ClientSession() as session:\n tasks = [download_task(s, dir, session) for s in suffixes]\n # list, where each element is of the form (url, content)\n return await gather_with_concurrency(concurrency, *tasks)\n\n\nasync def download_task(suffix, dir, session):\n url = f\"https://www.metacritic.com/movie/{suffix}/critic-reviews\"\n response = await session.get(url, headers={'User-Agent': USER_AGENT})\n if response.status != 200:\n err(f\"{response.status} while downloading {url}\")\n return False\n html = await response.text()\n dest = os.path.join(dir, suffix)\n with open(dest, 'w') as f:\n debug(f\"writing 
downloaded content for {suffix} to {dest} ...\")\n f.write(html)\n return True\n\n\nasync def gather_with_concurrency(n, *tasks):\n semaphore = asyncio.Semaphore(n)\n async def sem_task(task):\n async with semaphore:\n return await task\n return await asyncio.gather(*(sem_task(task) for task in tasks))\n\n\ndef find(node, xpath_pattern):\n \"\"\"replacement for lxml find() that supports full xpath power\"\"\"\n matches = node.xpath(xpath_pattern)\n return matches[0] if matches else None\n\n\ndef get_overall_score(content_tree):\n span = find(content_tree, METASCORE_SPAN_PATTERN)\n try:\n return int(span.text_content())\n except:\n err(\"get_overall_score: \" + traceback.format_exc())\n return None\n\n\ndef extract_one_rating(review_node):\n rating_node = find(review_node, INDIVIDUAL_RATING_PATTERN)\n try:\n return int(rating_node.text_content())\n except:\n err(\"extract_one_rating: \" + traceback.format_exc())\n return None\n\n\ndef extract_author(review_node):\n author_node = find(review_node, AUTHOR_PATTERN)\n return author_node.text_content() if author_node is not None else None\n\n\ndef extract_publication(review_node):\n img_node = find(review_node, PUBLICATION_PATTERN)\n return img_node.get('title') if img_node is not None else None\n\n\ndef extract_ratings_for_movie(filename, group_pub=False):\n tree = html.parse(filename)\n if not tree.getroot():\n # if the file is empty, there is no tree!\n return None\n metascore = get_overall_score(tree)\n if not metascore:\n err(f\"could not extract metascore for: {filename}\")\n return None\n review_nodes = tree.xpath(REVIEW_PATTERN)\n if not review_nodes:\n err(f\"no critic reviews found for: {filename}\")\n return None\n ratings = {}\n total = len(review_nodes)\n succeeded = 0\n for node in review_nodes:\n critic_score = extract_one_rating(node)\n critic_name = extract_author(node)\n publication = extract_publication(node)\n if not (critic_score and critic_name and publication):\n continue\n if not group_pub:\n critic_name = critic_name + ' (' + publication + ')'\n else:\n critic_name = publication\n ratings[critic_name] = critic_score\n succeeded += 1\n if succeeded < total:\n warning(f\"only parsed {succeeded} out of {total} reviews for {filename}\")\n return (metascore, ratings)\n\n\ndef extract_all_ratings(content_dir, sentinel, group_pub=False):\n ratings = {}\n for suffix in os.listdir(content_dir):\n if suffix == sentinel:\n continue\n debug(f\"extracting ratings for: {suffix} ...\")\n rating_for_movie = extract_ratings_for_movie(\n os.path.join(content_dir, suffix), group_pub=group_pub)\n if not rating_for_movie:\n err(f\"could not extract ratings for: {suffix}\")\n continue\n ratings[suffix] = rating_for_movie\n return ratings\n","repo_name":"shashank025/metacritic-weights","sub_path":"metacritic/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":5547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13063859851","text":"numeros = [i for i in range(100)]\n# numeros = [0,1,2,3,4,5,]\nprint(numeros)\n\n\nvector = []\nfor i in numeros:\n if i%2 == 0: # 1 0\n j = i**2\n vector.append(j)\n else:\n j = i*5\n vector.append(j)\nprint(vector)\n\nvector_2 = [i**2 if i%2==0 else i*5 for i in numeros]\nprint(vector_2)\n\n","repo_name":"LeTurtleBoy/Fluent-Python","sub_path":"Cap1/Ejemplo0.py","file_name":"Ejemplo0.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"20351866134","text":"from sys import stdin\n\ninput = stdin.readline\n\n\nclass Bingo:\n def __init__(self, n, numPosition):\n self.n = n\n self.numPosition = numPosition\n self.board = [[False] * n for _ in range(n)]\n self.bingoCnt = 0\n\n def checkNum(self, num):\n pos = self.numPosition[num]\n self.board[pos[0]][pos[1]] = True\n self.checkBingo(pos[0], pos[1])\n if self.bingoCnt >= 3:\n return True\n return False\n\n def checkBingo(self, y, x):\n rowBingo = True\n for i in range(self.n):\n if not self.board[i][x]:\n rowBingo = False\n break\n if rowBingo:\n self.bingoCnt += 1\n colBingo = True\n for j in range(self.n):\n if not self.board[y][j]:\n colBingo = False\n break\n if colBingo:\n self.bingoCnt += 1\n if y == x:\n crossBingo = True\n for i in range(self.n):\n if not self.board[i][i]:\n crossBingo = False\n break\n if crossBingo:\n self.bingoCnt += 1\n if y + x == self.n - 1:\n crossBingo = True\n for i in range(self.n):\n if not self.board[i][self.n - 1 - i]:\n crossBingo = False\n break\n if crossBingo:\n self.bingoCnt += 1\n\n\nif __name__ == \"__main__\":\n numPosition = {}\n n = 5\n for i in range(n):\n numList = list(map(int, input().strip().split()))\n for j in range(n):\n numPosition[numList[j]] = (i, j)\n\n bingo = Bingo(n, numPosition)\n\n cnt = 0\n flag = True\n for i in range(n):\n numList = list(map(int, input().strip().split()))\n for j in range(n):\n cnt += 1\n if bingo.checkNum(numList[j]):\n flag = False\n break\n if not flag:\n break\n print(cnt)","repo_name":"Alphanewbie/TIL","sub_path":"Algorithm_problem_solving/Baek-joon/2578/2578.py","file_name":"2578.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70280037289","text":"from .basic_sub import BasicSub\nimport numpy as np\nimport cv2\nimport os\nimport pandas as pd\n\nfrom monart.process.est_ception import load_batch\nfrom monart.models.inception import init_inception\n\nclass MonetToVec(BasicSub):\n def __init__(self):\n arguments = [('-i', '--input', 'Input path folder with images')]\n super().__init__(arguments = arguments)\n\n self.input_p = self.pathify(self.args.input)\n self.bs = 16\n self.model = init_inception()\n\n def submit(self):\n images = os.listdir(self.input_p)\n batches = np.array_split(np.array(images),len(images)//self.bs)\n dfs = list()\n\n for b in batches:\n sqz = load_batch(b, self.input_p, self.model)\n data = {'names': b.tolist(), 'vectors': sqz.tolist()}\n df = pd.DataFrame(data)\n dfs.append(df) \n\n alldf = pd.concat(dfs)\n alldf.to_csv(f'{self.resdr}monvec.csv')\n","repo_name":"magisterbrown/monet","sub_path":"commands/monet_to_vec.py","file_name":"monet_to_vec.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29156098895","text":"from xml.etree.ElementTree import SubElement, parse\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\n\r\nxmldoc = parse(\"Before.osm\")\r\nroot = xmldoc.getroot()\r\n\r\ndef get_html(url):\r\n _html = \"\"\r\n resp = requests.get(url)\r\n #print(resp.url)\r\n if resp.status_code == 200:\r\n _html = resp.text\r\n return _html\r\n\r\ndef extract_wikidata(href):\r\n\tcount = -1\r\n\t_href = \"\"\r\n\thref_list = list(href)\r\n\t\r\n\tfor char in href_list:\r\n\t\tcount += 1\r\n\t\tif char == \"Q\":\r\n\t\t\tbreak\r\n\t\r\n\tfor char in href_list:\r\n\t\tcount -= 1\r\n\t\tif count <= -1:\r\n\t\t\t_href = _href + 
char\r\n\t\r\n\treturn _href\r\n\t\t\t\r\n\r\nfor node in root.findall(\"node\"):\r\n\tname = \"\"\r\n\tname_value = \"\"\r\n\twikidata = \"\"\r\n\twikidata_value = \"\"\r\n\twikidata_from_name = \"\"\r\n\thref_from_name = \"\"\r\n\tlist_name_value = []\r\n\r\n\tfor tag in node.iter(\"tag\"):\r\n\t\tif tag.attrib[\"k\"] == \"name\":\r\n\t\t\tname = tag\r\n\t\t\tname_value = name.attrib[\"v\"]\r\n\t\t\tlist_name_value = list(name_value)\r\n\t\t\tprint(\"name: \" + str(name))\r\n\t\t\tprint(\"name_value: \" + name_value)\r\n\t\tif tag.attrib[\"k\"] == \"wikidata\":\r\n\t\t\twikidata = tag\r\n\t\t\twikidata_value = wikidata.attrib[\"v\"]\r\n\t\t\tprint(\"wikidata: \" + str(wikidata))\r\n\t\t\tprint(\"wikidata: \" + wikidata_value)\r\n\t\r\n\tif wikidata == \"\" and list_name_value != [] and list_name_value[-1] != \"동\":\r\n\t\thtml = get_html(\"https://ko.wikipedia.org/w/index.php?title=\" + name_value + \"&redirect=no\")\r\n\t\tsoup = BeautifulSoup(html, 'html.parser')\r\n\t\t\r\n\t\tif soup.find(\"a\",{\"accesskey\": \"g\"}) != None and soup.find(\"a\",{\"title\": \"분류:동음이의어 문서\"}) == None:\r\n\t\t\turl_from_name = soup.find(\"a\",{\"accesskey\": \"g\"})[\"href\"]\r\n\t\t\t#print(url_from_name)\r\n\t\t\twikidata_from_name = extract_wikidata(url_from_name)\r\n\t\t\t\r\n\t\t\tif wikidata_from_name != \"Q5296\":\r\n\t\t\t\telement_wikidata = SubElement(node, \"tag\")\r\n\t\t\t\telement_wikidata.attrib[\"k\"] = \"wikidata\"\r\n\t\t\t\telement_wikidata.attrib[\"v\"] = wikidata_from_name\r\n\t\t\t\tprint(\"element_wikidata.attrib['v']: \" + element_wikidata.attrib[\"v\"] + \"\\n\\n\")\r\n\t\t\t\t\r\n\t\t\t\tnode.attrib[\"action\"] = \"modify\"\r\n\t\t\t\t\r\n\t\t\t\telement_wikidata = \"\"\r\n\t\telse:\r\n\t\t\tprint(\"wikidata_from_name: Not one\\n\\n\")\r\n\telse:\r\n\t\tprint(\"\\n\")\r\n\r\nxmldoc.write(\"After.osm\", encoding=\"utf-8\", xml_declaration=True)","repo_name":"depth221/Automated_for_OSM_python","sub_path":"wikidata_from_name.py","file_name":"wikidata_from_name.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33241068379","text":"from algoliasearch.search_client import SearchClient\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\n\nALGOLIA_APP_ID = os.getenv('ALGOLIA_APP_ID')\nALGOLIA_ADMIN_API_KEY = os.getenv('ALGOLIA_ADMIN_API_KEY')\nALGOLIA_INDEX_NAME = os.getenv('ALGOLIA_INDEX_NAME')\n\nclient = SearchClient.create(ALGOLIA_APP_ID, ALGOLIA_ADMIN_API_KEY)\nindex = client.init_index(ALGOLIA_INDEX_NAME)\n\n\ndef search_text(text):\n res = index.search(text)\n name = {}\n i = 0\n # store all result into a dict and use as return\n for r in res['hits']: \n name[i] = r['_highlightResult']['data']['name']['value'].replace('' , '').replace('' , '')\n i = i+1\n print(name)\n return name\n\nif __name__ == '__main__':\n search_text('foundation')\n","repo_name":"icecreamforfree/robot-cone","sub_path":"others/text_search.py","file_name":"text_search.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2184539564","text":"s = input()\r\nstart = 0\r\nend = len(s) - 1\r\nres = True\r\n\r\nwhile True:\r\n if s[start] != s[end]:\r\n res = False\r\n break\r\n\r\n start += 1\r\n end -= 1\r\n\r\n if start > end:\r\n break\r\n\r\nprint(1 if res else 0)","repo_name":"rloldl-c/algorithm","sub_path":"백준/Bronze/10988. 
팰린드롬인지 확인하기/팰린드롬인지 확인하기.py","file_name":"팰린드롬인지 확인하기.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70855297128","text":"from pico2d import *\nimport game_framework\nfrom Object import CPortrait\nfrom Scene import play_scene\nfrom Scene import main_scene\n\nfont = None\nback_img = None\ngauge_img = None\nP1_img = None\nP2_img = None\nshortHL_img = None\nportrait_list = []\nplayer_list = []\nselectP1 = 0\nselectP2 = 0\nnum_player = 0\n\ndef enter():\n global font, back_img, portrait_list, gauge_img, P1_img, P2_img, shortHL_img\n global selectP1, selectP2, num_player\n portrait_list = []\n\n\n\n f = open('game_data.txt', 'r')\n game_data = json.load(f)\n f.close()\n\n num_player = game_data[0]\n\n if num_player is 1:\n selectP1 = 0\n selectP2 = -1\n elif num_player is 2:\n selectP1 = 0\n selectP2 = 0\n\n portrait_list.append(CPortrait.Portrait(get_canvas_width()/2 - 200, get_canvas_height()/2 + 150, 0))\n portrait_list.append(CPortrait.Portrait(get_canvas_width()/2 + 200, get_canvas_height()/2 + 150, 1))\n shortHL_img\n font = load_font('Resource/font/RPGSystem.ttf', 50)\n back_img = load_image('resource/image/backimg.png')\n gauge_img = load_image('resource/image/gauge.png')\n P1_img = load_image('resource/image/P1.png')\n P2_img = load_image('resource/image/P2.png')\n shortHL_img = load_image('resource/image/highlight_short.png')\n pass\n\n\ndef exit():\n data_list = [num_player, selectP1 - 1, selectP2 - 1]\n f = open('game_data.txt', 'w')\n json.dump(data_list, f)\n f.close()\n pass\n\n\ndef update(frame_time):\n pass\n\n\ndef draw():\n clear_canvas()\n back_img.draw(get_canvas_width()/2, get_canvas_height()/2)\n if len(portrait_list) is not 0:\n for portrait in portrait_list:\n portrait.draw()\n font.draw(\n get_canvas_width() / 2 - 300 + 400 * portrait_list.index(portrait),\n get_canvas_height() / 2,\n 'SPEED', (230, 230, 255))\n font.draw(\n get_canvas_width() / 2 - 300 + 400 * portrait_list.index(portrait),\n get_canvas_height() / 2 - 50,\n 'ROTATE', (230, 230, 255))\n font.draw(\n get_canvas_width() / 2 - 250 + 400 * portrait_list.index(portrait),\n get_canvas_height() / 2 - 120,\n 'SELECT', (230, 230, 255))\n for i in range(0, portrait.speed):\n gauge_img.draw(\n get_canvas_width() / 2 - 140 + 400 * portrait_list.index(portrait) + 25 * i,\n get_canvas_height() / 2)\n for i in range(0, portrait.rotate):\n gauge_img.draw(\n get_canvas_width() / 2 - 140 + 400 * portrait_list.index(portrait) + 25 * i,\n get_canvas_height() / 2 - 50)\n if selectP1 is portrait_list.index(portrait) + 1:\n shortHL_img.draw(\n get_canvas_width() / 2 - 200 + 400 * portrait_list.index(portrait),\n get_canvas_height() / 2 - 120)\n P1_img.draw(\n get_canvas_width() / 2 - 310 + 400 * portrait_list.index(portrait),\n get_canvas_height() / 2 - 120)\n if selectP2 is portrait_list.index(portrait) + 1:\n shortHL_img.draw(\n get_canvas_width() / 2 - 200 + 400 * portrait_list.index(portrait),\n get_canvas_height() / 2 - 120)\n P2_img.draw(\n get_canvas_width() / 2 - 80 + 400 * portrait_list.index(portrait),\n get_canvas_height() / 2 - 120)\n update_canvas()\n\n\ndef handle_events(frame_time):\n global selectP1, selectP2\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN:\n if event.key == SDLK_ESCAPE:\n game_framework.change_state(main_scene)\n elif event.key == SDLK_a:\n selectP1 = clamp(1, selectP1 - 1, 2)\n elif event.key == 
SDLK_d:\n selectP1 = clamp(1, selectP1 + 1, 2)\n elif event.key == SDLK_LEFT:\n if selectP2 is not -1:\n selectP2 = clamp(1, selectP2 - 1, 2)\n elif event.key == SDLK_RIGHT:\n if selectP2 is not -1:\n selectP2 = clamp(1, selectP2 + 1, 2)\n elif event.key == SDLK_RETURN:\n if num_player is 1:\n if selectP1 > 0:\n game_framework.change_state(play_scene)\n else:\n if selectP1 > 0 and selectP2 > 0:\n game_framework.change_state(play_scene)\n pass\n\n\ndef pause():\n pass\n\n\ndef resume():\n pass\n","repo_name":"mado0421/2DGameProgrammingProject","sub_path":"Asteroid/Scene/character_scene.py","file_name":"character_scene.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2602390710","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.text import slugify\nfrom django.db import IntegrityError\nfrom django.db.models import F\nfrom django.contrib.postgres.search import SearchQuery, SearchRank, SearchVector\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.postgres.search import TrigramSimilarity\nimport operator\nimport hashlib\nimport pdb\nfrom rest_framework.renderers import JSONRenderer\nimport json\n# models\nfrom authors.models.author import Author\nfrom articles.models.article import Article\nfrom articles.models.content import Content\nfrom tags.models.tag import Tag\nfrom articles.models.content_meta import ContentMeta\nfrom articles.models.content_rating import ContentRating\nfrom trackings.models.impression import Impression\nfrom authors.models.user_tag import UserTag\n# forms\nfrom articles.forms.article import NewArticleForm\nfrom articles.forms.article import UpdateArticleForm\n# tasks\nfrom trackings.tasks import article_impressions as impressions\n\n\n# Create your views here.\n\ndef index(request):\n \n articles = []\n suggesting_articles = []\n page_meta = {\n 'keywords': '',\n 'articles': '',\n 'suggesting_articles': '',\n 'full_url_path': request.build_absolute_uri(),\n }\n\n if request.user.is_authenticated:\n articles = Article.objects.filter(status=Article.STATUS[1][0], in_home=True).order_by('-created_at')[:20]\n _sugg_tags = __get_suggetion_tags(request.user.id)\n suggesting_articles = Article.objects.filter(status=Article.STATUS[1][0], tags__pk__in=_sugg_tags).distinct('id').order_by('-id', '-created_at')[:20]\n suggesting_articles = sorted(suggesting_articles, key=operator.attrgetter('views', 'rating'), reverse=True)\n else:\n articles = Article.objects.filter(status=Article.STATUS[1][0], in_home=True).order_by('-views')[:20]\n _sugg_tags = __get_suggetion_tags(None)\n suggesting_articles = Article.objects.filter(status=Article.STATUS[1][0], tags__pk__in=_sugg_tags).distinct('id').order_by('-id', '-created_at')[:20]\n suggesting_articles = sorted(suggesting_articles, key=operator.attrgetter('views', 'rating'), reverse=True)\n\n for article in articles:\n page_meta['keywords'] += article.title + ','\n\n _articles_seri = Article.ArticleSerializer(articles, many=True)\n page_meta['articles'] = JSONRenderer().render(_articles_seri.data)\n\n _articles_seri = Article.ArticleSerializer(suggesting_articles, many=True)\n page_meta['suggesting_articles'] = JSONRenderer().render(_articles_seri.data)\n\n context = {\n 'articles': articles,\n 'suggesting_articles': suggesting_articles,\n 'meta': 
page_meta,\n }\n\n return render(request, 'articles/index.html', context)\n\ndef search(request):\n # pdb.set_trace()\n _query = request.GET.get('q')\n _page = request.GET.get('page')\n # _page = int(_page)\n\n # advance search\n vector = SearchVector('title', weight='A') + SearchVector('tags__name', weight='B')\n search_query = SearchQuery(_query)\n trigram_similarity = TrigramSimilarity('title', _query) + TrigramSimilarity('tags__name', _query)\n\n _articles = Article.objects.annotate(rank=SearchRank(vector, search_query), similarity=trigram_similarity).filter(similarity__gt=0.3, status=Article.STATUS[1][0]).order_by('id', '-similarity').distinct('id')[:10]\n _tags = Tag.objects.filter(name__contains=_query).order_by('-weight')[:20]\n\n paginator = Paginator(_articles, 5)\n try:\n _paginated_artciles = paginator.page(_page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n _paginated_artciles = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n _paginated_artciles = paginator.page(paginator.num_pages)\n\n _context = {\n 'query': _query,\n 'articles': _paginated_artciles,\n 'tags': _tags,\n 'cpage': _page\n }\n return render(request, 'articles/search.html', _context)\n\ndef detail(request, slug):\n article = get_object_or_404(Article, slug=slug)\n _tags = []\n for tag in article.tags.all():\n _tags.append(tag.id)\n\n action_ids = Article.objects.filter(tags__id__in=_tags, status=Article.STATUS[1][0]).exclude(id=article.id).distinct('id').values_list('id', flat=True)\n related_articles = Article.objects.filter(id__in=action_ids, status=Article.STATUS[1][0]).order_by('-tags__weight').order_by('-views')[:10]\n\n _total_ratings = ContentRating.objects.filter(content_id__in=article.content_set.values('id')).order_by('user_id', '-id').distinct('user_id').count()\n _my_rating = 0\n if hasattr(request.user, 'author'):\n try:\n _rating = ContentRating.objects.get(content_id=article.active_content().id, user_id=request.user.id)\n _my_rating = _rating.value\n except Author.DoesNotExist:\n _my_rating = -1\n pass\n except ContentRating.DoesNotExist:\n _my_rating = -1\n pass\n\n _context = {\n 'article': article,\n 'content': article.active_content,\n 'related_articles': related_articles,\n 'my_rating': _my_rating,\n 'total_ratings': _total_ratings,\n 'full_url_path': request.build_absolute_uri()\n }\n\n __add_view_log(request, article)\n\n return render(request, 'articles/detail.html', _context)\n\ndef network(request, slug):\n article = get_object_or_404(Article, slug=slug)\n\n connection_tree = []\n excludes = []\n excludes.append(article.id)\n connection_tree = __get_article_tree(article, connection_tree, excludes, 0)\n tree_data = json.dumps(connection_tree)\n\n _context = {\n 'article': article,\n 'content': article.active_content,\n 'full_url_path': request.build_absolute_uri(),\n 'relation_tree': tree_data\n }\n\n return render(request, 'articles/network.html', _context)\n\ndef history(request, slug, content_id):\n article = get_object_or_404(Article, slug=slug)\n content = Content.objects.get(pk=content_id)\n _context = {\n 'article': article,\n 'content': content\n }\n\n return render(request, 'articles/history.html', _context)\n\ndef about(request):\n _context = {\n }\n return render(request, 'about.html', _context)\n\n@login_required\ndef add(request):\n if request.method == 'POST':\n # pdb.set_trace()\n _form = NewArticleForm(request.POST)\n if _form.is_valid():\n _user = request.user\n\n _metas = {\n 
'keys': _form.data.getlist('meta_keys'),\n 'values': _form.data.getlist('meta_values')\n }\n flag, message, article = __save_article(_form.cleaned_data, _user, _metas)\n if flag:\n return HttpResponseRedirect('/article/%s' % (article.slug))\n else:\n _context = {\n 'form': _form,\n 'errors': message\n }\n return render(request, 'articles/add.html', _context)\n\n else:\n _context = {\n 'form': _form\n }\n return render(request, 'articles/add.html', _context)\n else:\n _form = NewArticleForm()\n _context = {\n 'form': _form\n }\n # pdb.set_trace()\n return render(request, 'articles/add.html', _context)\n\n@login_required\ndef update(request, slug, article_id):\n _article = get_object_or_404(Article, id=article_id)\n\n # take current content hash\n _meta_keys = []\n _meta_values = []\n for meta in _article.active_content().contentmeta_set.all():\n _meta_keys.append(meta.name)\n _meta_values.append(meta.data)\n\n _hash = hashlib.md5(_article.active_content().body.encode())\n _hash.update(repr(_meta_keys).encode('utf-8'))\n _hash.update(repr(_meta_values).encode('utf-8'))\n\n _uform = UpdateArticleForm(request.POST or None, initial={'article_id': article_id, 'content': _article.active_content().body, 'tags': _article.tags_str(), 'content_hash': _hash.hexdigest()})\n # pdb.set_trace()\n if request.method == 'POST':\n if _uform.is_valid():\n _user = request.user\n _metas = {\n 'keys': _uform.data.getlist('meta_keys'),\n 'values': _uform.data.getlist('meta_values')\n }\n flag, message, article = __update_article(_uform.cleaned_data, _user, _article, _metas)\n if flag:\n return HttpResponseRedirect('/article/%s' % (article.slug))\n else:\n _context = {\n 'form': _form,\n 'errors': message\n }\n return render(request, 'articles/update.html', _context)\n\n _context = {\n 'article': _article,\n 'uform': _uform\n }\n return render(request, 'articles/update.html', _context)\n\n\n# private\ndef __save_article(post, user, metas):\n flag = False\n message = []\n try:\n _slug = slugify(post['title'])\n _article = Article(title=post['title'], slug=_slug, status=Article._DEFAULT_STATUS)\n _content = Content(body=post['content'], status=Content._DEFAULT_STATUS, author=user.author)\n \n _article.save()\n\n article_tags = []\n _tags = post['tags']\n for _tag in _tags:\n tag, created = Tag.objects.get_or_create(name=_tag.lower(), slug=slugify(_tag))\n if not created:\n tag.increase_weight(1)\n _article.tags.add(tag)\n\n _content.article = _article\n _content.save()\n\n _key_index = 0\n for key in metas['keys']:\n _meta = ContentMeta(name=key, data=metas['values'][_key_index])\n _meta.content = _content\n _meta.save()\n _key_index += 1\n\n except IntegrityError as e:\n message.append(e)\n pass\n else:\n message.append(\"Article created successfully\")\n flag = True\n\n return flag, message, _article\n\n\ndef __update_article(post, user, article, metas):\n flag = False\n message = []\n try:\n\n article.tags.clear()\n\n article_tags = []\n _tags = post['tags']\n for _tag in _tags:\n tag, created = Tag.objects.get_or_create(name=_tag.lower(), slug=slugify(_tag))\n if not created:\n tag.increase_weight(1)\n article.tags.add(tag)\n\n # take current content hash\n _meta_keys = []\n _meta_values = []\n _key_index = 0\n for key in metas['keys']:\n _meta_keys.append(key)\n _meta_values.append(metas['values'][_key_index])\n\n _hash = hashlib.md5(post['content'].encode())\n _hash.update(repr(_meta_keys).encode('utf-8'))\n _hash.update(repr(_meta_values).encode('utf-8'))\n\n if _hash.hexdigest() != post['content_hash']:\n 
_content = Content(body=post['content'], status=Content._DEFAULT_STATUS, author=user.author)\n\n article.content_set.update(status=Content.STATUS[0][0])\n _content.article = article\n _content.save()\n\n _key_index = 0\n for key in metas['keys']:\n _meta = ContentMeta(name=key, data=metas['values'][_key_index])\n _meta.content = _content\n _meta.save()\n _key_index += 1\n\n except IntegrityError as e:\n message.append(e)\n pass\n else:\n message.append(\"Content updated successfully\")\n flag = True\n\n return flag, message, article\n\n\ndef __add_view_log(request, article):\n user_id = None\n if request.user.is_authenticated:\n user_id = request.user.id\n\n ip_address = __get_ip(request)\n # impressions.add.delay(article.id, user_id, request.META.get('REMOTE_ADDR'))\n impressions.add.delay(article.id, user_id, ip_address)\n\ndef __get_ip(request):\n \"\"\"Returns the IP of the request, accounting for the possibility of being\n behind a proxy.\n \"\"\"\n ip = request.META.get(\"HTTP_X_FORWARDED_FOR\", None)\n if ip:\n # X_FORWARDED_FOR returns client1, proxy1, proxy2,...\n ip = ip.split(\", \")[0]\n else:\n ip = request.META.get(\"REMOTE_ADDR\", \"\")\n return ip\n\n\ndef __get_suggetion_tags(user_id):\n primary_tags = []\n optional_tags = []\n\n if user_id is not None:\n _article_tags = UserTag.objects.filter(user_id=user_id, source=UserTag.SOURCE_TYPE[0][0], preferece=UserTag.PREFERENCE_TYPE[0][0]).order_by('-created_at')[:10]\n _tags_tags = UserTag.objects.filter(user_id=user_id, source=UserTag.SOURCE_TYPE[1][0], preferece=UserTag.PREFERENCE_TYPE[0][0]).order_by('-created_at')[:10]\n _rating_tags = UserTag.objects.filter(user_id=user_id, source=UserTag.SOURCE_TYPE[2][0], preferece=UserTag.PREFERENCE_TYPE[0][0]).order_by('-created_at')[:10]\n else:\n _article_tags = UserTag.objects.filter(source=UserTag.SOURCE_TYPE[0][0], preferece=UserTag.PREFERENCE_TYPE[0][0]).order_by('-created_at')[:10]\n _tags_tags = UserTag.objects.filter(source=UserTag.SOURCE_TYPE[1][0], preferece=UserTag.PREFERENCE_TYPE[0][0]).order_by('-created_at')[:10]\n _rating_tags = UserTag.objects.filter(source=UserTag.SOURCE_TYPE[2][0], preferece=UserTag.PREFERENCE_TYPE[0][0]).order_by('-created_at')[:10]\n\n optional_tags += _article_tags\n optional_tags += _tags_tags\n optional_tags += _rating_tags\n\n # _article_tags = UserTag.objects.filter(user_id=user_id, source=UserTag.SOURCE_TYPE[0][0], preferece=UserTag.PREFERENCE_TYPE[1][0]).order_by('-created_at')\n # _tags_tags = UserTag.objects.filter(user_id=user_id, source=UserTag.SOURCE_TYPE[1][0], preferece=UserTag.PREFERENCE_TYPE[1][0]).order_by('-created_at')\n # _rating_tags = UserTag.objects.filter(user_id=user_id, source=UserTag.SOURCE_TYPE[2][0], preferece=UserTag.PREFERENCE_TYPE[1][0]).order_by('-created_at')\n\n # primary_tags += _article_tags\n # primary_tags += _tags_tags\n # primary_tags += _rating_tags\n\n tags = []\n for tag in optional_tags:\n tags.append(tag.tag_id)\n\n return tags\n\n\ndef __get_article_tree(article, collection, excludes, level): \n _tags = []\n for tag in article.tags.all():\n _tags.append(tag.id)\n\n action_ids = Article.objects.filter(tags__id__in=_tags, status=Article.STATUS[1][0]).exclude(id__in=excludes).distinct('id').values_list('id', flat=True)\n related_articles = Article.objects.filter(id__in=action_ids, status=Article.STATUS[1][0]).order_by('-tags__weight').order_by('-views')\n\n for r_article in related_articles:\n excludes.append(r_article.id)\n\n _children = []\n if len(related_articles) > 0:\n for r_article in 
related_articles:\n _child = __get_article_tree(r_article, collection, excludes, level+1)\n _children.append(_child)\n\n # _child = { article.title: _children }\n print(article.title)\n _article_seri = Article.ArticleSerializer(article, many=False)\n if len(_children) > 0:\n _child = { \"name\": _article_seri.data, \"children\": _children }\n else:\n _child = { \"name\": _article_seri.data }\n\n return _child\n\n\n\n\n \n","repo_name":"LakithaRav/techjargon","sub_path":"techjargon-app/articles/views/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":15319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38335113374","text":"#!/usr/bin/python3\n\nfrom mpg import *\n\nx,y = 2,2\n\nfor i in range(10,1000): # (3,3,22, 2,2,0) (4,4, 10, puis 11)\n\n print('Seed:',i)\n seed(i)\n g=planar_mp_game(x,y,3,20)\n g.print()\n\n # solve by PI\n v,pol = g.policy_iteration(player=1)\n \n # algorithm\n v2,pol2 = g.algo(verbose=True)\n\n if True:#pol!=pol2:\n\n print(pol)\n print(pol2)\n \n cycles, c_v, path, p_v, cycle = g.analyze_policy( pol )\n ax = get_ax()\n g.plot_cycle_regions(ax, cycles,cycle,c_v)\n g.plot_graph(ax, pol)\n \n cycles, c_v, path, p_v, cycle = g.analyze_policy( pol2 )\n ax = get_ax()\n g.plot_cycle_regions(ax, cycles,cycle,c_v)\n g.plot_graph(ax, pol2)\n\n plt.show()\n \n\n","repo_name":"brunoscherrer/meanpayoffgame","sub_path":"algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31004454388","text":"import re\nimport sys\n\nfrom lab.parser import Parser\n\n\ndef coverage(content, props):\n props[\"coverage\"] = int(\"cost\" in props)\n\n\ndef unsolvable(content, props):\n # Note that this naive test may easily generate false positives.\n props[\"unsolvable\"] = int(\n not props[\"coverage\"]\n and \"Completely explored state space -- no solution!\" in content\n )\n\n\ndef parse_g_value_over_time(content, props):\n \"\"\"Example line: \"[g=6, 16 evaluated, 15 expanded, t=0.00328561s, 22300 KB]\" \"\"\"\n matches = re.findall(\n r\"\\[g=(\\d+), \\d+ evaluated, \\d+ expanded, t=(.+)s, \\d+ KB\\]\\n\", content\n )\n props[\"g_values_over_time\"] = [(float(t), int(g)) for g, t in matches]\n\n\ndef set_outcome(content, props):\n lines = content.splitlines()\n solved = props[\"coverage\"]\n unsolvable = props[\"unsolvable\"]\n out_of_time = int(\"TIMEOUT=true\" in lines)\n out_of_memory = int(\"MEMOUT=true\" in lines)\n # runsolver decides \"out of time\" based on CPU rather than (cumulated)\n # WCTIME.\n if (\n not solved\n and not unsolvable\n and not out_of_time\n and not out_of_memory\n and props[\"runtime\"] > props[\"time_limit\"]\n ):\n out_of_time = 1\n # In cases where CPU time is very slightly above the threshold so that\n # runsolver didn't kill the planner yet and the planner solved a task\n # just within the limit, runsolver will still record an \"out of time\".\n # We remove this record. 
This case also applies to iterative planners.\n # If such planners solve the task, we don't treat them as running out\n # of time.\n if (solved or unsolvable) and (out_of_time or out_of_memory):\n print(\"task solved however runsolver recorded an out_of_*\")\n print(props)\n out_of_time = 0\n out_of_memory = 0\n\n if not solved and not unsolvable:\n props[\"runtime\"] = None\n\n if solved ^ unsolvable ^ out_of_time ^ out_of_memory:\n if solved:\n props[\"error\"] = \"solved\"\n elif unsolvable:\n props[\"error\"] = \"unsolvable\"\n elif out_of_time:\n props[\"error\"] = \"out_of_time\"\n elif out_of_memory:\n props[\"error\"] = \"out_of_memory\"\n else:\n print(f\"unexpected error: {props}\", file=sys.stderr)\n props[\"error\"] = \"unexpected-error\"\n\n\ndef get_parser():\n parser = Parser()\n parser.add_pattern(\n \"planner_exit_code\",\n r\"run-planner exit code: (.+)\\n\",\n type=int,\n file=\"driver.log\",\n required=True,\n )\n parser.add_pattern(\n \"node\", r\"node: (.+)\\n\", type=str, file=\"driver.log\", required=True\n )\n parser.add_pattern(\n \"planner_wall_clock_time\",\n r\"run-planner wall-clock time: (.+)s\",\n type=float,\n file=\"driver.log\",\n required=True,\n )\n parser.add_pattern(\"runtime\", r\"Singularity runtime: (.+?)s\", type=float)\n parser.add_pattern(\n \"time_limit\",\n r\"Enforcing CPUTime limit \\(soft limit, will send \"\n r\"SIGTERM then SIGKILL\\): (\\d+) seconds\",\n type=int,\n file=\"watch.log\",\n required=True,\n )\n # Cumulative runtime and virtual memory of the solver and all child processes.\n parser.add_pattern(\n \"runtime\", r\"WCTIME=(.+)\", type=float, file=\"values.log\", required=True\n )\n parser.add_pattern(\n \"virtual_memory\", r\"MAXVM=(\\d+)\", type=int, file=\"values.log\", required=True\n )\n parser.add_pattern(\"raw_memory\", r\"Peak memory: (\\d+) KB\", type=int)\n parser.add_pattern(\"cost\", r\"\\nFinal value: (.+)\\n\", type=int)\n parser.add_function(coverage)\n parser.add_function(unsolvable)\n parser.add_function(parse_g_value_over_time)\n parser.add_function(set_outcome, file=\"values.log\")\n return parser\n","repo_name":"aibasel/lab","sub_path":"examples/singularity/singularity_parser.py","file_name":"singularity_parser.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"7980109652","text":"# -*- coding: utf-8 -*-\n################################################################################################################\n# @file: ifengquery.py\n# @author: HuBorui\n# @date: 2016/11/22\n# @version: Ver0.0.0.100\n# @note:\n################################################################################################################\nfrom configuration.constant import SPIDER_S2_WEBSITE_VIDEO\nfrom utility.common import Common\nfrom configuration import constant\nfrom utility.xpathutil import XPathUtility\nfrom website.common.s2query import SiteS2Query\nfrom utility.regexutil import RegexUtility\nfrom utility.gettimeutil import getuniformtime,compareNow\nfrom lxml import etree\nimport re\nimport math\nimport datetime\nfrom bs4 import BeautifulSoup \nfrom utility.timeutility import TimeUtility \nfrom log.spiderlog import Logger \n################################################################################################################\n# @class:ifengquery.py\n# @author: HuBorui\n# @date: 2016/11/22\n# 
@note:\n################################################################################################################\nclass IfengS2Query(SiteS2Query):\n IFENG_QUERY_TEMPLATE = 'http://so.v.ifeng.com/video?&p={pn}&q={q}'\n DEFAULT_PAGE_SIZE = 22\n IFENG_S2QUERY_FIRST_PAGE = 'S2QUERY_FIRST_PAGE'\n IFENG_S2QUERY_EACH_PAGE = 'S2QUERY_EACH_PAGE'\n \n ################################################################################################################\n # @functions:__init__\n # @param: none\n # @return:none\n # @note:IfengS2Query,初始化内部变量\n ################################################################################################################\n def __init__(self):\n # 使用该URL识别回传S2查询结果的类,推荐使用主站URL\n SiteS2Query.__init__(self)\n self.fakeoriginalurl = 'http://v.ifeng.com/'\n\n\n ################################################################################################################\n # @functions:query\n # @info: query condition\n # @return:none\n # @note:SiteS2Query,S2 query\n ################################################################################################################\n def query(self, info):\n q = Common.urlenc(info)\n urls = [IfengS2Query.IFENG_QUERY_TEMPLATE.format(pn = 1,q = q)]\n self.__storeqeuryurllist__(urls, IfengS2Query.IFENG_S2QUERY_FIRST_PAGE, {'query':q})\n\n ################################################################################################################\n # @functions:process\n # @params: see WebSite.process\n # @return:none\n # @note:SiteS2Query, process S2 query result,一般为查询到的URL列表\n ################################################################################################################\n def process(self, params):\n if params.step == IfengS2Query.IFENG_S2QUERY_FIRST_PAGE:\n q = params.customized['query']\n # html = etree.HTML(params.content)\n xparser = XPathUtility(params.content)\n mid_count = xparser.getnumber('//div[@class=\"serpinfo\"]/span/em')\n count = str(mid_count).strip()\n querylist = []\n # 获取不到,则返回\n if count == 0:\n return\n elif count > 0:\n pagenum = int(math.ceil(float(count) / IfengS2Query.DEFAULT_PAGE_SIZE))\n if pagenum >= self.maxpages:\n pagenum = self.maxpages\n for page in range(1, pagenum + 1, 1):\n url = IfengS2Query.IFENG_QUERY_TEMPLATE.format(pn = page,q = q)\n querylist.append(url)\n self.__storeqeuryurllist__(querylist, IfengS2Query.IFENG_S2QUERY_EACH_PAGE,{'info':q})\n elif params.step == IfengS2Query.IFENG_S2QUERY_EACH_PAGE:\n self.step2(params)\n \n \n def step2(self, params):\n info = Common.urldec(params.customized['info'])\n soup = BeautifulSoup(params.content,'html5lib')\n text_divs = soup.select('.s_r_txt')\n urllist = []\n \n if text_divs:\n for item in text_divs:\n title = item.select_one('h3 > a').get_text()\n url = item.select_one('h3 > a').get('href')\n curtime = item.select('p')[-1].get_text().strip()\n try:\n if TimeUtility.compareNow(TimeUtility.getuniformtime(curtime), self.querylastdays):\n if Common.checktitle(info, title):\n urllist.append(url)\n else:\n Logger.log(url, constant.ERRORCODE_WARNNING_NOMATCHTITLE)\n else:\n Logger.log(url, constant.ERRORCODE_WARNNING_NOMATCHTIME)\n except:\n urllist.append(url)\n self.__storeurllist__(urllist,SPIDER_S2_WEBSITE_VIDEO)\n \n \n \n ","repo_name":"ErBingBing/django-tonado-crawler","sub_path":"ZG-PhaseFour/code/website/ifeng/ifengquery.py","file_name":"ifengquery.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"27346745827","text":"import requests,bs4, os\n\nname='Berserk'\nurl='https://kissmanga.org/chapter/ilsi12001567132882/chapter_' ## ROOT FOLDER\nstart=330\nend=361\n\n\n\npath='C:\\\\Users\\\\Drastis\\\\Desktop\\\\MANGA_DOWNLOAD\\\\' + name #### MANGA FOLDER\nos.makedirs(path,exist_ok=True)\n\n\n## CHAPTERS\nwhile start>0 and start 2 else mask\r\n if len(mask) > 0:\r\n middle_category = middle_category.split(mask[0])[0] + mask[0][1] + middle_category.split(mask[0])[1]\r\n try: \r\n middle_category = change_dict[middle_category]\r\n except:\r\n pass\r\n category = middle_category + \" \" + main_category\r\n return category\r\n\r\n#데이터에 맞는 법정동 geometry 리스트 반환\r\ndef get_location_bjd_geometry_and_admname(location_list, hjd_df):\r\n HJD_Dong_2018 = list(map(lambda data: data.split(\" \")[-1], hjd_df.adm_nm))\r\n HJD_Sigon_2018 = list(map(lambda data: data.split(\" \")[1], hjd_df.adm_nm))\r\n\r\n hjd_geometry = []\r\n adm_code = []\r\n for i in location_list:\r\n place = i.split(\" \")\r\n step1_mask = np.where(np.array(HJD_Dong_2018) == place[0])[0].tolist()\r\n if len(step1_mask) == 1:\r\n hjd_geometry.append(hjd_df.iloc[step1_mask[0]].geometry)\r\n adm_code.append(hjd_df.iloc[step1_mask[0]].adm_nm)\r\n else:\r\n step2_mask = np.where(np.array(HJD_Sigon_2018) == place[1])[0].tolist()\r\n step2_mask = set(step1_mask) & set(step2_mask)\r\n hjd_geometry.append(hjd_df.iloc[list(step2_mask)[0]].geometry)\r\n adm_code.append(hjd_df.iloc[list(step2_mask)[0]].adm_nm)\r\n \r\n return hjd_geometry, adm_code\r\n\r\n#행정구역 별 랜덤 좌표 필요한 갯수 데이터프레임 추출\r\ndef generate_location_cnt_df(move_data, where):\r\n pos_cnt = move_data[f\"{where}pos\"].value_counts().to_frame().reset_index()\r\n pos_cnt.columns = [f\"{where}pos\", \"cnt\"] \r\n pos_cnt = pd.merge(move_data[[f\"{where}pos\", f\"{where}_geometry\"]].drop_duplicates([f\"{where}pos\"]), pos_cnt)\r\n return pos_cnt\r\n\r\n#위치 좌표 랜덤 생성\r\ndef Generate_random_location(data, CNT): #place : 관심지역, cnt: 차량 수 \r\n #Meter -> Euclid : 단위 변환\r\n def euclid_distance_cal(meter):\r\n ###유클리드 거리와 실제 거리를 기반으로 1미터당 유클리드 거리 추출\r\n #점 쌍 사이의 유클리드 거리를 계산\r\n dis_1 = ox.distance.euclidean_dist_vec(36.367658 , 127.447499, 36.443928, 127.419678)\r\n #직선거리 계산\r\n dis_2 = ox.distance.great_circle_vec(36.367658 , 127.447499, 36.443928, 127.419678)\r\n return dis_1/dis_2 * meter\r\n \r\n #위치 좌표 랜덤 생성\r\n locations = []\r\n for i in random.choice(range(len(data)), size = CNT, replace = True):\r\n #교차로 중심에 생성되지 않게 고정 미터로 생성이 아닌 해당 링크 길이로 유동적인 미터 생성\r\n random_num = random.choice([0.1,0.2,0.3,0.4,0.5])\r\n random_meter = data.iloc[i][\"length\"] * random_num\r\n #좌표 생성\r\n new_node = list(ox.utils_geo.interpolate_points(data.iloc[i][\"geometry\"], euclid_distance_cal(random_meter)))\r\n #좌표의 처음과 끝은 노드이기 때문에 제거하고 선택\r\n del new_node[0], new_node[-1]\r\n #랜덤으로 선택한 하나의 링크에서 하나의 택시 좌표 선택 \r\n idx = random.choice(len(new_node), size = 1)\r\n location = new_node[idx[0]]\r\n locations.append(location)\r\n \r\n locations = list(map(lambda data: Point(data),locations))\r\n\r\n return locations\r\n\r\n#도로 행정구역 경계로 서브셋 추출\r\ndef generate_subset(geometry, data_edges):\r\n data_edges[\"idx\"] = range(len(data_edges))\r\n \r\n subset = gpd.GeoDataFrame({\"geometry\": [geometry]})\r\n subset = data_edges.iloc[sorted(gpd.sjoin(subset, data_edges,how='left', op=\"intersects\").idx.values)]\r\n return subset\r\n\r\n\r\ndef main_random_location(data_edges, pos_cnt, move_data, where):\r\n random_locations = []\r\n\r\n for i in tqdm(range(len(pos_cnt))):\r\n subset = 
generate_subset(pos_cnt.iloc[i][f\"{where}_geometry\"], data_edges)\r\n random_location = Generate_random_location(subset, pos_cnt.iloc[i].cnt)\r\n random_locations.append(random_location)\r\n pos_cnt[f\"{where}_random_location\"] = random_locations\r\n \r\n node_mask_dict = dict()\r\n for i in range(len(pos_cnt)):\r\n mask = np.where(np.array(move_data[f\"{where}pos\"]) == pos_cnt.iloc[i][f\"{where}pos\"])[0].tolist()\r\n nodes = pos_cnt.iloc[i][f\"{where}_random_location\"]\r\n for m,n in zip(mask,nodes):\r\n node_mask_dict[m] = n\r\n \r\n return pos_cnt, node_mask_dict\r\n\r\ndef add_ps_location(disabled_data, hjd_20180401 = hjd_20180401, mode = \"basic\"):\r\n #출발지, 도착지 리스트로 정의\r\n if mode == \"basic\":\r\n start_location = list(map(lambda data: generate_places_name(disabled_data.iloc[data][\"startpos1\"], disabled_data.iloc[data][\"startpos2\"]), range(len(disabled_data))))\r\n end_location = list(map(lambda data: generate_places_name(disabled_data.iloc[data][\"endpos1\"], disabled_data.iloc[data][\"endpos2\"]), range(len(disabled_data))))\r\n\r\n disabled_data[\"startpos\"] = [i.replace(\".\",\"·\") if \".\" in i else i for i in start_location]\r\n disabled_data[\"endpos\"] = [i.replace(\".\",\"·\") if \".\" in i else i for i in end_location]\r\n elif mode == \"fake\":\r\n pass\r\n start_result = get_location_bjd_geometry_and_admname(disabled_data[\"startpos\"], hjd_20180401)\r\n end_result = get_location_bjd_geometry_and_admname(disabled_data[\"endpos\"], hjd_20180401)\r\n\r\n disabled_data[\"start_geometry\"] = start_result[0]\r\n disabled_data[\"end_geometry\"] = end_result[0]\r\n disabled_data[\"adm_cn_start\"] = start_result[1]\r\n disabled_data[\"adm_cn_end\"] = end_result[1]\r\n \r\n startpos_cnt = generate_location_cnt_df(disabled_data,\"start\")\r\n endpos_cnt = generate_location_cnt_df(disabled_data,\"end\")\r\n\r\n startpos_cnt, start_dict = main_random_location(edges, startpos_cnt, disabled_data, \"start\")\r\n endpos_cnt, end_dict = main_random_location(edges, endpos_cnt, disabled_data, \"end\")\r\n\r\n disabled_data[\"start_point\"] = [start_dict[i] for i in range(len(disabled_data))]\r\n disabled_data[\"end_point\"] = [end_dict[i] for i in range(len(disabled_data))]\r\n try: \r\n disabled_data = disabled_data[[\"no\", \"cartype\", \"settime_date\", \"settime_time\", \"receipttime_date\", \"receipttime_time\",\r\n \"start_point\", \"end_point\", \"adm_cn_start\", \"adm_cn_end\", \"start_geometry\", \"end_geometry\"]]\r\n except:\r\n pass\r\n return disabled_data\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"HNU209/UMOS","sub_path":"module/add_location.py","file_name":"add_location.py","file_ext":"py","file_size_in_byte":8825,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"40393428634","text":"from sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\nimport numpy as np\nimport random\nimport time\nfrom functools import wraps\nimport ipdb\nimport pickle\n\n\ndef timer(function):\n @wraps(function)\n def function_timer(*args, **kwargs):\n t0 = time.time()\n result = function(*args, **kwargs)\n t1 = time.time()\n print (\"Total time running function: %s with %s seconds\" %\n (function.__name__, str(t1-t0)))\n return result\n return function_timer\n\n\nclass NullEnc(BaseEstimator, TransformerMixin):\n '''\n '''\n def __init__(self, null_value, normalize, add_ori): # no *args and **kwargs\n super().__init__()\n self.null_value = null_value\n 
self.normalize = normalize\n self.add_ori = add_ori\n \n @timer\n def fit(self, x, y=None):\n if self.normalize:\n new_df = pd.DataFrame(index = x.index) \n null_df = x==self.null_value\n new_df['null_num'] = null_df.sum(axis=1)\n new_df['null_rate'] = null_df.sum(axis=1)/null_df.shape[1]\n self.SS = StandardScaler()\n self.SS.fit(new_df)\n return self\n \n def transform(self, x):\n x_null = pd.DataFrame(index = x.index) \n null_df = x==self.null_value\n x_null['null_num'] = null_df.sum(axis=1)\n x_null['null_rate'] = null_df.sum(axis=1)/null_df.shape[1]\n if self.normalize:\n x_null = pd.DataFrame(self.SS.transform(x_null), index=x.index, columns=['null_num','null_rate'])\n \n if self.add_ori:\n x_null = x_null.join(x)\n return x_null\n \nif __name__ == '__main__':\n data_path = './data/'\n # test.csv train.csv train_target.csv\n tra_x = pd.read_csv(data_path + '/train.csv')\n tra_y = pd.read_csv(data_path + '/train_target.csv')\n tes_x = pd.read_csv(data_path + '/test.csv')\n final = tra_x.merge(tra_y,on='id')\n final['certValidStop'] = final.certValidStop.astype(int)\n final.fillna(-999,inplace=True)\n\n file = open('/data-0/qibo/pickle_files/cv_idx_dic.pickle', 'rb')\n idx_dic = pickle.load(file)\n tra_id, val_id = idx_dic['cv_0']['train_idx'], idx_dic['cv_0']['valid_idx']\n Train = final.iloc[tra_id,:].set_index(keys='id')\n Valid = final.iloc[val_id,:].set_index(keys='id')\n tra_x, tra_y = Train.drop('target', axis=1), Train.target\n val_x, val_y = Valid.drop('target', axis=1), Valid.target\n NE = NullEnc(null_value=-999, normalize=True, add_ori=False)\n NE.fit(tra_x)\n tra_rc = NE.transform(tra_x)\n val_rc = NE.transform(val_x)","repo_name":"brakeman/general_pro","sub_path":"to_be_transfor/auto_feat/Piplines/NullEnc_test.py","file_name":"NullEnc_test.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19251473273","text":"# coding=utf-8\nfrom __future__ import absolute_import\nimport os\nimport json\n\nimport octoprint.plugin\nfrom octoprint.printer.profile import PrinterProfileManager\nfrom octoprint_OctoPower.plugmanager import PlugManager\n\nfrom octoprint.events import Events\n\nfrom threading import Timer\n\nclass OctopowerPlugin(octoprint.plugin.StartupPlugin,\n\t\t\t\t\t\toctoprint.plugin.TemplatePlugin,\n\t\t\t\t\t\toctoprint.plugin.SettingsPlugin,\n\t\t\t\t\t\toctoprint.plugin.EventHandlerPlugin,\n\t\t\t\t\t\toctoprint.plugin.SimpleApiPlugin):\n\n\t__plugManager = PlugManager()\n\n\t__powerOffDelayTimer = None\n\n\tdef get_api_commands(self):\n\t\treturn dict(\n\t\t\ton=[\"printerProfile\"],\n\t\t\toff=[\"printerProfile\"]\n\t\t\t)\n\n\tdef on_api_command(self, command, data):\n\t\tif command == \"on\":\n\t\t\tself._logger.info(\"Turning on \" + data['printerProfile'])\n\n\t\t\tplug = self.__getPlugFromProfile(data['printerProfile'])\n\t\t\t\t\t\t\n\t\t\tif plug != None:\n\t\t\t\tplug.on()\n\t\telif command == \"off\":\n\t\t\tself._logger.info(\"Turning off \" + data['printerProfile'])\n\n\t\t\tplug = self.__getPlugFromProfile(data['printerProfile'])\n\t\t\t\n\t\t\tif plug != None:\n\t\t\t\tplug.off()\n\n\tdef get_settings_defaults(self):\n\t\treturn dict(profiles=dict())\n\n\tdef get_template_configs(self):\n\t\treturn [\n\t\t\tdict(type=\"settings\", custom_bindings=False)\n\t\t]\n\n\tdef get_template_vars(self):\n\t\tprofiles = self._printer_profile_manager.get_all()\n\t\tprofileNames = []\n\t\tfor k in 
profiles.keys():\n\t\t\tprofileNames.append(profiles[k]['name'])\n\n\t\treturn dict(plugDevices=[{'name':x.getName(), 'uuid':x.getUUID()} for x in self.__plugManager.discoverPlugs()], profiles = profileNames)\n\n\tdef on_settings_save(self, data):\n\t\toctoprint.plugin.SettingsPlugin.on_settings_save(self, data)\n\t\tself._logger.info(\"Settings: \" + json.dumps(data))\n\n\tdef __getCurrentPlug(self):\n\t\t\tcurProfile = self._printer_profile_manager.get_current_or_default()\n\n\t\t\treturn self.__getPlugFromProfile(curProfile['name'])\n\n\tdef __getPlugFromProfile(self, profileName):\n\t\t\tprofiles = self._settings.get(['profiles'])\n\t\t\tif profileName not in profiles:\n\t\t\t\treturn None\n\t\t\t\n\t\t\tplugUUID = profiles[profileName]\n\n\t\t\tif plugUUID != \"none\":\n\t\t\t\tplug = self.__plugManager.findCachedPlug(plugUUID)\n\t\t\telse:\n\t\t\t\treturn None\n\n\t\t\treturn plug\n\n\tdef on_event(self, event, payload):\n\t\tplug = self.__getCurrentPlug()\n\n\t\tif plug == None:\n\t\t\treturn\n\n\t\tif event == Events.POWER_OFF:\n\t\t\t#Wait a few seconds before powering off\n\t\t\t__powerOffDelayTimer = Timer(5.0, lambda : plug.off())\n\t\t\t__powerOffDelayTimer.start()\n\t\telif event == Events.POWER_ON:\n\t\t\tplug.on()\n\n\t\tif event in (Events.PRINT_DONE, Events.PRINT_FAILED, Events.PRINT_CANCELLED):\n\t\t\tself._logger.info(\"Turning {} off\".format(plug.getName()))\n\n\t\t\tself._printer.commands(\"M81\")\n\n\t\t\t\n\t\telif event in (Events.PRINT_STARTED, ):\n\t\t\tself._printer.commands(\"M80\")\n\t\t\tplug.on()\n\n\t##~~ Softwareupdate hook\n\n\tdef get_update_information(self):\n\t\t# Define the configuration for your plugin to use with the Software Update\n\t\t# Plugin here. See https://docs.octoprint.org/en/master/bundledplugins/softwareupdate.html\n\t\t# for details.\n\t\treturn dict(\n\t\t\tOctoPower=dict(\n\t\t\t\tdisplayName=\"Octopower Plugin\",\n\t\t\t\tdisplayVersion=self._plugin_version,\n\n\t\t\t\t# version check: github repository\n\t\t\t\ttype=\"github_release\",\n\t\t\t\tuser=\"SeanReg\",\n\t\t\t\trepo=\"OctoPrint-Octopower\",\n\t\t\t\tcurrent=self._plugin_version,\n\n\t\t\t\t# update method: pip\n\t\t\t\tpip=\"https://github.com/SeanReg/OctoPrint-Octopower/archive/{target_version}.zip\"\n\t\t\t)\n\t\t)\n\n\n# If you want your plugin to be registered within OctoPrint under a different name than what you defined in setup.py\n# (\"OctoPrint-PluginSkeleton\"), you may define that here. Same goes for the other metadata derived from setup.py that\n# can be overwritten via __plugin_xyz__ control properties. See the documentation for that.\n__plugin_name__ = \"Octopower Plugin\"\n\n# Starting with OctoPrint 1.4.0 OctoPrint will also support to run under Python 3 in addition to the deprecated\n# Python 2. New plugins should make sure to run under both versions for now. 
Uncomment one of the following\n# compatibility flags according to what Python versions your plugin supports!\n#__plugin_pythoncompat__ = \">=2.7,<3\" # only python 2\n#__plugin_pythoncompat__ = \">=3,<4\" # only python 3\n__plugin_pythoncompat__ = \">=2.7,<4\" # python 2 and 3\n\ndef __plugin_load__():\n\tglobal __plugin_implementation__\n\t__plugin_implementation__ = OctopowerPlugin()\n\n\tglobal __plugin_hooks__\n\t__plugin_hooks__ = {\n\t\t\"octoprint.plugin.softwareupdate.check_config\": __plugin_implementation__.get_update_information\n\t}\n\n","repo_name":"SeanReg/OctoPrint-Octopower","sub_path":"octoprint_OctoPower/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23523327774","text":"import math\n\nwith open(\"input-3.txt\") as f:\n content = [i.strip() for i in f.readlines()]\n\n# print(content)\n\nwidth = len(content[0])\n\n\n\nh_moves_array = [1, 3, 5, 7, 1]\nv_moves_array = [1, 1, 1, 1, 2]\ntree_array = []\n\nfor (index, h) in enumerate(h_moves_array):\n h_index = 0\n v_index = 0\n print(h)\n print(index)\n h_moves = h\n v_moves = v_moves_array[index]\n\n trees = 0\n\n while (v_index < len(content)):\n if(content[v_index][h_index] == '#'):\n trees += 1\n h_index = (h_index + h_moves) % width\n v_index += v_moves\n\n\n print(trees)\n tree_array.append(trees)\nprint(len(content))\nprint(width)\n\nprint(tree_array)\n\nproduct = math.prod(tree_array)\nprint(product)","repo_name":"iradkaplan/AoC2020","sub_path":"day3/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21057162570","text":"import math\nl=[]\nsym=[]\nw=[]\nf=0\nfil = open('edge.txt','r')\nfor n in fil:\n if(f==0):\n \n for i in range(int(n)):\n l.append([])\n sym.append([])\n l[i].append(i)\n f=1 \n else:\n r=[int(x) for x in n.split()]\n w.append([r[0],r[1]])\n l[r[0]].append(r[1])\n l[r[1]].append(r[0])\n \n \n\nfil.close() \n \n#print(l)\n#print(w)\nfor i in range(len(w)):\n a=len(list(set(l[w[i][0]]) & set(l[w[i][1]])))\n b=len(l[w[i][0]])\n f=len(l[w[i][1]])\n c=a/(math.sqrt(b*f))\n w[i].append(c)\n sym[w[i][0]].append([w[i][1],c])\n sym[w[i][1]].append([w[i][0],c])\ncomm=[]\nw=sorted(w, key=lambda x:x[2],reverse=True)\n#print(w)\n#print(sym)\nfor i in range(len(l)):\n comm.append(-1)\ng=0\n\nfor i in range(len(w)):\n a=w[i][0]\n b=w[i][1]\n \n if(comm[a]>-1 and comm[b]>-1):\n sym[a]=[subl for subl in sym[a] if subl[0] != b]\n sym[b]=[subl for subl in sym[b] if subl[0] != a]\n continue\n c=max(sym[a], key=lambda x: x[1])\n d=max(sym[b], key=lambda x: x[1])\n #print(a,b,c,d)\n if(c[1]==d[1] ):\n if(comm[a]>-1):\n comm[b]=comm[a]\n\t \n if(comm[b]>-1):\n comm[a]=comm[b]\n\t \n else:\n comm[a]=g \n comm[b]=g \n g+=1 \n\t \n sym[a]=[subl for subl in sym[a] if subl[0] != b]\n sym[b]=[subl for subl in sym[b] if subl[0] != a]\n\n \n \nfor i in range(len(l)):\n if(comm[i]==-1):\n comm[i]=g \n g+=1 \n \ncommunity_list=[]\nfor i in range(g): \n community_list.append([])\nfor i in range(len(l)):\n community_list[comm[i]].append(i)\nprint(community_list) \n \n \n \n \n \n \n\n \n \n","repo_name":"Pumawat/Explo","sub_path":"num.py","file_name":"num.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"31542212286","text":"# 라이브러리 추가\nimport requests\nfrom 
urllib.parse import urlparse\n\n# 검색값과 검색주소를 가져와서 카카오 map url에 접속하여 위도 경도를 불러올 함수\ndef make_info_list(title,address):\n # 카카오 map의 url에 접속하여 주소 검색\n url = \"https://dapi.kakao.com/v2/local/search/address.json?&query=\" + address\n # 검색 결과를 result에 저장을 하고 json 포멧으로 변환\n result = requests.get(urlparse(url).geturl(), headers={\"Authorization\":\"----------------------------------------------------\"})\n print(result)\n json_obj = result.json()\n print(json_obj)\n # 검색 결과 정렬\n # 검색 결과를 저장 할 빈 리스트를 만들어 놓는다.\n list = []\n\n # 제이슨 포멧에 정리되어있는 주소와 위도, 경도를 list에 저장\n for document in json_obj['documents']:\n val = [title, document['address_name'], float(document['y']), float(document['x'])]\n list.append(val)\n return list\n\n# 위에서 만들어진 리스트를 이용하여 위도, 경도 값만 리턴하는 함수\ndef find_xy(list):\n x = list[0][3]\n y = list[0][2]\n return x,y\n\n# 활용 예시\nif __name__ == '__main__':\n address=\"서울특별시 강남구 테헤란로 212\"\n temp_list = make_info_list(\"멀티캠퍼스 역삼\",address)\n print(temp_list,\"테에에에엥엠프리스트\")\n\n x,y = find_xy(temp_list)\n print(\"멀티캠퍼스역삼의 위도는 \",y,\" 경도는 \",x,\" 이다.\")\n # 위도는 37.5012767241426 경도는 127.039600248343 이다.\n","repo_name":"ktb5891/JJinmak","sub_path":"project1/Function/make_info_list.py","file_name":"make_info_list.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74358107686","text":"from django.contrib import admin\nfrom .models import Post, Comment, Tag\n\nfrom django.utils.safestring import mark_safe\n\n# Register your models here.\n# admin.site.register(Post)\n\n\n# #등록법 1\n# admin.site.register(Post) @model만 등록하는 방법\n#\n# #등록법 2\n# class PostAdmin(admin, ModelAdmin):\n# list_display= ['id','title','content'] #해당 어드민 페이지에서 값을 보여주고 싶을때\n# admin.site.register(Post, PostAdmin) @model + 화면에 보여주고 싶은 클래스 같이 등록\n#\n# #등록법 3 : 장식자 형태로 지원\n@admin.register(Post)\nclass PostAdmin(admin.ModelAdmin):\n list_display = ['id','title','status','content_size','feeling','updated_at']\n list_display_links = ['title']\n list_editale = ['title']\n list_per_page = 100\n\n actions = ['delete_selected_post','make_published','make_Draft','make_withdraw','geonil_good','geonil_sad','geonil_bad'] #d여기에 등록 2018-06-28\n\n def content_size(self, post):\n return mark_safe('{}글자'.format(len(post.content)))\n content_size.short_description = \"글자수\"\n # content_size.allow_tags = True\n\n\n # geonil.allow_tags = True --->mark_safe 를 사용한 방법을 권장합니다.\n\n #여기서의 쿼리셋은 선택한 현재의 row 값만을 많한다.\n def delete_selected_post(self, request, queryset):\n updated_count = queryset.delete()\n self.message_user(request, '{}건 삭제'.format(updated_count[0]))\n delete_selected_post.short_description =\"삭제\"\n\n def make_published(self, request, queryset): #admin에서 목록에 해당하는 작업을 한번에 실행 처리\n updated_count = queryset.update(status='p')\n self.message_user(request, '{}건 published'.format(updated_count))\n make_published.short_description = '지정 포스팅을 Published상태로 변경' # 함수의 이름으로 적용된 곳에 설명으로적용\n\n def make_Draft(self, request, queryset): #admin에서 목록에 해당하는 작업을 한번에 실행 처리\n updated_count = queryset.update(status='d')\n self.message_user(request, '{}건 Draft'.format(updated_count))\n make_Draft.short_description = '지정 포스팅을 Draft 상태로 변경'\n\n def make_withdraw(self, request, queryset): #admin에서 목록에 해당하는 작업을 한번에 실행 처리\n updated_count = queryset.update(status='w')\n self.message_user(request, '{}건 Withdraw'.format(updated_count))\n make_withdraw.short_description = '지정 포스팅을 Withdraw 상태로 변경'\n\n def geonil_good(self, request, qeuryset):\n updated_count = 
qeuryset.update(feeling='g')\n self.message_user(request, '{}건 좋음'.format(updated_count))\n geonil_good.short_description = '좋음'\n\n def geonil_sad(self, request, qeuryset):\n updated_count = qeuryset.update(feeling='s')\n self.message_user(request, '{}건 슬픔'.format(updated_count))\n geonil_sad.short_description = '슬픔'\n\n def geonil_bad(self, request, qeuryset):\n updated_count = qeuryset.update(feeling='b')\n self.message_user(request, '{}건 나쁨'.format(updated_count))\n geonil_bad.short_description = '나쁨'\n\n\n # def geonil_test(self, request, qeuryset):\n # updated_count = qeuryset.update(title='제목2')\n # self.message_user(request, '{}건 변경'.format(updated_count))\n # geonil_test.short_description = '제목변경'\n\n\n\n\n\n\n##########comments 등록\n@admin.register(Comment)\nclass CommetAdmin(admin.ModelAdmin):\n list_display = ['id','author','message','update_at']\n list_display_links = ['message']\n actions = ['delete_selected_post']\n def delete_selected_post(self, request, queryset):\n updated_count = queryset.delete()\n self.message_user(request, '{}건 삭제'.format(updated_count[0]))\n delete_selected_post.short_description =\"삭제\"\n\n\n\n\n\n@admin.register(Tag)\nclass TagAdmin(admin.ModelAdmin):\n list_display = ['name']\n \n","repo_name":"JangGeonil/MyDjango","sub_path":"blog/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11950953049","text":"from gpiozero import LED\nimport time\n# Import the ADS1x15 module.\nimport Adafruit_ADS1x15\n\n\n# Create an ADS1115 ADC (16-bit) instance.\nadc = Adafruit_ADS1x15.ADS1115()\n# relay control GPIO\nrelay = LED(17)\n# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.\nGAIN = 1\nAIN = 2\nAIN_LOW = 11100\nAIN_HIGH = 25750\n\nbatteryEmpty = False\n\nprint('Reading ADS1x15 values, press Ctrl-C to quit...')\n\n# Default relay state: grid:\nrelay.off()\n\nwhile True:\n\t\n\tbatteryVoltage = adc.read_adc(AIN, gain=GAIN)\n\t\n\tif (batteryVoltage < AIN_LOW):\n\t\tif batteryEmpty:\n\t\t\tprint('Battery discharged. Running on grid supply.')\n\t\telse:\t\t\n\t\t\tprint('Battery is getting empty. Switching to grid supply.')\n\t\t\tbatteryEmpty = True\n\t\t\trelay.off()\n\t\n\telif (batteryVoltage > AIN_HIGH):\n\t\tif batteryEmpty:\t\t\t\n\t\t\tprint('It seems like battery is fully charged and back on.')\n\t\t\tbatteryEmpty = False\n\t\t\trelay.on()\n\t\telse:\n\t\t\tprint('Battery charged and working normally.')\n\t\t\t\n\telse:\n\t\tprint('Battery charging ... 
Voltage: {}'.format(batteryVoltage))\n\t\n\ttime.sleep(1)\n","repo_name":"PeraZver/relay-control","sub_path":"relay_control.py","file_name":"relay_control.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6597568018","text":"from typing import Any, AnyStr, Dict, Union\n\nimport openvds\n\nfrom ovds_utils.exceptions import VDSMetadataException\nfrom ovds_utils.ovds import METADATATYPE_TO_OVDS_GET_FUNCTION, METADATATYPE_TO_OVDS_SET_FUNCTION, MetadataTypes\n\n\nclass MetadataValue:\n def __init__(self, value: Any, category: AnyStr, type: Union[AnyStr, MetadataTypes]) -> None:\n self.value = value\n self.category = category\n if isinstance(type, MetadataTypes):\n self._type = type\n elif str(type) not in METADATATYPE_TO_OVDS_GET_FUNCTION:\n raise VDSMetadataException(\n f\"The type {type} was not recognized among: {', '.join(METADATATYPE_TO_OVDS_GET_FUNCTION.keys())}\")\n else:\n self._type = getattr(MetadataTypes, str(type).replace(\"MetadataType.\", \"\"))\n\n def __repr__(self) -> str:\n return f\"<{self.__class__.__qualname__}(value={self.value}, category={self.category}, type={self.type})>\"\n\n @property\n def type(self):\n return self._type.value\n\n\nclass MetadataContainer(dict):\n def __init__(self, **kwargs: Dict[AnyStr, MetadataValue]) -> None:\n super().__init__()\n self.update(kwargs)\n\n def __repr__(self) -> str:\n return f\"<{self.__class__.__qualname__}({', '.join(self.keys())})>\"\n\n def get_container(self):\n container = openvds.MetadataContainer()\n for k, v in self.items():\n set_method = METADATATYPE_TO_OVDS_SET_FUNCTION[v.type]\n set_method(container, v.category, k, v.value)\n return container\n\n @staticmethod\n def get_from_layout(layout: openvds.core.VolumeDataLayout) -> openvds.core.MetadataContainer:\n metadata = {}\n for i in layout.getMetadataKeys():\n method = METADATATYPE_TO_OVDS_GET_FUNCTION[str(i.type)]\n value = method(layout, i.category, i.name)\n metadata[i.name] = MetadataValue(value, i.category, i.type)\n\n return MetadataContainer(**metadata)\n","repo_name":"micmurawski/ovds-utils","sub_path":"src/ovds_utils/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17394431672","text":"# coding: utf-8\n\nfrom .base import Base\n\nUSER = 1\nADMIN = 2\nPOWER_INFO = {\n USER: 'user',\n ADMIN: 'admin',\n}\n\n\nclass Power(Base):\n def __init__(self):\n self.id = None # ID\n self.info = None # 权限信息\n\n def load(self, **kwargs):\n Base.load(self, **kwargs)\n return self\n","repo_name":"jiesunn/file-transfer","sub_path":"app/lib/models/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17214551414","text":"class Solution:\n def criticalConnections(self, n: int, connections: List[List[int]]) -> List[List[int]]:\n self.visited = [False] * n\n self.index = [-1] * n\n self.curIndex = 0\n self.lowIndex = [-1] * n\n self.graph = collections.defaultdict(list)\n for v1, v2 in connections:\n self.graph[v1].append(v2)\n self.graph[v2].append(v1)\n self.critical = []\n self.strongConnect(0)\n\n return self.critical\n\n def strongConnect(self, i, prev=-1):\n self.visited[i] = True\n self.lowIndex[i] = self.index[i] = self.curIndex\n self.curIndex += 1\n for to in self.graph[i]:\n if to == prev: continue\n 
if not self.visited[to]:\n self.strongConnect(to, i)\n self.lowIndex[i] = min(self.lowIndex[i], self.lowIndex[to])\n if self.lowIndex[to] > self.index[i]:\n self.critical.append((i, to))\n","repo_name":"yunkaiwang/LeetCodeSol","sub_path":"dailyChallenge/Apr2021/Apr_24.py","file_name":"Apr_24.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22316373093","text":"from picamera import PiCamera\nfrom threading import Thread\nfrom time import sleep\nimport numpy as np\nfrom camera_analysis import analyse_for_colours, get_all_detections\n\nclass camera():\n def __init__(self):\n self.camera = PiCamera()\n self.camera.start_preview()\n sleep(0.1)\n self.camera.resolution = (224, 176)\n self.camera.framerate = 24\n # self.picture = np.empty((176, 224, 3), dtype=np.uint8)\n self.result = [[0 for j in range(5)] for i in range(5)]\n camera_thread = Thread(target=self.camera_sensing)\n camera_thread_daemon = True\n camera_thread.start()\n\n def camera_sensing(self):\n while True:\n try:\n temp_img = np.empty((176, 224, 3), dtype=np.uint8)\n self.camera.capture(temp_img, 'rgb')\n temp_img = np.flip(np.flip(temp_img, 0),1)\n colour_masks = analyse_for_colours(temp_img, 3)\n temp_img = get_all_detections(colour_masks, bins=5, tr=0.01)\n self.result = temp_img\n except Exception as e:\n print(e)\n sleep(2)\n pass\n\n def __del__(self):\n self.camera.stop_preview()\n self.camera.close()\n\n\nif __name__ == \"__main__\":\n a = camera()\n sleep(0.1)\n import matplotlib.pyplot as plt\n from camera_analysis import analyse_for_colours\n\n picture = a.take_picture()\n masks = analyse_for_colours(picture)\n mask = np.concatenate(masks, axis=1)\n im = plt.imshow(picture)\n plt.show()\n\n while True:\n print('upd')\n picture = a.take_picture()\n masks = analyse_for_colours(picture)\n mask = np.concatenate(masks, axis=1)\n im.set_data(mask)\n\n plt.ioff() # due to infinite loop, this gets never called.\n plt.show()\n","repo_name":"Marius-So/advanced_robotics","sub_path":"final_exam/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74254352807","text":"from __future__ import unicode_literals\nfrom ejtp.util.compat import unittest\nfrom pymads.tests.dig import dig\n\nimport requests\nimport signal\nimport time\nfrom subprocess import Popen, PIPE\n\ntry:\n from cStringIO import StringIO\nexcept:\n from io import BytesIO as StringIO\n\nPROCESS_INIT_TIME = 0.75\nTOM_IDENT_JSON = {\n '[\"local\",null,\"tom\"]':{\n 'name': 'tom@example.org',\n 'encryptor': ['rotate', 5],\n 'location': ['local', None, 'tom']\n }\n}\n\nclass ExternalScriptError(Exception): pass\n\nclass ScriptTester(object):\n\n def __init__(self, path, argv):\n self.path = path\n self.argv = [self.path] + list(argv)\n self.io_stdin = StringIO()\n self.io_stdout = StringIO()\n self.io_stderr = StringIO()\n self.io_output = StringIO()\n\n self.returncode = None\n\n def write(self, input):\n self.io_stdin.write(input)\n return self.process.stdin.write(input)\n\n def read(self):\n out = self.process.stdout.read()\n err = self.process.stderr.read()\n\n self.io_stdout.write(out)\n self.io_stderr.write(err)\n self.io_output.write(out)\n self.io_output.write(err)\n\n return (out, err)\n\n @property\n def stdin(self):\n return self.io_stdin.getvalue()\n\n @property\n def stderr(self):\n return self.io_stderr.getvalue()\n\n 
@property\n def stdout(self):\n return self.io_stdout.getvalue()\n\n @property\n def output(self):\n return self.io_output.getvalue()\n\n def terminate(self):\n '''\n Stop process and return after wait.\n '''\n if self.returncode != None:\n return\n if not self.process.poll():\n try:\n self.process.send_signal(signal.SIGINT)\n except:\n pass # harmless race condition\n self.returncode = self.process.wait()\n self.read()\n\n def __enter__(self):\n self.process = Popen(self.argv, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n self.terminate()\n self.process = None\n if self.returncode and exc_value == None:\n raise ExternalScriptError(\n self.returncode,\n self.io_output.getvalue()\n )\n\nclass TestMainScript(unittest.TestCase):\n path = 'djdns'\n\n def test_runs_at_all(self):\n args = [\n '-d', 'diskdemo',\n '-P', '4444',\n ]\n with ScriptTester(self.path, args) as p:\n time.sleep(PROCESS_INIT_TIME)\n p.terminate()\n self.assertEqual(\n p.output,\n b\"STOPPING SERVER \\n\"+\n b\"STOPPING SERVER \\n\"\n )\n self.assertEqual(p.returncode, 0)\n\n def test_resolution(self):\n port = 4444\n args = [\n '-d', 'diskdemo',\n '-P', str(port),\n ]\n with ScriptTester(self.path, args) as p:\n host_data = dig('in.root.demo', 'localhost', port)\n expected = \"ANSWER SECTION:\\n%s.\\t\\t1800\\tIN\\tA\\t%s\\n\\n\" % (\n 'in.root.demo',\n '1.2.3.4',\n )\n self.assertIn(expected, host_data)\n\n def test_has_ident_server(self):\n args = [\n '-d', 'diskdemo',\n '-P', '4444',\n ]\n with ScriptTester(self.path, args) as p:\n time.sleep(PROCESS_INIT_TIME)\n r = requests.get('http://localhost:16232/idents/tom@example.org')\n self.assertEqual(r.json(), TOM_IDENT_JSON)\n self.assertEqual(r.status_code, 200)\n\nclass TestIdentServer(unittest.TestCase):\n path = 'python'\n\n def test_get_user(self):\n args = ['djdns/ident_server.py']\n with ScriptTester(self.path, args) as p:\n time.sleep(PROCESS_INIT_TIME)\n r = requests.get('http://localhost:16232/idents/tom@example.org')\n self.assertEqual(r.json(), TOM_IDENT_JSON)\n self.assertEqual(r.status_code, 200)\n\n r = requests.get('http://localhost:16232/idents/nobody@example.org')\n self.assertEqual(r.json(), {})\n self.assertEqual(r.status_code, 404)\n","repo_name":"campadrenalin/python-djdns","sub_path":"djdns/tests/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"53"} +{"seq_id":"34325264872","text":"from collections.abc import Iterable\nfrom game import GameRoom\nfrom bots import RandomSearchBot, NNBot\nimport pandas as pd\nimport csv\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers import Dense\nfrom tensorflow.python.keras.optimizers import adam_v2, adadelta_v2\nfrom tensorflow.python.keras.losses import mae\nfrom concurrent.futures import ProcessPoolExecutor\nfrom tensorflow.python.keras.models import load_model\n\ndef play_game(num_bots: int) -> None:\n settings = {\n \"max_players\": 7,\n \"bot_type\": None,\n \"step_size\": 1,\n \"reverse\": True\n }\n \n room = GameRoom('bruh', settings)\n\n for i in range(num_bots):\n bot = RandomSearchBot(f\"RSBot{i + 1}\", i + 1, 500, 13)\n room.add_player(bot)\n\n room.start_game()\n\n while room.engine.state.phase != 'game over':\n while room.engine.state.player_turn != 999:\n current_player = 
room.engine.state.get_player_from_turn()\n move = current_player.get_move(room.engine.get_player_perspective(current_player, room.engine.state))\n room.do_player_move(move)\n\n room.do_player_move(None)\n\ndef build_and_compile_regression_model():\n model = Sequential()\n model.add(Dense(550, input_shape=(377,), activation='relu'))\n model.add(Dense(550, activation='relu'))\n model.add(Dense(225, activation=\"relu\"))\n model.add(Dense(1))\n\n model.compile(loss=mae, optimizer=adam_v2.Adam(learning_rate=0.001))\n return model\n\ndef train_regression_model(model):\n x = pd.read_csv(\"src/static/data.csv\", header=None)\n y = x.pop(x.columns[-1])\n\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=112358)\n history = model.fit(x_train, y_train, epochs=30, batch_size=256, validation_split=0.1)\n\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title(f'Loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.show()\n\n # results = model.evaluate(x_test, y_test)\n # print(f\"Loss: {results}\")\n\n model.save(\"src/static/model3\")\n\ndef build_and_compile_classification_model():\n model = Sequential()\n model.add(Dense(340, input_shape=(683,), activation='relu'))\n model.add(Dense(17, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer=adam_v2.Adam(learning_rate=0.01), metrics=['accuracy'])\n return model\n\ndef train_classification_model(model):\n df = pd.read_csv(\"src/static/data2.csv\", header=None)\n x = df.iloc[:, :683]\n y = df.iloc[:, -17:]\n\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1)\n\n history = model.fit(x_train, y_train, epochs=50, batch_size=512, validation_split=0.1)\n\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title(f'Loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.savefig(\"loss_graph.png\")\n plt.clf()\n\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title(f'Accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.savefig(\"accuracy_graph.png\")\n plt.clf()\n\ndef task(arg):\n count = 0\n for i in range(arg):\n count += 1\n for i in range(3, 7):\n if i == 3:\n print(f\"{count}: {i}\")\n play_game(i)\n if i == 4:\n print(f\"{count}: {i}\")\n play_game(i)\n if i == 5:\n print(f\"{count}: {i}\")\n play_game(i)\n if i == 6:\n print(f\"{count}: {i}\")\n play_game(i)\n\nif __name__ == '__main__':\n # with ProcessPoolExecutor(6) as exe:\n # exe.map(task, range(1, 600))\n\n # model = build_and_compile_regression_model()\n # train_regression_model(model)\n\n x = pd.read_csv(\"src/static/data3.csv\", header=None)\n y = x.pop(x.columns[-1])\n\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=112358)\n \n model = load_model('src/static/model')\n\n results = model.evaluate(x_test, y_test)\n print(f\"Model Loss: {results}\")\n\n","repo_name":"conorsegeth/BoerenbridgeWeb","sub_path":"src/ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13033314901","text":"from appium.webdriver import WebElement\nfrom appium.webdriver.webdriver import WebDriver\nfrom selenium.webdriver.common.by import By\n\nimport logging\n\n\nclass BasePage:\n 
logging.basicConfig(level=logging.INFO)\n # 弹框处理的名单\n _black_ist = [\n (By.XPATH, '//*[@text=\"确认\"]'),\n (By.XPATH, '//*[@text=\"下次再说\"]'),\n (By.XPATH, '//*[@text=\"确定\"]'),\n ]\n\n # 最大查找次数设置为三次\n _max_num = 3\n _error_num = 0\n\n def __init__(self, driver: WebDriver = None):\n self._driver = driver\n\n def find(self, locator, value: str = None):\n '''\n 查找元素的时候弹框处理\n :param locator:\n :param value:\n :return:\n '''\n\n logging.info(locator)\n logging.info(value)\n\n try:\n element: WebElement = self._driver.find_element(*locator) if isinstance(locator, tuple) else self._driver.find_element(locator, value)\n\n # if isinstance(locator, tuple):\n # element: WebElement = self._driver.find_element(*locator)\n # else:\n # element: WebElement = self._driver.find_element(locator, value)\n\n # 找到元素之后,_error_num归零\n self._error_num = 0\n\n # 元素查找完之后,再将隐式等待设置为10秒\n self._driver.implicitly_wait(10)\n return element\n except Exception as e:\n '''\n 处理弹框导致的没有找到元素的情况,最大查找次数为三次\n 做法:点击弹框、查找元素\n '''\n # 查找元素出现异常的时候将隐式等待设置成1秒,快速的处理弹框\n self._driver.implicitly_wait(1)\n\n # 判断异常处理次数\n if self._error_num > self._max_num:\n raise e\n\n self._error_num += 1\n\n # 处理弹框\n for ele in self._black_ist:\n logging.info(ele)\n ele_list = self._driver.find_elements(*ele)\n if len(ele_list) > 0:\n ele_list[0].click()\n # 处理玩弹框之后,去查找目标元素\n return self.find(locator, value)\n\n raise e\n\n def find_and_gettext(self, locator, value: str = None):\n '''\n 查找元素并获取text属性时弹框处理\n :param locator:\n :param value:\n :return:\n '''\n try:\n text = self._driver.find_element(*locator).text if isinstance(locator, tuple) else self._driver.find_element(locator, value).text\n\n self._error_num = 0\n\n self._driver.implicitly_wait(10)\n return text\n except Exception as e:\n self._driver.implicitly_wait(1)\n\n if self._error_num > self._max_num:\n raise e\n\n self._error_num += 1\n\n for ele in self._black_ist:\n ele_list = self._driver.find_elements(*ele)\n if len(ele_list) > 0:\n ele_list[0].click()\n return self.find_and_gettext(locator, value)\n\n raise e\n","repo_name":"ZhjingK/PricticeTest","sub_path":"AppTest/page/basepage.py","file_name":"basepage.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33393446118","text":"\"\"\"\nDefine the Status Type model\n\"\"\"\nfrom enum import Enum\n\n\nclass StatusType(Enum):\n INACTIVE = 0\n ACTIVE = 1\n NEW = 2\n PENDING = 3\n IN_PROGRESS = 4\n ON_HOLD = 5\n COMPLETED = 6\n FAILED = 7\n RETRY = 8\n ABORTED = 9\n CANCELED = 10\n TIMEOUT = 11\n SCHEDULED = 12\n DISCARDED = 13\n DELAYED = 14\n SKIPPED = 15\n PRIORITY = 16\n QUEUED = 17\n RESUMED = 18\n WAITING = 19\n ARCHIVED = 20\n EXPIRED = 21\n VALIDATING = 22\n VALIDATED = 23\n INVALID = 24\n PROCESSING = 25\n PROCESSED = 26\n ACKNOWLEDGED = 27\n UNACKNOWLEDGED = 28\n ASSIGNED = 29\n UNASSIGNED = 30\n ACCEPTED = 31\n REJECTED = 32\n SUBMITTED = 33\n REVOKED = 34\n SUCCEEDED = 35\n TERMINATED = 36\n BLOCKED = 37\n UNBLOCKED = 38\n EXCEPTION = 39\n RECOVERED = 40\n DELEGATED = 41\n INITIALIZED = 42\n UPLOADING = 43\n UPLOADED = 44\n DOWNLOADING = 45\n DOWNLOADED = 46\n SYNCING = 47\n SYNCED = 48\n MERGING = 49\n MERGED = 50\n VERIFYING = 51\n VERIFIED = 52\n COMMITTING = 53\n COMMITTED = 54\n REVERSING = 55\n REVERSED = 56\n BACKED_UP = 57\n RESTORING = 58\n RESTORED = 59\n CLEANING = 60\n CLEANED = 61\n RECALCULATING = 62\n RECALCULATED = 63\n SAVING = 64\n SAVED = 65\n LOADING = 66\n LOADED = 67\n ENQUEUED = 68\n DEQUEUED = 69\n 
RERUNNING = 70\n RERUN = 71\n RESCHEDULING = 72\n RESCHEDULED = 73\n REINITIALIZING = 74\n REINITIALIZED = 75\n DELETING = 76\n DELETED = 77\n REPLACING = 78\n REPLACED = 79\n EVALUATING = 80\n EVALUATED = 80\n CONFIGURING = 81\n CONFIGURED = 82\n IMPORTING = 83\n IMPORTED = 84\n","repo_name":"caiola/vinhos.com","sub_path":"backend/api/models/status_type.py","file_name":"status_type.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71677291049","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport sys, tty, termios\nfrom std_srvs.srv import Trigger, TriggerRequest\nfrom sensor_msgs.msg import JointState\nfrom control_msgs.msg import FollowJointTrajectoryGoal\nfrom trajectory_msgs.msg import JointTrajectoryPoint\nfrom std_msgs.msg import String\nimport rospy\n# This allows Ctrl-C and related keyboard commands to still function.\n# It detects escape codes in order to recognize arrow keys. It also\n# has buffer flushing to avoid repeated commands. This is\n# blocking code.\n\ndef getch():\n stdin_fd = 0\n # \"Return a list containing the tty attributes for file descriptor\n # fd, as follows: [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]\"\n # from https://docs.python.org/2/library/termios.html\n original_tty_attributes = termios.tcgetattr(stdin_fd)\n new_tty_attributes = original_tty_attributes[:]\n # Change the lflag (local modes) to turn off canonical mode\n new_tty_attributes[3] &= ~termios.ICANON\n try:\n termios.tcsetattr(stdin_fd, termios.TCSAFLUSH, new_tty_attributes)\n ch1 = sys.stdin.read(1)\n if ch1 == '\\x1b':\n # special key pressed\n ch2 = sys.stdin.read(1)\n ch3 = sys.stdin.read(1)\n ch = ch1 + ch2 + ch3\n else:\n # not a special key\n ch = ch1\n finally:\n termios.tcsetattr(stdin_fd, termios.TCSAFLUSH, original_tty_attributes)\n return ch\n\ndef talker():\n stretch_keyboard_pub = rospy.Publisher('/stretch/keyboard_pub',String, queue_size=10)\n rospy.init_node('stretch_talker')\n rate = rospy.Rate(10)\n rospy.loginfo('successfully init node:{}'.format(\"stretch_talker\"))\n while not rospy.is_shutdown():\n c = getch()\n stretch_keyboard_pub.publish(c)\n rate.sleep()\n\nif __name__ == '__main__':\n try:\n talker()\n except rospy.ROSInterruptException:\n pass\n\n","repo_name":"jiaweili-hammer/stretch_project_li","sub_path":"stretch_basic_nodes/src/keyboard_remote.py","file_name":"keyboard_remote.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37900737487","text":"from flask import g,current_app,jsonify,request\nfrom flask import session\nfrom ihome.libs.utils.image_storage import storage\nfrom ihome.libs.utils.response_code import RET\n\nfrom ihome.libs.utils.utils import login_required\nfrom ihome.models.models import User\nfrom ihome.registers import db\nfrom . 
import api\n\n\n@api.route(\"/users/avatar\",methods=[\"POST\"])\n@login_required\ndef set_user_avatar():\n \"\"\"\n 设置用户头像\n :return:\n \"\"\"\n user_id =g.user_id\n\n image_file=request.files.get(\"avatar\")\n\n if image_file is None:\n return jsonify(errno=RET.PARAMERR, errmsg=\"未上传图片\")\n\n image_data = image_file.read()\n\n # 调用七牛上传图片, 返回文件名\n try:\n file_name = storage(image_data)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.THIRDERR, errmsg=\"上传图片失败\")\n\n # 保存文件名到数据库中\n try:\n User.query.filter_by(id=user_id).update({\"avatar_url\": file_name})\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"保存图片信息失败\")\n\n # 最好返回保存的文件信息\n avatar_url=current_app.config[\"QINIU_URL_DOMAIN\"]+file_name\n\n return jsonify(errno=RET.OK,errmsg=\"保存成功\",data={\"avatar_url\":avatar_url})\n\n\n@api.route(\"/user\")\n@login_required\ndef get_user_profile():\n \"\"\"\n 用户名获取\n :return:\n \"\"\"\n user_id = g.user_id\n try:\n user=User.query.get(user_id)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"获取用户信息失败\")\n\n if user is None:\n return jsonify(errno=RET.NODATA, errmsg=\"无效操作\")\n\n return jsonify(errno=RET.OK,errmsg=\"OK\", data = user.to_dict())\n\n\n@api.route(\"/users/name\",methods=[\"PUT\"])\n@login_required\ndef change_user_name():\n \"\"\"\n 用户名修改\n :return:\n \"\"\"\n\n req_json=request.get_json()\n if not req_json:\n return jsonify(errno=RET.PARAMERR,errmsg=\"名字不能为空\")\n\n new_name=req_json.get(\"name\")\n\n # 判断用户名是否重复,如果重复则出错,可以细化错误类型\n try:\n User.query.filter_by(id=g.user_id).update({\"name\":new_name})\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR,errmsg=\"设置用户名出错\")\n\n # 因为session中保存的有name值,所以也要更新一下\n session[\"name\"]=new_name\n\n # 更新后最好返回新的信息\n return jsonify(errno=RET.OK,errmsg=\"OK\",data={\"name\":new_name})\n\n\n# 实名认证实现\n@api.route(\"/users/auth\")\n@login_required\ndef get_user_auth():\n \"\"\"\n 查询实名认证信息\n :return:\n \"\"\"\n try:\n user=User.query.get(g.user_id)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"获取用户实名信息失败\")\n\n return jsonify(errno=RET.OK,errmsg=\"OK\",data=user.auth_to_dict())\n\n\n# 返回实名认证数据\n@api.route(\"/users/auth\",methods=[\"POST\"])\n@login_required\ndef set_user_auth():\n \"\"\"\n 设置实名认证信息\n :return:\n \"\"\"\n # 获取参数\n req_data = request.get_json()\n if not req_data:\n return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")\n\n real_name = req_data.get(\"real_name\") # 真实姓名\n id_card = req_data.get(\"id_card\") # 身份证号\n\n # 参数校验\n if not all([real_name, id_card]):\n return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")\n\n # 保存用户的姓名与身份证号\n try:\n User.query.filter_by(id=g.user_id, real_name=None, id_card=None) \\\n .update({\"real_name\": real_name, \"id_card\": id_card})\n db.session.commit()\n except Exception as e:\n current_app.logger.error(e)\n db.session.rollback()\n return jsonify(errno=RET.DBERR, errmsg=\"保存用户实名信息失败\")\n\n return jsonify(errno=RET.OK, errmsg=\"OK\")","repo_name":"yf-web/myhouse","sub_path":"ihome/api_1_0/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74144784169","text":"# [896] 单调数组\n\n# https://leetcode-cn.com/problems/monotonic-array/description/\n\n# * algorithms\n# * Easy 
(53.98%)\n# * Total Accepted: 49.4K\n# * Total Submissions: 84.5K\n# * Testcase Example: '[1,2,2,3]'\n\n# 如果数组是单调递增或单调递减的,那么它是单调的。\n\n# 如果对于所有 i <= j,A[i] <= A[j],那么数组 A 是单调递增的。 如果对于所有 i <= j,A[i]> = A[j],那么数组 A 是单调递减的。\n\n# 当给定的数组 A 是单调数组时返回 true,否则返回 false。\n\n# 示例 1:\n\n# 输入:[1,2,2,3]\n# 输出:true\n\n\n# 示例 2:\n\n# 输入:[6,5,4,4]\n# 输出:true\n\n\n# 示例 3:\n\n# 输入:[1,3,2]\n# 输出:false\n\n\n# 示例 4:\n\n# 输入:[1,2,4,5]\n# 输出:true\n\n\n# 示例 5:\n\n# 输入:[1,1,1]\n# 输出:true\n\n# 提示:\n\n# \t1 <= A.length <= 50000\n# \t-100000 <= A[i] <= 100000\n\n\nclass Solution:\n def isMonotonic0(self, a):\n n = len(a)\n if n <= 2:\n return True\n flag = None\n for i in range(1, n):\n if a[i - 1] != a[i]:\n if flag is None:\n flag = a[i - 1] > a[i]\n elif flag != (a[i - 1] > a[i]):\n return False\n return True\n\n def isMonotonic1(self, a):\n desc = True if a[0] >= a[-1] else False\n n = len(a)\n if desc:\n return all(a[i - 1] >= a[i] for i in range(1, n))\n return all(a[i - 1] <= a[i] for i in range(1, n))\n\n def isMonotonic(self, a):\n asc, desc = True, True\n i, n = 1, len(a)\n while i < n and (asc or desc):\n asc = asc and a[i - 1] <= a[i]\n desc = desc and a[i - 1] >= a[i]\n i += 1\n return asc or desc\n","repo_name":"hedeqiang/leetcode-1","sub_path":"python/896.monoic-array.py","file_name":"896.monoic-array.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20404436756","text":"from asyncio import tasks\nfrom datetime import datetime\nimport discord\nfrom discord.ext import tasks,commands\nimport os\nfrom dotenv import load_dotenv\n\nfrom get_from_caldav import CalDAV\nload_dotenv()\nTOKEN = os.getenv('TOKEN')\nCHANNELID = os.getenv('CHANNELID')\n\nclass MyClient(discord.Client):\n async def on_ready(self):\n self.caldav = CalDAV()\n self.send_schedules.start()\n async def on_message(self, message):\n # don't respond to ourselves\n if message.author == self.user:\n return\n\n if message.content == 'stop':\n self.send_schedules.cancel()\n\n\n @tasks.loop(hours=6)\n async def send_schedules(self):\n today = datetime.now()\n schedulelist = self.caldav.getSchedules(today,1)\n\n channel = self.get_channel(CHANNELID)\n await channel.send(schedulelist)\n #print(schedulelist)\n\n\nintents = discord.Intents.default()\nintents.message_content = True\nclient = MyClient(intents=intents)\n\nclient.run(TOKEN)","repo_name":"masaki12-s/discord_schedule_notification","sub_path":"post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73207980967","text":"from datetime import datetime, timedelta\n\ntry:\n print('Введите дату рождения в формате ДД/ММ/ГГ')\n hb_date = datetime.strptime(input(), \"%d/%m/%y\")\nexcept ValueError:\n print(\"Неверный формат\")\n exit()\n\ndays = 10000\nminutes = 1000000\nseconds = 100000000\n\npr_days = timedelta(days=days)\npr_minutes = timedelta(minutes=minutes)\npr_seconds = timedelta(seconds=seconds)\n\nhb_days = hb_date + pr_days\nhb_minutes = hb_date + pr_minutes\nhb_seconds = hb_date + pr_seconds\n\nprint(\"10.000 дней будет: \", hb_days)\nprint(\"1.000.000 минут будет: \", hb_minutes)\nprint(\"1.000.000.000 секунд будет: \", hb_seconds)","repo_name":"LorganMM/Labarotornie","sub_path":"laba3/zadanie4.py","file_name":"zadanie4.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"32899858017","text":"def gcdIter(a, b):\n \"\"\"\n a, b: positive integers\n\n returns: a positive integer, the greatest common divisor of a & b.\n \"\"\"\n # Your code here\n if a > b:\n result = b\n else:\n result = a\n\n while (a % result) != 0 or (b % result) != 0:\n result -= 1\n\n return result\n","repo_name":"biofalopes/MITx-6.00.1x","sub_path":"Week2/gcdIter.py","file_name":"gcdIter.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31004440938","text":"#! /usr/bin/env python\n\n\"\"\"\nSimple experiment showing how to make reports for data obtained without\nLab.\n\nTo use custom results, create the file -eval/properties. It\nmust be a JSON file mapping planner run names to results (see below).\nThe run names must obviously be unique, but they're not used for the\nreports. Each value in the dictionary must itself be a dictionary with\nat least the keys \"domain\", \"problem\", \"algorithm\". In addition you need\nthe attribute names and values that you want to make reports for, e.g.\n\"coverage\", \"expansions\", \"time\".\n\n\"\"\"\n\nimport json\nfrom pathlib import Path\n\nfrom downward.reports.absolute import AbsoluteReport\nfrom lab.experiment import Experiment\n\n\nPROPERTIES = {\n \"ff-gripper-prob01.pddl\": {\n \"domain\": \"gripper\",\n \"problem\": \"prob01.pddl\",\n \"algorithm\": \"ff\",\n \"coverage\": 1,\n \"expansions\": 1234,\n },\n \"blind-gripper-prob01.pddl\": {\n \"domain\": \"gripper\",\n \"problem\": \"prob01.pddl\",\n \"algorithm\": \"blind\",\n \"coverage\": 1,\n \"expansions\": 6543,\n },\n}\n\n\ndef write_properties(eval_dir):\n eval_dir = Path(eval_dir)\n eval_dir.mkdir(parents=True, exist_ok=True)\n with open(eval_dir / \"properties\", \"w\") as f:\n json.dump(PROPERTIES, f)\n\n\nexp = Experiment()\nexp.add_report(AbsoluteReport(attributes=[\"coverage\", \"expansions\"]))\n\nwrite_properties(exp.eval_dir)\nexp.run_steps()\n","repo_name":"aibasel/lab","sub_path":"examples/report-external-results.py","file_name":"report-external-results.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"70521113769","text":"from brain import Brain\nfrom helper import *\nimport config, pygame\n\nclass Member:\n\n\tmax_energy = config.member_max_energy\n\n\tdef __init__( self ):\n\t\tself.radius \t = config.member_radius\n\t\tself.color \t = Helper.random_color()\n\n\t\tself.brain = Brain()\n\n\t\tself.x \t\t\t = None\n\t\tself.y \t\t\t = None\n\n\t\tself.energy = self.max_energy\n\t\tself.distance = 0.0\n\t\tself.relation = 0.0\n\t\tself.rotation = 0.0\n\t\tself.speed = 0.0\n\n\t\tself.left_track = None\n\t\tself.right_track = None\n\n\t\tself.close_to = None\n\t\tself.alive \t\t = True\n\t\tself.food \t\t = 0\n\t\tself.score = 0\n\t\tself.norm_score = 0.0\n\t\tself.mutations = 0\n\n\t\tself.error_count = 0\n\n\t\tself.born \t\t = pygame.time.get_ticks()\n\t\tself.died\t\t = None\n\t\tself.lifespan = 0\n\n\t\tself.generation = 0\n\t\tself.member_num = 0\n\n\tdef update_member_state( self ):\n\n\t\tif self.energy == 0 or self.energy > self.max_energy * 2:\n\t\t\tself.alive = False\n\t\t\tself.died \t = pygame.time.get_ticks()\n\t\t\tself.lifespan = self.died - self.born\n\t\telse:\n\t\t\tself.alive = True\n\n\t\tif self.alive:\n\t\t\tself.process_network()\n\t\t\tself.rotation += (self.left_track - self.right_track) / self.radius\n\t\t\tif self.rotation > 
(2 * math.pi): self.rotation = 0\n\t\t\telif self.rotation < 0: self.rotation = (2 * math.pi)\n\t\t\tself.speed = self.left_track + self.right_track\n\n\t\t\tif self.close_to != None:\n\t\t\t\tself.distance = Helper.get_distance(self,self.close_to)\n\t\t\t\tself.relation = Helper.get_relation_to(self,self.close_to)\n\n\t\t\tself.energy -= config.energy_loss\n\n\n\tdef update_member_position( self ):\n\n\t\tif self.alive:\n\n\t\t\tx_move = self.x + Helper.delta_x( self.rotation, self.speed )\n\t\t\ty_move = self.y + Helper.delta_y( self.rotation, self.speed )\n\n\t\t\tif x_move > config.viewport_width: x_move = 0\n\t\t\telif x_move < 0: x_move = config.viewport_width\n\n\t\t\tif y_move > config.viewport_height: y_move = 0\n\t\t\telif y_move < 0: y_move = config.viewport_height\n\n\t\t\tself.x, self.y = x_move, y_move\n\t\n\tdef process_network( self ):\n\t\tif self.error_count < 10:\n\t\t\ttry:\n\t\t\t\tself.left_track, self.right_track = self.brain.activate_network(self.get_params())\n\t\t\texcept:\n\t\t\t\tself.brain.network.sortModules()\n\t\t\t\tself.error_count += 1\n\t\t\t\tself.process_network()\n\t\telse:\n\t\t\tself.left_track, self.right_track = [0,0]\n\t\t\tself.color = [20,20,20]\n\n\tdef get_params( self ):\n\t\tenergy_input \t= Helper.make_uniform(self.energy, 1)\n\t\tdistance \t\t= Helper.make_uniform(self.distance, 2)\n\t\trelation\t\t= Helper.make_uniform(self.relation, 2)\n\t\treturn [ energy_input, distance, relation ]\n\n\tdef draw( self, display ):\n\t\tif self.alive:\n\t\t\tdx = self.x + Helper.delta_x( self.rotation, self.radius )\n\t\t\tdy = self.y + Helper.delta_y( self.rotation, self.radius )\n\t\t\t# if self.close_to != None and config.debug: pygame.draw.aaline( display, [130,130,130], (Helper.fixed(self.x), Helper.fixed(self.y)), (Helper.fixed(self.close_to.x), Helper.fixed(self.close_to.y)), 1)\n\t\t\tpygame.draw.circle( display, self.color, (Helper.fixed(self.x), Helper.fixed(self.y)), self.radius, 0)\n\t\t\tpygame.draw.circle( display, [0,0,0], (Helper.fixed(self.x), Helper.fixed(self.y)), self.radius, 1)\n\t\t\tpygame.draw.aaline( display, [0,0,0], (Helper.fixed(self.x), Helper.fixed(self.y)), (Helper.fixed(dx), Helper.fixed(dy)), 1)\n\n\t\t\tif config.debug:\n\t\t\t\tfont = pygame.font.SysFont(\"monospace\", 10)\n\n\t\t\t\tlabel = font.render(str(self.mutations), 1, [0,0,0] )\n\t\t\t\tdisplay.blit( label, (self.x - 6, self.y + 10))\n\n\t\t\t\tlabel = font.render(str(self.food), 1, [0,0,0] )\n\t\t\t\tdisplay.blit( label, (self.x, self.y + 10))\n\n","repo_name":"TaylorBenner/evo-project","sub_path":"classes/member.py","file_name":"member.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6317880542","text":"import torch\nimport torch.nn as nn\n\nclass AdaptiveNormalization(nn.Module):\n def __init__(self, filters, skip_mode=False):\n super().__init__()\n\n self.skip_mode = skip_mode\n if not skip_mode:\n self.bn = nn.BatchNorm2d(filters)\n self.phi = nn.Conv2d(1, 1, 1, 1, 0, bias=True)\n self.phi.weight.data.fill_(1.5)\n self.phi.bias.data.fill_(0)\n \n\n def forward(self, x, func, skip):\n \n \n if self.skip_mode:\n x_nm = func(x)\n out = x_nm + skip\n\n else:\n s = torch.std(skip, dim=[1,2,3], keepdim=True)\n self.s = self.phi(s)\n x_nm = self.bn(x)\n x_nm = func(x_nm)\n out = x_nm*self.s + skip\n\n return out\n 
","repo_name":"diff7/QuanToaster","sub_path":"sr_models/ADN.py","file_name":"ADN.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"21170928095","text":"class Solution(object):\n def findDisappearedNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n\n if nums == []:\n return []\n else:\n nums = sorted(nums)\n complete_list = range(1,len(nums)+1)\n incomplete_list = list(set(nums))\n return list(set(complete_list) - set(incomplete_list))\n","repo_name":"kevinlacaille/LeetCode-practice-problems","sub_path":"Find_all_numbers_disappeared in_array.py","file_name":"Find_all_numbers_disappeared in_array.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6422623804","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/9/26 14:58\n# @Author : YuChou\n# @Site :\n# @File : locusettest1.py\n# @Software: PyCharm\n\nfrom locust import HttpLocust,TaskSet,task\n\nclass test_126(TaskSet):\n @task\n def test_baidu(self):\n header = {\n \"User-Agent\": \"Mozilla/5.0 \" \n \"(Windows NT 6.1; Win64; x64) AppleWebKit/537.36 \" \n \"(KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36\"}\n r=self.client.get(\"http://10.230.2.36/\",timeout=30,headers=header)\n assert r.status_code==200\n\nclass websiteUser(HttpLocust):\n task_set = test_126\n min_wait = 3000\n max_wait = 6000\n\n\n","repo_name":"mrzhouyu/ServerPr","sub_path":"locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43055267031","text":"import tensorflow as tf\nimport logging\nimport argparse\nimport train\nimport predict\n\ndef get_parser():\n \"\"\" CLI Argument Parser.\"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"mode\", type=str, help=\"train/predict\")\n parser.add_argument(\"--image\", type=str)\n parser.add_argument(\"--arch\", default=\"nn\", type=str,\n help=\"the ML model to be used.\")\n parser.add_argument(\"--batch_size\", default=1, type=int,\n help=\"size of the mini batch\")\n parser.add_argument(\"--learning_rate\", default=1E-3, type=float,\n help=\"learning rate\")\n parser.add_argument(\"--num_steps\", default=1000, type=int,\n help=\"number of steps\")\n parser.add_argument(\"--shuffle\", default=False, type=bool,\n help=\"flag to shuffle the dataset before training\")\n return parser\n\ndef main():\n\n logging.basicConfig(filename='classifier.log',\n filemode='w', level=logging.DEBUG)\n tf.logging.set_verbosity(tf.logging.INFO)\n\n logging.info(\"reading arguments\\n\")\n # Parse the CLI Arguments.\n parser = get_parser()\n args = parser.parse_args()\n\n if args.mode == 'train':\n train.run(args)\n elif args.mode == 'predict':\n predict.run(args)\n else:\n logging.error(\"Please enter the correct mode of operation.\\n\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"pratikadarsh/JBM_Assignment","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33981251467","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.new, name='new'),\n path('popular/', views.popular, name='popular'),\n path('question//', views.question_details, name='question'),\n path('ask/', views.ask_question, name='ask_question'),\n path('login/', views.user_login, name='login'),\n path('signup/', views.user_signup, name='signup'),\n path('logout/', views.user_logout, name='logout'),\n\n #path('admin/', admin.site.urls), \n \n]","repo_name":"Catingblack/web","sub_path":"ask/qa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32527615821","text":"from sqlalchemy.orm import Session, load_only, Load\nfrom sqlite_sqlalchemy import models, schemas\n\n# CRUD for Employee\n\n\n# add employee to employees table\ndef create_employee(employee: schemas.Employee, db: Session):\n db_employee = models.Employee(name=employee.name, email=employee.email, password=employee.password,\n salary=employee.salary, dept_id=employee.dept_id)\n db.add(db_employee)\n db.commit()\n db.refresh(db_employee)\n return db_employee\n\n\n# get employee by id\ndef get_employee(db: Session, employee_id: int):\n return db.query(models.Employee)\\\n .filter(models.Employee.id == employee_id)\\\n .first()\n\n\n# get employee by email\ndef get_employee_by_email(db: Session, employee_email: str):\n return db.query(models.Employee)\\\n .filter(models.Employee.email == employee_email)\\\n .first()\n\n\n# get all employees by offset and limit\ndef get_employees(db: Session, skip: int = 0, limit: int = 100):\n return db.query(models.Employee)\\\n .offset(skip)\\\n .limit(limit)\\\n .all()\n\n\n# delete employee by id\ndef delete_employee(db: Session, employee_id: int):\n affected_rows = db.query(models.Employee)\\\n .filter(models.Employee.id == employee_id)\\\n .delete()\n db.commit()\n return affected_rows\n\n\n# CRUD for EmployeeDepartment\n\n\n# create Department record\ndef create_department(db: Session, employee_department: schemas.Department):\n db_department = models.Department(dept_name=employee_department.dept_name)\n db.add(db_department)\n db.commit()\n db.refresh(db_department)\n return db_department\n\n\n# joining two tables(employees & departments)\ndef get_employee_with_dept(db: Session):\n # return db.query(models.Employee, models.Department).\\\n # join(models.Department).all()\n\n rows = db.query(models.Employee, models.Department)\\\n .join(models.Employee.department)\\\n .options(\n Load(models.Employee).load_only(\"name\", \"email\"),\n Load(models.Department).load_only(\"dept_name\")\n )\\\n .all()\n\n attrs = ['name', 'id', 'email', 'dept_name']\n # Build the mappings using dictionary comprehensions\n # mappings = [{attr: getattr(e, attr) for attr in attrs} for e in rows]\n mappings = []\n for employee in rows:\n d = {\n 'name': employee.name,\n 'id': employee.id,\n 'email': employee.email,\n 'dept_name': employee.department.dept_name\n }\n mappings.append(d)\n return mappings\n\n # rows = db.query(*models.Employee.__table__.columns + models.Department.__table__.columns)\\\n # .select_from(models.Employee)\\\n # .join(models.Employee.department)\\\n # .all()\n # return rows\n\n # rows = db.query(models.Employee.name, models.Department.dept_name) \\\n # .join(models.Employee.department) \\\n # .all()\n # return rows\n # return db.query(models.Employee.name, models.Employee.salary, models.Department.dept_name).\\\n # 
join(models.Department).all()\n\n\n\n\n\n","repo_name":"omarcsejust/learn-fastapi","sub_path":"sqlite_sqlalchemy/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17435984204","text":"import re\nfrom pykakasi import kakasi\nimport pyttsx3\nimport time\n\n# オブジェクトをインスタンス化\nkakasi = kakasi()\ns = pyttsx3.init()\n\n#解答して不正解の回数(グローバル変数)\nmiss_count=0\n\n#メインの関数。バビ語に変換した後、音声出力する関数(正解だった場合実行する)\ndef lambda_handler(text1):\n #バビ語を音声に\n s.say(text1) \n s.runAndWait()\n\n#メインの関数。バビ語に変換した後、音声出力する関数(正解だった場合実行する)\ndef answer(text1):\n #ひらがなに変換\n text2=toHira(text1)\n #バビ語に変換\n text=tobabi(text2)\n #バビ語に変換した文字列を出力\n return text\n\n#(漢字やカタカナを)ひらがなに変換する関数\ndef toHira(text):\n # モードの設定:J(Kanji) to H(Hiragana)\n kakasi.setMode('J', 'H') \n # 変換して出力\n conv = kakasi.getConverter()\n return conv.do(text)\n\n#バビ語に変換する関数\ndef tobabi(text):\n rettext=\"\"\n for i in range(len(text)):\n if re.search(text[i],\"あかさたなはまやらわんがざだばぱ\"):\n rettext+=text[i]+\"ば\"\n if re.search(text[i],\"いきしちにひみりぎじぢびぴ\"):\n rettext+=text[i]+\"び\"\n if re.search(text[i],\"うくすつぬふむゆるぐずづぶぷ\"):\n rettext+=text[i]+\"ぶ\"\n if re.search(text[i],\"えけせてねへめれげぜでべぺ\"):\n rettext+=text[i]+\"べ\"\n if re.search(text[i],\"おこそとのほもよろをごぞどぼぽ\"):\n rettext+=text[i]+\"ぼ\"\n return rettext\n\n#ストップウォッチの関数\ndef convert(sec):\n minits = sec // 60\n second = sec % 60\n milli_sec = (second - int(second)) * 1000\n hour = minits // 60\n min = minits % 60\n return f\"{int(hour)}:{int(min)}:{int(second)}:{int(milli_sec)}\"\n\n#解答があっているかの関数。\ndef judge(response):\n #グローバル変数を使うことを再宣言\n global miss_count\n\n #解答が正解の場合\n if answer(text)==response:\n #ストップウォッチを止める。\n stop_time = time.time()\n #ストップウォッチの結果(正解までにかかった時間)を出力。\n result = stop_time - start_time\n time_result = convert(result)\n print(f\"正解!かかった時間は:{time_result}です。\")\n #正解の場合、バビ語に変換した文字列を音声で出力。\n lambda_handler(response)\n return 0\n #不正解の場合\n else:\n #解答して不正解の回数(グローバル変数)\n miss_count+=1\n if miss_count==3:\n print(\"残念!不正解です。解答チャンスが0回になりました。また遊んでね!\")\n return 0\n print(f\"不正解です。あと{3-miss_count}回解答できます\")\n #もう一度入力を受け取り、解答があっているかの関数を実行する。\n judge(input())\n\n#ユーザーがエンターを押すとストップウォッチが開始され、問題文が出力される。\nstart_signal = input(\"エンターを押すと問題文が出力されます。同時にストップウォッチが開始されます。解答権は3回です。\")\n#問題出力\nprint(\"以下の言葉をバビ語に変換して下さい。\")\n\n#問題(ここに問題文を入れる。)\ntext=\"はじめまして\"\nprint(text)\n#ストップウォッチを開始する。\nstart_time = time.time()\n\n#解答が正解かを判定する関数を実行。\njudge(input())","repo_name":"dodoya1/babilaung","sub_path":"バビ語ストップ.py","file_name":"バビ語ストップ.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19886001129","text":"# 命名切片\n\nrecord = '....................100 .......513.25 ..........'\ncost = int(record[20:23]) * float(record[31:37])\n\nprint(cost)\n\n# 使用切片\nSHARES = slice(20, 23)\nPRICE = slice(32, 37)\ncost = int(record[SHARES]*record[PRICE])\n\n# 具体使用方法\nitems = [0, 1, 2, 3, 4, 5, 6]\na = slice(2, 4)\nitems[2:4]\n# [2, 3]\nitems[a]\n# [2, 3]\nitems[a] = [10, 11]\nitems\n# [0, 1, 10, 11, 4, 5, 6]\ndel items[a]\nitems\n# [0, 1, 4, 5, 6]\n\n\n# 如��你有一个切片对象a,你可以分别调用它的 a.start , a.stop , a.step 属性来获取更多的信息\n\na = slice(5, 50, 2)\na.start\na.stop\na.step\n\n\n# 你还能通过调用切片的 indices(size) 方法将它映射到一个确定大小的序列上,\n# 这个方法返回一个三元组(start, stop, step) ,所有值都会被合适的缩小以满足边界限制,\n# 从而使用的时候避免出现 IndexError 异常\ns = 'HelloWorld'\na.indices(len(s))\n# (5,10,2)\nfor i in range(*a.indices(len(s))):\n print(s[i])\n# W\n# r\n# 
d\n","repo_name":"yangtao0304/python-cookbook","sub_path":"c1 数据结构和算法/1_11.py","file_name":"1_11.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19742328229","text":"import numpy as np\nimport pandas as pd\nimport tldextract\nfrom collections import Counter\n\n\ndef chr_count_FQN(url):\n chr_sum= 0\n for i in url:\n if i==\".\": \n continue\n else: \n chr_sum += 1\n return chr_sum\n \ndef sub_domain_count(url):\n sub_domain, _ , __ =tldextract.extract(url)\n return chr_count_FQN(sub_domain)\n\ndef upper_count(url):\n up_sum = 0\n for i in url:\n if i.isupper():\n up_sum += 1\n return up_sum\n\ndef lower_count(url):\n low_sum = 0\n for i in url:\n if (i.islower()==True) and (i.isdigit()==False):\n low_sum += 1\n return low_sum\n \ndef numeric_count(url):\n num_sum = 0\n for i in url:\n if i.isnumeric():\n num_sum += 1\n return num_sum \n\ndef entropy_count(url):\n pb, lenn = Counter(url), float(len(url))\n return - sum( count/lenn * np.log2(count/lenn) for count in pb.values())\n\ndef chr_special_count(url): \n special_sum= 0\n for i in url:\n if (i.isalpha()) or (i.isdigit() or i == '.'):\n continue\n else: \n special_sum += 1\n return special_sum\n\ndef label_count(url):\n l_sum =len(url.split('.'))\n return l_sum\n\ndef label_max(url):\n labels = url.split('.')\n return max(([len(x) for x in labels]))\n\ndef label_avg(url):\n labels = url.split('.')\n return np.average(([len(x) for x in labels]))\n \ndef longest_word(url):\n l_word = label_max(url)\n lens = [len(x) for x in url.split('.')]\n return url.split('.')[lens.index(max(lens))]\n \ndef second_level_D(url):\n subdomain,sld_,suffix_=tldextract.extract(url)\n return sld_ \n \ndef contains_subdomain(url):\n subdomain,sld_,suffix_=tldextract.extract(url)\n if len(subdomain) > 0:\n return 1\n else :\n return 0\n \ndef sub_domain_len(url):\n subdomain,sld,suffix_=tldextract.extract(url)\n return chr_count_FQN(subdomain)+chr_count_FQN(sld) \n \ndef build_feature_df(url):\n dictt = {'FQDN_count': chr_count_FQN(url),\n 'subdomain_length': sub_domain_count(url),\n 'upper': upper_count(url),\n 'lower': lower_count(url),\n 'numeric': numeric_count(url),\n 'entropy': entropy_count(url),\n 'special': chr_special_count(url),\n 'labels': label_count(url),\n 'labels_max': label_max(url),\n 'labels_average': label_avg(url),\n 'longest_word': longest_word(url),\n 'sld': second_level_D(url),\n 'len': sub_domain_len(url),\n 'subdomain': contains_subdomain(url)\n }\n \n df = pd.DataFrame(dictt, index=[1])\n return df","repo_name":"Aboalarbe/Real-time-Detection-of-Data-Exfiltration","sub_path":"src/data/features/build_features.py","file_name":"build_features.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22976999475","text":"from __future__ import annotations\n\nimport re\nimport string\n\nimport bs4\nimport pandas as pd\n\nfrom cryptics.utils import align_suspected_definitions_with_clues\n\nDASHES = [\"-\", \"—\", \"–\", \"–\", \"—\"]\nPUNCTUATION_IN_CLUE = list(\"/\\\\\")\nPUNCTUATION_IN_ANNOTATION = DASHES + list(\"{}~*/\\\\\")\nPUNCTUATION_IN_ANSWERS = DASHES + list(\"(){}|~*/\\\\_<'\")\n\n\ndef delete_chars(string: str, chars: list[str]):\n for char in chars:\n string = string.replace(char, \"\")\n return string\n\n\ndef is_parsable_special_type_1(html: str):\n \"\"\"\n Identifies if the web page looks like this:\n\n
\n 1a WASP, in part, // agitated Logan (5)
\n
\n ANGLO — anagram (agitated) of LOGAN
\n
\n\n
\n 9a Stirred neat gin, // feeding the kitty (7)
\n
\n ANTEING* — anagram (stirred) of NEAT GIN
\n
\n\n Examples:\n\n - https://natpostcryptic.blogspot.com/2021/08/saturday-august-21-2021-cox-rathvon.html\n - https://natpostcryptic.blogspot.com/2021/08/saturday-august-28-2021-cox-rathvon.html\n - https://natpostcryptic.blogspot.com/2021/09/saturday-september-4-2020-cox-rathvon.html\n \"\"\"\n\n soup = bs4.BeautifulSoup(html, \"html.parser\")\n entry_content = soup.find(\"div\", attrs={\"class\": lambda s: s in [\"entry-content\"]})\n answers_and_annotations = [\n line for line in entry_content.text.split(\"\\n\") if line.strip()\n ]\n\n phrases = [\n \"cox\",\n \"rathvon\",\n \"signing off for today\",\n \"falcon\",\n \"key to reference sources\",\n ]\n\n return (\n 30 - 10\n <= len(\n entry_content.find_all(\n \"div\", style=\"background-color: blue; line-height: 200%;\"\n )\n )\n and 100 <= len(answers_and_annotations)\n and 3 <= sum([phrase in entry_content.text.lower() for phrase in phrases])\n )\n\n\ndef parse_special_type_1(html: str):\n soup = bs4.BeautifulSoup(html, \"html.parser\")\n entry_content = soup.find(\"div\", attrs={\"class\": lambda s: s in [\"entry-content\"]})\n\n clue_number_and_clues = [\n a.text.strip()\n for a in entry_content.find_all(\n \"div\", style=lambda s: \"background-color:\" in s if s is not None else None\n )\n ]\n\n clue_numbers = []\n clues = []\n for line in clue_number_and_clues:\n clue_number = re.search(r\"^[0-9]+[a|d]?\", line)\n if clue_number is None:\n continue\n clue = line[clue_number.end() :].replace(\"\\n\", \" \").strip()\n\n clue_numbers.append(clue_number.group())\n clues.append(delete_chars(clue, PUNCTUATION_IN_CLUE))\n\n raw_definitions = [\n tag.text\n for table in entry_content.find_all(\n \"div\", style=lambda s: \"background-color:\" in s if s is not None else None\n )\n for tag in table.find_all(\"u\")\n ]\n\n # Save this for later - before we extract all the tables.\n for table in entry_content.find_all(\"table\"):\n table.extract()\n\n stop_phrases = [\"introduction\", \"epilogue\", \"signing off for today\"]\n answers_and_annotations = [\n line\n for line in entry_content.text.split(\"\\n\")\n if line.strip()\n and not any(\n line.lower().startswith(stop_phrase) for stop_phrase in stop_phrases\n )\n ]\n while True:\n try:\n line = answers_and_annotations.pop(0)\n except IndexError:\n return None\n\n if line.lower().strip() == \"across\":\n break\n\n answers = []\n annotations = []\n for line in answers_and_annotations:\n try:\n # Take the first match\n matches = [\n re.search(\"\\s+[\" + \"|\".join(DASHES) + \"]\\s+\", line),\n re.search(\"\\s+[\" + \"|\".join(DASHES) + \"]\\s?\", line),\n re.search(\"\\s?[\" + \"|\".join(DASHES) + \"]\\s+\", line),\n ]\n divider = next(m for m in matches if m is not None)\n\n answer = line[: divider.start()]\n stripped_answer = delete_chars(\n answer, PUNCTUATION_IN_ANSWERS + list(string.whitespace)\n )\n annotation = line[divider.end() :]\n if (\n not any([c.isalpha() for c in answer])\n or sum([c.isupper() for c in stripped_answer])\n <= len(stripped_answer)\n - 5 # Occasionally there will be an answer like \"M(E)ETS or ME(E)TS\"\n or len(\n delete_chars(\n answer, PUNCTUATION_IN_ANSWERS + list(string.whitespace)\n )\n )\n > 15 + 10\n ):\n continue\n except (StopIteration, AttributeError):\n continue\n\n answers.append(delete_chars(answer, PUNCTUATION_IN_ANSWERS))\n annotations.append(annotation.strip(\"\".join(PUNCTUATION_IN_ANNOTATION + [\" \"])))\n\n definitions = align_suspected_definitions_with_clues(clues, raw_definitions)\n\n out = pd.DataFrame(\n data=[clue_numbers, answers, 
clues, annotations, definitions],\n index=[\"clue_number\", \"answer\", \"clue\", \"annotation\", \"definition\"],\n ).T\n\n if out.isna().any(0).any(0):\n return None\n\n return out\n","repo_name":"eigenfoo/cryptics","sub_path":"cryptics/specials.py","file_name":"specials.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"31256515497","text":"import geopandas as gpd\nimport pandas as pd\nimport numpy as np\nimport os, pyodbc, sqlalchemy, time\nimport networkx as nx\nimport time\nfrom shapely import wkt\nfrom shapely.geometry import Point\nimport h5py\nimport yaml\nfrom pathlib import Path\nimport shutil\n\ndef h5_to_data_frame(h5file, integer_cols, table_name):\n \"\"\"Load h5 tables as Pandas DataFrame object\"\"\"\n \n table = h5file[table_name]\n col_dict = {}\n #cols = ['hhno', 'hhtaz']\n for col in table.keys():\n if col == 'sov_ff_time':\n pass\n elif col in integer_cols:\n my_array = np.asarray(table[col]).astype('int')\n else:\n my_array = np.asarray(table[col])\n col_dict[col] = my_array.astype(float)\n return(pd.DataFrame(col_dict))\n\ndef update_df(target_df, target_index, update_df, update_index, col_name):\n target_df[col_name] = 0\n target_df.set_index(target_index, inplace = True)\n update_df.set_index(update_index, inplace = True)\n target_df.update(update_df)\n target_df.reset_index(inplace = True)\n update_df.reset_index(inplace = True)\n\n return target_df\n\n\ndef recode(df, col, new_col, bins, labels, group_by_col):\n category = pd.cut(df[col],bins=bins,labels=labels)\n if new_col in df.columns:\n df = df.drop(columns = [new_col])\n df.insert(len(bins), new_col, category)\n\n return pd.crosstab(df[group_by_col], df[new_col]).rename_axis(None, axis=1)\n\n\nconfig = yaml.safe_load(open(\"config.yaml\"))\n\n# create output dir if it doesn't exist\nif not os.path.exists(config['output_dir']):\n os.makedirs(config['output_dir'])\n\n# Setup paths\npopsim_run_dir_path = Path(config['output_dir'])\nland_use_path = Path(config['input_land_use_path'])\npums_path = Path(config['input_pums_data_path'])\ngis_path = Path(config['input_gis_data_path'])\n\n# create sub-directories in popsim folder:\nfor folder in ['configs','data','output']:\n if os.path.exists(popsim_run_dir_path/folder):\n shutil.rmtree(popsim_run_dir_path/folder)\n os.makedirs(popsim_run_dir_path/folder)\n\n\n# Load GIS files\n# 2 layers are required, including regionwide PUMAs.\n# a layer that covers a specific study area that can be altered is provided\n# Only households within the study area will be available for allocation\nif str(gis_path)[-4:]=='.gdb':\n taz_study_area = gpd.read_file(gis_path, layer=config['taz_layer'])\n # Program will use taz_id & puma_id going forward\n taz_study_area.rename(columns={config['taz_id'] : 'taz_id'}, inplace = True)\n puma_gdf = gpd.read_file(gis_path, layer=config['puma_layer'])\n puma_gdf.rename(columns={config['puma_id'] : 'PUMA'}, inplace = True)\nelse:\n taz_study_area = gpd.read_file(gis_path/config['taz_layer']/'.shp')\n # Program will use taz_id & puma_id going forward\n taz_study_area.rename(columns={config['taz_id'] : 'taz_id'}, inplace = True)\n puma_gdf = gpd.read_file(gis_path/config['puma_layer']/'.shp')\n puma_gdf.rename(columns={config['puma_id'] : 'PUMA'}, inplace = True)\n\n# Load parcel data from Soundcast input as geoDataframe\nparcels_gdf = pd.read_csv(land_use_path/config['parcel_file'], sep = ' ')\nparcels_gdf.columns= 
parcels_gdf.columns.str.lower()\ngeometry = [Point(xy) for xy in zip(parcels_gdf['xcoord_p'], parcels_gdf['ycoord_p'])]\nparcels_gdf = parcels_gdf.drop(['ycoord_p', 'xcoord_p'], axis=1)\nparcels_gdf = gpd.GeoDataFrame(parcels_gdf, crs=\"EPSG:2285\", geometry=geometry)\n\n# Load synthetic household and person tables from a Soundcast run\nhdf_file = h5py.File(land_use_path/config['synthetic_pop_file'], \"r\")\npersons = h5_to_data_frame(hdf_file, ['id'], 'Person')\nhh = h5_to_data_frame(hdf_file, ['id'], 'Household')\n\n# Select parcels that are within the study area\nparcels_cols = list(parcels_gdf.columns)\n#parcels_cols.extend([config['taz_id'], config['block_group_id'], config['puma_id']])\nparcels_cols.extend(['taz_id', 'PUMA'])\nparcels_gdf = gpd.sjoin(parcels_gdf, taz_study_area, how='inner')\nparcels_gdf = parcels_gdf[[col for col in parcels_cols if col in parcels_gdf.columns]]\n\n# Identify PUMA for a TAZ based on centroid location\ntaz_points = taz_study_area.copy()\ntaz_points.geometry = taz_points.geometry.centroid\ntaz_puma_gdf = gpd.sjoin(taz_points, puma_gdf, how='inner')\ntaz_puma_gdf = taz_puma_gdf[['taz_id', 'PUMA']]\ntaz_puma_gdf['region'] = 1\n\n# Write PopulationSim geographic crosswalk between TAZs and PUMAs\n#taz_puma_gdf.rename(columns={config['taz_id']:'taz_id', config['puma_id']:'PUMA'}, inplace = True)\nfor col in taz_puma_gdf.columns:\n taz_puma_gdf[col] = taz_puma_gdf[col].astype('int64')\n\ntaz_puma_gdf.to_csv(popsim_run_dir_path/'data'/'geo_cross_walk.csv', index=False)\n\n# Build PopulationSim control file from future land use\n# Distribution of household and person characteristics will be applied to any change in totals\nstudy_area_hhs = hh[hh['hhparcel'].isin(parcels_gdf[config['parcel_id']])]\nstudy_area_hhs = update_df(study_area_hhs, 'hhparcel', parcels_gdf, config['parcel_id'], 'taz_id')\n\nstudy_area_persons = persons[persons['hhno'].isin(study_area_hhs['hhno'])]\nstudy_area_persons = update_df(study_area_persons, 'hhno', study_area_hhs, 'hhno', 'taz_id')\n\n# Get household worker distribution from person table\nworkers = study_area_persons[study_area_persons['pwtyp']>0]\nhh_workers = workers.groupby('hhno').size().reset_index()\nhh_workers = hh_workers.rename(columns={0:'hhwkrs'})\nstudy_area_hhs = update_df(study_area_hhs, 'hhno', hh_workers, 'hhno', 'hhwkrs')\n\n# Household categories\ncol_list = []\n# total households:\ncol_list.append(pd.DataFrame(study_area_hhs.groupby('taz_id').size(), columns = ['hh_taz_weight']))\n# households size:\ncol_list.append(recode(study_area_hhs, 'hhsize', 'num_hh', [0, 1, 2, 3, 4, 5, 6, 200], \n ['hh_size_1','hh_size_2', 'hh_size_3', 'hh_size_4', 'hh_size_5', 'hh_size_6', 'hh_size_7_plus'], 'taz_id'))\n# workers:\ncol_list.append(recode(study_area_hhs, 'hhwkrs', 'num_workers', [-1, 0, 1, 2, 999], \n ['workers_0','workers_1', 'workers_2', 'workers_3_plus'], 'taz_id'))\n# income \ncol_list.append(recode(study_area_hhs, 'hhincome', 'income_cat', [-1, 15000, 30000, 60000, 100000, 999999999], \n ['income_lt15','income_gt15-lt30', 'income_gt30-lt60', 'income_gt60-lt100', 'income_gt100'], 'taz_id'))\n\n# Person categories\n# Total persons\ncol_list.append(pd.DataFrame(study_area_persons.groupby('taz_id').size(), columns = ['pers_taz_weight']))\n# School:\ncol_list.append(recode(study_area_persons, 'pstyp', 'school', [-1, 0, 100], ['school_no','school_yes'], 'taz_id'))\n# Gender:\ncol_list.append(recode(study_area_persons, 'pgend', 'gender', [0, 1, 100], ['male','female'], 'taz_id'))\n# 
Age:\ncol_list.append(recode(study_area_persons, 'pagey', 'age', [-1, 19, 35, 60, 999], \n ['age_19_and_under', 'age_20_to_35', 'age_35_to_60', 'age_above_60'], 'taz_id'))\n# Worker status\ncol_list.append(recode(study_area_persons, 'pwtyp', 'worker', [0, 999], ['is_worker'], 'taz_id'))\n\n# Race\ncol_list.append(recode(study_area_persons, 'prace', 'num_hh', [0, 1, 2, 3, 4, 5, 6, 200], \n ['white_non_hispanic', 'black_non_hispanic', 'asian_non_hispanic', 'other_non_hispanic', 'two_or_more_races_non_hispanic', 'white_hispanic', 'non_white_hispanic'], 'taz_id'))\n\ndf = pd.concat(col_list, axis = 1)\ndf.reset_index(inplace = True)\n#df.rename(columns={config['taz_id']:'taz_id'}, inplace = True)\ndf['taz_id'] = df['taz_id'].astype('int64')\n\n# For zones in the study area that have no synthetic household data use an average of households in the study area\nunpopulated_tazs = taz_study_area[~taz_study_area.taz_id.isin(df.taz_id)][['taz_id']]\n\ndf['imputed_regional_dist'] = 0 # Flag to identify this zone had no controled distribution\nunpopulated_tazs['imputed_regional_dist'] = 1\ndf = df.append(unpopulated_tazs)\ndf = df.sort_values('taz_id')\ndf = df.drop_duplicates()\n\n# Check that all TAZs have parcels; if not these should be purposefully excluded from user_allocation.csv\n_filter = df['taz_id'].isin(parcels_gdf['taz_p'].unique())\nif len(df[~_filter]) > 0:\n for i in df[~df['taz_id'].isin(parcels_gdf['taz_p'].unique())]['taz_id'].values:\n print('no parcels for study area zone: ID: ' + str(i))\n df = df[_filter]\n\n# Define household totals from allocation file\nallocate_df = df[['taz_id', 'hh_taz_weight','pers_taz_weight']]\nallocate_df.rename(columns={'hh_taz_weight' : 'households', 'pers_taz_weight': 'persons'}, inplace=True)\nallocate_df = allocate_df.merge(parcels_gdf.groupby('taz_id')['emptot_p'].sum().reset_index(), how='left', on='taz_id')\nallocate_df.rename(columns={'emptot_p' : 'employment'}, inplace=True)\nallocate_df.fillna(0, inplace=True)\nallocate_df = allocate_df.astype('int')\nallocate_df.to_csv(popsim_run_dir_path/'data'/'user_allocation.csv', index=False)\ndf.fillna(0, inplace=True)\n\n## Enforce integers\ndf = df.astype('int')\ndf.to_csv(popsim_run_dir_path/'data'/'future_controls.csv', index=False)\n\n# Create seed hh and person files; include only seed households and persons from PUMAs within the study area\nseed_hh = pd.read_csv(pums_path/config['seed_hh_file'])\nseed_hh = seed_hh[seed_hh['PUMA'].isin(taz_puma_gdf['PUMA'])]\nseed_hh.to_csv(popsim_run_dir_path/'data'/'seed_households.csv', index=False)\n\nseed_persons = pd.read_csv(pums_path/config['seed_person_file'])\nseed_persons = seed_persons[seed_persons['hhnum'].isin(seed_hh['hhnum'])]\nseed_persons.to_csv(popsim_run_dir_path/'data'/'seed_persons.csv', index=False)","repo_name":"psrc/soundcast_landuse_scenario_builder","sub_path":"generate_controls.py","file_name":"generate_controls.py","file_ext":"py","file_size_in_byte":9464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17078176442","text":"from __future__ import absolute_import\n__author__ = 'maartenbreddels'\n\nimport asyncio\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.websocket\nimport tornado.auth\nimport tornado.gen\nimport threading\nimport logging\nimport vaex as vx\nimport vaex.utils\nimport argparse\nimport vaex.events\nimport vaex.execution\nimport vaex.multithreading\nimport tornado.escape\nfrom cachetools import LRUCache\nimport 
sys\n\nfrom vaex.encoding import serialize, deserialize, Encoding\nimport vaex.server.service\nimport vaex.asyncio\nimport vaex.server.dataframe\nimport vaex.core._version\nimport vaex.server._version\nimport vaex.server.dataframe\n\nfrom .utils import exception, error\nimport vaex.server.websocket\n\n\nlogger = logging.getLogger(\"vaex.webserver.tornado\")\n\n\nclass WebSocketHandler(tornado.websocket.WebSocketHandler):\n def initialize(self, service, webserver, submit_threaded, cache, cache_selection, datasets=None):\n self.service = service\n self.webserver = webserver\n self.submit_threaded = submit_threaded\n self.handler = vaex.server.websocket.WebSocketHandler(self.send, self.service, token=self.webserver.token, token_trusted=self.webserver.token_trusted)\n\n async def send(self, value):\n await self.write_message(value, binary=True)\n\n async def on_message(self, websocket_msg):\n # Tornado does not receive messages before the current is finished, this\n # avoids this limitation of tornado, so we can send progress/cancel information\n logger.debug(\"get msg: %r\", websocket_msg)\n try:\n # see https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task\n # TODO: replace after we drop 36 support asyncio.create_task(handler.handle_message(data['bytes']))\n asyncio.ensure_future(self._on_message(websocket_msg))\n except:\n logger.exception(\"creating task\")\n\n async def _on_message(self, websocket_msg):\n logger.debug(\"handle msg: %r\", websocket_msg)\n await self.handler.handle_message(websocket_msg)\n\n def on_close(self):\n logger.debug(\"WebSocket closed\")\n\n\nMB = 1024**2\nGB = MB * 1024\n\n\nclass WebServer(threading.Thread):\n def __init__(self, address=\"127.0.0.1\", port=9000, webserver_thread_count=2, cache_byte_size=500 * MB,\n token=None, token_trusted=None, base_url=None,\n cache_selection_byte_size=500 * MB, datasets=[], compress=True, development=False, threads_per_job=4):\n threading.Thread.__init__(self)\n self._test_latency = None # for testing purposes\n self.setDaemon(True)\n self.address = address\n self.port = port\n self.started = threading.Event()\n self.service = None\n self.webserver_thread_count = webserver_thread_count\n self.threads_per_job = threads_per_job\n self.base_url = base_url\n if self.base_url is None:\n if self.port == 80:\n self.base_url = f'{self.address}'\n else:\n self.base_url = f'{self.address}:{self.port}'\n\n self.service_bare = vaex.server.service.Service({})\n self.service_threaded = vaex.server.service.AsyncThreadedService(self.service_bare, self.webserver_thread_count,\n self.threads_per_job)\n self.service = self.service_threaded\n self.set_datasets(datasets)\n self.token = token\n self.token_trusted = token_trusted\n\n self.cache = LRUCache(cache_byte_size, getsizeof=sys.getsizeof)\n self.cache_selection = LRUCache(cache_selection_byte_size, getsizeof=sys.getsizeof)\n\n self.options = dict(webserver=self, service=self.service, datasets=datasets, submit_threaded=self.submit_threaded, cache=self.cache,\n cache_selection=self.cache_selection)\n\n # tornado.web.GZipContentEncoding.MIN_LENGTH = 1\n tornado.web.GZipContentEncoding.CONTENT_TYPES.add(\"application/octet-stream\")\n self.application = tornado.web.Application([\n (r\"/websocket\", WebSocketHandler, self.options),\n ], compress_response=compress, debug=development)\n logger.debug(\"compression set to %r\", compress)\n logger.debug(\"cache size set to %s\", vaex.utils.filesize_format(cache_byte_size))\n logger.debug(\"thread count set to %r\", 
self.webserver_thread_count)\n\n def set_datasets(self, datasets):\n self.datasets = list(datasets)\n self.datasets_map = dict([(ds.name, ds) for ds in self.datasets])\n self.service_bare.df_map = self.datasets_map\n\n def submit_threaded(self, callable, *args, **kwargs):\n def execute():\n value = callable(*args, **kwargs)\n return value\n future = self.thread_pool.submit(execute)\n return future\n\n def serve(self):\n self.mainloop()\n\n def serve_threaded(self):\n logger.debug(\"start thread\")\n if tornado.version_info[0] >= 5:\n from tornado.platform.asyncio import AnyThreadEventLoopPolicy\n # see https://github.com/tornadoweb/tornado/issues/2308\n asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())\n self.start()\n logger.debug(\"wait for thread to run\")\n self.started.wait()\n logger.debug(\"make tornado io loop the main thread's current\")\n # this will make the main thread use this ioloop as current\n # self.ioloop.make_current()\n\n def run(self):\n self.mainloop()\n\n def mainloop(self):\n logger.info(\"serving at http://%s:%d\" % (self.address, self.port))\n self.ioloop = tornado.ioloop.IOLoop.current()\n # listen doesn't return a server object, which we need to close\n # self.application.listen(self.port, address=self.address)\n from tornado.httpserver import HTTPServer\n self.server = HTTPServer(self.application)\n try:\n self.server.listen(self.port, self.address)\n except: # noqa\n self.started.set()\n raise\n self.started.set()\n if tornado.version_info[0] >= 5:\n from tornado.platform.asyncio import AnyThreadEventLoopPolicy\n # see https://github.com/tornadoweb/tornado/issues/2308\n asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())\n\n try:\n self.ioloop.start()\n except RuntimeError:\n pass # TODO: not sure why this happens in the unittest\n\n def stop_serving(self):\n logger.debug(\"stop server\")\n self.server.stop()\n logger.debug(\"stop io loop\")\n # self.ioloop.stop()\n self.service.stop()\n # for thread_pool in self.thread_pools:\n # thread_pool.shutdown()\n\n\ndefaults_yaml = \"\"\"\naddress: 0.0.0.0\nport: 9000\nfilenames: []\nverbose: 2\ncache: 500000000\ncompress: true\nfilename: []\ndevelopment: False\nthreads_per_job: 4\n\"\"\"\n\n\ndef main(argv, WebServer=WebServer):\n\n parser = argparse.ArgumentParser(argv[0])\n parser.add_argument(\"filename\", help=\"filename for dataset\", nargs='*')\n parser.add_argument(\"--address\", help=\"address to bind the server to (default: %(default)s)\", default=\"0.0.0.0\")\n parser.add_argument(\"--base-url\", help=\"External base url (default is
:port)\", default=None)\n parser.add_argument(\"--port\", help=\"port to listen on (default: %(default)s)\", type=int, default=9000)\n parser.add_argument('--verbose', '-v', action='count', default=2)\n parser.add_argument('--cache', help=\"cache size in bytes for requests, set to zero to disable (default: %(default)s)\", type=int, default=500000000)\n parser.add_argument('--compress', help=\"compress larger replies (default: %(default)s)\", default=True, action='store_true')\n parser.add_argument('--no-compress', dest=\"compress\", action='store_false')\n parser.add_argument('--development', default=False, action='store_true', help=\"enable development features (auto reloading)\")\n parser.add_argument('--add-example', default=False, action='store_true', help=\"add the example dataset\")\n parser.add_argument('--token', default=None, help=\"optionally protect server access by a token\")\n parser.add_argument('--token-trusted', default=None, help=\"when using this token, the server allows more deserialization (e.g. pickled function)\")\n parser.add_argument('--threads-per-job', default=4, type=int, help=\"threads per job (default: %(default)s)\")\n # config = layeredconfig.LayeredConfig(defaults, env, layeredconfig.Commandline(parser=parser, commandline=argv[1:]))\n config = parser.parse_args(argv[1:])\n\n verbosity = [\"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\"]\n logging.getLogger(\"vaex\").setLevel(verbosity[config.verbose])\n # import vaex\n # vaex.set_log_level_debug()\n\n filenames = []\n filenames = config.filename\n datasets = []\n for filename in filenames:\n df = vx.open(filename)\n if df is None:\n print(\"error opening file: %r\" % filename)\n else:\n datasets.append(df)\n if config.add_example:\n df_example = vaex.example()\n df_example.name = \"example\"\n datasets.append(df_example)\n\n datasets = datasets or [vx.example()]\n\n # datasets = [ds for ds in datasets if ds is not None]\n logger.info(\"datasets:\")\n for dataset in datasets:\n logger.info(\"\\thttp://%s:%d/%s or ws://%s:%d/%s\", config.address, config.port, dataset.name, config.address, config.port, dataset.name)\n server = WebServer(datasets=datasets, address=config.address, base_url=config.base_url, port=config.port, cache_byte_size=config.cache,\n token=config.token, token_trusted=config.token_trusted,\n compress=config.compress, development=config.development,\n threads_per_job=config.threads_per_job)\n server.serve()\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"vaexio/vaex","sub_path":"packages/vaex-server/vaex/server/tornado_server.py","file_name":"tornado_server.py","file_ext":"py","file_size_in_byte":9913,"program_lang":"python","lang":"en","doc_type":"code","stars":8057,"dataset":"github-code","pt":"53"} +{"seq_id":"43227943555","text":"from pyimagesearch.transform import four_point_transform\nfrom skimage.filters import threshold_local\nimport numpy as np\nimport argparse\nimport cv2\n#install imutils package using the command \"pip install imutils\"\nimport imutils\n\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required = True,\n\thelp = \"Path to the image to be scanned along with the extension\")\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args[\"image\"])\nratio = image.shape[0] / 500.0\norig = image.copy()\nimage = imutils.resize(image, height = 500)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ngray = cv2.GaussianBlur(gray, (5, 5), 0)\nedged = cv2.Canny(gray, 75, 200)\n\ncountour = cv2.findContours(edged.copy(), cv2.RETR_LIST, 
cv2.CHAIN_APPROX_SIMPLE)\ncountour = imutils.grab_contours(countour)\ncountour = sorted(countour, key = cv2.contourArea, reverse = True)[:5]\n\n\nfor c in countour:\n\tperi = cv2.arcLength(c, True)\n\tapprox = cv2.approxPolyDP(c, 0.02 * peri, True)\n\n\t# if our approximated contour has four points, then we can assume that we have found our screen\n\tif len(approx) == 4:\n\t\tscreenCnt = approx\n\t\tbreak\n\ncv2.drawContours(image, [screenCnt], -1, (255, 255, 0), 2)\nwarped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)\nwarped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)\nT = threshold_local(warped, 11, offset = 10, method = \"gaussian\")\nwarped = (warped > T).astype(\"uint8\") * 255\ncv2.imshow(\"Original\", imutils.resize(orig, height = 650))\ncv2.imshow(\"Scanned\", imutils.resize(warped, height = 650))\ncv2.waitKey(0)","repo_name":"windzhougithub/Document_scanner","sub_path":"document-scanner/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73113767251","text":"from django.shortcuts import render, get_object_or_404, reverse\nfrom django.views import generic, View\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.models import User\nfrom django.views.generic.edit import UpdateView, DeleteView\nfrom django.utils.text import slugify\nfrom .models import Event\nfrom .forms import CommentForm, EventForm\nfrom datetime import datetime\n\n\nclass EventList(generic.ListView):\n model = Event\n queryset = Event.objects.filter(status=1).exclude(\n date__lt=datetime.today()\n ).order_by(\"date\")\n template_name = \"index.html\"\n paginate_by = 8\n\n\nclass EventDetail(View):\n\n def get(self, request, slug):\n queryset = Event.objects.filter(status=1)\n event = get_object_or_404(queryset, slug=slug)\n comments = event.comments.order_by('created_on')\n attended = False\n if event.attendees.filter(id=self.request.user.id).exists():\n attended = True\n\n return render(\n request,\n \"event.html\",\n {\n \"event\": event,\n \"comments\": comments,\n \"commented\": False,\n \"attended\": attended,\n \"comment_form\": CommentForm()\n },\n )\n\n def post(self, request, slug):\n queryset = Event.objects.filter(status=1)\n event = get_object_or_404(queryset, slug=slug)\n comments = event.comments.order_by('created_on')\n attended = False\n if event.attendees.filter(id=self.request.user.id).exists():\n attended = True\n\n comment_form = CommentForm(data=request.POST)\n\n if comment_form.is_valid():\n comment_form.instance.name = request.user.username\n comment = comment_form.save(commit=False)\n comment.event = event\n comment.save()\n else:\n comment_form = CommentForm()\n return render(\n request,\n \"event.html\",\n {\n \"event\": event,\n \"comments\": comments,\n \"commented\": True,\n \"attended\": attended,\n \"comment_form\": CommentForm()\n },\n )\n\n\nclass PostAttend(View):\n\n def post(self, request, slug):\n event = get_object_or_404(Event, slug=slug)\n\n if event.attendees.filter(id=request.user.id).exists():\n event.attendees.remove(request.user)\n else:\n event.attendees.add(request.user)\n\n return HttpResponseRedirect(reverse('event_detail', args=[slug]))\n\n\nclass AddEvent(View):\n\n def get(self, request):\n event_form = EventForm()\n return render(request, 'add_event.html', {\n 'form': event_form\n })\n\n def post(self, request):\n error = False\n event_form = EventForm(request.POST, request.FILES)\n if 
event_form.is_valid():\n event_form.instance.author = User.objects.get(\n username=request.user.username\n )\n event_form.instance.slug = slugify(event_form.instance.title)\n event_form.instance.status = 1\n event_form.save()\n error = False\n submitted = True\n else:\n submitted = False\n error: True\n\n return render(request, 'add_event.html', {\n 'form': event_form, 'submitted': submitted, 'error': error,\n })\n\n\nclass DeleteEvent(DeleteView):\n\n model = Event\n template_name = 'delete_event.html'\n deleted = True\n success_url = '/'\n\n\nclass EditEvent(UpdateView):\n\n model = Event\n form_class = EventForm\n template_name_suffix = '_edit_form'\n template_name = 'event_edit_form.html'\n edited = True\n success_url = '/'\n","repo_name":"jessicarydberg/SocialClimbing","sub_path":"social/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12669374511","text":"#!/usr/bin/env python3\nimport mysql.connector\nimport dbConnection\nimport datetime\n\nTodayFormatDt = datetime.date.today()\n\nprint(TodayFormatDt)\ndef isWeekend(date):\n weekno = date.weekday()\n if weekno > 4:\n return (\"1\")\n else:\n return (\"\")\nprint(isWeekend(TodayFormatDt))\n# Weekend(TodayFormatDt)\nconn = dbConnection.connect()\n\ntry:\n mycursor = conn.cursor(dictionary = True)\n begin = datetime.datetime.strptime(\"2040-07-01\", \"%Y-%m-%d\")\n \n \n print(begin)\n end = datetime.datetime.strptime(\"2050-07-31\", \"%Y-%m-%d\")\n period = [begin + datetime.timedelta(days=x) for x in range(0, (end-begin).days+1)]\n \n for dt in period: \n weekend = 0\n weekday = 0\n result = dt\n weekend = isWeekend(result)\n if weekend == \"\":\n weekend = 0\n weekday = 1\n\n sql = \"INSERT INTO roadmoto_dynamiccalendar (date, weekend, weekday) VALUES (%s, %s, %s)\"\n val = (result, weekend, weekday)\n mycursor.execute(sql, val)\n conn.commit()\n\nexcept mysql.connector.Error as err:\n print(err) \n\nfinally:\n conn.close()\n\n\n","repo_name":"somchaisaeueng/PHP_Python","sub_path":"Task_2/Python/InsertDates.py","file_name":"InsertDates.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"38890563700","text":"import numpy as np\nimport cv2\nimport random\n\nrandom.seed(0)\n\n\ndef adjust(img, alpha=1.0, beta=0.0):\n # 積和演算を行う。\n dst = alpha * img + beta\n # [0, 255] でクリップし、uint8 型にする。\n return np.clip(dst, 0, 255).astype(np.uint8)\n\nnum = 10\n\nhuman_name = [\"kusumoto\", \"orui\", \"saitou\", \"nomura\"]\n\nfor n in range (len(human_name)):\n data_index = 1\n\n while True:\n\n # 画像を読み込む。\n img = cv2.imread(f\"data/{human_name[n]}_{data_index}.jpg\")\n\n if(type(img) is not np.ndarray):\n break\n\n list = np.loadtxt(f\"data/{human_name[n]}_{data_index}.txt\")\n\n for i in range(num):\n # alphaの値を決める\n alpha = random.uniform(0.2, 2.0)\n # print(alpha)\n\n # 明るさを変更する。\n img1 = adjust(img, alpha)\n\n cv2.imwrite(f\"processed_data/{human_name[n]}_{data_index}_bl_{i}.jpg\", img1)\n np.savetxt(f\"processed_data/{human_name[n]}_{data_index}_bl_{i}.txt\", [list], fmt=[\"%.0f\", \"%.6f\", \"%.6f\", \"%.6f\", \"%.6f\"])\n\n data_index += 1\n# ### ボックスの変化 ###\n# #読み込む\n# list = np.loadtxt(\"data_1/kusumoto.txt\")\n# #コピーして名前変える\n# for i in range(num):\n# np.savetxt(f\"data_1/kusumoto_bl_{i}.txt\", [list], fmt=[\"%.0f\", \"%.6f\", \"%.6f\", \"%.6f\", \"%.6f\"])\n\n 
\n","repo_name":"Kaito34/lab_stay_time","sub_path":"anotation/change_blightness.py","file_name":"change_blightness.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"70271729491","text":"# -*- encoding=utf-8 -*-\nfrom tkinter import messagebox\nfrom tkinter import ttk\nimport numpy as np\nimport common\nimport edit\nimport login\nfrom pub import load_json\nfrom pub import write_dict\n\nNAME = 'name'\nWIDTH = 'width'\nDEFAULT_WIDTH = 100\nGRADE_FILE_NAME = 'grade.json'\nSTUDENT_ID = \"student_id\"\nSTUDENT_NAME = \"student_name\"\nCLASS_NAME = \"class_name\"\nCOURSE_TITLE = \"course_title\"\nUSUAL_SCORE = \"usual_score\"\nTEST_SCORE = \"test_score\"\nFINAL_SCORE = \"final_score\"\nimport tkinter.colorchooser as cc\nfrom tkinter import *\n\n\ndef choose_color():\n choose = cc.askcolor()\n print(choose)\n color_value = choose[1]\n # button.config(bg=color_value)\n\n\ndef save(grades):\n success = True\n msg = ''\n new = []\n for one in grades:\n data_dict = dict()\n data_dict[STUDENT_ID] = one[0]\n data_dict[STUDENT_NAME] = one[1]\n data_dict[CLASS_NAME] = one[2]\n data_dict[COURSE_TITLE] = one[3]\n data_dict[USUAL_SCORE] = one[4]\n data_dict[TEST_SCORE] = one[5]\n data_dict[FINAL_SCORE] = one[6]\n new.append(data_dict)\n try:\n write_dict(GRADE_FILE_NAME, new, encoding='utf-8')\n except Exception as e:\n success = False\n msg = '{}'.format(e)\n return success, msg\n\n\nclass SelectFrame:\n @staticmethod\n def get_head():\n # 重构\n head = [{NAME: '学号', WIDTH: '80', ANCHOR: CENTER, },\n {NAME: '姓名', WIDTH: '80', ANCHOR: CENTER, },\n {NAME: '班级', WIDTH: '80', ANCHOR: CENTER, },\n {NAME: '课程名称', WIDTH: '100', ANCHOR: CENTER, },\n {NAME: '平时成绩', WIDTH: '80', ANCHOR: CENTER, },\n {NAME: '考试成绩', WIDTH: '80', ANCHOR: CENTER, },\n {NAME: '最终成绩', WIDTH: '80', ANCHOR: CENTER, },\n ]\n return head\n\n @staticmethod\n def get_data():\n # 重构\n json_data = load_json(GRADE_FILE_NAME)\n grades = list(map(lambda x: [\n x.get(STUDENT_ID, ''),\n x.get(STUDENT_NAME, ''),\n x.get(CLASS_NAME, ''),\n x.get(COURSE_TITLE, ''),\n x.get(USUAL_SCORE, ''),\n x.get(TEST_SCORE, ''),\n x.get(FINAL_SCORE, '')\n ], json_data))\n grades.sort()\n print('所有学生的信息:{}'.format(grades))\n # data = [\n # ['1001', '~', '+', '高2015-3班', '~', '高2015-3班', '~'],\n # ['小红1', '~', '+', '高2015-3班', '~', '高2015-3班', '~'],\n # ['小红1', '~', '+', '高2015-3班', '~', '高2015-3班', '~'],\n # ['小红1', '~', '+', '高2015-3班', '~', '高2015-3班', '~'],\n # ]\n return grades\n\n @staticmethod\n def delete(data):\n # 重构\n # [('小红1', '~', '+', '高2015-6班', '~'), ('小红1', '~', '+', '高2015-7班', '~')]\n\n success = True\n msg = ''\n json_data = load_json(GRADE_FILE_NAME)\n grades = list(map(lambda x: [\n x.get(STUDENT_ID, ''),\n x.get(STUDENT_NAME, ''),\n x.get(CLASS_NAME, ''),\n x.get(COURSE_TITLE, ''),\n x.get(USUAL_SCORE, ''),\n x.get(TEST_SCORE, ''),\n x.get(FINAL_SCORE, '')\n ], json_data))\n print('所有学生成绩信息是:{}'.format(grades))\n remove_ids = []\n for delete_one in data:\n delete_one = list(delete_one)\n print('要删除的信息是{}'.format(delete_one))\n for one in grades:\n # print('原来')\n if delete_one == one:\n print('找到要删除的下标:{}'.format(grades.index(one)))\n remove_ids.append(grades.index(one))\n # print(score_data.index(one))\n remove_ids.sort()\n remove_ids.reverse()\n print('要删除的下标有:{}'.format(remove_ids))\n for remove_id in remove_ids:\n grades.pop(remove_id)\n print('删除后的学生剩余信息:{}'.format(grades))\n success, msg = save(grades)\n return success, msg\n\n def 
__init__(self,\n win, # 窗口\n title='查询', # 窗口标题\n width=800, # 窗口宽度\n height=500, # 窗口高度\n line=10, # 显示的行数\n button_distance=10, # 按钮的间距\n button_width=10): # 按钮的宽度\n self.win = win\n\n win.title(title)\n common.set_size_center(win, width, height) # 设置位置\n self._set_frame(line, button_distance, button_width)\n self.refresh()\n\n def search(self):\n grades = self.get_data()\n\n # choose_color()\n stu_id = self.stu_entry.get().strip()\n print('要查询的学号是:{}'.format(stu_id))\n stu_name = self.name_entry.get().strip()\n print('要查询的姓名是:{}'.format(stu_name))\n if stu_id and stu_name:\n see_data = []\n for grade in grades:\n grade_id = grade[0]\n grade_name = grade[1]\n if grade_id == stu_id and grade_name == stu_name:\n see_data.append(grade)\n self._clear()\n self._insert_data(see_data)\n elif stu_id and not stu_name:\n see_data = []\n for grade in grades:\n grade_id = grade[0]\n if grade_id == stu_id:\n see_data.append(grade)\n self._clear()\n self._insert_data(see_data)\n elif stu_name and not stu_id:\n see_data = []\n for grade in grades:\n grade_name = grade[1]\n if grade_name == stu_name:\n see_data.append(grade)\n self._clear()\n self._insert_data(see_data)\n else:\n pass\n\n pass\n\n def _head_frame(self):\n frame = Frame(self.win)\n Label(frame, text='信息详情阅览', font=('黑体', 20)).pack()\n frame.pack(pady=30)\n\n def _table_frame(self, height, ):\n frame = Frame(self.win, )\n frame.pack()\n scrollbar_y = Scrollbar(frame, orient=VERTICAL)\n scrollbar_x = Scrollbar(frame, orient=HORIZONTAL)\n head = self.get_head()\n columns = list(map(lambda x: x.get(NAME), head))\n self.table = ttk.Treeview(\n master=frame, # 父容器\n height=height, # 高度,可显示height行\n columns=columns, # 显示的列\n show='headings', # 隐藏首列\n yscrollcommand=scrollbar_y.set, # 滚动条\n xscrollcommand=scrollbar_x.set, # 滚动条\n )\n for one in head:\n name = one.get(NAME, '')\n anchor = one.get(ANCHOR, LEFT)\n width = one.get(WIDTH, DEFAULT_WIDTH)\n # min_width = one.get(MINWIDTH, DEFAULT_WIDTH)\n self.table.heading(name, text=name, anchor=anchor)\n self.table.column(name, width=width, anchor=anchor, minwidth=width)\n scrollbar_y.config(command=self.table.yview)\n scrollbar_x.config(command=self.table.xview)\n scrollbar_y.pack(side=RIGHT, fill=Y)\n scrollbar_x.pack(side=BOTTOM, fill=X)\n self.table.pack(fill=BOTH, expand=True)\n\n def _button_frame(self, distance, width):\n frame = Frame(self.win)\n text = 'text'\n color = 'color'\n command = 'command'\n data = [\n {text: '刷新', color: 'yellow', command: self.refresh},\n {text: '添加', color: 'pink', command: self.add},\n {text: '修改', color: '#00ffff', command: self.update},\n {text: '删除', color: 'green', command: self._delete_choose},\n {text: '统计', color: '#8080ff', command: self.statistics},\n {text: '注销', color: '#ff8000', command: self.logout},\n ]\n for i in data:\n name = i.get(text)\n bg = i.get(color)\n cmd = i.get(command)\n btn = Button(frame, text=name, bg=bg, width=width, command=cmd)\n btn.pack(side=LEFT, padx=(0, distance))\n frame.pack(pady=10)\n\n def statistics(self):\n grades = self.get_data()\n if grades:\n s = 0\n for grade in grades:\n if grade[4]:\n s += int(grade[4])\n mean = s / len(grades)\n mean = '{:.2f}'.format(mean)\n print('平时成绩的均值是:{}'.format(mean))\n self.usual_label.config(text=mean)\n s = 0\n for grade in grades:\n if grade[5]:\n s += int(grade[5])\n mean = s / len(grades)\n mean = '{:.2f}'.format(mean)\n print('考试成绩的均值是:{}'.format(mean))\n self.test_label.config(text=mean)\n s = 0\n for grade in grades:\n if grade[6]:\n s += int(grade[6])\n mean = s / 
len(grades)\n mean = '{:.2f}'.format(mean)\n print('最终成绩的均值是:{}'.format(mean))\n self.final_label.config(text=mean)\n if grades:\n usual = list(map(lambda x:x[4],grades))\n usual = list(map(lambda x:x if x else '0' ,usual))\n usual = list(map(lambda x:int(x),usual))\n print('平时成绩是:{}'.format(usual))\n variance = '{:.2f}'.format(np.var(usual))\n print('平时成绩方差是:{}'.format(variance))\n self.usual_label2.config(text=variance)\n\n usual = list(map(lambda x:x[5],grades))\n usual = list(map(lambda x:x if x else '0' ,usual))\n usual = list(map(lambda x:int(x),usual))\n print('考试成绩是:{}'.format(usual))\n variance = '{:.2f}'.format(np.var(usual))\n print('考试成绩方差是:{}'.format(variance))\n self.test_label2.config(text=variance)\n\n usual = list(map(lambda x:x[6],grades))\n usual = list(map(lambda x:x if x else '0' ,usual))\n usual = list(map(lambda x:int(x),usual))\n print('最终成绩是:{}'.format(usual))\n variance = '{:.2f}'.format(np.var(usual))\n print('最终成绩方差是:{}'.format(variance))\n self.final_label2.config(text=variance)\n\n\n\n def _search_frame(self):\n frame = Frame()\n label_font = ('宋体', 16)\n Label(frame, text='学 号', font=label_font).pack(side=LEFT)\n entry_width = 12\n self.stu_entry = Entry(frame, font=label_font, width=entry_width)\n padx_entry_label = 10\n self.stu_entry.pack(side=LEFT, padx=padx_entry_label)\n Label(frame, text='姓 名', font=label_font).pack(side=LEFT)\n self.name_entry = Entry(frame, font=label_font, width=entry_width)\n self.name_entry.pack(side=LEFT, padx=padx_entry_label)\n frame.pack(pady=(5, 15))\n Button(frame, text='查询', width=8, bg='#00ff00', reli='g', font=('宋体', 12), command=self.search).pack(side=LEFT,\n padx=10)\n\n def _statistics_frame(self):\n frame = Frame()\n frame.pack()\n frame1 = Frame(frame)\n label_font = ('宋体', 16)\n x_distance = 0\n label_width = 8\n Label(frame1, text='均值:', font=label_font).pack(side=LEFT, padx=(0, 0))\n Label(frame1, text='平时成绩', font=label_font).pack(side=LEFT, padx=(0, 0))\n self.usual_label = Label(frame1, text='', reli='g', width=label_width, font=label_font)\n self.usual_label.pack(side=LEFT, padx=(x_distance, 0))\n Label(frame1, text='考试成绩', font=label_font).pack(side=LEFT, padx=(x_distance, 0))\n self.test_label = Label(frame1, text='', reli='g', width=label_width, font=label_font)\n self.test_label.pack(side=LEFT, padx=(x_distance, 0))\n Label(frame1, text='最终成绩', font=label_font).pack(side=LEFT, padx=(x_distance, 0))\n self.final_label = Label(frame1, text='', reli='g', width=label_width, font=label_font)\n self.final_label.pack(side=LEFT, padx=(x_distance, 0))\n frame1.pack()\n\n frame2 = Frame(frame)\n label_font = ('宋体', 16)\n Label(frame2, text='方差:', font=label_font).pack(side=LEFT, padx=(0, 0))\n Label(frame2, text='平时成绩', font=label_font).pack(side=LEFT, padx=(0, 0))\n self.usual_label2 = Label(frame2, text='', reli='g', width=label_width, font=label_font)\n self.usual_label2.pack(side=LEFT, padx=(x_distance, 0))\n Label(frame2, text='考试成绩', font=label_font).pack(side=LEFT, padx=(x_distance, 0))\n self.test_label2 = Label(frame2, text='', reli='g', width=label_width, font=label_font)\n self.test_label2.pack(side=LEFT, padx=(x_distance, 0))\n Label(frame2, text='最终成绩', font=label_font).pack(side=LEFT, padx=(x_distance, 0))\n self.final_label2 = Label(frame2, text='', reli='g', width=label_width, font=label_font)\n self.final_label2.pack(side=LEFT, padx=(x_distance, 0))\n frame2.pack()\n pass\n\n def _set_frame(self, height, button_distance, button_width):\n self._head_frame()\n self._search_frame()\n 
self._table_frame(height, )\n self._button_frame(button_distance, button_width)\n self._statistics_frame()\n pass\n\n def refresh(self):\n self._clear() # 清空所有数据\n data = self.get_data() # 获取数据\n self._insert_data(data) # 插入数据\n\n def add(self):\n common.clear_child(self.win) # 清空元素\n edit.MyFrame(self.win, update_data=[]) # 添加页面\n pass\n\n def update(self):\n selects = self.table.selection()\n if len(selects) != 1:\n messagebox.showwarning('提示', '选择一条记录进行修改')\n else:\n data = self.table.item(selects[0], 'values') # 获取选中的数据\n common.clear_child(self.win) # 清空组件\n edit.MyFrame(self.win, update_data=data,head='修改信息') # 修改界面\n pass\n\n def _delete_choose(self):\n selection = self.table.selection() # 获取选中的数据\n data = []\n for select in selection:\n data.append(self.table.item(select, 'values')) # 获取item的值\n if selection:\n flag = messagebox.askyesno('提示信息', '确认删除选中数据?')\n if flag:\n success, msg = self.delete(data) # 删除数据\n if success:\n for item in selection:\n self.table.delete(item) # 删除页面行数\n else:\n messagebox.showinfo('提示信息', '删除失败!\\n异常:{}'.format(msg))\n else:\n messagebox.showwarning('提示', '至少选择一条记录进行删除!')\n\n def _clear(self):\n for child in self.table.get_children():\n self.table.delete(child) # 清空所有的数据\n\n def _insert_data(self, data):\n for value in data:\n self.table.insert('', END, value=value) # 插入数据\n\n def logout(self):\n common.clear_child(self.win) # 清空组件\n login.LoginFrame(self.win) # 登录界面\n pass\n\n\nif __name__ == '__main__':\n pass\n w = Tk()\n obj = SelectFrame(w)\n w.mainloop()\n","repo_name":"rainbow-tan/rainbow","sub_path":"MyTkinter程序/学生成绩管理系统2_json文件/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":15510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"15432017815","text":"# SWEA.\n# 설계 목적:\n# 1. 예전에 풀어본 문제랑 비슷하네... x+y 좌표가 짝수냐 홀수냐로 체스판 만들 수 있음. 이거 응용\n# 하지만 같은 문제를 같게 풀면 노잼이니까... 이번에는 그냥 받으면서 구조화 시킬께엽\n# 개선점:\n# 1.\nT = int(input())\nfor case_num in range(1, T+1):\n N, M = tuple(map(int, input().split()))\n set_A = set()\n set_B = set()\n for put_in in range(N):\n write_in = list(input())\n for pick in range(M):\n set_A.add(write_in[pick]) if (put_in + pick) % 2 == 0 else set_B.add(write_in[pick])\n set_A.discard('?')\n set_B.discard('?')\n print(f'#{case_num} {\"impossible\" if set_A & set_B or len(set_B) > 1 or len(set_A) > 1 else \"possible\"}')\n\n\"\"\"\n3\n3 1\n#\n?\n.\n6 1\n?\n?\n?\n?\n?\n?\n2 1\n.\n.\n\"\"\"","repo_name":"DataMarksman/TIL","sub_path":"2.Algorithm/1. 
SWEA/14413.격자판_칠하기.py","file_name":"14413.격자판_칠하기.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9169864673","text":"import sqlite3\nfrom discord import Embed, File\ndb_path = 'data/databases/Enemies.db'\n\n\nclass EnemyControl:\n def create(self, name, bowed_name, hp, power, exp, money, icon=None, color=None):\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n\n if color:\n if icon:\n cursor.execute(\n f\"\"\"INSERT INTO Enemies(name, bowed_name, exp, power, hp, money, icon, color) VALUES(\"{name}\", \"{bowed_name}\", {exp}, {power}, {hp}, {money}, \"{icon}\", {color});\"\"\")\n else:\n cursor.execute(\n f\"\"\"INSERT INTO Enemies(name, bowed_name, exp, power, hp, money, color) VALUES(\"{name}\", \"{bowed_name}\", {exp}, {power}, {hp}, {money}, {color});\"\"\")\n else:\n if icon:\n cursor.execute(\n f\"\"\"INSERT INTO Enemies(name, bowed_name, exp, power, hp, money, icon) VALUES(\"{name}\", \"{bowed_name}\", {exp}, {power}, {hp}, {money}, \"{icon}\");\"\"\")\n else:\n cursor.execute(\n f\"\"\"INSERT INTO Enemies(name, bowed_name, exp, power, hp, money) VALUES(\"{name}\", \"{bowed_name}\", {exp}, {power}, {hp}, {money});\"\"\")\n\n conn.commit()\n conn.close()\n\n def read(self, name):\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n result = cursor.execute(f\"\"\"SELECT * FROM Enemies where name = \"{name}\";\"\"\").fetchone()\n conn.close()\n return result\n\n def get_all(self):\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n result = cursor.execute(f\"SELECT * FROM Enemies\").fetchall()\n conn.close()\n return result\n\n def get_all_by_id(self):\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n result = cursor.execute(f\"SELECT * FROM Enemies ORDER BY id;\").fetchall()\n conn.close()\n return result\n\n def edit(self, name, column, new):\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n try:\n new = int(new)\n cursor.execute(f\"\"\"UPDATE Enemies SET {column} = {new} where name = \"{name}\";\"\"\")\n except ValueError:\n cursor.execute(f\"\"\"UPDATE Enemies SET {column} = '{new}' where name = \"{name}\";\"\"\")\n conn.commit()\n conn.close()\n\n def remove(self, name):\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n cursor.execute(f\"\"\"DELETE FROM Enemies where name = \"{name}\";\"\"\")\n conn.commit()\n conn.close()\n\n def get_embed(self, mob: tuple):\n name, exp, power, hp, money = mob[1], mob[3], mob[4], mob[5], mob[6]\n color = int(mob[8]) if mob[8] else None\n file = None\n embed = Embed(title=name, description=\" \", color=color) if color else Embed(title=name, description=\" \")\n\n if mob[7]: # icon\n file = File(f\"data/pictures/{mob[7]}\")\n embed.set_thumbnail(url=f\"attachment://{mob[7]}\")\n embed.add_field(\n name=f\":heart: Здоровье    {hp}\\n\"\n f\":crossed_swords: Сила              {power}\\n\"\n f\":star: Опыт             {exp} \\n\"\n f\":coin: Монеты       {money}\",\n value=\" ‌‌‍‍\", inline=False)\n return file, embed\n","repo_name":"MrEluzium/UlviCorpseBot","sub_path":"Classes/Enemies.py","file_name":"Enemies.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26499600561","text":"import re, ssl\nfrom collections import Counter\nimport snowballstemmer\nfrom nltk import download\nfrom nltk.corpus import stopwords\nimport jpype\nimport os\nimport yaml\n\n## KULLANIMI 
##\n###############\n# 1) Örnek corpusun cümlelerini parcalara ayırır fonksiyonu ile parçalanır kelime-kelime haline getirilir\n# ve gereksiz kelimeler atılır\n# 2) Parçalara ayrılmış olan haber cümlenin öğelerine ayrılır\n# 3) Cümlenin öğelerine ayrılmış olan kelimelerin kökleri bulunur bir listeye konulur\n\n\ndef _find_libjvm():\n java_home = os.environ.get('JAVA_HOME', None)\n jre_home = os.environ.get('JRE_HOME', None)\n if java_home is not None:\n return _find_libjvm_in_java_home(java_home)\n elif jre_home is not None:\n return _find_libjvm_in_jre_home(jre_home)\n else:\n raise ValueError('Either set one of JAVA_HOME and JRE_HOME environment variables, or pass a path value to libjvmpath argument.')\n\ndef _find_libjvm_in_java_home(path):\n if os.name == 'nt': # windows\n path = os.path.join(path, 'jre', 'bin', 'server', 'jvm.dll')\n else:\n path = os.path.join(path, 'jre', 'lib', 'amd64', 'server', 'libjvm.so')\n if os.path.exists(path):\n return path\n else:\n raise IOError('Could not find libjvm in {}. Please make sure that you set JAVA_HOME environment variable correctly, or pass a value to libjvmpath argument'.format(path))\n\ndef _find_libjvm_in_jre_home(path):\n if os.name == 'nt': # windows\n path = os.path.join(path, 'bin', 'server', 'jvm.dll')\n else:\n path = os.path.join(path, 'lib', 'amd64', 'server', 'libjvm.so')\n if os.path.exists(path):\n return path\n else:\n raise IOError('Could not find libjvm in {}. Please make sure that you set JRE_HOME environment variable correctly, or pass a value to libjvmpath argument'.format(path))\n\nclass zemberek_api:\n def __init__(self,libjvmpath=None,zemberekJarpath=os.path.join(os.path.dirname(__file__), 'zemberek-full.jar')):\n if libjvmpath is not None:\n self.libjvmpath = libjvmpath\n else:\n self.libjvmpath = _find_libjvm()\n self.zemberekJarpath = zemberekJarpath\n jpype.startJVM(self.libjvmpath, \"-Djava.class.path=\" + self.zemberekJarpath, \"-ea\")\n\n def getTurkishTokenizer(self):\n Token = jpype.JClass(\"zemberek.tokenization.Token\")\n turkishTokenizer = jpype.JClass(\"zemberek.tokenization.TurkishTokenizer\").builder().ignoreTypes(Token.Type.Punctuation, Token.Type.NewLine, Token.Type.SpaceTab).build();\n return turkishTokenizer\n\n def getTurkishPOSTagger(self):\n turkishPOSTagger = jpype.JClass(\"zemberek.morphology.TurkishMorphology\").createWithDefaults();\n return turkishPOSTagger\n\nclass TokenizerTool:\n def __init__(self,tokenizer):\n self.turkishTokenizer = tokenizer\n\n def tokenize(self,text):\n \"\"\"\n input format: a paragraph of text\n output format: a list of sentences as lists of words.\n e.g.: [['Bu', 'bir', 'cümle'], ['Bu', 'da', 'diğeri']]\n \"\"\"\n sentences = text.split('.')\n tokenized_sentences = [self.turkishTokenizer.tokenizeToStrings(sentence) for sentence in sentences]\n return tokenized_sentences\n\nclass POSTaggerTool:\n def __init__(self, tagger):\n self.turkishPOSTagger = tagger\n\n def analyze_and_disambiguate(self,sentence):\n return self.turkishPOSTagger.analyzeAndDisambiguate(sentence)\n\n def pos_tag(self, sentence_analysis):\n \"\"\"\n input format: list of words\n output format: a word form, a word lemma, and a list of associated tags\n \"\"\"\n pos_tagged_sentence = []\n for sentence_word_analysis in sentence_analysis:\n word_analysis = sentence_word_analysis.getWordAnalysis()\n best_word_analysis = sentence_word_analysis.getBestAnalysis()\n best_lemma = self.get_best_lemma(best_word_analysis)\n primary_pos = best_word_analysis.getPos()\n tagged_word_tuple = 
(word_analysis.getInput(),best_lemma,[primary_pos.getStringForm()])\n pos_tagged_sentence.append(tagged_word_tuple)\n return pos_tagged_sentence\n\n def get_best_lemma(self, best):\n return best.getLemmas()[0]\n\nclass nltk_download:\n def __init__(self):\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n pass\n else:\n ssl._create_default_https_context = _create_unverified_https_context\n\n download()\n\nclass DictionaryTagger(object):\n def __init__(self, dictionary_paths):\n files = [open(path, 'r') for path in dictionary_paths]\n dictionaries = [yaml.load(dict_file) for dict_file in files]\n for file in files:\n file.close()\n self.dictionary = {}\n self.max_key_size = 0\n for curr_dict in dictionaries:\n for key in curr_dict:\n if key in self.dictionary:\n self.dictionary[key].extend(curr_dict[key])\n else:\n self.dictionary[key] = curr_dict[key]\n self.max_key_size = max(self.max_key_size, len(key))\n\n def tag(self, postagged_sentences):\n return [self.tag_sentence(sentence) for sentence in postagged_sentences]\n\n def tag_sentence(self, sentence, tag_with_lemmas=False):\n \"\"\"\n the result is only one tagging of all the possible ones.\n The resulting tagging is determined by these two priority rules:\n - longest matches have higher priority\n - search is made from left to right\n \"\"\"\n tag_sentence = []\n N = len(sentence)\n if self.max_key_size == 0:\n self.max_key_size = N\n i = 0\n while (i < N):\n j = min(i + self.max_key_size, N) #avoid overflow\n tagged = False\n while (j > i):\n expression_form = ' '.join([word[0] for word in sentence[i:j]]).lower()\n expression_lemma = ' '.join([word[1] for word in sentence[i:j]]).lower()\n if tag_with_lemmas:\n literal = expression_lemma\n else:\n literal = expression_form\n if literal in self.dictionary:\n #self.logger.debug(\"found: %s\" % literal)\n is_single_token = j - i == 1\n original_position = i\n i = j\n taggings = [tag for tag in self.dictionary[literal]]\n tagged_expression = (expression_form, expression_lemma, taggings)\n if is_single_token: #if the tagged literal is a single token, conserve its previous taggings:\n original_token_tagging = sentence[original_position][2]\n tagged_expression[2].extend(original_token_tagging)\n tag_sentence.append(tagged_expression)\n tagged = True\n else:\n j = j - 1\n if not tagged:\n tag_sentence.append(sentence[i])\n i += 1\n return tag_sentence\n\nclass Reviewer(object):\n\n def value_of(self, sentiment):\n if sentiment == 'positive': return 1\n if sentiment == 'negative': return -1\n return 0\n\n def sentiment_score(self, dict_tagged_sentences):\n return sum ([self.value_of(tag) for sentence in dict_tagged_sentences for token in sentence for tag in token[2]])\n","repo_name":"neslihanturan/Basic-Sentiment-Analysis","sub_path":"zemberek_python/main_libs.py","file_name":"main_libs.py","file_ext":"py","file_size_in_byte":7561,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"70307429012","text":"# linked list가 팰린드롬 구인 판별\n# 팰린드롬 구조 앞으로해도 뒤로 해도 똑같은 자료구조\nfrom qa_system import QA\nfrom collections import deque\n\n#내 풀이 -> 이게 더 빠름\ndef answer_function(input_string):\n input_list = input_string.split('->')\n input_string = ''.join(input_list)\n back_string = input_string[::-1]\n\n if input_string == back_string:\n return 'true'\n else :\n return 'false'\n\n#데크를 사용한 풀이\ndef answer_function2(input_string):\n input_list = input_string.split('->')\n q = deque(input_list)\n while len(q) 
> 1:\n if q.popleft() != q.pop():\n return 'false'\n return 'true'\n\nqa1 = QA('1->2', 'false')\nqa1.check_answer(answer_function)\nqa1.check_answer(answer_function2)\nqa2 = QA('1->2->2->1', 'true')\nqa2.check_answer(answer_function)\nqa2.check_answer(answer_function2)\n\n\n","repo_name":"zeus0007/algorithms","sub_path":"palindrome-linked-list.py","file_name":"palindrome-linked-list.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1819627897","text":"import pickle\nimport string\nimport heapq\nfrom itertools import chain\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.pipeline import Pipeline\nfrom prettytable import PrettyTable\n\n\nclass Topics(object):\n \"Using the titles of the articles collected; generate a list of top 5 topics per month \\\n Show results for each month of 2019.\"\n\n def __init__(self, file):\n self.file = file\n self.data = self._load_and_clean_data()\n\n self.vectorizer = TfidfVectorizer(stop_words='english',\n use_idf=True,\n max_features=3000,\n smooth_idf=True)\n\n self.svd = TruncatedSVD(n_components=5,\n algorithm='randomized',\n random_state=23,\n n_iter=10)\n\n def _load_and_clean_data(self):\n data_dict = self._load_data()\n\n for k, v in data_dict.items():\n data_dict[k] = [s.translate(str.maketrans('', '', string.punctuation)) for s in\n list(chain.from_iterable(list(v.values())))]\n # @[,]\n return data_dict\n\n def _load_data(self):\n with open(self.file, 'rb') as h:\n data_dict = pickle.load(h)\n # @[,dict]\n return data_dict\n\n def fit_transform(self, data):\n \"\"\"\n Args:\n data (list):\n \"\"\"\n U = Pipeline([('tfidf', self.vectorizer), ('svd', self.svd)]).fit_transform(data) # documents-topics\n V = self.svd.components_ # topics-words\n return U, V\n\n def print_n_dominante_words_topic_(self, V=None, n=30):\n \"\"\"\n :param n (int):\n :param V_T (words, topics):\n :return:\n \"\"\"\n if not V:\n V = self.svd.components_ # words-topics\n\n words = self.vectorizer.get_feature_names()\n pt = PrettyTable()\n\n pt.add_column('Topic1', [words[i] for i in range(len(words)) if i in V[0, :].argsort()[-n:][::-1]])\n pt.add_column('Topic2', [words[i] for i in range(len(words)) if i in V[1, :].argsort()[-n:][::-1]])\n pt.add_column('Topic3', [words[i] for i in range(len(words)) if i in V[2, :].argsort()[-n:][::-1]])\n pt.add_column('Topic4', [words[i] for i in range(len(words)) if i in V[3, :].argsort()[-n:][::-1]])\n pt.add_column('Topic5', [words[i] for i in range(len(words)) if i in V[4, :].argsort()[-n:][::-1]])\n\n print(pt)\n\n\nif __name__ == '__main__':\n\n topics_ = Topics(file=\"./titles_2002.pickle\")\n\n\n\n","repo_name":"nadavcosta/checkpoint","sub_path":"cTopics.py","file_name":"cTopics.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6900855053","text":"\"\"\"'added_location'\n\nRevision ID: 0e02a3643b32\nRevises: 0e51f2ca71b0\nCreate Date: 2023-04-17 11:20:43.111902\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0e02a3643b32'\ndown_revision = '0e51f2ca71b0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column('products', 'name',\n existing_type=sa.VARCHAR(length=120),\n type_=sa.String(length=128),\n existing_nullable=True)\n op.alter_column('products', 'desc',\n existing_type=sa.VARCHAR(length=1000),\n type_=sa.String(length=1028),\n existing_nullable=True)\n op.add_column('users', sa.Column('location_latitude', sa.String(length=256), nullable=True))\n op.add_column('users', sa.Column('location_longitude', sa.String(length=256), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('users', 'location_longitude')\n op.drop_column('users', 'location_latitude')\n op.alter_column('products', 'desc',\n existing_type=sa.String(length=1028),\n type_=sa.VARCHAR(length=1000),\n existing_nullable=True)\n op.alter_column('products', 'name',\n existing_type=sa.String(length=128),\n type_=sa.VARCHAR(length=120),\n existing_nullable=True)\n # ### end Alembic commands ###\n","repo_name":"koyaware/evos-delivery-clone","sub_path":"migrations/versions/0e02a3643b32_added_location.py","file_name":"0e02a3643b32_added_location.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14177829532","text":"import requests\r\nfrom t_getHoney import getHoney\r\nimport re\r\nimport time\r\nimport aiohttp\r\nimport asyncio\r\nimport datetime\r\n\r\nasync def get_media_id(user_id):\r\n base_url = f'http://m.toutiao.com/profile/{user_id}/'\r\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(base_url, headers=headers) as response:\r\n if response.status == 200:\r\n text = await response.text()\r\n media_id = re.search('data-mediaid=\"(.*?)\"', text).group(1)\r\n if media_id:\r\n return media_id\r\n\r\nasync def get_data(user_id, max_behot=0):\r\n base_url = 'https://www.toutiao.com/pgc/ma/'\r\n headers = {\r\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',\r\n 'Referer': f'http://m.toutiao.com/profile/{user_id}/'\r\n }\r\n media_id = await get_media_id(user_id)\r\n count = 0\r\n while True:\r\n _as,_cp = getHoney()\r\n params = {\r\n \"page_type\": \"1\",\r\n \"max_behot_time\": f\"{max_behot}\",\r\n \"uid\": f\"{user_id}\",\r\n \"media_id\": f\"{media_id}\",\r\n \"output\": \"json\",\r\n \"is_json\": \"1\",\r\n \"count\": \"20\",\r\n \"from\": \"user_profile_app\",\r\n \"version\": \"2\",\r\n \"as\": f\"{_as}\",\r\n \"cp\": f\"{_cp}\"\r\n }\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(base_url, params=params, headers=headers) as response:\r\n if response.status == 200:\r\n data = await response.json()\r\n max_behot = data['next']['max_behot_time']\r\n has_more = data['has_more']\r\n if len(data['data']) > 1:\r\n for item in data['data']:\r\n title = item['title']\r\n url = item['url']\r\n go_detail_count = item['go_detail_count']\r\n print(title)\r\n print(url)\r\n print(go_detail_count,'\\n')\r\n count += 1\r\n print(count, user_id)\r\n if not has_more:\r\n break\r\n time.sleep(1)\r\n\r\n\r\n\r\nstart = datetime.datetime.now()\r\nloop = asyncio.get_event_loop()\r\ntasks = [get_data(14861272888), 
get_data(2892047273)]\r\nloop.run_until_complete(asyncio.wait(tasks))\r\nprint((datetime.datetime.now()-start).total_seconds())\r\n","repo_name":"1176642936/toutiao","sub_path":"t96-今日头条个人文章协程爬取.py","file_name":"t96-今日头条个人文章协程爬取.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71750486290","text":"from abc import ABC, abstractmethod\nimport os\nfrom collections.abc import Mapping\nimport pandas as pd\nfrom dmt.tk.utils import timestamp\nfrom dmt.tk.utils.string_utils import make_name\nfrom dmt.tk.field import Field, LambdaField, lazyfield, WithFields, NA\nfrom dmt.tk.author import Author\nfrom dmt.tk.plotting.figure import Figure\nfrom dmt.tk.utils.string_utils import paragraphs, make_name, make_label\n\nclass Report(WithFields):\n \"\"\"\n Report base class.\n We follow the principle of IMRAD (Introduction, Methods, Results, and\n Discussion: https://en.wikipedia.org/wiki/IMRAD )\n \"\"\"\n author = Field(\n \"\"\"\n Author of this report.\n \"\"\",\n __default_value__=Author.anonymous)\n phenomenon = Field(\n \"\"\"\n Label for the phenomenon that this report is about.\n \"\"\",\n __default_value__=NA)\n figures = Field(\n \"\"\"\n A dict mapping label to an object with a `.graphic` and `.caption`\n attributes.\n \"\"\",\n __default_value__=NA)\n measurement = Field(\n \"\"\"\n Measurement associated with this `Report`. This should be a dataframe,\n with a properly annotated index.\n \"\"\",\n __default_value__=NA)\n abstract = Field(\n \"\"\"\n Provide an abstract for the report.\n \"\"\",\n __default_value__=NA,\n __as__=paragraphs)\n introduction = Field(\n \"\"\"\n Provide the research question, and the tested hypothesis or the\n purpose of the research?\n \"\"\",\n __default_value__=NA,\n __as__=paragraphs)\n methods = Field(\n \"\"\"\n Describe the algorithm / procedure used to compute the results\n or the experimental measurement presented in this `Report`. This\n `Field` will be used in the figure caption.\n \"\"\",\n __default_value__=NA,\n __as__=paragraphs)\n sections = Field(\n \"\"\"\n An ordered list of report sections.\n \"\"\",\n __default_value__=NA)\n chapters = Field(\n \"\"\"\n An ordered list of report chapters.\n \"\"\",\n __default_value__=NA)\n results = Field(\n \"\"\"\n Answer to the research question, to be included in the figure caption.\n \"\"\",\n __default_value__=NA,\n __as__=paragraphs)\n discussion = Field(\n \"\"\"\n A longer text describing how the results presented in the report fit\n in the existing knowledge about the topic.\n What might the answer imply and why does it matter? How does it fit in\n with what other researchers have found? 
What are the perspectives\n for future research?\n \"\"\",\n __default_value__=NA,\n __as__=paragraphs)\n references = Field(\n \"\"\"\n References for this analysis report.\n \"\"\",\n __default_value__=NA)\n\n label = LambdaField(\n \"\"\"\n Label for this report to save data.\n \"\"\",\n lambda self: self.phenomenon)\n\n @lazyfield\n def field_values(self):\n \"\"\"...\"\"\"\n try:\n name_phenomenon = self.phenomenon.name\n except AttributeError:\n name_phenomenon = make_name(self.phenomenon, separator='_')\n return {\n \"author\": self.author,\n \"phenomenon\": name_phenomenon,\n \"figures\": self.figures,\n \"introduction\": self.introduction,\n \"methods\": self.methods,\n \"results\": self.results,\n \"discussion\": self.discussion,\n \"references\": self.references,\n \"sections\": self.sections,\n \"chapters\": self.chapters}\n\n\nclass Reporter(WithFields):\n \"\"\"\n Abstract base class.\n A `Reporter` will be able to process reports.\n \"\"\"\n\n path_output_folder = Field(\n \"\"\"\n Path to the output folder.\n \"\"\",\n __default_value__=os.getcwd())\n\n def get_output_location(self,\n report,\n path_output_folder=None,\n output_subfolder=None,\n with_time_stamp=True):\n \"\"\"\n Where should the report be saved.\n Create the folder if it does not exist.\n \"\"\"\n path_parent =\\\n path_output_folder if path_output_folder\\\n else self.path_output_folder\n if not os.path.exists(path_parent):\n os.makedirs(path_parent)\n path_report_folder =\\\n os.path.join(\n path_parent,\n make_label(report.label))\n\n if with_time_stamp:\n if isinstance(with_time_stamp, str):\n path_report_folder =\\\n os.path.join(\n path_report_folder,\n with_time_stamp)\n else:\n daytime =\\\n timestamp()\n path_report_folder =\\\n os.path.join(\n path_report_folder,\n daytime.day,\n daytime.time)\n\n if output_subfolder is not None:\n path_report_folder =\\\n os.path.join(\n path_report_folder,\n output_subfolder)\n if not os.path.exists(path_report_folder):\n os.makedirs(path_report_folder)\n return path_report_folder\n\n def get_figures_location(self,\n path_output_folder):\n \"\"\"\n Get a folder that will contain figures for a report.\n \"\"\"\n path_figures_folder = os.path\\\n .join(path_output_folder, \"figures\")\n if not os.path.exists(path_figures_folder):\n os.makedirs(path_figures_folder)\n\n return path_figures_folder\n\n @staticmethod\n def _flattened_columns(dataframe):\n \"\"\"\n Flatten MultiIndexed columns...\n \"\"\"\n return pd.DataFrame(\n dataframe.values,\n columns=pd.Index([\n '_'.join(t) if isinstance(t, tuple) else t\n for t in dataframe.columns.values]))\n\n def _get_file_path(self, folder_files, label_file, format_file):\n \"\"\"...\"\"\"\n raise NotImplementedError(\n \"\"\"Is this a stub leftover?\"\"\") \n\n def _save_figures(self, report, output_folder, format_file=\".png\"):\n \"\"\"...\"\"\"\n if not report.figures:\n return (None, {})\n if format_file[0] != '.':\n format_file = '.' 
+ format_file\n figures_folder =\\\n self.get_figures_location(output_folder)\n figure_locations = {}\n for label, figure in report.figures.items():\n location =\\\n os.path.join(figures_folder, \"{}{}\".format(label, format_file))\n figure.save(location, dpi=100)\n figure_locations[label] = location\n\n return (figures_folder, figure_locations)\n\n def _write_attr(self, attribute, text, output_folder, format_file):\n \"\"\"...\"\"\"\n path_output_file =\\\n os.path.join(output_folder,\n \"{}{}\".format(attribute, format_file))\n with open(path_output_file, 'w') as output_file:\n try:\n output_file.write(text)\n except TypeError:\n output_file.write('\\n'.join(text))\n\n\n def _save_sections(self, report, output_folder, format_file=\".txt\"):\n \"\"\"\n Save report sections.\n \"\"\"\n def _write(attr, text):\n self._write_attr(attr, text, output_folder, format_file)\n\n if report.introduction:\n _write(\"introduction\", report.introduction)\n if report.methods:\n _write(\"methods\", report.methods)\n if report.results:\n _write(\"methods\", report.results)\n if report.discussion:\n _write(\"discussion\", report.discussion)\n if report.references:\n _write(\"references\", report.references)\n\n if report.sections:\n for section in report.sections:\n _write(section.label, section.content)\n\n return output_folder\n\n def _save_chapters(self, report, output_folder, format_file=\"txt\"):\n \"\"\"\n Save report chapters .\n \"\"\"\n if not report.chapters:\n return None\n\n for chapter in report.chapters:\n path_chapter_folder =\\\n self.get_output_location(\n report,\n path_output_folder=output_folder,\n with_time_stamp=False)\n path_figures_folder =\\\n os.path.join(path_chapter_folder, \"figures\")\n self._save_text_report(\n chapter, path_chapter_folder, path_figures_folder)\n\n def _save_text_report(self, report, output_folder, folder_figures):\n \"\"\"...\"\"\"\n def __write(output_file, attribute, text=\"\"):\n section_end = 70 *'-'\n underline = len(attribute) * '-'\n output_file.write(\n \"{}\\n{}\\n{}\\n{}\\n\".format(\n attribute.upper(),\n underline,\n text if text else getattr(report, attribute),\n section_end))\n\n with open(os.path.join(output_folder, \"report.txt\"),'w') as output_file:\n __write(\n output_file, \"introduction\")\n __write(\n output_file, \"methods\")\n __write(\n output_file, \"results\")\n __write(\n output_file, \"discussion\")\n if report.figures:\n __write(\n output_file, \"figures\", folder_figures)\n __write(\n output_file, \"figure captions\",\n \"\\n\".join(\n \"({}). 
{}\".format(label, figure.caption)\n for label, figure in report.figures.items()))\n\n def save(self,\n report,\n path_output_folder=None,\n output_subfolder=None,\n with_time_stamp=True):\n \"\"\"\n Save report at the path provided.\n \"\"\"\n output_folder =\\\n self.get_output_location(\n report,\n path_output_folder=path_output_folder,\n output_subfolder=output_subfolder,\n with_time_stamp=with_time_stamp)\n\n folder_figures, _ =\\\n self._save_figures(report, output_folder)\n\n self._save_measurement(report, output_folder)\n\n self._save_text_report(report, output_folder, folder_figures)\n\n for section in self.sections:\n self.save(\n section,\n path_output_folder=output_folder,\n with_time_stamp=False)\n\n for chapter in self.chapters:\n self._save_chapters(report, output_folder)\n\n return output_folder\n\n def _save_measurement(self, report, output_folder):\n \"\"\"...\"\"\"\n try:\n self._flattened_columns(report.measurement.reset_index()).to_csv(\n os.path.join(\n output_folder,\n \"{}.csv\".format(report.label)))\n except AttributeError:\n pass\n\n\n\n def post(self,\n report,\n *args, **kwargs):\n \"\"\"\n Behavior of a `Reporter` is defined by how it posts a report.\n A `Reporter` may post to a website, to a database, or simply\n save to a folder on the harddisk.\n\n The default behavior is to save the report to the disk as a text file\n and a figure file.\n \"\"\"\n return self.save(report, *args, **kwargs)\n","repo_name":"BlueBrain/DMT","sub_path":"dmt/tk/reporting/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11527,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"70683869331","text":"import cs50\nimport sys\n\n\ndef main():\n # prompt for input\n number = cs50.get_int(\"Number: \")\n num_str = str(number)\n\n # check length\n length = len(num_str)\n\n if length < 13 or length >= 17:\n print(\"INVALID\")\n sys.exit(0)\n\n num_list = []\n for num_char in num_str:\n num_list.append(int(num_char))\n\n # calc checksum\n # multiply every other digit by 2 starting with second to last digit\n # add those product digits together\n # add the sum to the sum of the digits that weren't multiplied by 2\n # if total's last digit is 0, number is valid\n\n sum = 0\n # drop last digit\n cropped_list = num_list[0:-1]\n # get every other digit starting from end\n cropped_list.reverse()\n new_list = cropped_list[0::2]\n for num in new_list:\n # multiply num by 2\n product = num * 2\n prod_str = str(product)\n\n # get product's digits\n digit1 = prod_str[0]\n digit2 = prod_str[1] if len(prod_str) > 1 else \"0\"\n\n # add digits to firstSum\n sum += int(digit1) + int(digit2)\n\n # get numbers from digits that weren't manipulated and add to sum\n num_list.reverse()\n untouched_list = num_list[0::2]\n for num in untouched_list:\n sum += num\n\n str_sum = str(sum)\n if str_sum[-1] != \"0\":\n print(\"INVALID\")\n sys.exit(0)\n\n # get first and first 2 digits of number\n first_one = num_str[0]\n first_two = num_str[0:2]\n\n 5673598276138003\n # check length & starting digits for matching card type\n # Print: AMEX, MASTERCARD, VISA, or INVALID\n if (first_two == \"34\" or first_two == \"37\") and (length >= 15 and length < 16):\n print(\"AMEX\")\n elif (first_two >= \"51\" and first_two <= \"55\") and (length >= 16 and length < 17):\n print(\"MASTERCARD\")\n elif (first_one == \"4\") and ((length >= 13 and length < 14) or (length >= 16 and length < 17)):\n print(\"VISA\")\n else:\n print(\"INVALID\")\n\n\nif 
__name__ == \"__main__\":\n main()\n","repo_name":"Jdubbs702/cs_solutions_python","sub_path":"credit/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"41781598087","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 5 16:11:49 2018\n\n@author: Alverino\n\"\"\"\n\nimport cv2\nimport numpy\n\nGRAVITY = 4\nNUMFRUITS = 3\nFONT = cv2.FONT_HERSHEY_COMPLEX\nFONT_SIZE = 0.7\n\n# These determine the positions to generate \"exploded\" bits of the fruit\nEXX = [1, 0.70711, 0, -0.70711, -1, -0.70711, 0, 0.70711]\nEXY = [0, 0.70711, 1, 0.70711, 0, -0.70711, -1, -0.70711]\nEXV = 8\n\nclass Fruit:\n def __init__(this, radius, color, xPos, yPos, xVel, yVel):\n this.radius = radius\n this.color = color\n this.xPos = xPos\n this.yPos = yPos\n this.xVel = xVel\n this.yVel = yVel\n \n def draw(this, frame):\n cv2.circle(frame, (this.xPos, this.yPos), this.radius, this.color, -1)\n \n def doPhysics(this, xBound, yBound):\n this.xPos += this.xVel\n this.yPos += this.yVel\n this.yVel += GRAVITY\n \n return this.xPos >= 0 and this.xPos <= xBound and this.yPos <= yBound\n \n def explode(this, explosions):\n exRad = int(this.radius/2)\n explosions.append(Fruit(exRad, this.color, this.xPos, this.yPos, this.xVel, this.yVel))\n for i in range(8):\n explosions.append(Fruit(exRad, this.color, this.xPos + int(EXX[i]*exRad), this.yPos + int(EXY[i]*exRad), this.xVel + int(EXX[i]*EXV), this.yVel + int(EXY[i]*EXV)))\n \n def isAbove(this, yVal):\n return this.yPos < yVal\n \n def intersects(this, fingerTip):\n return (abs(fingerTip[0] - this.yPos) <= this.radius) and (abs(fingerTip[1] - this.xPos) <= this.radius)\n \nclass Bomb(Fruit):\n def __init__(this, radius, color, xPos, yPos, xVel, yVel):\n Fruit.__init__(this, radius, color, xPos, yPos, xVel, yVel)\n \n def draw(this, frame):\n cv2.rectangle(frame, (this.xPos - this.radius, this.yPos - this.radius), (this.xPos + this.radius, this.yPos + this.radius), this.color, -1)\n cv2.putText(frame, \"bomb\",(this.xPos - this.radius, this.yPos - this.radius), FONT, FONT_SIZE, (255, 255, 255), 2, cv2.LINE_AA)\n\nclass Text:\n def __init__(this, pos, color, text):\n this.pos = pos\n this.color = color\n this.text = text\n \n def write(this, frame):\n cv2.putText(frame, this.text,this.pos, FONT, FONT_SIZE, this.color, 2, cv2.LINE_AA)\n\nclass Level:\n def __init__(this, numFruits, pointsPerFruit, numBombs, numFrames):\n this.numFruits = numFruits\n this.pointsPerFruit = pointsPerFruit\n this.numBombs = numBombs\n this.numFrames = numFrames\n \ndef randomFruit(x0, y0):\n # Generates reasonable random values for the initialization of each fruit\n radius = numpy.random.randint(30, 70)\n color = (numpy.random.randint(255), numpy.random.randint(255), numpy.random.randint(255))\n xPos = numpy.random.randint(x0)\n yPos = y0\n xVel = int(((x0/2)-xPos)//10)\n yVel = -numpy.random.randint(y0//20, y0//10)\n return Fruit(radius, color, xPos, yPos, xVel, yVel)\n\ndef randomBomb(x0, y0):\n # Generates reasonable random values for the initialization of each fruit\n xPos = numpy.random.randint(x0)\n yPos = y0\n xVel = int(((x0/2)-xPos)//10)\n yVel = -numpy.random.randint(y0//20, y0//10)\n return Bomb(50, (0, 0, 0), xPos, yPos, xVel, 
yVel)","repo_name":"cfgong/fruitninjacv","sub_path":"gameObjects.py","file_name":"gameObjects.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"34586149368","text":"from identifier import Identifier\n\n\nclass Validator:\n def __init__(self, filename, identifier_map):\n # TODO Validator 생성 초기, 각 컬럼에 식별, ��식별자, 민감정보 태깅하기\n csv = open(filename, 'r')\n keys = csv.readline().replace('\\n', '').split(',')\n\n # 컬럼에 식별자 맵핑\n identifierMap = {}\n for i in range(len(keys)):\n key = keys[i]\n identifierMap[key] = identifier_map[i]\n\n self.identifierMap = identifierMap\n\n # CSV 데이터를 dictionary로 임포트\n dataList = []\n\n while True:\n line = csv.readline()\n values = line.replace('\\n', '').split(',')\n\n obj = {}\n for i in range(len(values)):\n key = keys[i]\n value = values[i]\n\n obj[key] = value\n\n dataList.append(obj)\n\n if not line:\n break\n\n csv.close()\n\n self.dataList = dataList\n\n def validate_K_anonymity(self, k):\n self.k = k\n\n checkList = {}\n for data in self.dataList:\n block = []\n for key, value in data.items():\n if self.identifierMap[key] == Identifier.QI:\n block.append(value)\n\n blockKey = ':'.join(map(str, block))\n if blockKey not in checkList.keys():\n checkList[blockKey] = 1\n else:\n checkList[blockKey] += 1\n\n for key, value in checkList.items():\n if value < k:\n print('K Anonimity failed - block `%s` is count %d' % (key, value))\n\n def validate_L_diversity(self, l):\n # l은 k 값보다 클 수 없음. (같거나 작음)\n if l > k:\n print('[ERROR] constant l cannot greater than k')\n return\n\n self.l = l\n\n # { 'block': { 'sa1': ['value1', 'value2'], 'sa2': ['value1', 'value2'] } }\n checkList = {}\n for data in self.dataList:\n block = []\n for key, value in data.items():\n if self.identifierMap[key] == Identifier.QI:\n block.append(value)\n\n blockKey = ':'.join(map(str, block))\n\n if blockKey not in checkList.keys():\n checkList[blockKey] = {}\n\n for key, value in data.items():\n if self.identifierMap[key] != Identifier.SA:\n continue\n\n if key not in checkList[blockKey]:\n checkList[blockKey][key] = []\n\n checkList[blockKey][key].append(value)\n\n for blockKey, saData in checkList.items():\n for key, value in saData.items():\n cnt = len(set(value))\n\n if cnt < l:\n print('L Diversity failed - block `%s`\\'s %s is count %d => ' % (blockKey, key, cnt), set(value))\n\n def validate_T_closeness(self, t):\n pass\n\n\nif __name__ == '__main__':\n validator = Validator('./privacy_dummy.csv', (Identifier.I, Identifier.QI, Identifier.QI, Identifier.QI, Identifier.SA, Identifier.SA, Identifier.SA, Identifier.I))\n\n print('input constant k: ')\n k = int(input())\n validator.validate_K_anonymity(k)\n\n print('input constant l: ')\n l = int(input())\n validator.validate_L_diversity(l)\n","repo_name":"tinyjin/anonymipy","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39100224991","text":"# -*- coding: utf8 -*-\n# Filename: extractPLYmetadata.py\n#\n########################################################################\n# This is a program to extract basic information from PLY files\n# Information extracted: number of vertices and number of faces\n#\n# Martina Trognitz (martina.trognitz@oeaw.ac.at)\n#\n# Specify the path with first variable\n#\n########################################################################\nfrom 
__future__ import unicode_literals\n\nimport os, os.path\n\nif __name__==\"__main__\":\n # set path\n # ensure that root folder name is unique in collection\n path = r'..\\Collections'\n print (\"Working with directory: \"+path+\"\\n\")\n\n allMeta = []\n\n # walk through path and find ply files\n for root, dirs, files in os.walk(path):\n for file in files:\n fileExtension = file.rsplit('.', 1)[1].lower()\n fileNamePath = os.path.join(root, file)\n if fileExtension == 'ply':\n print (\"Found this ply: \" +fileNamePath)\n # open and read ply\n with open(fileNamePath, 'rb') as plyfile:\n fileLines = plyfile.readlines()\n fileMeta = []\n # read each line and check for element vertex and element face\n for line in fileLines:\n if b'element vertex' in line:\n plyVertices = line.split(b' vertex ')[1].strip()\n fileMeta.append(fileNamePath.encode('utf-8'))\n # extra bit, if file names already contain hint to\n # resolution used. Might be commented out\n if 'lowRes-data' in fileNamePath:\n fileMeta.append(b'Low Resolution')\n else:\n fileMeta.append(b'Higher Resolution')\n fileMeta.append(plyVertices)\n if b'element face' in line:\n plyFaces = line.split(b' face ')[1].strip()\n fileMeta.append(plyFaces)\n allMeta.append(fileMeta)\n break\n\n # write file with list of files and number of vertices and faces\n # if extra resolution bit was not commented out this is also added\n outMetaFile = open('plyMetaList.csv','wb')\n for meta in allMeta:\n writeLine = b';'.join(meta).decode('utf-8')\n outMetaFile.write((writeLine+'\\n').encode('utf-8'))\n outMetaFile.close()\n","repo_name":"acdh-oeaw/arche-curationTools","sub_path":"extractPLYmetadata.py","file_name":"extractPLYmetadata.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"17375733886","text":"from django.shortcuts import render\nfrom .logic.logic_bodega import get_all_bodegas, get_all_productos_chiper_bodega, create_a_bodega, get_bodega\nfrom .logic.logic_producto_chiper import get_all_productos_chiper, create_a_producto_chiper, get_producto_chiper\nfrom .forms import BodegaForm, ProductoChiperForm\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom chiper_Totoro.auth0backend import getRole\n\nfrom ratelimit.decorators import ratelimit\n\n\n@login_required\n@ratelimit(key='ip', rate='5/m')\ndef get_bodegas(request):\n was_limited = getattr(request, 'limited', False)\n if was_limited:\n return HttpResponseRedirect('/')\n role = getRole(request)\n if role == \"Administrador\":\n bodegas_list = get_all_bodegas()\n context = {'bodegas_list': bodegas_list}\n return render(request, 'chiper_Totoro/get_all_bodegas.html', context)\n else:\n return HttpResponse(\"Unauthorized User\")\n\n\n\n@login_required\ndef create_bodegas(request):\n role = getRole(request)\n if role == \"Administrador\":\n if request.method == 'POST':\n form = BodegaForm(request.POST)\n if form.is_valid():\n create_a_bodega(form)\n messages.add_message(request, messages.SUCCESS, 'Bodega creada de manera satisfactoria')\n return HttpResponseRedirect(reverse('bodegaList'))\n else:\n print(form.errors)\n else:\n form = BodegaForm()\n\n context = {\n 'form': form,\n }\n return render(request, 'chiper_Totoro/create_bodega.html', context)\n else:\n return HttpResponse(\"Unauthorized User\")\n\n\n@login_required\ndef get_bodega_id(request, id):\n 
role = getRole(request)\n if role == \"Administrador\":\n bodega = get_bodega(id)\n productos_chiper_bodega = get_all_productos_chiper_bodega(id)\n context = {'bodega': bodega, 'productos_chiper_bodega': productos_chiper_bodega}\n return render(request, 'chiper_Totoro/get_bodega_id.html', context)\n else:\n return HttpResponse(\"Unauthorized User\")\n\n\ndef get_productos_chiper(request):\n productos_chiper_list = get_all_productos_chiper()\n context = {'productos_chiper_list': productos_chiper_list}\n return render(request, 'chiper_Totoro/get_all_productos_chiper.html', context)\n\n\ndef create_producto_chiper(request):\n if request.method == 'POST':\n form = ProductoChiperForm(request.POST)\n if form.is_valid():\n create_a_producto_chiper(form)\n messages.add_message(request, messages.SUCCESS, 'Producto creado de manera satisfactoria')\n return HttpResponseRedirect(reverse('productosChiperList'))\n else:\n print(form.errors)\n else:\n form = ProductoChiperForm()\n\n context = {\n 'form': form,\n }\n return render(request, 'chiper_Totoro/create_producto_chiper.html', context)\n","repo_name":"ja-avos/Chiper","sub_path":"chiper_Totoro/logistica/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72679842130","text":"from functools import wraps\nimport operator\nimport math\n\nimport cv2\nimport numpy as np\n\n\nGREEN = (0, 255, 0)\nYELLOW = (255, 255, 0)\nRED = (0, 0, 255)\nPINK = (0, 255, 255)\nWHITE = (255, 255, 255)\n\n\nclass Bunch(object):\n\n def __init__(self, **k):\n self.__dict__.update(k)\n\n\n def dict(self):\n return self.__dict__\n\n\n def __repr__(self):\n return \"<%s %r>\" % (\n self.__class__.__name__,\n self.__dict__,\n )\n\n\ndef create_hsv_preview(img):\n \"\"\"\n Takes an HSV-image, and returns\n a copy where the SV components\n are maxed out, in BGR-colorspace.\n\n This allows to perceive the way sectioning\n in H-space works.\n \"\"\"\n hsv_preview = img.copy()\n hsv_preview[:,:,1:] = [255, 255]\n return cv2.cvtColor(hsv_preview, cv2.COLOR_HSV2BGR)\n\n\ndef memoize(f):\n @wraps(f)\n def _d(*a, **k):\n key = a, tuple((key, value) for key, value in k.iteritems())\n if key not in f._cache:\n f._cache[key] = f(*a, **k)\n return f._cache[key]\n\n f._cache = {}\n return _d\n\n\ndef colorbar():\n return cv2.cvtColor(\n np.array(\n [[(i, 255, 255) for i in xrange(180)]],\n dtype=\"uint8\",\n ),\n cv2.COLOR_HSV2BGR,\n )\n\n\nclass RevolutionFilter(object):\n\n def __or__(self, other):\n left = self\n right = other\n class Piped(RevolutionFilter):\n\n def feed(self, input_):\n return right.feed(left.feed(input_))\n\n\n def __getattr__(self, name):\n return getattr(right, name)\n\n\n return Piped()\n\n\nclass RevolutionCounter(RevolutionFilter):\n \"\"\"\n A simple class to count revolutions\n based on input of a continuous, mononotic(!)\n sequence of atan2-values with each distinct\n step not farther than math.pi/2\n\n The counter is quadrant-based: the first value determines\n the initial quadrant, and whenever the input enters this quadrant\n again, it will trigger an increase in revolutions.\n \"\"\"\n\n def __init__(self):\n self.revolutions = 0\n self._initial_quadrant = None\n self._in_quadrant = True\n\n\n def feed(self, input_):\n q = self._quadrant(input_)\n if self._initial_quadrant is None:\n self._initial_quadrant = q\n self._in_quadrant = True\n # we count when re-entering the quadrant\n if not self._in_quadrant and q == 
self._initial_quadrant:\n self._in_quadrant = True\n self.revolutions += 1\n elif self._in_quadrant and q != self._initial_quadrant:\n self._in_quadrant = False\n return self.revolutions\n\n\n def _quadrant(self, input_):\n \"\"\"\n 1 | 0\n -----\n 2 | 3\n \"\"\"\n if input_ >= 0:\n return 0 if input_ < math.pi / 2 else 1\n else:\n return 2 if input_ < -math.pi / 2 else 3\n\n\nclass Atan2Monotizer(RevolutionFilter):\n\n def __init__(self, clockwise=True):\n self._last_input = None\n self._v = None\n self._op = operator.le if clockwise else operator.ge\n\n\n def feed(self, input_):\n if self._last_input is not None:\n diff = input_ - self._last_input\n while diff > math.pi / 2.0:\n diff -= math.pi\n while diff < -math.pi / 2.0:\n diff += math.pi\n\n if self._op(diff, 0):\n self._last_input = input_\n else:\n self._last_input = input_\n\n return self._last_input\n","repo_name":"deets/brombeerquark","sub_path":"opencv/bq/opencv/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33547045482","text":"# flake8: noqa\nimport pytest\n\nfrom taxi.stq import async_worker_ng\n\nfrom test_pricing_modifications_validator.plugins.mock_pricing_admin import (\n mock_pricing_admin,\n)\nfrom pricing_modifications_validator.storage import validator\nfrom pricing_modifications_validator.stq import syscalc\n\n\ndef _create_task_info(exec_tries=0):\n return async_worker_ng.TaskInfo(\n id='task_id', exec_tries=exec_tries, reschedule_counter=0, queue='',\n )\n\n\n@pytest.mark.pgsql('pricing_modifications_validator', files=['state.sql'])\n@pytest.mark.config(PMV_MAX_TASK_FAIL_RETRIES=1)\nasync def test_syscalc_stq_task_error(stq3_context, select_named):\n await syscalc.task(stq3_context, _create_task_info(1), check_id=1)\n checks_state = select_named('SELECT task_state FROM db.checks WHERE id=1')[\n 0\n ]\n assert checks_state['task_state'] == 'Terminated'\n\n\n@pytest.mark.pgsql('pricing_modifications_validator', files=['state.sql'])\nasync def test_syscalc_stq_task_good(stq3_context, select_named):\n await syscalc.task(stq3_context, _create_task_info(0), check_id=1)\n checks_state = select_named(\n 'SELECT message, task_state FROM db.checks WHERE id=1',\n )[0]\n assert checks_state['task_state'] == 'Finished'\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/test_pricing_modifications_validator/stq/test_syscalc_task.py","file_name":"test_syscalc_task.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"15904392579","text":"import os\nimport csv\nimport re\nfrom sklearn.base import TransformerMixin\nimport pandas as pd\nfrom pathlib import Path\nfrom typing import Tuple, Dict, List\n\nfrom predpy.dataset import MultiTimeSeriesDataset\n\n\ndef get_dataset(\n path: Path, window_size: int, ts_scaler: TransformerMixin = None,\n read_csv_kwargs: Dict = {'header': None}\n) -> MultiTimeSeriesDataset:\n df = pd.read_csv(\n path, **read_csv_kwargs\n )\n try:\n df.columns = df.columns.astype(int)\n except TypeError:\n pass\n if ts_scaler is not None:\n df[:] = ts_scaler.transform(df)\n dataset = MultiTimeSeriesDataset(\n sequences=[df],\n window_size=window_size,\n target=df.columns.tolist()\n )\n return dataset\n\n\ndef get_train_test_ds(\n topic: str, collection_name: str, ds_name: str,\n window_size: int, ts_scaler: TransformerMixin = None,\n fit_scaler: bool = True, 
read_csv_kwargs: Dict = {'header': None}\n) -> Tuple[MultiTimeSeriesDataset]:\n train_df = pd.read_csv(\n f'data/{topic}/{collection_name}/train/{ds_name}.csv',\n **read_csv_kwargs\n )\n test_df = pd.read_csv(\n f'data/{topic}/{collection_name}/test/{ds_name}.csv',\n **read_csv_kwargs\n )\n if fit_scaler:\n ts_scaler.fit(train_df)\n if ts_scaler is not None:\n train_df[:] = ts_scaler.transform(train_df)\n test_df[:] = ts_scaler.transform(test_df)\n try:\n train_df.columns = train_df.columns.astype(int)\n test_df.columns = test_df.columns.astype(int)\n except TypeError:\n pass\n trian_ds = MultiTimeSeriesDataset(\n sequences=[train_df],\n window_size=window_size,\n target=train_df.columns.tolist()\n )\n test_ds = MultiTimeSeriesDataset(\n sequences=[test_df],\n window_size=window_size,\n target=test_df.columns.tolist()\n )\n return trian_ds, test_ds\n\n\ndef get_dataset_names(path: str):\n \"\"\"Dataset path should follow pattern:\n .*/data/{topic}/{collection}/{\"train\", \"test\" or \"test_labels\"/{dataset}\"\"\"\n dir_names = path.split(os.sep)\n start_id = dir_names.index('data')\n topic = dir_names[start_id + 1]\n collection_name = dir_names[start_id + 2]\n dataset_name = dir_names[start_id + 4][:-4]\n return topic, collection_name, dataset_name\n\n\ndef _str_to_float_list(text: str) -> List[float]:\n floats = re.findall(r'\\d+.\\d+', text)\n res = []\n for f in floats:\n res += [float(f)]\n return res\n\n\ndef load_anom_scores(\n path: Path\n) -> Tuple[List[float], List[int]]:\n with open(path, 'r') as f:\n reader = csv.DictReader(f)\n scores = []\n classes = []\n for row in reader:\n scores += [_str_to_float_list(row['score'])]\n classes += [int(row['class'])]\n return scores, classes\n","repo_name":"Stashq/TS-Forecasting","sub_path":"anomaly_detection/data_loading.py","file_name":"data_loading.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"23474297440","text":"#!/usr/bin/env python\n\n# Import earthengine API\nimport ee\n\n# Initialise\nee.Initialize()\n\n# /* Calculates fractional sub-pixel abundance with Spectral Mixture Analysis (SMA) \\\n# LXEndmembers - defines a set of image endmembers for Landsat sensors (5, 7 or 8 for TM, ETM+ or OLI, respectively)\n# getSMAFractions - applies singular value decomposition to calculate fractions.\n# getNDFI - calculates NDFI from fractional images.\n# ndfiColors - defines NDFI color table\n# getCSF - TBD\n# csfColors - TBD define CSF color table\n# \"\"\"\n\n# Define Landsat 5 endmembers\nENDMEMBERS_L5 = [\n [119.0, 475.0, 169.0, 6250.0, 2399.0, 675.0], # gv\n [1514.0, 1597.0, 1421.0, 3053.0, 7707.0, 1975.0], # npv\n [1799.0, 2479.0, 3158.0, 5437.0, 7707.0, 6646.0], # soil\n [4031.0, 8714.0, 7900.0, 8989.0, 7002.0, 6607.0] # loud\n]\n\n# Define Landsat 7 endmembers\nENDMEMBERS_L7 = [\n [119.0, 475.0, 169.0, 6250.0, 2399.0, 675.0], # gv\n [1514.0, 1597.0, 1421.0, 3053.0, 7707.0, 1975.0], # npv\n [1799.0, 2479.0, 3158.0, 5437.0, 7707.0, 6646.0], # soil\n [4031.0, 8714.0, 7900.0, 8989.0, 7002.0, 6607.0] # cloud\n]\n\n# Define Landsat 8 endmembers\nENDMEMBERS_L8 = [\n [119.0, 475.0, 169.0, 6250.0, 2399.0, 675.0], # gv\n [1514.0, 1597.0, 1421.0, 3053.0, 7707.0, 1975.0], # npv\n [1799.0, 2479.0, 3158.0, 5437.0, 7707.0, 6646.0], # soil\n [4031.0, 8714.0, 7900.0, 8989.0, 7002.0, 6607.0] # cloud\n]\n\n# Define Sentinel-2 endmembers\nENDMEMBERS_S2 = [\n [119.0, 475.0, 169.0, 6250.0, 2399.0, 675.0], # gv\n [1514.0, 1597.0, 1421.0, 3053.0, 
7707.0, 1975.0], # npv\n [1799.0, 2479.0, 3158.0, 5437.0, 7707.0, 6646.0], # soil\n [4031.0, 8714.0, 7900.0, 8989.0, 7002.0, 6607.0] # cloud\n]\n\n\ndef getSMAFractions(image, endmembers):\n \"\"\"Uminxing image using SDVC\n\n Parameters:\n image (ee.Image): Reflectance image containing the bands:\n blue, red, green, nir, swir1, swir2\n endmembers (list): Matrix containing the endmembers following\n this format: [\n [blue_gv, green_gv, red_gv, nir_gv, swir1_gv, swir2_gv],\n [blue_npv, green_npv, red_npv, nir_npv, swir1_npv, swir2_npv],\n [blue_soil, green_soil, red_soil, nir_soil, swir1_soil, swir2_soil],\n [blue_cloud, green_cloud, red_cloud, nir_cloud, swir1_cloud, swir2_cloud]\n ]\n\n Returns:\n ee.Image: Image unmixed\n \"\"\"\n\n outBandNames = ['gv', 'npv', 'soil', 'cloud']\n\n fractions = ee.Image(image) \\\n .select(['blue', 'green', 'red', 'nir', 'swir1', 'swir2']) \\\n .unmix(endmembers) \\\n .max(0) \\\n .multiply(100) \\\n .byte() \\\n\n fractions = fractions.rename(outBandNames)\n\n summed = fractions.expression('b(\"gv\") + b(\"npv\") + b(\"soil\")')\n\n shade = summed \\\n .subtract(100) \\\n .abs() \\\n .byte() \\\n .rename(\"shade\")\n\n fractions = fractions.addBands(shade)\n\n return ee.Image(fractions \\\n .copyProperties(image) \\\n .copyProperties(image, [\n 'system:time_start',\n 'system:time_end',\n 'system:footprint'])\n )\n\n\ndef getNDFI(image):\n \"\"\"Calculate GVS and NDFI and add them to image fractions\n\n Parameters:\n image (ee.Image): Fractions image containing the bands:\n gv, npv, soil, cloud\n\n Returns:\n ee.Image: Fractions image with gvs and ndfi bands\n \"\"\"\n\n summed = image.expression('b(\"gv\") + b(\"npv\") + b(\"soil\")')\n\n gvs = image.select(\"gv\") \\\n .divide(summed) \\\n .multiply(100) \\\n .byte() \\\n .rename(\"gvs\")\n\n npvSoil = image.expression('b(\"npv\") + b(\"soil\")')\n\n ndfi = ee.Image.cat(gvs, npvSoil) \\\n .normalizedDifference() \\\n .rename('ndfi')\n\n # rescale NDFI from 0 to 200 \\\n ndfi = ndfi.expression('byte(b(\"ndfi\") * 100 + 100)')\n\n image = image.addBands(gvs)\n image = image.addBands(ndfi)\n\n return ee.Image(image \\\n .copyProperties(image) \\\n .copyProperties(image, [\n 'system:time_start',\n 'system:time_end',\n 'system:footprint'])\n )\n\n\n# Calculate CSFI and add it to image fractions\ndef getCSFI(image):\n \"\"\"Calculate CSFI and add it to image fractions\n\n Parameters:\n image (ee.Image): Fractions image containing the bands:\n gv, npv, soil, cloud\n\n Returns:\n ee.Image: Fractions image with csfi bands\n \"\"\"\n\n csfi = image.expression(\n \"(float(b('gv') - b('shade'))/(b('gv') + b('shade')))\")\n\n csfi = csfi.multiply(100).add(100).byte().rename(['csfi'])\n\n image = image.addBands(csfi)\n\n return ee.Image(image.copyProperties(image))\n","repo_name":"mapbiomas-brazil/amazon","sub_path":"modules/SMA_NDFI.py","file_name":"SMA_NDFI.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"66"} +{"seq_id":"4300370108","text":"from os.path import expanduser\n\nHOME = expanduser('~')\n\nBACKBONE = 'ResNet50'\nTRAIN_BATCH_SIZE = 16\nTEST_BATCH_SIZE = 128\nSAVE_EVERY = 5\nTEST_EVERY = 5\nMAX_CHECKPOINTS = 200\nEND_EPOCH = 200\nINIT_LR = 0.001\nLR_MILESTONES = [50, 100, 150]\nLR_DECAY_RATE = 0.1\nMOMENTUM = 0.9\nWEIGHT_DECAY = 1e-4\nSTRIDE = 32\nFEATURE_DIM = 2048\nIMAGE_SIZE = 448\n\n# Path to the global-view extractor model, which also serves as a pretrained backbone for the disjoint encoder.\n# Adopted from MMAL 
--\n# Paper: https://arxiv.org/pdf/2003.09150.pdf\n# Code: https://github.com/ZF4444/MMAL-Net\nPRETRAINED_EXTRACTOR_PATH = './view_extractor/resnet50-19c8e357.pth'\n","repo_name":"abhrac/relational-proxies","sub_path":"src/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"66"} +{"seq_id":"12821711755","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom forms import qtset, qtsea, qtadd, qtedit\nfrom PyQt5.QtWidgets import (QMainWindow, QAction, qApp, QApplication, QLineEdit, QLabel,\n QDesktopWidget, QWidget, QPushButton, QGridLayout,\n QTextEdit, QTableView, QTableWidget, QTableWidgetItem)\nfrom PyQt5.QtGui import QIcon, QStandardItemModel, QStandardItem\nfrom PyQt5.QtCore import Qt \n\n\nclass Mainwin(QMainWindow):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n\n def initUI(self):\n #Инициализация окон\n self.Formset = qtset.Sett()\n self.Formsea = qtsea.Search()\n self.Formadd = qtadd.Adds()\n self.Formedit = qtedit.Edits()\n #\n #Ц��нтральный виджет интерфейса\n centralWidget = QWidget(self)\n self.setCentralWidget(centralWidget)\n ###Настройки меню\n self.exitAction = QAction(QIcon('exit.png'), '&Выход', self)\n self.exitAction.setShortcut('Ctrl+Q')\n self.exitAction.setStatusTip('Закрыть приложение.')\n self.exitAction.triggered.connect(qApp.quit)\n #\n self.addAction = QAction('&Добавить', self)\n self.addAction.setShortcut('Ctrl+A')\n self.addAction.setStatusTip('Добавить запись.')\n self.addAction.triggered.connect(self.Addform)\n #\n self.conAction = QAction('&Присоединиться', self)\n #conAction.setShortcut('Ctrl+')\n self.conAction.setStatusTip('Присоеденение к базе.')\n #conAction.triggered.connect(self.)\n #\n self.setAction = QAction('&Настройки', self)\n self.setAction.setShortcut('Ctrl+,')\n self.setAction.setStatusTip('Открыть окно настроек.')\n self.setAction.triggered.connect(self.setting)\n #\n self.editAction = QAction('&Редактирование', self)\n self.editAction.setShortcut('Ctrl+E')\n self.editAction.setStatusTip('Открыть окно редактирование записей.')\n self.editAction.triggered.connect(self.Editform)\n #\n self.searthAction = QAction('&Поиск', self)\n self.searthAction.setShortcut('Ctrl+F')\n self.searthAction.setStatusTip('Открыть окно поиска.')\n self.searthAction.triggered.connect(self.Searchform)\n ###Разделение области\n #but = QPushButton('Отправить')\n self.title1 = QLabel('ФИО')\n self.title2 = QLabel('Системы')\n self.title3 = QLabel('Учетная запись')\n self.block2 = QLineEdit()\n self.block2.setAlignment(Qt.AlignTop)\n self.tableFIO = QTableWidget(self)\n self.tableFIO.setColumnCount(1)\n self.tableFIO.setRowCount(1)\n Itemtablefio = QTableWidgetItem('Test row')\n Itemtablefio.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)\n self.tableFIO.setItem(0, 0, Itemtablefio)\n self.tableSYS = QTableWidget()\n self.tableFIO.itemSelectionChanged.connect(self.testselect)\n grid = QGridLayout()\n grid.setSpacing(1)\n \n #Реализация интерфейса окна\n grid.addWidget(self.title1, 0, 0, alignment=Qt.AlignCenter)\n grid.addWidget(self.title2, 0, 2, alignment=Qt.AlignCenter)\n grid.addWidget(self.title3, 0, 4, alignment=Qt.AlignCenter)\n grid.setColumnMinimumWidth(1, 10)\n grid.setColumnMinimumWidth(3, 10)\n grid.setColumnStretch(0, 2)\n grid.setColumnStretch(2, 2)\n grid.setColumnStretch(4, 2)\n grid.setRowStretch(0, 1)\n grid.setRowStretch(1, 8)\n grid.addWidget(self.tableFIO, 1, 0)\n 
grid.addWidget(self.tableSYS, 1, 2)\n grid.addWidget(self.block2, 1, 4, alignment=Qt.AlignTop)\n \n ###Реализация статусбара\n self.statusBar()\n self.menubar = self.menuBar()\n fileMenu = self.menubar.addMenu('&Файл')\n file2Menu = self.menubar.addMenu('&Редактирование')\n file2Menu.addAction(self.addAction)\n file2Menu.addAction(self.editAction)\n fileMenu.addAction(self.conAction)\n fileMenu.addAction(self.setAction)\n fileMenu.addAction(self.searthAction)\n fileMenu.addSeparator()\n fileMenu.addAction(self.exitAction)\n ###\n centralWidget.setLayout(grid)\n self.resize(600, 400)\n self.center()\n self.setWindowTitle('Password cloud')\n self.show()\n\n\n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n \n def setting(self):\n self.Formset.setWindowModality(Qt.ApplicationModal) #Чтобы заблокировать основное окно\n self.Formset.show()\n def Searchform(self):\n self.Formsea.setWindowModality(Qt.ApplicationModal)\n self.Formsea.show()\n def Addform(self):\n self.Formadd.setWindowModality(Qt.ApplicationModal)\n self.Formadd.show()\n def Editform(self):\n self.Formedit.setWindowModality(Qt.ApplicationModal)\n self.Formedit.show()\n \n def testselect(self):\n a = self.menubar.actions()[0].text()\n print(a)\n\n #print(ex.title1)","repo_name":"kalan4iki/passcloud","sub_path":"forms/qtmain.py","file_name":"qtmain.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9319030619","text":"import argparse\nimport os\nimport shutil\nimport string\nimport boto3\n\ngreengrass_client = boto3.client('greengrassv2')\ns3_client = boto3.client('s3')\nsts_client = boto3.client('sts')\n\n\nclass ParseKwargs(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, dict())\n for value in values:\n key, value = value.split('=')\n getattr(namespace, self.dest)[key] = value\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-d', '--components-directory', type=str, required=True)\nparser.add_argument('-c', '--components', nargs='+', required=True)\nparser.add_argument('-v', '--variables', nargs='*', action=ParseKwargs)\n\n\ndef generate_recipe(component_name, version):\n component_variables = args.variables.copy()\n component_variables['component_version_number'] = version\n component_variables['component_name'] = component_name\n component_variables['artifacts_zip_file_name'] = component_name\n\n # substitute variables, and generate new recipe file\n with open('{}/recipe-template.yml'.format(args.components_directory), 'r') as input_recipe:\n src = string.Template(input_recipe.read())\n result = src.safe_substitute(component_variables)\n with open('{}/{}.yml'.format(args.components_directory, component_name),\n 'w') as output_recipe:\n output_recipe.write(result)\n\n\ndef create_component_version(component_name):\n print(component_name)\n print(args.components_directory) \n \n with open('{}/{}.yml'.format(args.components_directory, component_name), 'r') as recipe_file:\n recipe = recipe_file.read().encode()\n print(recipe)\n greengrass_client.create_component_version(\n inlineRecipe=recipe\n )\n\n\ndef get_component_version(component_name, fetch_next_version):\n versions = greengrass_client.list_component_versions(\n arn='arn:aws:greengrass:{}:{}:components:{}'.format(os.environ['AWS_REGION'],\n sts_client.get_caller_identity()['Account'], 
component_name)\n )['componentVersions']\n\n if len(versions) == 0:\n versions = greengrass_client.list_component_versions(\n arn='arn:aws:greengrass:{}:aws:components:{}'.format(os.environ['AWS_REGION'],\n component_name)\n )['componentVersions']\n\n if len(versions) > 0:\n current_version = versions[0]['componentVersion']\n else:\n return '0.0.0'\n\n current_versions = current_version.split('.')\n\n major = int(current_versions[0])\n minor = int(current_versions[1])\n micro = int(current_versions[2])\n\n if fetch_next_version:\n component_version = '{}.{}.{}'.format(major, minor, micro + 1)\n else:\n component_version = '{}.{}.{}'.format(major, minor, micro)\n return component_version\n\n\ndef archive_upload_artifacts(component_name, next_version):\n shutil.make_archive(base_name='{}'.format(component_name),\n format='zip',\n root_dir='{}'.format(args.components_directory),\n base_dir='artifacts')\n\n bucket_name = args.variables['s3_path'].split('s3://')[1].split('/')[0]\n key_prefix = args.variables['s3_path'].split(\"s3://\")[1].split('/')[1]\n\n s3_client.upload_file('{}.zip'.format(component_name), bucket_name,\n '{}/{}/{}/{}.zip'.format(key_prefix, component_name, next_version, component_name))\n\n\nif __name__ == \"__main__\":\n\n args = parser.parse_args()\n\n print(args)\n\n for component in args.components:\n next_component_version = get_component_version(component, True)\n generate_recipe(component, next_component_version)\n archive_upload_artifacts(component, next_component_version)\n create_component_version(component)\n","repo_name":"aws4embeddedlinux/demo-auto-aws-iotfleetwise","sub_path":"src/repo-fwe-ggv2-seed/tools/ggv2-component/build_component_version.py","file_name":"build_component_version.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"66"} +{"seq_id":"36712295778","text":"import pywebio.pin as pin\nfrom pywebio.output import *\nfrom pywebio import start_server\nfrom algorithm.predict import *\n\ndef page():\n\n img = open('../static_files/images/image_1.png', 'rb').read()\n put_image(img, width='100%', height ='120px')\n\n # get team names\n home_options, away_options = get_team_name()\n\n # Team select\n put_row(\n [pin.put_select(\"hometeam\", options=home_options, label='HomeTeam:'), None,\n pin.put_select(\"awayteam\", options=away_options, label='AwayTeam:')],\n size = '47% 6% 47%'\n ).show()\n\n put_row(\n [pin.put_slider('FTHG', label=\"FTHG:\",value=0, min_value=0, max_value=100, step=1), None,\n pin.put_slider('FTAG', label=\"FTAG:\",value=0, min_value=0, max_value=100, step=1)],\n size = '47% 6% 47%'\n )\n\n put_row(\n [pin.put_slider('HTHG', label=\"HTHG:\",value=0, min_value=0, max_value=100, step=1), None,\n pin.put_slider('HTAG', label=\"HTAG:\",value=0, min_value=0, max_value=100, step=1)],\n size = '47% 6% 47%'\n )\n\n put_row(\n [pin.put_slider('HS', label=\"Shots:\",value=0, min_value=0, max_value=100, step=1), None,\n pin.put_slider('AS', label=\"Shots:\",value=0, min_value=0, max_value=100, step=1)],\n size = '47% 6% 47%'\n )\n\n put_row(\n [pin.put_slider('HST', label=\"Shots on target:\",value=0, min_value=0, max_value=100, step=1), None,\n pin.put_slider('AST', label=\"Shots on target:\",value=0, min_value=0, max_value=100, step=1)],\n size = '47% 6% 47%'\n )\n # yellow card\n put_row(\n [pin.put_slider('HY', label=\"Yellow Card: \",value=0, min_value=0, max_value=100, step=1), None,\n pin.put_slider('AY', label=\"Yellow Card: \",value=0, min_value=0, 
max_value=100, step=1)],\n size = '47% 6% 47%'\n )\n # red card\n put_row(\n [pin.put_slider('HR', label=\"Red Cards: \",value=0, min_value=0, max_value=100, step=1), None,\n pin.put_slider('AR', label=\"Red Cards: \",value=0, min_value=0, max_value=100, step=1)],\n size = '47% 6% 47%'\n )\n put_row([None, put_buttons([\"Predict\"], lambda _: cal_prob()), None], size = '45% 100% 45%')\n\n\ndef cal_prob():\n if pin.pin['hometeam'] == pin.pin['awayteam']:\n popup('Warning', [\n put_html('

AwayTeam and HomeTeam can not be the same

'),\n ])\n return None\n\n\n input_data = {\n 'HomeTeam': pin.pin['hometeam'],\n 'AwayTeam': pin.pin['awayteam'],\n 'FTHG': pin.pin['FTHG'],\n 'FTAG': pin.pin['FTAG'],\n 'HTHG': pin.pin['HTHG'],\n 'HTAG': pin.pin['HTAG'],\n 'HS': pin.pin['HS'],\n 'AS': pin.pin['AS'],\n 'HST': pin.pin['HST'],\n 'AST': pin.pin['AST'],\n 'HY': pin.pin['HY'],\n 'AY': pin.pin['AY'],\n 'HR': pin.pin['HR'],\n 'AR': pin.pin['AR']\n }\n result = predict(input_data)\n\n\n with use_scope('result', clear=True):\n # show the predict win probability of each team\n probs = result['probs']\n put_progressbar('home_win', probs['H'], label='Home Win')\n put_text(str(round(probs['H']*100, 2)) + '%')\n put_progressbar('away_win', probs['A'], label='Away Win')\n put_text(str(round(probs['A']*100, 2)) + '%')\n put_progressbar('draw', probs['D'], label='Draw')\n put_text(str(round(probs['D']*100, 2)) + '%')\n\n # show the predict odd of each team\n odds = result['odds']\n put_grid([\n [put_text('Home Odd'), put_text('Away Odd'), put_text('Draw Odd')],\n [put_text(odds['H']), put_text(odds['A']), put_text(odds['D'])],\n ], cell_width='33%', cell_height='50px')\n\n\nif __name__ == \"__main__\":\n start_server(page, port=8080)","repo_name":"ACM40960/project-TongSun","sub_path":"algorithm/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33243328411","text":"# funcion comun y corriente\ndef generaPares(limite):\n num = 1\n miLista = []\n while num < limite:\n miLista.append(num*2)\n num += 1\n return miLista\n\n# print(generaPares(10))\n\n# con generador\n\n\ndef generaPares2(limite):\n num = 1\n while num < limite:\n yield num*2\n num += 1\n\n\n# devuelvePares = generaPares2(10)\n# for i in devuelvePares:\n# print(i)\ndevuelvePares = generaPares2(10)\nprint(next(devuelvePares))\nprint(\"Aqui podría haber mas codigo\")\nprint(next(devuelvePares))\nprint(\"Aqui podría haber mas codigo\")\nprint(next(devuelvePares))\n","repo_name":"Jorgefebres/python-tut-pildoras","sub_path":"generadores.py","file_name":"generadores.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73751289810","text":"import sys\nsys.setrecursionlimit(5000)\ndef makeGroup(links):\n groups = {}\n involved = [[] for _ in range((len(links) + 2))]\n for link in links:\n # print(link)\n if not groups.get(link[0]):\n groups[link[0]] = [link[1]]\n else:\n groups[link[0]].append(link[1])\n result = []\n for group in groups:\n result.append([group] + groups[group])\n print(result)\n for group in range(0,len(result)):\n for member in result[group]:\n print(member)\n involved[member].append(group)\n print(involved)\n print(involved)\n return result\n\ndef dfs(cost,dep,team,heGo,costs):\n # print(cost,dep,team,heGo,costs)\n global maxCost\n global count\n # if cost >= maxCost:\n # return\n if dep >= len(team):\n # for he in range(0,len(heGo)):\n # if(heGo[he]):\n # print(he,\"번\",end=\" \")\n # print(\"참석\")\n count+=1\n if cost <= maxCost:\n maxCost = cost\n return\n \n \n for man in team[dep]:\n if heGo[man]:\n dfs(cost,dep+1,team,heGo,costs)\n else:\n heGo[man] = 1\n dfs(cost+costs[man],dep+1,team,heGo,costs)\n heGo[man] = 0\n \n \n \ndef solution(sales, links):\n global maxCost\n global count\n count = 0\n maxCost = 10000\n num = len(sales)\n answer = 0\n heGo = [0] * (num+1)\n teams = makeGroup(links)\n costs = [0] * (num+1)\n # print(teams)\n 
for i in range(len(sales)):\n costs[i+1] = sales[i]\n \n dfs(0,0,teams,heGo,costs)\n print(\"abc\",maxCost,count)\n return maxCost\nprint(solution([14, 17, 15, 18, 19, 14, 13, 16, 28, 17],[[10, 8], [1, 9], [9, 7], [5, 4], [1, 5], [5, 10], [10, 6], [1, 3], [10, 2]]))","repo_name":"airpong/TIL","sub_path":"algorithm/kakao/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21948249683","text":"from django.shortcuts import render, redirect, HttpResponse, get_object_or_404\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom .models import Bug\nfrom .forms import BugForm\n\n# Views for Home App below\n\n@login_required\ndef bugs(request):\n \"\"\" view returns bugs page \"\"\"\n\n add_bug_form = BugForm()\n\n user = get_object_or_404(User, username=request.user)\n if user.is_superuser:\n bugs = Bug.objects.all().order_by('-date_added')\n urgent_bugs = bugs.filter(urgency=3)\n important_bugs = bugs.filter(urgency=2)\n annoying_bugs = bugs.filter(urgency=1)\n else: \n return redirect('report_bug')\n # handles add_bug form\n if request.method == 'POST':\n if 'description' in request.POST:\n add_bug_form = BugForm(request.POST)\n if add_bug_form.is_valid():\n add_bug_form.save()\n return redirect('bugs')\n context = {\n 'bugs': bugs,\n 'urgent_bugs': urgent_bugs,\n 'important_bugs': important_bugs,\n 'annoying_bugs': annoying_bugs,\n 'add_bug_form': add_bug_form\n }\n return render(request, 'bugs/bugs.html', context)\n\n\ndef report_bug(request):\n \"\"\" view to inspect bug in detail \"\"\"\n \n add_bug_form = BugForm()\n \n if request.method == 'POST':\n if 'description' in request.POST:\n add_bug_form = BugForm(request.POST)\n if add_bug_form.is_valid():\n new_bug = add_bug_form.save()\n messages.info(\n request, f'\"{new_bug.name}\" has been reported to the bug squad.'\n )\n return redirect('index')\n \n context = {\n 'add_bug_form': add_bug_form\n }\n return render(request, 'bugs/report_bug.html', context)\n\n\ndef toggle_status(request, bug_id):\n \"\"\" toggle the complete/incomplete status of bugs \"\"\"\n bug = get_object_or_404(Bug, id=bug_id)\n bug.status = not bug.status # invert bug status\n bug.save()\n return redirect('bugs')\n\n\ndef update_urgency(request, bug_id, direction):\n \"\"\" increase or decrease bug urgency \"\"\"\n bug = get_object_or_404(Bug, id=bug_id)\n if direction == \"inc\":\n bug.urgency += 1\n else:\n bug.urgency -= 1\n bug.save()\n return redirect('bugs')\n\n\ndef edit_bug_description(request, bug_id):\n\n \"\"\" view will edit bug description \"\"\"\n if request.POST: \n bug = get_object_or_404(Bug, id=bug_id)\n bug.description = request.POST['description']\n bug.save()\n\n return redirect('bugs')\n\ndef edit_bug_name(request, bug_id):\n\n \"\"\" view will edit bug description \"\"\"\n if request.POST: \n bug = get_object_or_404(Bug, id=bug_id)\n bug.name = request.POST['name']\n bug.save()\n\n return redirect('bugs')\n\ndef delete_bug(request, bug_id):\n \"\"\" view will delete bug forever \"\"\"\n bug = get_object_or_404(Bug, id=bug_id)\n bug.delete()\n return redirect('bugs')\n","repo_name":"devmegan/abius","sub_path":"bugs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"7292500224","text":"#!/usr/bin/env 
python\n# coding=utf-8\n\nfrom __future__ import absolute_import, division, print_function\nfrom .due import due, Doi\n\n__all__ = []\n\n\n# Use duecredit (duecredit.org) to provide a citation to relevant work to\n# be cited. This does nothing, unless the user has duecredit installed,\n# And calls this with duecredit (as in `python -m duecredit script.py`):\ndue.cite(Doi(\"\"),\n description=\"Buzznauts: NMA-DL 2021 elated-buzzwards pod project\",\n tags=[\"NMA-elated buzzards-Buzznauts-project\"],\n path='Buzznauts')\n","repo_name":"eduardojdiniz/Buzznauts","sub_path":"Buzznauts/Buzznauts.py","file_name":"Buzznauts.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"1511149457","text":"# AEGIS - Distribution System Visualization Software\r\n# Visual tool for visualizing data\r\n# 9/25/22\r\n\r\n# Internal Imports\r\nfrom deleteData import *\r\nfrom Data_to_Node import *\r\nfrom constants import *\r\nfrom classes import *\r\nimport itertools\r\nfrom deletionAlg import deletionAlg\r\nfrom AEGIS_select import selectPhase, selectVolt\r\nimport pandas as pd\r\nimport networkx\r\nfrom bokeh.models import Range1d, Circle, MultiLine, \\\r\n NodesAndLinkedEdges, HoverTool, Div, Button, LinearColorMapper, ColorBar, TapTool\r\nfrom bokeh.plotting import figure, curdoc\r\nfrom bokeh.plotting import from_networkx\r\nfrom bokeh.models import NodesAndLinkedEdges, CheckboxGroup, CustomJS, AutocompleteInput, Slider, RadioButtonGroup, ColumnDataSource\r\nfrom importData_HH import importData_HH\r\nfrom bokeh.layouts import row, column\r\nfrom ImportCSV import *\r\nfrom nodeNormalize import *\r\n\r\n# Import Constant Data\r\nfeederName = \"Feeder_2_Solar_25\"\r\nnodeData, branchData, loadData = importData_HH(feederName + '_mod_branch_data.txt', feederName + '_mod_node_data.txt', feederName + '_mod_load_data.txt') #puts node information into corresponding class type in dictionary\r\n\r\n#gets name of node which load feeds off of\r\nfor i in loadData.keys():\r\n loadData[i].node = data_to_node(nodeData, i)\r\n \r\n# Import Data over Time, instsa are the estimated voltage at each instance\r\nlongNames, insts = csvRead(feederName + '_ResidentialVoltages.csv')\r\nlongNodeNames, nodeInsts = csvRead(feederName + '_node_voltage_a.csv')\r\nlongNN2, bNodeInsts = csvRead(feederName + '_node_voltage_b.csv')\r\nlongNN3, cNodeInsts = csvRead(feederName + '_node_voltage_c.csv')\r\nlongTripNames, tripInsts = csvRead(feederName + '_triplex_node.csv')\r\n\r\n# Removes blank/uneeded data from names\r\nlongNames = longNames[1:]\r\nlongNodeNames = longNodeNames[1:]\r\nlongTripNames = longTripNames[1:]\r\n\r\ntimes = [] # All times where data is recorded\r\n\r\n# Puts times in order and sorts data points by object\r\nfor i in range(len(insts)):\r\n times.append(insts[i][0]) #Creates list of times in order from csv\r\n insts[i] = insts[i][1:] # Insts is a list of lists, where each inner list is all data recorded for objects at point in time\r\n\r\n#Creates list of the measured voltages\r\nnodeMeasVol = []\r\nnodeMeasVolB = []\r\nnodeMeasVolC = []\r\nfor i in range(len(nodeInsts)):\r\n nodeMeasVolList = []\r\n nodeMeasVolListB = []\r\n nodeMeasVolListC = []\r\n nodeInsts[i] = nodeInsts[i][1:]\r\n bNodeInsts[i] = bNodeInsts[i][1:]\r\n cNodeInsts[i] = cNodeInsts[i][1:]\r\n\r\n #Finds magnitude of complex voltages\r\n for j in range(len(nodeInsts[i])):\r\n nodeInsts[i][j] = complex(nodeInsts[i][j])\r\n bNodeInsts[i][j] = 
complex(bNodeInsts[i][j])\r\n cNodeInsts[i][j] = complex(cNodeInsts[i][j])\r\n nodeMeasVolList.append(complex(nodeInsts[i][j]))\r\n nodeMeasVolListB.append(complex(bNodeInsts[i][j]))\r\n nodeMeasVolListC.append(complex(cNodeInsts[i][j]))\r\n nodeInsts[i][j] = ((nodeInsts[i][j].real**2 + nodeInsts[i][j].imag**2)**0.5)\r\n bNodeInsts[i][j] = ((bNodeInsts[i][j].real**2 + bNodeInsts[i][j].imag**2)**0.5)\r\n cNodeInsts[i][j] = ((cNodeInsts[i][j].real**2 + cNodeInsts[i][j].imag**2)**0.5)\r\n nodeMeasVol.append(nodeMeasVolList)\r\n nodeMeasVolB.append(nodeMeasVolListB)\r\n nodeMeasVolC.append(nodeMeasVolListC)\r\n\r\ntripMeasVol = []\r\nfor i in range(len(tripInsts)):\r\n tripMeasVolList = []\r\n tripInsts[i] = tripInsts[i][1:]\r\n for j in range(len(tripInsts[i])):\r\n tripInsts[i][j] = complex(tripInsts[i][j].replace('i', 'j'))\r\n tripMeasVolList.append(tripInsts[i][j])\r\n tripInsts[i][j] = ((tripInsts[i][j].real**2 + tripInsts[i][j].imag**2)**0.5)\r\n tripMeasVol.append(tripMeasVolList)\r\n\r\nshName = [] #Initializes for short names\r\ndataDict = {} #empty dictionary for data\r\naDataDict = {}\r\nbDataDict = {}\r\ncDataDict = {}\r\nunNormData= {}\r\naUnNorm = {}\r\nbUnNorm = {}\r\ncUnNorm = {}\r\nfor i in range(len(longNames)):\r\n if data_to_node(nodeData, longNames[i]):\r\n sName = data_to_node(nodeData, longNames[i])\r\n dataDict[sName] = [] #Initializes short values as dictionaries with an empty list value\r\n unNormData[sName] = []\r\n dataDict[sName].append([data[i] for data in insts])\r\n unNormData[sName].append([data[i] for data in insts])\r\nfor i in range(len(longNodeNames)):\r\n if data_to_node(nodeData, longNodeNames[i]):\r\n sName = data_to_node(nodeData, longNodeNames[i])\r\n dataDict[sName] = [] #Initializes short values as dictionaries with an empty list value\r\n unNormData[sName] = []\r\n aDataDict[sName] = [] #Creates dictionary for a phase\r\n aUnNorm[sName] = []\r\n bDataDict[sName] = []\r\n bUnNorm[sName] = []\r\n cDataDict[sName] = []\r\n cUnNorm[sName] = []\r\n dataDict[sName].append([data[i] for data in nodeInsts]) #Adds data as a value for a dictionary\r\n unNormData[sName].append([data[i] for data in nodeInsts]) #Another dictionary that will not be normalized\r\n aDataDict[sName].append([data[i] for data in nodeInsts]) #Does the same for all different phases\r\n aUnNorm[sName].append([data[i] for data in nodeInsts])\r\n bDataDict[sName].append([data[i] for data in bNodeInsts])\r\n bUnNorm[sName].append([data[i] for data in bNodeInsts])\r\n cDataDict[sName].append([data[i] for data in cNodeInsts])\r\n cUnNorm[sName].append([data[i] for data in cNodeInsts])\r\nfor i in range(len(longTripNames)): #Does the same as above for triplex nodes\r\n if data_to_node(nodeData, longTripNames[i]):\r\n sName = data_to_node(nodeData, longTripNames[i])\r\n dataDict[sName] = []\r\n unNormData[sName] = []\r\n dataDict[sName].append([data[i] for data in tripInsts])\r\n unNormData[sName].append([data[i] for data in tripInsts])\r\n\r\n#Normalizes all data in dictionaries\r\nfor key in dataDict.keys():\r\n for i in range(len(dataDict[key])):\r\n for j in range(len(dataDict[key][i])):\r\n dataDict[key][i][j] = nodeNormalize(nodeData, key, dataDict[key][i][j]) \r\n\r\n#Same as above for different phases \r\nfor key in aDataDict.keys():\r\n for i in range(len(aDataDict[key])):\r\n for j in range(len(aDataDict[key][i])):\r\n aDataDict[key][i][j] = nodeNormalize(nodeData, key, aDataDict[key][i][j]) \r\n bDataDict[key][i][j] = nodeNormalize(nodeData, key, bDataDict[key][i][j]) \r\n 
cDataDict[key][i][j] = nodeNormalize(nodeData, key, cDataDict[key][i][j]) \r\n\r\n# Main Function\r\ndef main():\r\n print(\"AEGIS\")\r\n\r\n # Create data frame for connecting nodes\r\n nodeFro = [fBranch.fromNode for fBranch in branchData.values()]\r\n nodeTo = [tBranch.toNode for tBranch in branchData.values()]\r\n for load in loadData.keys():\r\n nodeFro.append(loadData[load].node)\r\n nodeTo.append(load)\r\n connection = pd.DataFrame({'from': nodeFro,\r\n 'to': nodeTo})\r\n\r\n node_highlight_color = 'white'\r\n edge_highlight_color = 'black'\r\n\r\n # Create dict for node characteristics\r\n node_color_dict = {}\r\n\r\n #Color map for displaying changes over time\r\n color_mapper = LinearColorMapper(palette = \"Turbo256\", low = 0.95, high = 1.05, nan_color= 'lightgrey') \r\n\r\n #Creates dictionary for branch color\r\n bColor= {}\r\n for branch in branchData.values():\r\n bColor[branch.fromNode, branch.toNode] = 'black'\r\n for load in loadData.values():\r\n bColor[load.node, load.label] = 'black'\r\n\r\n #Used to control color of node outline\r\n outline_dict = dict(zip(itertools.chain([n for n in nodeData.keys()], [l for l in loadData.keys()]), ['black']* (len(nodeData) + len(loadData))))\r\n\r\n #Creates a graph object contining nodes and edges from all connections\r\n g = networkx.from_pandas_edgelist(connection, 'from', 'to')\r\n\r\n #Creating From node dictionary\r\n fromNodeDict = {}\r\n for node in nodeData.values():\r\n if node.index != 1:\r\n fromNodeDict[node.label] = node.fromBranch.fromNode\r\n else:\r\n fromNodeDict[node.label] = 'N/A'\r\n for load in loadData.values():\r\n fromNodeDict[load.label] = load.node\r\n\r\n #Creates dictionary for so normalized values can be displayed with hover function\r\n valueDictionary = {}\r\n for node in nodeData.values():\r\n if node.label in dataDict.keys() and 'A' in node.phases:\r\n valueDictionary[node.label] = dataDict[node.label][0][0]\r\n else:\r\n valueDictionary[node.label] = 'N/A'\r\n for load in loadData.values():\r\n valueDictionary[load.label] = valueDictionary[load.node]\r\n\r\n #Sets attributes which will be displayed by hover function\r\n networkx.set_node_attributes(g, dict(zip(itertools.chain([n.label for n in nodeData.values()], [l for l in loadData.keys()]), itertools.chain([n.baseV for n in nodeData.values()], [nodeData[l.node].baseV for l in loadData.values()]))), 'node_base_v')\r\n networkx.set_node_attributes(g, dict(zip(itertools.chain([n.label for n in nodeData.values()], [l for l in loadData.keys()]), itertools.chain([n.phases for n in nodeData.values()], [nodeData[l.node].phases for l in loadData.values()]))), 'node_phase')\r\n networkx.set_node_attributes(g, node_color_dict, 'node_color')\r\n networkx.set_node_attributes(g, fromNodeDict, 'fromNode')\r\n networkx.set_node_attributes(g, outline_dict, 'border_color')\r\n networkx.set_node_attributes(g, valueDictionary, 'sim_vals')\r\n networkx.set_node_attributes(g, dict(zip(itertools.chain([n.label for n in nodeData.values()], [l for l in loadData.keys()]), itertools.chain([15]*len(nodeData), [10]*len(loadData)))), 'size')\r\n node_outline= 'border_color'\r\n lineType = {}\r\n lineLabel = {}\r\n fromNode = {}\r\n linePhases = {}\r\n toNode = {}\r\n\r\n #This program plots branches by inputting connected nodes, so this makes a dictionary with attributes\r\n for b in branchData.values():\r\n [a, c] = b.fromNode, b.toNode\r\n lineType[a, c] = b.type\r\n lineLabel[a, c] = b.label\r\n fromNode[a, c] = a\r\n toNode[a, c] = c\r\n linePhases[a, c] = 
b.phases\r\n\r\n #Associated Branch Attributes with visualization location\r\n networkx.set_edge_attributes(g, lineType, 'type')\r\n networkx.set_edge_attributes(g, linePhases, 'phase')\r\n networkx.set_edge_attributes(g, lineLabel, 'label')\r\n networkx.set_edge_attributes(g, fromNode, 'from')\r\n networkx.set_edge_attributes(g, toNode, 'to')\r\n networkx.set_edge_attributes(g, bColor, 'branch_color')\r\n edge_cmap= 'branch_color'\r\n\r\n #Creates figure object with some desired widgets\r\n plot = figure(\r\n tools=\"pan,wheel_zoom,save,reset\", active_scroll='wheel_zoom',\r\n x_range=Range1d(-30.1, 30.1), y_range=Range1d(-30.1, 30.1))\r\n \r\n\r\n #Imports graph from networkx\r\n network_graph = from_networkx(g, networkx.kamada_kawai_layout, scale=30, center=(0, 0))\r\n\r\n def clickCall1(attr, old, new):\r\n #Checks that node is being selected, not deselected\r\n if(network_graph.node_renderer.data_source.selected.indices != []):\r\n\r\n #Finds selected nodes and plots its data over time\r\n nameClicked = list(itertools.chain(list(nodeData.keys()), list(loadData.keys())))[new[0]]\r\n if nameClicked in loadData.keys():\r\n nodeClicked = loadData[nameClicked].node #House voltages are the same as the node that they feed from\r\n else:\r\n nodeClicked = nameClicked\r\n\r\n if(nodeClicked in unNormData.keys()):\r\n if nodeClicked in nodeData.keys() and len(nodeData[nodeClicked].phases) > 2: #Checks if the object clicked has multiple phases\r\n source = ColumnDataSource(data=dict(aVoltage = aUnNorm[nodeClicked][0], bVoltage = bUnNorm[nodeClicked][0], cVoltage = cUnNorm[nodeClicked][0], time = times, inds = [x + 1 for x in range(len(times))])) # dates = time, nodes = energy value at that date for the node\r\n # Sets plot attributes\r\n p = figure(title = ('Voltage Over Time for ' + nameClicked))\r\n p.title.text_font_size = '20pt'\r\n p.yaxis.axis_label = 'Voltage (V)'\r\n p.xaxis.axis_label = 'Time'\r\n p.yaxis.axis_label_text_font_size = '15pt'\r\n p.xaxis.axis_label_text_font_size = '15pt'\r\n p.line('inds', 'aVoltage', source=source, line_color= 'red', legend_label= 'A Phase')\r\n p.line('inds', 'bVoltage', source=source, line_color= 'blue', legend_label= 'B Phase')\r\n p.line('inds', 'cVoltage', source=source, line_color= 'yellow', legend_label= 'C Phase')\r\n p.add_tools(HoverTool(tooltips=[('A Phase Voltage', '@aVoltage'), ('B Phase Voltage', '@bVoltage'), ('C Phase Voltage', '@cVoltage'), ('Time', '@time')]))\r\n\r\n elif nodeClicked in nodeData.keys() and len(nodeData[nodeClicked].phases) == 2 and 'N' in nodeData[nodeClicked].phases:\r\n p = figure(title = ('Voltage Over Time for ' + nameClicked))\r\n p.title.text_font_size = '20pt'\r\n p.yaxis.axis_label = 'Voltage (V)'\r\n p.xaxis.axis_label = 'Time' \r\n p.yaxis.axis_label_text_font_size = '15pt'\r\n p.xaxis.axis_label_text_font_size = '15pt'\r\n if nodeData[nodeClicked].phases == 'AN':\r\n source = ColumnDataSource(data=dict(voltage = aUnNorm[nodeClicked][0], time = times, inds = [x + 1 for x in range(len(times))])) # dates = time, nodes = energy value at that date for the node\r\n if nodeData[nodeClicked].phases == 'BN':\r\n source = ColumnDataSource(data=dict(voltage = bUnNorm[nodeClicked][0], time = times, inds = [x + 1 for x in range(len(times))])) # dates = time, nodes = energy value at that date for the node\r\n if nodeData[nodeClicked].phases == 'CN':\r\n source = ColumnDataSource(data=dict(voltage = cUnNorm[nodeClicked][0], time = times, inds = [x + 1 for x in range(len(times))])) # dates = time, nodes = energy value at 
that date for the node\r\n p.line('inds', 'voltage', source=source)\r\n p.add_tools(HoverTool(tooltips=[('Voltage', '@voltage'), ('Time', '@time')]))\r\n\r\n else:\r\n source = ColumnDataSource(data=dict(voltage = unNormData[nodeClicked][0], time = times, inds = [x + 1 for x in range(len(times))])) # dates = time, nodes = energy value at that date for the node\r\n p = figure(title = ('Voltage Over Time for ' + nameClicked))\r\n p.title.text_font_size='20pt'\r\n p.line('inds', 'voltage', source=source)\r\n p.yaxis.axis_label = 'Voltage (V)'\r\n p.xaxis.axis_label = 'Time'\r\n p.yaxis.axis_label_text_font_size = '15pt'\r\n p.xaxis.axis_label_text_font_size = '15pt'\r\n #p.x_range = Range1d(1, len(unNormData[nodeClicked][0]))\r\n #p.y_range = Range1d(230, 245)\r\n p.add_tools(HoverTool(tooltips=[('Voltage', '@voltage'), ('Time', '@time')]))\r\n \r\n\r\n #Finds if a plot is present and either replaces, deletes, or puts up plot accordingly\r\n if (len(r.children[1].children) == 7):\r\n r.children[1].children.append(p)\r\n else:\r\n r.children[1].children[7] = p\r\n else:\r\n del r.children[1].children[7] #deletes plot of data over time if no node is clicked\r\n else:\r\n del r.children[1].children[7] #deletes plot of data over time if no node is clicked\r\n \r\n def clickCall2(attr, old, new):\r\n if(network_graph.edge_renderer.data_source.selected.indices != []):\r\n branchClicked = list(branchData.keys())[new[0]]\r\n bColor[branchData[branchClicked].fromNode, branchData[branchClicked].toNode] = 'orange'\r\n\r\n \r\n #Sets Nodes to be Colored Via color map and simulated values\r\n network_graph.node_renderer.glyph = Circle(size='size', fill_color={'field' : 'sim_vals', 'transform' : color_mapper}, line_color= node_outline)\r\n # Set edge opacity and width\r\n network_graph.edge_renderer.glyph = MultiLine(line_alpha=1, line_width=0.5, line_color= edge_cmap)\r\n\r\n #Adds tap capability to nodes and branches\r\n tap1 = TapTool(renderers=[network_graph.node_renderer])\r\n network_graph.node_renderer.nonselection_glyph = Circle(fill_alpha=0.4, fill_color={'field' : 'sim_vals', 'transform' : color_mapper})\r\n tap2 = TapTool(renderers=[network_graph.edge_renderer])\r\n network_graph.edge_renderer.nonselection_glyph = MultiLine(line_alpha=1)\r\n\r\n # Hover function for node portion of inital data\r\n hover_nodes = HoverTool(\r\n tooltips= [(\"Node\", \"@index\"), (\"Base Voltage(V)\", \"@node_base_v\"), (\"Phase\", \"@node_phase\"), (\"From Node\", \"@fromNode\"), (\"Simulated Voltage\", \"@sim_vals\")],\r\n renderers= [network_graph.node_renderer]\r\n )\r\n\r\n # Hover function for branch portion of initial data\r\n hover_edges = HoverTool(\r\n\t\t\t\t tooltips=[('Branch', '@label'), ('Type','@type'), (\"Phase\", \"@phase\"), (\"From\", \"@from\"), (\"To\", \"@to\")],#, (\"Length\", \"@length\")],\r\n\t\t\t\t renderers=[network_graph.edge_renderer],\r\n line_policy= 'interp'\r\n\t\t\t\t )\r\n\r\n plot.add_tools(hover_edges, hover_nodes, tap1, tap2)\r\n\r\n #callback when things are clicked\r\n network_graph.node_renderer.data_source.selected.on_change('indices',clickCall1)\r\n network_graph.edge_renderer.data_source.selected.on_change('indices',clickCall2)\r\n\r\n # Set edge highlight colors\r\n network_graph.edge_renderer.selection_glyph = MultiLine(line_color=node_highlight_color, line_width=2)\r\n network_graph.edge_renderer.hover_glyph = MultiLine(line_color=edge_highlight_color, line_width=2)\r\n network_graph.selection_policy = NodesAndLinkedEdges()\r\n network_graph.inspection_policy = 
NodesAndLinkedEdges()\r\n\r\n # Add network graph to the plot\r\n plot.renderers.append(network_graph)\r\n\r\n #Makes Rectangle Demonstrating Color Range\r\n color_bar = ColorBar(color_mapper = color_mapper,\r\n label_standoff = 14,\r\n location = (0,0),\r\n title = 'Plot')\r\n plot.add_layout(color_bar, 'right')\r\n\r\n #Checkboxes\r\n def checkCallback(attr, old, new):\r\n altNodes, altBranches = selectPhase(new, nodeData, branchData) #Finds comps containing selected phases\r\n if len(new) < len(old): #True if a box has been deselected\r\n gNodes = []\r\n gPhaseBranches= []\r\n for node in nodeData.keys():\r\n if node not in altNodes: #Finds nodes which are not to be colorful because of phase\r\n nodeData[node].value[0] = False #marks this on node\r\n gNodes.append(node)\r\n \r\n for branch in branchData.keys():\r\n if branch not in altBranches and branch not in gPhaseBranches: #Finds branches not included\r\n branchData[branch].value[0] = False #marks on branch\r\n gPhaseBranches.append(branch)\r\n gOut(gNodes, gPhaseBranches) #greys other components\r\n\r\n else:\r\n #Components not inactive due to phase\r\n for i in altNodes:\r\n nodeData[i].value[0] = True\r\n \r\n for i in altBranches:\r\n branchData[i].value[0] = True\r\n restore(altNodes, altBranches)\r\n\r\n def nodeUpdate(index):\r\n\r\n # Changes simulated values to different time instance\r\n \r\n phaseFunc(['A', 'B', 'C'][phaseButtons.active], [aDataDict, bDataDict, cDataDict][phaseButtons.active])\r\n network_graph.node_renderer.data_source.data['sim_vals'] = list(valueDictionary.values())\r\n #Alters Colors According to New Values\r\n network_graph.node_renderer.glyph.fill_color = {'field' : 'sim_vals', 'transform' : color_mapper}\r\n \r\n\r\n def checkVCallback(attr, old, new): \r\n if len(old) > len(new): #true if value has been deselected\r\n for i in old:\r\n if i not in new: #Finds deselected voltage\r\n greyN = selectVolt(voltages[i], nodeData) #returns nodes of said voltage\r\n \r\n #marks these values grey via voltage\r\n for i in greyN:\r\n nodeData[i].value[1] = False\r\n gOut(greyN, [])\r\n\r\n else:\r\n for i in new:\r\n if i not in old: #values which have been selected\r\n colorN = selectVolt(voltages[i], nodeData) #Finds components which should have color\r\n \r\n #Marks components to nto be grey via voltage\r\n for i in colorN:\r\n nodeData[i].value[1] = True\r\n\r\n restore(colorN, []) \r\n\r\n #Phase Checkbox Widget\r\n LABELS= ['A Phase', 'B Phase', 'C Phase']\r\n checkbox_group = CheckboxGroup(labels=LABELS, active=[0,1,2])\r\n checkbox_group.js_on_event('button_click', CustomJS(code=\"\"\"\r\n console.log('checkbox_group: active=' + this.origin.active, this.toString())\r\n \"\"\"))\r\n checkbox_group.on_change('active', checkCallback)\r\n\r\n #Voltage Checkbox Widget\r\n voltages = list(set([v.baseV for v in nodeData.values()])) #Creates a list of all different voltage values in figure\r\n voltages.sort() #Sorts values\r\n VOLTAGES= []\r\n for v in voltages:\r\n VOLTAGES.append(str(int(v)) + ' V') #Adds a V after number value\r\n checkbox_v = CheckboxGroup(labels=VOLTAGES, active=list(range(len(voltages))))\r\n checkbox_v.js_on_event('button_click', CustomJS(code=\"\"\"\r\n console.log('checkbox_group: active=' + this.origin.active, this.toString())\r\n \"\"\"))\r\n checkbox_v.on_change('active', checkVCallback)\r\n \r\n def bCallback(attr, old, new): #activated when node is entered\r\n if new != '': #makes sure this is not the value resetting\r\n if bColor[branchData[new].fromNode, 
branchData[new].toNode] == 'lightgrey':\r\n textVal = div.text[21:] #Reads text of inactive nodes, not including the inactive components: part\r\n textVal = textVal.split(', ') #splits to get a list of inactive parts\r\n #Formats new text block\r\n if new in textVal:\r\n textVal.remove(new)\r\n if len(textVal):\r\n textVal[0] = 'Inactive Components: ' + textVal[0]\r\n if len(textVal) > 1:\r\n textVal = ', '.join(textVal)\r\n else:\r\n textVal = textVal[0]\r\n else:\r\n textVal = ''\r\n\r\n nBranch, nNode = deletionAlg(nodeData, branchData, branchData[new].toNode)\r\n nBranch.append(new)\r\n\r\n #Nodes components that theya re no longer greyed for something higher on the system becoming inactive\r\n for n in nNode:\r\n nodeData[n].value[2] = True\r\n \r\n for b in nBranch:\r\n branchData[b].value[1] = True\r\n \r\n restore(nNode, nBranch)\r\n else:\r\n if not div.text: #sees if there is any text for inactive nodes\r\n textVal= 'Inactive Components: '\r\n else:\r\n textVal= div.text\r\n\r\n gBranch, gNode= deletionAlg(nodeData, branchData, branchData[new].toNode) #returns branches and nodes to be greyed out\r\n gBranch.append(new)\r\n\r\n #Shows where node was greyed in dictionary\r\n for n in gNode:\r\n nodeData[n].value[2] = False\r\n \r\n for b in gBranch:\r\n branchData[b].value[1] = False\r\n \r\n deleteData('R2_1247_3_t11_mod_node_data_1.txt', 'node_Output.txt', [n.label for n in nodeData.values() if n.value[2] == False])\r\n deleteData('R2_1247_3_t11_mod_branch_data_1.txt', 'branch_Output.txt', [b.label for b in branchData.values() if b.value[1] == False])\r\n\r\n gOut(gNode, gBranch) #grey-out function\r\n\r\n if textVal == 'Inactive Components: ': #sees if node is the first node in list\r\n textVal = textVal + new\r\n else:\r\n textVal = textVal + ', ' + new\r\n branch_text_input.update(value='')#empties input box so same value can be put in again\r\n\r\n div.update(text=textVal)\r\n\r\n def callback(attr, old, new): #activated when node is entered\r\n if new != '': #Makes sure this is not the text box emptying\r\n if node_color_dict[new] == 'lightgrey':\r\n textVal = div.text[21:] #Reads text of inactive nodes, not including the inactive components: part\r\n textVal = textVal.split(', ') #splits to get a list of inactive parts\r\n\r\n if new in textVal: #Checks if the component entered is in the list of inactive parts\r\n textVal.remove(new) #Removes component\r\n #Checks formatting changes needed\r\n if len(textVal):\r\n textVal[0] = 'Inactive Components: ' + textVal[0]\r\n if len(textVal) > 1:\r\n textVal = ', '.join(textVal)\r\n else:\r\n textVal = textVal[0]\r\n else:\r\n textVal = ''\r\n #Finds components feeding off of entered component\r\n\r\n nBranch, nNode = deletionAlg(nodeData, branchData, new)\r\n #Marks that components are not inactive due to upstream deactivation\r\n for n in nNode:\r\n nodeData[n].value[2] = True\r\n \r\n for b in nBranch:\r\n branchData[b].value[1] = True\r\n \r\n restore(nNode, nBranch)\r\n else:\r\n if not div.text: #sees if there is any text for inactive nodes\r\n textVal= 'Inactive Components: '\r\n else:\r\n textVal= div.text\r\n if textVal == 'Inactive Components: ': #sees if node is the first node in list\r\n textVal = textVal + new\r\n else:\r\n textVal = textVal + ', ' + new\r\n \r\n gBranch, gNode= deletionAlg(nodeData, branchData, new) #returns branches and nodes to be greyed out\r\n\r\n #Marks in node and branch objects that they are inactive int this form\r\n for n in gNode:\r\n nodeData[n].value[2] = False\r\n \r\n for b in 
gBranch:\r\n branchData[b].value[1] = False\r\n \r\n deleteData('R2_1247_3_t11_mod_node_data_1.txt', 'node_Output.txt', [n.label for n in nodeData.values() if n.value[2] == False])\r\n deleteData('R2_1247_3_t11_mod_branch_data_1.txt', 'branch_Output.txt', [b.label for b in branchData.values() if b.value[1] == False])\r\n\r\n gOut(gNode, gBranch)\r\n text_input.update(value='') #Empties text box\r\n div.update(text=textVal) #Updates text block of deactivated parts\r\n \r\n def gOut(gNode, gBranch):\r\n #Function when components need to be turned gray \r\n for i in gNode:\r\n if node_color_dict[i] != 'lightgrey': #checks if node is alreasy inactive\r\n node_color_dict[i] = 'lightgrey' #changes color in color dictionary\r\n outline_dict[i] = 'lightgrey' #outline color change\r\n\r\n #Assigns light grey to inactive branches\r\n for branch in gBranch:\r\n bColor[branchData[branch].fromNode, branchData[branch].toNode] = 'lightgrey'\r\n update()\r\n\r\n def restore(nodes, edges):\r\n #Function which changes certain attibutes if parts no longer need to be gray\r\n for i in nodes:\r\n if(node_color_dict[i] != 'skyblue' and node_color_dict[i] != 'red' and node_color_dict[i] != 'yellow'): #Checks if node has been deactivated by another source\r\n #Checks for node color based on color\r\n if nodeData[i].index == 1:\r\n node_color_dict[i] = 'yellow'\r\n elif nodeData[i].baseV == 7200:\r\n node_color_dict[i] = 'red'\r\n else:\r\n node_color_dict[i] = 'skyblue'\r\n outline_dict[i] = 'black' \r\n\r\n\r\n #Updates branch color in dictionary\r\n for branch in edges:\r\n if branchData[branch].value[0] and branchData[branch].value[1]:\r\n bColor[branchData[branch].fromNode, branchData[branch].toNode] = 'black'\r\n update() \r\n \r\n def update(): \r\n network_graph.node_renderer.data_source.data['node_color']=(list(node_color_dict.values()))\r\n network_graph.node_renderer.data_source.data['border_color']=(list(outline_dict.values()))\r\n network_graph.edge_renderer.data_source.data['branch_color']=(list(bColor.values()))\r\n\r\n plot.update(renderers = [network_graph])\r\n \r\n text_input = AutocompleteInput(title=\"Enter Node to be Put In or Out of Service:\", completions= [n for n in nodeData.keys()], value=\"\") #Autocomplete text box\r\n text_input.on_change(\"value\", callback) #activates when text is entered\r\n \r\n branch_text_input = AutocompleteInput(title= \"Enter Branch to be Put In or Out of Service:\", completions= [b for b in branchData.keys()], value='')\r\n branch_text_input.on_change(\"value\", bCallback) #activates when text is entered\r\n\r\n def movieCall():\r\n #indexes through all times\r\n for i in range(len(times)):\r\n slider.update(value= i + 1)\r\n \r\n #Adds button to movie functionality\r\n button = Button(label='Movie')\r\n button.on_click(movieCall)\r\n\r\n def slideCall(attr, old, new):\r\n #Callnack when slider value is changes\r\n slider.update(title = times[new -1]) #Updates slider title to display time\r\n nodeUpdate(new -1)\r\n\r\n def phaseFunc(phase, phData):\r\n #Changes which phase that is displayed\r\n vals = list(valueDictionary.keys())\r\n for l in vals:\r\n if l in nodeData.keys():\r\n if(phase in nodeData[l].phases): #Checks if phase is in node\r\n #updates value to phase\r\n if aDataDict[l] != []: #checks if node\r\n valueDictionary[l] = phData[l][0][slider.value -1]\r\n else:\r\n valueDictionary[l] = dataDict[l][0][slider.value -1]\r\n else:\r\n valueDictionary[l] = 'N/A' #changes to non-number value\r\n\r\n if l in loadData.keys():\r\n #Same as above for 
loads\r\n if(phase not in loadData[l].phases):\r\n valueDictionary[l] = 'N/A'\r\n else:\r\n valueDictionary[l] = dataDict[loadData[l].node][0][slider.value -1] \r\n\r\n def nodePhaseCall(attr, old, new):\r\n #checks phase and sends information to change to correct graph\r\n if(new==0):\r\n phaseFunc('A', aDataDict)\r\n \r\n if(new==1):\r\n phaseFunc('B', bDataDict)\r\n\r\n if(new==2):\r\n phaseFunc('C', cDataDict)\r\n\r\n network_graph.node_renderer.data_source.data['sim_vals'] = list(valueDictionary.values()) #Updates simulated values\r\n\r\n #Alters Colors According to New Values\r\n network_graph.node_renderer.glyph.fill_color = {'field' : 'sim_vals', 'transform' : color_mapper}\r\n\r\n #Button widgets for phase\r\n phaseButtons = RadioButtonGroup(labels = ['A Phase', 'B Phase', 'C Phase'], active=0)\r\n phaseButtons.on_change('active', nodePhaseCall)\r\n\r\n #Slider widget for time\r\n slider = Slider(start= 1, end = len(times), value = 1, title = times[0])\r\n slider.on_change('value', slideCall)\r\n\r\n div = Div(text = '') #Creates initial empty widget for text block of deactivated items\r\n\r\n r = row(children= [plot, column(children= [row(children=[checkbox_group, checkbox_v]), text_input, branch_text_input, div, row(children = [button]), phaseButtons, slider])]) #formatting\r\n curdoc().add_root(r) #adds plot to server\r\n\r\nmain()\r\n","repo_name":"aroshy/AEGIS-Sp23-Visualization","sub_path":"AEGIS2Orig.py","file_name":"AEGIS2Orig.py","file_ext":"py","file_size_in_byte":32742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"579863464","text":"import os\nimport pickle\nimport numpy\n\nfrom models import NDSparseMatrix\nfrom algs.StateMapper import StateMapper\nfrom utils import Utils\n\nstate_mapper = StateMapper()\n\n\nclass DTMC:\n def __init__(self):\n self.dtmc = {}\n\n def get_probability(self, s_from, s_to):\n return self.dtmc.get((s_from, s_to), 0)\n\n def add_probability(self, s_from, s_to, value):\n value += self.get_probability(s_from, s_to)\n if value > 0:\n self.dtmc[(s_from, s_to)] = value\n\n def save(self, file_name='dtmc', base_dir='data/'):\n # Write tra file\n file = open(os.path.join(base_dir, file_name + '.tra'), 'w')\n file.write('dtmc\\n')\n for (s_from, s_to), prob in sorted(self.dtmc.items()):\n file.write('{} {} {}\\n'.format(s_from, s_to, prob))\n file.close()\n # Write lab file\n file = open(os.path.join(base_dir, file_name + '.lab'), 'w')\n file.write('#DECLARATION\\n')\n file.write('init fallen far collided goal\\n')\n file.write('#END\\n')\n file.write('{} init\\n'.format(state_mapper.INITIAL_STATE))\n file.write('{} fallen\\n'.format(state_mapper.fallen_state))\n file.write('{} collided\\n'.format(state_mapper.self_collided_state))\n file.write('{} far\\n'.format(state_mapper.too_far_state))\n file.write('{} goal\\n'.format(state_mapper.goal_state))\n file.close()\n\n def compute_probabilities(self, base_dir='data/repair'):\n import stormpy\n import stormpy.logic\n goal_formula_str = \"P=? [ F \\\"goal\\\" ]\"\n goal_formula = stormpy.parse_properties(goal_formula_str)\n fallen_formula_str = \"P=? [ F \\\"fallen\\\" ]\"\n fallen_formula = stormpy.parse_properties(fallen_formula_str)\n far_formula_str = \"P=? [ F \\\"far\\\" ]\"\n far_formula = stormpy.parse_properties(far_formula_str)\n collided_formula_str =\"P=? [ F \\\"collided\\\" ]\"\n collided_formula = stormpy.parse_properties(collided_formula_str)\n total_formula_str = \"P=? 
[ F (\\\"far\\\" | \\\"collided\\\" | \\\"fallen\\\")]\"\n total_formula = stormpy.parse_properties(total_formula_str)\n self.save('temp', base_dir)\n #model = stormpy.parse_explicit_model(os.path.join(base_dir, 'temp.tra'),\n# os.path.join(base_dir, 'temp.lab'))\n model = stormpy.build_sparse_model_from_explicit(os.path.join(base_dir, 'temp.tra'),\n os.path.join(base_dir, 'temp.lab'))\n\t\n goal_prob = stormpy.model_checking(model, goal_formula[0])\n goal_prob_Init=stormpy.ExplicitQuantitativeCheckResult.at(goal_prob,state_mapper.INITIAL_STATE)\n fallen_prob = stormpy.model_checking(model, fallen_formula[0])\n fallen_prob_Init=stormpy.ExplicitQuantitativeCheckResult.at(fallen_prob,state_mapper.INITIAL_STATE)\n far_prob = stormpy.model_checking(model, far_formula[0])\n far_prob_Init=stormpy.ExplicitQuantitativeCheckResult.at(far_prob,state_mapper.INITIAL_STATE)\n collided_prob = stormpy.model_checking(model, collided_formula[0])\n collided_prob_Init=stormpy.ExplicitQuantitativeCheckResult.at(collided_prob,state_mapper.INITIAL_STATE)\n total_prob = stormpy.model_checking(model, total_formula[0])\n total_prob_Init=stormpy.ExplicitQuantitativeCheckResult.at(total_prob,state_mapper.INITIAL_STATE)\n #print('At Init Probs: goal: {} , fallen: {} far: {} collided : {} total : {}'.format(goal_prob_Init,fallen_prob_Init,far_prob_Init,collided_prob_Init,total_prob_Init))\n return {'goal': goal_prob_Init, 'fallen': fallen_prob_Init, 'far': far_prob_Init,\n 'collided': collided_prob_Init, 'total': total_prob_Init}\n\n #above print added , to verify if below is to be replaced by above\n #return {'goal': goal_prob, 'fallen': fallen_prob, 'far': far_prob,\n # 'collided': collided_prob, 'total': total_prob}\n\n\nclass DTMCGenerator:\n safe_shutdown_action = Utils.N_ACTIONS\n\n def __init__(self, ttable_file_path, qtable_file_path, temp=1):\n self.t_table = NDSparseMatrix(ttable_file_path)\n self.trans_prob_dict = self.compute_transition_probabilities_dict()\n with open(qtable_file_path, 'rb') as file:\n qtable = pickle.load(file)\n self.Q = qtable.reshape(len(qtable) // Utils.N_ACTIONS, Utils.N_ACTIONS)\n self.temp = temp\n self.policy = None\n\n def compute_policy(self):\n n_states, n_actions = self.Q.shape\n policy = numpy.zeros((n_states, n_actions + 1), dtype=float)\n\n for state in range(n_states):\n if state == state_mapper.too_far_state or state == state_mapper.fallen_state or \\\n state == state_mapper.self_collided_state or state == state_mapper.goal_state:\n policy[state, self.safe_shutdown_action] = 1\n continue\n actions = []\n for action in range(n_actions):\n if self.Q[state, action] != 10 and action != Utils.NULL_ACTION: # and Q[state, action] >= 0:\n actions.append((action, self.Q[state, action]))\n if len(actions) > 0:\n if self.temp > 0:\n values = self.softmax(actions, self.temp)\n else:\n values = self.deterministic(actions)\n for i in range(len(values)):\n policy[state, values[i][0]] = values[i][1]\n else:\n policy[state, self.safe_shutdown_action] = 1\n self.policy = policy\n return policy\n\n def save_policy(self, file_name='policy.pkl', base_dir='data/'):\n with open(os.path.join(base_dir, file_name), 'wb') as file:\n pickle.dump(self.policy, file)\n\n def load_policy(self, file_name='policy.pkl', base_dir='data/'):\n with open(os.path.join(base_dir, file_name), 'rb') as file:\n self.policy = pickle.load(file)\n\n def get_possible_actions(self, state):\n n_states, n_actions = self.Q.shape\n possible_actions = []\n for action in range(n_actions + 1):\n if self.policy[state, action] > 
0:\n possible_actions.append(action)\n return possible_actions\n\n def compute_dtmc(self):\n n_states, n_actions = self.Q.shape\n dtmc = DTMC()\n if self.policy is None:\n self.compute_policy()\n for state in range(n_states):\n for action in range(n_actions + 1):\n if self.policy[state, action] == 0:\n continue\n successors = self.get_successor_states(state, action)\n for (succ_state, prob) in successors:\n p = self.policy[state, action] * prob\n dtmc.add_probability(state, succ_state, p)\n return dtmc\n\n def get_successor_states(self, state, action):\n total = 0\n successors = []\n if state == state_mapper.goal_state:\n return [(state, 1)]\n if action == self.safe_shutdown_action or state == state_mapper.too_far_state \\\n or state == state_mapper.fallen_state or state == state_mapper.self_collided_state:\n return [(state_mapper.INITIAL_STATE, 1)]\n successors = self.trans_prob_dict.get((state, action), [])\n if len(successors) == 0:\n successors = [(state_mapper.too_far_state, 1)]\n return successors\n\n def compute_transition_probabilities_dict(self, transition_counters=None):\n trans_prob_dict = {}\n\n for key, value in self.t_table.elements.items():\n if value <= 0:\n continue\n new_key = (key[0], key[1])\n v = trans_prob_dict.get(new_key, [])\n v.append((key[2], value))\n trans_prob_dict[new_key] = v\n\n for (s1, a), successors in trans_prob_dict.items():\n total = 0\n for a, v in successors:\n total += v\n\n val = int(numpy.exp(- total / 50 * len(successors)) * 100)\n if val > 0:\n total += val\n index = -1\n # Check if the far state is already in the successors vector\n for i, succ in enumerate(successors):\n if succ[0] == state_mapper.too_far_state:\n index = i\n if index < 0:\n successors.append((state_mapper.too_far_state, val))\n else:\n successors[index] = (successors[index][0], successors[index][1] + val)\n\n for i, succ in enumerate(successors):\n successors[i] = (succ[0], succ[1] / total)\n\n return trans_prob_dict\n\n @staticmethod\n def softmax(items, temp):\n values = []\n for v in items:\n # if v[1] > 500:\n # value = 500\n # else:\n # value = v[1]\n value = v[1]\n values.append(numpy.exp(value / temp))\n den = numpy.sum(values)\n for i, value in enumerate(values):\n values[i] = (items[i][0], values[i] / den)\n return values\n\n @staticmethod\n def deterministic(items):\n values = []\n max = 0\n for i in range(len(items)):\n if items[i][1] > items[max][1]:\n max = i\n for i in range(len(items)):\n if i == max:\n values.append((items[i][0], 1))\n else:\n values.append((items[i][0], 0))\n return values\n","repo_name":"SimoV8/bioloid-standup","sub_path":"src/models/DTMC.py","file_name":"DTMC.py","file_ext":"py","file_size_in_byte":9526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14625701220","text":"import os.path\nimport sys\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom html2text import html2text\n\n# import urllib3\n\n# if len(sys.argv) > 1:\n# problem_code = sys.argv[1]\n# else:\n# problem_code = \"LADDU\"\n# problem_code = problem_code.upper()\nurl = sys.argv[1]\n# print(url)\nr = requests.get(url)\n\nsoup = BeautifulSoup(r.content, \"html.parser\")\npreTags = soup(\"pre\")\n\n\n# for tag in preTags:\n# print(tag.text)\n# print (preTags)\n# soup = BeautifulSoup(str(preTags), \"html.parser\")\n# # print (soup.prettify())\n# tags = soup.find_all(\"b\");\n# # print (tags)\n# # print(tags[4].next_sibling)\n# # print (len(tags))\ndef join_with_newline(text_list):\n ret = \"\"\n for t in 
text_list:\n ret = ret + \"\\n\" + t\n return ret.strip(' \\t\\n\\r')\n\n\nnum_test = 1\n# # os.chdir()\ninput_file = \"input\"\noutput_file = \"output\"\nfor i in range(0, len(preTags), 2):\n input_file_name = input_file + str(num_test) + \".txt\"\n # if not os.path.isfile(input_file_name) or True: # always True\n f = open(input_file_name, \"w\")\n text_list = list(map(str.strip, html2text(str(preTags[i])).strip().split('\\n')))\n text = join_with_newline(text_list)\n f.write(text + \"\\n\")\n f.close()\n\n output_file_name = output_file + str(num_test) + \".txt\"\n # if not os.path.isfile(output_file_name):\n f = open(output_file_name, \"w\")\n text_list = list(map(str.strip, html2text(str(preTags[i + 1])).strip().split('\\n')))\n text = join_with_newline(text_list)\n f.write(html2text(str(preTags[i + 1])).strip() + \"\\n\")\n f.close()\n num_test += 1\nwith open(\"num_tests\", \"w\") as f:\n print(num_test - 1, file=f)\n\n # while(sibling is not None):\n # print(sibling)\n # sibling = sibling.b.next_sibling\n\n # print(soup)\n # for t in preTags:\n #\n # print (soup.b.next_sibling)\n # pass\n # [x.extract() for x in t.find_all(\"b\")]\n # print(t)\n # children = t.findChildren()\n # for child in children:\n # print (child)\n # print (t.findNextSibling(text=None))\n\n\n # inp = pre[1].strip('\\n') + '\\n'\n # out = pre[3].strip('\\n') +'\\n'\n\n # print(pre)\n\n\n # f = open('in','w')\n # f.write(inp) # python will convert \\n to os.linesep\n # f.close() # you can omit in most cases as the destructor will call it\n\n # f = open('out','w')\n # f.write(out) # python will convert \\n to os.linesep\n # f.close() # you can omit in most cases as the destructor will call it\n\n\n # print(inp.strip('\\n'))\n # print (out.strip('\\n'))\n # for elem in pre.contents:\n # print(elem)\n # print(pre.contents[1])\n # [x.extract() for x in pre.find_all(\"b\")]\n # print (pre)\n # print (soup.prettify())\n # pre = BeautifulSoup(soup.find_all(\"pre\"), \"lxml\")\n # print (pre.prettify())\n # [x.extract() for x in pre.find_all(\"b\")]\n # print (pre)\n # print soup.html.head.title\n # print(len(links))\n # for link in pre:\n # print (link)\n # print (r.content)\n\n # print ('hello')\n","repo_name":"vikrant1433/problem_parser","sub_path":"codeforces/codeforces.py","file_name":"codeforces.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36790211769","text":"from lp_reader.read import read\nfrom constraint_description import GreaterEqualThan, LessEqualThan\nfrom solver.helper.tableaus import TableauBuilder\n\n\ndef builder_from_file(filepath):\n\n obj, constraints = read(filepath)\n\n builder = TableauBuilder()\n for left_side, ctype, right_side in constraints:\n if ctype == \"<=\":\n builder.add_constraint(LessEqualThan(left_side, right_side))\n elif ctype == \">=\":\n builder.add_constraint(GreaterEqualThan(left_side, right_side))\n else:\n raise NotImplementedError\n\n builder.set_objective(obj)\n return builder\n\n\ndef tableau_from_file(filepath):\n return builder_from_file(filepath).get()\n","repo_name":"oblr/simplex-py","sub_path":"solver/helper/tableaus/read_from_files.py","file_name":"read_from_files.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"5674906306","text":"\nimport os\n\n#ENV = \"DEV\"\nENV = \"PROD\"\n\n\n## server\nhost = \"0.0.0.0\"\nport = int(os.environ.get(\"PORT\", 
5000))\n\n\n## info\napp_name = \"SOCOAPP\"\ncontacts = \"\"\ncode = \"\"\ntutorial = \"\"\nfontawesome = \"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css\"\n\nabout = \"\"\n\n## fs\n#root = os.path.dirname(os.path.dirname(__file__)) + \"/\"","repo_name":"BITACC/soc-app-served","sub_path":"settings/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"9326723952","text":"from confutil import rprint, assert_request\nimport confutil\nfrom skitai import testutil\nfrom skitai.handlers import vhost_handler\nimport skitai\nimport os\n\t\ndef test_websocket_handler (wasc, app, client):\n\t@app.route (\"/echo\")\n\tdef echo (was, message):\n\t\tif was.wsinit ():\n\t\t\treturn was.wsconfig (skitai.WS_SIMPLE, 60)\n\t\telif was.wsopened ():\n\t\t\treturn \"Welcome Client %s\" % was.wsclient ()\n\t\telif was.wshasevent (): # ignore the other events\n\t\t\treturn\n\t\twas.websocket.send (\"You said,\" + message)\n\t\twas.websocket.send (\"acknowledge\")\n\t\n\t@app.route (\"/chat\")\n\tdef chat (was, message, roomid):\n\t\tif was.wsinit ():\n\t\t\treturn was.wsconfig (skitai.WS_GROUPCHAT, 60)\n\t\telif was.wsopened ():\n\t\t\treturn \"Client %s has entered\" % was.wsclient ()\n\t\telif was.wsclosed ():\n\t\t\treturn \"Client %s has leaved\" % was.wsclient ()\n\t\treturn \"Client %s Said: %s\" % (was.wsclient (), message)\n\t\n\tvh = testutil.install_vhost_handler ()\n\troot = confutil.getroot ()\n\tpref = skitai.pref ()\n\tvh.add_route (\"default\", (\"/ws\", app, root), pref)\n\tapp.access_control_allow_origin = [\"http://www.skitai.com:80\"]\n\t\n\t# WEBSOCKET\t\n\ttestutil.enable_threads ()\t\n\tresp = client.ws (\"http://www.skitai.com/ws/echo\", \"Hello\")\n\tassert resp.status_code == 101\n\t\n\tresp = client.ws (\"http://www.skitai.com/ws/chat\", \"Hello\")\n\tassert resp.status_code == 500\n\tresp = client.ws (\"http://www.skitai.com/ws/chat?roomid=1\", \"Hello\")\n\tassert resp.status_code == 101\n\t\n\ttestutil.disable_threads ()\n\tresp = client.ws (\"http://www.skitai.com/ws/echo\", \"Hello\")\n\tassert resp.status_code == 101\n\tresp = client.ws (\"http://www.skitai.com/ws/chat?roomid=1\", \"Hello\")\n\tassert resp.status_code == 101\n\t\n\t","repo_name":"AmesianX/skitai","sub_path":"tests/level3/test_websocket_handler.py","file_name":"test_websocket_handler.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"33831020638","text":"# https://leetcode.com/problems/top-k-frequent-elements/\nfrom collections import Counter\nfrom itertools import chain\n\n\nclass Solution:\n def topKFrequent(self, nums, k):\n\n bucket = [[] for _ in range(len(nums) + 1)]\n\n count = Counter(nums)\n\n for f, c in count.items():\n bucket[c].append(f)\n all_elements = list(chain(*bucket))\n\n return all_elements[::-1][:k]\n\n\nif __name__ == \"__main__\":\n obj = Solution()\n print(obj.topKFrequent(nums=[1,1,1,2,2,3], k=2))","repo_name":"snigi-gupta/Leetcode","sub_path":"Top_K_Frequent_Elements.py","file_name":"Top_K_Frequent_Elements.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"23839327757","text":"import sys\ndef input():\n return sys.stdin.readline().rstrip()\n\ndef binary_search():\n global ans\n start, end = 0, max(arr)\n \n # mid가 아니라 end가 h가 
되는 거였음....\n while start <= end:\n mid = (start + end) // 2\n total_length = 0\n for i in arr:\n if i - mid >= 0:\n total_length += i - mid\n\n # 필요한 나무의 길이보다 자른 나무의 양이 적을 때\n if total_length < M:\n end = mid - 1\n # 필요한 나무의 길이가 충족 및 이상일 때\n else:\n start = mid + 1\n ans = end\n\nN, M = map(int, input().split())\narr = list(map(int, input().split()))\nans = 0\nbinary_search()\nprint(ans)\n","repo_name":"ksy990628/CodingTest","sub_path":"KBS/Binary Search/2805.py","file_name":"2805.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72406226130","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport io\nimport re\nimport json\nimport time\nimport random\nimport asyncio\nimport logging\nimport hashlib\nimport unittest\nimport functools\nimport threading\nimport mimetypes\nimport concurrent.futures\nfrom pprint import pprint\nfrom urllib.parse import urlparse, unquote\nfrom collections import OrderedDict\n\nimport aiohttp\nimport requests\nimport async_timeout\nfrom lxml import etree\nfrom pyqrcode import QRCode\nfrom requests.utils import cookiejar_from_dict\n\n\ndef set_logger(name, level=logging.INFO):\n formatter = logging.Formatter('[%(levelname)1.1s %(asctime)s '\n '%(module)s:%(lineno)d] %(message)s')\n stream_handler = logging.StreamHandler()\n stream_handler.formatter = formatter\n _logger = logging.getLogger(name)\n _logger.addHandler(stream_handler)\n _logger.setLevel(level)\n return _logger\n\n\nlogger = set_logger('wechat')\n\n\nclass NamedVKDict(object):\n \"\"\"\n usage:\n >>> country = NamedVKDict({'CHINA': 0, 'AMERICA': 1, 'BRITAIN': 2})\n >>> country.CHINA\n >>> 0\n >>> country[0]\n >>> 'CHINA'\n \"\"\"\n def __init__(self, value):\n self._attr = value\n self._index = {v: k for k, v in value.items()}\n\n def __getattr__(self, value):\n if value in self._attr:\n return self._attr[value]\n else:\n raise AttributeError\n\n def __getitem__(self, value):\n return self._index[value]\n\n\ndef fix_emoji(val):\n \"\"\"\n _emoji_debugger is for bugs about emoji match caused by wechat\n backstage like :face with tears of joy: will be replaced with\n :cat face with tears of joy:\n \"\"\"\n def _emoji_debugger(val):\n s = val.replace('')\n\n def __fix_miss_match(m):\n return '' % ({\n '1f63c': '1f601', '1f639': '1f602', '1f63a': '1f603',\n '1f4ab': '1f616', '1f64d': '1f614', '1f63b': '1f60d',\n '1f63d': '1f618', '1f64e': '1f621', '1f63f': '1f622',\n }.get(m.group(1), m.group(1)))\n return WeChatMeta.RE['emoji'].sub(__fix_miss_match, s)\n\n def _emoji_formatter(m):\n s = m.group(1)\n if len(s) == 6:\n return ('\\\\U%s\\\\U%s'%(s[:2].rjust(8, '0'), s[2:].rjust(8, '0')))\\\n .encode('utf8').decode('unicode-escape', 'replace')\n elif len(s) == 10:\n return ('\\\\U%s\\\\U%s'%(s[:5].rjust(8, '0'), s[5:].rjust(8, '0')))\\\n .encode('utf8').decode('unicode-escape', 'replace')\n else:\n return ('\\\\U%s'%m.group(1).rjust(8, '0'))\\\n .encode('utf8').decode('unicode-escape', 'replace')\n val = _emoji_debugger(val)\n val = WeChatMeta.RE['emoji'].sub(_emoji_formatter, val)\n return val\n\n\n##############\n# Exceptions #\n##############\n\nclass WeChatError(Exception):\n pass\n\n\nclass LoginFailedError(WeChatError):\n pass\n\n\nclass MultiListenThreadError(WeChatError):\n pass\n\n\nclass MessageDataCorruptionError(WeChatError):\n pass\n\n\nclass WeChatMeta(object):\n APP_ID = 'wx782c26e4c19acffb' # Got from itChat\n GROUP_PREFIX = '@@'\n INVITE_BY_MYSELF = '你'\n MP_FLAG = 'gh_'\n COOKIE_DOMAIN = 
'.qq.com'\n TIME_FORMAT = '%a %b %d %Y %H:%M:%S GMT+0800 (CST)'\n\n FILE_MESSAGE_TEMPLATE = (\n \"{}\"\n \"6\"\n \"{}{}\"\n \"{}\"\n )\n\n LOGIN_URI = 'https://login.weixin.qq.com'\n URL = {\n 'uuid': LOGIN_URI + '/jslogin',\n 'push_login': LOGIN_URI + '/cgi-bin/mmwebwx-bin/webwxpushloginurl',\n 'login_status': LOGIN_URI + '/cgi-bin/mmwebwx-bin/login',\n 'qr_code': LOGIN_URI + '/l/',\n 'upload_media': '/webwxuploadmedia',\n 'sync_check': '/synccheck',\n 'web_sync': '/webwxsync',\n 'web_init': '/webwxinit',\n 'web_status': '/webwxstatusnotify',\n 'get_contacts': '/webwxgetcontact',\n 'bget_contacts': '/webwxbatchgetcontact',\n 'send_message': '/webwxsendmsg',\n 'send_image': '/webwxsendmsgimg',\n 'send_video': '/webwxsendvideomsg',\n 'send_file': '/webwxsendappmsg',\n 'update_group': '/webwxupdatechatroom',\n 'create_group': '/webwxcreatechatroom',\n 'set_pin': '/webwxoplog',\n 'group_avatar': '/webwxgetheadimg',\n 'user_avatar': '/webwxgeticon',\n }\n\n RE = {\n 'uuid': re.compile(r'QRLogin\\.uuid = \"(?P\\S+)\"'),\n 'login_status': re.compile(r'window\\.code=(?P\\d+)'),\n 'main_uri': re.compile(r'window.redirect_uri=\"(?P\\S+)\"'),\n 'uin': re.compile(r'(?P[^<]*?)<'),\n 'sync_check': re.compile(r'synccheck=\\{retcode:\"(?P\\d+)\",'\n r'selector:\"(?P\\d+)\"\\}'),\n 'group_msg': re.compile(u'(?P@[0-9a-z]+):
'\n u'(@(?P.*?)\\u2005)?'\n u'(?P.*)'),\n 'invite': re.compile('.*?(邀请\"(?P.*?)\"|'\n '\"(?P.*?)\"通过)'),\n 'remove': re.compile('\"(?P.*?)\"移出了群聊'),\n 'emoji': re.compile(r''),\n }\n\n\nMESSAGE_TYPE = NamedVKDict({\n 'TEXT': 1,\n 'IMAGE': 3,\n 'FILE': 6,\n 'CONTACT_CARD': 42,\n 'VIDEO': 43,\n 'SHARE': 49,\n 'INITIALIZE': 51,\n 'SYSTEM': 10000,\n})\n\n\nclass Contact(object):\n RAW_FIELD = ['UserName', 'NickName', 'MemberList', 'DisplayName']\n\n def __init__(self, raw_contact=None, account=None, is_group=False,\n dumps=None):\n self.__bool = False or bool(raw_contact)\n if dumps or not self.__bool:\n return\n\n member_list = raw_contact.get('MemberList', [])\n\n self.account = account\n self.user_id = raw_contact['UserName']\n self.nickname = fix_emoji(raw_contact['NickName'])\n self.display_name = fix_emoji(raw_contact.get('DisplayName', ''))\n self.is_owner = self._is_owner(member_list)\n self.members = self.process_members(member_list)\n self.is_group = is_group\n\n @property\n def avatar(self):\n return self.account.get_avatar(self.user_id)\n\n def process_members(self, members):\n return {m['UserName']: Contact(m, self.account) for m in members}\n\n def _is_owner(self, members):\n if not members:\n return False\n return members[0]['UserName'] == self.account.username\n\n @classmethod\n def is_data_corruption(cls, raw_contact):\n if not raw_contact:\n return True\n for field in cls.RAW_FIELD:\n if field not in raw_contact:\n return True\n return True\n\n def dump(self, avatar=False):\n contact = {\n 'user_id': self.user_id,\n 'nickname': self.nickname,\n 'display_name': self.display_name,\n 'is_owner': self.is_owner,\n 'is_group': self.is_group,\n 'members': {\n user_id: member.dump(avatar)\n for user_id, member in self.members.items()\n },\n 'account': {\n 'username': self.account.username,\n 'nickname': self.account.nickname,\n 'uin': self.account.uin,\n },\n }\n if avatar:\n avatar_bin = self.account.get_avatar(self.user_id)\n contact['avatar_md5'] = hashlib.md5(avatar_bin).hexdigest()\n return contact\n\n @classmethod\n def load(cls, dump, account=None):\n if not isinstance(dump, dict):\n contact = json.loads(dump)\n\n contact = cls(dumps=True)\n contact.account = account\n contact.user_id = dump['user_id']\n contact.nickname = dump['nickname']\n contact.display_name = dump['display_name']\n contact.is_owner = dump['is_owner']\n contact.is_group = dump['is_group']\n contact.members = {\n user_id: cls.load(member, account)\n for user_id, member in dump['members'].items()\n }\n return contact\n\n def __bool__(self):\n return self.__bool\n\n\nclass WeChatClient(object):\n HEADERS = {\n 'ContentType': 'application/json; charset=UTF-8',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/58.0.3029.110 Safari/537.36',\n }\n\n CHUNK_SIZE = 1024 * 512 # 512KB\n\n def __init__(self, credential=None):\n self.login = False\n self._alive = False\n self.uuid = None\n self.login_info = {}\n self.invite_start_count = 40\n\n self.session = requests.Session()\n self.session.headers = self.HEADERS\n\n self.friends = {}\n self.groups = {}\n self.mp = {}\n\n self.logout_callback = []\n self.message_callback = {}\n self.credential_update_callback = []\n self.group_update_callback = {}\n self.tick_hooks = {}\n\n self.uin = None\n self.username = None\n self.nickname = None\n self.alias = None\n\n self._listen_thread = None\n self.listening = False\n\n if credential:\n self.load_credential(credential)\n\n @property\n def alive(self):\n 
return bool(self._alive and self.listen_thread and\n self.listen_thread.isAlive())\n\n @property\n def listen_thread(self):\n return self._listen_thread\n\n @listen_thread.setter\n def listen_thread(self, value):\n if self.listen_thread and self.listen_thread.isAlive():\n raise MultiListenThreadError\n self._listen_thread = value\n\n ##################\n # login & logout #\n ##################\n\n def login_by_qrcode(self, timeout=180, thread=False, callback=None):\n def polling():\n start_time = time.time()\n while not self.login:\n if time.time() - start_time > timeout:\n return self.login\n self.login = self._polling_login()\n time.sleep(0.1)\n\n self._login_init()\n self._alive = True\n\n if callback:\n self._run_callback([callback])\n self.listen_message()\n return self.login\n\n if not thread:\n return polling()\n\n polling_thread = threading.Thread(target=polling)\n polling_thread.setDaemon(True)\n polling_thread.start()\n\n def print_cli_qrcode(self):\n self.uuid = self.get_login_uuid()\n qr_code = QRCode(WeChatMeta.URL['qr_code'] + self.uuid)\n qr_code.svg('uca-url.svg', scale=6)\n print(qr_code.terminal(quiet_zone=1))\n\n @classmethod\n def get_login_uuid(cls):\n resp = requests.get(\n WeChatMeta.URL['uuid'],\n params={'appid': WeChatMeta.APP_ID, 'fun': 'new'}\n )\n result = WeChatMeta.RE['uuid'].search(resp.text)\n assert result, 'Failed get uuid from {}'.format(WeChatMeta.URL['uuid'])\n return result.group('uuid')\n\n @classmethod\n def generate_qrcode(cls, uuid):\n qr_storage = io.BytesIO()\n qr_code = QRCode(WeChatMeta.URL['qr_code'] + uuid)\n qr_code.svg(qr_storage, scale=10)\n return qr_storage.getvalue()\n\n def get_qrcode(self):\n self.uuid = self.get_login_uuid()\n return self.generate_qrcode(self.uuid)\n\n def _polling_login(self):\n if not self.uuid:\n return False\n timestamp = int(time.time())\n params = {\n 'uuid': self.uuid,\n 'loginicon': True,\n 'tip': 0,\n 'r': timestamp / 1579, # Magic number: 1579, from ItChat\n '_': timestamp\n }\n resp = self.session.get(WeChatMeta.URL['login_status'], params=params)\n result = WeChatMeta.RE['login_status'].search(resp.text)\n if not result:\n return False\n\n status = result.group('status')\n if status != '200':\n return False\n else:\n self._extract_login_credential(resp.text)\n return True\n\n def _extract_login_credential(self, content):\n result = WeChatMeta.RE['main_uri'].search(content)\n if not result:\n raise LoginFailedError('Failed extract redirect uri '\n 'after login success')\n redirect_uri = result.group('main_uri')\n resp = self.session.get(redirect_uri, allow_redirects=False)\n\n credit = self.login_info\n resp_xml = etree.fromstring(resp.text)\n\n parsed_uri = urlparse(redirect_uri)\n essentials = (parsed_uri.scheme, parsed_uri.netloc,\n parsed_uri.path[:parsed_uri.path.rfind('/')])\n credit['main_uri'] = '{}://{}{}'.format(*essentials)\n credit['upload_uri'] = '{}://file.{}{}'.format(*essentials)\n credit['web_sync_uri'] = '{}://webpush.{}{}'.format(*essentials)\n credit['deviceid'] = 'e' + repr(random.random())[2:17]\n try:\n br = credit['base_request'] = {'DeviceID': credit['deviceid']}\n credit['skey'] = br['Skey'] = resp_xml.xpath('//skey')[0].text\n credit['wxsid'] = br['Sid'] = resp_xml.xpath('//wxsid')[0].text\n credit['wxuin'] = resp_xml.xpath('//wxuin')[0].text\n br['Uin'] = int(credit['wxuin'])\n credit['pass_ticket'] = unquote(\n resp_xml.xpath('//pass_ticket')[0].text\n )\n self.uin = credit['wxuin']\n except TypeError:\n self.login_info = {}\n raise LoginFailedError(\n 'Failed extract login 
credential from login xml'\n )\n\n def _web_init(self):\n url = self.login_info['main_uri'] + WeChatMeta.URL['web_init']\n\n resp = self.session.post(\n url, params={'r': int(time.time())},\n json={'BaseRequest': self.login_info['base_request']}\n )\n\n result = self._decode_content(resp.content)\n credit = self.login_info\n credit['sync_check_key'] = result['SyncKey']\n self.username = fix_emoji(result['User']['UserName'])\n self.nickname = fix_emoji(result['User']['NickName'])\n self.invite_start_count = int(result['InviteStartCount'])\n self.save_credential()\n\n def _get_initialize_contacts(self):\n url = self.login_info['main_uri'] + WeChatMeta.URL['web_status']\n params = {\n 'lang': 'zh_CN',\n 'pass_ticket': self.login_info['pass_ticket'],\n }\n data = {\n 'BaseRequest': self.login_info['base_request'],\n 'Code': 3,\n 'FromUserName': self.username,\n 'ToUserName': self.username,\n 'ClientMsgId': int(time.time()),\n }\n resp = self.session.post(url, params=params, json=data)\n return resp.json()['BaseResponse']['Ret'] == 0\n\n def _get_all_contacts(self):\n url = self.login_info['main_uri'] + WeChatMeta.URL['get_contacts']\n\n def fetch_fragment(seq=0):\n contacts = []\n params = {\n 'r': int(time.time()),\n 'seq': seq,\n 'skey': self.login_info['skey'],\n }\n\n resp = self.session.get(url, params=params)\n data = self._decode_content(resp.content)\n contacts.extend(data.get('MemberList', []))\n new_seq = data.get('Seq', 0)\n if new_seq != 0:\n contacts.extend(fetch_fragment(new_seq))\n else:\n return contacts\n\n all_contacts = fetch_fragment()\n self._process_contacts_change(all_contacts)\n\n def _login_init(self):\n self._web_init()\n self._get_initialize_contacts()\n self._get_all_contacts()\n\n def export_credential(self):\n return {\n 'cookies': self.session.cookies.get_dict(),\n 'login_info': self.login_info,\n 'username': self.username,\n 'nickname': self.nickname,\n 'uin': self.uin,\n }\n\n def load_credential(self, credential):\n self.login_info = credential['login_info']\n self.session.cookies = cookiejar_from_dict(credential['cookies'])\n self.uin = self.login_info['wxuin']\n self.nickname = credential['nickname']\n self.username = credential['username']\n\n def login_by_credential(self, credential=None):\n if credential:\n self.load_credential(credential)\n\n cookies = self.session.cookies.get_dict()\n cookies.update({\n 'login_frequency': '2',\n 'last_wxuin': self.login_info['wxuin'],\n 'MM_WX_NOTIFY_STATE': '1',\n 'MM_WX_SOUND_STATE': '1',\n })\n self.session.cookies = cookiejar_from_dict(cookies)\n\n success, message, contacts = self._fetch_server_change()\n if success:\n self.login = True\n self._alive = True\n self._login_init()\n self.listen_message()\n return True\n else:\n return False\n\n def _push_login(self):\n uin = self.login_info['wxuin']\n resp = self.session.get(WeChatMeta.URL['push_login'],\n params={'uin': uin})\n result = resp.json()\n if 'uuid' in result and str(result.get('ret')) == '0':\n self.uuid = result['uuid']\n return True\n else:\n return False\n\n def logout(self):\n self._alive = False\n self.login = False\n old_listen_thread = self._listen_thread\n del old_listen_thread\n self._listen_thread = None\n for cb in self.logout_callback:\n cb(self)\n\n #################\n # Contacts data #\n #################\n\n def save_group(self, group):\n if not group:\n return\n self._run_callback(self.group_update_callback, group)\n self.groups[group.user_id] = group\n\n def get_group_by_username(self, username, force_remote=False):\n if force_remote or 
username not in self.groups:\n group = self._query_entity(username)\n if not group:\n return None\n self.save_group(group)\n return self.groups[username]\n\n def get_group_by_nickname(self, nickname):\n for group_id, group in self.groups.items():\n if group.nickname == nickname:\n return group\n\n def get_group_member(self, group_id, user_id):\n group = self.get_group_by_username(group_id)\n if not group:\n return None\n return group.members.get(user_id)\n\n @classmethod\n def _process_fetch(cls, session, req):\n return cls._decode_content(session.post(**req).content)\n\n def _build_username_req(self, user_ids, group_id):\n if not group_id:\n request_list = [\n {'UserName': user, 'EncryChatRoomId': ''}\n for user in user_ids\n ]\n else:\n request_list = [\n {'UserName': user, 'EncryChatRoomId': group_id}\n for user in user_ids\n ]\n return {\n 'url': self.login_info['main_uri'] + WeChatMeta.URL['bget_contacts'],\n 'params': {'type': 'ex', 'r': int(time.time())},\n 'data': json.dumps({\n 'BaseRequest': self.login_info['base_request'],\n 'Count': len(user_ids),\n 'List': request_list,\n })\n }\n\n def _query_entity(self, username):\n result = self._query_entities([username])\n return result[0] if result else {}\n\n def _query_entities(self, user_ids):\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n def package_req(items, group_id=None):\n reqs = []\n for i in range(len(items)//50 + 1):\n seg = items[i*50: (i+1) * 50]\n reqs.append(self._build_username_req(seg, group_id))\n return reqs\n\n def do_request(reqs, groups=None):\n with concurrent.futures.ProcessPoolExecutor() as executor:\n result = list(executor.map(\n self._process_fetch, [self.session] * len(reqs), reqs)\n )\n return zip(result, groups) if groups else result\n\n # FIXME: async request work not meeting expectations\n async def _fetch(req):\n cookies = self.session.cookies.get_dict()\n try:\n with async_timeout.timeout(100):\n async with aiohttp.ClientSession(\n loop=loop, cookies=cookies,\n headers=self.HEADERS) as session:\n async with session.post(**req) as res:\n res = await res.text(encoding='utf-8')\n return json.loads(res)\n except asyncio.TimeoutError as err:\n logger.error('Failed get req: {} response, '\n 'err: {}'.format(req, err))\n except Exception as err:\n logger.error('*** Terrible things happened ***, '\n 'error: {}'.format(err))\n\n def do_async_request(reqs, groups=None):\n futures = [_fetch(r) for r in reqs]\n result = loop.run_until_complete(asyncio.gather(*futures))\n return zip(result, groups) if groups else result\n\n resp = do_request(package_req(user_ids))\n entities = [g for r in resp if resp for g in r.get('ContactList') if g]\n entities = {g['UserName']: Contact(g, self, True) for g in entities}\n if not entities:\n logger.error('Aysnc request failed: raw resp: {}'.format(resp))\n\n req_queue = []\n group_queue = []\n for group_id, group in entities.items():\n members = list(group.members.keys())\n sub_req = package_req(members, group_id)\n req_queue.extend(sub_req)\n group_queue.extend([group_id] * len(sub_req))\n group.members = {}\n if not req_queue:\n return list(entities.values())\n\n resp = do_request(req_queue, group_queue)\n for member, group_id in resp:\n group = entities[group_id]\n if member:\n for m in member['ContactList']:\n m = Contact(m, self)\n group.members[m.user_id] = m\n return list(entities.values())\n\n def _process_contacts_change(self, contacts):\n for contact in contacts:\n user = 
contact['UserName']\n if contact.get('KeyWord') == WeChatMeta.MP_FLAG:\n self.mp[user] = contact\n elif contact['UserName'].startswith(WeChatMeta.GROUP_PREFIX):\n if contact['MemberList']:\n c = Contact(contact, self)\n self.save_group(c)\n else:\n self.save_group(self._query_entity(user))\n else:\n self.friends[user] = contact\n\n def get_avatar(self, user_id):\n params = {\n 'username': user_id,\n 'seq': int(time.time() * 4.36), # 4.36: magic number by myself\n 'skey': self.login_info['skey'],\n }\n if user_id.startswith(WeChatMeta.GROUP_PREFIX):\n path = WeChatMeta.URL['group_avatar']\n else:\n path = WeChatMeta.URL['user_avatar']\n url = self.login_info['main_uri'] + path\n resp = self.session.get(url, params=params)\n return resp.content\n\n def get_avatar_md5(self, user_id):\n return hashlib.md5(self.get_avatar(user_id)).hexdigest()\n\n ##################\n # handle message #\n ##################\n\n def _process_new_message(self, messages):\n for msg in messages:\n try:\n msg = self._reform_raw_msg(msg)\n if msg:\n self._run_callback(self.message_callback, msg)\n except Exception:\n logger.exception('Failed process raw message')\n\n def _handle_private_msg(self, msg):\n # TODO: implement\n pass\n\n def _handle_group_msg(self, from_user, to_user):\n group = self.get_group_by_username(to_user)\n if not group or from_user not in group.members:\n print('from', from_user, 'to', to_user, 'group', group)\n raise MessageDataCorruptionError\n\n user = group.members[from_user]\n return {\n 'from_user': from_user,\n 'from_nickname': user.display_name or user.nickname,\n 'to_user': to_user,\n 'to_nickname': group.nickname,\n }\n\n def _handle_initialize_msg(self, msg):\n users = msg['StatusNotifyUserName'].split(',')\n group_ids = [u for u in users\n if u.startswith(WeChatMeta.GROUP_PREFIX)]\n for group in self._query_entities(group_ids):\n self.save_group(group)\n\n def _handle_system_msg(self, msg, to_user):\n invite = WeChatMeta.RE['invite'].search(msg['Content'])\n if not invite:\n return {}\n group = self.get_group_by_username(to_user, force_remote=True)\n if not group:\n return {}\n new = {\n 'invitee': '',\n 'member_count': len(group.members),\n 'invitee_nickname': invite.group('invitee1') or \\\n invite.group('invitee2'),\n }\n\n for member_id, member in group.members.items():\n display_name = member.display_name or member.nickname\n if display_name == new['invitee_nickname']:\n new['invitee'] = member_id\n break\n\n return {'new_member': new}\n\n def _reform_raw_msg(self, raw_msg):\n msg_type = raw_msg.get('MsgType')\n if msg_type == MESSAGE_TYPE.INITIALIZE:\n self._handle_initialize_msg(raw_msg)\n return\n\n try:\n content_type = MESSAGE_TYPE[msg_type].lower()\n except KeyError:\n content_type = 'other'\n\n new_msg = {\n 'is_at_me': False,\n 'message_type': 'private',\n 'content': raw_msg['Content'],\n 'new_member': None,\n 'content_type': content_type,\n }\n\n to_user, from_user = raw_msg['ToUserName'], raw_msg['FromUserName']\n if from_user.startswith(WeChatMeta.GROUP_PREFIX):\n to_user, from_user = from_user, to_user\n\n if not to_user.startswith(WeChatMeta.GROUP_PREFIX):\n self._handle_private_msg(raw_msg)\n return None\n else:\n new_msg['message_type'] = 'group'\n if msg_type == MESSAGE_TYPE.TEXT:\n content = raw_msg['Content']\n matched = WeChatMeta.RE['group_msg'].search(content)\n if matched:\n new_msg['content'] = matched.group('content')\n from_user = matched.group('username')\n me = self.get_group_member(to_user, self.username) or {}\n my_nickname = me.display_name or 
self.nickname\n if matched.group('nickname') == my_nickname:\n new_msg['is_at_me'] = True\n elif msg_type == MESSAGE_TYPE.SYSTEM:\n new_msg.update(self._handle_system_msg(raw_msg, to_user))\n\n new_msg.update(self._handle_group_msg(from_user, to_user))\n return new_msg\n\n ############\n # Callback #\n ############\n\n @classmethod\n def _run_callback(cls, callbacks, *args, **kwargs):\n if isinstance(callbacks, dict):\n callbacks = callbacks.values()\n for cb in callbacks:\n try:\n cb(*args, **kwargs)\n except Exception as err:\n logger.error('Failed run callback {}, args: {}, kwargs: '\n '{}, error: {}'.format(cb, args, kwargs, err))\n\n def save_credential(self):\n for cb in self.credential_update_callback:\n cb(self.uin, self.export_credential())\n\n def register_credential_update_callback(self, callback, *args, **kwargs):\n self.credential_update_callback.append(\n functools.partial(callback, *args, **kwargs))\n\n ################\n # Send message #\n ################\n\n def _upload_media_by_url(self, url, media_type, to_user):\n resp = requests.get(url)\n file_size = len(resp.content)\n file_md5 = hashlib.md5(resp.content).hexdigest()\n file_type = mimetypes.guess_type(url)[0] or \\\n 'application/octet-stream'\n\n upload_media_request = json.dumps(OrderedDict([\n ('UploadType', 2),\n ('BaseRequest', self.login_info['base_request']),\n ('ClientMediaId', int(time.time() * 1e4)),\n ('TotalLen', file_size),\n ('StartPos', 0),\n ('DataLen', file_size),\n ('MediaType', 4),\n ('FromUserName', self.username),\n ('ToUserName', to_user),\n ('FileMd5', file_md5),\n ]), separators=(',', ':'))\n\n result = None\n params = {'f': 'json'}\n chunks = (file_size - 1) // self.CHUNK_SIZE + 1\n last_chunk = 0\n for chunk in range(1, chunks+1):\n last_modified = time.strftime(WeChatMeta.TIME_FORMAT)\n data_ticket = self.session.cookies.get(\n 'webwx_data_ticket', domain=WeChatMeta.COOKIE_DOMAIN\n )\n chunk_data = resp.content[self.CHUNK_SIZE * last_chunk:\n self.CHUNK_SIZE * chunk]\n files = OrderedDict([\n ('id', (None, 'WU_FILE_0')),\n ('name', (None, os.path.basename(url))),\n ('type', (None, file_type)),\n ('lastModifiedDate', (None, last_modified)),\n ('size', (None, str(file_size))),\n ('mediatype', (None, media_type)),\n ('uploadmediarequest', (None, upload_media_request)),\n ('webwx_data_ticket', (None, data_ticket)),\n ('pass_ticket', (None, self.login_info['pass_ticket'])),\n ('filename', (os.path.basename(url), chunk_data, file_type))\n ])\n last_chunk = chunk\n if chunks != 1:\n files['chunk'] = (None, str(chunk))\n files['chunks'] = (None, str(chunks))\n\n upload_url = self.login_info['upload_uri'] + \\\n WeChatMeta.URL['upload_media']\n resp = self.session.post(upload_url, params=params, files=files)\n try:\n result = resp.json()['MediaId']\n except (TypeError, ValueError):\n result = None\n return result\n\n def _send(self, to_user, msg_type, url, content=None, media_id=None):\n params = {\n 'fun': 'async', 'f': 'json',\n 'pass_ticket': self.login_info['pass_ticket'],\n }\n timestamp = int(time.time() * 1e4)\n current_user = self.username\n data = {\n 'BaseRequest': self.login_info['base_request'],\n 'Scene': 0,\n 'Msg': {\n 'Type': msg_type,\n 'Content': content,\n 'MediaId': media_id,\n 'FromUserName': current_user,\n 'ToUserName': to_user if to_user else current_user,\n 'LocalID': timestamp,\n 'ClientMsgId': timestamp,\n }\n }\n resp = self.session.post(\n url, params=params,\n data=json.dumps(data, ensure_ascii=False).encode('utf8')\n )\n return self._decode_content(resp.content)\n\n 
def send_message(self, to_user, msg_type, payload=None, media_id=None):\n to_user = self._handle_group_id(to_user) or to_user\n\n assert payload or media_id, \\\n 'Requires at least one argument of payload and media_id'\n try:\n msg_type = getattr(MESSAGE_TYPE, msg_type.upper())\n except KeyError:\n raise ValueError('Unsupported message type: {}'.format(msg_type))\n\n if msg_type == MESSAGE_TYPE.TEXT:\n url = self.login_info['main_uri'] + WeChatMeta.URL['send_message']\n result = self._send(to_user, msg_type, url, content=payload)\n return result['BaseResponse']['Ret'] == 0\n\n if msg_type == MESSAGE_TYPE.IMAGE:\n media_type = 'pic'\n path = WeChatMeta.URL['send_image']\n elif msg_type == MESSAGE_TYPE.VIDEO:\n media_type = 'video'\n path = WeChatMeta.URL['send_video']\n elif msg_type == MESSAGE_TYPE.FILE:\n media_type = 'doc'\n path = WeChatMeta.URL['send_file']\n else:\n raise ValueError('Unsupported message type: {}'.format(msg_type))\n\n media_id = self._upload_media_by_url(payload, media_type, to_user)\n assert media_id, 'Failed upload file: {}'.format(payload)\n\n url = self.login_info['main_uri'] + path\n\n if msg_type == MESSAGE_TYPE.FILE:\n content = self._build_file_message_content(payload, media_id)\n media_id = None\n else:\n content = None\n\n result = self._send(to_user, msg_type, url,\n media_id=media_id, content=content)\n return result['BaseResponse']['Ret'] == 0\n\n @staticmethod\n def _build_file_message_content(file_path, media_id):\n return WeChatMeta.FILE_MESSAGE_TEMPLATE.format(\n os.path.basename(file_path), str(os.path.getsize(file_path)),\n media_id, os.path.splitext(file_path)[1].replace('.', ''),\n )\n\n #################\n # Group manager #\n #################\n\n def _handle_group_id(self, group_id):\n if group_id.startswith(WeChatMeta.GROUP_PREFIX):\n return group_id\n group = self.get_group_by_nickname(group_id)\n return group.user_id if group else None\n\n def del_group_member(self, group_id, member_ids):\n url = self.login_info['main_uri'] + WeChatMeta.URL['update_group']\n params = {\n 'fun': 'delmember',\n 'pass_ticket': self.login_info['pass_ticket'],\n }\n data = {\n 'BaseRequest': self.login_info['base_request'],\n 'ChatRoomName': group_id,\n 'DelMemberList': ','.join(member_ids),\n }\n resp = self.session.post(url, params=params, json=data)\n self.save_group(self._query_entity(group_id))\n return resp.json()['BaseResponse']['Ret'] == 0\n\n def update_group_nickname(self, group_id, nickname):\n username = self._handle_group_id(group_id)\n if not username:\n logger.error('Failed update group nickname,'\n ' invalid group_id: {}'.format(group_id))\n return False\n\n url = self.login_info['main_uri'] + WeChatMeta.URL['update_group']\n params = {\n 'fun': 'modtopic',\n 'pass_ticket': self.login_info['pass_ticket'],\n }\n data = {\n 'BaseRequest': self.login_info['base_request'],\n 'ChatRoomName': username,\n 'NewTopic': nickname,\n }\n resp = self.session.post(\n url, params=params,\n data=json.dumps(data, ensure_ascii=False).encode('utf8', 'ignore'),\n )\n self.save_group(self._query_entity(username))\n return resp.json()['BaseResponse']['Ret'] == 0\n\n def add_group_number(self, group_id, member_list):\n username = self._handle_group_id(group_id)\n if not username or username not in self.groups:\n logger.error('Failed delete group members,'\n ' invalid group_id: {}'.format(group_id))\n return False\n\n url = self.login_info['main_uri'] + WeChatMeta.URL['update_group']\n params = {'pass_ticket': self.login_info['pass_ticket']}\n data = {\n 'BaseRequest': 
self.login_info['base_request'],\n 'ChatRoomName': username,\n }\n\n members = ','.join(member_list)\n group = self.groups.get(username)\n\n if len(group.members) > self.invite_start_count:\n params['fun'] = 'invitemember'\n data['InviteMemberList'] = members\n else:\n params['fun'] = 'addmember'\n data['AddMemberList'] = members\n\n resp = self.session.post(url, params=params, json=data)\n self.save_group(self._query_entity(username))\n return resp.json()['BaseResponse']['Ret'] == 0\n\n def create_group(self, member_list, name=''):\n \"\"\"\n :param member_list: member username list\n :param name: group name\n :return: group info dict\n \"\"\"\n url = self.login_info['main_uri'] + WeChatMeta.URL['create_group']\n params = {\n 'pass_ticket': self.login_info['pass_ticket'],\n 'r': int(time.time()),\n }\n data = {\n 'BaseRequest': self.login_info['base_request'],\n 'MemberCount': len(member_list),\n 'MemberList':\n [{'UserName': member} for member in member_list],\n 'Topic': name,\n }\n resp = self.session.post(\n url, params=params,\n data=json.dumps(data, ensure_ascii=False).encode('utf8', 'ignore')\n )\n result = resp.json()\n if not result['BaseResponse']['Ret'] == 0:\n return None\n else:\n username = result['ChatRoomName']\n self.send_message(username, 'text', 'Everyone welcome!')\n self._get_initialize_contacts()\n return username\n\n ##################\n # Listen message #\n ##################\n\n def listen_message(self, retries=3, thread=True):\n def fetch_event():\n _, messages, contacts = self._fetch_server_change()\n self._process_new_message(messages)\n self._process_contacts_change(contacts)\n\n def receive_loop(_retries):\n fetch_event()\n while self._alive:\n self._run_callback(self.tick_hooks, self)\n try:\n check_data = self._sync_check()\n if check_data > 0:\n fetch_event()\n elif check_data == 0:\n _retries = retries\n elif _retries > 0:\n _retries -= 1\n else:\n return self.logout()\n time.sleep(1)\n except (requests.ConnectionError, requests.Timeout,\n requests.HTTPError) as err:\n logger.error('Error in listen thread: {}'.format(err))\n\n if self.listening:\n return\n self.listening = True\n\n if not thread:\n return receive_loop(retries)\n\n if self._listen_thread:\n raise MultiListenThreadError\n self.listen_thread = threading.Thread(target=receive_loop,\n args=(retries,))\n self.listen_thread.setDaemon(True)\n self.listen_thread.start()\n\n def _fetch_server_change(self):\n url = self.login_info['main_uri'] + WeChatMeta.URL['web_sync']\n params = {\n 'sid': self.login_info['wxsid'],\n 'skey': self.login_info['skey'],\n 'pass_ticket': self.login_info['pass_ticket'],\n }\n data = {\n 'BaseRequest': self.login_info['base_request'],\n 'SyncKey': self.login_info['sync_check_key'],\n 'rr': ~int(time.time()),\n }\n resp = self.session.post(url, params=params, json=data)\n result = self._decode_content(resp.content)\n self.login_info['sync_check_key'] = result['SyncCheckKey']\n self.login_info['synckey'] = '|'.join([\n '{}_{}'.format(item['Key'], item['Val'])\n for item in result['SyncCheckKey']['List']\n ])\n self.save_credential()\n success = result['BaseResponse']['Ret'] == 0\n return success, result['AddMsgList'], result['ModContactList']\n\n def _sync_check(self):\n url = self.login_info['web_sync_uri'] + WeChatMeta.URL['sync_check']\n timestamp = int(time.time() * 1000)\n params = {\n 'r': timestamp,\n 'skey': self.login_info['skey'],\n 'sid': self.login_info['wxsid'],\n 'uin': self.login_info['wxuin'],\n 'deviceid': self.login_info['deviceid'],\n 'synckey': 
self.login_info['synckey'],\n '_': timestamp,\n }\n resp = self.session.get(url, params=params)\n matched = WeChatMeta.RE['sync_check'].search(resp.text)\n if not matched or matched.group('retcode') != '0':\n logger.debug('unexpected sync check result')\n return -1\n else:\n return int(matched.group('selector'))\n\n ###################\n # other operation #\n ###################\n\n def set_pin(self, username, pin=True):\n url = self.login_info['main_uri'] + WeChatMeta.URL['set_pin']\n params = {\n 'pass_ticket': self.login_info['pass_ticket'],\n 'lang': 'zh_CN',\n }\n data = {\n 'UserName': username,\n 'CmdId': 3,\n 'OP': int(pin),\n 'RemarkName': '',\n 'BaseRequest': self.login_info['base_request'],\n }\n resp = self.session.post(url, params=params, json=data)\n return resp.json()['BaseResponse']['Ret'] == 0\n\n @staticmethod\n def _decode_content(content):\n return json.loads(content.decode('utf-8', 'replace'))\n\n\nclass WeChatUnitTest(unittest.TestCase):\n def test_get_login_uuid(self):\n uuid = WeChatClient.get_login_uuid()\n self.assertIsInstance(uuid, str)\n\n\nclass WeChatDemo(object):\n def __init__(self):\n self.client = WeChatClient()\n\n @staticmethod\n def msg_callback(msg):\n pprint(msg)\n\n def run(self):\n client = self.client\n client.message_callback = [self.msg_callback]\n client.print_cli_qrcode()\n client.login_by_qrcode(timeout=120)\n print('Nickname: {}\\n'\n 'Username: {}\\n'\n 'Uin: {}\\n'\n 'alias: {}\\n'\n 'Time: {}\\n'\n 'Main Uri: {}\\n'\n .format(client.nickname, client.username, client.uin,\n client.alias, time.ctime(),\n client.login_info['main_uri'])\n )\n client.listen_message(thread=False)\n while True:\n logger.info('Waiting for event...')\n time.sleep(30)\n\n\nif __name__ == '__main__':\n WeChatDemo().run()\n","repo_name":"leohowell/barbossa","sub_path":"wechat/wechat.py","file_name":"wechat.py","file_ext":"py","file_size_in_byte":43021,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"66"} +{"seq_id":"32892740645","text":"from .models import CartItem, Cart\nfrom .views import _cart_id\n\n\n\n# To get the number of items in our cart\ndef counter(request):\n cart_count=0\n if 'admin' in request.path: #if we are inside admin, then we dont want to see anything, so returning an empty dict.\n return {}\n else:\n try:\n cart = Cart.objects.filter(cart_id = _cart_id(request)) #bring the cart id (which is the session key)\n cart_items = CartItem.objects.all().filter(cart=cart[:1]) #filter it and we only need one result thats why cart[:1]\n for cart_item in cart_items:\n cart_count += cart_item.quantity #we are getting the quantity of from CartItem.quantity (in models)\n except Cart.DoesNotExist:\n cart_count=0\n return dict(cart_count=cart_count) ","repo_name":"87saswat/ecom-demo-grtcrt","sub_path":"cart/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20064106362","text":"#\n# @lc app=leetcode.cn id=500 lang=python3\n#\n# [500] 键盘行\n#\n# https://leetcode-cn.com/problems/keyboard-row/description/\n#\n# algorithms\n# Easy (69.05%)\n# Likes: 101\n# Dislikes: 0\n# Total Accepted: 18.2K\n# Total Submissions: 26.3K\n# Testcase Example: '[\"Hello\",\"Alaska\",\"Dad\",\"Peace\"]'\n#\n# 给定一个单词列表,只返回可以使用在键盘同一行的字母打印出来的单词。键盘如下图所示。\n#\n#\n#\n#\n#\n#\n#\n# 示例:\n#\n# 输入: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"]\n# 输出: [\"Alaska\", \"Dad\"]\n#\n#\n#\n#\n# 注意:\n#\n#\n# 
你可以重复使用键盘上同一字符。\n# 你可以假设输入的字符串将只包含字母。\n#\n#\n\n\n# @lc code=start\nclass Solution:\n\n def findWords(self, words: List[str]) -> List[str]:\n T = []\n set1 = set('qwertyuiop')\n set2 = set('asdfghjkl')\n set3 = set('zxcvbnm')\n\n def match(set0, str0):\n if not (set(str0.lower()) - set0): return True\n return False\n\n for i in range(len(words)):\n if match(set1, words[i]) or match(set2, words[i]) or match(set3, words[i]):\n T.append(words[i])\n return T\n\n\n# @lc code=end\n","repo_name":"mahatmaWM/leetcode","sub_path":"leetcode/editor/cn/500.键盘行.py","file_name":"500.键盘行.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"21351118028","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 31 16:29:36 2018\r\n\r\n@author: nilesh_indore\r\n\r\nThis program is to send data in chunks and at client end join those chunks\r\n\"\"\"\r\n\r\nimport socket\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.bind(('',9000))\r\ns.listen(5)\r\nwhile 2>1:\r\n data,addr = s.accept()\r\n print(\"Connection received from \", addr)\r\n msg = data.recv(1024)\r\n data.sendall(str(msg).encode('utf-8'))\r\ns.close()","repo_name":"indorenilesh/SocketProgramming","sub_path":"server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33390207795","text":"'''\n## OS Module in PYthon ##\n#########################\n -The OS module in python provides functions for interacting with the operating system.\n -OS, comes under Python’s standard utility modules.\n -This module provides a portable way of using operating system dependent functionality.\n -The *os* and *os.path* modules include many functions to interact with the file system.\n\n# Functions of OS Module:>\n 1. os.name:->\n This function gives the name of the operating system dependent module imported.\n The following names have currently been registered: ‘posix’, ‘nt’, ‘os2’, ‘ce’, ‘java’ and ‘riscos’\n 2. os.getcwd():->\n Function os.getcwd(), returns the Current Working Directory(CWD) of the file used to execute the code,\n can vary from system to system.\n 4. os.popen():->\n This method opens a pipe to or from command.\n The return value can be read or written depending on whether mode is ‘r’ or ‘w’.\n\n 5. os.close():->\n Close file descriptor fd. A file opened using open(), can be closed by close()only.\n But file opened through os.popen(), can be closed with close() or os.close().\n If we try closing a file opened with open(), using os.close(),\n Python would throw TypeError.\n\n 6. 
os.rename():->\n A file old.txt can be renamed to new.txt, using the function os.rename().\n The name of the file changes only if,\n the file exists and user has sufficient privilege permission to change the file.\n'''\nprint(\"\")\n\nimport os\nprint('os name:',os.name)\nprint(\"\\nGetcwd() method.\")\n\nprint('Current working Directory',os.getcwd())\n\n\nprint('\\nPopen() method.')\n\nfd = \"GFG.txt\"\n# popen() is similar to open()\nfile = open(fd, 'w')\nfile.write(\"Hello\")\nfile.close()\nfile = open(fd, 'r')\ntext = file.read()\nprint(text)\n# popen() provides a pipe/gateway and accesses the file directly\nfile = os.popen(fd, 'w')\nfile.write(\"Hello\")\nfile.close\n\n\n\nprint(\"\\nClose() method\")\nimport os\nfd = \"GFG.txt\"\nfile = open(fd, 'r')\ntext = file.read()\nprint(text)\nos.close(file)\n\nprint(\"\\nRename() method.\")\nfd = \"GFG.txt\"\nos.rename(fd,'New.txt')\nos.rename(fd,'New.txt')\n","repo_name":"MyHackInfo/Python-3-on-geeksforgeeks","sub_path":"043-OS Module in Python.py","file_name":"043-OS Module in Python.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"6241630205","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nfrom ckeditor_uploader.fields import RichTextUploadingField\n\n# Create your models here.\n\nclass Topic(models.Model):\n topic_title = models.TextField()\n slug = models.SlugField(unique = True, blank = True)\n\n def __str__(self):\n return self.topic_title\n def save(self, *args, **kwargs):\n self.slug = self.slug or (slugify(self.topic_title))\n super().save(*args, **kwargs)\n\nclass Post(models.Model):\n title = models.TextField()\n intro = models.TextField(blank = True)\n content = RichTextUploadingField()\n author = models.ForeignKey(User, on_delete = models.CASCADE)\n date = models.DateTimeField(default = timezone.now)\n slug = models.SlugField(unique = True, blank = True)\n num_claps = models.BigIntegerField(default = 0)\n num_comments = models.BigIntegerField(default = 0)\n topic = models.ForeignKey(Topic, on_delete = models.CASCADE)\n\n\n\n def __str__(self):\n return self.title\n def save(self, *args, **kwargs):\n self.slug = self.slug or (slugify(self.title) + slugify(self.topic.topic_title) +slugify(str(self.date)))\n c_intro = ''\n for c in range(0,len(self.content)):\n c_intro += self.content[c]\n if(c == 20):\n break\n self.intro = self.intro or c_intro\n super().save(*args,**kwargs)\n\n\nclass UserFollows(models.Model):\n user = models.ForeignKey(User, on_delete = models.CASCADE)\n follows = models.ForeignKey(Topic, on_delete = models.CASCADE)\n\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(Post, on_delete = models.CASCADE)\n comment_by = models.ForeignKey(User, on_delete = models.CASCADE)\n comment = models.TextField()\n date = models.DateTimeField(default = timezone.now)\n slug = models.SlugField(unique = True, blank = True)\n def save(self, *args, **kwargs):\n self.slug = self.slug or (slugify(self.comment_by.username) + slugify(self.post.title) +slugify(str(self.date)))\n super().save(*args,**kwargs)\n\n\n\nclass Clap(models.Model):\n post = models.ForeignKey(Post, on_delete = models.CASCADE)\n clappeded_by = models.ForeignKey(User, on_delete = 
models.CASCADE)\n","repo_name":"kamrulhasan0/pluto","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"20271599578","text":"import pygame\r\nfrom constants import *\r\n\r\n\r\nclass Player:\r\n INVICIBLE_TIME = 2000\r\n\r\n def __init__(self, game):\r\n # Variaveis da imagem\r\n self.alive_img = pygame.transform.scale(HEART_IMG, (game.rect.w // 15, game.rect.w // 15)).convert()\r\n self.invicible_img = pygame.transform.scale(HEART_INVICIBLE_IMG, (game.rect.w // 15, game.rect.w // 15)).convert_alpha()\r\n self.img = self.alive_img\r\n\r\n self.rect = self.img.get_rect(center=game.rect.center)\r\n self.game = game\r\n self.velocity = 5\r\n self.invicible = False\r\n self.inviciblity_timer = 0\r\n\r\n def set_invicible(self):\r\n HURT_SOUND.play()\r\n self.img = self.invicible_img\r\n self.invicible = True\r\n self.inviciblity_timer = pygame.time.get_ticks()\r\n\r\n def update(self, keys):\r\n # Checa se o jogador está no modo invisível\r\n if self.invicible and pygame.time.get_ticks() - self.inviciblity_timer >= self.INVICIBLE_TIME:\r\n self.img = self.alive_img\r\n self.invicible = False\r\n\r\n if keys[pygame.K_LEFT]:\r\n self.rect.x -= self.velocity\r\n if keys[pygame.K_RIGHT]:\r\n self.rect.x += self.velocity\r\n if keys[pygame.K_UP]:\r\n self.rect.y -= self.velocity\r\n if keys[pygame.K_DOWN]:\r\n self.rect.y += self.velocity\r\n\r\n def draw(self, screen):\r\n # Mostra a imagem do jogador e o seu retângulo\r\n #pygame.draw.rect(screen, RED, self.rect, 1)\r\n screen.blit(self.img, self.rect)\r\n","repo_name":"Ribs2004/Pygame","sub_path":"Pygame_final/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71128118612","text":"from flask import Flask, render_template,request\r\nfrom flasgger import Swagger\r\nimport pickle\r\nimport pandas as pd\r\n\r\napp = Flask(__name__)\r\ntest_df = pd.read_csv(\"test_dataset.csv\")\r\npickle_file = open('Credit_risk.pkl','rb')\r\nclassifier = pickle.load(pickle_file)\r\nSwagger(app)\r\n\r\n@app.route(\"/\")\r\ndef base_route():\r\n return \"Welcome to Credit Risk Prediction API\",200\r\n\r\n@app.route(\"/predictForSample\",methods=['GET'])\r\ndef predictRate():\r\n \"\"\"Swagger App for Credit Risk Prediction\r\n --------\r\n parameters:\r\n - name: ExistingCreditsCount\r\n description: Waht is the existing credit count\r\n in: query\r\n type: integer\r\n required: true\r\n - name: CurrentResidenceDuration\r\n description : Curresnt resident duration\r\n in: query\r\n type: integer\r\n required: true\r\n - name: Age\r\n description : Age of the customer\r\n in: query\r\n type: integer\r\n required: true\r\n - name: Dependents\r\n description : How amny dependents are there\r\n in: query\r\n type: integer\r\n required: true\r\n - name: LoanDuration\r\n description : Duration of previous loan\r\n in: query\r\n type: integer\r\n required: true\r\n - name: LoanAmount\r\n description : Loan amount for previous loan\r\n in: query\r\n type: integer\r\n required: true\r\n - name: InstallmentPercent\r\n description : What was the percentage of installment amount\r\n in: query\r\n type: integer\r\n required: true\r\n - name: Risk\r\n description : Target variable\r\n in: query\r\n type: integer\r\n required: true\r\n responses:\r\n 200:\r\n description : Predicted for Sample 
Customers\r\n 201:\r\n description : Predicted for file containing all Customers\r\n \"\"\"\r\n\r\n ExistingCreditsCount = request.args.get(\"ExistingCreditsCoun\")\r\n CurrentResidenceDuration = request.args.get(\"CurrentResidenceDuration\")\r\n Age = request.args.get(\"Age\")\r\n Dependents = request.args.get(\"Dependents\")\r\n LoanDuration = request.args.get(\"LoanDuration\")\r\n LoanAmount = request.args.get(\"LoanAmount\")\r\n InstallmentPercent = request.args.get(\"InstallmentPercent\")\r\n Risk = request.args.get(\"Risk\")\r\n\r\n\r\n result = classifier.predict([[ExistingCreditsCount, CurrentResidenceDuration, Age, Dependents,\r\n LoanDuration, LoanAmount, InstallmentPercent, Risk]])\r\n\r\n if(result in [0.0,\"0.0\"]) : return \"No Risk\"\r\n if(result in [1.0,\"1.0\"]) : return \"Risk\"\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, host= \"127.0.0.1\", port= 5000)","repo_name":"GouthamKumar-R/Credit-Risk-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6521807154","text":"\n# https://leetcode.com/problems/longest-consecutive-sequence/\n\n\nclass Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n \n if len(nums) == 0:\n return 0\n\n nums = set(nums)\n\n longest = 0\n length = 0\n\n for num in nums:\n \n if num-1 not in nums:\n length = 1\n while num+length in nums:\n length += 1\n longest = max(longest, length)\n \n return longest\n ","repo_name":"rawatraghav/LeetcodeSols2023","sub_path":"MISC/longest-consecutive-sequence.py","file_name":"longest-consecutive-sequence.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"26828324009","text":"import argparse\nimport os\n\nimport gitlab\nimport gitlab_api\nfrom AutoTestScript.RunnerConfigs.Config import Config\n\nSSC_BUILD_JOB_MAP = {\n 'ESP32': 'build_ssc_esp32',\n 'ESP32C3': 'build_ssc_esp32c3',\n}\nNEEDED_FILES = [\n 'flasher_args.json',\n 'bootloader/bootloader.bin',\n 'partition_table/partition-table.bin',\n 'ssc.bin',\n 'ssc.elf',\n]\nIDF_PATH = os.environ.get('IDF_PATH')\n\n\ndef try_to_download_artifacts(bin_path: str) -> None:\n '''\n bin_path: \"SSC/ssc_bin/ESP32[C3]/SSC[_APP]\"\n '''\n project_id = os.getenv('CI_PROJECT_ID')\n pipeline_id = os.getenv('CI_PIPELINE_ID')\n gitlab_inst = gitlab_api.Gitlab(project_id)\n build_job_name = SSC_BUILD_JOB_MAP[bin_path.split('/')[-2]]\n job_list = gitlab_inst.find_job_id(build_job_name, pipeline_id=pipeline_id)\n files_to_download = [os.path.join(bin_path, f) for f in NEEDED_FILES]\n for job_info in job_list:\n try:\n gitlab_inst.download_artifact(job_info['id'], files_to_download, IDF_PATH)\n print('Downloaded {} from {}'.format(bin_path, job_info['id']))\n break\n except gitlab.exceptions.GitlabError as e:\n if e.response_code == 404:\n continue\n raise\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'test_config_file',\n help='The test config file to be used.'\n )\n args = parser.parse_args()\n\n configs = Config.parse(args.test_config_file)\n test_bin_paths = configs.get_bin_paths()\n\n for _path in test_bin_paths:\n if os.path.exists(_path):\n continue\n relative_path = os.path.relpath(_path, IDF_PATH)\n try_to_download_artifacts(relative_path)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"zephyrproject-rtos/hal_espressif","sub_path":"tools/ci/integration_test/prepare_test_bins.py","file_name":"prepare_test_bins.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"66"} +{"seq_id":"20189452361","text":"\n#%%\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport torch.utils.data as data\nimport torchvision as tv\nimport torchvision.transforms as tf\nimport torchvision.models as mod\nfrom PIL import Image\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n#%% [markdown]\n# # torch.nn.init\n#%% [markdown]\n# ### torch.nn.init.calculate_gain(nonlinearity, param=None) : 为给定的非线性函数,返回推荐的增加值. 非线性函数可以是:\n# - Linear/Identity\t1\n# - Conv{1,2,3}D 1\n# - Sigmoid 1\n# - Tanh 5/3\n# - ReLU sqrt(2)\n# \n# \n# 第一列是非线性函数名,第二列是增加的值.\n\n#%%\ngain=nn.init.calculate_gain('relu')##注意参数小写\ngain\n\n#%% [markdown]\n# ### torch.nn.init.uniform\\_(tensor, a=0, b=1):以均匀分布初始化输入张量.\n\n#%%\nX=torch.empty(2,3)\nnn.init.uniform_(X)\nX\n\n#%% [markdown]\n# ### torch.nn.init.normal\\_(tensor, mean=0, std=1):以正太分布初始化输入张量.\n\n#%%\nnn.init.normal_(X)\n\n#%% [markdown]\n# ### torch.nn.init.constant\\_(tensor, val): 以常量初始化输入张量.\n\n#%%\nnn.init.constant_(X,2)\nX\n\n#%% [markdown]\n# ### torch.nn.init.eye\\_(tensor):以恒等矩阵初始化输入的二维张量. 在线性层保持输入的恒等性,在该层使得尽量多的输入保持恒等.\n\n#%%\n#即使非方阵,也可调用\nnn.init.eye_(X)\nX\n\n\n#%%\nX=X.reshape(3,2)\nnn.init.eye_(X)\nX\n\n#%% [markdown]\n# ### torch.nn.init.dirac_(tensor):以 Dirac-delta 函数对3、4、5维张量进行初始化. 保持在卷积层的恒等性,在该层使得尽量多的输入保持恒等.\n\n#%%\nX=torch.empty(2,3,4)\nnn.init.dirac_(X)\nX\n\n#%% [markdown]\n# ### torch.nn.init.xavier\\_uniform\\_(tensor, gain=1): 根据 “Understanding the difficulty of training deep feedforward neural networks”中描述的方法,使用均匀分布U(-a,a)为输入张量进行初始化.gain值是上面介绍的calculate_gain函数,是对a的缩放系数.\n\n#%%\nX=torch.empty(2,3)\nnn.init.xavier_uniform_(X,gain=nn.init.calculate_gain('relu'))\nX\n\n#%% [markdown]\n# ### torch.nn.init.xavier\\_normal\\_(tensor, gain=1):根据如上描述的方法,使用正太分布N(0,std)为输入变量进行初始化. gain值如上述,是std的缩放系数.\n\n#%%\nnn.init.xavier_normal_(X,gain=nn.init.calculate_gain('sigmoid'))\nX\n\n#%% [markdown]\n# ### torch.nn.init.kaiming\\_uniform\\_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'): 使用均匀分布U(-bound,bound)为输入张量进行初始化,系数影响bound值.\n# 见文档: https://pytorch.org/docs/stable/nn.html#torch.nn.init.kaiming_uniform_\n\n#%%\nnn.init.kaiming_uniform_(X)\nX\n\n#%% [markdown]\n# ### torch.nn.init.kaiming\\_normal\\_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):使用正太分布为输入张量做初始化. \n# \n# 见文档:https://pytorch.org/docs/stable/nn.html#torch.nn.init.kaiming_normal_\n\n#%%\nnn.init.kaiming_normal_(X)\nX\n\n#%% [markdown]\n# ### torch.nn.init.orthogonal\\_(tensor, gain=1): 使用半正交矩阵为输入张量进行初始化.\n# \n# 见:https://pytorch.org/docs/stable/nn.html#torch.nn.init.orthogonal_\n\n#%%\nnn.init.orthogonal_(X)\nX\n\n#%% [markdown]\n# ### torch.nn.init.sparse\\_(tensor, sparsity, std=0.01): 把输入的二维张量初始化为稀疏矩阵,非0元素将从N(0,std)中采样.\n# \n# 见:https://pytorch.org/docs/stable/nn.html#torch.nn.init.sparse_\n\n#%%\nnn.init.sparse_(X,sparsity=0.5)\nX\n\n\n#%%\n##########test#############\nX=torch.randn(2,3,2,2,dtype=torch.float)\nf=nn.Dropout2d()\nf(X)\n\n\n#%%\nf(X)\n\n\n#%%\n##推断时,模型的权值是最终训练模型的1/2. 
这是所谓的 权重比例推断原则.(weight scaling inference rule).\nf.eval()\nf(X)\n\n\n#%%\nf=nn.BatchNorm2d\n\n\n#%%\n\n\n\n","repo_name":"qinhaihong-red/TorchDaily","sub_path":"Daily1023_pytorch(二十三):参数初始化函数.py","file_name":"Daily1023_pytorch(二十三):参数初始化函数.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"42785255996","text":"#coding=utf-8\n\nimport csv\nimport sys\n\n\n\nx2_read = csv.DictReader(open('booklist2.csv','r',encoding='utf-8'))\n#header = x2_read.fieldnames\n\n\n\nwith open('booklist1.csv','a',encoding='utf-8') as f:\n\tmy_read = csv.DictWriter(f, fieldnames=['條碼書目','書名'])\n\tmy_read.writeheader()\n\tmy_read.writerows(x2_read)\n\t#my_read.writerows(x2_read)\n\n\n\t\t#x1_read = csv.reader(x1)\n\t\t#members1 = [(row[0], row[1], row[2], row[3])for row in x1_read]\n\t\t#members1 = x1_read.fieldnames\n\n\n\n\n\n","repo_name":"YuShengRu/library","sub_path":"booklist.py","file_name":"booklist.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"75235317011","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\n\"\"\"\r\nTopic: 第十四章:测试、调试和异常\r\nDescription: 试验还是很棒的,但是调试?就没那么有趣了。事实是,在 Python 测试代码之前\r\n没有编译器来分析你的代码,因此使的测试成为开发的一个重要部分。本章的目标是\r\n讨论一些关于测试、调试和异常处理的常见问题。但是并不是为测试驱动开发或者单\r\n元测试模块做一个简要的介绍。因此,笔者假定读者熟悉测试概念。\r\n\r\nTitle: 给你的程序做性能测试\r\nIssue: 你想测试你的程序运行所花费的时间并做性能测试。\r\nAnswer: 如果你只是简单的想测试下你的程序整体花费的时间,通常使用 Unix 时间函数就\r\n行了\r\n\"\"\"\r\n\"\"\"\r\n比如:\r\nbash % time python3 someprogram.py\r\nreal 0m13.937s\r\nuser 0m12.162s\r\nsys 0m0.098s\r\nbash %\r\n\r\n如果你还需要一个程序各个细节的详细报告,可以使用 cProfile 模块\r\nbash % python3 -m cProfile someprogram.py\r\n859647 function calls in 16.016 CPU seconds\r\nOrdered by: standard name\r\nncalls tottime percall cumtime percall filename:lineno(function)\r\n263169 0.080 0.000 0.080 0.000 someprogram.py:16(frange)\r\n513 0.001 0.000 0.002 0.000 someprogram.py:30(generate_mandel)\r\n262656 0.194 0.000 15.295 0.000 someprogram.py:32()\r\n1 0.036 0.036 16.077 16.077 someprogram.py:4()\r\n262144 15.021 0.000 15.021 0.000 someprogram.py:4(in_mandelbrot)\r\n1 0.000 0.000 0.000 0.000 os.py:746(urandom)\r\n1 0.000 0.000 0.000 0.000 png.py:1056(_readable)\r\n1 0.000 0.000 0.000 0.000 png.py:1073(Reader)\r\n1 0.227 0.227 0.438 0.438 png.py:163()\r\n512 0.010 0.000 0.010 0.000 png.py:200(group)\r\n...\r\nbash %\r\n\r\n\r\n不过通常情况是介于这两个极端之间。比如你已经知道代码运行时在少数几个函数\r\n中花费了绝大部分时间。对于这些函数的性能测试,可以使用一个简单的装饰器:\r\nimport time\r\nfrom functools import wraps\r\ndef timethis(func):\r\n @wraps(func)\r\n def wrapper(*args, **kwargs):\r\n start = time.perf_counter()\r\n r = func(*args, **kwargs)\r\n end = time.perf_counter()\r\n print('{}.{} :{}'.format(func.__module__, func.__name__, end - start))\r\n return r\r\n return wrapper\r\n\"\"\"\r\n\r\nimport time\r\nfrom functools import wraps\r\ndef timethis(func):\r\n @wraps(func)\r\n def wrapper(*args, **kwargs):\r\n start = time.perf_counter()\r\n r = func(*args, **kwargs)\r\n end = time.perf_counter()\r\n print('{}.{} :{}'.format(func.__module__, func.__name__, end - start))\r\n return r\r\n return wrapper\r\n\r\n\r\n\"\"\"要使用这个装饰器,只需要将其放置在你要进行性能测试的函数定义前即可,比\r\n如:\"\"\"\r\n@timethis\r\ndef countdown(n):\r\n while n > 0:\r\n n -= 1\r\n\r\ncountdown(10000000)\r\n# __main__.countdown :0.6399373729273985\r\n\r\n\"\"\"\r\n要测试某个代码块运行时间,你可以定义一个上下文管理器,例如:\r\n\"\"\"\r\nfrom contextlib import contextmanager\r\n\r\n@contextmanager\r\ndef 
timeblock(label):\r\n start = time.perf_counter()\r\n try:\r\n yield\r\n finally:\r\n end = time.perf_counter()\r\n print('{} :{}'.format(label, end - start))\r\n\r\n\"\"\"\r\n下面是使用这个上下文管理器的例子:\r\n\"\"\"\r\nwith timeblock('counting'):\r\n n = 10000000\r\n while n > 0:\r\n n -= 1\r\n\r\n\"\"\"\r\n对于测试很小的代码片段运行性能,使用 timeit 模块会很方便,例如:\r\n\"\"\"\r\nfrom timeit import timeit\r\ntimeit('math.sqrt(2)', 'import math')\r\ntimeit('sqrt(2)', 'from math import sqrt')\r\n\r\n\"\"\"\r\ntimeit 会执行第一个参数中语句 100 万次并计算运行时间。第二个参数是运行测\r\n试之前配置环境。如果你想改变循环执行次数,可以像下面这样设置 number 参数的值\r\n\"\"\"\r\ntimeit('math.sqrt(2)', 'import math', number=10000000)\r\ntimeit('sqrt(2)', 'from math import sqrt', number=10000000)\r\n\r\n\"\"\"\r\n当 执 行 性 能 测 试 的 时 候, 需 要 注 意 的 是 你 获 取 的 结 果 都 是 近 似 值。\r\ntime.perf counter() 函数会在给定平台上获取最高精度的计时值。不过,它仍然\r\n还是基于时钟时间,很多因素会影响到它的精确度,比如机器负载。如果你对于执行\r\n时间更感兴趣,使用 time.process_time() 来代替它。例如:\r\n\"\"\"\r\nfrom functools import wraps\r\ndef timethis(func):\r\n @wraps(func)\r\n def wrapper(*args, **kwargs):\r\n start = time.process_time()\r\n r = func(*args, **kwargs)\r\n end = time.process_time()\r\n print('{}.{}: {}'.format(func.__module__, func.__name__, end - start))\r\n return r\r\n return wrapper\r\n\r\n\"\"\"\r\n最后, 如果你想进行更深入的性能分析,那么你需要详细阅读 time 、 timeit 和其\r\n他相关模块的文档。这样你可以理解和平台相关的差异以及一些其他陷阱。还可以参\r\n考 13.13 小节中相关的一个创建计时器类的例子\r\n\"\"\"","repo_name":"bodii/test-code","sub_path":"python/python-cookbook-test-code/14 section/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"36137959331","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef import_dataframe(path, shuffle=True):\n frame = pd.read_csv(path)\n if shuffle:\n frame.sample(frac=1).reset_index(drop=True)\n frame['GalaxyID'] = frame['GalaxyID'].astype(str) + '.jpg'\n return frame\n\n\ndef plot_history(history, plot_path):\n plt.figure(1)\n plt.subplot(211)\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper right')\n\n plt.subplot(212)\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper right')\n\n plt.savefig(plot_path)\n","repo_name":"anhydrous99/galaxmobilenet","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33517000797","text":"import socket\nfrom numpy import polynomial \n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.bind(('127.0.0.1', 49001)) \nsock.listen(1)\n\nconn, adr = sock.accept()\n\nwhile True:\n try:\n data = conn.recv(1024).decode(\"utf-8\")\n data = data.split()\n a, b, c = int(data[0]), int(data[1]), int(data[2])\n\n if (b*b - 4*a*c) >= 0:\n pol = polynomial.Polynomial([a, b, c])\n pol = pol.roots()\n roots = str(pol[0]) + ' ' + str(pol[1])\n print('Корни найдены')\n else:\n roots = \"Нет корней\"\n print('Корни не найдены')\n conn.send(roots.encode())\n except:\n conn.close()\n 
break","repo_name":"MaksNick/ITMO_ICT_WebDevelopment_2023-2024","sub_path":"students/k33422/Nikonorov_Maxim/lab_1/task2/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"66"} +{"seq_id":"41619646639","text":"import sys\nimport torch\n\nKEYS_TO_DELETE = [\n 'optimizer_states',\n 'epoch',\n 'global_step',\n 'pytorch-lightning_version',\n 'callbacks',\n 'lr_schedulers',\n]\n\nif __name__ == \"__main__\":\n knowledge_path = sys.argv[1]\n\n print(f'Loading {knowledge_path}')\n model = torch.load(knowledge_path)\n for name in KEYS_TO_DELETE:\n del model[name]\n\n # for name in model.keys():\n # print(name)\n # print('-' * 60)\n # print(model[name])\n # print('-' * 60)\n # print('\\n'.join(model.keys()))\n torch.save(model, f'{knowledge_path}.out')\n print('=> done')\n","repo_name":"HydroFrame-ML/sandtank-ml","sub_path":"server/utils/dist-knowledge.py","file_name":"dist-knowledge.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33256245649","text":"import numpy as np\nimport pickle as pkl\nimport networkx as nx\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg.eigen.arpack import eigsh\nimport sys\nimport scipy.io as sio\nimport torch\n\ndef adj_to_bias(adj, sizes, nhood=1):\n nb_graphs = adj.shape[0]\n mt = np.empty(adj.shape)\n for g in range(nb_graphs):\n mt[g] = np.eye(adj.shape[1])\n # print(mt)\n for _ in range(nhood):\n mt[g] = np.matmul(mt[g], (adj[g] + np.eye(adj.shape[1])))\n for i in range(sizes[g]):\n for j in range(sizes[g]):\n if mt[g][i][j] > 0.0:\n mt[g][i][j] = 1.0\n\n return -1e9 * (1.0 - mt)\n\n\n###############################################\n# This section of code adapted from tkipf/gcn #\n###############################################\n\ndef parse_index_file(filename):\n \"\"\"Parse index file.\"\"\"\n index = []\n for line in open(filename):\n index.append(int(line.strip()))\n return index\n\n\ndef sample_mask(idx, l):\n \"\"\"Create mask.\"\"\"\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\ndef load_data_new():\n\n print(\"open mat file\")\n path = '/Users/pingping/Desktop/dblpdata/DBLP4057_GAT_with_idx.mat' # data address\n\n data = sio.loadmat(path)\n\n adj_list = [data['net_APCPA'], data['net_APA'], data['net_APTPA']]\n # adj_list = [data['net_APCPA'], data['net_APA']] # list\n # adj_list = [data['net_APCPA']] # list\n\n # DBLP\n features = data['features']\n\n\n labels_matrix = data['label']\n idx_train = data['train_idx']\n idx_val = data['val_idx']\n idx_test = data['test_idx']\n\n train_mask = sample_mask(idx_train, labels_matrix.shape[0])\n val_mask = sample_mask(idx_val, labels_matrix.shape[0])\n test_mask = sample_mask(idx_test, labels_matrix.shape[0])\n y_train = np.zeros(labels_matrix.shape)\n y_val = np.zeros(labels_matrix.shape)\n y_test = np.zeros(labels_matrix.shape)\n\n y_train[train_mask, :] = labels_matrix[train_mask, :]\n y_val[val_mask, :] = labels_matrix[val_mask, :]\n y_test[test_mask, :] = labels_matrix[test_mask, :]\n\n features = features.astype(float)\n\n print(\"*************************load_data_finish*************************\")\n return adj_list, features, y_train, y_val, y_test, train_mask, val_mask, test_mask\n\n\ndef load_random_data(size):\n\n adj = sp.random(size, size, density=0.002) # density similar to cora\n features = sp.random(size, 1000, density=0.015)\n int_labels = 
np.random.randint(7, size=(size))\n labels = np.zeros((size, 7)) # Nx7\n labels[np.arange(size), int_labels] = 1\n\n train_mask = np.zeros((size,)).astype(bool)\n train_mask[np.arange(size)[0:int(size/2)]] = 1\n\n val_mask = np.zeros((size,)).astype(bool)\n val_mask[np.arange(size)[int(size/2):]] = 1\n\n test_mask = np.zeros((size,)).astype(bool)\n test_mask[np.arange(size)[int(size/2):]] = 1\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask\n\ndef sparse_to_tuple(sparse_mx):\n \"\"\"Convert sparse matrix to tuple representation.\"\"\"\n def to_tuple(mx):\n if not sp.isspmatrix_coo(mx):\n mx = mx.tocoo()\n coords = np.vstack((mx.row, mx.col)).transpose()\n values = mx.data\n shape = mx.shape\n return coords, values, shape\n\n if isinstance(sparse_mx, list):\n for i in range(len(sparse_mx)):\n sparse_mx[i] = to_tuple(sparse_mx[i])\n else:\n sparse_mx = to_tuple(sparse_mx)\n\n return sparse_mx\n\ndef standardize_data(f, train_mask):\n \"\"\"Standardize feature matrix and convert to tuple representation\"\"\"\n # standardize data\n f = f.todense()\n mu = f[train_mask == True, :].mean(axis=0)\n sigma = f[train_mask == True, :].std(axis=0)\n f = f[:, np.squeeze(np.array(sigma > 0))]\n mu = f[train_mask == True, :].mean(axis=0)\n sigma = f[train_mask == True, :].std(axis=0)\n f = (f - mu) / sigma\n return f\n\ndef preprocess_features(features):\n \"\"\"Row-normalize feature matrix and convert to tuple representation\"\"\"\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n # return features.todense(), sparse_to_tuple(features)\n return features, features\n\ndef normalize_adj(adj):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n\ndef preprocess_adj(adj):\n \"\"\"Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.\"\"\"\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)\n\ndef preprocess_adj_bias(adj):\n num_nodes = adj.shape[0]\n adj = adj + sp.eye(num_nodes) # self-loop\n adj[adj > 0.0] = 1.0\n if not sp.isspmatrix_coo(adj):\n adj = adj.tocoo()\n adj = adj.astype(np.float32)\n indices = np.vstack((adj.col, adj.row)).transpose() # This is where I made a mistake, I used (adj.row, adj.col) instead\n # return tf.SparseTensor(indices=indices, values=adj.data, dense_shape=adj.shape)\n return indices, adj.data, adj.shape\n","repo_name":"pingpingand/SAHRN","sub_path":"utils/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"14734277621","text":"num = int(input())\r\n\r\nfor i in range(1, 100000): # 어느 그룹에 속하는 수인지 확인\r\n if num <= i*(i+1) / 2:\r\n break\r\nstep = int(num - i*(i-1) / 2) # 그룹 내의 몇번째 수인지 확인\r\n\r\nif i % 2 == 1: # 짝수번째 수이면\r\n print('{0}/{1}'.format(i-(step-1), step))\r\nelse: # 홀수번째 수이면\r\n 
print('{0}/{1}'.format(step, i-(step-1)))","repo_name":"bokkuembab/BOJAutoPush","sub_path":"백준/Bronze/1193. 분수찾기/분수찾기.py","file_name":"분수찾기.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"73289818129","text":"import sys\nimport re\n\nclass Instrumenter:\n\n def __init__(self):\n\n self._methodStartPattern = r'\\w+[ ]*(\\<.*?\\>)?[ ]+\\w+[ ]*\\(([\\[\\]a-zA-Z1-9_,.=<>\\\"\\s ]+)?\\)\\s*\\{'\n self._instrumentationString = \"\\n\\t\\t\\tComm.Log.LogBroker.Instance.TraceDebug(\\\"-INSTRUMENTER-\\\" + Duid);\\n\"\n self._instrumentedFileContent = ''\n\n def Instrument(self, pathToFile):\n\n file = open(pathToFile, 'r')\n fileContent = file.read()\n file.close()\n\n methodStartPatternObj = re.compile(self._methodStartPattern)\n match = methodStartPatternObj.search(fileContent, re.DOTALL)\n\n while match is not None:\n\n self._instrumentedFileContent += fileContent[0: match.end()]\n fileContent = fileContent[match.end() : len(fileContent)]\n\n value = match.group()\n\n toIgnore = re.compile(r'(while|if|else if|for|switch|catch|using|ForEach|\\s?new\\s?)')\n\n if not toIgnore.match(value):\n\n self._instrumentedFileContent += self._instrumentationString\n\n match = methodStartPatternObj.search(fileContent)\n\n self._instrumentedFileContent += fileContent\n\n if '' != self._instrumentedFileContent:\n\n file = open(pathToFile, 'w')\n file.write(self._instrumentedFileContent)\n file.close()\n self._instrumentedFileContent = ''\n\n\nif __name__ == \"__main__\":\n\n instrumenter = Instrumenter()\n\n for i in range(1, len(sys.argv)):\n\n toInstrument = sys.argv[i]\n print('STO PARSANDO ====> ' + toInstrument)\n instrumenter.Instrument(toInstrument)\n","repo_name":"fede-marsiglia/CsharpInstrumenter","sub_path":"mainIpcClient.py","file_name":"mainIpcClient.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"16191144035","text":"from __future__ import annotations\n\nimport os\nimport pathlib\nfrom typing import TYPE_CHECKING, List\n\nfrom ada.config import logger\nfrom ada.fem import StepEigen\nfrom ada.fem.exceptions.fea_execution import (\n FEAnalysisUnableToStart,\n FEAnalysisUnsuccessfulError,\n)\nfrom ada.fem.formats.utils import DatFormatReader\n\nfrom .read_odb import get_odb_data\n\nif TYPE_CHECKING:\n from ada.fem.results.concepts import ElementDataOutput, FEMDataOutput, Results\n from ada.fem.results.eigenvalue import EigenDataSummary\n\n\ndef get_eigen_data(dat_file: str | os.PathLike) -> EigenDataSummary:\n from ada.fem.results.eigenvalue import EigenDataSummary, EigenMode\n\n dtr = DatFormatReader()\n\n re_compiled = dtr.compile_ff_re([int] + [float] * 5)\n re_compiled_2 = dtr.compile_ff_re([int] + [float] * 6)\n\n eig_str = \"eigenvalueoutput\"\n part_str = \"participationfactors\"\n eff_modal = \"effectivemass\"\n\n eig_res = dtr.read_data_lines(dat_file, re_compiled, eig_str, part_str, split_data=True)\n part_res = dtr.read_data_lines(dat_file, re_compiled_2, part_str, eff_modal, split_data=True)\n modalmass = dtr.read_data_lines(dat_file, re_compiled_2, eff_modal, split_data=True)\n\n eigen_modes: List[EigenMode] = []\n\n dof_base = [\"x\", \"y\", \"z\", \"rx\", \"ry\", \"rz\"]\n part_factor_names = [\"p\" + x for x in dof_base]\n eff_mass_names = [\"ef\" + x for x in dof_base]\n\n # Note! 
participation factors and effective modal mass are each deconstructed into 6 degrees of freedom\n for eig, part, modal in zip(eig_res, part_res, modalmass):\n mode, eig_value, freq_rad, freq_cycl, gen_mass, composite_modal_damping = eig\n eig_output = dict(eigenvalue=eig_value, f_rad=freq_rad, f_hz=freq_cycl)\n participation_data = {pn: p for pn, p in zip(part_factor_names, part[1:])}\n eff_mass_data = {pn: p for pn, p in zip(eff_mass_names, part[1:])}\n eigen_modes.append(EigenMode(no=mode, **eig_output, **participation_data, **eff_mass_data))\n\n return EigenDataSummary(eigen_modes)\n\n\ndef read_abaqus_results(results: \"Results\", file_ref: pathlib.Path, overwrite):\n dat_file = file_ref.with_suffix(\".dat\")\n if results.assembly is not None and results.assembly.fem.steps[0] == StepEigen:\n # TODO: Figure out if it is worthwhile adding support for reading step information or if it should be explicitly\n # stated\n pass\n\n if dat_file.exists():\n results.eigen_mode_data = get_eigen_data(dat_file)\n\n check_execution(file_ref)\n\n logger.error(\"Result mesh data extraction is not supported for abaqus\")\n\n return odb_data_to_results(file_ref, results)\n\n\ndef check_execution(file_ref: pathlib.Path):\n sta_file = file_ref.with_suffix(\".sta\")\n if sta_file.exists() is False:\n raise FEAnalysisUnableToStart()\n\n with open(sta_file, \"r\") as f:\n if \"THE ANALYSIS HAS NOT BEEN COMPLETED\" in f.read():\n raise FEAnalysisUnsuccessfulError()\n\n\ndef odb_data_to_results(odb_file: pathlib.Path, results: Results) -> None:\n from ada.fem.results.concepts import HistoryStepDataOutput, ResultsHistoryOutput\n\n odb_data = get_odb_data(odb_file)\n res = ResultsHistoryOutput()\n\n for step in odb_data[\"steps\"].values():\n name = step[\"name\"]\n step_type = step[\"procedure\"]\n step_res = HistoryStepDataOutput(name=name, step_type=step_type)\n res.steps.append(step_res)\n\n for reg in step[\"historyRegions\"].values():\n history_outputs = reg[\"historyOutputs\"].values()\n name = reg[\"name\"]\n if \"element\" in name.lower():\n step_res.element_data[name] = get_element_component_data(name, history_outputs)\n else:\n step_res.fem_data = get_fem_data_output(history_outputs)\n\n results.history_output = res\n\n\ndef get_element_component_data(name: str, history_outputs: dict) -> ElementDataOutput:\n from ada.fem.results.concepts import ElementDataOutput, ElemForceComp\n\n cu_map = {\"CU1\": 0, \"CU2\": 1, \"CU3\": 2, \"CUR1\": 3, \"CUR2\": 4, \"CUR3\": 5}\n cf_map = {\"CTF1\": 0, \"CTF2\": 1, \"CTF3\": 2, \"CTM1\": 3, \"CTM2\": 4, \"CTM3\": 5}\n displ_data = dict()\n force_data = dict()\n for data in history_outputs:\n comp = data[\"name\"]\n cu = cu_map.get(comp, None)\n cf = cf_map.get(comp, None)\n if cu is not None:\n displ_data[cu] = [tuple(x) for x in data[\"data\"]]\n elif cf is not None:\n force_data[cf] = ElemForceComp(comp, [tuple(x) for x in data[\"data\"]])\n\n return ElementDataOutput(name=name, displacements=displ_data, forces=force_data)\n\n\ndef get_fem_data_output(history_outputs) -> dict[str, FEMDataOutput]:\n from ada.fem.results.concepts import FEMDataOutput\n\n return {x[\"name\"]: FEMDataOutput(x[\"name\"], [tuple(y) for y in x[\"data\"]]) for x in history_outputs}\n","repo_name":"Krande/adapy","sub_path":"src/ada/fem/formats/abaqus/results/_results.py","file_name":"_results.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"66"} +{"seq_id":"41493087518","text":"# 
*********************************************************************\r\n# Fonctions associées au projet P7 - Implémentez un modèle de scoring *\r\n# *********************************************************************\r\nimport numpy as np\r\nimport pandas as pd\r\nimport scipy.stats as st\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nfrom tqdm import tqdm\r\nimport gc # garbage collector\r\nfrom contextlib import contextmanager # provides utilities for resource allocation to the 'with' statement\r\n\r\n# Display options\r\nfrom IPython.display import display, display_html, display_png, display_svg\r\npd.set_option('display.max_rows', 200)\r\npd.set_option('display.max_columns', 200)\r\npd.set_option('display.width', 1000)\r\npd.set_option('display.max_colwidth', 199)\r\npd.set_option('display.colheader_justify', 'center')\r\npd.set_option('display.precision', 3)\r\n\r\n# Colorama\r\nfrom colorama import init, Fore, Back, Style\r\n#init()\r\n# Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\r\n# Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\r\n# Style: DIM, NORMAL, BRIGHT, RESET_ALL\r\n\r\n# Répertoires\r\ndata_path = './P7_data/'\r\ninput_data_path = data_path + 'input_data/'\r\nfig_path = './P7_fig/'\r\n\r\n\r\ndef elapsed_format(elapsed):\r\n \"\"\"\r\n Formate le temps écoulé entre 2 time.time()\r\n :param elapsed: float, temps écoulé en secondes\r\n :return: str, durée formatée\r\n \"\"\"\r\n h = int(elapsed / 3600)\r\n hh = '0' + str(h) if h<10 else str(h)\r\n m = int((elapsed - h * 3600) / 60)\r\n mm = '0' + str(m) if m < 10 else str(m)\r\n sec = elapsed - h * 3600 - m * 60\r\n s = int(sec)\r\n ss = '0' + str(s) if s < 10 else str(s)\r\n ms = int((sec - s) * 1000)\r\n if elapsed >= 60:\r\n return(f\"{hh}:{mm}:{ss}\")\r\n elif elapsed >= 1:\r\n return(f\"{sec:.3f}s\")\r\n else:\r\n return(f\"{ms}ms\")\r\n\r\nimport time\r\n@contextmanager\r\ndef timer(process_title):\r\n \"\"\"\r\n Mesure le temps d'exécution des instructions dans une section avec l'instruction 'with'.\r\n :param title: str, nom du processus dont on mesure le temps d'exécution\r\n :return: None\r\n \"\"\"\r\n # Exécuté avant les instructions dans la section avec l'instruction 'with'\r\n start_time = time.time()\r\n # yield déclenche l'exécution des instructions dans la section avec l'instruction 'with'\r\n yield # équivalent à 'yield None'\r\n # Exécuté après l'exécution des instructions dans la section avec l'instruction 'with'\r\n elapsed = time.time() - start_time\r\n print(f\"'{process_title}' exécuté en {elapsed_format(elapsed)}\\n\")\r\n\r\n\r\nimport glob\r\nimport os\r\ndef list_dir(dir_path, extension=None, verbose=False):\r\n \"\"\"\r\n Liste les fichiers dans un répertoire, en se limitant\r\n de manière optionnelle à ceux de la spécification donnée\r\n par extension, ex: '*.csv'.\r\n :param dir_path: str, chemin du répertoire.\r\n :param extension: str, extension des fichiers à lister\r\n default: None, liste tous les fichiers.\r\n :param verbose: bool, mode verbose.\r\n :return: list, liste des fichiers\r\n \"\"\"\r\n if extension is not None:\r\n path = dir_path + extension\r\n else:\r\n path = dir_path\r\n list_filenames = glob.glob(path)\r\n for index in range(len(list_filenames)):\r\n list_filenames[index] = list_filenames[index].replace('\\\\', '/')\r\n\r\n if verbose:\r\n print(f\"Liste des {len(list_filenames)} fichiers de '{path}':\")\r\n for file in list_filenames:\r\n filename = 
os.path.basename(file)\r\n print(\" →\", filename)\r\n\r\n return list_filenames\r\n\r\n\r\nimport csv\r\nimport os\r\ndef change_csv_sep(filepath, old_csv=',', new_csv=';', suffix=None):\r\n \"\"\"\r\n Change le séparateur du fichier csv, notamment pour\r\n le rendre directement lisible par MS-Excel\r\n :param filepath: str, chemin complet du fichier\r\n :param old_csv: str, séparateur du fichier existant la place de l'existant\r\n :param suffix: str, pour créer un nouveau fichier identifié avec un suffixe\r\n default:None pour écrasement du fichier existant\r\n :return output_filepath: str, chemin du nouveau fichier (None si erreur)\r\n \"\"\"\r\n if filepath[-4:] != '.csv':\r\n print(\"Error: filename must have '.csv' extension\")\r\n output_filepath = None\r\n else:\r\n input_file = open(filepath, \"r\")\r\n reader = csv.reader(input_file, delimiter=old_csv)\r\n if suffix is not None:\r\n new_dir = os.path.dirname(filepath) + '/' + suffix + '/'\r\n if not os.path.exists(new_dir):\r\n os.makedirs(new_dir)\r\n output_filepath = new_dir + os.path.basename(filepath)[:-4] + '_' + suffix + \".csv\"\r\n print('output_filepath:', output_filepath)\r\n else:\r\n output_filepath = os.path.dirname(filepath) + \"/tmp.txt\"\r\n output_file = open(output_filepath, 'w', newline='')\r\n writer = csv.writer(output_file, delimiter=new_csv)\r\n writer.writerows(reader)\r\n input_file.close()\r\n output_file.close()\r\n if suffix is None:\r\n os.remove(filepath)\r\n os.rename(output_filepath, filepath)\r\n output_filepath = filepath\r\n return output_filepath\r\n\r\n\r\nimport os\r\ndef get_features_dict(list_filenames, verbose=False):\r\n \"\"\"\r\n Construit le dictionnaire dont les clés sont les features\r\n et les valeurs les noms des fichiers dans lesquels on peut les trouver\r\n :param list_filenames: list, liste des fichiers de données\r\n :param verbose: bool, mode verbose\r\n :return: dict, dictionnaire des features\r\n \"\"\"\r\n # Variables pour la fonction dataset_tables et get_data\r\n global features, n_features, data_dimensions, key_features\r\n n_features = {}\r\n features = {}\r\n data_dimensions = {}\r\n key_features = {}\r\n # Fonction get_features_dict\r\n features_dict = {}\r\n for filename in list_filenames:\r\n f_name = os.path.basename(filename)[:-4]\r\n if verbose: print(f\"Lecture des features du fichier '{f_name}'\")\r\n df = pd.read_csv(filename, encoding='utf-8-sig', encoding_errors='surrogateescape', low_memory=False)\r\n n_features[f_name] = df.shape[1]\r\n features[f_name] = df.columns.tolist()\r\n data_dimensions[f_name] = df.shape\r\n key_features[f_name] = []\r\n count = 0\r\n for feature in df.columns.tolist():\r\n if df.duplicated(subset=feature).any()==False:\r\n key_features[f_name].append(feature)\r\n if features_dict.get(feature)==None:\r\n count += 1\r\n features_dict[feature] = [f_name]\r\n else:\r\n features_dict[feature].append(f_name)\r\n if verbose: print(f\" → {count} features sur {df.shape[1]} ajoutées au dictionnaire\")\r\n return features_dict\r\n\r\n\r\ndef sort_n_filter_features_dict(features_dict, verbose=False):\r\n \"\"\"\r\n Trie les features par valeurs décroissantes du nombre de fichiers\r\n dans lesquels elles sont présentes.\r\n Filtre les entrées non communes à plusieurs fichiers.\r\n :param features_dict: dict, dictionnaire des features\r\n (format: {feature: [fichiers de données]})\r\n :param verbose: bool, mode verbose.\r\n :return: dict, dictionnaire des features trié et filtré.\r\n \"\"\"\r\n # Tri descendant des features en fonction du 
nombre de fichiers dans lesquels elles sont présentes\r\n fns_features_dict = {k: v for k, v in sorted(features_dict.items(), key=lambda x: len(x[1]), reverse=True)}\r\n # Filtrage des features représentées dans un seul fichier de données\r\n size = len(features_dict)\r\n keys_to_rm = []\r\n for k in fns_features_dict.keys():\r\n if len(fns_features_dict[k])<=1:\r\n keys_to_rm.append(k)\r\n for k in keys_to_rm:\r\n fns_features_dict.pop(k, None)\r\n new_size = len(fns_features_dict)\r\n if verbose: print(f\"\\nNombre total de features: {size} → \"\r\n f\"features communes à plusieurs fichiers: {new_size}\")\r\n return fns_features_dict\r\n\r\n\r\ndef get_dataset_info():\r\n \"\"\"\r\n Donne les dimensions (shape) des fichiers de données ainsi que\r\n les features clés (celles capables d'indexer le fichier).\r\n Utilise 2 variables globales de la fonction 'get_features_dict'.\r\n :return: dict, dictionnaire de format {filename: shape}\r\n :return: dict, dictionnaire de format {filename: [features]}\r\n \"\"\"\r\n return data_dimensions, key_features\r\n\r\n\r\ndef dataset_tables(filenames, features_dict, verbose=False):\r\n \"\"\"\r\n Etablit les tables des relations entre les fichiers des jeux de données.\r\n :param filenames: list, liste des noms de fichier du dataset sans le chemin ni l'extension\r\n :param features_dict: dict, dictionnaire des features établi par get_features_dict ou sort_n_filter_features_dict\r\n :param verbose: bool, mode verbose\r\n :return: dataframe, dataframe, dataframe, dataframe\r\n - df_nrel, dataframe du nombre de features communes par paire de fichiers\r\n - df_feat, dataframe de la liste des features communes par paire de fichiers\r\n - df_keynrel, dataframe du nombre de features-clés communes par paire de fichiers\r\n - df_keyfeat, dataframe de la liste des features-clés communes par paire de fichiers\r\n \"\"\"\r\n # Initialisation des tables\r\n df_nrel = pd.DataFrame(np.zeros((len(filenames), len(filenames)), dtype=int), index=filenames, columns=filenames)\r\n df_keynrel = pd.DataFrame(np.zeros((len(filenames), len(filenames)), dtype=int), index=filenames, columns=filenames)\r\n df_feat = pd.DataFrame([], index=filenames, columns=filenames)\r\n df_keyfeat = pd.DataFrame([], index=filenames, columns=filenames)\r\n for a in filenames:\r\n for b in filenames:\r\n if a==b:\r\n # Les valeurs sont produites par la fonction get_features_dict\r\n df_nrel.at[a, b] = n_features[a]\r\n df_feat.at[a, b] = features[a]\r\n df_keyfeat.at[a, b] = key_features[a]\r\n df_keynrel.at[a, b] = len(key_features[a])\r\n else:\r\n df_feat.at[a, b] = []\r\n df_keyfeat.at[a, b] = []\r\n\r\n # Constitution des tables df_nrel et df_feat\r\n for dict_key, dict_value in features_dict.items():\r\n pairs = [(a,b) for idx, a in enumerate(dict_value) for b in dict_value[idx+1:]]\r\n for pair in pairs:\r\n a, b = pair[0], pair[1]\r\n df_nrel.at[a,b] += 1\r\n df_nrel.at[b,a] += 1\r\n df_feat.at[a,b].append(dict_key)\r\n df_feat.at[b,a].append(dict_key)\r\n\r\n # Constitution de la table df_keyfeat\r\n for dict_key, dict_value in features_dict.items():\r\n pairs = [(a, b) for idx, a in enumerate(dict_value) for b in dict_value[idx + 1:]]\r\n for pair in pairs:\r\n a, b = pair[0], pair[1]\r\n kfs = list(set(key_features[a] + key_features[b]))\r\n if kfs :\r\n for kf in kfs:\r\n if kf in df_feat.at[a,b] and kf not in df_keyfeat.at[a, b]:\r\n df_keyfeat.at[a, b].append(kf)\r\n df_keyfeat.at[b, a].append(kf)\r\n df_keynrel.at[a, b] = len(df_keyfeat.at[a, b])\r\n df_keynrel.at[b, a] = 
len(df_keyfeat.at[b, a])\r\n\r\n # Affichage des tables\r\n if verbose:\r\n print(Fore.BLACK + Style.BRIGHT + Back.WHITE\r\n + \"Table des nombres de relations entre les fichiers du jeu de données:\\n\"\r\n + Style.RESET_ALL)\r\n display(df_nrel)\r\n print(Fore.BLACK + Style.BRIGHT + Back.WHITE\r\n + \"Table des features mettant en relation les fichiers du jeu de données:\\n\"\r\n + Style.RESET_ALL)\r\n display(df_feat)\r\n print(Fore.BLACK + Style.BRIGHT + Back.WHITE\r\n + \"Table des nombres de relations clés entre les fichiers du jeu de données:\\n\"\r\n + Style.RESET_ALL)\r\n display(df_keynrel)\r\n print(Fore.BLACK + Style.BRIGHT + Back.WHITE\r\n + \"Table des features-clés mettant en relation les fichiers du jeu de données:\\n\"\r\n + Style.RESET_ALL)\r\n display(df_keyfeat)\r\n\r\n return df_nrel, df_feat, df_keynrel, df_keyfeat\r\n\r\n\r\nimport networkx as nx\r\nprint(f'- Version de la librairie networkx : {nx.__version__}')\r\ndef dataset_graph(df_nrel, df_feat, df_keynrel, df_keyfeat, max_eli=1, save=None,\r\n with_labels=True, node_size=20000, node_shape='o', alpha=0.85):\r\n \"\"\"\r\n Trace le graphe des relations (feature de même nom) entre les fichiers de données.\r\n Les relations contenant une feature clés sont labellisées.\r\n L'épaisseur des relations est proportionnelle au nombre de features communes.\r\n :param df_nrel: dataframe du nombre de relations entre paires de fichiers de données\r\n :param df_feat: dataframe des features entre paires de fichiers de données\r\n :param df_keynrel: dataframe du nombre de relations-clés (features clés) entre paires de fichiers de données\r\n :param df_keyfeat: dataframe des features-clés entre paires de fichiers de données\r\n :param max_eli: int, nombre max de features représentées dans la relation entre 2 fichiers\r\n :param save: str, nom du fichier (.png) de sauvegarde graphique\r\n :return: None\r\n \"\"\"\r\n G = nx.Graph()\r\n\r\n # Création des nœuds\r\n nodes = df_nrel.columns.tolist()\r\n G.add_nodes_from(nodes)\r\n pos = nx.spring_layout(G)\r\n\r\n # Création des relations\r\n rel_edges = [(a,b,df_nrel.at[a,b])\r\n for idx, a in enumerate(nodes)\r\n for b in nodes[idx+1:]\r\n if df_nrel.at[a,b]>0 and df_keynrel.at[a,b]==0]\r\n G.add_weighted_edges_from(rel_edges, color='dimgray')\r\n\r\n key_edges = [(a,b, df_nrel.at[a,b])\r\n for idx, a in enumerate(nodes)\r\n for b in nodes[idx+1:]\r\n if df_keynrel.at[a,b]>0]\r\n G.add_weighted_edges_from(key_edges, color='coral')\r\n\r\n edges = G.edges()\r\n colors = [G[u][v]['color'] for u, v in edges]\r\n weights = [G[u][v]['weight'] for u, v in edges]\r\n\r\n # Ajout des labels des relations impliquant une feature clé\r\n edge_labels = {(a, b): df_keyfeat.at[a, b][:max_eli]\r\n for idx, a in enumerate(nodes)\r\n for b in nodes[idx + 1:]\r\n if df_keynrel.at[a,b]>0}\r\n\r\n # Tracé du graphe\r\n plt.figure(figsize=(15, 12))\r\n ax = plt.gca()\r\n ax.margins(0.08)\r\n nx.draw(G, pos, edge_color=colors, width=weights,\r\n with_labels=False, node_size=node_size,\r\n node_shape=node_shape, alpha=alpha)\r\n if with_labels:\r\n nx.draw_networkx_labels(G, pos, font_size=12,\r\n font_color='k', font_weight='bold')\r\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\r\n plt.axis(\"off\")\r\n plt.tight_layout()\r\n if save is not None:\r\n plt.savefig(fig_path+save, dpi=300)\r\n plt.show()\r\n return\r\n\r\n\r\ndef one_hot_encoder(df, nan_as_category=True):\r\n \"\"\"\r\n Encode avec 'get_dummies' les colonnes de type 'object' d'un dataframe.\r\n :param df: dataframe, 
contient les colonnes à encoder.\r\n :param nan_as_category: bool, ajoute éventuellement une catégorie '_nan'.\r\n Cela permet en particulier d'imputer les NaN.\r\n :return: dataframe, list: le dataframe en entrée auquel est ajouté les colonnes encodées,\r\n et la liste des colonnes ajoutées\r\n \"\"\"\r\n original_columns = df.columns.tolist()\r\n categorical_columns = [col for col in df.columns if df[col].dtype == 'object']\r\n df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)\r\n new_columns = [c for c in df.columns if c not in original_columns]\r\n del original_columns\r\n del categorical_columns\r\n gc.collect()\r\n return df, new_columns\r\n\r\n\r\ndef agg_df_withdict(df1, df2, agg_dict, key_feature, prefix='', drop_key=True, keep_df2=False):\r\n \"\"\"\r\n Agrège 2 dataframes par la colonne 'key_feature'\r\n en regroupant les valeurs de df2\r\n selon le dictionnaire agg_dict\r\n :param df1: dataframe, avec feature-clé 'key_feature'\r\n :param df2: dataframe, à grouper par de 'key_feature' avec le dictionnaire agg_dict\r\n :param agg_dict: dict, du type {feature: list(operations)}\r\n :param key_feature: feature-clé de df1\r\n :param prefix: str, ajout d'un préfixe aux features\r\n :param drop_key: bool, supprime la colonne de la 'key_feature'\r\n :return: dataframe, résultant de l'agrégation des 2 dataframes\r\n \"\"\"\r\n df2_agg = df2.groupby(key_feature).agg(agg_dict)\r\n # Concaténation des 2 niveaux de label de colonne (feature, opération)\r\n df2_agg.columns = pd.Index([str(prefix) + e[0] + \"_\" + e[1].upper() for e in df2_agg.columns.tolist()])\r\n df = df1.join(df2_agg, how='left', on=key_feature)\r\n if drop_key: df.drop([key_feature], axis=1, inplace=True)\r\n del df2_agg\r\n if not keep_df2: del df2\r\n gc.collect()\r\n return df\r\n# Exemple\r\n# ar1 = [('a', 1), ('b',2), ('c', 3)]\r\n# ar2 = [('a', 1, 1), ('a', 3, 20), ('a', 11, 99), ('b', 4, 8), ('b', 0, -1), ('b', 20, 30), ('c', 9, 27)]\r\n# df1 = pd.DataFrame(ar1, columns=['key', 'value1'])\r\n# display(df1)\r\n# df2 = pd.DataFrame(ar2, columns=['key', 'value2', 'value3'])\r\n# display(df2)\r\n# agg_dict1 = {'value2': 'mean'}\r\n# agg_dict2 = {'value3': ['min', 'max']}\r\n# agg_dict = {}\r\n# for d in (agg_dict1, agg_dict2): agg_dict.update(d)\r\n# print(agg_dict)\r\n# prefix='AGG_'\r\n# print(prefix + 'DF')\r\n# display(agg_df_withdict(df1, df2, agg_dict, 'key', prefix=prefix))\r\n\r\n\r\ndef get_df_nan_rate(df, verbose=True):\r\n \"\"\"\r\n Calcule le taux de NaN d'un dataframe.\r\n :param df: dataframe, dont on veut calculer le taux de NaN\r\n :param verbose: bool, mode verbose\r\n :return: float, taux de NaN (compris entre 0 et 1)\r\n \"\"\"\r\n nan_rate = float(df.isnull().sum(axis=0).sum()) / (df.shape[0] * df.shape[1])\r\n if verbose: print(f\"Taux de NaN: {100*nan_rate:.2f}%\")\r\n return nan_rate\r\n\r\ndef check_for_inf(df, replace_with_nan=True, verbose=True):\r\n \"\"\"\r\n Vérifie si le dataframe contient des valeurs infinie,\r\n affiche les informations en mode verbose, et\r\n remplace ces valeurs par np.NaN si replace_with_nan=True.\r\n :param df: dataframe, que l'on souhaite vérifier\r\n :param replace_with_nan: bool, remplace inf par np.NaN si True\r\n default=True\r\n :param verbose: bool, mode verbose pour afficher le nombre et\r\n taux de inf dans le dataframe et lister les features\r\n (colonnes) concernées.\r\n :return: dataframe modifié (sans affecter le dataframe en\r\n entrée) si replace_with_nan=True,\r\n int, nombre de valeurs infinies sinon.\r\n \"\"\"\r\n 
nb_inf = 0\r\n col_with_inf = []\r\n for feature in df.columns:\r\n nb_inf_col = np.isinf(df[feature]).sum()\r\n if nb_inf_col > 0:\r\n nb_inf += nb_inf_col\r\n col_with_inf.append(feature)\r\n\r\n if verbose:\r\n if nb_inf == 0:\r\n print(\"Les données ne contiennent pas de valeur infinie\")\r\n else:\r\n print(f\"Les données contiennent {nb_inf} valeurs infinies\"\r\n f\" ({float(nb_inf)/(df.shape[0] * df.shape[1]):.3f}%)\"\r\n f\" pour les features {col_with_inf}\")\r\n\r\n del col_with_inf\r\n gc.collect()\r\n\r\n if replace_with_nan:\r\n if verbose:\r\n print(\"Les valeurs infinies sont remplacées par np.NaN\")\r\n return df.replace([np.inf, -np.inf], np.NaN)\r\n else:\r\n return nb_inf>0\r\n\r\n\r\ndef outliers(data, method='best', strategy='id_number', value=0.0):\r\n \"\"\"\r\n Examine data à la recherche d'outlier selon plusieurs méthodes\r\n et remplace éventuellement les outliers par de nouvelles\r\n valeurs selon la stratégie choisie.\r\n :param data: array, contenant la liste des valeurs de la donnée.\r\n :param method: str, methode de détection des outliers\r\n 'std': moyenne et écart type, suppose la distribution normale\r\n 'iq': interquantile si la distribution n'est pas normale\r\n default='best', teste si la distribution est normale et\r\n applique la stratégie 'std' si oui et 'iq' sinon\r\n :param strategy: stratégie d'identification, voire de traitement\r\n des outliers.\r\n - Identification d'outliers: 'id_number' (nombre), 'id_rate'\r\n (taux), 'id_index_lower' (index outliers bas), 'id_index_upper'\r\n (index outliers haut), 'id_index' (index tous outliers).\r\n - Remplacement des outliers: 'replace_min_max' (si sous le seuil\r\n bas, valeur du seuil bas et vice-versa pour seuil haut),\r\n 'replace_mean' (moyenne), 'replace_median' (médiane),\r\n 'replace_value' (valeur=value), 'replace_nan' (np.NaN).\r\n :param value: float, valeur de remplacement pour la stratégie\r\n 'replace_value'.\r\n :return: int (nombre d'outliers), float (taux d'outliers), ou\r\n list (index concernés de data) si stratégie d'identification.\r\n (int, array), si stratégie de remplacement avec (nombre\r\n d'outliers, données modifiées). 
Note: les données modifiées\r\n ne modifient pas les données d'entrée.\r\n \"\"\"\r\n with np.errstate(all='ignore'):\r\n mean = np.nanmean(data)\r\n std = np.nanstd(data)\r\n median = np.nanmedian(data)\r\n\r\n # Test de distribution normale de data avec le test de Shapiro\r\n #if method=='best':\r\n # from random import sample\r\n # sample_data = sample(sorted(data), 5000) if len(data)>5000 else data\r\n # stat, p = st.shapiro(sample_data)\r\n # method = 'std' if p > 0.05 else 'iq'\r\n\r\n # Test de distribution normale de data avec le test K² d'Agostino\r\n if method=='best':\r\n stat, p = st.normaltest(data)\r\n method = 'std' if p > 0.05 else 'iq'\r\n\r\n # Calcule les bornes lower et upper selon la méthode\r\n if method=='std':\r\n lower, upper = mean - 3 * std, mean + 3 * std\r\n elif method=='iq':\r\n q25, q75 = np.nanpercentile(data, 25), np.nanpercentile(data, 75)\r\n lower, upper = q25 - 1.5 * (q75 - q25), q75 + 1.5 * (q75 - q25)\r\n\r\n index_lower = np.where(np.logical_and(data < lower, ~np.isnan(data)))[0]\r\n index_upper = np.where(np.logical_and(data > upper, ~np.isnan(data)))[0]\r\n outliers_nb = len(index_lower) + len(index_upper)\r\n\r\n if strategy=='id_number':\r\n return outliers_nb\r\n elif strategy=='id_rate':\r\n return float(outliers_nb) / len(data)\r\n elif strategy=='id_index_lower':\r\n return index_lower\r\n elif strategy=='id_index_upper':\r\n return index_upper\r\n elif strategy=='id_index':\r\n return np.sort(np.concatenate([index_lower, index_upper]))\r\n\r\n elif 'replace_' in strategy:\r\n data_repl = np.copy(data)\r\n index_all = np.sort(np.concatenate([index_lower, index_upper]))\r\n if strategy=='replace_min_max':\r\n data_repl[index_lower] = lower\r\n data_repl[index_upper] = upper\r\n elif strategy=='replace_mean':\r\n data_repl[index_all] = mean\r\n elif strategy=='replace_median':\r\n data_repl[index_all] = median\r\n elif strategy=='replace_value':\r\n data_repl[index_all] = value\r\n elif strategy=='replace_nan':\r\n data_repl[index_all] = np.NaN\r\n return outliers_nb, data_repl\r\n\r\n else:\r\n print(\"Erreur sur la valeur de 'method' ou 'strategy'\")\r\n\r\n\r\ndef features_with_nan(df, num_pattern_list=None, cat_pattern_list=None, verbose=True):\r\n \"\"\"\r\n Pour chaque feature de df, caractérise les NaN et recommande une stratégie de traitement.\r\n :param df: dataframe, dont on veut caractériser les NaN.\r\n :param num_pattern_list: list, chaines de caractères dans le nom de la feature signifiant\r\n qu'il s'agit d'une feature numérique ; default = None\r\n :param cat_pattern_list: list, chaines de caractères dans le nom de la feature signifiant\r\n qu'il s'agit d'une feature catégorielle ; default = None\r\n :param verbose: bool, mode verbose ; default = True\r\n :return: dataframe, table des NaN précisant pour chaque feature:\r\n - 'feature': nom de la feature\r\n - 'nan_nb': nombre de NaN\r\n - 'nan_rate': taux de NaN\r\n - 'type': type de variable ('num', 'cat_bin_num', 'cat_mul_num', 'cat_bin_str', 'cat_mul_tr')\r\n - 'nunique': nombre de valeurs uniques\r\n - 'unique': valeurs uniques\r\n - 'recommended strategy': stratégie recommandée de traitement des NaN\r\n \"\"\"\r\n # Initialisations\r\n df_nan = None\r\n nuniq_max = min(0.01 * len(df), 100)\r\n features_list = ['feature', 'nan_nb', 'nan_rate', 'nan_minclass_rate', 'type', 'nunique', 'unique', 'recommended_strategy']\r\n for feature in df.columns.tolist():\r\n nan_nb = df[feature].isnull().sum(axis=0)\r\n if nan_nb>0:\r\n\r\n # Caractéristiques de la feature\r\n nan_rate = 
100.0 * df[feature].isnull().sum(axis=0) / len(df)\r\n nan_min_class_rate = 100.0 * df.loc[df[feature].isnull() & df['TARGET'] == 1, 'TARGET'].sum() / nan_nb\r\n type_feat = 'num' if np.issubdtype(df[feature].dtype, np.number) else 'cat'\r\n nuniq = df[feature].nunique() # exclut NaN\r\n\r\n # Noms de feature contenant un str signifiant que la feature est numérique ou catégorielle\r\n if num_pattern_list is not None:\r\n contain_num_pattern = True if any(pattern in feature for pattern in num_pattern_list) else False\r\n else:\r\n contain_num_pattern = False\r\n if cat_pattern_list is not None:\r\n contain_cat_pattern = True if any(pattern in feature for pattern in cat_pattern_list) else False\r\n else:\r\n contain_cat_pattern = False\r\n\r\n # Type de variable catégorielle: 'cat' + ('_bin' ou '_mul') + ('_num' ou '_str')\r\n if type_feat=='cat' or nuniq nuniq_max:\r\n strategy = 'mean' if (outliers(df[feature], strategy='id_rate') < 0.01) else 'median'\r\n else:\r\n strategy = 'most_frequent'\r\n\r\n # Renseignement de la table des NaN\r\n info_list= [feature, nan_nb, nan_rate, nan_min_class_rate, type_feat, nuniq, uniq, strategy]\r\n if df_nan is None:\r\n df_tmp = pd.DataFrame([info_list], columns=features_list)\r\n df_nan = df_tmp.copy()\r\n else:\r\n df_tmp = pd.DataFrame([info_list], columns=features_list)\r\n df_nan = pd.concat([df_nan, df_tmp], axis=0, ignore_index=True)\r\n\r\n # Formatage et affichage optionnel de la table des NaN\r\n df_nan['nan_rate'] = df_nan['nan_rate'].map('{:.1f}%'.format)\r\n df_nan['nan_minclass_rate'] = df_nan['nan_minclass_rate'].map('{:.2f}%'.format)\r\n if verbose:\r\n print(\"Caractérisation des features contenant des valeurs manquantes:\")\r\n display(df_nan)\r\n\r\n # Nettoyage des variables\r\n del nuniq_max, features_list, nan_nb, nan_rate, type_feat, nuniq\r\n del contain_num_pattern, contain_cat_pattern, uniq, df_tmp\r\n gc.collect()\r\n return df_nan\r\n\r\n\r\ndef nan_treament_decisions(df_nan, nan_decisions=None, save=None):\r\n \"\"\"\r\n Intègre les décisions de traitements spécifiques des valeurs\r\n manquantes par feature dans le dataframe df_nan.\r\n :param df_nan: dataframe issue de la fonction features_with_nan\r\n :param nan_decisions: dict, associe aux features sélectionnées\r\n une décision spécifique ({'feature': 'decision'}).\r\n Si la dénomination de la feature commence par '#' alors\r\n la décision est appliquée à tous les noms de features\r\n contenant la chaine de caractère après le '#'.\r\n :param save: str, nom de fichier de sauvegarde, defaut=None,\r\n pas de sauvegarde.\r\n :return: dataframe df_nan augmenté de 2 colonnes:\r\n - 'decision': bool, si une décision spécifique est spécifiée\r\n - 'nan_treatment': le traitement spécifié s'il existe\r\n sinon le traitement recommandé ('recommended_strategy').\r\n \"\"\"\r\n # Table des décisions de traitement des NaN\r\n df_nan.set_index(keys='feature', drop=False, inplace=True)\r\n list_nan_features = df_nan['feature'].values.tolist()\r\n list_nan_decisions = nan_decisions.keys()\r\n df_nan['decision'] = False\r\n df_nan['nan_treatment'] = 'uncovered'\r\n for feat in list_nan_features:\r\n if feat in list_nan_decisions:\r\n df_nan.at[feat, 'decision'] = True\r\n df_nan.at[feat, 'nan_treatment'] = nan_decisions[feat]\r\n else:\r\n df_nan.at[feat, 'nan_treatment'] = df_nan.at[feat, 'recommended_strategy']\r\n for item in list_nan_decisions:\r\n if (item[0] == '#') and (item[1:] in feat):\r\n df_nan.at[feat, 'decision'] = True\r\n df_nan.at[feat, 'nan_treatment'] = 
nan_decisions[item]\r\n break\r\n\r\n # Sauvegarde éventuelle\r\n if save is not None: df_nan.to_csv(save, sep=';', index=False)\r\n\r\n # Affichage du résultat\r\n uncovered = 0 if 'uncovered' not in df_nan['nan_treatment'].tolist() else df_nan['nan_treatment'].value_counts()[\r\n 'uncovered']\r\n print(f\"{uncovered} feature non couverte{'' if uncovered == 0 else ':'} \"\r\n f\"{', '.join(df_nan.index[df_nan['nan_treatment'] == 'uncovered'].tolist())}\")\r\n decision_rate = df_nan.loc[df_nan['decision'], 'importance'].sum() / df_nan['importance'].sum()\r\n print(f\"Taux de décisions spécifiques relatif à l'importance des features: {100 * decision_rate:.2f}%\")\r\n df_nan.reset_index(drop=True, inplace=True)\r\n\r\n del nan_decisions, list_nan_features, feat, item, uncovered, decision_rate\r\n gc.collect()\r\n return df_nan\r\n\r\n\r\ndef nan_treatment(df, df_nan, mode='auto', save=None):\r\n \"\"\"\r\n Traite les valeurs manquantes de df avec les consignes contenues\r\n dans df_nan.\r\n :param df: dataframe, avec valeurs manquantes à traiter.\r\n :param df_nan: dataframe, contenant les features 'feature'\r\n (noms des features de df), 'recommended_strategy' (pour le\r\n mode='auto') et 'nan_treatment' (consigne de traitement\r\n pour le mode='forced').\r\n :mode: str, choix de l'alternative de traitement:\r\n - 'auto': utilise la stratégie recommandée ('recommended_strategy')\r\n - 'forced': utilise les décisions ('nan_treatment')\r\n :param save: nom du fichier de sauvegarde du dataframe df traité\r\n ('.csv'), default=None: pas de sauvegarde.\r\n :return: dataframe, copie profonde de df avec valeurs manquantes\r\n traitées (df n'est pas modifié).\r\n \"\"\"\r\n df_tmp = df.copy()\r\n df_nan.set_index(keys='feature', drop=False, inplace=True)\r\n nan_treat_feature = 'recommended_strategy' if mode=='auto' else 'nan_treatment'\r\n for feature in df_nan['feature']:\r\n nan_treat = df_nan.at[feature, nan_treat_feature]\r\n if nan_treat=='drop':\r\n print(f\"Feature {feature}: suppression de {df_tmp[feature].isnull().sum()} individus\")\r\n df_tmp.dropna(subset=feature, inplace=True)\r\n elif nan_treat=='max':\r\n df_tmp[feature].fillna(value=df_tmp[feature].max(), inplace=True)\r\n elif nan_treat=='mean':\r\n df_tmp[feature].fillna(value=df_tmp[feature].mean(), inplace=True)\r\n elif nan_treat=='median':\r\n df_tmp[feature].fillna(value=df_tmp[feature].median(), inplace=True)\r\n elif nan_treat=='min':\r\n df_tmp[feature].fillna(value=df_tmp[feature].min(), inplace=True)\r\n elif nan_treat=='most_frequent':\r\n df_tmp[feature].fillna(value=df_tmp[feature].value_counts().idxmax(), inplace=True)\r\n elif (nan_treat==0) or (nan_treat=='0'):\r\n df_tmp[feature].fillna(value=0, inplace=True)\r\n elif (nan_treat==1) or (nan_treat=='1'):\r\n df_tmp[feature].fillna(value=1, inplace=True)\r\n else:\r\n print(f\"Cas de traitement non couvert: '{nan_treat}'\")\r\n\r\n df_nan.reset_index(drop=True, inplace=True)\r\n if save is not None: df_tmp.to_csv(save, sep=';', index=False)\r\n del feature, nan_treat\r\n gc.collect()\r\n return df_tmp\r\n\r\n\r\ndef normalization_info(df, save=None, verbose=True):\r\n \"\"\"\r\n Donne, sous forme d'un dataframe, les informations relative\r\n au besoin de normaliser 'df'.\r\n :param df: dataframe, contenant les données des features\r\n numériques du jeu de données.\r\n Note: pour éviter la fuite de données dans l'utilisation\r\n du résultat de cette fonction, 'df' devrait ne contenir\r\n que les données du jeu d'entrainement.\r\n :param save: str, chemin complet du 
fichier de sauvegarde\r\n du dataframe contenant les informations ; default=None,\r\n pas de sauvegarde.\r\n :param verbose: mode verbose.\r\n :return: dataframe, bool\r\n - dataframe contenant les informations de normalisation\r\n de la série de données de chaque feature de 'df':\r\n → 'feature': str, nom de la feature numérique\r\n → 'gauss': bool, si la distribution des données suit\r\n une loi normale\r\n → 'amplitude': float, max-min\r\n → 'min': float, min\r\n → 'max': float, max\r\n → 'variation_coef': float, std/mean\r\n → 'skewness': float, asymétrie\r\n �� 'kurtosis': float: aplatissement\r\n → 'skew_treatment': bool, si l'asymétrie a besoin\r\n d'être traitée\r\n → 'outliers_rate': float, taux d'outliers en %\r\n → 'outliers_treatment': bool, s'il faut surveiller\r\n le besoin de traiter les outliers après normalisation\r\n → 'normalization': fonction de normalisation recommandée\r\n - bool indiquant le besoin de normaliser le jeu de\r\n données.\r\n \"\"\"\r\n # Attention df ne doit contenir que des données d'apprentissage\r\n df_norm = None\r\n outliers_thr = min(0.01, 100.0 / len(df))\r\n skew_thr = 1 # <0.5 pour normalité et modérément asymétrique pour 0.5 0.05 else False\r\n normalization = 'StandardScaler' if normal_distrib else 'RobustScaler'\r\n\r\n # Amplitude, min, max, nunique des données d'entrée - voir synthèse sur l'ensemble des features\r\n data_min = df[feature].min()\r\n data_max = df[feature].max()\r\n amplitude = data_max - data_min\r\n variation_coef = round(df[feature].std() / df[feature].mean(), 2) if df[feature].mean() != 0 else np.NaN\r\n nunique = df[feature].nunique()\r\n\r\n # Dissymétrie avec forte amplitude\r\n skewness = round(df[feature].skew(), 2)\r\n kurtosis = round(df[feature].kurtosis(), 2)\r\n kurt_toohigh = True if kurtosis > kurt_thr else False\r\n skew_treatment = True if abs(skewness) > skew_thr and kurt_toohigh else False\r\n\r\n # Outliers\r\n outliers_rate = round(100.0 * outliers(df[feature], method='best', strategy='id_rate'), 2)\r\n outliers_treatment = True if not skew_treatment and kurt_toohigh and outliers_rate > outliers_thr else False\r\n\r\n # Enregistrement des informations dans la table\r\n info_list = [feature, normal_distrib, amplitude, data_min, data_max,\r\n variation_coef, nunique, skewness, kurtosis, skew_treatment,\r\n outliers_rate, outliers_treatment, normalization]\r\n if df_norm is None:\r\n df_tmp = pd.DataFrame([info_list], columns=features_list)\r\n df_norm = df_tmp.copy()\r\n else:\r\n df_tmp = pd.DataFrame([info_list], columns=features_list)\r\n df_norm = pd.concat([df_norm, df_tmp], axis=0, ignore_index=True)\r\n\r\n # Dispersion d'amplitude des données d'entrée\r\n #df_norm.set_index(keys='feature', drop=False, inplace=True)\r\n q25, q75 = np.percentile(df_norm['amplitude'], 25), np.percentile(df_norm['amplitude'], 75)\r\n lower, upper = q25 - 1.5 * (q75 - q25), q75 + 1.5 * (q75 - q25)\r\n feat_lower_list = df_norm.loc[df_norm['amplitude'] < lower, 'feature'].tolist()\r\n feat_upper_list = df_norm.loc[df_norm['amplitude'] > upper, 'feature'].tolist()\r\n scaling_required = True if len(feat_lower_list) + len(feat_upper_list) > 0 else False\r\n if verbose and scaling_required and len(feat_lower_list)>0:\r\n print(f\"Traitement complémentaire de {len(feat_lower_list)} features qui ont une amplitude atypiquement\")\r\n max_list = min(10, len(feat_lower_list))\r\n print(\"-\", '\\n- '.join(feat_lower_list[:max_list]), '\\n')\r\n if verbose and scaling_required and len(feat_upper_list)>0:\r\n 
print(f\"{len(feat_upper_list)} features ont une amplitude atypiquement haute:\")\r\n max_list = min(10, len(feat_upper_list))\r\n print(\"-\", '\\n- '.join(feat_upper_list[:max_list]), '\\n')\r\n\r\n # Amplitude relative à l'amplitude médiane\r\n df_norm['relative_amplitude'] = 100.0 * df_norm['amplitude'] / df_norm['amplitude'].median()\r\n df_norm['relative_amplitude'] = df_norm['amplitude'].apply(lambda x: np.round(x, 0))\r\n\r\n # Sauvegarde et affichage optionnels de la table des NaN\r\n if save is not None: df_norm.to_csv(save, sep=';', index=False)\r\n if verbose:\r\n print(\"Caractérisation des features en vue de leur normalisation:\")\r\n display(df_norm.head())\r\n\r\n # Nettoyage des variables\r\n del df_tmp\r\n gc.collect()\r\n return df_norm, scaling_required\r\n\r\n\r\nfrom scipy.stats import skew\r\ndef skew_treatment(data, val_range=None, train_indexes=None, feat_name=None, max_iter=1000, verbose=False):\r\n \"\"\"\r\n Transforme les données 'data' avec un skew élevé pour se rapprocher\r\n d'une loi normale en minimisant skew. Transformation inverse:\r\n - skew > 0: data = data_min + eps + eps * exp(data_transformed)\r\n - skew < 0: data = data_max + eps - eps * exp(data_transformed)\r\n :param data: numpy array, données d'entrée à transformer.\r\n :param val_range: tuple, (min(data), max(data)), default=None calcule\r\n min et max sur l'ensemble complet des données 'data' (légère\r\n fuite de données) → nécessaire pour le calcul du log.\r\n :param train_indexes: numpy array, liste des index du jeu d'entrainement\r\n default=None, toutes les valeurs de data sont prises en compte\r\n :param feat_name: str, nom optionnel de feature pour 'data'\r\n :param max_iter: int, nombre maximum d'itération de l'algorithme de\r\n transformation.\r\n :return: bool, numpy array, dict\r\n - err: indique si erreur de convergence\r\n - data_transformed: données transformées\r\n - skew_param: dict, paramètres de transformation:\r\n → sk_right_skewed: bool, indique le sens de la queue de 'data'\r\n → sk_eps: float, paramètre optimisé (skew=0) de la fonction\r\n de transformation\r\n → sk_data_min: float, paramètre (min(data)) de la fonction\r\n de transformation\r\n → sk_data_max: float, paramètre (max(data)) de la fonction\r\n de transformation\r\n \"\"\"\r\n # Initialisations\r\n if verbose: print(f\"Traitement de l'asymétrie de {feat_name}\")\r\n train_indexes = np.arange(len(data)) if train_indexes is None else train_indexes\r\n data_train = data[train_indexes]\r\n err = True\r\n right_skewed = True if skew(data_train) >= 0 else False\r\n data_min = val_range[0] if val_range is not None else np.array(data).min()\r\n data_max = val_range[1] if val_range is not None else np.array(data).max()\r\n\r\n eps0 = (data_max - data_min) / 1000\r\n if right_skewed:\r\n sk0 = skew(np.log((np.array(data_train) - data_min + eps0) / eps0))\r\n else:\r\n sk0 = skew(np.log((data_max - np.array(data_train) + eps0) / eps0))\r\n sk = [sk0]\r\n ep = [eps0]\r\n eps1 = 2 * eps0\r\n\r\n # Algorithme de convergence: eps pour minimiser abs(skew)\r\n for n_iter in range(1, max_iter+1):\r\n if right_skewed:\r\n sk1 = skew(np.log((np.array(data_train) - data_min + eps1) / eps1))\r\n else:\r\n sk1 = skew(np.log((data_max - np.array(data_train) + eps1) / eps1))\r\n ep.append(eps1)\r\n sk.append(sk1)\r\n if abs(sk1) > 0.1:\r\n if sk0 * sk1 < 0:\r\n eps = (eps0 + eps1) / 2\r\n else:\r\n alpha = abs(sk1 - sk0) * abs(sk1)\r\n eps = eps0 if sk1 * (sk1 - sk0) > 0 else eps1\r\n if (sk1 - sk0) * (eps1 - eps0) > 0:\r\n alpha = 
min(0.9, alpha) if sk1>0 else min(10, alpha)\r\n eps = eps * (1 - sk1 / abs(sk1) * alpha)\r\n else:\r\n alpha = min(0.9, alpha) if sk1 < 0 else min(10, alpha)\r\n eps = eps * (1 + sk1 / abs(sk1) * alpha)\r\n eps = max(eps, 1e-20)\r\n eps0 = eps1\r\n eps1 = eps\r\n if ((eps0==1e-20) and (eps1==1e-20)) or (sk1==sk0):\r\n break\r\n else:\r\n err = False\r\n if verbose:\r\n print(f\"Convergence à la {n_iter}{'ème' if n_iter > 1 else 'ère'} itération\")\r\n # Paramètres de transformation des données\r\n skew_param = {'sk_right_skewed': right_skewed,\r\n 'sk_eps': eps1,\r\n 'sk_data_min': data_min,\r\n 'sk_data_max': data_max}\r\n break\r\n\r\n # Si le skew n'a pas pu être réduit au seuil spécifié\r\n if err == True:\r\n eps1 = ep[np.argmin(np.array(sk))]\r\n skew_param = {'sk_right_skewed': right_skewed,\r\n 'sk_eps': eps1,\r\n 'sk_data_min': data_min,\r\n 'sk_data_max': data_max}\r\n\r\n # Courbes skew et eps en fonction des itérations\r\n if verbose:\r\n if err==True: print(f\"-> Le skew n'a pas pu être réduit au dessous du seuil\")\r\n sk = np.array(sk)\r\n ep = np.array(ep)\r\n fig, (ax1, ax3) = plt.subplots(nrows=1, ncols=2, figsize=(15,5))\r\n ax1.plot(np.arange(len(sk)), sk, color='steelblue', label='skew')\r\n ax1.set_xlabel('itération', fontsize=12)\r\n ax1.set_ylabel('skew', color='steelblue', fontsize=14)\r\n ax2 = ax1.twinx()\r\n ax2.plot(np.arange(len(ep)), ep, color='coral', label='eps')\r\n ax2.set_ylabel('eps', color='coral', fontsize=14)\r\n\r\n ax3.hist(data, bins=100, color='steelblue')\r\n ax3.hist(data_train, bins=100, color='coral')\r\n ax3.set_xlabel('histogramme', fontsize=12)\r\n\r\n if feat_name is not None: plt.suptitle(feat_name, fontsize=14)\r\n plt.subplots_adjust(wspace=0.3)\r\n plt.show()\r\n\r\n\r\n # Transformation des données d'entrainement + test\r\n if right_skewed:\r\n data_transformed = np.log((np.array(data) - data_min + eps1) / eps1)\r\n else:\r\n data_transformed = np.log((data_max - np.array(data) + eps1) / eps1)\r\n if verbose:\r\n print(f\"Evolution du skew de {skew(data_train)} vers {skew(data_transformed[train_indexes])}\")\r\n\r\n # Nettoyage des données\r\n del data_train, sk0, sk1, n_iter, sk, ep, eps0\r\n gc.collect()\r\n\r\n return err, data_transformed, skew_param\r\n\r\n\r\ndef data_gen(nbr=100, seed=0, gauss=True, mean=0, std=1, sk=0.5, minmax=(0,1), right_skew=True):\r\n \"\"\"\r\n Génère une matrice de ('nbr') nombres aléatoires ('seed'):\r\n - soit gaussienne (paramètres 'mean' et 'std'),\r\n - soit asymétrique (paramètres 'sk', 'minmax' et\r\n 'right_skew').\r\n :param nbr: int, taille de la matrice de sortie.\r\n :param seed: int, seed du générateur aléatoire.\r\n :param gauss: bool, si les points sont générés selon une\r\n distribution gaussienne ou pas.\r\n :param mean: float, spécifier la moyenne s'il est attendu\r\n une distribution gaussienne.\r\n :param std: float,spécifier l'écart type s'il est attendu\r\n une distribution gaussienne.\r\n :param sk: float pour une distribution non gaussienne,\r\n paramètre influent pour le skewness et kurtosis, qui\r\n augmentent avec sk.\r\n :param minmax: (float, float), pour une distribution non\r\n gaussienne, spécifie les bornes des données de sortie\r\n :param right_skew: bool, pour une distribution non gaussienne,\r\n spécifie si la queue est à droite (True) ou gauche\r\n (False).\r\n :return: numpy.array, matrice de taille 'nbr' des données.\r\n \"\"\"\r\n np.random.seed(seed)\r\n X = np.random.randn(nbr)\r\n if gauss:\r\n X = mean + std * np.random.randn(nbr)\r\n else:\r\n X = 
(np.exp(sk * X) - 1)\r\n if right_skew:\r\n a = float((minmax[1] - minmax[0])) / (X.max() - X.min())\r\n b = minmax[1] - a * X.max()\r\n else:\r\n a = -float((minmax[1] - minmax[0])) / (X.max() - X.min())\r\n b = minmax[1] - a * X.min()\r\n X = a * X + b\r\n del a, b\r\n gc.collect()\r\n return X\r\n\r\n\r\nfrom sklearn.preprocessing import StandardScaler, RobustScaler\r\ndef df_normalization(df, df_norm, train_indexes=None, save_df=None, save_dfnorm=None, verbose=1):\r\n \"\"\"\r\n Normalise le jeu de données contenu dans 'df' (features\r\n de 'df_norm' seulement), avec les informations de 'df_norm'.\r\n Afin d'éviter les fuites de données (implication du jeu\r\n de test), les paramètres des fonctions de transformation\r\n sont calculés sur le jeu d'entrainement, correspondant\r\n aux indices 'train_indexes' de 'df'. Les paramètres de\r\n transformation sont spécifiés dans 'df_norm' qui peut\r\n être sauvegardé.\r\n Transformations inverses (chaque transformation est\r\n applicable si ses paramètres existes), dans l'ordre:\r\n - Amplitude : X' = X / ampl_coef\r\n - RobustScaler: X' = (rsp_q3-rsp_q1) * X + rsp_q2\r\n - StandardScaler: X' = ssp_std * X - ssp_mean\r\n - Skew, selon sk_right_skewed:\r\n True: X' = sk_data_min + sk_eps + sk_eps * exp(X)\r\n False: X' = sk_data_max + sk_eps - sk_eps * exp(X)\r\n :param data: numpy array, données d'entrée à transformer.\r\n :param df: dataframe, jeu de données à normaliser.\r\n :param df_norm: dataframe, informations de normalisation\r\n de df retourné par 'normalization_info'.\r\n :param train_indexes: array, liste des index du jeu\r\n d'entrainement. Note: l'index de df doit être numéroté\r\n de 0 à len(df)-1.\r\n :param save_df: str, chemin complet du fichier de\r\n sauvegarde de df transofrmé ; default=None, pas de\r\n sauvegarde.\r\n :param save_dfnorm: str, chemin complet du fichier de\r\n sauvegarde de df_norm (tq modifié par cette fonction\r\n pour inclure les paramètres de transformation) ;\r\n default=None, pas de sauvegarde.\r\n :param verbose: int, niveaux:\r\n - 0: off\r\n - 1: verbose sauf pour 'skew_treatment'\r\n - 2: full verbose, y compris 'skew_treatment'\r\n :return: dataframe, 'df' transformé normalisé sur les\r\n features de 'df_norm'.\r\n \"\"\"\r\n # Initialisations\r\n full_verbose = True if verbose==2 else False\r\n verbose = True if verbose>0 else False\r\n df_normalized = df.copy()\r\n df_norm.set_index(keys='feature', drop=False, inplace=True)\r\n train_indexes = np.arange(len(df)) if train_indexes is None else train_indexes\r\n num_features = df_norm['feature'].tolist()\r\n\r\n # Traitement des asymétries et mises à l'échelle\r\n with tqdm(total=len(num_features), position=0, leave=True, desc='Normalisation') as pbar:\r\n count_skt = 0\r\n count_skp = 0\r\n for feat in num_features:\r\n if df_norm.at[feat, 'skew_treatment']:\r\n count_skt += 1\r\n err, data_transformed, skew_param = \\\r\n skew_treatment(df[feat], train_indexes=train_indexes, feat_name=feat, verbose=full_verbose)\r\n if not err:\r\n df_normalized[feat] = data_transformed\r\n for param in skew_param.keys():\r\n if param not in df_norm.columns: df_norm[param] = None\r\n df_norm.at[feat, param] = skew_param[param]\r\n else:\r\n count_skp += 1\r\n if df_norm.at[feat, 'normalization'] == 'StandardScaler':\r\n scaler = StandardScaler()\r\n ss_param = {'ssp_mean': df_normalized.loc[train_indexes, feat].mean(),\r\n 'ssp_std': df_normalized.loc[train_indexes, feat].std()}\r\n for param in ss_param.keys():\r\n if param not in df_norm.columns: df_norm[param] = 
None\r\n df_norm.at[feat, param] = ss_param[param]\r\n else:\r\n scaler = RobustScaler()\r\n rs_param = {'rsp_q1': np.quantile(df_normalized.loc[train_indexes, feat].values, 0.25),\r\n 'rsp_q2': np.quantile(df_normalized.loc[train_indexes, feat].values, 0.5),\r\n 'rsp_q3': np.quantile(df_normalized.loc[train_indexes, feat].values, 0.75)}\r\n for param in rs_param.keys():\r\n if param not in df_norm.columns: df_norm[param] = None\r\n df_norm.at[feat, param] = rs_param[param]\r\n scaler.fit(df_normalized.loc[train_indexes, feat].values.reshape(-1, 1))\r\n df_normalized[feat] = scaler.transform(df_normalized[feat].values.reshape(-1, 1))\r\n pbar.update()\r\n if verbose:\r\n print(f\"Normalisation effectuée, dont {count_skp} réductions \"\r\n f\"partielles de l'asymétrie sur {count_skt}\\n\")\r\n\r\n # Vérification de la mise à l'échelle\r\n df_norm_verif, scaling_required = normalization_info(df_normalized.loc[train_indexes, num_features],\r\n verbose=full_verbose)\r\n\r\n # Traitement complémentaire éventuel de mise à l'échelle\r\n if scaling_required:\r\n df_norm_verif.set_index(keys='feature', drop=False, inplace=True)\r\n q25, q75 = np.percentile(df_norm_verif['amplitude'], 25), np.percentile(df_norm_verif['amplitude'], 75)\r\n lower, upper = q25 - 1.5 * (q75 - q25), q75 + 1.5 * (q75 - q25)\r\n feat_lower_list = df_norm_verif.loc[df_norm_verif['amplitude'] < lower, 'feature'].tolist()\r\n feat_upper_list = df_norm_verif.loc[df_norm_verif['amplitude'] > upper, 'feature'].tolist()\r\n if verbose and len(feat_lower_list)>0:\r\n print(f\"Traitement résiduel sur l'amplitude atypiquement basse de {len(feat_lower_list)} features\")\r\n if full_verbose:\r\n print(\"-\", '\\n- '.join(feat_lower_list), '\\n')\r\n for feat in feat_lower_list:\r\n df_normalized[feat] = df_normalized[feat] * lower / df_norm_verif.at[feat, 'amplitude']\r\n df_norm.at[feat, 'ampl_coef'] = lower / df_norm_verif.at[feat, 'amplitude']\r\n if verbose and len(feat_upper_list)>0:\r\n print(f\"Traitement résiduel sur l'amplitude atypiquement haute de {len(feat_upper_list)} features\")\r\n if full_verbose:\r\n print(\"-\", '\\n- '.join(feat_upper_list), '\\n')\r\n for feat in feat_upper_list:\r\n df_normalized[feat] = df_normalized[feat] * upper / df_norm_verif.at[feat, 'amplitude']\r\n df_norm.at[feat, 'ampl_coef'] = upper / df_norm_verif.at[feat, 'amplitude']\r\n df_norm_verif, scaling_required = normalization_info(df_normalized.loc[train_indexes, num_features],\r\n verbose=full_verbose)\r\n del q25, q75, feat_lower_list, feat_upper_list\r\n\r\n # Résultat final du processus de normalisation\r\n print(\"Normalisation terminée avec succès:\", scaling_required)\r\n df_norm_verif.set_index(keys='feature', drop=False, inplace=True)\r\n df_norm['relative_amplitude'] = df_norm_verif['relative_amplitude'].copy()\r\n\r\n # Sauvegarde éventuelle du jeu de données normalisé et table de normalisation\r\n if save_df is not None:\r\n df_normalized.to_csv(save_df, sep=';', index=False)\r\n if save_dfnorm is not None:\r\n df_norm.to_csv(save_dfnorm, sep=';', index=False)\r\n\r\n # Nettoyage des variables\r\n if len(num_features)>0: del count_skt, count_skp, feat, scaler, param\r\n if 'err' in locals(): del err, data_transformed, skew_param\r\n if 'ss_param' in locals(): del ss_param\r\n if 'rs_param' in locals(): del rs_param\r\n del df_norm, num_features, df_norm_verif, scaling_required\r\n gc.collect()\r\n\r\n return df_normalized\r\n\r\n\r\nimport re\r\nimport random\r\ndef load_dataset(set='forced', subset='train', subset_size=1.0, 
debug=False):\r\n \"\"\"\r\n Charge le dataset dans un dataframe.\r\n :param set: str, nom de la version du dataset:\r\n - 'auto': dataset avec nan traitées automatiquement\r\n - 'forced': dataset avec nan traitées avec décisions\r\n :param subset: str, default='train':\r\n - 'train': jeu avec 'TARGET'=NaN\r\n - 'test': jeu avec 'TARGET'!=NaN\r\n :param subset_size: float ]0.0, 1.0], default=1.0,\r\n utilisé seulement si subset='test',\r\n proportion du jeu de test à charger.\r\n :param debug: bool, mode debug\r\n :return: dataframe, Series:\r\n - X: dataframe, matrice des entrées X,\r\n - y: Series, matrice des classes vraies.\r\n \"\"\"\r\n fname = data_path + 'P7_data_preprocessed_woNaN_forced_normalized.csv' if set=='forced'\\\r\n else data_path + 'P7_data_preprocessed_woNaN_auto_normalized.csv'\r\n nrows = 10000 if debug else None\r\n df = pd.read_csv(fname, sep=';', nrows=nrows)\r\n # Pour éviter \"LightGBMError: Do not support special JSON characters in feature name\"\r\n df = df.rename(columns=lambda x: re.sub('[^A-Za-z0-9_]+', '', x))\r\n # Features d'entrée de la prédiction\r\n non_input_features_list = ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index']\r\n x_columns = [f for f in df.columns.tolist() if f not in non_input_features_list]\r\n # Type categorical pour la prédiction avec lightGBM\r\n with open(data_path + 'P7_cat_features.txt', \"r\") as file:\r\n categorical_columns = json.load(file)\r\n categorical_columns = [re.sub('[^A-Za-z0-9_]+', '', f) for f in categorical_columns if\r\n f not in non_input_features_list]\r\n for col in categorical_columns:\r\n df[col] = pd.Categorical(df[col])\r\n # production des sorties selon 'subset'\r\n if subset=='train':\r\n train_indexes = df.index[~df['TARGET'].isnull()]\r\n X = df.loc[train_indexes, x_columns].copy()\r\n y = df.loc[train_indexes, 'TARGET'].copy()\r\n cat_index = [X.columns.get_loc(col) for col in categorical_columns]\r\n print(f\"Dimensions du jeu de données: X = {X.shape}, y = {y.shape}\\n\")\r\n del df, non_input_features_list, x_columns, train_indexes\r\n gc.collect()\r\n return X, y, cat_index\r\n else:\r\n indexes = df.index[df['TARGET'].isnull()].tolist()\r\n idx = random.sample(indexes, int(subset_size*len(indexes)))\r\n df = df.loc[idx, :].sort_values(by='SK_ID_CURR', ignore_index=True)\r\n X = df.loc[:, x_columns].copy()\r\n print(f\"Dimensions du jeu de données: X = {X.shape}\\n\")\r\n id_list = list(df['SK_ID_CURR'])\r\n del df, non_input_features_list, x_columns, indexes, idx\r\n gc.collect()\r\n return X, id_list\r\n\r\n\r\nimport sklearn\r\nimport lightgbm\r\nprint(\"Versions des librairies des modèles:\")\r\nprint('- Scikit-learn : ' + sklearn.__version__)\r\nprint('- LightGBM : ' + lightgbm.__version__)\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom lightgbm import LGBMClassifier\r\nfrom sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score\r\nfrom sklearn.metrics import get_scorer_names\r\nimport time\r\ndef model_evaluation(X, y, model, metric='average_precision',\r\n n_splits=5, n_repeats=3,\r\n random_state=None, n_jobs=-1, verbose=False):\r\n \"\"\"\r\n Evalue le modèle en terme de scores et temps de fit+évaluation\r\n unitaire.\r\n :param X: Dataframe, matrice d'entrée.\r\n :param y: Series, matrice des classes vraies.\r\n :param model: instance de modèle.\r\n :param metric: str ou scorer, métrique sous forme de str ou telle\r\n que renvoyée par la fonction sklearn.metrics.make_scorer.\r\n :param 
n_splits: int, nombre de découpe du dataset.\r\n :param n_repeats: int, nombre de répétition de découpe du dataset.\r\n :param random_state: int ou None.\r\n :param n_jobs: int ou None, nombre de processus exécutés en\r\n parallèle ; default=-1, maximum.\r\n :param verbose: bool, mode verbose.\r\n :return: numpy array, float\r\n - scores: array, matrice des n_repeats * n_splits scores\r\n - exec_time: float, temps d'exécution unitaire.\r\n \"\"\"\r\n if (type(metric) == str) and (metric not in get_scorer_names()):\r\n print(\"Le nom de la métrique n'est pas valide\")\r\n return\r\n start_time = time.time()\r\n cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=random_state)\r\n v = 11 if verbose else 0\r\n scores = cross_val_score(model, X, y, scoring=metric, cv=cv, n_jobs=n_jobs, verbose=v)\r\n elapsed = time.time() - start_time\r\n if verbose:\r\n print(f\"→ Evaluation exécutée en {elapsed_format(elapsed)}\")\r\n exec_time = elapsed/(n_splits*n_repeats)\r\n del cv, start_time, elapsed\r\n gc.collect()\r\n return scores, exec_time\r\n\r\ndef display_models_eval(model_list):\r\n \"\"\"\r\n Affiche la comparaison des modèles sous forme de 2 graphiques:\r\n - gauche: boite à moustaches concernant les scores ;\r\n - droite: barres concernant les temps de calcul unitaires.\r\n :param model_list: list, liste des modèles à comparer, telle\r\n que créée par la fonction 'create_models' du Notebook.\r\n Chaque élément de la liste est un dictionnaire avec au\r\n minimum les clés:\r\n - 'name': str, acronyme du modèle ;\r\n - 'model': instance du modèle ;\r\n - 'scores': scores de l'évaluation tels que renvoyés par\r\n model_evaluation ;\r\n - 'time': temps d'exécution tels que renvoyés par\r\n model_evaluation.\r\n :return: pas de retour\r\n \"\"\"\r\n labels = [model['name'] for model in model_list]\r\n scores = np.column_stack([model['eval']['scores'] for model in model_list])\r\n times = np.array([model['eval']['time'] for model in model_list])\r\n plt.figure(figsize=(15, 6))\r\n plt.subplot(121)\r\n plt.boxplot(scores, labels=labels, showmeans=True)\r\n plt.title(\"Scores\")\r\n plt.subplot(122)\r\n plt.bar(x= labels, height=times)\r\n plt.title(\"Times\")\r\n plt.suptitle(\"Comparaison des modèles\", fontsize=14)\r\n plt.show()\r\n del labels, scores, times\r\n gc.collect()\r\n\r\n\r\nimport optuna\r\nprint('Version de la librairie Optuna: ' + optuna.__version__, '\\n')\r\n\r\nfrom imblearn.pipeline import Pipeline\r\nfrom imblearn.under_sampling import TomekLinks\r\nfrom imblearn.under_sampling import EditedNearestNeighbours\r\nfrom imblearn.combine import SMOTETomek, SMOTEENN\r\ndef objective(trial, model_name, X, y, resampling=True,\r\n metric='average_precision', n_splits=5, n_repeats=3,\r\n random_state=None, njobs=-1, verbose=False):\r\n \"\"\"\r\n Fonction 'objective' telle que spécifiée par la librairie\r\n 'Optuna'.\r\n - Définit les plages des paramètres à optimiser.\r\n - Applique successivement:\r\n . fonction de resempling (over puis under sampling)\r\n . 
modèle.\r\n - Calcule le score avec la fonction 'model_evaluation'.\r\n :param trial: paramètre d'Optuna, correspond à un essai.\r\n :param model_name: str, nom du modèle.\r\n :param X: Dataframe, matrice d'entrée.\r\n :param y: Series, matrice des classes vraies.\r\n :param resampling: bool, spécifie si des méthodes de\r\n resampling doivent être explorées.\r\n :param metric: str ou scorer, métrique sous forme de str ou telle\r\n que renvoyée par la fonction sklearn.metrics.make_scorer.\r\n :param n_splits: n_splits: int, nombre de découpe du dataset.\r\n :param n_repeats: int, nombre de répétition de découpe du dataset.\r\n :param random_state: int ou None.\r\n :param n_jobs: int ou None, nombre de processus exécutés en\r\n parallèle ; default=-1, maximum.\r\n :param verbose: bool, mode verbose.\r\n :return: float, score de l'essai ('trial').\r\n \"\"\"\r\n # Plages des paramètres des modèles\r\n if model_name=='LR':\r\n lr_c = trial.suggest_float('C', 1e-10, 1e10, log=True)\r\n lr_cw = trial.suggest_categorical('class_weight', [None, 'balanced'])\r\n lr_s = trial.suggest_categorical('solver', ['saga'])\r\n clf = LogisticRegression(C=lr_c, class_weight=lr_cw, solver=lr_s, random_state=random_state, n_jobs=njobs)\r\n elif model_name=='RF':\r\n rf_ne = trial.suggest_int('n_estimators', 100, 200)\r\n rf_cw = trial.suggest_categorical('class_weight', [None, 'balanced'])\r\n rf_cr = trial.suggest_categorical('criterion', ['gini', 'entropy', 'log_loss'])\r\n clf = RandomForestClassifier(n_estimators=rf_ne, class_weight=rf_cw, criterion=rf_cr, random_state=random_state, n_jobs=njobs)\r\n elif model_name=='LG':\r\n lg_ne = trial.suggest_int('n_estimators', 100, 200)\r\n lg_lr = trial.suggest_float('learning_rate', 1e-3, 1e-1, log=True)\r\n lg_md = trial.suggest_int('max_depth', 7, 11)\r\n clf = LGBMClassifier(n_estimators=lg_ne, learning_rate=lg_lr, max_depth=lg_md, random_state=random_state, n_jobs=njobs)\r\n else:\r\n print(f\"model_name={model_name} n'est pas répertorié\")\r\n return 0\r\n\r\n # Resampling\r\n if resampling:\r\n sampling = trial.suggest_categorical('sampling', ['None', 'ST', 'SE'])\r\n if sampling=='ST':\r\n spl = SMOTETomek(tomek=TomekLinks(n_jobs=njobs), random_state=random_state, n_jobs=njobs)\r\n pipe = Pipeline(steps=[('sample', spl), ('model', clf)])\r\n elif sampling=='SE':\r\n spl = SMOTEENN(enn=EditedNearestNeighbours(n_jobs=njobs), random_state=random_state, n_jobs=njobs)\r\n pipe = Pipeline(steps=[('sample', spl), ('model', clf)])\r\n else:\r\n pipe = Pipeline(steps=[('model', clf)])\r\n else:\r\n pipe = Pipeline(steps=[('model', clf)])\r\n\r\n # Calcul du score\r\n scores, elapsed = model_evaluation(X, y, pipe, metric=metric,\r\n n_splits=n_splits, n_repeats=n_repeats,\r\n random_state=None, n_jobs=njobs, verbose=verbose)\r\n if verbose:\r\n print(f\"Score: {np.mean(scores)} ({np.var(scores)})\")\r\n return np.mean(scores)\r\n\r\n\r\nimport json\r\nfrom sklearn.decomposition import PCA\r\ndef perform_study(model_name, dataset='forced', resampling=True, pca_var=0.95,\r\n metric='average_precision', n_splits=5, n_repeats=3, n_trials=1,\r\n random_state=None, n_jobs=-1, debug=True, verbose=True):\r\n \"\"\"\r\n Exécute une étude ('study') d'optimisation des paramètres du\r\n modèle, telle que spécifiée par la librairie 'Optuna'.\r\n :param model_name: str, nom (acronyme) du modèle concerné.\r\n :param dataset: str, nom du dataset concerné, qui est chargé\r\n par la fonction dans les matrices X et y.\r\n :param resampling: bool, spécifie si des méthodes de\r\n 
resampling doivent être explorées.\r\n :param pca_var: float compris dans l'intervale ]0, 1[,\r\n variance expliquée de la réduction de dimentionnalité\r\n avec PCA.\r\n :param metric: str ou scorer, métrique sous forme de str ou telle\r\n que renvoyée par la fonction sklearn.metrics.make_scorer.\r\n :param n_splits: n_splits: int, nombre de découpe du dataset.\r\n :param n_repeats: int, nombre de répétition de découpe du dataset.\r\n :param n_trials: int, nombre d'essais de l'étude.\r\n :param random_state:int ou None.\r\n :param n_jobs: int ou None, nombre de processus exécutés en\r\n parallèle ; default=-1, maximum.\r\n :param debug: bool, mode 'debug'\r\n :param verbose: bool, mode verbose.\r\n :return: objet 'study' tel que défini par la librairie 'Optuna'\r\n \"\"\"\r\n mode_debug = ' - Mode debug' if debug else ''\r\n print(Fore.BLACK + Style.BRIGHT + Back.WHITE +\r\n f\"Dataset '{dataset}' - Optimisation des hyperparamètres du modèle '{model_name}'{mode_debug}:\\n\"\r\n + Style.RESET_ALL)\r\n\r\n # Chargement du dataset et réduction de dimensionnalité\r\n X, y, _ = load_dataset(set=dataset, debug=debug)\r\n\r\n # Réduction de la dimensionalité\r\n # Suppression des éventuelles colonnes '_nan' (se déduisent des autres catégories)\r\n non_input_features_list = ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index']\r\n with open(data_path + 'P7_cat_features.txt', \"r\") as file:\r\n categorical_columns = json.load(file)\r\n categorical_columns = [re.sub('[^A-Za-z0-9_]+', '', f) for f in categorical_columns if\r\n f not in non_input_features_list]\r\n n_categorical_columns = len(categorical_columns)\r\n categorical_columns = [col for col in categorical_columns if col[-4:] != '_nan']\r\n if len(categorical_columns) < n_categorical_columns:\r\n print(f\"Suppression de {n_categorical_columns - len(categorical_columns)}\"\r\n f\"features catégorielles correspondant aux NaN\")\r\n # Normalisation des features catégorielles en vue du PCA\r\n for col in categorical_columns:\r\n X[col] = X[col] / np.sqrt(X[col].sum() / len(X[col])) if X[col].sum() > 0 else 0\r\n X[col] = X[col] - X[col].mean()\r\n if get_df_nan_rate(X, verbose=False) > 0:\r\n print(\"La réduction PCA ne peut s'effectuer avec des NaN: pas de réduction de dimension effectuée\")\r\n else:\r\n dimX = X.shape\r\n pca = PCA(n_components=pca_var, random_state=random_state)\r\n X = pd.DataFrame(pca.fit_transform(X))\r\n print(f\"Réduction de dimensionnalité de X (variance expliquée=\"\r\n f\"{pca_var:.3f}): {dimX} → {X.shape}\", '\\n')\r\n\r\n # Définition de l'étude\r\n study_name = model_name.lower() + '_' + dataset + '_debug' if debug else model_name.lower() + '_' + dataset\r\n storage = 'sqlite:///' + data_path + study_name + '.db'\r\n study = optuna.create_study(study_name=study_name,\r\n storage=storage,\r\n load_if_exists=True,\r\n direction=\"maximize\",\r\n sampler=optuna.samplers.TPESampler())\r\n if verbose: print(f\"Sampler is {study.sampler.__class__.__name__}\")\r\n\r\n # Exécution de l'étude\r\n objective_func = lambda trial: objective(trial, model_name, X, y,\r\n resampling=resampling,\r\n metric=metric,\r\n n_splits=n_splits,\r\n n_repeats=n_repeats,\r\n random_state=random_state,\r\n njobs=n_jobs,\r\n verbose=verbose)\r\n start_time = time.time()\r\n study.optimize(objective_func, n_trials=n_trials)\r\n elapsed = time.time() - start_time\r\n print(f\"\\n→ Recherche exécutée en {elapsed_format(elapsed)}\\n\")\r\n\r\n # Nettoyage des variables\r\n del X, y, mode_debug, study_name, storage, start_time, 
elapsed\r\n gc.collect()\r\n return study\r\n\r\n\r\ndef get_study_results(model_name, dataset='forced', debug=True, save=True, display=True):\r\n \"\"\"\r\n Charge et affiche les résultats de l'étude ('study') d'optimisation\r\n des hyperparamètres et retourne le meilleur résultat.\r\n :param model_name: str, nom (acronyme) du modèle concerné.\r\n :param dataset: str, nom du dataset concerné, qui est chargé\r\n par la fonction dans les matrices X et y.\r\n :param debug: bool, mode 'debug'.\r\n :param save: bool, sauvegarde l'étude sous forme d'un fichier csv.\r\n :param display: bool, affichage de l'étude.\r\n :return: trial (tq défini par la librairie 'optuna') du meilleur\r\n score.\r\n \"\"\"\r\n mode_debug = ' - Mode debug' if debug else ''\r\n print(Fore.BLACK + Style.BRIGHT + Back.WHITE +\r\n f\"Résultat de la recherche d'hyperparamètres du modèle '{model_name}'{mode_debug}:\\n\"\r\n + Style.RESET_ALL)\r\n study_name = model_name.lower() + '_' + dataset + '_debug' if debug else model_name.lower() + '_' + dataset\r\n storage = 'sqlite:///' + data_path + study_name + '.db'\r\n\r\n study = optuna.study.load_study(study_name=study_name, storage=storage)\r\n print(f\"L'étude comprend {len(study.trials)} essais\")\r\n best_trial = study.best_trial\r\n print(f\"score = {best_trial.value}\")\r\n print(f\"Meilleurs hyperparamètres: {best_trial.params}\")\r\n\r\n if save:\r\n df_results = study.trials_dataframe(attrs=(\"number\", \"value\", \"params\", \"state\"))\r\n df_results.to_csv(data_path + study_name + '.csv', sep=';', index=False)\r\n del df_results\r\n\r\n if display:\r\n #fig = optuna.visualization.plot_contour(study)\r\n #fig.show()\r\n fig = optuna.visualization.plot_optimization_history(study)\r\n fig.show()\r\n fig = optuna.visualization.plot_param_importances(study)\r\n fig.show()\r\n del fig\r\n\r\n del mode_debug, study_name, storage, study\r\n gc.collect()\r\n return best_trial\r\n\r\n\r\ndef del_study(model_name, dataset='forced', debug=True):\r\n \"\"\"\r\n Efface l'étude correspondant au nom de modèle et mode debug.\r\n :param model_name: str, nom (acronyme) du modèle concerné.\r\n :param dataset: str, nom du dataset concerné, qui est chargé\r\n par la fonction dans les matrices X et y.\r\n :param debug: bool, mode 'debug'.\r\n :return: pas de retour.\r\n \"\"\"\r\n study_name = model_name.lower() + '_' + dataset + '_debug' if debug else model_name.lower() + '_' + dataset\r\n storage = 'sqlite:///' + data_path + study_name + '.db'\r\n print(f\"Suppression de l'étude {study_name}\")\r\n optuna.study.delete_study(study_name=study_name, storage=storage)\r\n\r\n\r\nfrom math import isnan\r\ndef get_optuna_bestparam(model, study_name):\r\n \"\"\"\r\n Extrait les meilleurs hyperparamètres déterminés par Optuna\r\n et les ajoute sous forme de dictionnaires au modèle.\r\n :param model: dict, modèle tel que créé par par la fonction\r\n 'create_models' du Notebook.\r\n :param study_name: str, nom de l'étude (study) réalisée avec\r\n Optuna.\r\n :return: aucun retour, 'model' est modifié pour lui ajouter:\r\n - dict, 'optuna_hyperparam': meilleurs hyperparamètres\r\n de l'étude concernant le modèle ;\r\n - dict, 'optuna_sampling': meilleur hyperparamètre de\r\n l'étude concernant l'échantillonnage.\r\n \"\"\"\r\n optuna_results = pd.read_csv(data_path + study_name + '.csv', sep=';')\r\n optuna_results = optuna_results[optuna_results['state']=='COMPLETE'].fillna(\r\n '').sort_values(by='value', ascending=False, ignore_index=True)\r\n hyperparam_names = [name.split('params_')[1] for 
name in optuna_results.columns.tolist()\r\n if 'params_' in name and '_sampling' not in name]\r\n hyperparam = dict()\r\n for name in hyperparam_names:\r\n value = optuna_results.at[0, 'params_'+name]\r\n if value!='':\r\n hyperparam[name] = value\r\n model['optuna_hyperparam'] = hyperparam\r\n model['optuna_sampling'] = optuna_results.at[0, 'params_sampling']\r\n\r\n\r\nimport shap\r\ndef shap_feature_impact(shap_values, class_value=1, top_n=10, show=True, save=None):\r\n \"\"\"\r\n Calcule l'impact des features vers la classe 'class_value' et\r\n affiche le graphe des 'top_n' features les plus impactantes.\r\n :param shap_values: shap_values retournées par l'objet\r\n 'shap.Explainer'.\r\n :param class_value: int, valeur de la classe cible.\r\n :param top_n: int, nombre de features à représenter sur le\r\n graphique.\r\n :param save: str, chemin complet du fichier pour\r\n l'enregistrement du graphique ; default=None, pas\r\n d'enregistrement.\r\n :return: dataframe, impact des features:\r\n - 'feature': nom de la feature ;\r\n - 'SHAP_abs': valeur absolue de la valeur de Shapley ;\r\n - 'Sign': signe de la valeur de Shapley ('coral': >0).\r\n \"\"\"\r\n # SHAP values pour la classe et données d'entrée X sous forme de dataframe\r\n feature_list = shap_values.feature_names\r\n df_shap_val = pd.DataFrame(shap_values[..., class_value].values, columns=feature_list)\r\n df_X = pd.DataFrame(shap_values.data, columns=feature_list)\r\n\r\n # Signe de correlation entre les données et valeurs de Shapley\r\n corr_list = list()\r\n for feature in feature_list:\r\n corr = '#ff0051' if df_shap_val[feature].corr(df_X[feature])>0 else '#008bfb'\r\n corr_list.append(corr)\r\n corr_df = pd.concat([pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna('lightgray')\r\n corr_df.columns = ['Feature','Sign']\r\n\r\n # Bar-graph des valeurs d'impact signées\r\n df_impact = pd.DataFrame(np.abs(df_shap_val).mean()).reset_index()\r\n df_impact.columns = ['Feature','SHAP_abs']\r\n df_impact = df_impact.merge(corr_df, left_on='Feature', right_on='Feature', how='inner')\r\n df_impact = df_impact.sort_values(by='SHAP_abs', ascending = False)\r\n fig, ax = plt.subplots()\r\n ax = df_impact[:top_n].plot.barh(x='Feature', y='SHAP_abs', ax=ax, color=df_impact['Sign'],\r\n figsize=(10, 1+min(12, int(0.2*top_n))), legend=False)\r\n ax.set_ylabel(\"\")\r\n ax.invert_yaxis()\r\n ax.set_xlabel(\"SHAP Value\", fontsize=12)\r\n plt.title(f\"Impact des top {top_n} features sur la prédiction de la classe {class_value}\\n\"\r\n f\"(Corrélation: rouge=positive, bleu=négative)\", fontsize=14)\r\n plt.tight_layout()\r\n if save is not None:\r\n plt.savefig(save, dpi=300)\r\n if show:\r\n plt.show()\r\n else:\r\n return fig\r\n\r\n\r\ndef bivariate_cat_cat(df_cat1_cat_2, alpha=0.05, save=None):\r\n \"\"\"\r\n Effectue l'analyse bivariée entre 2 variables catégorielles.\r\n Affiche la heatmap et effectue le test du chi2 avec un\r\n seuil de 5% pour évaluer la dépendance des features.\r\n :param df_cat1_cat_2: dataframe, contenant en ligne toutes\r\n les observations et 2 colonnes, une pour chaque feature.\r\n :param alpha: float, seuil de test de la pvalue.\r\n :param save: str, chemin vers le fichier d'enregistrement du\r\n graphique ; default=None, pas d'enregistrement.\r\n :return: rien\r\n \"\"\"\r\n # Format des étiquettes de valeur unique\r\n df = df_cat1_cat_2.copy()\r\n features = df.columns.tolist()\r\n if len(features)!=2:\r\n print(f\"Le dataframe ne correspond pas à une paire de features.\")\r\n return\r\n for 
feature in features:\r\n is_feat_num = True if np.issubdtype(df[feature].dtype, np.number) else False\r\n if is_feat_num:\r\n df[feature] = pd.to_numeric(df[feature], errors='coerce')\r\n is_int = np.array([x%1==0 for x in pd.unique(df[feature])]).all()\r\n if is_int:\r\n df[feature] = df[feature].astype(int)\r\n\r\n # Table de contingence\r\n cont = df.pivot_table(index=features[0],\r\n columns=features[1],\r\n aggfunc=len,\r\n margins=True,\r\n margins_name='total')\r\n # Table ξ (xi) des corrélations\r\n tx = cont.loc[:,[\"total\"]]\r\n ty = cont.loc[[\"total\"],:]\r\n n = len(df)\r\n indep = tx.dot(ty) / n\r\n cont = cont.fillna(0)\r\n measure = (cont-indep)**2/indep\r\n xi_n = measure.sum().sum()\r\n\r\n # Test CHI2 (note: xi_n=chi2) - H0: variables indépendantes\r\n chi2, p_value, ddl, exp = st.chi2_contingency(cont)\r\n indep = False if p_value < alpha else True\r\n\r\n # Heatmap (échelle 0-1)\r\n table = measure/xi_n\r\n sns.heatmap(table.iloc[:-1,:-1],\r\n # valeurs de la table des contingences\r\n annot=cont.iloc[:-1,:-1].astype(int),\r\n # format de 'annot'\r\n fmt='d',\r\n cbar_kws={'label': '← independance - dependance →'})\r\n dep = 'variables non corrélées' if indep else 'variables corrélées'\r\n plt.title(f\"Heatmap analyse bivariée ({dep})\", fontsize=14)\r\n plt.tight_layout()\r\n if save is not None:\r\n plt.savefig(save, dpi=300)\r\n plt.show()\r\n\r\n # Nettoyage des variables\r\n del cont, tx, ty, n, indep, measure, xi_n, chi2, p_value, ddl, exp, table, dep\r\n gc.collect()\r\n\r\n\r\ndef eta_squared(x, y):\r\n \"\"\"\r\n Calcul du rapport de corrélation entre une variable\r\n catégorielle x et une variable quantitative y.\r\n :param x: pandas Series, variable catégorielle.\r\n :param y: pandas Series, variable numérique.\r\n :return: float, coefficient de corrélation η²\r\n \"\"\"\r\n moyenne_y = y.mean()\r\n classes = []\r\n for classe in x.unique():\r\n yi_classe = y[x == classe]\r\n classes.append({'ni': len(yi_classe),\r\n 'moyenne_classe': yi_classe.mean()})\r\n SCT = sum([(yj - moyenne_y) ** 2 for yj in y])\r\n SCE = sum([c['ni'] * (c['moyenne_classe'] - moyenne_y) ** 2 for c in classes])\r\n eta_squared = SCE / SCT\r\n del moyenne_y, classes, yi_classe, SCT, SCE\r\n gc.collect()\r\n return eta_squared\r\n\r\n\r\ndef welch_ttest(x, y, alpha=0.05):\r\n \"\"\"\r\n Test de Welch avec H0: égalité des moyennes entre x et y.\r\n :param x: numpy array ou pandas Series\r\n :param y: numpy array ou pandas Series\r\n :param alpha:seuil de test de la p-value ; default=0.05\r\n :return: bool:\r\n - True: H0 vraie (égalité)\r\n - False: H0 rejetée (inégalité)\r\n \"\"\"\r\n dof = (x.var() / x.size + y.var() / y.size) ** 2 / (\r\n (x.var() / x.size) ** 2 / (x.size - 1) + (y.var() / y.size) ** 2 / (y.size - 1))\r\n t, p = st.ttest_ind(x, y, equal_var=False)\r\n result = p > alpha\r\n del dof, t, p\r\n gc.collect()\r\n return result\r\n\r\n\r\nfrom numpy.polynomial import polynomial as P\r\nimport warnings\r\ndef anova(df_cat_num, nb_cat=5, alpha=0.05, save=None, verbose=0):\r\n \"\"\"\r\n Effectue l'ANOVA pour une paire de variables (cat, num).\r\n :param data: dataframe, contenant en ligne toutes les\r\n observations et 2 colonnes, une pour chaque feature.\r\n :param nb_cat: int, nombre maximum de catégories à afficher\r\n pour la variables catégorielle.\r\n :param alpha: float, seuil des tests (normalité, Welch, Fligner).\r\n :param save: str, chemin vers le fichier d'enregistrement du\r\n graphique ; default=None, pas d'enregistrement.\r\n :param verbose: int, niveau de 
verbosité:\r\n - 0: n'affiche que le graphique et filtre les UserWarning\r\n - 1: affiche de plus les résultats des tests et la régression\r\n linéaire.\r\n - 2 ou plus: affiche de plus les informations sur les\r\n résultats de test et les 'UserWarning'.\r\n :return:\r\n \"\"\"\r\n pair = df_cat_num.columns.tolist()\r\n if len(pair) != 2:\r\n print(f\"Le dataframe ne correspond pas à une paire de features.\")\r\n return\r\n if verbose>0:\r\n print(Fore.GREEN + \"► ANOVA pour la paire : \" + Style.RESET_ALL, pair)\r\n df = df_cat_num[pair].copy(deep=True)\r\n\r\n # Filtrage des UserWarning en mode non verbose\r\n if verbose < 2:\r\n warnings.filterwarnings(action=\"ignore\", category=UserWarning)\r\n\r\n # Format des étiquettes de valeur unique\r\n cat_feat = df.columns.tolist()[0]\r\n is_feat_num = True if np.issubdtype(df[cat_feat].dtype, np.number) else False\r\n if is_feat_num:\r\n df[cat_feat] = pd.to_numeric(df[cat_feat], errors='coerce')\r\n is_int = np.array([x % 1 == 0 for x in pd.unique(df[cat_feat])]).all()\r\n if is_int:\r\n df[cat_feat] = df[cat_feat].astype(int)\r\n\r\n # Filtrage des catégories qui contiennent moins de 'n_samples_per_cat_min' lignes (min=3)\r\n df_cat = df.groupby(pair[0], as_index=False).agg(\r\n means=(pair[1], \"mean\"),size=(pair[0], \"size\")).sort_values(\r\n by='means', ascending=False).reset_index(drop=True)\r\n n_samples_per_cat_min = 3\r\n list_cat = df_cat.loc[df_cat['size'] >= n_samples_per_cat_min, pair[0]].tolist()\r\n df.drop(index=df.loc[~df[pair[0]].isin(list_cat), :].index, inplace=True)\r\n df_cat.drop(index=df_cat.loc[~df_cat[pair[0]].isin(list_cat), :].index, inplace=True)\r\n df_cat.reset_index(drop=True, inplace=True)\r\n\r\n # Filtrage des nb_cat pour lesquelles la moyenne des valeurs numériques est la plus élevée\r\n df_cat = df_cat.head(nb_cat)\r\n list_cat = df_cat[pair[0]].head(nb_cat).values.tolist()\r\n nb_cat = min(nb_cat, len(list_cat))\r\n df.drop(index=df.loc[~df[pair[0]].isin(list_cat), :].index, inplace=True)\r\n df[pair[0]] = pd.Categorical(df[pair[0]], categories=list_cat, ordered=True)\r\n\r\n # Calcul du rapport de corrélation\r\n eta_sqr = eta_squared(df[pair[0]], df[pair[1]])\r\n if verbose > 0:\r\n print(\" → Rapport de corrélation pour les k=\", nb_cat, \"catégories du graphique et n=\",\r\n df.shape[0], \"données : η²=\" + f\"{eta_sqr:.3f}\")\r\n\r\n # Remplacement des catégories par une valeur numérique\r\n df['cat'] = df[pair[0]].copy()\r\n df['cat'] = df['cat'].astype('object').astype(\"category\")\r\n df['cat'].replace(df['cat'].cat.categories, [i for i in range(0, len(df['cat'].cat.categories))], inplace=True)\r\n df['cat'] = df['cat'].astype(\"int\")\r\n\r\n # Tests sur les variables\r\n # Test de normalité (H0: distribution normale)\r\n tn = True\r\n list_norm_neg = {'category': [], 'statistic': [], 'p-value': []}\r\n for cat in range(nb_cat):\r\n stat, pvalue = st.normaltest(df.loc[df['cat'] == cat, pair[1]].values)\r\n tn = tn and (pvalue > alpha)\r\n if pvalue <= alpha:\r\n list_norm_neg['category'].append(cat)\r\n list_norm_neg['statistic'].append(stat)\r\n list_norm_neg['p-value'].append(pvalue)\r\n if verbose>0:\r\n if tn:\r\n print(\" → Test de normalité positif pour toutes les catégories\")\r\n else:\r\n print(\" → Test de normalité négatif sur certaines catégories\")\r\n if verbose>1:\r\n display(pd.DataFrame.from_dict(list_norm_neg))\r\n\r\n # Test d'homoscédasticité (H0: variances égales entre les catégories)\r\n gb = df.groupby(pair[0])[pair[1]]\r\n stat, p_fligner = 
st.fligner(*[gb.get_group(x).values for x in gb.groups.keys()])\r\n is_fligner_test_positive = p_fligner > alpha\r\n if verbose > 0:\r\n if is_fligner_test_positive:\r\n print(\" → Test d'homoscédasticité de Fligner-Killeen positif \"\r\n \"(Ecarts types égaux entre les catégories)\")\r\n else:\r\n print(f\" → Test d'homoscédasticité de Fligner-Killeen négatif \"\r\n f\"(Ecarts types non égaux entre les catégories)\")\r\n if verbose > 1:\r\n std = pd.DataFrame(data=[gb.get_group(x).values.std() for x in gb.groups.keys()],\r\n columns=['std'], index=gb.groups.keys())\r\n display(std)\r\n\r\n # Test de Welch (H0: égalité des moyennes entre catégories), si test d'homoscédasticité négatif\r\n # Table de groupe des catégories en fonction du résultat du test\r\n tw_true = True\r\n tw_false = True\r\n dgr = pd.DataFrame(data=np.arange(len(list_cat)), index=[list_cat], columns=['group'])\r\n for i in range(len(list_cat) - 1):\r\n for j in range(i + 1, len(list_cat)):\r\n is_welch_ttest_positive = welch_ttest(gb.get_group(list_cat[i]).values, gb.get_group(list_cat[j]).values)\r\n tw_true = tw_true and is_welch_ttest_positive\r\n tw_false = tw_false and not is_welch_ttest_positive\r\n # Si le test est positif, les moyennes des 2 catégories sont équivalentes\r\n if is_welch_ttest_positive:\r\n gr = dgr.loc[list_cat[i]]['group']\r\n dgr.at[list_cat[j], 'group'] = gr\r\n # Valeurs de l'ordonnée pour le grouper les catégories ayant des moyennes non dissemblables\r\n rows = [-0.5]\r\n for i in range(1, len(list_cat)):\r\n if dgr['group'].values[i]!=dgr['group'].values[i-1]:\r\n rows.append(i-0.5)\r\n rows.append(len(list_cat)-0.5)\r\n # Affichage du résultat du test de Welch\r\n if verbose > 0:\r\n if tw_true:\r\n print(\" → Test de Welch positif (égalité des moyennes) \"\r\n \"pour toutes les catégories\")\r\n elif tw_false:\r\n print(\" → Test de Welch (égalité des moyennes) négatif \"\r\n \"pour toutes les catégories\")\r\n else:\r\n print(\" → Test de Welch (égalité des moyennes) positifs \"\r\n \"pour les catégories de même groupe sur le graphique\")\r\n\r\n # Test statistique de Fisher\r\n dfn = nb_cat - 1\r\n dfd = df.shape[0] - nb_cat\r\n F_crit = st.f.ppf(1 - alpha, dfn, dfd)\r\n F_stat, p = st.f_oneway(df['cat'], df[pair[1]])\r\n sign_F = \">\" if F_stat > F_crit else \"<\"\r\n sign_p = \">\" if p > alpha else \"<\"\r\n if (sign_F == \">\") and (sign_p == \"<\"):\r\n res_test = \"positif\"\r\n else:\r\n res_test = \"négatif\"\r\n if verbose > 0:\r\n print(f\" → Test de Fisher {res_test}\")\r\n if verbose>1:\r\n print(f\"\\tF={F_stat:.2f} {sign_F} {F_crit:.2f}\",\r\n f\" , et p-value={p:.2e} {sign_p} {alpha:0.2f}\")\r\n\r\n # Définition des dimensions du graphique global\r\n fig_h = nb_cat if nb_cat < 6 else int((5 * nb_cat + 40) / 15)\r\n\r\n # Propriétés graphiques\r\n medianprops = {'color': \"black\"}\r\n meanprops = {'marker': 'o', 'markeredgecolor': 'black', 'markerfacecolor': 'firebrick'}\r\n\r\n fig, ax = plt.subplots(figsize=(15, fig_h))\r\n ax = sns.boxplot(x=pair[1], y=pair[0], data=df, showfliers=False, ax=ax,\r\n medianprops=medianprops, showmeans=True, meanprops=meanprops)\r\n xmin, xmax = ax.get_xlim()\r\n\r\n # Tracé des lignes reliant les valeurs moyennes de chaque catégorie\r\n plt.plot(df_cat.means.values, df_cat.index.values, linestyle='--', c='#000000')\r\n\r\n # Bloc de séparation graphique des groupes de moyennes non différenciées (test de Welch négatif)\r\n if not tw_true and len(rows)>1:\r\n for i in range(len(rows)-1):\r\n plt.fill_between([xmin, xmax], [rows[i], 
rows[i]], [rows[i+1], rows[i+1]], alpha=0.2)\r\n\r\n # Régression linéaire sur les valeurs moyennes\r\n reg = P.polyfit(df_cat.means.values, df_cat.index.values, deg=1, full=True)\r\n yPredict = P.polyval(df_cat.means.values, reg[0])\r\n\r\n if verbose > 0:\r\n if nb_cat > 2:\r\n coef_cor = 1 - reg[1][0][0] / (np.var(df_cat.index.values) * len(df_cat.index.values))\r\n else:\r\n coef_cor = 1\r\n a = -1 / reg[0][1]\r\n mu = -reg[0][0] / reg[0][1] - a * (df_cat.shape[0] - 1)\r\n sign = '+' if a >= 0 else '-'\r\n print(f\"\\n → Moyenne catégorielle : '{pair[1]}' = {mu:.2f} {sign} {abs(a):.2f} * '{pair[0]}', avec :\",\r\n f\"'{df_cat[pair[0]][df_cat.shape[0] - 1]}'= 0 , …, '{df_cat[pair[0]][0]}'= {df_cat.shape[0] - 1}\")\r\n print(f\" → Coefficient de corrélation r² ={coef_cor:.2f}\")\r\n\r\n # Tracé de la droite de régression linéaire\r\n plt.plot(df_cat.means.values, yPredict, linewidth=2, linestyle='-', c='#FF0000')\r\n\r\n plt.ylim(top=-1, bottom=nb_cat)\r\n plt.title(f\"ANOVA - analyse bivariée (corrélation η²={eta_sqr:.3f})\", fontsize=14)\r\n plt.tight_layout()\r\n if save is not None:\r\n plt.savefig(save, dpi=300)\r\n plt.show()\r\n\r\n # Retour à la gestion des warnings par défaut\r\n if verbose > 1:\r\n warnings.filterwarnings(action=\"default\", category=UserWarning)\r\n\r\n # Nettoyage des variables\r\n del pair, df, df_cat, n_samples_per_cat_min, list_cat\r\n del nb_cat, eta_sqr, tn, list_norm_neg, cat, stat, pvalue\r\n del gb, p_fligner, is_fligner_test_positive, tw_true\r\n del tw_false, dgr, i, j, p, is_welch_ttest_positive\r\n del rows, dfn, dfd, F_crit, F_stat, sign_F, sign_p\r\n del res_test, fig_h, medianprops, meanprops, ax\r\n del xmin, xmax, reg, yPredict, cat_feat, is_feat_num\r\n if 'gr' in locals(): del gr\r\n if 'coef_cor' in locals(): del coef_cor\r\n if 'a' in locals(): del a\r\n if 'mu' in locals(): del mu\r\n if 'is_int' in locals(): del is_int\r\n gc.collect()\r\n\r\n\r\ndef pair_plot(data, save=None):\r\n pair = data.columns.tolist()\r\n if len(pair) != 2:\r\n print(f\"Le dataframe ne correspond pas à une paire de features.\")\r\n return\r\n df = data[pair].copy().apply(pd.to_numeric, axis=1)\r\n coef_p = st.pearsonr(df[pair[0]], df[pair[1]])[0]\r\n plot = sns.jointplot(data=df, x=pair[0], y=pair[1], kind=\"reg\", marginal_kws=dict(bins=20, fill=True))\r\n plt.suptitle(f\"Analyse bivariée (corrélation r²={coef_p:.3f})\", fontsize=14)\r\n plt.tight_layout()\r\n if save is not None:\r\n plt.savefig(save, dpi=300)\r\n plt.show()\r\n del pair, df, coef_p, plot\r\n gc.collect()\r\n\r\n","repo_name":"EricPaul075/OCP7-Scoring-model-implementation","sub_path":"P7_functions.py","file_name":"P7_functions.py","file_ext":"py","file_size_in_byte":90635,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19454299571","text":"from storage_providers import DefaultAccountProvider, SQLiteAccountProvider\n\n\ndef register_storages(settings: \"SettingsProvider\"):\n \"\"\"Register the storage providers\n\n Args:\n settings (SettingsProvider): The settings provider to be used for the storage\n \"\"\"\n print(\"[I] Registering account storage providers\")\n for c in [\n DefaultAccountProvider,\n SQLiteAccountProvider\n # Add storage providers\n ]:\n try:\n c(settings)\n except Exception as e:\n print(\"[E] Failed to register\", c.__name__, \":\", 
type(e).__name__)\n","repo_name":"lite-corp/LeLonMo","sub_path":"src/server/account_storage.py","file_name":"account_storage.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"27713221036","text":"from application import app, db\nfrom application.models import Ticket, Fix\nfrom flask import render_template, request, redirect\n\n@app.route('/')\ndef homepage():\n return render_template(\"home.html\")\n\n@app.route('/user', methods=[\"GET\", \"POST\"])\ndef user():\n tickets= \"\"\n tickets=Ticket.query.all()\n if request.form:\n user = request.form['user']\n issue = request.form['issue']\n ticket = Ticket(user=user, issue=issue)\n db.session.add(ticket)\n db.session.commit()\n tickets = Ticket.query.all()\n return render_template(\"user.html\", tickets=tickets)\n \n\n\n@app.route('/tech', methods=[\"GET\", \"POST\"])\ndef tech():\n fixes = \"\"\n fixes=Fix.query.all()\n if request.form:\n ticket_id=request.form['ticket_id']\n status=request.form['status']\n fix = Fix(status=status, ticket_id=ticket_id)\n db.session.add(fix)\n db.session.commit()\n fixes = Fix.query.all()\n return render_template(\"tech.html\", fixes=fixes)\n\n\n@app.route('/updateuser', methods=[\"GET\", \"POST\"])\ndef updateuser():\n newissue=request.form.get(\"newissue\")\n oldissue=request.form.get(\"oldissue\")\n issue=Ticket.query.filter_by(issue=oldissue).first()\n issue.issue=newissue\n db.session.commit()\n return redirect(\"/user\")\n\n@app.route('/updatetech', methods=[\"GET\", \"POST\"])\ndef updatetech():\n newstatus=request.form.get(\"newstatus\")\n oldstatus=request.form.get(\"oldstatus\")\n status=Fix.query.filter_by(status=oldstatus).first()\n status.status=newstatus\n db.session.commit()\n return redirect(\"/tech\")\n\n@app.route('/deleteuser', methods=[\"GET\", \"POST\"])\ndef deleteuser():\n issue= request.form.get(\"issue\")\n user= request.form.get(\"user\")\n ticket = Ticket.query.filter_by(issue=issue, user=user).first()\n db.session.delete(ticket)\n db.session.commit()\n return redirect(\"/user\")\n\n@app.route('/deletetech', methods=[\"GET\", \"POST\"])\ndef deletetech():\n ticket_id=request.form.get(\"ticket_id\")\n status=request.form.get(\"status\")\n fix = Fix.query.filter_by(ticket_id=ticket_id, status=status).first()\n db.session.delete(fix)\n db.session.commit()\n return redirect(\"/tech\")","repo_name":"itZian94/devops_fundamentals","sub_path":"application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22063403269","text":"import argparse\nfrom classes.dbnqa_dataset import DBNQADataset\n\ndef main(args: argparse.Namespace) -> None:\n processed_dataset = DBNQADataset(args.nl, args.sparql, subset=args.subset)\n processed_dataset.save(f'{args.out_dir}/dataset.json')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Build expected format dataset from lcquad.\")\n\n parser.add_argument(\"--nl\", type=str, default='raw_data/DBNQA/data.en',\n help=\"path to the json file containing the train original lcquad dataset\")\n\n parser.add_argument(\"--sparql\", type=str, default='raw_data/DBNQA/data.sparql',\n help=\"path to the json file containing the test original lcquad dataset\")\n\n parser.add_argument(\"--out_dir\", type=str, default='out_data/DBNQA',\n help=\"path to save all data\")\n \n parser.add_argument(\"--subset\", 
type=int, default=None,\n help=\"create a subset of size N\")\n\n\n args = parser.parse_args()\n\n if not args.nl or not args.sparql:\n parser.error(\n \"This program requires values for --train and --test\")\n\n main(args)\n","repo_name":"Lama-West/SPARQL_Query_Generation_aacl-ijcnl2022","sub_path":"Data/src/DBNQA/build_dataset.py","file_name":"build_dataset.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"66"} +{"seq_id":"71664554451","text":"from typing import Tuple, Union, Dict, Literal, Optional\nimport pickle\nimport os\n\nimport pandas as pd\nimport numpy as np\n\nfrom gfibot import CONFIG\nfrom .utils import SklearnCompatibleClassifier, get_binary_classifier_metrics\n\n# where to find models\ntry:\n GFIBOT_MODEL_PATH = CONFIG[\"gfibot\"][\"model_path\"]\nexcept KeyError:\n GFIBOT_MODEL_PATH = \"./models\"\n\n# where to find cache\ntry:\n GFIBOT_CACHE_PATH = CONFIG[\"gfibot\"][\"cache_path\"]\nexcept KeyError:\n GFIBOT_CACHE_PATH = \"./.cache\"\n\n\nclass GFIModel(object):\n def __init__(\n self,\n classifier: SklearnCompatibleClassifier,\n ):\n self._clf = classifier\n self._X_train, self._X_test, self._y_train, self._y_test = [None] * 4\n\n def load_dataset(\n self,\n X_train: pd.DataFrame,\n X_test: pd.DataFrame,\n y_train: pd.Series,\n y_test: pd.Series,\n ):\n self._X_train = X_train\n self._y_train = y_train\n self._X_test = X_test\n self._y_test = y_test\n\n def fit(self, *args, **kwargs):\n if self._X_train is None:\n raise ValueError(\"Dataset not loaded: call load_dataset first\")\n self._clf.fit(self._X_train, self._y_train, *args, **kwargs)\n\n def predict(self, X: pd.DataFrame, *args, **kwargs) -> np.ndarray:\n _r = self._clf.predict_proba(X, *args, **kwargs)[:, 1]\n return _r\n\n def get_metrics(self, gfi_thres: int = 0.5):\n if self._X_test is None:\n raise ValueError(\"Dataset not loaded: call load_dataset first\")\n y_pred = self.predict(self._X_test)\n return get_binary_classifier_metrics(self._y_test, y_pred, gfi_thres)\n\n def get_feature_importances(self, X: Optional[pd.DataFrame] = None) -> pd.Series:\n if X is None:\n if self._X_test is None:\n raise ValueError(\"Dataset not loaded: call load_dataset first\")\n X = self._X_test\n _imp = self._clf.feature_importances_\n _names = X.columns\n return pd.Series(_imp, index=_names).sort_values(ascending=False)\n\n @classmethod\n def from_pickle(cls, path: str, *args, **kwargs) -> \"GFIModel\":\n with open(path, \"rb\") as f:\n clf = pickle.load(f)\n return cls(clf, *args, **kwargs)\n\n def to_pickle(self, path: str):\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n with open(path, \"wb\") as f:\n pickle.dump(self._clf, f)\n\n def to_portable_format(self, path: str):\n if hasattr(self._clf, \"_Booster\") and hasattr(self._clf._Booster, \"save_model\"):\n self._clf._Booster.save_model(path)\n else:\n raise NotImplementedError(\"Only supports XGBClassifier and LGBMClassifier\")\n","repo_name":"osslab-pku/gfi-bot","sub_path":"gfibot/model/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"66"} +{"seq_id":"11702249571","text":"from falcon import API\nimport logging\n\n\nclass API(API):\n def __init__(self, versions=None, **kwargs):\n super().__init__(**kwargs)\n self.versions = versions\n self.auto_route()\n\n def auto_route(self):\n for version in self.versions:\n try:\n urls = 
getattr(__import__(version + '.urls').urls, 'urls')\n for route, instance in urls:\n self.add_route('/{}/{}'.format(version, route.strip().strip('/')), instance)\n except ModuleNotFoundError as e:\n logging.exception(e)\n","repo_name":"paneru-rajan/falcon-url-versioning","sub_path":"settings/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"39424706313","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 9 18:36:44 2021\r\n\r\n@author: Shiv\r\n\"\"\"\r\n\r\nfrom math import sqrt\r\nfrom numpy import arange\r\nimport datetime\r\n\r\n# Class Location\r\nclass Location():\r\n def __init__(self, x=0, y=0, z=0):\r\n self.x = x\r\n self.y = y\r\n self.z = z\r\n\r\n def toString(self):\r\n return \"(\" + str(self.x) + \" ; \" + str(self.y) + \" ; \" + str(self.z) + \")\"\r\n\r\n def distanceTo(self, loc):\r\n return sqrt(pow(self.x - loc.x, 2) + pow(self.y - loc.y, 2) + pow(self.z - loc.z,2))\r\n\r\n# N-Lateration Algorithm\r\ndef NLateration(data, step=.1, xSize=0.0, ySize=0.0, zSize=0.0, md=.0,\\\r\n dmax=10):\r\n minLoc = Location()\r\n minDist = 0.0\r\n for k in data:\r\n minDist += abs(k[0].distanceTo(Location()) - k[1])\r\n xSize = k[0].x if k[0].x > xSize else xSize\r\n ySize = k[0].y if k[0].y > ySize else ySize\r\n zSize = k[0].z if k[0].z > zSize else zSize\r\n for k in arange(0,xSize,step):\r\n for l in arange(0,ySize,step):\r\n for m in arange(0,zSize,step):\r\n d = .0\r\n for n in data:\r\n d += abs(n[0].distanceTo(Location(k,l,m)) - n[1])\r\n if d < minDist:\r\n minDist = d\r\n minLoc = Location(round(k,2),round(l,2),round(m,2))\r\n \r\n \r\n return (minLoc, minDist)\r\n\r\n\r\n# Use dataset for 4 AP's in 3D space.\r\ndataset = [(Location(.5,.5,.5), 3.0), (Location(4.0,.0,.0), 2.0), (Location(4.0,5.0,5.0), 4.2), (Location(3.0,3.0,3.0), 2.5)]\r\n# Use dataset1 for 3 AP's in 2D space.\r\ndataset1 = [(Location(.5,.5,.5), 3.0), (Location(4.0,.0,.0), 2.0), (Location(4.0,5.0,5.0), 4.2)]\r\n\r\n# Get Location of Mobile Terminal\r\nstart = datetime.datetime.now()\r\nresult = NLateration(dataset, step=.1)\r\nprint(\"\\r\\nLocation : \" + result[0].toString())\r\nend = datetime.datetime.now()\r\nprint (end-start)","repo_name":"ShivamDa/Postioning_Systems_and_Techniques","sub_path":"N-lateration/N-Lateration.py","file_name":"N-Lateration.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"8154637082","text":"\"\"\"\nQuestion 4: Create a new column to categorize cereals as 'healthy' vs 'unhealthy'.\n\nHealthy Cereals:\n Low Calories(<100),\n Low Sodium(<150),\n Low Sugar(<9),\n High fiber(>3),\n High Protein(>2),\n\nUnhealthy: (Other)\n\nName: Garnett Grant\nStudent Number: 301188923\nDate: Oct 5th 2022\nFile name: question4.py\n\"\"\"\n\nimport pandas as pd\n\ndf = pd.read_csv(\n r'G:/Centennial College 3412/Semester 3/COMP 237 - Introduction to Artificial Intelligence/A1_Mosab/cereal.csv')\n\n\ndef create_column():\n df['Healthy vs Unhealthy'] = 'Unhealthy'\n\n\ndef categorize_cereals():\n create_column()\n for i in df.index:\n if int(df['Calories'][i]) < 100:\n if int(df['Sodium'][i]) < 150:\n if int(df['Sugars'][i]) < 9:\n if int(df['Dietary Fiber'][i]) > 3:\n if int(df['Protein (g)'][i]) > 2:\n df['Healthy vs Unhealthy'][i] = \"Healthy\"\n print(df.to_string())\n\n\ncategorize_cereals()\n# 
print(df.to_string())\n","repo_name":"GarnettGrant/C237AI","sub_path":"A1_Comp237/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"1739229180","text":"from itertools import combinations\n\nimport numpy as np\nfrom sklearn.utils.validation import (as_float_array, assert_all_finite,\n check_X_y, column_or_1d, indexable)\n\n\ndef group_by(X, y, *, category_orders=None, operation=lambda x: x.mean(axis=0)):\n \"\"\"Groups the samples in X by labels in y and applies `operation`\n to the aggregated groups.\n\n Parameters\n __________\n X: array-like of shape (n_samples, n_features)\n The data matrix.\n y: array-like of shape (n_samples,)\n The class labels.\n category_orders: array-like of shape (np.unique(y).size,)\n Order of class labels to use when constructing the matrix.\n If None, will sort the class labels alphabetically.\n operation: callable\n The function to apply to the aggregated groups.\n \"\"\"\n X, y = check_X_y(X, y, accept_sparse=[\"csr\"])\n X = indexable(X)[0]\n\n if category_orders is None:\n category_orders = np.unique(y)\n elif not set(category_orders).issubset(set(y)):\n # To avoid getting nan values\n raise ValueError(\"Found categories not present in `y`.\")\n else:\n category_orders = column_or_1d(category_orders)\n\n if not callable(operation):\n raise ValueError(\"Please pass a callable operation.\")\n\n M = np.zeros((len(category_orders), X.shape[1]))\n\n for i, category in enumerate(category_orders):\n _agg_values = operation(X[y == category])\n _agg_values = as_float_array(_agg_values).flatten()\n if len(_agg_values) != X.shape[1]:\n raise ValueError(\n \"Operation must return a vector of size X.shape[1]\"\n f\"but instead found vector of size {len(_agg_values)}.\"\n )\n assert_all_finite(_agg_values)\n M[i] = _agg_values\n\n return M\n\n\ndef pairwise_differences(\n X, y,\n *,\n classes=None,\n ordered=False,\n operation=lambda x: x.mean(axis=0)):\n \"\"\"\n Given an data matrix X, if ordered is False, construct a matrix P of shape\n (n * (n-1) / 2, X.shape[1]) where n is the number of classes in y.\n The (i*j, g) entry of P corresponds to the average expression of feature g\n in group i - average expression of feature g in group j, in absolute value.\n If ordered is True, the shape of P will be (n * (n-1), X.shape[1]) and\n the pairwise distances will be clipped at 0.\n\n Returns P and a dictionary of mappings: label, label -> index.\n\n Parameters\n _________\n X: np.ndarray of shape (n_samples, n_features)\n y: np.ndarray of shape (n_samples,)\n classes: np.ndarray or None, unique class labels in y\n ordered: bool, if True will construct a matrix of ordered\n pairwise differences. 
In this case the shape of P is\n (n * (n-1), X.shape[1]).\n operation: callable, operation to use when constructing the class vector.\n \"\"\"\n if classes is None:\n classes = np.unique(y)\n\n n_classes = len(classes)\n # All pairwise combinations\n n_class_pairs = n_classes * (n_classes - 1) // 2\n\n # Cache the average vector of each class\n class_averages = group_by(\n X, y, category_orders=classes, operation=operation)\n\n # Compute the actual pairwise differences\n P = np.zeros((n_class_pairs * (1 if not ordered else 2), X.shape[1]))\n index_to_pair_dict = {}\n\n # Make sure to use range(n_classes) when indexing instead of classes,\n # to allow for arbitrary class labels.\n for index, (i, j) in enumerate(combinations(range(n_classes), 2)):\n difference = class_averages[i] - class_averages[j]\n if ordered:\n # Clip negative values to 0\n # Assign i - j to index and j - i to index + n_class_pairs\n P[index] = np.clip(difference, 0, None)\n index_to_pair_dict[index] = (i, j)\n P[index + n_class_pairs] = np.clip(-difference, 0, None)\n index_to_pair_dict[index + n_class_pairs] = (j, i)\n else:\n P[index] = np.abs(difference)\n index_to_pair_dict[index] = (i, j)\n\n return P, index_to_pair_dict\n","repo_name":"euxhenh/phenotype-cover","sub_path":"src/phenotype_cover/_operations.py","file_name":"_operations.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"66"} +{"seq_id":"26312917250","text":"#Jose Guadarrama\r\n#10/15/2014\r\n\r\n\r\ncount=0\r\n\r\n#Opens the text file\r\nChargAcc = open('charge_accounts.txt', 'r')\r\n\r\n#ask user account number to be input\r\nuserValue=input('Enter Charge Account Number: ')\r\n\r\n#reads the lines from text\r\nlines =ChargAcc.readlines()\r\n\r\nfor i in range(len(lines)):\r\n lines[i] = lines[i].rstrip('\\n')\r\n\r\n\r\nif userValue not in lines:\r\n print('Invalid')\r\nelse:\r\n print('Valid')\r\n\r\n\r\n#close text\r\nChargAcc.close()","repo_name":"SolidDarrama/Python","sub_path":"Ch7 Ex/ch7 ex5.py","file_name":"ch7 ex5.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"35685461049","text":"import LinkedList as ll\nclass returnObject:\n def __init__(self, node, level):\n self.node = node\n self.level = level\n\ndef Kth_to_last_naive(llist, k):\n # Return type: ll.LinkedListNode\n l_size = len(llist)\n x = l_size - k\n node = llist.head\n for i in range(x):\n node = node.next\n return node\n\ndef Kth_to_last_recursive(node, k):\n # Return tpye: returnObject\n if node == None:\n return returnObject(None, -1)\n\n result = Kth_to_last_recursive(node.next, k)\n if result.level == k:\n node = result.node\n return returnObject(node, result.level)\n\n return returnObject(node, result.level + 1)\n\ndef Kth_to_last_runner(llist, k):\n # Return type: ll.LinkedListNode\n current = runner = llist.head\n for i in range(k):\n if runner == None:\n return None\n runner = runner.next\n while runner:\n current = current.next\n runner = runner.next\n return current\n\nif __name__ == \"__main__\":\n llist = ll.LinkedList().generate(10, 0, 40)\n print(llist)\n print(len(llist))\n # print(Kth_to_last_naive(llist, 0))\n print(Kth_to_last_runner(llist, 4))\n","repo_name":"ChingChieh/CTCI","sub_path":"Chapter2/2_2.py","file_name":"2_2.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} 
+{"seq_id":"18844008440","text":"from .. import events\n\nclass DefaultModelEventSender(object):\n ''' Base class which sends a 'modelChanged' event when an attribute of a\n model changes.\n '''\n def __setattr__(self, name, value):\n old_value = getattr(self, name, '')\n object.__setattr__(self, name, value)\n events.send( 'modelChanged', object = self, attributeName = name, oldAttributeValue = old_value, newAttributeValue = value )\n ","repo_name":"svn2github/wxPython","sub_path":"3rdParty/branches/FloatCanvas/SOC2008_FloatCanvas/floatcanvas2/floatcanvas/models/eventSender.py","file_name":"eventSender.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"66"} +{"seq_id":"3387795824","text":"#!/usr/bin/python\n\n\"\"\" \n Starter code for exploring the Enron dataset (emails + finances);\n loads up the dataset (pickled dict of dicts).\n\n The dataset has the form:\n enron_data[\"LASTNAME FIRSTNAME MIDDLEINITIAL\"] = { features_dict }\n\n {features_dict} is a dictionary of features associated with that person.\n You should explore features_dict as part of the mini-project,\n but here's an example to get you started:\n\n enron_data[\"SKILLING JEFFREY K\"][\"bonus\"] = 5600000\n \n\"\"\"\n\nimport pickle\n\nfile = open(\"./final_project/final_project_dataset.pkl\", \"rb\")\nenron_data = pickle.load(file)\n\n\ndef data_main():\n print(enron_data)\n print(\"number of dicts(person) is \", len(enron_data)) # number of dicts(person):146 person\n print(\"number of values(features) of key is \",\n len(enron_data[\"GLISAN JR BEN F\"])) # number of values(features) of key \"GLISAN JR BEN F\": 21\n # count poi\n cnt = 0\n for person_name in enron_data:\n if enron_data[person_name][\"poi\"] is True:\n cnt = cnt + 1\n print(\"number of poi is \", cnt) # 18\n # in fact, the poi number should be 35\n\n print(\"stock belonging to J P is \", enron_data[\"PRENTICE JAMES\"][\"total_stock_value\"]) # 1095040\n print(\"number of emails sent by W C to poi is \", enron_data[\"COLWELL WESLEY\"]['from_this_person_to_poi']) # 11\n print(\"value of stock options by J S is \", enron_data[\"SKILLING JEFFREY K\"]['exercised_stock_options'])\n # count salary people\n cnt = 0\n for person_name in enron_data:\n if enron_data[person_name][\"salary\"] != 'NaN':\n cnt = cnt + 1\n print(\"number of person have a quantified salary is \", cnt) # 95\n # count email people\n cnt = 0\n for person_name in enron_data:\n if enron_data[person_name][\"email_address\"] != 'NaN':\n cnt = cnt + 1\n print(\"number of person have a email address is \", cnt) # 111\n # count no total payment people\n cnt = 0\n for person_name in enron_data:\n if enron_data[person_name][\"total_payments\"] == 'NaN':\n cnt = cnt + 1\n print(\"number of person have no total payments data is \", cnt) # 21\n print(\"the rate of that is \", round(cnt / 146, 3)) # 21\n # count no total payment people in poi\n cnt_poi = 0\n cnt_NaN = 0\n for person_name in enron_data:\n if enron_data[person_name][\"poi\"] is True:\n cnt_poi = cnt_poi + 1\n if enron_data[person_name][\"total_payments\"] == 'NaN':\n cnt_NaN = cnt_NaN + 1\n print(\"in poi, number of person have no total payments data is \", cnt_NaN) # 0\n print(\"in poi, the rate of that is \", round(cnt_NaN / cnt_poi, 3)) # 
0\n","repo_name":"zhiyunl/MLCoursesCode_uda","sub_path":"ud120/datasets_questions/explore_enron_data.py","file_name":"explore_enron_data.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"36951210959","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom smartmin.views import SmartListView, SmartReadView, SmartCRUDL\nfrom temba.airtime.models import AirtimeTransfer\nfrom temba.orgs.views import OrgPermsMixin, OrgObjPermsMixin\n\n\nclass AirtimeCRUDL(SmartCRUDL):\n model = AirtimeTransfer\n actions = ('list', 'read')\n\n class List(OrgPermsMixin, SmartListView):\n fields = ('status', 'message', 'amount', 'contact', 'created_on')\n title = _(\"Recent Airtime Transfers\")\n default_order = ('-created_on',)\n field_config = dict(created_on=dict(label=\"Time\"))\n link_fields = ('message',)\n\n def get_status(self, obj):\n return obj.get_status_display()\n\n def derive_queryset(self, **kwargs):\n org = self.derive_org()\n return AirtimeTransfer.objects.filter(org=org)\n\n def get_channel(self, obj): # pragma: needs cover\n if obj.channel:\n return obj.channel\n return \"--\"\n\n def get_context_data(self, **kwargs):\n context = super(AirtimeCRUDL.List, self).get_context_data(**kwargs)\n context['org'] = self.derive_org()\n return context\n\n class Read(OrgObjPermsMixin, SmartReadView):\n title = _(\"Airtime Transfer Details\")\n field_config = dict(created_on=dict(label=\"Time\"))\n\n def get_context_data(self, **kwargs):\n context = super(AirtimeCRUDL.Read, self).get_context_data(**kwargs)\n context['show_logs'] = self.show_logs()\n return context\n\n def show_logs(self):\n org = self.derive_org()\n user = self.request.user\n if not org.is_anon or user.is_superuser or user.is_staff:\n return True\n return False # pragma: needs cover\n\n def derive_fields(self):\n if self.show_logs():\n return ('contact', 'status', 'channel', 'amount', 'message',\n 'recipient', 'denomination', 'created_on')\n\n return ('contact', 'status', 'channel', 'amount', 'message', 'created_on') # pragma: needs cover\n\n def get_status(self, obj):\n return obj.get_status_display()\n\n def derive_queryset(self, **kwargs):\n org = self.derive_org()\n return AirtimeTransfer.objects.filter(org=org)\n\n def get_channel(self, obj):\n if obj.channel:\n return obj.channel # pragma: needs cover\n return \"--\"\n","repo_name":"mekjr1/im3nsa","sub_path":"temba/airtime/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"66"} +{"seq_id":"34252933690","text":"# Advent of Code 2020\n# Day 06\n# Author: irobin591\n\nimport os\nimport doctest\n\nwith open(os.path.join(os.path.dirname(__file__), \"input.txt\"), 'r') as input_file:\n input_data = input_file.read().strip().split('\\n\\n')\n\n\ndef part1(input_data):\n \"\"\"\n >>> part1([\"abc\"])\n 3\n >>> part1([\"a\\\\nb\\\\nc\"])\n 3\n >>> part1([\"ab\\\\nac\"])\n 3\n >>> part1([\"a\\\\na\\\\na\\\\na\"])\n 1\n >>> part1([\"b\"])\n 1\n \"\"\"\n total_count = 0\n for group in input_data:\n ppl = group.split('\\n')\n yes_questions = {}\n for person in ppl:\n for answer in person:\n if not answer in yes_questions:\n yes_questions[answer] = 0\n yes_questions[answer] += 1\n total_count += len(yes_questions)\n return total_count\n\n\ndef 
part2(input_data):\n \"\"\"\n >>> part2([\"abc\"])\n 3\n >>> part2([\"a\\\\nb\\\\nc\"])\n 0\n >>> part2([\"ab\\\\nac\"])\n 1\n >>> part2([\"a\\\\na\\\\na\\\\na\"])\n 1\n >>> part2([\"b\"])\n 1\n \"\"\"\n total_count = 0\n for group in input_data:\n ppl = group.split('\\n')\n yes_questions = {}\n for person in ppl:\n for answer in person:\n if not answer in yes_questions:\n yes_questions[answer] = 0\n yes_questions[answer] += 1\n total_count += len(list(filter(lambda key: yes_questions[key] == len(ppl), yes_questions)))\n return total_count\n\n\nif __name__ == \"__main__\":\n doctest.testmod()\n print(\"Part One: {}\".format(part1(input_data)))\n print(\"Part Two: {}\".format(part2(input_data)))\n pass","repo_name":"irobin591/advent-of-code","sub_path":"2020/06/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"22326259239","text":"import networkx\nfrom numpy import linspace, ones, array, flipud, sin, outer, pi\nfrom numpy.random import rand, uniform\nfrom scipy.integrate import odeint\n\nfrom .exceptions import DataException\nfrom .decorators import unweighted\n\n\nclass Kuramoto:\n @unweighted\n def __init__(self, graph, time: int = 20, dt: float = 0.01, connectivity=None, phases=None, frequencies=None):\n self.graph = graph\n self.dt = dt\n self.graph_array = networkx.to_numpy_array(self.graph)\n self.nodes_count = self.graph.number_of_nodes()\n self.time = linspace(self.dt, time * self.dt, time)\n self.array_of_ones = ones(self.nodes_count)\n\n self.connectivity = (array(connectivity) if connectivity.__len__() == self.nodes_count else\n DataException('Connectivity must be equal to the number of oscillators').\n raise_this()) if connectivity is not None else uniform(0.1, 10, self.nodes_count)\n\n self.null_theta = (phases if phases.__len__() == self.nodes_count\n else DataException('Phases must be equal to the number of oscillators').raise_this()) \\\n if phases is not None else 2 * pi * rand(self.nodes_count)\n\n self.omega = (frequencies if frequencies.__len__() == self.nodes_count\n else DataException('Frequencies must be equal to the number of oscillators').raise_this()) \\\n if frequencies is not None else uniform(0.9, 1.1, self.nodes_count)\n\n def simulate(self):\n wrapper = odeint(self.theta, self.null_theta, self.time, (self.connectivity, self.omega, self.graph_array))\n return flipud(wrapper.T) % (2 * pi)\n\n def theta(self, null_theta, time, connectivity, omega, graph_array):\n return omega + connectivity / self.nodes_count * \\\n (self.graph_array * sin(outer(self.array_of_ones, null_theta)\n - outer(null_theta, self.array_of_ones))).dot(self.array_of_ones)","repo_name":"smoothbronx/Astromodel","sub_path":"Astromodel/common/utils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12236268369","text":"import time\n\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\n\nfrom amazon_config import (\n get_web_driver_options,\n get_chrome_web_driver,\n set_ignore_certificate_error,\n set_browser_as_incognito,\n BASE_URL\n)\n\n\nclass AmazonAPI:\n def __init__(self, base_url):\n self.base_url = base_url\n options = get_web_driver_options()\n set_ignore_certificate_error(options)\n set_browser_as_incognito(options)\n self.driver = 
get_chrome_web_driver(options)\n\n def run(self):\n self.driver.get(self.base_url)\n info = self.get_info()\n time.sleep(1)\n self.driver.quit()\n return info\n\n def get_info(self):\n title = self.get_title()\n availability = self.get_availability()\n if title and availability:\n ps5_info = {\n 'title': title,\n 'availability': availability\n }\n return ps5_info\n \n def get_availability(self):\n try:\n return self.driver.find_element_by_id('availability').text\n except Exception as e:\n print(e)\n print('Couldn\\'t find availability info')\n return None\n\n def get_title(self):\n try:\n return self.driver.find_element_by_id('productTitle').text\n except Exception as e:\n print(e)\n print('Couldn\\'t find title')\n return None\n\n\nif __name__ == '__main__':\n print('WORKING....')\n amazon = AmazonAPI(BASE_URL)\n data = amazon.run()\n print(data)\n\n\n","repo_name":"TomSOIreland/AmazonTracker","sub_path":"ps5tracker.py","file_name":"ps5tracker.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"2450182727","text":"repeat = True\r\n\r\ndef yesOrNo():\r\n i = 0\r\n while i < 2:\r\n answer = input(\"Do you want try again? (yes or no)\")\r\n if any(answer.lower() == f for f in [\"yes\", 'y', '1', 'ye']):\r\n return True\r\n elif any(answer.lower() == f for f in ['no', 'n', '0']):\r\n return False\r\n else:\r\n i += 1\r\n if i < 2:\r\n print('Please enter yes or no')\r\n else:\r\n print(\"Nothing done\")\r\n\r\n\r\nwhile repeat:\r\n print(\"This program is checking if the two inputs are Anagram\")\r\n inpText1 = input(\"Enter your first input: \")\r\n inpText2 = input(\"Enter your second input: \")\r\n\r\n temp1 = inpText1.replace(\" \", \"\").lower()\r\n temp2 = inpText2.replace(\" \", \"\").lower()\r\n \r\n if temp1==temp2:\r\n print(True)\r\n else:\r\n print(False)\r\n repeat = yesOrNo()\r\n","repo_name":"Rsentinell/MiNyProjectsPython","sub_path":"Anagram/Anagram.py","file_name":"Anagram.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"8306420475","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom com.mason.redis.constant import const\r\nfrom com.mason.redis_client import redisClient\r\n\r\n\r\n#\r\n#\r\n# # 文章发布\r\n# def post_article(conn, user, title, link):\r\n# article_id = str(conn.incr(\"article:\"))\r\n# voted = \"voted:\" + article_id\r\n# # 自己不能给自己投票\r\n# conn.sadd(voted, user)\r\n# # 设置过期时间\r\n# conn.expire(voted, const.ONE_WEEK_IN_SECONDS)\r\n#\r\n# now = time.time()\r\n# article = \"article:\" + article_id\r\n# print(\"post_article:{article_name}\".format(article_name=article))\r\n# # 存储文章的内容\r\n# conn.hmset(article, {\r\n# \"title\": title,\r\n# \"link\": link,\r\n# \"poster\": user,\r\n# \"time\": now,\r\n# \"votes:\": 1,\r\n# \"n_votes:\": 0\r\n# })\r\n#\r\n# # 设置文章的初始分数--这样岂不是,越靠后发布,分数越高?\r\n# conn.zadd(\"score:\", {article: now + const.VOTE_SCORES})\r\n# conn.zadd(\"time:\", {article: now})\r\n#\r\n# return article_id\r\n#\r\n#\r\n# article_id = post_article(redisClient, \"mason\", \"学习redis\", \"www.baidu.com\")\r\n# print(article_id)\r\n# article = \"article:\" + article_id\r\n# print(article)\r\n# print(redisClient.hgetall(article))\r\n\r\n\r\n# 获取指定页码的文章列表\r\ndef get_articles(conn, page, order=\"score:\"):\r\n start = (page - 1) * const.ARTICLES_PER_PAGE\r\n end = start + const.ARTICLES_PER_PAGE - 1\r\n\r\n # 返回order中,start到end的内容,按score倒序\r\n 
ids = conn.zrevrange(order, start, end)\r\n articles = []\r\n for article_id in ids:\r\n article_data = conn.hgetall(article_id)\r\n article_data[article_id] = article_id\r\n articles.append(article_data)\r\n\r\n return articles\r\n\r\n\r\narticles = get_articles(redisClient, 1)\r\n\r\nfor article in articles:\r\n print(article)\r\n","repo_name":"MasonEcnu/RedisInAction","sub_path":"com/mason/redis/part_one/chapter01/chapter0132.py","file_name":"chapter0132.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"75253863891","text":"# -----------------------------------------------------------\n# demonstrates how to work with commandline arguments\n# using the argparse module (available from Python 3.2)\n# with an optional argument\n#o\n# (C) 2017 Frank Hofmann, Berlin, Germany\n# Released under GNU Public License (GPL)\n# email frank.hofmann@efho.de\n# -----------------------------------------------------------\n\n# include standard modules\nimport argparse\n\n# initiate the parser\nparser = argparse.ArgumentParser()\n\n# add long and short argument\nparser.add_argument(\"--version\", \"-V\", help=\"show program version\", action=\"store_true\")\n\n# read arguments from the command line\nargs = parser.parse_args()\n\n# check for --version\nif args.version:\n print(\"this is myprogram version 0.1\")\n","repo_name":"hofmannedv/training-python","sub_path":"commandline/arguments-argparse-optional.py","file_name":"arguments-argparse-optional.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"66"} +{"seq_id":"33312431675","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as f\r\nfrom time import time \r\nimport global_v as glv\r\n\r\n\r\ndef psp(inputs, network_config):\r\n shape = inputs.shape\r\n n_steps = network_config['n_steps']\r\n tau_s = network_config['tau_s']\r\n\r\n syn = torch.zeros((shape[0], shape[1], shape[2], shape[3]), dtype=glv.dtype, device=glv.device)\r\n syns = torch.zeros((shape[0], shape[1], shape[2], shape[3], n_steps), dtype=glv.dtype, device=glv.device)\r\n\r\n for t in range(n_steps):\r\n syn = syn - syn / tau_s + inputs[..., t]\r\n syns[..., t] = syn / tau_s\r\n\r\n return syns\r\n\r\n\r\n\r\nclass TPA_back_linear(torch.autograd.Function):\r\n @staticmethod\r\n def forward(ctx, inputs, weights):\r\n temp_inputs = inputs.view(inputs.shape[0], inputs.shape[1] * inputs.shape[2] * inputs.shape[3], inputs.shape[4])\r\n temp_inputs = temp_inputs.transpose(1,2)\r\n outputs = f.linear(temp_inputs, weights)\r\n outputs = outputs.transpose(1,2)\r\n outputs = outputs.view(outputs.shape[0], outputs.shape[1], 1, 1, outputs.shape[2])\r\n\r\n\r\n ctx.save_for_backward(inputs, weights)\r\n return outputs\r\n\r\n def backward(ctx, grad_delta):\r\n (inputs, forward_weights) = ctx.saved_tensors\r\n #weight gradients here\r\n\r\n\r\n temp_inputs = inputs.view(inputs.shape[0], inputs.shape[1] * inputs.shape[2] * inputs.shape[3], inputs.shape[4])\r\n\r\n temp_grads = grad_delta.view(grad_delta.shape[0], grad_delta.shape[1] * grad_delta.shape[2] * grad_delta.shape[3], grad_delta.shape[4])\r\n\r\n weight_grad = torch.einsum('abc, afc -> bf', temp_grads, temp_inputs)\r\n weight_grad = torch.clamp(weight_grad, -100, 100)\r\n return None, weight_grad\r\n\r\n\r\nclass TPA_linear_layer(torch.autograd.Function):\r\n @staticmethod\r\n def forward(ctx, inputs, network_config, 
layer_config, forward_weights, forward_bias, backward_weights):\r\n temp_inputs = inputs.view(inputs.shape[0], inputs.shape[1] * inputs.shape[2] * inputs.shape[3], inputs.shape[4])\r\n temp_inputs = temp_inputs.transpose(1,2)\r\n outputs = f.linear(temp_inputs, forward_weights)\r\n outputs = outputs.transpose(1,2)\r\n outputs = outputs.view(outputs.shape[0], outputs.shape[1], 1, 1, outputs.shape[2])\r\n\r\n\r\n ctx.save_for_backward(inputs, forward_weights, backward_weights, forward_bias)\r\n return outputs\r\n\r\n @staticmethod\r\n def backward(ctx, grad_delta):\r\n (inputs, forward_weights, backward_weights, forward_bias) = ctx.saved_tensors\r\n #weight gradients ehre\r\n\r\n temp_inputs = inputs.view(inputs.shape[0], inputs.shape[1] * inputs.shape[2] * inputs.shape[3], inputs.shape[4])\r\n\r\n temp_grads = grad_delta.view(grad_delta.shape[0], grad_delta.shape[1] * grad_delta.shape[2] * grad_delta.shape[3], grad_delta.shape[4])\r\n\r\n weight_grad = torch.einsum('abc, afc -> bf', temp_grads, temp_inputs)\r\n\r\n\r\n\r\n outputs = torch.einsum('abc, db -> adc', temp_grads, backward_weights.transpose(0,1))\r\n\r\n outputs = outputs.view(inputs.shape)\r\n outputs = torch.clamp(outputs, -100, 100)\r\n weight_grad = torch.clamp(weight_grad, -100, 100)\r\n return outputs, None, None,weight_grad, None,None\r\n\r\n\r\nclass PSP_spike_large_batch(torch.autograd.Function): \r\n @staticmethod\r\n def forward(ctx, inputs, network_config, layer_config):\r\n shape = inputs.shape\r\n n_steps = network_config['n_steps']\r\n theta_m = 1/network_config['tau_m']\r\n theta_s = 1/network_config['tau_s']\r\n threshold = layer_config['threshold']\r\n\r\n mem = torch.zeros((shape[0], shape[1], shape[2], shape[3]), dtype=glv.dtype, device=glv.device)\r\n syn = torch.zeros((shape[0], shape[1], shape[2], shape[3]), dtype=glv.dtype, device=glv.device)\r\n ref = torch.zeros((shape[0], shape[1], shape[2], shape[3], shape[4]), dtype=glv.dtype, device=glv.device)\r\n delta_refs = torch.zeros((shape[0], shape[1], shape[2], shape[3], shape[4], shape[4]), dtype=glv.dtype, device=glv.device)\r\n est_mem_updates = torch.zeros((shape[0], shape[1], shape[2], shape[3], shape[4]), dtype=glv.dtype, device=glv.device)\r\n spike_dist = torch.zeros((shape[0], shape[1], shape[2], shape[3]), dtype=torch.long, device=glv.device)\r\n # prev_spikes = prev_spikes - 1\r\n gen = torch.arange(shape[4], device=glv.device).repeat((shape[0], shape[1], shape[2], shape[3], 1))\r\n est_delta_u = torch.zeros((shape[0], shape[1], shape[2], shape[3], shape[4]), dtype=glv.dtype, device=glv.device)\r\n mems = []\r\n mem_updates = []\r\n outputs = []\r\n syns_posts = []\r\n for t in range(n_steps):\r\n mem_update = (-theta_m) * mem + inputs[..., t]\r\n delta_ref = (-theta_m) * ref\r\n mem += mem_update\r\n ref += delta_ref\r\n\r\n out = mem > threshold\r\n out = out.type(glv.dtype)\r\n\r\n\r\n #########################################mem update estimation section\r\n\r\n est_mem_updates[...,t] = 1\r\n\r\n # denoms = glv.partial_currents[spike_dist]\r\n adjust = torch.einsum('abcde, abcd -> abcde', est_mem_updates, torch.mul(out,glv.partial_currents[spike_dist]))\r\n # print(adjust.shape)\r\n # est_mem_updates *= (1-out)*(-1)\r\n est_mem_updates = torch.einsum('abcd, abcde -> abcde', (out-1)*(-1), est_mem_updates)\r\n\r\n est_mem_updates = est_mem_updates - (theta_m * est_mem_updates)\r\n \r\n spike_dist = torch.mul(spike_dist + 1, (out -1)*(-1)).long()\r\n\r\n est_delta_u += adjust\r\n\r\n \r\n\r\n ################################\r\n\r\n\r\n\r\n 
mems.append(mem)\r\n if t > 0:\r\n out_tmp = out.unsqueeze(-1).repeat(1, 1, 1, 1, t)\r\n ref[..., 0:t] *= (1-out_tmp)\r\n delta_ref[..., 0:t] *= out_tmp\r\n ref[..., t] = (-1) * mem * out\r\n delta_refs[..., 0:t, t] = delta_ref[..., 0:t]\r\n\r\n mem = mem * (1-out)\r\n outputs.append(out)\r\n mem_updates.append(mem_update)\r\n\r\n syn = syn + (out - syn) * theta_s\r\n syns_posts.append(syn)\r\n mems = torch.stack(mems, dim = 4)\r\n mem_updates = torch.stack(mem_updates, dim = 4)\r\n syns_posts = torch.stack(syns_posts, dim = 4)\r\n outputs = torch.stack(outputs, dim = 4)\r\n\r\n \r\n\r\n\r\n ##filler options##\r\n \r\n if (network_config[\"tpa_filler\"] == \"low\"):#padd ending as if lowest filler level\r\n est_mem_updates *= glv.partial_currents[-1]/2\r\n est_delta_u += est_mem_updates\r\n elif (network_config[\"tpa_filler\"] == \"avg\"):\r\n # print(\"nah\")\r\n num_spikes = torch.sum(outputs, dim=4)\r\n timediff = shape[-1] - spike_dist\r\n vals = torch.nan_to_num(num_spikes/timediff)\r\n est_mem_updates = torch.einsum('abcde, abcd -> abcde', est_mem_updates, vals)\r\n est_delta_u += est_mem_updates\r\n elif (network_config[\"tpa_filler\"] == \"avg_low\"):\r\n # print(\"nah\")\r\n num_spikes = torch.sum(outputs, dim=4)\r\n timediff = shape[-1] - spike_dist\r\n vals = torch.nan_to_num(num_spikes/timediff, nan=-1 * glv.partial_currents[-1]/2)\r\n est_mem_updates = torch.einsum('abcde, abcd -> abcde', est_mem_updates, vals)\r\n est_delta_u += est_mem_updates\r\n\r\n elif (network_config[\"tpa_filler\"] == \"final_spike\"):\r\n # print(\"??\")\r\n est_mem_updates = torch.einsum('abcde, abcd -> abcde', est_mem_updates, glv.partial_currents[torch.clamp(spike_dist, max=shape[-1]-1)])\r\n est_delta_u += est_mem_updates\r\n elif (network_config[\"tpa_filler\"] == \"empty_diff\"):\r\n est_mem_updates *= -1 * glv.partial_currents[-1]/2\r\n est_delta_u += est_mem_updates\r\n elif (network_config[\"tpa_filler\"] == \"diff_all\"):\r\n negate = torch.ones_like(spike_dist)\r\n negate[spike_dist == shape[-1]] = -1\r\n est_mem_updates = torch.einsum('abcde, abcd -> abcde', est_mem_updates, glv.partial_currents[torch.clamp(spike_dist, max=shape[-1]-1)])\r\n est_mem_updates = torch.einsum('abcde, abcd -> abcde', est_mem_updates, negate)\r\n est_delta_u += est_mem_updates\r\n\r\n ##end filler##\r\n est_delta_u *= threshold\r\n\r\n if(network_config[\"tpa_bg\"]):\r\n est_delta_u = mem_updates\r\n ctx.save_for_backward(mem_updates, outputs, mems, est_delta_u, torch.tensor([threshold]))\r\n return syns_posts, est_delta_u, outputs\r\n\r\n @staticmethod\r\n def backward(ctx, grad_delta, du_grads, dontcare):\r\n (delta_u, outputs, u, est_delta_u, others) = ctx.saved_tensors\r\n start_time = time()\r\n shape = outputs.shape\r\n n_steps = glv.n_steps\r\n threshold = others[0].item()\r\n\r\n mini_batch = shape[0]\r\n partial_a_inter = glv.partial_a.repeat(mini_batch, shape[1], shape[2], shape[3], 1, 1)\r\n grad_a = torch.empty_like(delta_u)\r\n\r\n\r\n for i in range(int(shape[0]/mini_batch)):\r\n partial_a_all = partial_a_inter\r\n\r\n grad_a[i*mini_batch:(i+1)*mini_batch, ...] 
= torch.einsum('...ij, ...j -> ...i', partial_a_all, grad_delta[i*mini_batch:(i+1)*mini_batch, ...])\r\n\r\n if torch.sum(outputs)/(shape[0] * shape[1] * shape[2] * shape[3] * shape[4]) > 0.1:\r\n partial_u = torch.clamp(1 / est_delta_u, -10, 10) * outputs\r\n grad = grad_a * partial_u\r\n else:\r\n # warm up\r\n a = 0.2\r\n f = torch.clamp((-1 * u + threshold) / a, -8, 8)\r\n f = torch.exp(f)\r\n f = f / ((1 + f) * (1 + f) * a)\r\n\r\n grad = grad_a * f\r\n return grad, None, None, None, None, None, None, None, None\r\n\r\n\r\n","repo_name":"neurips2022submission/neurips2022","sub_path":"functions/TPA.py","file_name":"TPA.py","file_ext":"py","file_size_in_byte":9859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"14335786331","text":"import pyZiagn\n\nTest5 = pyZiagn.uniaxialTensileTest(\n length0=75, Area0=20.73\n) # if uCrosshead lenght0=33\nTest5.Title = 'Test5'\nTest5.TestMachine = 'unibz MTS E0.10 upper'\nTest5.dataSets = ['uCrosshead', 'Force', 't', 'uExtensometer']\nTest5.importTestData('data/5D.txt')\nTest5.disp = -Test5.uExtensometer\nTest5.disp = Test5.uCrosshead\nTest5.changeUnits()\nTest5.plotForceDisp()\n# Test5.cutData(\"disp\", 3.7)\n# Test5.smoothForce()\nTest5.plotForceDisp()\nTest5.calcStressEng()\nTest5.calcStrainEng()\nTest5.plotStressStrainEng()\nTest5.calcStressTrue()\nTest5.calcStrainTrue()\nTest5.plotStressStrainTrue()\nTest5.plotStressStrainEngTrue()\nTest5.calcElasticModulus(strain0=0.0000, strain1=0.001)\nTest5.zeroStrain()\nTest5.calcRP02()\nTest5.calcLinearLimit()\nTest5.calcStressUltimate()\nTest5.calcLength()\nTest5.calcArea()\nTest5.calcBreak()\nTest5.plotStressStrainEngAll()\nprint(Test5.stressUltimate)\nprint(Test5.stressEng[-1])\nprint(Test5.stressRP02)\nprint(Test5.stressLinLimit)\nprint(Test5.YoungsModulus)\n","repo_name":"e-dub/pyZiagn","sub_path":"examples/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"19403410076","text":"from sqlalchemy import or_, between\n\nfrom database.database import Session\nfrom database import tables\n\n\ndef create_user(user_id, user_name):\n session = Session()\n\n user = tables.User(user_id=user_id, user_name=user_name)\n access = tables.Access(user_id=user_id, access=user_id)\n settings = tables.Settings(user_id=user_id)\n\n try:\n session.add(user)\n session.add(access)\n session.add(settings)\n session.commit()\n except Exception as e:\n session.rollback()\n raise e\n finally:\n session.close()\n\n\ndef is_user_exist(user_id):\n session = Session()\n user = session.query(tables.User).filter(tables.User.user_id == user_id).first()\n session.close()\n if user:\n return user\n else:\n return False\n\n\ndef update_username(user_id, user_name):\n session = Session()\n try:\n session.query(tables.User).filter(\n tables.User.user_id == user_id).update(\n {'user_name': user_name})\n session.commit()\n except Exception as e:\n session.rollback()\n raise e\n finally:\n session.close()\n\n\ndef is_user_exist_by_username(user_name):\n session = Session()\n user = session.query(tables.User).filter(tables.User.user_name == user_name).first()\n session.close()\n if user:\n return user\n else:\n return False\n\n\ndef user_settings(user_id):\n session = Session()\n settings = session.query(tables.Settings).filter(tables.Settings.user_id == user_id).first()\n session.close()\n return settings\n\n\ndef change_settings(user_id, value):\n 
session = Session()\n try:\n session.query(tables.Settings).filter(\n tables.Settings.user_id == user_id).update(\n {'lite': value})\n session.commit()\n except Exception as e:\n session.rollback()\n raise e\n finally:\n session.close()\n\n\ndef get_users():\n session = Session()\n\n users = session.query(tables.User).all()\n\n session.close()\n\n return users\n\n\ndef delete_user(user_id):\n session = Session()\n session.query(tables.User).filter(tables.User.user_id == user_id).delete()\n session.commit()\n session.close()\n","repo_name":"markneonin/TheShoppingBasketBot","sub_path":"database/db_working/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"42395294687","text":"import os\nfrom pdf2emb_nlp.scraper import DocumentScraper\nfrom pdf2emb_nlp.arrange_text import CorpusGenerator\nfrom PyPDF2 import PdfFileReader\nimport esg_predictor\nfrom pathlib import Path\n\nDATABASE_DIRECTORY = Path(os.getcwd()) / \"database\"\nUPLOAD_DIRECTORY = DATABASE_DIRECTORY / \"upload_files\"\nGENERATE_DIRECTORY = DATABASE_DIRECTORY / \"generate_files\"\nCORPUS_DIRECTORY = DATABASE_DIRECTORY / \"corpus\"\nneg_words = ['coal', 'oil', 'waste', 'metal']\n\ndef get_esg_score(path, filetype='pdf'):\n text = scrape_file(path, filetype)\n score_json = esg_predictor.nlp_model(text)\n return score_json\n\n\ndef scrape_file(path, filetype):\n if filetype == 'pdf':\n text = scrape_pdf(path)\n elif filetype == 'txt':\n with open(path) as f:\n text = f.read()\n else:\n text = \"empty\"\n return text\n\n\ndef scrape_pdf(path):\n path = Path(path)\n file_name = path.name\n txt_name = '.'.join(file_name.split('.')[:-1]) + '.txt'\n txt_path = GENERATE_DIRECTORY / txt_name\n if os.path.exists(txt_path):\n with open(txt_path) as f:\n text = f.read()\n else:\n scraper = DocumentScraper(path)\n df_by_page = scraper.document_corpus_to_pandas_df()\n generator = CorpusGenerator(df_by_page)\n text = generator.df_by_page_to_df_by_sentence()\n\n return text\n\n\n\nif __name__ == '__main__':\n pdfs_folder = UPLOAD_DIRECTORY\n\n path = UPLOAD_DIRECTORY / '2019-Annual-Report.pdf'\n scraper = DocumentScraper(path)\n df_by_page = scraper.document_corpus_to_pandas_df()\n generator = CorpusGenerator(df_by_page)\n text = generator.df_by_page_to_df_by_sentence()\n\n score = get_esg_score(path)\n print(score)\n","repo_name":"AlibiMelis/Outliers_FinTech_CityHack22","sub_path":"backend/file_scraper.py","file_name":"file_scraper.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"552040717","text":"import struct\n\n\ndef is_bmp(filename):\n # if isinstance(filename, bytes):\n # f = open(filename, 'rb')\n # s = filename.read(30)\n s = struct.unpack('ccIIIIIIHH', filename)\n try:\n if s[0] == b'b' and s[1] == b'M':\n print('文件是一个Windows位图,图片大小为%d×%d,颜色数为%d' % (s[6], s[7], s[9]))\n elif s[0] == b'b' and s[1] == b'M':\n print('文件是一个OS/2位图,图片大小为%d×%d,颜色数为%d' % (s[6], s[7], s[9]))\n else:\n print('文件不是一个位图')\n finally:\n print('End')\n # else:\n # print('请输入一个二进制比特数据')\n\n\nt = (b'\\x42\\x4d\\x38\\x8c\\x0a\\x00\\x00\\x00\\x00\\x00\\x36\\x00\\x00\\x00\\x28\\x00\\x00\\x00\\x80\\x02\\x00\\x00\\x68\\x01\\x00\\x00\\x01'\n + 
b'\\x00\\x18\\x00')\nis_bmp(t)\n","repo_name":"WanliSun415/learnPython3.5","sub_path":"is_bmp.py","file_name":"is_bmp.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"72797545171","text":"\"\"\" Libraries \"\"\"\nimport os\nimport cv2\nimport json\nfrom tqdm import tqdm\n\n\n\"\"\" Parameters \"\"\"\nRESIZE_RATIO = 2\n\n\n\"\"\" Functions \"\"\"\ndef resize_data():\n\n raw_data_dir = \"raw/train_integrate\"\n save_data_dir = f\"raw/train_integrate_resize-{RESIZE_RATIO}\"\n save_data_dir_cubic = f\"{save_data_dir}_CUBIC\"\n save_data_dir_lanczos4 = f\"{save_data_dir}_LANCZOS4\"\n\n os.makedirs(save_data_dir_cubic, exist_ok=True)\n os.makedirs(save_data_dir_lanczos4, exist_ok=True)\n\n for i in tqdm(range(len(os.listdir(raw_data_dir))), desc=f\"Resizing and saving images\", ascii=True):\n img = cv2.imread(f\"{raw_data_dir}/image_{i:05}.jpg\")\n resize_shape = (img.shape[1]*RESIZE_RATIO, img.shape[0]*RESIZE_RATIO)\n cv2.imwrite(f\"{save_data_dir_cubic}/image_{i:05}.jpg\", cv2.resize(img, resize_shape, interpolation=cv2.INTER_CUBIC))\n cv2.imwrite(f\"{save_data_dir_lanczos4}/image_{i:05}.jpg\", cv2.resize(img, resize_shape, interpolation=cv2.INTER_LANCZOS4))\n\n with open(f\"{raw_data_dir}.json\", \"r\", encoding=\"utf-8\") as raw_json:\n\n raw_data = json.load(raw_json)\n images = raw_data[\"images\"]\n annotations = raw_data[\"annotations\"]\n\n raw_data[\"images\"] = [{\n \"id\" : image[\"id\"],\n \"width\" : image[\"width\"]*RESIZE_RATIO,\n \"height\" : image[\"height\"]*RESIZE_RATIO,\n \"license\" : image[\"license\"],\n \"file_name\": image[\"file_name\"],\n } for image in images ]\n raw_data[\"annotations\"] = [{\n \"id\" : annot[\"id\"],\n \"image_id\" : annot[\"image_id\"],\n \"category_id\" : annot[\"category_id\"],\n \"segmentation\": annot[\"segmentation\"],\n \"bbox\" : [ num*RESIZE_RATIO for num in annot[\"bbox\"] ],\n \"area\" : annot[\"area\"]*pow(RESIZE_RATIO, 2),\n \"iscrowd\" : annot[\"iscrowd\"],\n } for annot in annotations ]\n\n with open(f\"{save_data_dir}.json\", \"w\", encoding=\"utf-8\") as resize_json:\n json.dump(raw_data, resize_json, indent=4)\n \n return\n\n\n\"\"\" Execution \"\"\"\nif __name__ == \"__main__\":\n resize_data()","repo_name":"aisu-programming/Literature-Defect-Detection","sub_path":"data/images_resizing.py","file_name":"images_resizing.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71303957332","text":"\"\"\"\nAutomatic plotting of densities or MOs with vmd.\n\"\"\"\nfrom __future__ import print_function, division\nimport os\nimport sys\nfrom .actions import Action\nfrom colt.lazyimport import LazyImportCreator, LazyImporter\n\n\nwith LazyImportCreator() as importer:\n error_handler = importer.lazy_import_as('..error_handler', 'error_handler')\n theo_header = importer.lazy_import_as('..theo_header', 'theo_header')\n input_options = importer.lazy_import_as('..input_options', 'input_options')\n lib_struc = importer.lazy_import_as('..lib_struc', 'lib_struc')\n lib_file = importer.lazy_import_as('..lib_file', 'lib_file')\n lib_util = importer.lazy_import_as('..lib_util', 'lib_util')\n\n\nclass vmd_options(input_options.write_options):\n def vmd_input(self):\n self.read_yn('Compute volume integrals over cube files for isovalues?', 'do_vol', False)\n self.read_yn('Use special DNTO mode?', 'dnto', False)\n if self['dnto']:\n self['niso'] = 2\n 
if not self['do_vol']:\n self.read_float('Isovalue for conditional density', 'iso1', 0.003)\n self.read_float('Isovalue for probe density', 'iso2', self['iso1'])\n else:\n self.read_float('Volume integral for conditional density', 'iso1', 0.75)\n self.read_float('Volume integral for probe density', 'iso2', self['iso1'])\n self.read_str('VMD Material for conditional density [AOShiny, EdgyGlass, ...]', 'mat1', 'AOShiny')\n self.read_str('VMD Material for probe density [Glass1, AOEdgy, ...]', 'mat2', 'Glass1')\n else:\n self.read_int('How many isovalues (1 or 2)?', 'niso', 2)\n if not self['do_vol']:\n self.read_float('First isovalue', 'iso1', 0.003)\n else:\n self.read_float('Volume integral for first density', 'iso1', 0.5)\n self.read_str('VMD Material for first density', 'mat1', 'AOShiny')\n if self['niso'] >= 2:\n if not self['do_vol']:\n self.read_float('Second isovalue', 'iso2', self['iso1']/3.)\n else:\n self.read_float('Volume integral for second density', 'iso2', 0.9)\n self.read_str('VMD Material for second density', 'mat2', 'Glass3')\n else:\n self['iso2'] = 100.\n\n self.read_int('Width of images in output html file', 'width', 400)\n self.read_int('Number of columns in the output html file', 'ncol', 4)\n if self.ret_yn('Adjust file names?', False):\n self.read_str('Name of the file used to load data into VMD', 'lfile', 'load_all.vmd')\n self.read_str('Name of the file used to plot in VMD', 'pfile', 'plot_all.vmd')\n self.read_str('Name of the file used to call GIMP convert', 'cfile', 'convert.bash')\n self.read_str('Name of the HTML file with the plots', 'hfile', 'vmd_plots.html')\n else:\n self['lfile'] = 'load_all.vmd'\n self['pfile'] = 'plot_all.vmd'\n self['cfile'] = 'convert.bash'\n self['hfile'] = 'vmd_plots.html'\n\n def mod_pltfiles(self, pltfiles):\n \"\"\"\n Separate plotfiles for DNTO mode.\n \"\"\"\n hfiles = []\n auxhfiles = []\n efiles = []\n auxefiles = []\n for pltf in pltfiles:\n if 'rho_p' in pltf and not 'elec' in pltf:\n hfiles.append(pltf)\n auxhfiles.append(pltf.replace('rho_p', 'rho_h'))\n elif 'rho_h' in pltf and not 'hole' in pltf:\n efiles.append(pltf)\n auxefiles.append(pltf.replace('rho_h', 'rho_p'))\n return hfiles + efiles, auxhfiles + auxefiles\n\n def write_lfile(self, pltfiles, auxfiles=[]):\n \"\"\"\n File for loading data.\n \"\"\"\n lf = open(self['lfile'], 'w')\n lf.write(\"\"\"material change opacity Glass3 0.150000\nmaterial change diffuse Glass3 0.10000\naxes location Off\ndisplay projection Orthographic\ndisplay depthcue off\ncolor Display Background white\nmenu graphics on\nmol modstyle 0 0 Licorice 0.100000 30.000000 30.000000\n\"\"\")\n# material change diffuse Ghost 0.000000\n# material change ambient Ghost 0.300000\n# material change opacity Ghost 0.100000\n# material change shininess Ghost 0.000000\n\n iso1 = 0.001 if self['do_vol'] else self['iso1']\n lf.write(\"\"\"mol addrep 0\nmol addrep 0\nmol modmaterial 1 0 %s\nmol modmaterial 2 0 %s\nmol modstyle 1 0 Isosurface %.5f 0 0 0 1 1\nmol modstyle 2 0 Isosurface -%.5f 0 0 0 1 1\nmol modcolor 1 0 ColorID 0\nmol modcolor 2 0 ColorID 1\n\"\"\"%(self['mat1'], self['mat1'], iso1, iso1))\n\n if self['niso'] >= 2:\n iso2 = 0.001 if self['do_vol'] else self['iso2']\n lf.write(\"\"\"mol addrep 0\nmol addrep 0\nmol modmaterial 3 0 %s\nmol modmaterial 4 0 %s\nmol modstyle 3 0 Isosurface %.5f 0 0 0 1 1\nmol modstyle 4 0 Isosurface -%.5f 0 0 0 1 1\nmol modcolor 3 0 ColorID 0\nmol modcolor 4 0 ColorID 1\n\"\"\"%(self['mat2'], self['mat2'], iso2, iso2))\n\n struc = lib_struc.structure()\n for pltf 
in pltfiles:\n ftyp = struc.guess_file_type(pltf)\n lf.write(\"mol addfile %s type %s\\n\"%(pltf,ftyp))\n for pltf in auxfiles:\n ftyp = struc.guess_file_type(pltf)\n lf.write(\"mol addfile %s type %s\\n\"%(pltf,ftyp))\n\n lf.close()\n print(\"File %s written.\"%lf.name)\n\n def write_pfile(self, pltfiles, auxfiles=[]):\n \"\"\"\n File used for plotting.\n \"\"\"\n iso1 = self['iso1']\n iso2 = self['iso2']\n pf = open(self['pfile'], 'w')\n for iplt, pltf in enumerate(pltfiles):\n if self['do_vol']:\n isovals = lib_util.cube_file(pltf).ret_isovals([self['iso1'], self['iso2']], lvprt=1)\n iso1 = isovals[0]\n pf.write(\"mol modstyle 1 0 Isosurface %.5f %i 0 0 1 1\\n\"%(iso1, iplt))\n pf.write(\"mol modstyle 2 0 Isosurface -%.5f %i 0 0 1 1\\n\"%(iso1, iplt))\n if self['dnto']:\n if self['do_vol']:\n iso2 = lib_util.cube_file(auxfiles[iplt]).ret_isovals([self['iso2']], lvprt=1)[0]\n pf.write(\"mol modstyle 3 0 Isosurface %.5f %i 0 0 1 1\\n\"%(iso2, iplt + len(pltfiles)))\n pf.write(\"mol modstyle 4 0 Isosurface -%.5f %i 0 0 1 1\\n\"%(iso2, iplt + len(pltfiles)))\n elif self['niso'] >= 2:\n if self['do_vol']:\n iso2 = isovals[1]\n pf.write(\"mol modstyle 3 0 Isosurface %.5f %i 0 0 1 1\\n\"%(iso2, iplt))\n pf.write(\"mol modstyle 4 0 Isosurface -%.5f %i 0 0 1 1\\n\"%(iso2, iplt))\n pf.write(\"render TachyonInternal %s.tga\\n\"%pltf)\n\n pf.close()\n print(\"File %s written.\"%pf.name)\n\n def write_cfile(self, pltfiles):\n \"\"\"\n File for file conversion.\n \"\"\"\n cf = open(self['cfile'], 'w')\n\n cf.write('#!/bin/bash\\n')\n for pltf in pltfiles:\n cf.write(\"convert %s.tga %s.png && \"%(pltf, pltf))\n cf.write(\"rm %s.tga\\n\"%pltf)\n\n cf.close()\n print(\"File %s written.\"%cf.name)\n\n def write_hfile(self, pltfiles):\n \"\"\"\n HTML File.\n \"\"\"\n ho = lib_file.htmlfile(self['hfile'])\n ho.pre('VMD plots')\n\n ht = lib_file.htmltable(ncol=self['ncol'])\n for pltf in pltfiles:\n el = ''%(pltf, self['width'])\n el += '
%s'%pltf\n ht.add_el(el)\n\n ht.close_table()\n ho.write(ht.ret_table())\n\n ho.post(lvprt=1)\n\nclass VMDPlots(Action):\n\n name = 'vmd_plots'\n\n _colt_description = 'Automatic plotting of cube files in VMD'\n\n _user_input = \"\"\"\n # List of cube files (or other format VMD can read)\n pltfiles = :: list(existing_file)\n \"\"\"\n\n _lazy_imports = LazyImporter({\n '..error_handler': 'error_handler',\n '..theo_header': 'theo_header',\n '..input_options': 'input_options',\n '..lib_struc': 'lib_struc',\n '..lib_file': 'lib_file',\n '..lib_util': 'lib_util',\n })\n\n def run(pltfiles):\n theo_header.print_header(title=__class__._colt_description)\n\n print('%i Files analyzed:' % len(pltfiles), end=' ')\n print(\", \".join(os.path.basename(filename) for filename in pltfiles))\n\n vopt = vmd_options('vmd.in')\n vopt.vmd_input()\n auxfiles = []\n if vopt['dnto']:\n pltfiles, auxfiles = vopt.mod_pltfiles(pltfiles)\n\n vopt.write_lfile(pltfiles, auxfiles)\n vopt.write_pfile(pltfiles, auxfiles)\n vopt.write_cfile(pltfiles)\n vopt.write_hfile(pltfiles)\n\n print(\"Converting coordinate file ...\")\n struc = lib_struc.structure()\n try:\n struc.read_file(file_path=pltfiles[0], file_type=None)\n struc.make_coord_file(file_path='coord.xyz',file_type='xyz',lvprt=1)\n except:\n print(\"*** WARNING: The coordinate file coord.xyz could not be created. ***\")\n print(\" Please create this file yourself.\\n\\n\")\n\n print(\"\"\"\nFiles created. Now do the following:\n1. vmd coord.xyz\n2. File - Load Visualization State - %s\n3. Adjust the perspective\n4. File - Load Visualization State - %s\n5. bash %s\n6. Open in browser: %s\n\"\"\"%(vopt['lfile'], vopt['pfile'], vopt['cfile'], vopt['hfile']))\n","repo_name":"felixplasser/theodore-qc","sub_path":"theodore/actions/vmd_plots.py","file_name":"vmd_plots.py","file_ext":"py","file_size_in_byte":9388,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"66"} +{"seq_id":"40874234330","text":"#!/usr/bin/env python\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QDialog, QLabel, QListWidgetItem\nfrom PyQt5.QtCore import QCoreApplication, Qt, QTimer\nfrom PyQt5.uic import loadUi\nfrom PyQt5 import QtCore, QtWidgets, uic\nimport time\nimport requests\nimport threading\nimport RPi.GPIO as GPIO\nfrom mfrc522 import SimpleMFRC522\n\nGPIO.setwarnings(False)\n\nreader = SimpleMFRC522()\n\nurl_get_cnt = \"http://203.253.128.177:7579/Mobius/20191546?fu=1&ty=3&lim=20\"\nurl_post_personcheck = \"http://203.253.128.177:7579/Mobius/20191546/personcheck\"\ndata_post = {\"m2m:cin\": {\"con\": \"00\"}}\nsearch = \"Mobius/20191546\"\n\nheaders ={\n 'Accept': 'application/json',\n 'X-M2M-RI': '12345',\n 'X-M2M-Origin': 'SluN3OkDey-',\n 'Content-Type': 'application/vnd.onem2m-res+json; ty=4'\n}\n\nclass paydialog_2(QMainWindow): #옵션ui\n def __init__(self):\n super().__init__()\n loadUi('payform_2.ui', self)\n self.check_button_2.hide()\n\nclass OptionDialog(QMainWindow): #옵션ui\n def __init__(self):\n super().__init__()\n loadUi('optionn.ui', self)\n \n check_labels = [self.check_label_2, self.check_label_3, self.check_label_4, self.check_label_6, self.check_label_7]\n for check_label in check_labels:\n check_label.hide()\n self.price_label_2.setText('0')\n \n self.hot_button.clicked.connect(self.hotbutton)\n self.cold_button.clicked.connect(self.coldbutton)\n self.shot_button.clicked.connect(self.shotbutton)\n self.ice_button.clicked.connect(self.icebutton)\n self.s_button.clicked.connect(self.sbutton)\n 
self.m_button.clicked.connect(self.mbutton)\n self.l_button.clicked.connect(self.lbutton)\n self.cancel_button_2.clicked.connect(self.cancelbutton)\n \n def cancelbutton(self): #옵션화면 초기화\n check_labels = [self.check_label_2, self.check_label_3, self.check_label_4, self.check_label_6, self.check_label_7]\n for check_label in check_labels:\n check_label.hide()\n self.check_label_1.show()\n self.check_label_5.show()\n self.price_label_2.setText('0')\n self.hide()\n \n def hotbutton(self): #HOT 버튼 클릭\n if self.check_label_2.isVisible():\n self.check_label_2.hide()\n self.check_label_1.show()\n \n def coldbutton(self): #COLD 버튼 클릭\n if self.check_label_1.isVisible():\n self.check_label_1.hide()\n self.check_label_2.show()\n \n def shotbutton(self): #샷 추가 버튼 클릭\n if self.check_label_3.isVisible():\n self.check_label_3.hide()\n price_label2 = int(self.price_label_2.text())\n self.price_label_2.setText(str(price_label2-500))\n else:\n self.check_label_3.show()\n price_label2 = int(self.price_label_2.text())\n self.price_label_2.setText(str(price_label2+500))\n \n def icebutton(self): #얼음 추가 버튼 클릭\n if self.check_label_4.isVisible():\n self.check_label_4.hide()\n else:\n self.check_label_4.show()\n \n def sbutton(self): #Small 버튼 클릭\n if self.check_label_6.isVisible():\n self.check_label_6.hide()\n self.check_label_5.show()\n price_label2 = int(self.price_label_2.text())\n self.price_label_2.setText(str(price_label2-1000))\n elif self.check_label_7.isVisible():\n self.check_label_7.hide()\n self.check_label_5.show()\n price_label2 = int(self.price_label_2.text())\n self.price_label_2.setText(str(price_label2-2000))\n \n def mbutton(self): #Medium 버튼 클릭\n if self.check_label_5.isVisible():\n self.check_label_5.hide()\n self.check_label_6.show()\n price_label2 = int(self.price_label_2.text())\n self.price_label_2.setText(str(price_label2+1000))\n elif self.check_label_7.isVisible():\n self.check_label_7.hide()\n self.check_label_6.show()\n price_label2 = int(self.price_label_2.text())\n self.price_label_2.setText(str(price_label2-1000))\n\n def lbutton(self): #Large 버튼 클릭\n if self.check_label_5.isVisible():\n self.check_label_5.hide()\n self.check_label_7.show()\n price_label2 = int(self.price_label_2.text())\n self.price_label_2.setText(str(price_label2+2000))\n elif self.check_label_6.isVisible():\n self.check_label_6.hide()\n self.check_label_7.show()\n price_label2 = int(self.price_label_2.text())\n self.price_label_2.setText(str(price_label2+1000))\n \n \nclass paydialog(QMainWindow): #결제화면ui\n def __init__(self):\n super().__init__()\n loadUi('payform.ui',self)\n \n\nclass MyWidget(QMainWindow): #메인화면ui\n def __init__(self):\n super().__init__()\n \n self.setWindowFlag(QtCore.Qt.FramelessWindowHint)\n loadUi('untitled.ui', self)\n \n self.testbutton.hide()\n self.lineEdit.hide()\n self.testlabel.hide()\n \n\n self.option_dialog = OptionDialog() #옵션ui 가져오기\n self.option_dialog.hide() #옵션ui 숨기기\n \n self.pay_dialog = paydialog() #결제화면ui 가져오기\n self.pay_dialog.hide() #결제화면ui 숨기기\n\n self.pay_dialog_2 = paydialog_2() #결제화면2 ui 가져오기\n self.pay_dialog_2.hide() #결제화면2 ui 가져오기\n\n self.button1.clicked.connect(lambda: self.show_option_dialog(1)) #1번 메뉴 클릭\n self.button2.clicked.connect(lambda: self.show_option_dialog(2)) #2번 메뉴 클릭\n self.button3.clicked.connect(lambda: self.show_option_dialog(3)) #3번 메뉴 클릭\n \n self.wcbutton.clicked.connect(self.disable_wcbutton) #처음 화면 클릭\n\n self.button1.clicked.connect(lambda: self.increment_num(1)) #1번메뉴 클릭\n self.button2.clicked.connect(lambda: 
self.increment_num(2)) #2번메뉴 클릭\n self.button3.clicked.connect(lambda: self.increment_num(3)) #3번메뉴 클릭\n \n self.plusbutton_1.clicked.connect(lambda: self.increment_num(1)) # 1번 +버튼 클릭\n self.plusbutton_2.clicked.connect(lambda: self.increment_num(2)) # 2번 +버튼 클릭\n self.plusbutton_3.clicked.connect(lambda: self.increment_num(3)) # 3번 +버튼 클릭\n \n self.minusbutton_1.clicked.connect(lambda: self.decrement_num(1)) # 1번 -버튼 클릭\n self.minusbutton_2.clicked.connect(lambda: self.decrement_num(2)) # 2번 -버튼 클릭\n self.minusbutton_3.clicked.connect(lambda: self.decrement_num(3)) # 3번 -버튼 클릭\n\n self.cancel_button.clicked.connect(self.cancel) # \"취소하기\" 버튼 클릭\n\n self.pay_button.clicked.connect(self.pay_check) # \"결제하기\" 버튼 클릭\n self.pay_button.clicked.connect(self.open_payform) # \"결제하기\" 버튼 클릭\n\n self.testbutton.clicked.connect(self.test1) # mobius 테스트 버튼\n\n self.quitbutton.clicked.connect(QCoreApplication.instance().quit) #강제종료 버튼\n self.pay_dialog.cardpay_button.clicked.connect(self.card_pay) #카드 결제 클릭\n self.pay_dialog.cashpay_button.clicked.connect(self.cash_pay) #현금 결제 클릭\n self.pay_dialog_2.check_button.clicked.connect(self.check_button) #결제 후 확인 버튼 클릭\n \n \n self.option_dialog.pay_button_2.clicked.connect(self.option_price)\n \n \n def option_price(self): #옵션가격 확인 및 주문내역 출력\n option_text = self.option_dialog.coffee_name.text() + \" / \"\n for number in range(1, 3): \n check_label = getattr(self.option_dialog, f\"check_label_{number}\")\n option = getattr(self.option_dialog, f\"option_{number}\")\n\n if check_label.isVisible(): #체크되어있는 항목만 출력\n text = option.text()\n option_text += text + \" / \"\n \n option_text += \"\\n\"\n \n for number in range(3, 7):\n check_label = getattr(self.option_dialog, f\"check_label_{number}\")\n option = getattr(self.option_dialog, f\"option_{number}\")\n\n if check_label.isVisible():\n text = option.text()\n option_text += text + \" / \"\n \n item = QListWidgetItem()\n item.setText(option_text)\n item.setTextAlignment(Qt.AlignCenter) # 가운데 정렬\n \n option_pay = self.option_dialog.price_label_2.text()\n item_text = option_text + option_pay\n item.setText(item_text)\n \n self.order_list.addItem(item) #리스트위젯에 옵션값 입력\n self.option_dialog.cancelbutton()\n \n \n def check_button(self): #결제 완료 후 확인버튼\n self.pay_dialog.hide()\n self.pay_dialog_2.hide()\n self.cancel()\n \n r_read = requests.get(url_post_personcheck+\"/la\", headers=headers)\n r_read.raise_for_status()\n data = r_read.json()\n dataread = data[\"m2m:cin\"][\"con\"]\n if dataread==\"1\": #Mobius에서 값을 받아옴, PRI센서와 초음파센서가 작동하면 1\n self.wcbutton.setVisible(True) #초기 화면 나타남\n self.wcbutton.setEnabled(True)\n self.wcbutton.raise_() \n \n\n \n \n def card_pay_2(self): #카드 결제\n id,text=reader.read()\n self.pay_dialog.label_5.setText(str(id))\n \n r_read = requests.get(url_get_cnt, headers=headers)\n r_read.raise_for_status()\n data = r_read.json()\n id, text = reader.read()\n findcnt=search+\"/\"+str(id)\n if findcnt in data ['m2m:uril']:\n url_money = \"http://203.253.128.177:7579/Mobius/20191546/\" + str(id)\n money = requests.get(url_money+\"/la\", headers=headers)\n money.raise_for_status()\n dataa = money.json()\n money_value = dataa[\"m2m:cin\"][\"con\"]\n money_value_2 = self.price_label.text()\n \n if int(money_value) >= int(money_value_2) : #카드잔액>=결제금액 조건이 성립하면 실행\n money_value_3 = int(money_value) - int(money_value_2)\n self.testlabel.setText(str(money_value_3))\n \n money_data = {\n \"m2m:cin\" : {\n \"con\" : str(money_value_3)\n }\n }\n \n money_post = requests.post(url_money,headers = headers, json 
= money_data)\n \n else: #카드잔액이 부족할 경우\n self.testlabel.setText(\"no money\")\n \n \n requests.post(url_post_personcheck, headers = headers, json = data_post)\n self.pay_dialog_2.check_button_2.hide()\n self.pay_dialog_2.check_button.show()\n else:\n asd = \"invalid card\"\n self.pay_dialog.label_5.setText(str(asd))\n\n\n def card_pay (self): #카드결제 버튼 클릭 시\n def card_pay_thread():\n self.card_pay_2()\n \n self.pay_dialog_2.show()\n self.pay_dialog_2.check_button.hide()\n self.pay_dialog_2.check_button_2.show()\n self.pay_dialog_2.label1.setText(\"카드 결제\")\n self.pay_dialog_2.label2.setText(\"카드를 리더기에 접촉시켜 주세요\")\n self.pay_dialog_2.label3.setText(\"결제가 완료될 때 까지 카드를 떼지 마세요!\")\n \n thread = threading.Thread (target = card_pay_thread)\n thread.start()\n \n \n \n #QMessageBox.information(self.pay_dialog, '카드 결제', '카드를 리더기에 접촉시켜 주세요')\n \n \n \n \n \n \n def cash_pay(self):\n self.pay_dialog_2.show()\n self.pay_dialog_2.label1.setText(\"현금 결제\")\n self.pay_dialog_2.label2.setText(\"현금 결제는 카운터에서 가능합니다\")\n self.pay_dialog_2.label3.setText(\"카운터로 이동해주세요\")\n \n \n def cancel(self): #취소버튼\n self.price_label.setText('0') \n self.num_label.setText('0')\n self.count_label_1.setText('0')\n self.count_label_2.setText('0')\n self.count_label_3.setText('0')\n self.order_list.clear()\n #각 메뉴 갯수 및 주문수량, 주문금액 버튼값 0으로 설정\n\n\n def test1 (self): #mobius 테스트\n r = requests.get(url_get, headers = headers)\n r.raise_for_status()\n jr = r.json()\n self.testlabel.setText(jr[\"m2m:cin\"][\"con\"])\n \n \n \n def pay_check(self): #결제버튼\n pay_values = [int(self.count_label_1.text()), int(self.count_label_2.text()), int(self.count_label_3.text())] #메뉴 개수\n \n price_value = self.price_label.text() #주문금액 값\n num_value = self.num_label.text() #주문수량 값\n \n self.pay_dialog.label_6.setText(\"주문 금액 \"+str(price_value)+\"원\")\n self.pay_dialog.label_7.setText(\"주문 수량 \"+str(num_value)+\"개\")\n \n #data = {\"m2m:cin\": {\"con\" : price_value}} #mobius에 값 전송\n\n #r = requests.post(url_post, headers=headers, json=data)\n\n\n def increment_num(self,index): # +버튼\n if index == 1:\n count_label = self.count_label_1\n price_value = 3000 #1번메뉴 가격\n elif index == 2:\n count_label = self.count_label_2\n price_value = 4000 #2번메뉴 가격\n elif index == 3:\n count_label = self.count_label_3\n price_value = 5000 #3번메뉴 가격\n else:\n return\n\n current_value = int(count_label.text()) \n count_label.setText(str(current_value + 1)) #메뉴 개수 증가\n self.num_label_update() #주문수량 업데이트 함수로 이동\n \n current_price = int(self.price_label.text()) \n self.price_label.setText(str(current_price + price_value)) #주문금액에 추가된 메뉴가격만큼 증가\n \n def decrement_num(self,index): # -버튼\n if index == 1:\n count_label = self.count_label_1\n price_value = 3000\n elif index == 2:\n count_label = self.count_label_2\n price_value = 4000\n elif index == 3:\n count_label = self.count_label_3\n price_value = 5000\n else:\n return\n\n current_value = int(count_label.text())\n\n if current_value>0: #개수가 0이하일 경우 동작하지 않음\n count_label.setText(str(current_value - 1))\n self.num_label_update()\n \n current_price = int(self.price_label.text())\n self.price_label.setText(str(current_price - price_value))\n \n \n def num_label_update(self): #주문수량 업데이트\n count_1 = int(self.count_label_1.text())\n count_2 = int(self.count_label_2.text())\n count_3 = int(self.count_label_3.text())\n\n total_count = count_1 + count_2 + count_3 #1번,2번,3번메뉴 수량 합침\n\n self.num_label.setText(str(total_count))\n \n\n def show_option_dialog(self, index): #옵션ui 불러오기\n if index == 1:\n self.option_dialog.coffee1.show()\n 
self.option_dialog.coffee2.hide()\n self.option_dialog.coffee3.hide()\n self.option_dialog.coffee_name.setText(\"아메리카노\")\n self.option_dialog.coffee_price.setText(\"₩ 3,000\")\n elif index == 2:\n self.option_dialog.coffee1.hide()\n self.option_dialog.coffee2.show()\n self.option_dialog.coffee3.hide()\n self.option_dialog.coffee_name.setText(\"카푸치노\")\n self.option_dialog.coffee_price.setText(\"₩ 4,000\")\n elif index == 3:\n self.option_dialog.coffee1.hide()\n self.option_dialog.coffee2.hide()\n self.option_dialog.coffee3.show()\n self.option_dialog.coffee_name.setText(\"카라멜 마키아또\")\n self.option_dialog.coffee_price.setText(\"₩ 5,000\")\n \n self.option_dialog.show() \n \n\n def disable_wcbutton(self): #첫 화면 숨김 및 비활성화\n self.wcbutton.setEnabled(False)\n self.wcbutton.setVisible(False)\n self.wcbutton.lower()\n \n def open_payform(self): #결제화면ui 불러오기\n self.pay_dialog.show()\n self.pay_dialog.label_4.returnPressed.connect(self.value_in) #엔터 입력시 결제화면ui값 계산 함수로 이동\n \n \n \n def value_in(self): #결제화면ui 값 계산\n text = self.pay_dialog.label_4.text()\n if text.isdigit(): #결제화면 label에 값이 있을 경우 실행 \n value=int(text)\n if value >= 0:\n self.pay_dialog.hide() \n self.lineEdit.setText(str(int(self.pay_dialog.label_4.text()) - int(self.price_label.text()))) #메인화면에 카드잔액-결제금액 값 표시\n self.pay_dialog.label_4.clear() #결제화면ui에 있는 값 초기화\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n widget = MyWidget()\n widget.show()\n sys.exit(app.exec_())\n","repo_name":"PCY00/IoT_PlatForm","sub_path":"Project/Final/GUI/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":17547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"1974507336","text":"input_data= input()\nrow=int(input_data[1])\ncolumn=int(ord(input_data[0]))-int(ord('a'))+1\n\ncount=0\n\nstep=[(-2,-1),(-2,1),(2,-1),(2,1),(-1,-2),(1,-2),(-1,2),(1,2)]\n\nfor i in step:\n next_row= row + i[0]\n next_column= column + i[1]\n if next_row>=1 and next_row<=8 and next_column>=1 and next_column<=8:\n count+=1\nprint(count)\n \n","repo_name":"ISANGDEV/Algorithm_Study","sub_path":"2_Simulation/hanseong/implementation -knight.py","file_name":"implementation -knight.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"6607604704","text":"#Dieses Script nimmt automatische Annotationen im TEI XML Standard vor.\n#Autor: Benjamin Spendrin\n#Email: benjamin.spendrin@posteo.de\n\n#ToDo\n#Was passiert, wenn erst PauseN und dann Pause annotiert werden?\n#Dateipfade eingeben statt hard codiert\n#Sanity Checks --> Gucken, dass Tag nicht schon vorhanden\n#Ausgabe, wenn sowas passiert?\n#Fehlerausgaben\n#Ganz weit weg: simple GUI, mit Eingabe der Tags?\n\n###########################################################\n#Backups anlegen\ndef BackupXML(ListFilename):\n import shutil #Bibliothek zum Dateien kopieren\n fileList = open(ListFilename, 'r').read().splitlines()\n for line in fileList:\n shutil.copyfile((line+\".xml\"),(line+\".xml.backup\"))\n###########################################################\n\n###########################################################\n#Backups einspielen (nur fürs Testen)\ndef restoreXMLBackups(ListFilename):\n import shutil #Bibliothek zum Dateien kopieren\n fileList = open(ListFilename, 'r').read().splitlines()\n for line in fileList:\n 
shutil.copyfile((line+\".xml.backup\"),(line+\".xml\"))\n###########################################################\n\n###########################################################\n#Einlesen von Tags und Wörtern, die getaggt werden sollen\ndef readAnnotationData(WordListPath):\n listOfWords = open(WordListPath, 'r').read().splitlines()\n###########################################################\n\n###########################################################\n###########################################################\n###########################################################\n\nimport re #Library für RegEx\nimport os\n\n\n#Pfade für Dateien\nworkingDir = \"./\"\nListFilename = \"./fileList.txt\"\nWordListPath = \"./wordList.txt\"\nSpeicherortPath=\"./annotierteDateien/\"\n#Nur im Testen: Backups wiederherstellen (später Backups anlegen!)\n#restoreXMLBackups(ListFilename)\n#BackupXML(ListFilename)\n\n#Gehe alle Dateien aus der fileList durch\nfileList = open(ListFilename, 'r').read().splitlines()\n#print(fileList)\n\nfor currFile in fileList:\n\n # Aktuelle Datei einlesen\n with open((workingDir+currFile+\".xml\"), 'r') as file :\n filedata = file.read()\n print(\"Opening File: \" + currFile + \".xml\")\n\n # Zeichenkette ersetzen\n ### Einlesen von Tags und Wörtern\n wordList = open(WordListPath, 'r').read().splitlines()\n\n #Gehe alle Wörter durch\n for currWord in wordList:\n #Wenn aktuelles Wort mit # beginnt, setze aktuelles Tag darauf\n if currWord.startswith(\"#\"):\n currTag = currWord[1:] #Entferne das # (=den 1. char des strings)\n print(\"Current Tag: \" + currTag)\n else:\n #Annotiert wird:\n #Wenn das Wort von Leerzeichen angeführt und gefolgt wird\n #a) von einem Leerzeichen\n filedata = filedata.replace(\" \" + currWord + \" \", (\" \"+ currWord + \" \"))\n #b) von einem Komma\n filedata = filedata.replace(\" \" + currWord + \",\", (\" \"+ currWord + \",\"))\n #c) von einem Punkt (= Satzende)\n filedata = filedata.replace(\" \" + currWord + \".\", (\" \"+ currWord + \".\"))\n #d) von einem Ausrufezeichen\n filedata = filedata.replace(\" \" + currWord + \"!\", (\" \"+ currWord + \"!\"))\n #e) von einem Fragezeichen\n filedata = filedata.replace(\" \" + currWord + \"?\", (\" \"+ currWord + \"?\"))\n\n\n # {\\b\\w*(Paus)\\w*\\b}\n #aktueller RegEx, nach der gesucht werden soll: Optionen r (kein Escape bei \\; f --> Variable kann in {} stehen\n #currRegEx = f'\\b\\w*({currWord})\\w*\\b'\n\n\n #aktueller String, der eingesetzt werden soll (= mit dem Tag)\n #currRepString = \"\"+ currWord + \"\"\n# currRepString = \"\"\n\n# print(\"Current Word: \" + currWord)\n# print(\"CurrRegEx: \" + f'\\b\\w*({currWord})\\w*\\b')\n# print(\"Curr RepString: \\n\" + currRepString)\n #filedata = re.sub(currWord, currRepString, filedata)\n #filedata = re.sub(currRegEx, \"\" + r\"\\1\" + \"<\\term>\", filedata, flags=re.IGNORECASE)\n\n # Datei schreiben\n if os.path.exists(SpeicherortPath) == False:\n os.makedirs(SpeicherortPath)\n\n with open(SpeicherortPath + currFile + \".xml\", \"w\") as file:\n file.write(filedata)\n print(\"Wrote File: \" + SpeicherortPath+currFile+\".xml\")","repo_name":"bspendrin/AnnotationTool","sub_path":"source/deprecated/AnnotationTool.py","file_name":"AnnotationTool.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33637176622","text":"import datetime\nimport enum\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import 
Optional\nfrom typing import Tuple\n\nimport pytest\n\nfrom tests_tags.tags import tags_tools\nfrom tests_tags.tags import yql_tools\n\n_TEST_PROVIDERS = [\n tags_tools.Provider.from_id(1000, True),\n tags_tools.Provider.from_id(1001, False),\n tags_tools.Provider.from_id(1002, False),\n tags_tools.Provider.from_id(1003, True),\n tags_tools.Provider.from_id(1004, True),\n tags_tools.Provider.from_id(1005, True),\n tags_tools.Provider.from_id(1006, True),\n tags_tools.Provider.from_id(1007, True),\n tags_tools.Provider.from_id(1008, False),\n tags_tools.Provider.from_id(1009, False),\n tags_tools.Provider.from_id(1010, False),\n tags_tools.Provider.from_id(1011, False),\n tags_tools.Provider.from_id(1012, True),\n tags_tools.Provider.from_id(1013, True),\n tags_tools.Provider.from_id(1014, False),\n tags_tools.Provider.from_id(1015, False),\n]\n\n_TEST_ENTITIES = [\n tags_tools.Entity(1000, 'license_0'),\n tags_tools.Entity(1001, 'license_1'),\n]\n\n_TEST_TAG_NAMES = [\n tags_tools.TagName(1000, 'tag_0'),\n tags_tools.TagName(1001, 'tag_1'),\n tags_tools.TagName(1002, 'tag_2'),\n tags_tools.TagName(1003, 'tag_3'),\n]\n\n_TEST_SERVICE_NAMES = ['service_test_0', 'service_test_1']\n\n_BASE = 'base'\n_AUDITED = 'audited'\n_UNIQUE_DRIVER_ID = 'udid'\n_INFINITY = datetime.datetime(9999, 12, 31, 23, 59, 59, 999999)\n_NOW = datetime.datetime(2019, 10, 25, 12, 45, 32, 0)\n_EXPIRATION = _NOW + datetime.timedelta(hours=24)\n\n\ndef _tag(\n name_index: int,\n provider_index: int,\n entity_index: int,\n updated: Optional[str] = None,\n ttl: datetime.datetime = _INFINITY,\n):\n return tags_tools.Tag(\n _TEST_TAG_NAMES[name_index].tag_name_id,\n _TEST_PROVIDERS[provider_index].provider_id,\n _TEST_ENTITIES[entity_index].entity_id,\n updated=updated,\n ttl=ttl,\n )\n\n\ndef _search_result(providers: List[tags_tools.Provider]):\n not_sorted = [provider.search_result() for provider in providers]\n return sorted(not_sorted, key=lambda provider: provider.get('id'))\n\n\ndef _insert_service_providers(data: List[Tuple[int, List[str], str]]):\n query = (\n 'INSERT INTO service.providers'\n ' (provider_id, service_names, authority) '\n 'VALUES '\n )\n\n first_value = True\n for row in data:\n if not first_value:\n query += ', '\n services = '{' + ','.join('\\\"' + name + '\\\"' for name in row[1]) + '}'\n query += (\n '({provider_id}, \\'{service_names}\\', \\'{authority}\\')'.format(\n provider_id=row[0], service_names=services, authority=row[2],\n )\n )\n first_value = False\n\n return query\n\n\ndef _insert_yql_provider(\n provider_id: int,\n entity: str,\n enabled: bool = True,\n name: Optional[str] = None,\n author: str = 'author_test',\n last_modifier: str = 'last_modifier_test',\n changed: str = '2018-08-30T12:34:56.0',\n created: str = '2018-08-30T12:34:56.0',\n period: int = 1800,\n query: str = 'USE hahn via SQL;',\n syntax: str = 'SQL',\n):\n if name is None:\n name = 'name_test_' + str(provider_id)\n return yql_tools.insert_queries(\n [\n yql_tools.Query(\n name=name,\n provider_id=provider_id,\n entity_type=entity,\n tags=[],\n author=author,\n last_modifier=last_modifier,\n enabled=enabled,\n changed=changed,\n created=created,\n period=period,\n query=query,\n syntax=syntax,\n ),\n ],\n )\n\n\n@pytest.mark.nofilldb()\n@pytest.mark.pgsql(\n 'tags',\n queries=[\n tags_tools.insert_providers(_TEST_PROVIDERS),\n _insert_service_providers(\n [\n (_TEST_PROVIDERS[0].provider_id, _TEST_SERVICE_NAMES, _BASE),\n (\n _TEST_PROVIDERS[1].provider_id,\n [_TEST_SERVICE_NAMES[0]],\n _AUDITED,\n ),\n 
(_TEST_PROVIDERS[2].provider_id, _TEST_SERVICE_NAMES, _BASE),\n (\n _TEST_PROVIDERS[3].provider_id,\n _TEST_SERVICE_NAMES,\n _AUDITED,\n ),\n (\n _TEST_PROVIDERS[5].provider_id,\n [_TEST_SERVICE_NAMES[1]],\n _BASE,\n ),\n (\n _TEST_PROVIDERS[6].provider_id,\n _TEST_SERVICE_NAMES,\n _AUDITED,\n ),\n (\n _TEST_PROVIDERS[8].provider_id,\n [_TEST_SERVICE_NAMES[0]],\n _BASE,\n ),\n (\n _TEST_PROVIDERS[11].provider_id,\n _TEST_SERVICE_NAMES,\n _AUDITED,\n ),\n ],\n ),\n _insert_yql_provider(\n _TEST_PROVIDERS[12].provider_id, _UNIQUE_DRIVER_ID,\n ),\n _insert_yql_provider(\n _TEST_PROVIDERS[13].provider_id, _UNIQUE_DRIVER_ID,\n ),\n _insert_yql_provider(\n _TEST_PROVIDERS[14].provider_id, _UNIQUE_DRIVER_ID,\n ),\n _insert_yql_provider(\n _TEST_PROVIDERS[15].provider_id, _UNIQUE_DRIVER_ID,\n ),\n ],\n)\n@pytest.mark.parametrize(\n 'types, only_active, only_verified',\n [\n pytest.param(None, False, None, id='no-types'),\n pytest.param(None, True, False, id='only-active'),\n pytest.param(None, False, True, id='only-verified'),\n pytest.param(['service'], None, False, id='only-service'),\n pytest.param(['service'], None, True, id='only-verified-service'),\n pytest.param(['yql'], None, None, id='only-yql'),\n pytest.param(['manual'], None, None, id='only-manual'),\n pytest.param(\n ['service', 'yql', 'manual'], None, True, id='all-types-verified',\n ),\n pytest.param(['service', 'yql', 'manual'], None, None, id='all-types'),\n ],\n)\n@pytest.mark.parametrize(\n 'name_part, offset, limit',\n [\n pytest.param(None, None, None, id='no-limits'),\n pytest.param(None, 2, 2, id='limits-2'),\n pytest.param('abc', 0, 1, id='one-name'),\n pytest.param('01', 0, 2, id='two-names'),\n ],\n)\nasync def test_search(\n taxi_tags,\n only_active: Optional[bool],\n only_verified: Optional[bool],\n types: List[str],\n name_part: str,\n limit: int,\n offset: int,\n):\n service_providers = {\n _TEST_PROVIDERS[0].provider_id: (_TEST_SERVICE_NAMES, _BASE),\n _TEST_PROVIDERS[1].provider_id: ([_TEST_SERVICE_NAMES[0]], _AUDITED),\n _TEST_PROVIDERS[2].provider_id: (_TEST_SERVICE_NAMES, _BASE),\n _TEST_PROVIDERS[3].provider_id: (_TEST_SERVICE_NAMES, _AUDITED),\n _TEST_PROVIDERS[5].provider_id: ([_TEST_SERVICE_NAMES[1]], _BASE),\n _TEST_PROVIDERS[6].provider_id: (_TEST_SERVICE_NAMES, _AUDITED),\n _TEST_PROVIDERS[8].provider_id: ([_TEST_SERVICE_NAMES[0]], _BASE),\n _TEST_PROVIDERS[11].provider_id: (_TEST_SERVICE_NAMES, _AUDITED),\n }\n yql_providers = frozenset(\n [\n _TEST_PROVIDERS[12].provider_id,\n _TEST_PROVIDERS[13].provider_id,\n _TEST_PROVIDERS[14].provider_id,\n _TEST_PROVIDERS[15].provider_id,\n ],\n )\n\n data: Dict[str, Any] = dict()\n if types:\n data['types'] = types\n if only_active is not None:\n data['only_active'] = only_active\n if name_part is not None:\n data['name_part'] = name_part\n if types and 'service' in types and only_verified is not None:\n data['only_verified'] = only_verified\n if limit is not None:\n data['limit'] = limit\n if offset is not None:\n data['offset'] = offset\n\n response = await taxi_tags.post('v1/providers/search', data)\n assert response.status_code == 200\n\n service_type = 'service' in types if types else False\n yql_type = 'yql' in types if types else False\n manual_type = 'manual' in types if types else False\n result = []\n for provider in _TEST_PROVIDERS:\n provider_id = provider.provider_id\n if only_active and not provider.is_active:\n continue\n if name_part and provider.name.find(name_part) == -1:\n continue\n skip = service_type or yql_type or manual_type\n if (\n 
service_type\n and provider_id in service_providers\n and (\n not only_verified\n or service_providers[provider_id][1] == _AUDITED\n )\n ):\n skip = False\n if yql_type and provider_id in yql_providers:\n skip = False\n if (\n manual_type\n and provider_id not in service_providers\n and provider_id not in yql_providers\n ):\n skip = False\n if skip:\n continue\n provider_type = 'manual'\n names = None\n authority = None\n if provider_id in service_providers:\n provider_type = 'service'\n names, authority = service_providers[provider_id]\n elif provider_id in yql_providers:\n provider_type = 'yql'\n names = ['name_test_' + str(provider_id)]\n item = {\n 'id': provider.name,\n 'is_active': provider.is_active,\n 'description': provider.desc,\n 'source': {'type': provider_type},\n }\n if names is not None:\n item['source']['names'] = names\n if authority is not None:\n item['source']['authority'] = authority\n result.append(item)\n\n offset = offset or 0\n limit = limit or 200\n data_json = response.json().get('data')\n assert data_json == result[offset : offset + limit]\n\n\n@pytest.mark.nofilldb()\n@pytest.mark.parametrize(\n 'data, expected_code',\n [\n ({'limit': 0, 'offset': 0}, 400),\n ({'limit': -1, 'offset': 10}, 400),\n ({'offset': -1}, 400),\n ({'only_verified': True}, 400),\n ({'only_verified': True, 'types': ['yql', 'manual']}, 400),\n ({'types': []}, 400),\n ({'types': ['type_invalid', 'yql', 'manual']}, 400),\n ({'name_part': ''}, 400),\n ],\n)\nasync def test_search_bad_input(taxi_tags, data, expected_code):\n response = await taxi_tags.post('v1/providers/search', data)\n assert response.status_code == expected_code\n\n\n@pytest.mark.pgsql(\n 'tags',\n queries=[\n tags_tools.insert_providers(_TEST_PROVIDERS[:4]),\n _insert_service_providers(\n [\n (\n _TEST_PROVIDERS[0].provider_id,\n _TEST_SERVICE_NAMES[:1],\n _BASE,\n ),\n (\n _TEST_PROVIDERS[1].provider_id,\n _TEST_SERVICE_NAMES,\n _AUDITED,\n ),\n ],\n ),\n _insert_yql_provider(\n _TEST_PROVIDERS[2].provider_id, _UNIQUE_DRIVER_ID,\n ),\n ],\n)\n@pytest.mark.parametrize('verified', [False, True])\n@pytest.mark.parametrize(\n 'provider_id, data, expected_code, expected_search',\n [\n (None, {}, 400, None),\n ('new_provider', {}, 400, []),\n (\n _TEST_PROVIDERS[0].name,\n {},\n 400,\n [\n {\n 'id': _TEST_PROVIDERS[0].name,\n 'is_active': _TEST_PROVIDERS[0].is_active,\n 'description': _TEST_PROVIDERS[0].desc,\n 'source': {\n 'type': 'service',\n 'names': _TEST_SERVICE_NAMES[:1],\n 'authority': _BASE,\n },\n },\n ],\n ),\n (' invalid_provider_name ', {}, 400, None),\n (\n _TEST_PROVIDERS[0].name,\n {'description': 'overwrite_base_query'},\n 200,\n [\n {\n 'id': _TEST_PROVIDERS[0].name,\n 'is_active': _TEST_PROVIDERS[0].is_active,\n 'description': 'overwrite_base_query',\n 'source': {\n 'type': 'service',\n 'names': _TEST_SERVICE_NAMES[:1],\n 'authority': _BASE,\n },\n },\n ],\n ),\n (\n _TEST_PROVIDERS[0].name,\n {\n 'description': 'overwrite_base_query',\n 'services': _TEST_SERVICE_NAMES,\n },\n 200,\n [\n {\n 'id': _TEST_PROVIDERS[0].name,\n 'is_active': _TEST_PROVIDERS[0].is_active,\n 'description': 'overwrite_base_query',\n 'source': {\n 'type': 'service',\n 'names': _TEST_SERVICE_NAMES,\n 'authority': _BASE,\n },\n },\n ],\n ),\n (\n _TEST_PROVIDERS[2].name,\n {'description': 'overwrite_yql_query'},\n 200,\n [\n {\n 'id': _TEST_PROVIDERS[2].name,\n 'is_active': True,\n 'description': 'overwrite_yql_query',\n 'source': {\n 'type': 'yql',\n 'names': [\n 'name_test_' + str(_TEST_PROVIDERS[2].provider_id),\n ],\n },\n },\n ],\n 
),\n (\n _TEST_PROVIDERS[3].name,\n {'description': 'overwrite_manual_query'},\n 200,\n [\n {\n 'id': _TEST_PROVIDERS[3].name,\n 'is_active': True,\n 'description': 'overwrite_manual_query',\n 'source': {'type': 'manual'},\n },\n ],\n ),\n (\n 'new_provider',\n {'description': 'create_manual_query'},\n 200,\n [\n {\n 'id': 'new_provider',\n 'is_active': True,\n 'description': 'create_manual_query',\n 'source': {'type': 'manual'},\n },\n ],\n ),\n (\n 'new_provider',\n {\n 'description': 'create_service_query',\n 'services': _TEST_SERVICE_NAMES,\n },\n 200,\n [\n {\n 'id': 'new_provider',\n 'is_active': True,\n 'description': 'create_service_query',\n 'source': {\n 'type': 'service',\n 'names': _TEST_SERVICE_NAMES,\n 'authority': _BASE,\n },\n },\n ],\n ),\n (\n _TEST_PROVIDERS[2].name,\n {\n 'description': 'overwrite_yql_query',\n 'services': _TEST_SERVICE_NAMES,\n },\n 403,\n [\n {\n 'id': _TEST_PROVIDERS[2].name,\n 'is_active': _TEST_PROVIDERS[2].is_active,\n 'description': _TEST_PROVIDERS[2].desc,\n 'source': {\n 'type': 'yql',\n 'names': [\n 'name_test_' + str(_TEST_PROVIDERS[2].provider_id),\n ],\n },\n },\n ],\n ),\n (\n _TEST_PROVIDERS[3].name,\n {\n 'description': 'overwrite_yql_query',\n 'services': _TEST_SERVICE_NAMES,\n },\n 403,\n [\n {\n 'id': _TEST_PROVIDERS[3].name,\n 'is_active': _TEST_PROVIDERS[3].is_active,\n 'description': _TEST_PROVIDERS[3].desc,\n 'source': {'type': 'manual'},\n },\n ],\n ),\n ],\n)\n@pytest.mark.nofilldb()\nasync def test_providers(\n taxi_tags, verified, provider_id, data, expected_code, expected_search,\n):\n query = 'v1/providers/verified/items' if verified else 'v1/providers/items'\n if provider_id:\n query += '?id=%s' % provider_id\n response = await taxi_tags.put(query, data)\n assert response.status_code == expected_code\n\n if expected_search is not None:\n search_result = await taxi_tags.post(\n 'v1/providers/search', {'name_part': provider_id},\n )\n assert search_result.status_code == 200\n data_json = search_result.json().get('data')\n assert data_json == expected_search\n\n\n@pytest.mark.pgsql(\n 'tags',\n queries=[\n tags_tools.insert_providers(_TEST_PROVIDERS[:1]),\n _insert_service_providers(\n [(_TEST_PROVIDERS[0].provider_id, _TEST_SERVICE_NAMES, _AUDITED)],\n ),\n ],\n)\n@pytest.mark.parametrize(\n 'verified, provider_id, data, expected_code, expected_search',\n [\n (\n False,\n _TEST_PROVIDERS[0].name,\n {'description': 'overwrite_audited_query'},\n 403,\n [\n {\n 'id': _TEST_PROVIDERS[0].name,\n 'is_active': _TEST_PROVIDERS[0].is_active,\n 'description': _TEST_PROVIDERS[0].desc,\n 'source': {\n 'type': 'service',\n 'names': _TEST_SERVICE_NAMES,\n 'authority': _AUDITED,\n },\n },\n ],\n ),\n (\n True,\n _TEST_PROVIDERS[0].name,\n {'description': 'overwrite_audited_query'},\n 200,\n [\n {\n 'id': _TEST_PROVIDERS[0].name,\n 'is_active': True,\n 'description': 'overwrite_audited_query',\n 'source': {\n 'type': 'service',\n 'names': _TEST_SERVICE_NAMES,\n 'authority': _AUDITED,\n },\n },\n ],\n ),\n (\n True,\n _TEST_PROVIDERS[0].name,\n {\n 'description': 'overwrite_audited_query',\n 'services': _TEST_SERVICE_NAMES[:1],\n },\n 200,\n [\n {\n 'id': _TEST_PROVIDERS[0].name,\n 'is_active': True,\n 'description': 'overwrite_audited_query',\n 'source': {\n 'type': 'service',\n 'names': _TEST_SERVICE_NAMES[:1],\n 'authority': _AUDITED,\n },\n },\n ],\n ),\n ],\n)\n@pytest.mark.nofilldb()\nasync def test_audited_providers(\n taxi_tags, verified, provider_id, data, expected_code, expected_search,\n):\n query = 'v1/providers/verified/items' if 
verified else 'v1/providers/items'\n if provider_id:\n query += '?id=%s' % provider_id\n response = await taxi_tags.put(query, data)\n assert response.status_code == expected_code\n\n search_result = await taxi_tags.post(\n 'v1/providers/search', {'name_part': provider_id},\n )\n assert search_result.status_code == 200\n data_json = search_result.json().get('data')\n assert data_json == expected_search\n\n\ndef _verify_updated_tags(provider_name: str, db, tags_before: Dict):\n provider_id = tags_tools.find_provider_id(provider_name, db)\n assert provider_id\n\n cursor = db.cursor()\n cursor.execute(\n f'SELECT tag_name_id, provider_id, entity_id, '\n 'updated, ttl, revision, entity_type '\n f'FROM state.tags WHERE provider_id={provider_id};',\n )\n\n rows = list(row for row in cursor)\n for row in rows:\n tag_name_id, updated = row[0], row[3]\n if tag_name_id in tags_before:\n _, last_updated = tags_before[tag_name_id]\n assert updated > last_updated\n\n\nasync def _verify_tag_in_cache(\n taxi_tags,\n entity_type: str,\n entity_value: str,\n tag_name: str,\n should_exist: bool,\n):\n data = {'entities': [{'type': entity_type, 'id': entity_value}]}\n response = await taxi_tags.post('v1/match', data)\n\n assert response.status_code == 200\n json = response.json()['entities']\n assert len(json) == 1\n tags = set(json[0]['tags'])\n assert (tag_name in tags) == should_exist\n\n\ndef collect_provider_info(provider_name: str, db):\n provider_id = tags_tools.find_provider_id(provider_name, db, 1)\n cursor = db.cursor()\n cursor.execute(\n 'SELECT t.tag_name_id, p.active, t.updated FROM '\n 'state.tags as t JOIN state.providers as p ON t.provider_id=p.id '\n 'WHERE provider_id={};'.format(provider_id),\n )\n\n rows = list(row for row in cursor)\n data = {row[0]: row[1:] for row in rows}\n return data\n\n\nclass Action(enum.Enum):\n Activate = 1\n Deactivate = 2\n\n\nasync def _check_provider_active(taxi_tags, provider_id: int, activate: bool):\n search_result = await taxi_tags.post('v1/providers/search', {})\n assert search_result.status_code == 200\n\n data_json = search_result.json().get('data')\n for record in data_json:\n if record.get('id') == provider_id:\n assert record.get('is_active') == activate\n return\n assert False, f'expected to find record with id={provider_id}'\n\n\ndef _verify_status_queue_empty(db):\n cursor = db.cursor()\n cursor.execute('SELECT * FROM service.tags_update_queue;')\n rows = list(row for row in cursor)\n assert not rows\n\n\n@pytest.mark.pgsql(\n 'tags',\n queries=[\n tags_tools.insert_providers(_TEST_PROVIDERS[:4]),\n tags_tools.insert_tag_names(_TEST_TAG_NAMES),\n tags_tools.insert_entities([_TEST_ENTITIES[0]]),\n tags_tools.insert_tags(\n [\n tags_tools.Tag(\n _TEST_TAG_NAMES[0].tag_name_id,\n _TEST_PROVIDERS[0].provider_id,\n _TEST_ENTITIES[0].entity_id,\n entity_type=_TEST_ENTITIES[0].type,\n ),\n tags_tools.Tag(\n _TEST_TAG_NAMES[1].tag_name_id,\n _TEST_PROVIDERS[1].provider_id,\n _TEST_ENTITIES[0].entity_id,\n entity_type=_TEST_ENTITIES[0].type,\n ),\n tags_tools.Tag(\n _TEST_TAG_NAMES[2].tag_name_id,\n _TEST_PROVIDERS[2].provider_id,\n _TEST_ENTITIES[0].entity_id,\n entity_type=_TEST_ENTITIES[0].type,\n ),\n tags_tools.Tag(\n _TEST_TAG_NAMES[3].tag_name_id,\n _TEST_PROVIDERS[3].provider_id,\n _TEST_ENTITIES[0].entity_id,\n entity_type=_TEST_ENTITIES[0].type,\n ),\n ],\n ),\n ],\n)\n@pytest.mark.parametrize(\n 'record_index, actions',\n [\n (0, [Action.Activate, Action.Deactivate]),\n (1, [Action.Deactivate, Action.Activate]),\n (2, [Action.Activate]),\n 
(3, [Action.Activate, Action.Deactivate]),\n ],\n)\nasync def test_providers_activate(\n taxi_tags, taxi_config, record_index, actions, pgsql,\n):\n final_action = actions[-1]\n provider_name = _TEST_PROVIDERS[record_index].name\n tag_name = _TEST_TAG_NAMES[record_index].name\n\n old_data = collect_provider_info(provider_name, pgsql['tags'])\n\n for action in actions:\n is_activate = action == Action.Activate\n query = 'v1/providers/activation_status?id=%s' % provider_name\n response = await taxi_tags.put(query, {'activate': is_activate})\n assert response.status_code == 200\n assert response.json() == {'status': 'ok'}\n await _check_provider_active(taxi_tags, provider_name, is_activate)\n\n await tags_tools.activate_task(taxi_tags, 'tags-updater')\n await tags_tools.activate_task(taxi_tags, 'customs-officer')\n await taxi_tags.invalidate_caches()\n\n _verify_updated_tags(provider_name, pgsql['tags'], old_data)\n await _verify_tag_in_cache(\n taxi_tags,\n 'driver_license',\n 'license_0',\n tag_name,\n final_action == Action.Activate,\n )\n _verify_status_queue_empty(pgsql['tags'])\n\n\n@pytest.mark.pgsql(\n 'tags',\n queries=[\n tags_tools.insert_providers(_TEST_PROVIDERS[:2]),\n _insert_service_providers(\n [\n (\n _TEST_PROVIDERS[0].provider_id,\n _TEST_SERVICE_NAMES[:1],\n _BASE,\n ),\n (\n _TEST_PROVIDERS[1].provider_id,\n _TEST_SERVICE_NAMES,\n _AUDITED,\n ),\n ],\n ),\n ],\n)\n@pytest.mark.parametrize(\n 'provider_name', [_TEST_PROVIDERS[0].name, _TEST_PROVIDERS[1].name],\n)\nasync def test_providers_verify(taxi_tags, provider_name):\n query = '/v1/providers/verify?id=%s' % provider_name\n response = await taxi_tags.put(query)\n assert response.status_code == 200\n\n search_result = await taxi_tags.post(\n 'v1/providers/search',\n {\n 'name_part': provider_name,\n 'types': ['service'],\n 'only_verified': True,\n },\n )\n assert search_result.status_code == 200\n data = search_result.json().get('data')\n provider = next((x for x in data if x.get('id') == provider_name), None)\n assert provider is not None\n\n\n@pytest.mark.parametrize(\n 'provider_id, expected', [(None, 400), ('', 400), ('query_0', 404)],\n)\nasync def test_activate_bad_input(\n taxi_tags, taxi_config, provider_id, expected,\n):\n query = 'v1/providers/activation_status'\n if provider_id:\n query += '?id=%s' % provider_id\n response = await taxi_tags.put(query, {'activate': True})\n assert response.status_code == expected\n\n\n@pytest.mark.nofilldb()\n@pytest.mark.now(_NOW.isoformat())\n@pytest.mark.pgsql(\n 'tags',\n queries=[\n tags_tools.insert_providers(_TEST_PROVIDERS[:2]),\n _insert_service_providers(\n [(_TEST_PROVIDERS[1].provider_id, _TEST_SERVICE_NAMES, _BASE)],\n ),\n ],\n)\n@pytest.mark.parametrize(\n 'provider_id, expected_code, expected_text',\n [\n (None, 400, 'Missing id in query'),\n ('query_0', 404, 'provider with name \"query_0\" was not found'),\n (\n _TEST_PROVIDERS[0].name,\n 404,\n 'type of provider with name \"name_1000\" is not equal to service',\n ),\n ],\n)\nasync def test_verified_bad_input(\n taxi_tags, provider_id, expected_code, expected_text,\n):\n query = '/v1/providers/verify'\n if provider_id:\n query += '?id=%s' % provider_id\n response = await taxi_tags.put(query)\n assert response.status_code == expected_code\n json = response.json()\n assert json == {'code': f'{expected_code}', 'message': 
expected_text}\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_tags/tags/test_v1_providers.py","file_name":"test_v1_providers.py","file_ext":"py","file_size_in_byte":27582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11539687419","text":"#!/usr/bin/env python3\n# ===- gen_std.py - ------------------------------------------*- python -*--===#\n#\n# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n# See https://llvm.org/LICENSE.txt for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n#\n# ===------------------------------------------------------------------------===#\n\n\"\"\"gen_std.py is a tool to generate a lookup table (from qualified names to\ninclude headers) for C/C++ Standard Library symbols by parsing archived HTML\nfiles from cppreference.\n\nThe generated files are located in clang/include/Tooling/Inclusions.\n\nCaveats and FIXMEs:\n - only symbols directly in \"std\" namespace are added, we should also add std's\n subnamespace symbols (e.g. chrono).\n - symbols with multiple variants or defined in multiple headers aren't added,\n e.g. std::move, std::swap\n\nUsage:\n 1. Install BeautifulSoup dependency, see instruction:\n https://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-beautiful-soup\n 2. Download cppreference offline HTML files (html_book_20220730.zip in Unofficial Release) at\n https://en.cppreference.com/w/Cppreference:Archives\n 3. Unzip the zip file from step 2 (e.g., to a \"cppreference\" directory). You should\n get a \"cppreference/reference\" directory.\n 4. Run the command:\n // Generate C++ symbols\n python3 gen_std.py -cppreference cppreference/reference -symbols=cpp > StdSymbolMap.inc\n // Generate C symbols\n python3 gen_std.py -cppreference cppreference/reference -symbols=c > CSymbolMap.inc\n\"\"\"\n\n\nimport cppreference_parser\nimport argparse\nimport datetime\nimport os\nimport sys\nimport re\n\n\nCODE_PREFIX = \"\"\"\\\n//===-- gen_std.py generated file -------------------------------*- C++ -*-===//\n//\n// Used to build a lookup table (qualified names => include headers) for %s\n// Standard Library symbols.\n//\n// This file was generated automatically by\n// clang/tools/include-mapping/gen_std.py, DO NOT EDIT!\n//\n// Generated from cppreference offline HTML book (modified on %s).\n//===----------------------------------------------------------------------===//\n\"\"\"\n\n\ndef ParseArg():\n parser = argparse.ArgumentParser(description=\"Generate StdGen file\")\n parser.add_argument(\n \"-cppreference\",\n metavar=\"PATH\",\n default=\"\",\n help=\"path to the cppreference offline HTML directory\",\n required=True,\n )\n parser.add_argument(\n \"-symbols\",\n default=\"cpp\",\n help=\"Generate c or cpp (removed) symbols. 
One of {cpp, c, cpp_removed}.\",\n required=True,\n )\n return parser.parse_args()\n\n\ndef AdditionalHeadersForIOSymbols(symbol):\n # IO-related symbols declared in the header, per C++\n # [iosfwd.syn 31.3.1]:\n iosfwd_symbols = [\n \"basic_ios\",\n \"basic_streambuf\",\n \"basic_istream\",\n \"basic_ostream\",\n \"basic_iostream\",\n \"basic_stringbuf\",\n \"basic_istringstream\",\n \"basic_ostringstream\",\n \"basic_stringstream\",\n \"basic_spanbuf\",\n \"basic_ispanstream\",\n \"basic_ospanstream\",\n \"basic_spanstream\",\n \"basic_filebuf\",\n \"basic_ifstream\",\n \"basic_ofstream\",\n \"basic_fstream\",\n \"basic_syncbuf\",\n \"basic_osyncstream\",\n \"istreambuf_iterator\",\n \"ostreambuf_iterator\",\n \"ios\",\n \"wios\",\n \"streambuf\",\n \"istream\",\n \"ostream\",\n \"iostream\",\n \"stringbuf\",\n \"istringstream\",\n \"ostringstream\",\n \"stringstream\",\n \"spanbuf\",\n \"ispanstream\",\n \"ospanstream\",\n \"spanstream\",\n \"filebuf\",\n \"ifstream\",\n \"ofstream\",\n \"fstream\",\n \"syncbuf\",\n \"osyncstream\",\n \"wstreambuf\",\n \"wistream\",\n \"wostream\",\n \"wiostream\",\n \"wstringbuf\",\n \"wistringstream\",\n \"wostringstream\",\n \"wstringstream\",\n \"wspanbuf\",\n \"wispanstream\",\n \"wospanstream\",\n \"wspanstream\",\n \"wfilebuf\",\n \"wifstream\",\n \"wofstream\",\n \"wfstream\",\n \"wsyncbuf\",\n \"wosyncstream\",\n \"fpos\",\n \"streampos\",\n \"wstreampos\",\n \"u8streampos\",\n \"u16streampos\",\n \"u32streampos\",\n ]\n assert len(symbol.headers) == 1\n sym_header = symbol.headers[0]\n headers = []\n # is preferred than \n\n # is an alternative of , , , .\n # per C++ [iostream.syn 31.4.1]\n if sym_header in [\"\", \"\", \"\", \"\"]:\n headers.append(\"\")\n\n if symbol.name in iosfwd_symbols:\n headers.append(\"\")\n\n return headers\n\n\ndef GetCCompatibilitySymbols(symbol):\n # C++ form of the C standard headers.\n c_compat_headers = {\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n }\n # C++ [support.c.headers.other] 17.14.7\n # ..., behaves as if each name placed in the standard library namespace by\n # the corresponding header is placed within the global namespace\n # scope, except for the functions described in [sf.cmath], the\n # std​::​lerp function overloads ([c.math.lerp]), the declaration of\n # std​::​byte ([cstddef.syn]), and the functions and function templates\n # described in [support.types.byteops].\n exception_symbols = {\n \"(assoc_)?laguerre[f|l]?\",\n \"(assoc_|sph_)?legendre[f|l]?\",\n \"beta[f|l]?\",\n \"(comp_)?ellint_[1-3][f|l]?\",\n \"(cyl_|sph_)?bessel_[i-k][f|l]?\",\n \"(cyl_|sph_)?neumann[f|l]?\",\n \"expint[f|l]?\",\n \"hermite[f|l]?\",\n \"riemann_zeta[f|l]?\",\n \"lerp\",\n \"byte\",\n }\n assert len(symbol.headers) == 1\n header = symbol.headers[0]\n if header not in c_compat_headers:\n return []\n if any(re.fullmatch(x, symbol.name) for x in exception_symbols):\n return []\n\n # Introduce two more entries, both in the global namespace, one using the\n # C++-compat header and another using the C header.\n results = []\n if symbol.namespace != None:\n # avoid printing duplicated entries, for C macros!\n results.append(cppreference_parser.Symbol(symbol.name, None, [header]))\n c_header = \"<\" + header[2:-1] + \".h>\" # => \n results.append(cppreference_parser.Symbol(symbol.name, None, [c_header]))\n return results\n\n\ndef main():\n args = ParseArg()\n if args.symbols == \"cpp\":\n page_root 
= os.path.join(args.cppreference, \"en\", \"cpp\")\n symbol_index_root = os.path.join(page_root, \"symbol_index\")\n parse_pages = [\n (page_root, \"symbol_index.html\", \"std::\"),\n # std sub-namespace symbols have separated pages.\n # We don't index std literal operators (e.g.\n # std::literals::chrono_literals::operator\"\"d), these symbols can't be\n # accessed by std::.\n #\n # std::placeholders symbols are handled manually in StdSpecialSymbolMap.inc\n (symbol_index_root, \"chrono.html\", \"std::chrono::\"),\n (symbol_index_root, \"execution.html\", \"std::execution::\"),\n (symbol_index_root, \"numbers.html\", \"std::numbers::\"),\n (symbol_index_root, \"filesystem.html\", \"std::filesystem::\"),\n (symbol_index_root, \"pmr.html\", \"std::pmr::\"),\n (symbol_index_root, \"ranges.html\", \"std::ranges::\"),\n\n (symbol_index_root, \"views.html\", \"std::ranges::views::\"),\n # std::ranges::views can be accessed as std::views.\n (symbol_index_root, \"views.html\", \"std::views::\"),\n\n (symbol_index_root, \"regex_constants.html\", \"std::regex_constants::\"),\n (symbol_index_root, \"this_thread.html\", \"std::this_thread::\"),\n # Zombie symbols that were available from the Standard Library, but are\n # removed in the following standards.\n (symbol_index_root, \"zombie_names.html\", \"std::\"),\n (symbol_index_root, \"macro.html\", None),\n ]\n elif args.symbols == \"c\":\n page_root = os.path.join(args.cppreference, \"en\", \"c\")\n symbol_index_root = page_root\n parse_pages = [(page_root, \"index.html\", None)]\n\n if not os.path.exists(symbol_index_root):\n exit(\"Path %s doesn't exist!\" % symbol_index_root)\n\n symbols = cppreference_parser.GetSymbols(parse_pages)\n\n # We don't have version information from the unzipped offline HTML files.\n # so we use the modified time of the symbol_index.html as the version.\n index_page_path = os.path.join(page_root, \"index.html\")\n cppreference_modified_date = datetime.datetime.fromtimestamp(\n os.stat(index_page_path).st_mtime\n ).strftime(\"%Y-%m-%d\")\n print(CODE_PREFIX % (args.symbols.upper(), cppreference_modified_date))\n for symbol in symbols:\n if len(symbol.headers) == 1:\n augmented_symbols = [symbol]\n augmented_symbols.extend(GetCCompatibilitySymbols(symbol))\n for s in augmented_symbols:\n s.headers.extend(AdditionalHeadersForIOSymbols(s))\n for header in s.headers:\n # SYMBOL(unqualified_name, namespace, header)\n print(\"SYMBOL(%s, %s, %s)\" % (s.name, s.namespace, header))\n elif len(symbol.headers) == 0:\n sys.stderr.write(\"No header found for symbol %s\\n\" % symbol.name)\n else:\n # FIXME: support symbols with multiple headers (e.g. 
std::move).\n sys.stderr.write(\n \"Ambiguous header for symbol %s: %s\\n\"\n % (symbol.name, \", \".join(symbol.headers))\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"llvm/llvm-project","sub_path":"clang/tools/include-mapping/gen_std.py","file_name":"gen_std.py","file_ext":"py","file_size_in_byte":10169,"program_lang":"python","lang":"en","doc_type":"code","stars":22888,"dataset":"github-code","pt":"66"} +{"seq_id":"4222580830","text":"import pyrebase\nimport sys\nimport os\n\nconfig = {\n\t\"authDomain\":\"fileencryption-214d3.firebaseio.com/\",\n\t\"databaseURL\":\"https://fileencryption-214d3.firebaseio.com/\",\n\t\"apiKey\":\"AIzaSyALXFqw4_qtLSaR6mpGnHkAQXRxot2Uuzg\",\n\t\"storageBucket\": \"https://fileencryption-214d3.firebaseio.com/\"\n\n}\n\n\nfirebase = pyrebase.initialize_app(config)\n\ndb = firebase.database() \n\ndef check_available(name):\n\tcheck = db.child(\"public\").get()\n\tcheck = check.val()\n\tfor k, v in check.items():\n\t\tif name in check:\n\t\t\tcheck[name]\n\t\t\treturn False\n\t\treturn True\n\n\ndef new_upload(name,key):\n\t'''\n\tUploads the following as:\n\tName of user|\n\t\t\t\t|_public_key: publickey\n\t\t\t\t|\n\t\t\t\t|_contacts: None (currently as they're a new user) \n\n\t'''\n\tdb.child(\"public\").child(name.lower()).child(\"public_key\").set(str(key))\n\tdb.child(\"public\").child(name.lower()).child(\"contacts\").set(\"\")\n\n\n#def upload_key(name,key):\n\t'''\n\tUploads the following as:\n\tName of user: their public key\n\tkey : value\n\t'''\n#\tdb.child(\"public\").child(name.lower()).set(str(key))\n\t\n\ndef update_last_contacted(user_to_add):\n\t# obtain the username\n\twith open(\"../facial/name.txt\") as file:\n\t\tname = file.read()\n\t# Query firebase for the user information\n\tcontacted = db.child(\"public\").child(name).child(\"contacts\").get()\n\t\n\tif contacted.val() != None:\n\t\tcontacted = contacted.val()\n\t\tif type(contacted) == list:\n\t\t\t# put latest contact at the start\n\t\t\tcontacted.insert(0,user_to_add)\n\t\telse:\n\t\t\ttry:\n\t\t\t\t# Setup turns this to a string whereas a current user will return a list\n\t\t\t\tcontacted = contacted.split(',') \n\t\t\texcept AttributeError:\n\t\t\t\tpass \n\t\t\tcontacted.insert(0,user_to_add) # This makes the latest user the top of the list.\n\t\t\n\t\t# Update firebase with the new addition\n\t\tupdate = db.child(\"public\").child(name.lower()).child(\"contacts\").set(contacted) \n\n\n\n\ndef query_retrieve_key(name):\n\t'''\n\tAll keys are currently stored under public -> name of user -> public key : key\n\t'''\n\tusers = db.child(\"public\").child(name).child(\"public_key\").get()\n\tif users.val() != None:\n\t\treturn( users.val())\n\telse:\n\t\treturn(False)\n\ndef query_retrieve_contacts():\n\t'''\n\tQuery the last three contacted of user\n\t'''\n\twith open(\"../facial/name.txt\") as file:\n\t\tname = file.read()\n\n\tcontacted = db.child(\"public\").child(name).child(\"contacts\").get()\n\t\n\tif contacted.val() != None:\n\t\treturn contacted.val()\n\telse:\n\t\treturn False\n\n\n\ndef query_contact_list():\n\t'''\n\tQuery list of our users\n\t'''\n\n\tusers = db.child(\"public\").get()\n\t\n\tif users.val() != None:\n\t\tfor u in users.val():\n\t\t\treturn u\n\t\n\nif __name__ == 
'__main__':\n\tquery_retrieve_contacts()","repo_name":"PbernsY/PythonPam","sub_path":"code/facial/fire_data.py","file_name":"fire_data.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"11982230012","text":"import math\n#negative weights included\ndef bellman_ford(vertices,edges,src):\n # array storing distance bw two nodes after relaxing\n distance=[math.inf]*(vertices+1)\n # distance from starting point to starting point\n distance[src]=0\n # run loop for vertices-1 time to find best possible and\n for i in range(vertices-1):\n for edge in edges:\n src,dest,weight=edge[0],edge[1],edge[2]\n if distance[src]+weight 1:\n uris = []\n pls = []\n for uri in args[1:]:\n try:\n uri = GLib.filename_to_uri(uri)\n except:\n pass\n f = Gio.File.new_for_uri(uri)\n if not f.query_exists():\n uri = GLib.filename_to_uri(\n \"%s/%s\" % (GLib.get_current_dir(), uri))\n f = Gio.File.new_for_uri(uri)\n if is_audio(f):\n uris.append(uri)\n elif is_pls(f):\n pls.append(uri)\n else:\n info = f.query_info(Gio.FILE_ATTRIBUTE_STANDARD_TYPE,\n Gio.FileQueryInfoFlags.NONE,\n None)\n if info.get_file_type() == Gio.FileType.DIRECTORY:\n uris.append(uri)\n if pls:\n from gi.repository import TotemPlParser\n parser = TotemPlParser.Parser.new()\n parser.connect(\"entry-parsed\",\n self.__on_entry_parsed, uris)\n parser.parse_async(uri, True, None,\n self.__on_parse_finished, uris)\n else:\n self.__on_parse_finished(None, None, uris)\n elif self.__window is not None:\n if not self.__window.is_visible():\n self.__window.present()\n self.player.emit(\"status-changed\")\n self.player.emit(\"current-changed\")\n Gdk.notify_startup_complete()\n except Exception as e:\n Logger.error(\"Application::__on_command_line(): %s\", e)\n return 0\n\n def __on_parse_finished(self, parser, result, uris):\n \"\"\"\n Play stream\n @param parser as TotemPlParser.Parser\n @param result as Gio.AsyncResult\n @param uris as [str]\n \"\"\"\n def scanner_update():\n self.__scanner_timeout_id = None\n self.player.play_uris(self.__scanner_uris)\n self.scanner.update(ScanType.EPHEMERAL, self.__scanner_uris)\n self.__scanner_uris = []\n\n if self.__scanner_timeout_id is not None:\n GLib.source_remove(self.__scanner_timeout_id)\n self.__scanner_uris += uris\n self.__scanner_timeout_id = GLib.timeout_add(500,\n scanner_update)\n\n def __on_entry_parsed(self, parser, uri, metadata, uris):\n \"\"\"\n Add playlist entry to external files\n @param parser as TotemPlParser.Parser\n @param uri as str\n @param metadata as GLib.HastTable\n @param uris as str\n \"\"\"\n uris.append(uri)\n\n def __hide_on_delete(self, widget, event):\n \"\"\"\n Hide window\n @param widget as Gtk.Widget\n @param event as Gdk.Event\n \"\"\"\n # Quit if background mode is on but player is off\n if not self.settings.get_value(\"background-mode\") or\\\n not self.player.is_playing:\n self.player.pause()\n GLib.timeout_add(500, self.quit, True)\n return widget.hide_on_delete()\n\n def __on_activate(self, application):\n \"\"\"\n Call default handler\n @param application as Gio.Application\n \"\"\"\n self.__window.present()\n","repo_name":"refi64/lollypop-viperfx","sub_path":"lollypop/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":22259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"12667615253","text":"import tensorflow as tf\nimport tensorflow.examples.tutorials.mnist.input_data as 
input_data\nimport matplotlib.pyplot as plt\nimport time\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n\ndef conv_variable(shape):\n\treturn tf.Variable(tf.truncated_normal(shape, stddev=0.1))\n\n\ndef conv2d(x, W):\n\treturn tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n\treturn tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef bias_variable(shape):\n\treturn tf.Variable(tf.constant(0.1, shape=shape))\n\n\nx = tf.placeholder(tf.float32, [None, 784])\ny_ = tf.placeholder(tf.float32, [None, 10])\n\nx_image = tf.reshape(x, [-1, 28, 28, 1])\n\n# 第一层卷积\nfilter1 = conv_variable([5, 5, 1, 32])\nbias1 = bias_variable([32])\nconv1 = conv2d(x_image, filter1)\nh_conv1 = tf.nn.relu(conv1 + bias1)\n\n# 池化\nmax_pool1 = max_pool_2x2(h_conv1)\n\n# 第二层卷积\nfilter2 = conv_variable([5, 5, 32, 64])\nbias2 = bias_variable([64])\nconv2 = conv2d(max_pool1, filter2)\nh_conv2 = tf.nn.relu(conv2 + bias2)\n\n# 池化\nmax_pool2 = max_pool_2x2(h_conv2)\n\n'''\n# 第三层卷积\nfilter3 = conv_variable([5, 5, 64, 128])\nbias3 = conv_variable([128])\nconv3 = conv2d(max_pool2, filter3)\nh_conv3 = tf.nn.sigmoid(conv3 + bias3)\n\n# 池化\nmax_pool3 = max_pool_2x2(h_conv3)\n'''\n\n# 将卷积结果展开\nh_pool2_flat = tf.reshape(max_pool2, [-1, 7 * 7 * 64])\n\n# 隐含层1\nw_fc1 = conv_variable([7 * 7 * 64, 1024])\nb_fc1 = bias_variable([1024])\nh_fc1 = tf.nn.relu(tf.add(tf.matmul(h_pool2_flat, w_fc1), b_fc1))\n\n# 隐含层2\nw_fc2 = conv_variable([1024, 128])\nb_fc2 = bias_variable([128])\nh_fc2 = tf.nn.relu(tf.add(tf.matmul(h_fc1, w_fc2), b_fc2))\n\n# 输出层\nw_fc3 = conv_variable([128, 10])\nb_fc3 = bias_variable([10])\n\n# 输出层计算\ny_res = tf.nn.softmax(tf.add(tf.matmul(h_fc2, w_fc3), b_fc3))\n\nloss = -tf.reduce_sum(y_ * tf.log(y_res))\ntrain_op = tf.train.AdamOptimizer(1e-5).minimize(loss)\n\n# 计算正确率\ncorrect_prediction = tf.equal(tf.argmax(y_res, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nsess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\n\nc = []\n\nstart_time = time.time()\n\nfor i in range(1000):\n\tbatch_xs, batch_ys = mnist.train.next_batch(200)\n\n\tif i % 2 == 0 :\n\t\ttrain_accuracy = accuracy.eval(feed_dict={x: batch_xs, y_: batch_ys})\n\t\tprint(\"step %d, training accuracy %g\" % (i, train_accuracy))\n\t\tc.append(train_accuracy)\n\t\tend_time = time.time()\n\t\tprint(\"time: \", (end_time - start_time))\n\t\tstart_time = end_time\n\n\ttrain_op.run(feed_dict={x: batch_xs, y_: batch_ys})\n\nsess.close()\nplt.plot(c)\nplt.tight_layout()\nplt.savefig('C:/Users/Jonty/Desktop/Project/trash/exam_image/13-14.png', dpi=200)\n\n","repo_name":"JalexDooo/pythonlearning","sub_path":"test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"71449704852","text":"__author__ = 'alextc'\nimport datetime\nimport logging\n\n\nclass DateTimeUtils(object):\n\n datetime_format_string = '%Y, %m, %d, %H, %M, %S, %f'\n\n def __init__(self):\n pass\n\n def get_current_utc_datetime_as_formatted_string(self):\n right_now = datetime.datetime.utcnow()\n formatted = right_now.strftime(self.datetime_format_string)\n return formatted\n\n def parse_datetime_from_formatted_string(self, formatted_datetime):\n return datetime.datetime.strptime(formatted_datetime, self.datetime_format_string)\n\n @staticmethod\n def datetime_to_formatted_string(date_time):\n return 
date_time.strftime(DateTimeUtils.datetime_format_string)\n\n @staticmethod\n def datetime_to_timestamp(date_time):\n \"\"\"\n :type date_time: datetime.datetime\n \"\"\"\n return int(date_time.strftime(\"%s\"))\n\n def is_datetime_in_expected_format(self, datetime_formatted_string):\n try:\n formatted = datetime.datetime.strptime(datetime_formatted_string, self.datetime_format_string)\n if formatted:\n return True\n except ValueError:\n return False\n\n return False\n\n @staticmethod\n def get_total_seconds_for_timedelta(timedelta):\n \"\"\"\n :type timedelta: datetime.timedelta\n :rtype: int\n \"\"\"\n logging.debug(\"About to calculate delta_in_seconds for {0}\".format(timedelta))\n delta_in_seconds = timedelta.days * 86400 + timedelta.seconds\n logging.debug(\"_get_total_seconds_for_time_delta is returning: {0}\".format(delta_in_seconds))\n return delta_in_seconds\n\n @staticmethod\n def strip_microseconds(datetime_with_microseconds):\n return datetime.datetime(\n year=datetime_with_microseconds.year,\n month=datetime_with_microseconds.month,\n day=datetime_with_microseconds.day,\n hour=datetime_with_microseconds.hour,\n minute=datetime_with_microseconds.minute,\n second=datetime_with_microseconds.second)\n","repo_name":"alextc/isicopysvc","sub_path":"CopyService/Common/datetimeutils.py","file_name":"datetimeutils.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"28616942672","text":"import numpy as np\n\nimport torch\nfrom torch.autograd import Variable\n\nfrom HyperSphere.GP.inference.inference import Inference\nfrom HyperSphere.BO.acquisition.acquisition_maximization import deepcopy_inference\n\n\nclass ShadowInference(Inference):\n\tdef __init__(self, train_data, model):\n\t\tsuper(ShadowInference, self).__init__(train_data, model)\n\n\tdef predict(self, pred_x, hyper=None, stability_check=False):\n\t\tif hyper is not None:\n\t\t\tparam_original = self.model.param_to_vec()\n\t\t\tself.cholesky_update(hyper)\n\t\tn_pred = pred_x.size(0)\n\t\tpred_x_radius = torch.sqrt(torch.sum(pred_x ** 2, 1, keepdim=True))\n\t\tassert (pred_x_radius.data > 0).all()\n\t\tsatellite = pred_x / pred_x_radius * pred_x.size(1) ** 0.5\n\n\t\tK_train_pre = self.model.kernel(self.train_x, pred_x)\n\t\tK_train_sat = self.model.kernel(self.train_x, satellite)\n\n\t\tchol_solver = torch.gesv(torch.cat([K_train_pre, self.mean_vec, K_train_sat], 1), self.cholesky)[0]\n\t\tchol_solve_k = chol_solver[:, :n_pred]\n\t\tchol_solve_y = chol_solver[:, n_pred:n_pred + 1]\n\t\tchol_solve_s = chol_solver[:, n_pred + 1:]\n\t\tpred_mean = torch.mm(chol_solve_k.t(), chol_solve_y) + self.model.mean(pred_x)\n\t\tpred_var = self.model.kernel.forward_on_identical() - (chol_solve_k ** 2).sum(0).view(-1, 1)\n\n\t\tif not (pred_var.data >= 0).all():\n\t\t\tneg_pred_var_mask = pred_var.data < 0\n\t\t\tnegative_pred_var = pred_var.data[neg_pred_var_mask]\n\t\t\tmin_negative_pred_var = torch.min(negative_pred_var)\n\t\t\tmax_negative_pred_var = torch.max(negative_pred_var)\n\t\t\tkernel_max = self.model.kernel.forward_on_identical().data[0]\n\t\t\tprint('negative %d/%d pred_var range %.4E(%.4E) ~ %.4E(%.4E)' % (torch.sum(neg_pred_var_mask), pred_var.numel(), min_negative_pred_var, min_negative_pred_var / kernel_max, max_negative_pred_var, max_negative_pred_var / kernel_max))\n\t\t\tprint('kernel max %.4E / noise variance %.4E' % (kernel_max, torch.exp(self.model.likelihood.log_noise_var.data)[0]))\n\t\t\tprint('jitter %.4E' % 
self.jitter)\n\t\t\tprint('-' * 50)\n\t\t\tprint('-' * 50)\n\t\t\tprint('-' * 50)\n\t\tif stability_check:\n\t\t\tassert (pred_var.data >= 0).all()\n\t\tnumerically_stable = (pred_var.data >= 0).all()\n\n\t\tk_satellite_pred_diag = torch.cat([self.model.kernel(pred_x[i:i + 1], satellite[i:i + 1]) for i in range(pred_x.size(0))], 0)\n\t\treduction_numer = ((chol_solve_k * chol_solve_s).sum(0).view(-1, 1) - k_satellite_pred_diag) ** 2\n\t\tsatellite_pred_var = self.model.kernel.forward_on_identical() - (chol_solve_s ** 2).sum(0).view(-1, 1)\n\n\t\t# By adding jitter, result is the same as using inference but reduction effect becomes very small\n\t\t# TODO : the effect of maintaining jitter, having it is reasonable, if not more drastic effect in variance reduction\n\t\treduction_denom = satellite_pred_var.clamp(min=1e-8) + self.model.likelihood(pred_x).view(-1, 1) + self.jitter\n\t\treduction = reduction_numer / reduction_denom\n\t\tpred_var_reduced = (pred_var.clamp(min=1e-8) - reduction)\n\n\t\tif not (satellite_pred_var.data >= 0).all():\n\t\t\tmin_pred_var = torch.min(pred_var.data)\n\t\t\tmax_pred_var = torch.max(pred_var.data)\n\t\t\tkernel_max = self.model.kernel.forward_on_identical().data[0]\n\t\t\tprint('satellite_pred_var %.4E / ratio w.r.t max %.4E' % (satellite_pred_var.data.squeeze()[0], satellite_pred_var.data.squeeze()[0] / kernel_max))\n\t\t\tprint('pred_var range %.4E(%.4E) ~ %.4E(%.4E)' % (min_pred_var, min_pred_var / kernel_max, max_pred_var, max_pred_var / kernel_max))\n\t\t\tprint('kernel max %.4E / noise variance %.4E' % (kernel_max, torch.exp(self.model.likelihood.log_noise_var.data)[0]))\n\t\t\tprint('-' * 50)\n\t\t\tprint('-' * 50)\n\t\t\tprint('-' * 50)\n\t\tif stability_check:\n\t\t\tassert (satellite_pred_var >= 0).data.all()\n\t\tnumerically_stable = numerically_stable and (satellite_pred_var >= 0).data.all()\n\n\t\tif not (pred_var_reduced.data >= 0).all():\n\t\t\tneg_pred_var_reduced_mask = pred_var_reduced.data < 0\n\t\t\tnegative_pred_var_reduced = pred_var_reduced.data[neg_pred_var_reduced_mask]\n\t\t\tmin_negative_pred_var_reduced = torch.min(negative_pred_var_reduced)\n\t\t\tmax_negative_pred_var_reduced = torch.max(negative_pred_var_reduced)\n\t\t\tkernel_max = self.model.kernel.forward_on_identical().data[0]\n\t\t\tprint('negative %d/%d pred_var_reduced range %.4E(%.4E) ~ %.4E(%.4E)' % (torch.sum(neg_pred_var_reduced_mask), pred_var_reduced.numel(), min_negative_pred_var_reduced, min_negative_pred_var_reduced / kernel_max, max_negative_pred_var_reduced, max_negative_pred_var_reduced / kernel_max))\n\t\t\tprint('kernel max %.4E / noise variance %.4E' % (kernel_max, torch.exp(self.model.likelihood.log_noise_var.data)[0]))\n\t\t\tprint('jitter %.4E' % self.jitter)\n\t\t\tprint('-' * 50)\n\t\t\tprint('-' * 50)\n\t\t\tprint('-' * 50)\n\t\tif stability_check:\n\t\t\tassert (pred_var_reduced >= 0).data.all()\n\t\tnumerically_stable = numerically_stable and (pred_var_reduced >= 0).data.all()\n\n\t\tzero_pred_var = (pred_var_reduced.data <= 0).all()\n\n\t\tif hyper is not None:\n\t\t\tself.cholesky_update(param_original)\n\t\treturn pred_mean, pred_var_reduced.clamp(min=1e-8), numerically_stable, zero_pred_var\n\n\nif __name__ == '__main__':\n\timport math\n\tfrom mpl_toolkits.mplot3d import Axes3D\n\tfrom copy import deepcopy\n\timport matplotlib.pyplot as plt\n\tfrom torch.autograd._functions.linalg import Potrf\n\tfrom HyperSphere.GP.kernels.modules.radialization import RadializationKernel\n\tfrom HyperSphere.GP.models.gp_regression import GPRegression\n\tfrom 
HyperSphere.BO.acquisition.acquisition_maximization import acquisition\n\n\tndata = 3\n\tndim = 2\n\tsearch_radius = ndim ** 0.5\n\tx_input = Variable(torch.FloatTensor(ndata, ndim).uniform_(-1, 1))\n\tx_input.data[0, :] = 0\n\tx_input.data[1, :] = 1\n\toutput = torch.cos(x_input[:, 0:1] + (x_input[:, 1:2] / math.pi * 0.5) + torch.prod(x_input, 1, keepdim=True))\n\treference = torch.min(output).data.squeeze()[0]\n\ttrain_data = (x_input, output)\n\n\tmodel_normal = GPRegression(kernel=RadializationKernel(3, search_radius))\n\tmodel_shadow = GPRegression(kernel=RadializationKernel(3, search_radius))\n\n\tinference_normal = Inference((x_input, output), model_normal)\n\tinference_shadow = ShadowInference((x_input, output), model_shadow)\n\tinference_normal.init_parameters()\n\tinference_shadow.init_parameters()\n\n\tparams_normal = inference_normal.learning(n_restarts=5)\n\tinference_shadow.cholesky_update(model_normal.param_to_vec())\n\n\tif ndim == 2:\n\t\tx1_grid, x2_grid = np.meshgrid(np.linspace(-1, 1, 50), np.linspace(-1, 1, 50))\n\t\tx_pred_points = Variable(torch.from_numpy(np.vstack([x1_grid.flatten(), x2_grid.flatten()]).astype(np.float32)).t())\n\t\tpred_mean_normal, pred_var_normal = inference_normal.predict(x_pred_points)\n\t\tpred_std_normal = pred_var_normal ** 0.5\n\t\tacq_normal = acquisition(x_pred_points, deepcopy_inference(inference_normal, params_normal), reference=reference)\n\n\t\tpred_mean_shadow, pred_var_shadow = inference_shadow.predict(x_pred_points)\n\t\tpred_std_shadow = pred_var_shadow ** 0.5\n\t\tacq_shadow = acquisition(x_pred_points, deepcopy_inference(inference_shadow, params_normal), reference=reference)\n\n\t\t# ShadowInference unit test\n\t\tsatellite = x_pred_points / torch.sqrt(torch.sum(x_pred_points ** 2, dim=1)).view(-1, 1) * ndim ** 0.5\n\t\tvar_input_map_list = []\n\t\tjitter_list = []\n\t\tmodel_sanity = deepcopy(model_shadow)\n\t\toutput = torch.cat([output, output[:1]], 0)\n\t\tfor i in range(x_pred_points.size(0)):\n\t\t\tinference_input_map = Inference((torch.cat([satellite[i:i + 1], x_input], 0), output), model_sanity)\n\t\t\tinference_input_map.cholesky_update(model_normal.param_to_vec())\n\n\t\t\t# inference_input_map.gram_mat_update()\n\t\t\t# inference_input_map.jitter = inference_shadow.jitter\n\t\t\t# eye_mat = Variable(torch.eye(inference_input_map.gram_mat.size(0)).type_as(inference_input_map.gram_mat.data))\n\t\t\t# inference_input_map.cholesky = Potrf.apply(inference_input_map.gram_mat + eye_mat * inference_input_map.jitter, False)\n\n\t\t\tjitter_list.append(inference_input_map.jitter)\n\t\t\t_, var_input_map = inference_input_map.predict(x_pred_points[i:i + 1])\n\t\t\tvar_input_map_list.append(var_input_map)\n\t\tpred_var_input_map = torch.cat(var_input_map_list, 0)\n\t\tjitter = torch.from_numpy(np.array(jitter_list)).view(-1, 1).type_as(x_pred_points.data)\n\t\tshadow_jitter = jitter.clone().fill_(inference_shadow.jitter)\n\t\tdata = torch.cat([pred_var_shadow.data, pred_var_input_map.data, jitter, shadow_jitter], 1)\n\t\tprint(torch.min(pred_var_shadow).data[0], torch.max(pred_var_shadow).data[0])\n\t\tprint('l2 distance', torch.dist(pred_var_shadow, pred_var_input_map).data[0])\n\t\tprint('l-inf distance', torch.max(torch.abs(pred_var_shadow - pred_var_input_map)).data[0])\n\n\t\tmask_more = (pred_var_shadow < pred_var_input_map).data\n\t\tprint('fake data var < element wise var', torch.sum(mask_more))\n\t\tind_differ = torch.sort(mask_more, 0, descending=True)[1][:torch.sum(mask_more)].squeeze()\n\t\tprint('decreased jitter', 
torch.sum(jitter < shadow_jitter))\n\n\t\tmask_less = (pred_var_shadow > pred_var_input_map).data\n\t\tprint('fake data var > element wise var', torch.sum(mask_less))\n\t\tif torch.sum(mask_less) > 0:\n\t\t\tind_less = torch.sort(mask_less, 0, descending=True)[1][:torch.sum(mask_less)].squeeze()\n\t\t# exit()\n\n\t\tfig = plt.figure()\n\t\tacq_list = [acq_normal, acq_shadow]\n\t\tpred_mean_list = [pred_mean_normal, pred_mean_shadow]\n\t\tpred_std_list = [pred_std_normal, pred_std_shadow]\n\t\tfor i in range(2):\n\t\t\tax = fig.add_subplot(2, 6, 6 * i + 1)\n\t\t\tif torch.min(acq_list[i].data) < torch.max(acq_list[i].data):\n\t\t\t\tax.contour(x1_grid, x2_grid, acq_list[i].data.numpy().reshape(x1_grid.shape))\n\t\t\tax.plot(x_input.data.numpy()[:, 0], x_input.data.numpy()[:, 1], 'rx')\n\t\t\tif i == 0:\n\t\t\t\tax.set_ylabel('normal')\n\t\t\telif i == 1:\n\t\t\t\tax.set_ylabel('shadow')\n\t\t\tax = fig.add_subplot(2, 6, 6 * i + 2, projection='3d')\n\t\t\tax.plot_surface(x1_grid, x2_grid, acq_list[i].data.numpy().reshape(x1_grid.shape))\n\t\t\tif i == 0:\n\t\t\t\tax.set_title('acquistion')\n\t\t\tax = fig.add_subplot(2, 6, 6 * i + 3)\n\t\t\tax.contour(x1_grid, x2_grid, pred_mean_list[i].data.numpy().reshape(x1_grid.shape))\n\t\t\tax = fig.add_subplot(2, 6, 6 * i + 4, projection='3d')\n\t\t\tax.plot_surface(x1_grid, x2_grid, pred_mean_list[i].data.numpy().reshape(x1_grid.shape))\n\t\t\tif i == 0:\n\t\t\t\tax.set_title('pred mean')\n\t\t\tax = fig.add_subplot(2, 6, 6 * i + 5)\n\t\t\tif torch.min(pred_std_list[i].data) != torch.max(pred_std_list[i].data):\n\t\t\t\tax.contour(x1_grid, x2_grid, pred_std_list[i].data.numpy().reshape(x1_grid.shape))\n\t\t\tax.plot(x_input.data.numpy()[:, 0], x_input.data.numpy()[:, 1], 'rx')\n\t\t\tax = fig.add_subplot(2, 6, 6 * i + 6, projection='3d')\n\t\t\tax.plot_surface(x1_grid, x2_grid, pred_std_list[i].data.numpy().reshape(x1_grid.shape))\n\t\t\tif i == 0:\n\t\t\t\tax.set_title('pred std')\n\n\tplt.show()\n","repo_name":"ChangYong-Oh/HyperSphere","sub_path":"HyperSphere/BO/shadow_inference/inference_sphere_satellite.py","file_name":"inference_sphere_satellite.py","file_ext":"py","file_size_in_byte":10516,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"66"} +{"seq_id":"13591654462","text":"import xarray as xr\nimport pandas as pd\nimport numpy as np\nimport scipy\nfrom scipy import signal\nfrom scipy.stats.mstats import ttest_ind\nfrom scipy.stats import pearsonr\nfrom scipy.stats import linregress\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport cartopy.mpl.ticker as cticker\nfrom cartopy.util import add_cyclic_point #进行循环\nimport cartopy.feature as cfeature #用于添加地理属性的库\nfrom cartopy.mpl.ticker import LongitudeFormatter,LatitudeFormatter #添加经纬度需要\nfrom matplotlib.patches import Rectangle\n\n# 遥相关指数\nTELIS = pd.read_csv(r'F:\\7_Data\\0_Science_Program\\1_Graduation_Program\\Teleconnectivity\\Tel_index_STD.csv', usecols=['NAI_STD', 'EUI_STD', 'EAI_STD', 'WAI_STD', 'CNAI_STD', 'ANAI_STD',\n 'BSI_STD'])\n# 计算T—N WAVE 的三个变量\n# z300,u300,v300 为2017,2018年夏季的300hPa位势高度场,U、V风场的月平均\n# z_tmp,u_tmp,v_tmp为1979-2018年夏季月的气候态\nf_z = xr.open_dataset(r'F:\\7_Data\\0_Science_Program\\1_Graduation_Program\\Ncep\\hgt.mon.mean.nc')\nf_u = xr.open_dataset(r'F:\\7_Data\\0_Science_Program\\1_Graduation_Program\\Ncep\\uwnd.mon.mean.nc')\nf_v = xr.open_dataset(r'F:\\7_Data\\0_Science_Program\\1_Graduation_Program\\Ncep\\vwnd.mon.mean.nc')\n# 17,18年夏季ZUV\nlatrange = np.arange(0, 92.5, 
2.5)\nz300 = f_z.hgt.sel(level=300).loc[(f_z.time.dt.month.isin([6, 7, 8]))].loc['1949-01-01':'2018-12-01']\nu300 = np.array(f_u.uwnd.sel(level=300).loc[(f_u.time.dt.month.isin([6, 7, 8]))].loc['2017-01-01':'2018-12-01'].mean('time'))\nv300 = np.array(f_v.vwnd.sel(level=300).loc[(f_v.time.dt.month.isin([6, 7, 8]))].loc['2017-01-01':'2018-12-01'].mean('time'))\n\n# 气候态ZUV\nztmp = np.array(f_z.hgt.sel(level=300).loc[(f_z.time.dt.month.isin([6, 7, 8]))].loc['1949-01-01':'2018-12-01'].mean('time'))/9.8\nutmp = np.array(f_u.uwnd.sel(level=300).loc[(f_u.time.dt.month.isin([6, 7, 8]))].loc['1949-01-01':'2018-12-01'].mean('time'))\nvtmp = np.array(f_v.vwnd.sel(level=300).loc[(f_v.time.dt.month.isin([6, 7, 8]))].loc['1949-01-01':'2018-12-01'].mean('time'))\n\n# lat和lon\nlat, lon = f_z.lat, f_z.lon\n\n# 一些常数\n# 要把经纬度转换成角度量,所以做(*np.pi/180.0)处理\n# 因为最终要计算Fx,Fy,所以统一数组shape,使用.reshape((1,-1))或(-1,1)处理\n# 只有经度维的使用((1,-1)),只有纬度维的使用((-1,1))\na = 6400000 #地球半径\nomega = 7.292e-5 # 自转角速度\nlev = 300/1000 # p/p0\n\ndlon=(np.gradient(lon)*np.pi/180.0).reshape((1,-1))\ndlat=(np.gradient(lat)*np.pi/180.0).reshape((-1,1))\ncoslat = (np.cos(np.array(lat)*np.pi/180)).reshape((-1,1))\nsinlat = (np.sin(np.array(lat)*np.pi/180)).reshape((-1,1))\n\n#计算科氏力\nf = np.array(2*omega*np.sin(lat*np.pi/180.0)).reshape((-1,1))\n#计算|U|\nwind = np.sqrt(utmp**2+vtmp**2)\n#计算括号外的参数,a^2可以从括号内提出\nc = (lev)*coslat/(2*a*a*wind)\n# 回归到遥相关指数的Z300\n# 计算与指数相关的streamf\nz300 = z300.groupby('time.year').mean(dim='time', skipna=True)\nz300 = z300/9.8\ns, r, Z300 = np.zeros((73, 144)), np.zeros((73, 144)), np.zeros((73, 144))\nfor i in range(73):\n for j in range(144):\n s[i, j], _, r[i, j], Z300[i, j], _ = linregress(TELIS.iloc[:, 6], z300[:, i, j])\n#Φ`\nza = Z300 - ztmp.mean(1).reshape((-1,1))\n#Ψ`\ng = 9.8\nstreamf = g*za/f\n\n# # 计算与指数相关的streamf\n# s,r,streamf = np.zeros((7, 37, 144)),np.zeros((7, 37, 144)),np.zeros((7, 37, 144))\n# for i in range(37):\n# for j in range(144):\n# s[i, j], _, r[i, j], streamf[i, j], _ = linregress(TELIS.BSI_STD, streamff[i,j])\n\n# 计算各个部件,难度在于二阶导,变量的名字应该可以很容易看出我是在计算哪部分\ndzdlon = np.gradient(streamf, axis = 1)/dlon\nddzdlonlon = np.gradient(dzdlon, axis = 1)/dlon\ndzdlat = np.gradient(streamf, axis = 0)/dlat\nddzdlatlat = np.gradient(dzdlat, axis = 0)/dlat\nddzdlatlon = np.gradient(dzdlat, axis = 1)/dlon\n# 这是X,Y分量共有的部分\nx_tmp = dzdlon*dzdlon-streamf*ddzdlonlon\ny_tmp = dzdlon*dzdlat-streamf*ddzdlatlon\n# 计算两个分量\nfx = c * ((utmp/coslat/coslat)*x_tmp+vtmp*y_tmp/coslat)\nfy = c * ((utmp/coslat)*y_tmp+vtmp*x_tmp)\n\n\nfig = plt.figure(figsize=(12,8))\nproj = ccrs.PlateCarree(central_longitude=180)\nleftlon, rightlon, lowerlat, upperlat = (0,180,0,90)\nimg_extent = [leftlon, rightlon, lowerlat, upperlat]\nax = fig.add_axes([0.1, 0.1, 0.8, 0.6],projection = proj)\nax.set_extent(img_extent, crs=ccrs.PlateCarree())\nax.add_feature(cfeature.COASTLINE)\nax.set_xticks(np.arange(leftlon,rightlon+60,60), crs=ccrs.PlateCarree())\nax.set_yticks(np.arange(lowerlat,upperlat+30,30), crs=ccrs.PlateCarree())\nlon_formatter = cticker.LongitudeFormatter()\nlat_formatter = cticker.LatitudeFormatter()\nax.xaxis.set_major_formatter(lon_formatter)\nax.yaxis.set_major_formatter(lat_formatter)\n# ax.quiver(lon[::2],lat[::2],u_cli[0,::2,::2],v_cli[0,::2,::2],transform=ccrs.PlateCarree(),scale=150,color='r')\nax.contourf(lon, lat, streamf, transform=ccrs.PlateCarree(), cmap='coolwarm')\nax.quiver(lon[::2], lat[::2], fx[::2,::2], fy[::2,::2], wind[::2,::2], transform=ccrs.PlateCarree(), scale=100, 
cmap=plt.cm.jet)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Yanghh-Y/Pycharm_Project_03_02","sub_path":"5_Explanation/WAVE-FlUX/1_GHT-Tel_TN-WAVE_Error.py","file_name":"1_GHT-Tel_TN-WAVE_Error.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"33634360192","text":"import pytest\n\n\nINSERT_APPLICATION = (\n 'INSERT INTO rescue.application '\n '(park_id, driver_profile_id, order_id, '\n ' longtitude, latitude, st_ticket)'\n ' VALUES (\\'{}\\', \\'{}\\', \\'{}\\', {}, {}, \\'{}\\')'\n)\n\nINSERT_MEDIA = (\n 'INSERT INTO rescue.media '\n '(order_id, media_id, attach_id, content_type)'\n ' VALUES (\\'{}\\', \\'{}\\', \\'{}\\', \\'{}\\')'\n)\n\nHANDLER = 'admin/rescue/v1/sos/order-media'\n\nBODY = {'order_id': 'order_id_1', 'position': {'lat': 56.89, 'lon': 68.9}}\n\nTICKET_KEY = 'TICKET-1'\n\n\n@pytest.mark.parametrize('use_media_storage', [True, False])\nasync def test_admin_v1_sos_order_media(\n taxi_rescue, pgsql, taxi_config, media_storage, use_media_storage,\n):\n taxi_config.set_values(\n {'RESCUE_SAVE_AUDIO_TO_MEDIA_STORAGE': use_media_storage},\n )\n cursor = pgsql['rescue'].cursor()\n cursor.execute(\n INSERT_APPLICATION.format(\n 'db',\n 'uuid',\n BODY['order_id'],\n BODY['position']['lon'],\n BODY['position']['lat'],\n TICKET_KEY,\n ),\n )\n for media_id in ['1', '2']:\n cursor.execute(\n INSERT_MEDIA.format(\n BODY['order_id'],\n media_id + BODY['order_id'],\n media_id + BODY['order_id'],\n 'some_content_type',\n ),\n )\n if use_media_storage:\n media_storage.store_object(media_id + BODY['order_id'])\n\n response = await taxi_rescue.get(\n HANDLER, params={'order_id': BODY['order_id']},\n )\n assert response.status_code == 200\n response_json = response.json()\n assert response_json['ticket'] == TICKET_KEY\n assert response_json['position'] == BODY['position']\n assert len(response_json['media']) == 2\n\n\nasync def test_admin_v1_sos_order_media_no_application(taxi_rescue):\n response = await taxi_rescue.get(\n HANDLER, params={'order_id': 'some_not_existing_id'},\n )\n assert response.status_code == 200\n response_json = response.json()\n assert response_json['code'] == '200'\n assert response_json['message'] == 'Have not pushed SOS'\n","repo_name":"Alexander-Berg/2022-tests-examples-2","sub_path":"Taxi/tests_rescue/test_admin_v1_sos_order_media.py","file_name":"test_admin_v1_sos_order_media.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"31345624807","text":"#!/usr/bin/env python3\n\nimport torch\nfrom torch.autograd import Function\n\nfrom .. 
import settings\n\n\ndef _solve(linear_op, rhs):\n if (\n settings.fast_computations.solves.off()\n or settings.fast_computations.log_prob.off()\n or linear_op.size(-1) <= settings.max_cholesky_size.value()\n ):\n return linear_op.cholesky()._cholesky_solve(rhs)\n else:\n with torch.no_grad():\n preconditioner = linear_op.detach()._solve_preconditioner()\n return linear_op._solve(rhs, preconditioner)\n\n\nclass InvQuad(Function):\n \"\"\"\n Given a PSD matrix A (or a batch of PSD matrices A), this function computes b A^{-1} b\n where b is a vector or batch of vectors\n \"\"\"\n\n @staticmethod\n def forward(ctx, representation_tree, *args):\n \"\"\"\n *args - The arguments representing the PSD matrix A (or batch of PSD matrices A)\n If inv_quad is true, the first entry in *args is inv_quad_rhs (Tensor)\n - the RHS of the matrix solves.\n\n Returns:\n - (Scalar) The inverse quadratic form (or None, if inv_quad is False)\n - (Scalar) The log determinant (or None, if logdet is False)\n \"\"\"\n inv_quad_rhs, *matrix_args = args\n ctx.representation_tree = representation_tree\n # Get closure for matmul\n linear_op = ctx.representation_tree(*matrix_args)\n\n # RHS for inv_quad\n ctx.is_vector = False\n if inv_quad_rhs.ndimension() == 1:\n inv_quad_rhs = inv_quad_rhs.unsqueeze(-1)\n ctx.is_vector = True\n\n # Perform solves (for inv_quad) and tridiagonalization (for estimating logdet)\n inv_quad_solves = _solve(linear_op, inv_quad_rhs)\n inv_quad_term = (inv_quad_solves * inv_quad_rhs).sum(-2)\n\n to_save = matrix_args + [inv_quad_solves]\n ctx.save_for_backward(*to_save)\n\n if settings.memory_efficient.off():\n ctx._linear_op = linear_op\n\n return inv_quad_term\n\n @staticmethod\n def backward(ctx, inv_quad_grad_output):\n *matrix_args, inv_quad_solves = ctx.saved_tensors\n\n if hasattr(ctx, \"_linear_op\"):\n linear_op = ctx._linear_op\n else:\n linear_op = ctx.representation_tree(*matrix_args)\n\n # Fix grad_output sizes\n inv_quad_grad_output = inv_quad_grad_output.unsqueeze(-2)\n neg_inv_quad_solves_times_grad_out = inv_quad_solves.mul(inv_quad_grad_output).mul(-1)\n\n matrix_arg_grads = [None] * len(matrix_args)\n\n # input_1 gradient\n if any(ctx.needs_input_grad[2:]):\n left_factors = neg_inv_quad_solves_times_grad_out\n right_factors = inv_quad_solves\n matrix_arg_grads = linear_op._bilinear_derivative(left_factors, right_factors)\n\n # input_2 gradients\n if ctx.needs_input_grad[1]:\n inv_quad_rhs_grad = neg_inv_quad_solves_times_grad_out.mul(-2)\n else:\n inv_quad_rhs_grad = torch.zeros_like(inv_quad_solves)\n if ctx.is_vector:\n inv_quad_rhs_grad.squeeze_(-1)\n\n res = tuple([None] + [inv_quad_rhs_grad] + list(matrix_arg_grads))\n return tuple(res)\n","repo_name":"cornellius-gp/linear_operator","sub_path":"linear_operator/functions/_inv_quad.py","file_name":"_inv_quad.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"66"} +{"seq_id":"14416346445","text":"#!/usr/bin/env python3\ndef load(file):\n with open(file) as f:\n return [line.split() for line in f.read().split('\\n\\n')]\n\ndef part2(groups):\n '''\n >>> part2(load('test1.txt'))\n 6\n '''\n group_declarations = []\n for group in groups:\n declarations = set(group[0])\n for declaration in group[1:]:\n declarations &= set(declaration)\n group_declarations.append(declarations)\n \n return sum(len(declarations) for declarations in group_declarations)\n\ndef part1(groups):\n '''\n >>> part1(load('test1.txt'))\n 11\n '''\n group_declarations = 
[]\n for group in groups:\n declarations = set(declaration for declarations in group for declaration in list(declarations))\n group_declarations.append(declarations)\n \n return sum(len(declarations) for declarations in group_declarations)\n\ndef main():\n groups = load('input.txt')\n value = part1(groups)\n print(f'Part 1: {value}')\n assert value == 6259\n\n value = part2(groups)\n print(f'Part 2: {value}')\n assert value == 3178\n\nif __name__ == '__main__':\n main()\n","repo_name":"rawlink/advent-of-code-2020","sub_path":"day6/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"43140497396","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\nverify=False Requests 也能忽略对 SSL 证书的验证\ncert=('/path/server.crt', '/path/key') 也可以指定一个本地证书用作客户端证书\n\nauthor = \"minglei.weng@dianjoy.com\"\ncreated = \"2016/10/14 0014\"\n\"\"\"\nimport json\nimport traceback\nimport urllib\nimport urlparse\nfrom collections import OrderedDict\n\nimport logging\n\nimport datetime\nimport requests\nimport urllib3\nfrom utils import settings\n\nurllib3.disable_warnings()\n\n\nclass FiddlerRequestException(Exception):\n pass\n\n\nclass FiddlerRequestTimeOutException(FiddlerRequestException):\n pass\n\n\nclass FiddlerError(Exception):\n def __init__(self, info):\n self.info = info\n\n def __str__(self):\n return repr(self.info)\n\n\nclass RawToPython(object):\n def __init__(self, file_name=None, file_raw=None, is_https=None, is_session=False,\n try_real_simulation=False, retry_count=0):\n if not (file_name or file_raw):\n raise FiddlerError(\"must had file_name or file_data\")\n self.method = None\n self.__url_host = None\n self.url = None\n self.headers = None\n self.headers_small = None\n self.req_data = None\n self.req_json = None\n self.req_param = None\n self.url_parse = None\n self.retry_count = retry_count\n self.try_real_simulation = try_real_simulation\n self.__file = file_name\n self.__raw = file_raw\n self.__lines = None\n self.__is_https = is_https\n self.__get_raw()\n self.__to_python()\n\n def __get_raw(self):\n if self.__file:\n with open(self.__file, \"rb\") as f:\n self.__raw = f.read().strip()\n self.__lines = self.__raw.splitlines()\n\n def __to_method_and_url(self):\n line_split = self.__lines[0].strip().split(\" \")\n line_split = filter(lambda ch: ch,\n [each.strip() for each in line_split])\n self.method = line_split[0]\n self.url = line_split[1]\n if not self.url.startswith(\"http\"):\n if self.__is_https is None:\n if self.headers_small.get(\"referer\"):\n http_host = self.headers_small[\"referer\"].split(\":\", 1)[0]\n elif self.headers_small.get(\"origin\"):\n http_host = self.headers[\"origin\"].split(':', 1)[0]\n else:\n http_host = \"https\"\n else:\n http_host = \"https\" if self.__is_https else \"http\"\n http_host += \"://\" + self.headers_small[\"host\"]\n self.__url_host = http_host\n self.url = http_host + self.url\n self.url_parse = urlparse.urlparse(self.url)\n\n def __to_headers(self):\n ready_to_dict = []\n self.headers_small = {}\n for line in self.__lines[1:]:\n line = line.strip()\n if not line:\n break\n line_split = line.split(\":\", 1)\n line_split = [each.strip() for each in line_split]\n self.headers_small[line_split[0].lower()] = line_split[1]\n ready_to_dict.append(line_split)\n self.headers = OrderedDict(ready_to_dict)\n\n def __to_body(self):\n if self.__lines[-2].strip():\n return\n body_data = urllib.unquote(self.__lines[-1])\n line = 
body_data.strip(\"&\")\n try:\n self.req_json = json.loads(line)\n except Exception:\n line_split = line.split(\"&\")\n ready_to_dict = []\n for each in line_split:\n ready_to_dict.append(each.split(\"=\", 1))\n self.req_data = OrderedDict(ready_to_dict)\n\n def __to_python(self):\n self.__to_headers()\n self.__to_method_and_url()\n self.__to_body()\n\n def __set_url_param(self, param):\n if not isinstance(param, dict):\n raise FiddlerError(\"param must be dict\")\n base_url = self.url_parse.scheme + \"://\" + self.url_parse.netloc + \\\n self.url_parse.path\n if not self.try_real_simulation:\n url_param = dict([(k, v[0]) for k, v in urlparse.parse_qs(\n self.url_parse.query).items()])\n else:\n url_param = OrderedDict(\n [x.split('=') for x in self.url_parse.query.split('&')])\n url_param.update(param)\n logging.debug(\"fd: set_url_param: \" + str(param))\n if not self.try_real_simulation:\n self.url = base_url + '?' + urllib.urlencode(url_param)\n else:\n self.url = base_url + '?' + '&'.join(\n ['='.join([str(k), str(v)]) for k, v in url_param.items()])\n self.url_parse = urlparse.urlparse(self.url)\n\n def set_param(self, url_param=None, req_param=None):\n if url_param is not None:\n self.__set_url_param(url_param)\n self.req_param = {\"url\": self.url,\n \"headers\": self.headers}\n if self.method == \"POST\":\n if self.req_data is not None:\n if req_param is not None:\n self.req_data.update(req_param)\n self.req_param[\"data\"] = self.req_data\n logging.debug(\"fd: set_date_param: \" + str(req_param))\n elif self.req_json is not None:\n if req_param is not None:\n self.req_json.update(req_param)\n # self.req_param[\"json\"] = self.req_json\n self.req_param[\"data\"] = json.dumps(self.req_json)\n logging.debug(\"fd: set_json_param: \" + str(req_param))\n\n def set_head(self, **kwargs):\n self.headers = self.headers or {}\n self.headers.update(kwargs)\n\n def __reset_req_param(self, req_param):\n if self.url != req_param['url']:\n if 'HOST' in req_param['headers']:\n req_param['headers']['HOST'] = urlparse.urlsplit(req_param['url']).netloc\n else:\n req_param['headers']['host'] = urlparse.urlsplit(req_param['url']).netloc\n\n def __requests_reset_url(self, req_param, reset_url):\n if not reset_url:\n return\n if reset_url.startswith('http'):\n req_param[\"url\"] = reset_url\n else:\n if reset_url.startswith('/'):\n req_param[\"url\"] = self.__url_host + reset_url\n else:\n req_param[\"url\"] = self.__url_host + '/' + reset_url\n return req_param\n\n def requests(self, reset_url=None, is_test=False, auto_parm=True, **kwargs):\n \"\"\"\n url=None, data=None, json=None, headers=None, timeout=None\n \"\"\"\n # TODO 请求超时可以重试 n次\n if auto_parm:\n self.set_param()\n req_param = self.req_param\n req_param.update(kwargs)\n if 'timeout' not in req_param:\n req_param['timeout'] = 30\n self.__requests_reset_url(req_param, reset_url)\n self.__reset_req_param(req_param)\n if is_test:\n url_list = req_param[\"url\"].split(\"/\", 3)\n url_list[2] = \"127.0.0.1\"\n req_param[\"url\"] = \"/\".join(url_list)\n if self.method == \"GET\":\n tmp_retry_count = 0\n while True:\n try:\n web_data = requests.get(verify=False, **req_param)\n return web_data\n except requests.Timeout as e:\n error_msg = \"{time}: fd: requests get error: {url}: {e}\".format(\n time=datetime.datetime.now(), url=req_param[\"url\"], e=e)\n logging.exception(error_msg)\n tmp_retry_count += 1\n if tmp_retry_count >= self.retry_count:\n raise FiddlerRequestTimeOutException(error_msg)\n except Exception as e:\n error_msg = \"{time}: fd: 
requests get error: {url}: {e}\".format(\n time=datetime.datetime.now(), url=req_param[\"url\"], e=e)\n logging.exception(error_msg)\n tmp_retry_count += 1\n if tmp_retry_count >= self.retry_count:\n raise FiddlerRequestException(error_msg)\n elif self.method == \"POST\":\n while True:\n tmp_retry_count = 0\n try:\n web_data = requests.post(verify=False, **req_param)\n return web_data\n except requests.Timeout as e:\n error_msg = \"{time}: fd: requests post error: {url}: {e}\\n\\n{detail}\".format(\n time=datetime.datetime.now(), url=req_param[\"url\"], e=e,\n detail=traceback.format_exc())\n logging.exception(error_msg)\n tmp_retry_count += 1\n if tmp_retry_count >= self.retry_count:\n raise FiddlerRequestTimeOutException(error_msg)\n except Exception as e:\n error_msg = \"{time}: fd: requests post error: {url}: {e}\\n\\n{detail}\".format(\n time=datetime.datetime.now(), url=req_param[\"url\"], e=e,\n detail=traceback.format_exc())\n logging.exception(error_msg)\n tmp_retry_count += 1\n if tmp_retry_count >= self.retry_count:\n raise FiddlerRequestException(error_msg)\n else:\n raise FiddlerRequestException('{time}:No Find Method'.format(\n time=datetime.datetime.now()))\n\n def new_requests(self, reset_url=None, is_test=False, auto_parm=True, **kwargs):\n pass\n","repo_name":"cherry2020-com/python","sub_path":"utils/fiddler.py","file_name":"fiddler.py","file_ext":"py","file_size_in_byte":9620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"40850400861","text":"import pandas as pd\nimport nltk\nfrom nltk.tokenize import sent_tokenize\n\npresidents = pd.read_csv('inaug_speeches.csv', encoding='ISO-8859-1') #take care of Unicode characters\n\n# print(presidents)\nbush = presidents[presidents['Name'] == 'George W. 
Bush'] #filter for just Bush\n# print(bush)\n\nbush2001 = bush.iloc[0, :] #data frame with first Bush speech\nbush2005 = bush.iloc[1, :] #data frame with 2nd Bush speech\n\nsent2001 = sent_tokenize(bush2001['text']) #tokenize into sentences with nltk\nsent2005 = sent_tokenize(bush2005['text'])\n\nprint(len(sent2001)) #96 sentences\n","repo_name":"sslogar/text-project","sub_path":"sentenceSplit.py","file_name":"sentenceSplit.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"27159183180","text":"import tensorflow as tf\nimport numpy as np\nfrom utils_training import train_linreg, TfLinreg\nfrom utils_OutputFormat import PrintOutput_1b\n \nX_train = np.load('X_train.npy')\ny_train = np.load('y_train.npy')\n\nlrmodel = TfLinreg(x_dim=X_train.shape[1], learning_rate=0.001)\n\nsess = tf.Session(graph=lrmodel.g)\ntraining_costs, session = train_linreg(sess, lrmodel, X_train, y_train, 1500)\n\nweights = np.array(session.run('weight:0'), dtype=np.float32)\nprint(weights)\nPrintOutput_1b(weights)\n\nimport matplotlib.pyplot as plt\n\nplt.plot(range(1,len(training_costs) + 1), training_costs)\nplt.title('Training loss')\nplt.xlabel('Epoch')\nplt.ylabel('Training Cost')\nplt.savefig('TrainLoss.jpg')","repo_name":"dariopa/Intro-to-ML","sub_path":"Task1b/CodeDario/TF_Regression.py","file_name":"TF_Regression.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"25349859521","text":"from aiogram import Bot, types\r\nfrom aiogram.dispatcher import Dispatcher\r\nfrom aiogram.utils import executor\r\n\r\n# засунул токен сразу, не вижу смысла на 1 переменную создавать модуль\r\nTOKEN = '1413691817:AAEVWElmzbS5nSMqCG8AtMQ6HVLDqil_IWc'\r\n\r\nbot = Bot(token=TOKEN)\r\ndp = Dispatcher(bot)\r\n\r\n\r\n@dp.message_handler(commands=['start'])\r\nasync def process_start_command(message: types.Message):\r\n await message.reply(\"Напиши мне, отправлю такое же сообщение\")\r\n\r\n\r\n@dp.message_handler(commands=['help'])\r\nasync def process_help_command(message: types.Message):\r\n await message.reply(\"Я бот, который возвращает написанное Вами сообщение\")\r\n\r\n\r\n@dp.message_handler()\r\nasync def echo_message(msg: types.Message):\r\n await bot.send_message(msg.from_user.id, msg.text)\r\n\r\n\r\nif __name__ == '__main__':\r\n executor.start_polling(dp)\r\n","repo_name":"paivazov/echo","sub_path":"echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"66"} +{"seq_id":"4202865348","text":"import pytest\n\nfrom homework2.src.Rectangle import Rectangle\n\n\n@pytest.mark.parametrize(\"side_a,side_b\", [(2, 3),\n (4, 5),\n (7, 10)])\ndef test_create_rectangle_area(side_a, side_b):\n rtl = Rectangle(side_a, side_b)\n area_rtl = rtl.area\n extended_rtl = side_a * side_b\n assert area_rtl == extended_rtl\n\n\n@pytest.mark.parametrize(\"side_a,side_b\", [(2, 3),\n (4, 5),\n (7, 10)])\ndef test_create_rectangle_perimeter(side_a, side_b):\n rtl = Rectangle(side_a, side_b)\n area_rtl = rtl.perimeter\n extended_rtl = side_a * 2 + side_b * 2\n assert area_rtl == extended_rtl\n\n\ndef test_class_definition_rectangle(class_definition_rectangle):\n status = isinstance(class_definition_rectangle, Rectangle)\n assert status\n\n\ndef test_add_area_rectangle(class_definition_rectangle, class_definition_square):\n 
area_square = class_definition_square.area\n area_rectangle = class_definition_rectangle.area\n expected_sum_area = area_square + area_rectangle\n sum_area = class_definition_rectangle.add_area(class_definition_square)\n assert sum_area == expected_sum_area\n","repo_name":"Zelkova19/otus_homework","sub_path":"homework2/test/test_rectangle.py","file_name":"test_rectangle.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20556265849","text":"from django import template\nfrom django.conf import settings\n\nfrom settings.models import SettingProperties\n\nregister = template.Library()\n\nALLOWABLE_SETTING_VALUES = (\"OPPIA_ANDROID_DEFAULT_PACKAGEID\",\n \"BASESITE_URL\",\n \"OPPIA_MAX_UPLOAD_SIZE\")\nALLOWABLE_DB_SETTINGS = (\"OPPIA_ANDROID_PACKAGEID\",\n \"OPPIA_ANDROID_ON_GOOGLE_PLAY\", \"OPPIA_HOSTNAME\")\n\n\n# settings value (based on https://stackoverflow.com/a/21593607)\n@register.simple_tag\ndef settings_value(name):\n if name in ALLOWABLE_SETTING_VALUES:\n return getattr(settings, name, '')\n if name in ALLOWABLE_DB_SETTINGS:\n return SettingProperties.get_property(name, None)\n return ''\n","repo_name":"DigitalCampus/django-oppia","sub_path":"helpers/templatetags/settings_value.py","file_name":"settings_value.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"14763159791","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 25 11:15:23 2021\n\n@author: user0220\n\"\"\"\n\n\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import datasets\nfrom matplotlib import pyplot as plt\nfrom sklearn.linear_model import SGDClassifier\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import accuracy_score\n\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.model_selection import cross_validate\n\nfrom sklearn.model_selection import KFold\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n\nfrom sklearn.metrics import f1_score\n\nimport time\n\nRND_SEED = 42\nCPU_CORES = -1\nK_FOLD = 8\nN_SAMPLES = 10000\nN_FEATURES = 200 ##독립변수=피처\nN_CLUSTERS_OF_A_CLASS = 1\nN_CLASSES = 3 ##종속변수=클래스\nCLUSTERS_SPLIT = [0.7, 0.2, 0.1]\nRANDOM_NOISE = 0.005 # 0.5%의 오차 노이즈\nN_INF = 15 ##독립 변수 중 종속 변수와 상관 관계가 있는 성분의 수, 디폴트 2\nN_RED = 10 ##독립 변수 중 다른 독립 변수의 선형 조합으로 나타나는 성분의 수, 디폴트 2\nN_REP = 5 ##독립 변수 중 단순 중복된 성분의 수, 디폴트 0\n\n##데이터 생성. 
분포도 조절이 가능함\nfrom sklearn.datasets import make_classification\nX, y = make_classification(n_samples=N_SAMPLES, n_features=N_FEATURES, \n n_informative=N_INF, n_redundant=N_RED, n_repeated=N_REP, n_classes=N_CLASSES, \n n_clusters_per_class=N_CLUSTERS_OF_A_CLASS, weights=CLUSTERS_SPLIT, \n flip_y=RANDOM_NOISE, random_state=RND_SEED)\n# np.save('./Xset',X)\n# np.save('./yset',y)\n\n##세트 분할\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, \n shuffle=True, stratify=y, random_state=RND_SEED)\nprint('shape:{}\\nTrain data: {}, Test data: {}\\n'.format(X.shape, X_train.shape[0], X_test.shape[0]))\n\n##SGD모델 정의\nSGD_PENALTY = \"l2\" ## l1\nSGD_LOOP = 1000\nsgd = SGDClassifier(penalty=SGD_PENALTY, max_iter=SGD_LOOP)\nfrom sklearn.svm import SVC\nsvm = SVC(kernel='linear')\n\n##피처 스케일링\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.fit_transform(X_train)\nX_test_std = sc.transform(X_test)\n\n##KFold그리드 서치로 파라미터 탐색\nfrom sklearn.model_selection import GridSearchCV\n##alpha default=0.0001 ##learning_rate에 영향 줌\nparam_grid = {'loss':['hinge','log'],\n 'alpha':[0.0001, 0.001, 0.01, 0.1, 1, 1]}\ngs = GridSearchCV(estimator=sgd, param_grid=param_grid, scoring='f1_micro',\n cv=K_FOLD, n_jobs=CPU_CORES)\ngs.fit(X_train_std,y_train)\n# print(gs.best_score_)\nprint('Grid search:',gs.best_params_,'\\n')\n\n\n##f1 스코어 평가\nnormal_sgd = sgd\nnormal_sgd.fit(X_train_std, y_train)\nprint('F1-score micro\\n sgd. Train, Acc: %.4f Test Acc: %.4f'%(normal_sgd.score(X_train_std,y_train),\n normal_sgd.score(X_test_std,y_test)))\nGSsgd = SGDClassifier(penalty=SGD_PENALTY, loss=gs.best_params_['loss'], \n alpha=gs.best_params_['alpha'] , max_iter=SGD_LOOP)\nnormal_GSsgd = GSsgd\nnormal_GSsgd.fit(X_train_std, y_train)\nprint('GSsgd. Train, Acc: %.4f Test Acc: %.4f'%(normal_GSsgd.score(X_train_std,y_train),\n normal_GSsgd.score(X_test_std,y_test)))\nnormal_svm = svm\nnormal_svm.fit(X_train_std, y_train)\nprint(' svm. Train, Acc: %.4f Test Acc: %.4f\\n'%(normal_svm.score(X_train_std,y_train),\n normal_svm.score(X_test_std,y_test)))\n\nN_TREES = 100 ##default=100\n##Tree based feature selection\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.ensemble import ExtraTreesClassifier\nclf = ExtraTreesClassifier(n_estimators=N_TREES).fit(X_train_std, y_train)\nclf.feature_importances_ \nmodel = SelectFromModel(clf, prefit=True)\nX_tree = model.transform(X)\nprint('Tree based festure sel:',X_tree.shape)\n\n## L1-based feature selection\nfrom sklearn.svm import LinearSVC\nlsvc01 = LinearSVC(C=0.01, penalty=\"l1\", dual=False).fit(X_train_std, y_train)\nlsvc001 = LinearSVC(C=0.001, penalty=\"l1\", dual=False).fit(X_train_std, y_train)\nmodel = SelectFromModel(lsvc01, prefit=True)\nX_01l1 = model.transform(X)\nprint('L1 based feature C=0.01:',X_01l1.shape)\nmodel = SelectFromModel(lsvc001, prefit=True)\nX_001l1 = model.transform(X)\nprint('L1 based feature C=0.001:',X_001l1.shape)\n\n##Tree based score 계산\ntree_imp = clf.feature_importances_\ntree_ix = np.argsort(tree_imp)[::-1] \n# X_train_std[:,X_tree.shape[1]] X_test_std[:,X_tree.shape[1]]\nX_tree_train_std = X_train_std[:, tree_ix[:X_tree.shape[1]]]\nX_tree_test_std = X_test_std[:, tree_ix[:X_tree.shape[1]]]\ntree_fs = GSsgd\ntree_fs.fit(X_tree_train_std,y_train)\nprint('tree_fsGSsgd. 
Train, Acc: %.4f Test Acc: %.4f\\n'%(tree_fs.score(X_tree_train_std,y_train),\n tree_fs.score(X_tree_test_std,y_test)))\n\n# ##L1 베이스 피처선택은 어떻게 주요피처 정렬하는지 안나와있어서 모르겠음\n# lsvc001.feature_importances_\n# lsvc01_imp = lsvc01.feature_importances_\n# lsvc01_ix = np.argsort(lsvc01_imp)[::-1] \n# # X_train_std[:,X_tree.shape[1]] X_test_std[:,X_tree.shape[1]]\n# X_lsvc01_train_std = X_train_std[:, lsvc01_ix[:X_01l1.shape[1]]]\n# X_lsvc01_test_std = X_test_std[:, lsvc01_ix[:X_01l1.shape[1]]]\n# lsvc01_fs = GSsgd음\n# lsvc01_fs.fit(X_lsvc01_train_std,y_train)\n# print('Lsvc01_fsGSsgd. Train, Acc: %.4f Test Acc: %.4f\\n'%(lsvc01_fs.score(X_lsvc01_train_std,y_train),\n# lsvc01_fs.score(X_lsvc01_test_std,y_test)))\n\n\n\n##랜덤 포레스트를 활용한 피처 중요도 평가\n##나무기반 모델은 표준화, 정규화가 필요하지 않음\nfrom sklearn.ensemble import RandomForestClassifier\nforest = RandomForestClassifier(n_estimators=N_TREES,n_jobs=CPU_CORES,random_state=RND_SEED)\nforest.fit(X_train,y_train)\nimportances = forest.feature_importances_\nplt.title('Random Forest Feature Importances')\nplt.bar(range(X_train.shape[1]),importances,color='lightblue')\n# plt.xticks(range(X_train.shape[1]),)\nplt.show()\n\nplt.title('Random Forest Feature Importances')\nix = np.argsort(importances)[::-1] ##인덱스를 크기순으로 정렬하는 법, 높은 중요도순으로 ix저장\nplt.bar(range(X_train.shape[1]),importances[ix],color='lightblue')\nplt.xticks(range(X_train.shape[1]),ix)\nplt.show()\n\nNUM_F = 30\nplt.title('Random Forest Feature Importances')\nplt.bar(range(NUM_F),importances[ix[:NUM_F]],color='lightblue')\nplt.xticks(range(NUM_F),ix[:NUM_F])\nplt.show()\n\nX_sel = SelectFromModel(forest,threshold=0.01,prefit=True).transform(X_train)\nprint('0.01',X_sel.shape)\nX_sel = SelectFromModel(forest,threshold=0.02,prefit=True).transform(X_train)\nprint('0.02',X_sel.shape,'\\n')\n\n\n##피처 선택 스코어\nX_sel_train_std = X_train_std[:, ix[:NUM_F]]\nX_sel_test_std = X_test_std[:, ix[:NUM_F]]\nFSsgd = sgd\nFSsgd.fit(X_sel_train_std,y_train)\nprint('fssgd. Train, Acc: %.4f Test Acc: %.4f\\n'%(FSsgd.score(X_sel_train_std,y_train),\n FSsgd.score(X_sel_test_std,y_test)))\n\n##그리드서치+피처 선택 스코어\nX_sel_train_std = X_train_std[:, ix[:NUM_F]]\nX_sel_test_std = X_test_std[:, ix[:NUM_F]]\nGSFSsgd = GSsgd\nGSFSsgd.fit(X_sel_train_std,y_train)\nprint('GSFSsgd. 
Train, Acc: %.4f Test Acc: %.4f\\n'%(GSFSsgd.score(X_sel_train_std,y_train),\n GSFSsgd.score(X_sel_test_std,y_test)))\n\n# ##교차 검증을 이용한 모델 평가\n# print('\\n교차 검증을 이용한 모델의 평균 성능 평가')\n# from sklearn.model_selection import cross_val_score\n# scores = cross_val_score(estimator=sgd, X=X_train_std, y=y_train, cv=K_FOLD, n_jobs=CPU_CORES)\n# print('SGD Train score\\nmean: %.4f std: %.4f' % (np.mean(scores), np.std(scores)))\n# print('Acc: {}'.format(scores[:]))\n# # scores = cross_val_score(estimator=FSsgd, X=X_train_std[:,FS], y=y_train, cv=K_FOLD, n_jobs=CPU_CORES)\n# # print('\\nFS-SGD Train score\\nmean: %.4f std: %.4f' % (np.mean(scores), np.std(scores)))\n# # print('Acc: %s' %scores)\n# scores = cross_val_score(estimator=sgd, X=X_test_std, y=y_test, cv=K_FOLD, n_jobs=CPU_CORES)\n# print('\\nSGD Test score\\nmean: %.4f std: %.4f' % (np.mean(scores), np.std(scores)))\n# print('Acc: %s' %scores)\n# # scores = cross_val_score(estimator=FSsgd, X=X_test_std[:,FS], y=y_test, cv=K_FOLD, n_jobs=CPU_CORES)\n# # print('\\nFS-SGD Test score\\nmean: %.4f std: %.4f' % (np.mean(scores), np.std(scores)))\n# # print('Acc: %s' %scores)\n\n\n\n\n# ##f1 마이크로 스코어를 출력하는 방법\n# print('Train F1: %.4f, Test F1: %.4f'%(f1_score(y_train, sgd.predict(X_train_std), average='micro'), f1_score(y_test, sgd.predict(X_test_std), average='micro')))\n# print('Train CM:\\n {},\\n\\n Test CM:\\n {}'.format(confusion_matrix(y_train, sgd.predict(X_train_std)), confusion_matrix(y_test, sgd.predict(X_test_std))))\n# print('Train F1: %.4f, Test F1: %.4f'%(f1_score(y_train, FSsgd.predict(X_train_std[:,FS]), average='micro'), f1_score(y_test, FSsgd.predict(X_test_std[:,FS]), average='micro')))\n# print('Train CM:\\n {},\\n\\n Test CM:\\n {}'.format(confusion_matrix(y_train, FSsgd.predict(X_train_std[:,FS])), confusion_matrix(y_test, FSsgd.predict(X_test_std[:,FS]))))\n\n\n\n\n'''\n# 분류기\n\nstart = time.time() \n\nclafi = SGDClassifier(loss=\"hinge\", penalty=\"l2\", max_iter=100)\n#clf = SGDClassifier(loss=\"hinge\", alpha=0.001, max_iter=10)\nclafi.fit(X_train, y_train)\n\nprint(\"SVM-SGD clf Time:\", time.time() - start) # 현재시각 - 시작시간 = 실행 시간\n\n# # 교차검증, 5등분\n# from sklearn.model_selection import cross_val_score\n# scores = cross_val_score(clafi, X_train, y_train, cv=5)\n# print(\"@@@ %0.4f acc with a s.d of %0.4f @@@\" % (scores.mean(), scores.std()))\n\n# # 그리드 서치\n# from sklearn import svm\n# from sklearn.model_selection import GridSearchCV\n# parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}\n# svc = svm.SVC()\n# clf = GridSearchCV(svc, parameters)\n# clf.fit(X_train, y_train)\n# print(sorted(clf.cv_results_.keys()) )\n\n\n# 예측: 학습 셋, 검증 셋\n\nstart = time.time() \n\ntrain_pred = clafi.predict(X_train)\ntest_pred = clafi.predict(X_test)\n\nprint(\"SVM-SGD pred Time:\", time.time() - start) # 현재시각 - 시작시간 = 실행 시간\n\n# # 정확도를 소수점 4째 자리까지 표기\ntr_acc = np.round(accuracy_score(y_train, train_pred),4)\nte_acc = np.round(accuracy_score(y_test, test_pred),4)\nprint('SVM-SGD Train Acc: {}, Test Acc: {}'.format(tr_acc, te_acc))\n\n\n\n# 다른 dict 생성 방법 \n# pr_val_dic = { i : 1 for i in range ( 2 , len(feature_-2) )} \n# pr_ran_dic = { i : 1 for i in range ( 2 , len(feature_-2) )} \n\n# 혼동 행렬, f1 스코어\nprint('Train F1: {}, Test F1: {}'.format(f1_score(y_train, train_pred, average='micro'), f1_score(y_test, test_pred, average='micro')))\nprint('Train CM:\\n {},\\n\\n Test CM:\\n {}'.format(confusion_matrix(y_train, train_pred), confusion_matrix(y_test, test_pred)))\n\n\n# # 결정영역 시각화 고차원 대응을 위한 부분\n# pr_val_dic = {}\n# pr_ran_dic = {}\n# for 
i in range(X.shape[1] - 2):\n# pr_val_dic[2+i] = 1\n# pr_ran_dic[2+i] = 1\n\nstart = time.time() \n\n####### 시각화 삭제, 시각화 실행시간 삭제\n\nprint('\\n')\n############################################################\n\nstart = time.time() \n\nsvm = SVC(kernel='linear')\nsvm.fit(X_train, y_train)\n\nprint(\"SVM clf Time:\", time.time() - start) # 현재시각 - 시작시간 = 실행 시간\n\n# 교차검증, 5등분\nfrom sklearn.model_selection import cross_val_score\nscores = cross_val_score(svm, X_train, y_train, cv=5)\nprint(\"@@@ %0.4f acc with a s.d of %0.4f @@@\" % (scores.mean(), scores.std()))\n\n\nstart = time.time() \n\ntrain_pred = svm.predict(X_train)\ntest_pred = svm.predict(X_test)\n\nprint(\"SVM pred Time:\", time.time() - start) # 현재시각 - 시작시간 = 실행 시간\n\n# # 정확도를 소수점 4째 자리까지 표기\ntr_acc = np.round(accuracy_score(y_train, train_pred),4)\nte_acc = np.round(accuracy_score(y_test, test_pred),4)\nprint('SVM Train Acc: {}, Test Acc: {}'.format(tr_acc, te_acc))\n\ntr_f1 = f1_score(y_train, train_pred, average='micro')\nte_f1 = f1_score(y_test, test_pred, average='micro')\nprint('SVM Train F1: {}, Test F1: {}'.format(tr_f1, te_f1))\nprint('SVM Train CM:\\n {},\\n\\n Test CM:\\n {}'.format(confusion_matrix(y_train, train_pred), confusion_matrix(y_test, test_pred)))\n\n####### 시각화 삭제, 시각화 실행시간 삭제\n\n#######테스트 입력 삭제\n'''\n","repo_name":"ruvi-hlab/scikit-learn-SGD","sub_path":"sgd-fs-gs.py","file_name":"sgd-fs-gs.py","file_ext":"py","file_size_in_byte":12535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6019296029","text":"import pygame\nimport random\n\npygame.init()\nwin = pygame.display.set_mode((500, 500))\npygame.display.set_caption(\"Snaek\")\n\n\nclass cube:\n width = 500\n rows = 20\n\n def __init__(self, start, color=(0, 255, 0)):\n self.pos = start\n self.color = color\n self.dirx = 1\n self.diry = 0\n self.eyes = False\n\n def move(self, dirx, diry):\n self.dirx = dirx\n self.diry = diry\n self.pos = (self.pos[0] + self.dirx, self.pos[1] + self.diry)\n\n def draw(self, win):\n dis = self.width // self.rows\n i = self.pos[0]\n j = self.pos[1]\n pygame.draw.rect(win, self.color, (i * dis + 1, j * dis + 1, dis - 2, dis - 2))\n\n if self.eyes:\n centre = dis // 2\n radius = 3\n circleMiddle = (i * dis + centre - radius, j * dis + 8)\n circleMiddle2 = (i * dis + dis - radius * 2, j * dis + 8)\n pygame.draw.circle(win, (0, 0, 0), circleMiddle, radius)\n pygame.draw.circle(win, (0, 0, 0), circleMiddle2, radius)\n\n\nclass snaek:\n body = []\n turns = {}\n\n def __init__(self, color, pos):\n self.color = color\n self.head = cube(pos)\n self.body.append(self.head)\n self.dirx = 1\n self.diry = 0\n self.score = 0\n\n def move(self):\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n self.dirx = -1\n self.diry = 0\n self.turns[self.head.pos[:]] = [self.dirx, self.diry]\n\n elif keys[pygame.K_RIGHT]:\n self.dirx = 1\n self.diry = 0\n self.turns[self.head.pos[:]] = [self.dirx, self.diry]\n\n elif keys[pygame.K_DOWN]:\n self.dirx = 0\n self.diry = 1\n self.turns[self.head.pos[:]] = [self.dirx, self.diry]\n\n elif keys[pygame.K_UP]:\n self.dirx = 0\n self.diry = -1\n self.turns[self.head.pos[:]] = [self.dirx, self.diry]\n\n for i, c in enumerate(self.body):\n p = c.pos[:]\n if p in self.turns:\n turn = self.turns[p]\n c.move(turn[0], turn[1])\n if i == len(self.body) - 1:\n self.turns.pop(p)\n\n else:\n if c.dirx == -1 and c.pos[0] <= 0:\n c.pos = (c.rows - 1, c.pos[1])\n elif c.dirx == 1 and c.pos[0] >= c.rows - 1:\n c.pos = (0, c.pos[1])\n elif c.diry == 
-1 and c.pos[1] <= 0:\n c.pos = (c.pos[0], c.rows - 1)\n elif c.diry == 1 and c.pos[1] >= c.rows - 1:\n c.pos = (c.pos[0], 0)\n else:\n c.move(c.dirx, c.diry)\n\n def AddCube(self):\n tail = self.body[-1]\n if tail.dirx == 1 and tail.diry == 0:\n nstart = (tail.pos[0] - 1, tail.pos[1])\n elif tail.dirx == -1 and tail.diry == 0:\n nstart = (tail.pos[0] + 1, tail.pos[1])\n elif tail.dirx == 0 and tail.diry == 1:\n nstart = (tail.pos[0], tail.pos[1] - 1)\n elif tail.dirx == 0 and tail.diry == -1:\n nstart = (tail.pos[0], tail.pos[1] + 1)\n self.body.append(cube(nstart))\n self.body[-1].dirx = tail.dirx\n self.body[-1].diry = tail.diry\n\n def Reset(self):\n self.pos = (5, 5)\n s.head = cube(self.pos)\n self.dirx = 1\n self.diry = 0\n self.body = []\n self.body.append(s.head)\n self.turns = {}\n self.score = 0\n\n def draw(self, win):\n for i, c in enumerate(self.body):\n if i == 0:\n c.eyes = True\n c.draw(win)\n else:\n c.draw(win)\n\n\ndef GameOver(win):\n win.fill((0, 0, 0))\n font1 = pygame.font.Font(\"ARCADE.TTF\", 80)\n go_txt = font1.render(\"GAME OVER\",True,(255,255,255))\n win.blit(go_txt,(75,50))\n font2 = pygame.font.Font(\"ARCADE.TTF\", 50)\n score_txt = font2.render(\"SCORE: \" + str(final_score), True, (255, 255, 255))\n win.blit(score_txt, (150, 180))\n font3 = pygame.font.Font(\"ARCADE.TTF\", 30)\n press_space_txt = font3.render(\"PRESS SPACE TO RESTART\", True, (255, 255, 255))\n win.blit(press_space_txt, (80, 350))\n pygame.display.update()\n\n\ndef Respawnfruit():\n x = random.randint(0, 19)\n y = random.randint(0, 19)\n return (x, y)\n\n\ndef drawgrid(width, rows, win):\n dist = round(width / rows)\n x = 0\n y = 0\n for r in range(rows):\n x = x + dist\n y = y + dist\n pygame.draw.line(win, (255, 255, 255), (x, 0), (x, width))\n pygame.draw.line(win, (255, 255, 255), (0, y), (width, y))\n\n\ndef redraw():\n global width, rows, s\n win.fill((0, 0, 0))\n s.draw(win)\n drawgrid(width, rows, win)\n fruit.draw(win)\n pygame.display.update()\n\n\ndef main():\n global width, rows, s, fruit, playing, running\n running = True\n width = 500\n rows = 20\n s = snaek((0, 255, 0), (5, 5))\n fruitx = random.randint(0, rows - 1)\n fruity = random.randint(0, rows - 1)\n fruit = cube((fruitx, fruity), (255, 0, 0))\n playing = True\n clock = pygame.time.Clock()\n\n while running:\n if playing:\n pygame.time.delay(100)\n clock.tick(10)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n s.move()\n if s.body[0].pos == fruit.pos:\n print(\"omer is the king\")\n s.score += 1\n s.AddCube()\n fruit = cube(Respawnfruit(), (255, 0, 0))\n CubesPos = []\n for c in s.body:\n if c == s.body[0]:\n pass\n else:\n CubesPos.append(c.pos)\n if s.head.pos in CubesPos:\n global final_score\n final_score = s.score\n s.Reset()\n playing = False\n\n redraw()\n if not playing:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE]:\n playing = True\n else:\n GameOver(win)\n\n\nmain()\n","repo_name":"TomLipshits5/pygame","sub_path":"sneak/main_2.py","file_name":"main_2.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17580910579","text":"import expressionClasses\r\nimport fileClasses\r\nimport time\r\nimport random\r\nimport os\r\n\r\nGLOBAL_R_ATTR_DICT = dict()\r\nGLOBAL_S_ATTR_DICT = dict()\r\n\r\ndef menu():\r\n string = \"Please choose the number of the rule to perform from the 
following:\\n\"\r\n string += \"1) 4\\n\"\r\n string += \"2) 4A\\n\"\r\n string += \"3) 5A\\n\"\r\n string += \"4) 6\\n\"\r\n string += \"5) 6A\\n\"\r\n string += \"6) 11B\\n\"\r\n string += \"Enter 1-6\\n\"\r\n return string\r\n\r\ndef partOne(queryStr):\r\n print(\"\\nPart One\\n========\")\r\n time.sleep(2)\r\n\r\n SQLQuery = expressionClasses.convertSQLQueryToAlgebraicExpression(queryStr)\r\n\r\n print(\"\\nThe algebraic expression of the query is:\\n\" + SQLQuery.toString() + '\\n')\r\n\r\n Continue = True\r\n ruleChosen = \"\"\r\n\r\n while Continue:\r\n print(menu())\r\n choice = str(input())\r\n\r\n if choice == '1':\r\n SQLQuery.rule4()\r\n ruleChosen = 'rule 4: \"Sigma[P1 AND P2](R) = Sigma[P1]( Sigma[P2](R) )\"'\r\n Continue = False\r\n valid = True\r\n\r\n elif choice == '2':\r\n SQLQuery.rule4A()\r\n ruleChosen = 'rule 4A: \"Sigma[P1]( Sigma[P2](R) ) = Sigma[P2]( Sigma[P1](R) )\"'\r\n Continue = False\r\n valid = True\r\n\r\n elif choice == '3':\r\n SQLQuery.rule5A()\r\n ruleChosen = 'rule 5A: \"Pi[X]( Sigma[p](R) ) = Sigma[p]( Pi[X](R) )\"'\r\n Continue = False\r\n valid = True\r\n\r\n elif choice == '4':\r\n SQLQuery.rule6()\r\n ruleChosen = 'rule 6: \"Sigma[P]( Cartesian(R,S) ) = Cartesian( Sigma[P](R) ,S)\"'\r\n Continue = False\r\n valid = True\r\n\r\n elif choice == '5':\r\n SQLQuery.rule6A()\r\n ruleChosen = '\"rule 6A: Sigma[P]( Cartesian(R,S) ) = Cartesian(R ,Sigma[P](S) )\"'\r\n Continue = False\r\n valid = True\r\n\r\n elif choice == '6':\r\n SQLQuery.rule11B()\r\n ruleChosen = '\"rule 11B: Sigma[P]( Cartesian(R,S) ) = NJOIN(R,S)\"'\r\n Continue = False\r\n valid = True\r\n\r\n else:\r\n print(\"illegal option, please choose a valid rule\\n\")\r\n valid = False\r\n\r\n if valid:\r\n print(\"After rule \" + ruleChosen + \" the algebraic expression is:\\n\")\r\n print(SQLQuery.toString() + '\\n')\r\n print(\"End of part One\\n===============\\n\\n\")\r\n time.sleep(2)\r\n return\r\n\r\ndef partTwo(queryStr):\r\n SQL1 = expressionClasses.convertSQLQueryToAlgebraicExpression(queryStr)\r\n SQL2 = expressionClasses.convertSQLQueryToAlgebraicExpression(queryStr)\r\n SQL3 = expressionClasses.convertSQLQueryToAlgebraicExpression(queryStr)\r\n SQL4 = expressionClasses.convertSQLQueryToAlgebraicExpression(queryStr)\r\n\r\n print(\"\\nPart Two\\n========\")\r\n time.sleep(2)\r\n\r\n print(\"\\nLogical Query Plan 1\\n====================\")\r\n time.sleep(1)\r\n doTenRandomRules(SQL1)\r\n\r\n print(\"\\nLogical Query Plan 2\\n====================\")\r\n time.sleep(1)\r\n doTenRandomRules(SQL2)\r\n\r\n print(\"\\nLogical Query Plan 3\\n====================\")\r\n time.sleep(1)\r\n doTenRandomRules(SQL3)\r\n\r\n print(\"\\nLogical Query Plan 4\\n====================\")\r\n time.sleep(1)\r\n doTenRandomRules(SQL4)\r\n\r\n print(\"\\nThe 4 Logical Query Plans are:\\n====================================================================\")\r\n printQueryPlans(SQL1, SQL2, SQL3, SQL4)\r\n print(\"====================================================================\\nEnd of part two\\n\")\r\n time.sleep(2)\r\n return [SQL1, SQL2, SQL3, SQL4]\r\n\r\n\r\ndef doTenRandomRules(SQL):\r\n print(\"SQL query is:\")\r\n print(SQL.toString())\r\n\r\n for i in range(10):\r\n num = random.randint(1,6)\r\n\r\n if num == 1:\r\n SQL.rule4()\r\n print(\"\\nAfter rule 4 query is:\")\r\n print(SQL.toString())\r\n\r\n elif num == 2:\r\n SQL.rule4A()\r\n print(\"\\nAfter rule 4A query is:\")\r\n print(SQL.toString())\r\n\r\n elif num == 3:\r\n SQL.rule5A()\r\n print(\"\\nAfter rule 5A query is:\")\r\n 
print(SQL.toString())\r\n\r\n elif num == 4:\r\n SQL.rule6()\r\n print(\"\\nAfter rule 6 query is:\")\r\n print(SQL.toString())\r\n\r\n elif num == 5:\r\n SQL.rule6A()\r\n print(\"\\nAfter rule 6A query is:\")\r\n print(SQL.toString())\r\n\r\n elif num == 6:\r\n SQL.rule11B()\r\n print(\"\\nAfter rule 11B query is:\")\r\n print(SQL.toString())\r\n return\r\n\r\n\r\ndef printQueryAnalys(sqlOutputArray):\r\n queryNum = 1\r\n for outputArray in sqlOutputArray:\r\n print(\"\\nLogical Query Plan \" + str(queryNum) + \"\\n====================\")\r\n for output in outputArray:\r\n print(output.toString())\r\n queryNum += 1\r\n\r\n\r\ndef partThree(logicalQueryPlanList):\r\n print(\"\\nPart Three\\n========\")\r\n schemeFromFileArray1 = fileClasses.initiateSchemeFromFile()\r\n schemeFromFileArray2 = fileClasses.initiateSchemeFromFile()\r\n schemeFromFileArray3 = fileClasses.initiateSchemeFromFile()\r\n schemeFromFileArray4 = fileClasses.initiateSchemeFromFile()\r\n\r\n output1 = logicalQueryPlanList[0].queryAnalysis(logicalQueryPlanList[0], schemeFromFileArray1[0], schemeFromFileArray1[1])\r\n output2 = logicalQueryPlanList[1].queryAnalysis(logicalQueryPlanList[1], schemeFromFileArray2[0], schemeFromFileArray2[1])\r\n output3 = logicalQueryPlanList[2].queryAnalysis(logicalQueryPlanList[2], schemeFromFileArray3[0], schemeFromFileArray3[1])\r\n output4 = logicalQueryPlanList[3].queryAnalysis(logicalQueryPlanList[3], schemeFromFileArray4[0], schemeFromFileArray4[1])\r\n\r\n printQueryAnalys([output1,output2,output3,output4])\r\n\r\n\r\ndef printQueryPlans(SQL1, SQL2, SQL3, SQL4):\r\n print(SQL1.toString())\r\n print(SQL2.toString())\r\n print(SQL3.toString())\r\n print(SQL4.toString())\r\n return\r\n\r\n\r\nif __name__ == '__main__':\r\n print(\"Please enter a valid SQL query:\")\r\n queryStr = input()\r\n\r\n partOne(queryStr)\r\n fourSQL = partTwo(queryStr)\r\n partThree(fourSQL)\r\n\r\n os.system(\"PAUSE\")\r\n # SELECT R.D , S.E FROM R,S WHERE (R.D=9 AND S.E = R.E) AND (R.A = 7 AND R.B = R.E);","repo_name":"AryeVarman/MySQL-query-parser-and-analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38025324902","text":"import env \nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Exercise 4.1 : function to implement TD(0)\ndef td0(episodes):\n\tV = np.zeros((5,7))\n\tdiscount = 0.9\n\talpha = 0.2\n\tfor _ in np.arange(episodes):\n\t\tstartingState = (3,0)\n\t\tgrid = env.env(startingState)\n\t\tterminate = 0\n\t\twhile terminate==0:\n\t\t\ta = np.random.choice([1, 3, 5], p = [0.25, 0.5, 0.25])\n\t\t\ts = grid.state\t\t\t\n\t\t\t(r,terminate) = grid.step(a)\n\t\t\tV[s] = V[s] + alpha * ( r + discount*V[grid.state] - V[s] )\n\treturn V\n\t\t\n\n#Exercise 4.2 : Functions to implement Q-learning using epsilon greedy strategy\ndef QLearningUsingEpsilonGreedy(episodes):\n\tQ = np.zeros((5,7,8))\n\tdiscount = 0.9\n\talpha = 0.2\n\tepsilon = 0.1\n\tfor _ in np.arange(episodes):\n\t\tstartingState = (3,0)\n\t\tgrid = env.env(startingState)\n\t\tterminate = 0\n\t\twhile terminate == 0:\n\t\t\ts = grid.state\n\t\t\tif np.random.rand() > epsilon:\n\t\t\t\ta = np.argmax(Q[s[0],s[1],:])\n\t\t\telse:\n\t\t\t\ta = np.random.choice([1, 3, 5], p = [0.25, 0.5, 0.25])\n\t\t\t(r,terminate) = grid.step(a)\n\t\t\tQ[s[0],s[1],a] = Q[s[0],s[1],a] + alpha*(r + (discount * np.max(Q[grid.state[0],grid.state[1],:])) - Q[s[0],s[1],a] )\n\treturn Q\t\n\t\n\n\n#Function to 
draw policy \n\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Rectangle\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndef drawPolicy(pi, cells , startstate, terminalStates ,wstates):\n xticks = np.arange(0,7)\n yticks = np.arange(0,5)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.axis([0,7,0,5],'equal')\n ax.set_xticks(xticks)\n ax.set_yticks(yticks)\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n fig.gca().set_aspect('equal')\n fig.gca().invert_yaxis()\n ax.grid(True,color='k')\n for axi in (ax.xaxis, ax.yaxis):\n for tic in axi.get_major_ticks():\n tic.tick1On = tic.tick2On = False\n tic.label1On = tic.label2On = False\n for cell in cells:\n if cell in startstate:\n rect = Rectangle((cell[0],cell[1]),1,1,color='palegreen')\n ax.add_patch(rect)\n\t \n if cell in terminalStates:\n rect = Rectangle((cell[0],cell[1]),1,1,color='red')\n ax.add_patch(rect)\n\t \n elif cell in wstates:\n rect = Rectangle((cell[0],cell[1]),1,1,color='blue')\n ax.add_patch(rect)\n else:\n if pi[cell[1],cell[0]]==0:\n ax.arrow(cell[0]+0.85,cell[1]+0.85,-0.7,-0.7,head_width=0.2, head_length=0.2,length_includes_head=True,\n fill = True)\n elif pi[cell[1],cell[0]]== 1:\n ax.arrow(cell[0]+0.5,cell[1]+0.9,0,-0.8,head_width=0.2, head_length=0.2,length_includes_head=True,\n fill = True)\n elif pi[cell[1],cell[0]]==2:\n ax.arrow(cell[0]+0.15,cell[1]+0.85,0.7,-0.7,head_width=0.2, head_length=0.2,length_includes_head=True,\n fill = True)\n elif pi[cell[1],cell[0]]==3:\n ax.arrow(cell[0]+0.1,cell[1]+0.5,0.8,0,head_width=0.2, head_length=0.2,length_includes_head=True,\n fill = True)\n elif pi[cell[1],cell[0]]==4:\n ax.arrow(cell[0]+0.15,cell[1]+0.15,0.7,0.7,head_width=0.2, head_length=0.2,length_includes_head=True,\n fill = True)\n elif pi[cell[1],cell[0]]==5:\n ax.arrow(cell[0]+0.5,cell[1]+0.1,0,0.8,head_width=0.2, head_length=0.2,length_includes_head=True,\n fill = True)\n elif pi[cell[1],cell[0]]==6:\n ax.arrow(cell[0]+0.85,cell[1]+0.15,-0.7,0.7,head_width=0.2, head_length=0.2,length_includes_head=True,\n fill = True)\n elif pi[cell[1],cell[0]]==7:\n ax.arrow(cell[0]+0.9,cell[1]+0.5,-0.8,0,head_width=0.2, head_length=0.2,length_includes_head=True,\n fill = True)\n\n fig.savefig('optimalpolicy.png')\n\n\nprint('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\nprint('state-value V(s) for each visited state using TD(0) Policy Evaluation :')\nprint(td0(1000))\nprint('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\nprint('Optimal V(s) for each visited state using epsilon greedy Q Learning:')\nQ = QLearningUsingEpsilonGreedy(10000)\nV_optimal = np.max( Q, axis = 2)\nprint(V_optimal)\n\nprint('Optimal Policy :')\noptimalPolicy = np.argmax( Q, axis=2)\nprint(optimalPolicy)\n\nstates = [(y,x) for y in range(len(env.gridworld[0])) for x in range(len(env.gridworld))]\nterminalStates = [(5,0)]\nstartstate = [(0,3)]\nwstates =[(3,0),(4,0),(4,1),(4,6),(1,2),(2,2),(4,2),(2,3),(5,3),(2,4),(6,1)]\ndrawPolicy(optimalPolicy,states,startstate,terminalStates,wstates)\n\n\n\n","repo_name":"joker2904/Reinforcement-Learning","sub_path":"assignment4/td.py","file_name":"td.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2441628196","text":"import random\nimport jieba\nimport pandas as pd\n\n# 加载停用词\nstopwords = pd.read_csv('stopwords.txt', index_col=False, quoting=3, sep=\"\\t\", names=['stopword'], encoding='utf-8')\nstopwords = stopwords['stopword'].values\n\n# 
加载语料\nlaogong_df = pd.read_csv('beilaogongda.csv', encoding='utf-8', sep=',')\nlaopo_df = pd.read_csv('beilaopoda.csv', encoding='utf-8', sep=',')\nerzi_df = pd.read_csv('beierzida.csv', encoding='utf-8', sep=',')\nnver_df = pd.read_csv('beinverda.csv', encoding='utf-8', sep=',')\n\n# 删除语料的nan行\nlaogong_df.dropna(inplace=True)\nlaopo_df.dropna(inplace=True)\nerzi_df.dropna(inplace=True)\nnver_df.dropna(inplace=True)\n\n# 转换\nlaogong = laogong_df.segment.values.tolist()\nlaopo = laopo_df.segment.values.tolist()\nerzi = erzi_df.segment.values.tolist()\nnver = nver_df.segment.values.tolist()\n\n# 定义分词和打标签函数preprocess_text\n# 参数content_liness即为上面转换的list\n# 参数sentences是定义的空list,用来存储打标签之后的数据\n# 参数category是类型标签\ndef preprocess_text(content_lines, sentences, category):\n for line in content_lines:\n try:\n segs = jieba.lcut(line)\n segs = [v for v in segs if not str(v).isdigit()]\n segs = list(filter(lambda x: x.strip(), segs))\n segs = list(filter(lambda x: len(x)>1, segs))\n segs = list(filter(lambda x: x not in stopwords, segs))\n sentences.append((\" \".join(segs), category))\n except Exception:\n print(line)\n continue\n\nsentences = []\npreprocess_text(laogong, sentences, 0)\npreprocess_text(laopo, sentences, 1)\npreprocess_text(erzi, sentences, 2)\npreprocess_text(nver, sentences, 3)\n\nrandom.shuffle(sentences)\n\nfor sentence in sentences[:10]:\n print(sentence[0], sentence[1])\n\n# 抽取词向量特征\nfrom sklearn.feature_extraction.text import CountVectorizer\nvec = CountVectorizer(\n analyzer='word',\n max_features=4000,\n)\n\nfrom sklearn.model_selection import train_test_split\nx, y = zip(*sentences)\nx_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1256)\n\nvec.fit(x_train)\n\n# 算法建模和模型训练\n\n# 训练模型:NB\nfrom sklearn.naive_bayes import MultinomialNB\nclassifier = MultinomialNB()\nclassifier.fit(vec.transform(x_train), y_train)\n\nprint(classifier.score(vec.transform(x_test), y_test))\n\nvec2 = CountVectorizer(\n analyzer='word',\n ngram_range=(1,4),\n max_features=20000,\n)\nvec2.fit(x_train)\n\nclassifier2 = MultinomialNB()\nclassifier2.fit(vec2.transform(x_train), y_train)\nprint(classifier2.score(vec2.transform(x_test), y_test))\n\n# 训练模型:SVM\nfrom sklearn.svm import SVC\nsvm = SVC(kernel='linear')\nsvm.fit(vec.transform(x_train), y_train)\nprint(svm.score(vec.transform(x_test), y_test))\n\nimport xgboost as xgb\nfrom sklearn.model_selection import StratifiedKFold\nimport numpy as np\n# xgb矩阵赋值\nxgb_train = xgb.DMatrix(vec.transform(x_train), label=y_train)\nxgb_test = xgb.DMatrix(vec.transform(x_test))\n\nparams = {\n 'booster': 'gbtree', #使用gbtree\n 'objective': 'multi:softmax', # 多分类的问题、\n # 'objective': 'multi:softprob', # 多分类概率\n #'objective': 'binary:logistic', #二分类\n 'eval_metric': 'merror', #logloss\n 'num_class': 4, # 类别数,与 multisoftmax 并用\n 'gamma': 0.1, # 用于控制是否后剪枝的参数,越大越保守,一般0.1、0.2这样子。\n 'max_depth': 8, # 构建树的深度,越大越容易过拟合\n 'alpha': 0, # L1正则化系数\n 'lambda': 10, # 控制模型复杂度的权重值的L2正则化项参数,参数越大,模型越不容易过拟合。\n 'subsample': 0.7, # 随机采样训练样本\n 'colsample_bytree': 0.5, # 生成树时进行的列采样\n 'min_child_weight': 3,\n # 这个参数默认是 1,是每个叶子里面 h 的和至少是多少,对正负样本不均衡时的 0-1 分类而言\n # 假设 h 在 0.01 附近,min_child_weight 为 1 叶子节点中最少需要包含 100 个样本。\n 'silent': 0, # 设置成1则没有运行信息输出,最好是设置为0.\n 'eta': 0.03, # 如同学习率\n 'seed': 1000,\n 'nthread': -1, # cpu 线程数\n 'missing': 1\n}\nnum_round = 4\nbst = xgb.train(params,xgb_train,num_round)\nbst.save_model('xgboost.model')\n\npreds = bst.predict(xgb_test) # 得到的是第一类别的概率\np_label = [round(value) for value in preds] # 
得到预测标签\n","repo_name":"ElsaQf/NLP_zh_learning","sub_path":"classify_text.py","file_name":"classify_text.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"5946835387","text":"\n# Imports from 3rd party libraries\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport base64\nimport pickle\n\n# Imports from this application\nfrom app import app, server\nfrom pages import index, predictions, insights, process\n\n# Navbar docs: https://dash-bootstrap-components.opensource.faculty.ai/l/components/navbar\nnavbar = dbc.NavbarSimple(\n brand='MediCabinet',\n brand_href='/', \n children=[\n dbc.NavItem(dcc.Link('Predictions', href='/predictions', className='nav-link')), \n dbc.NavItem(dcc.Link('Insights', href='/insights', className='nav-link')), \n dbc.NavItem(dcc.Link('Process', href='/process', className='nav-link')),\n dbc.NavItem(dcc.Link('Pagename', href='/pagename', className='nav-link')), \n ],\n sticky='top',\n color='info', \n light=True, \n dark=False\n)\n\n# Footer docs:\n# dbc.Container, dbc.Row, dbc.Col: https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout\n# html.P: https://dash.plot.ly/dash-html-components\n# fa (font awesome) : https://fontawesome.com/icons/github-square?style=brands\n# mr (margin right) : https://getbootstrap.com/docs/4.3/utilities/spacing/\n# className='lead' : https://getbootstrap.com/docs/4.3/content/typography/#lead\nfooter = dbc.Container(\n dbc.Row(\n dbc.Col(\n html.P(\n [\n html.Span('Brendan Hoss', className='Ds-19'), \n html.A(html.I(className='fas fa-envelope-square mr-1'), href='mailto:@.com'), \n html.A(html.I(className='fab fa-github-square mr-1'), href='https://github.com/Bh0ss/bw3template'), \n html.A(html.I(className='fab fa-twitter-square mr-1'), href='https://twitter.com/bhoss13'), \n ], \n className='lead'\n )\n )\n )\n)\n\n\n# image_file = 'assets\\lambda.png'\n# encoded_image = base64.b64encode(open(image_file, 'rb').read())\n\n# Layout docs:\n# html.Div: https://dash.plot.ly/getting-started\n# dcc.Location: https://dash.plot.ly/dash-core-components/location\n# dbc.Container: https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout\napp.layout = html.Div([\n dcc.Location(id='url', refresh=False), \n navbar, \n dbc.Container(id='page-content', className='mt-4'), \n html.Hr(), \n footer,\n # html.Img(src='data:image/png;base64,{}'.format(encoded_image))\n])\n\n\n# URL Routing for Multi-Page Apps: https://dash.plot.ly/urls\n@app.callback(Output('page-content', 'children'),\n [Input('url', 'pathname')])\ndef display_page(pathname):\n if pathname == '/':\n return index.layout\n elif pathname == '/predictions':\n return predictions.layout\n elif pathname == '/insights':\n return insights.layout\n elif pathname == '/process':\n return process.layout\n elif pathname == '/pagename':\n return pagename.layout\n else:\n return dcc.Markdown('## Page not found')\n\n# Run app server: https://dash.plot.ly/getting-started\nif __name__ == '__main__':\n app.run_server(debug=True)","repo_name":"Bh0ss/bw3template","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38200540678","text":"import math\r\nfrom copy import deepcopy\r\nimport random\r\n\r\n\r\nC = 
1\r\n\r\n\r\nclass MctsNode:\r\n\r\n def __init__(self, board, parent=None, move=None, done=False, N=0, T=0, children=None):\r\n self.parent = parent\r\n self.children = children\r\n self.T = T\r\n self.N = N\r\n self.move = move\r\n self.board = board\r\n self.done = done\r\n\r\n def __str__(self, level=0):\r\n ret = \" \" * level + repr(self) + \"\\n\"\r\n if self.children:\r\n for child in self.children.values():\r\n ret += child.__str__(level + 1)\r\n return ret\r\n\r\n def __repr__(self):\r\n return f\"N: {self.N}, T: {self.T}, move: {self.move}\"\r\n\r\n def __add__(self, other):\r\n if isinstance(other, type(self)):\r\n # Perform the summation of N and T parameters\r\n summed_N = self.N + other.N\r\n summed_T = self.T + other.T\r\n if self.children and other.children:\r\n # Recursively sum the children lists\r\n summed_children = {\r\n child.move: child + other.children[child.move]\r\n for child in self.children.values()\r\n if child.move in other.children\r\n }\r\n elif self.children and not other.children:\r\n summed_children = self.children\r\n elif not self.children and other.children:\r\n summed_children = other.children\r\n else:\r\n summed_children = None\r\n # Create a new instance of SomeObject with the summed values and children\r\n return type(self)(board=self.board, parent=self.parent, move=self.move, N=summed_N, T=summed_T, children=summed_children)\r\n else:\r\n # Raise an exception if the addition is not supported\r\n raise TypeError(\"Unsupported operand type for +\")\r\n\r\n def ucb_score(self):\r\n if self.N == 0:\r\n return float('inf')\r\n top_node = self\r\n if top_node.parent:\r\n top_node = top_node.parent\r\n score = (self.T / self.N)/1000 + (C * (math.sqrt(math.log(top_node.N) / self.N)))\r\n\r\n return score\r\n\r\n def add_children(self, node_class):\r\n self.children = dict()\r\n for move in self.board.legal_moves:\r\n new_board = deepcopy(self.board)\r\n new_board.push(move)\r\n done = True if new_board.outcome() else False\r\n child = node_class(new_board, self, move, done)\r\n self.children[move] = child\r\n\r\n def explore(self):\r\n node = self\r\n while node.children:\r\n move_ucb_score = {key: value.ucb_score() for key, value in node.children.items()}\r\n max_value = max(move_ucb_score.values())\r\n node_with_max_value = [key for key, value in move_ucb_score.items() if value == max_value]\r\n random_max_node = random.choice(node_with_max_value)\r\n node = node.children[random_max_node]\r\n return node\r\n\r\n def rollout(self):\r\n if self.done:\r\n return 0\r\n tot_reward = 0\r\n done = False\r\n new_board = deepcopy(self.board)\r\n color = 'WHITE' if new_board.turn else 'BLACK'\r\n while not done and new_board.legal_moves:\r\n move = random.choice(list(new_board.legal_moves))\r\n new_board.push(move)\r\n reward = 0\r\n if new_board.outcome() and not new_board.outcome().winner:\r\n reward = 0.5\r\n done = True\r\n elif new_board.outcome() and new_board.outcome().winner == color:\r\n reward = 1\r\n done = True\r\n elif new_board.outcome() and new_board.outcome().winner != color:\r\n reward = -1\r\n done = True\r\n tot_reward += reward\r\n return tot_reward\r\n\r\n def backpropagation(self, reward):\r\n node = self\r\n while node.parent:\r\n node.T += reward\r\n node.N += 1\r\n node = node.parent\r\n node.T += reward\r\n node.N += 
1","repo_name":"GiovanniGrotto/ChessMCTS","sub_path":"MctsNode.py","file_name":"MctsNode.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"45021084058","text":"import logging\nfrom logs import server_log_config\nLOGGER = logging.getLogger('server')\nfrom common.variables import DEFAULT_PORT\n\n\nclass VerifyPort:\n\n def __set__(self, instance, value):\n if not 1023 < value < 65535:\n LOGGER.critical(\n f'Попытка запуска сервера с указанием неподходящего порта '\n f'{value}. Допустимы адреса с 1024 до 65535'\n f'сервер будет запущен с порта 7777'\n )\n instance.__dict__[self.name] = DEFAULT_PORT\n\n instance.__dict__[self.name] =value\n\n def __set_name__(self, owner, name):\n self.name = name\n","repo_name":"kodo4/client-server-app-v2.0","sub_path":"descriptors.py","file_name":"descriptors.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12265365669","text":"import grpc\n\nfrom google.protobuf.timestamp_pb2 import Timestamp\n\nimport enums_pb2 as Enums\nimport MeterReader_pb2 as MeterReader\nimport MeterReader_pb2_grpc as MeterReaderService\n\ndef main():\n print(\"Calling gRPC Service\")\n\n with open(\"localhost.crt\", \"rb\") as file:\n cert = file.read()\n\n credentials = grpc.ssl_channel_credentials(cert)\n\n channel = grpc.secure_channel(\"localhost:5001\", credentials)\n stub = MeterReaderService.MeterReadingServiceStub(channel)\n\n request = MeterReader.ReadingPacket(successful = Enums.ReadingStatus.SUCCESS)\n now = Timestamp()\n now.GetCurrentTime()\n reading = MeterReader.ReadingMessage(customerId = 1, \n\t\t\t\t\t\t\t\t\t readingValue = 10000, \n\t\t\t\t\t\t\t\t\t\t readingTime = now)\n request.readings.append(reading)\n\n result = stub.AddReading(request)\n if (result.success == Enums.ReadingStatus.SUCCESS):\n print(\"Success\")\n else:\n print(\"Failure\")\n\n\nmain()\n","repo_name":"profjordanov/dotNetCore2020","sub_path":"gRPC/MeterReader/PythonClient/PythonClient.py","file_name":"PythonClient.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"35639464376","text":"import numpy as np # use this for infinity\nfrom random import choice\nimport platform\nimport time\nfrom os import system\n\n\nclass TICTACTOE_COMPUTER(object):\n \"\"\"\n Class is a collection of shape detection tools based in opencv Image tools. 
Function image inputs require opencv image types.\n \"\"\"\n def __init__(self):\n self.infinity = np.inf\n self.HUMAN = -1\n self.COMP = +1\n self.board = [\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0], ]\n\n def evaluate(self,state):\n \"\"\"\n Function to heuristic evaluation of state.\n :param state: the state of the current board\n :return: +1 if the computer wins; -1 if the human wins; 0 draw\n \"\"\"\n # depth is the number of empty cells which doesn't mean that the computer or human didn't win already\n depth = len(self.empty_cells(state)) \n # print('depth',depth)\n if self.wins(state, self.COMP): # if comp won\n score = +1\n elif self.wins(state, self.HUMAN): # if human won\n score = -1\n elif depth == 0: # if neither comp nor human & no empty cells left, then score = 0 --> tie\n score = 0\n else:\n return \n\n return score\n def Evaluate_Game(self,board):\n '''\n params:\n boardCode = state of board that computer reads\n uses titactoe_brain script\n which refers to this repo: https://github.com/Cledersonbc/tic-tac-toe-minimax\n Returns game which exits the while loop in main if Game = False\n '''\n winner = self.evaluate(board)\n\n if winner == 1:\n print('Game Over..\\n Winner: A.I Computer\\n\\n\\n')\n game = False\n exit()\n\n elif winner == -1:\n print('Game Over..\\n Winner: Human\\n\\n\\n')\n game = False\n exit()\n\n elif winner == 0:\n print('Tie Game!\\n\\n\\n')\n game = False\n exit()\n\n else:\n print('The game continues..')\n game = True\n\n return game\n\n def minimax(self,state, depth, player): # not using anymore. See ttt.py script\n \"\"\"\n AI function that choice the best move\n :param state: current state of the board\n :param depth: node index in the tree (0 <= depth <= 9),\n but never nine in this case (see iaturn() function)\n :param player: an human or a computer\n :return: a list with [the best row, best col, best score]\n \"\"\"\n if player == self.COMP:\n best = [-1, -1, -self.infinity]\n else:\n best = [-1, -1, +self.infinity]\n\n if depth == 0 or self.game_over(state):\n score = self.evaluate(state)\n return [-1, -1, score]\n\n for cell in self.empty_cells(state):\n x, y = cell[0], cell[1]\n state[x][y] = player\n score = self.minimax(state, depth - 1, -player)\n state[x][y] = 0\n score[0], score[1] = x, y\n\n if player == self.COMP:\n if score[2] > best[2]:\n best = score # max value\n else:\n if score[2] < best[2]:\n best = score # min value\n\n \n return best\n\n def empty_cells(self,state):\n \"\"\"\n Each empty cell will be added into cells' list\n :param state: the state of the current board\n :return: a list of empty cells\n \"\"\"\n cells = []\n\n for x, row in enumerate(state):\n for y, cell in enumerate(row):\n if cell == 0:\n cells.append([x, y])\n\n return cells\n\n def ai_turn(self,c_choice, h_choice,board):\n \"\"\"\n It calls the minimax function if the depth < 9,\n else it choices a random coordinate.\n :param c_choice: computer's choice X or O\n :param h_choice: human's choice X or O\n :return:\n \"\"\"\n print('ai_turn: before self.board',self.board)\n self.board = board\n print('ai_turn: after self.board',self.board)\n depth = len(self.empty_cells(self.board))\n if depth == 0 or self.game_over(self.board):\n self.Evaluate_Game(self.board)\n \n if depth == 9: # if board is blank, randomly choose a spot\n print('Board is Blank')\n x = choice([0, 1, 2])\n y = choice([0, 1, 2])\n move = [x,y]\n else: # else apply minimax function\n move = self.minimax(self.board, depth, self.COMP)\n x, y = move[0], move[1]\n print('IN TTT_computer: ai_turn: 
{}'.format((move[0],move[1])))\n\n self.set_move(x, y, self.COMP) # checks valid move & says computer made the move\n time.sleep(1)\n\n if move[0] == 0:\n pose_number = move[1]\n if move[0] == 1:\n pose_number = 3 + move[1]\n if move[0] == 2:\n pose_number = 6 + move[1]\n\n self.board[move[0]][move[1]] = 1\n\n print('IN TTT_computer ai_turn - expected board:',self.board)\n return self.board, pose_number\n\n def game_over(self,state):\n \"\"\"\n This function test if the human or computer wins\n :param state: the state of the current board\n :return: True if the human or computer wins\n \"\"\"\n return self.wins(state, self.HUMAN) or self.wins(state, self.COMP)\n\n def valid_move(self,x, y):\n \"\"\"\n A move is valid if the chosen cell is empty\n :param x: X coordinate\n :param y: Y coordinate\n :return: True if the board[x][y] is empty\n \"\"\"\n if [x, y] in self.empty_cells(self.board):\n return True\n else:\n return False\n\n\n def set_move(self,x, y, player):\n \"\"\"\n Set the move on board, if the coordinates are valid\n :param x: X coordinate\n :param y: Y coordinate\n :param player: the current player\n \"\"\"\n if self.valid_move(x, y):\n self.board[x][y] = player\n return True\n else:\n return False\n\n def wins(self,state, player):\n \"\"\"\n This function tests if a specific player wins. Possibilities:\n * Three rows [X X X] or [O O O]\n * Three cols [X X X] or [O O O]\n * Two diagonals [X X X] or [O O O]\n :param state: the state of the current board\n :param player: a human or a computer\n :return: True if the player wins\n \"\"\"\n win_state = [\n [state[0][0], state[0][1], state[0][2]],\n [state[1][0], state[1][1], state[1][2]],\n [state[2][0], state[2][1], state[2][2]],\n [state[0][0], state[1][0], state[2][0]],\n [state[0][1], state[1][1], state[2][1]],\n [state[0][2], state[1][2], state[2][2]],\n [state[0][0], state[1][1], state[2][2]],\n [state[2][0], state[1][1], state[0][2]],\n ]\n if [player, player, player] in win_state:\n return True\n else:\n return False\n\n def render(self, state):\n \"\"\"\n Print the board on console\n :param state: current state of the board\n \"\"\"\n\n chars = {\n -1: 'O',\n +1: 'X',\n 0: ' '\n }\n str_line = '-------------'\n\n print('\\n' + str_line)\n\n for row in state:\n print('|'),\n for cell in row:\n symbol = chars[cell]\n print(\"{} |\".format(symbol)),\n print('\\n' + str_line) \n \n def combine_board(self,boardO,board):\n print('TTT comp: board',board)\n print('TTT comp: boardO',boardO)\n for row in range(3):\n for col in range(3):\n if boardO[row][col] == -1:\n if board[row][col] == 0:\n board[row][col] = -1\n elif board[row][col] == -1:\n print('Previous O detected')\n else:\n print(\"Overlapping O and X!\") \n return board \n\n ","repo_name":"OSU-AIMS/tic-tac-toe","sub_path":"scripts/tictactoe_computer.py","file_name":"tictactoe_computer.py","file_ext":"py","file_size_in_byte":8102,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5720794728","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 12 18:06:20 2022\r\n\r\n@author: Augusto\r\n\"\"\"\r\n\r\nimport pandas as pd \r\nimport GoogleNews\r\n\r\n# Create dictionary to store data\r\nnews_frame = {'Keyword':[], 'Header':[], 'Date':[], 'Link':[]}\r\n\r\n# Read Excel file using pandas. Will be used to search the values inside a column\r\nkeyword_table = pd.read_csv(r'file_path\\\\filename.csv') # or read_excel and .xlsx if file is in Excel format\r\n\r\n# Reads the dataframe created previously. 
df[''] selects the column\r\nfor keyword in keyword_table['Column name for search']: \r\n \r\n googlenews = GoogleNews(lang = 'pt')\r\n googlenews.get_news(keyword)\r\n news = googlenews.results()\r\n \r\n # Second loop used to extract the results as they come in nested dictionaries\r\n for results in news: \r\n \r\n # Assign variables to the values\r\n news_header = results['title']\r\n news_date = results['date']\r\n news_link = results['link']\r\n \r\n # Append the values from GoogleNews dictionary to the table \"news_frame\" created previously\r\n news_frame['Keyword'].append(keyword)\r\n news_frame['Header'].append(news_header)\r\n news_frame['Date'].append(news_date)\r\n news_frame['Link'].append(news_link) \r\n \r\n\r\n# Transform the list storing values to a proper Pandas Dataframe\r\ndf_news = pd.Dataframe(news_frame) \r\n\r\n# Save to a file\r\npath = 'path\\\\news.csv'\r\ndf_news.to_csv(path)\r\n\r\n# Check the \"headers\" column for duplicates. If it exists, keeps only the last one. Useful because the date is displayed as \"5 days ago\" when the news is recently released\r\ndf_news.drop_duplicates(subset = 'Header', keep = 'last')\r\n","repo_name":"aspedrini/Google-News-to-Pandas","sub_path":"GoogleNewsToCSV.py","file_name":"GoogleNewsToCSV.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25794260634","text":"import cv2\nimport numpy as np\n\nimagemA = cv2.imread(\"imagemTeste.jpg\")\nimagemB = cv2.imread(\"imagemTeste3.jpg\")\n\nimgEscaladaA\t=\tcv2.resize( \n imagemA,\t\n None,\t\n fx\t= 0.5,\t\n fy\t= 0.5, \n interpolation\t=\tcv2.INTER_CUBIC \n )\n\nimgEscaladaB\t=\tcv2.resize( \n imagemB,\t\n None,\t\n fx\t= 0.5,\t\n fy\t= 0.5, \n interpolation\t=\tcv2.INTER_CUBIC \n)\n\n#nova imagem da diferença da imagem A para imagem B \n#as duas imagens devem ser posições diferentes do mesmo objeto observado\nimgRGB\t=\tcv2.subtract(imagemA, imagemB) \n\n#montagem de uma nova imagem com a diferença e retirando o segundo plano\nimgHSV\t=\tcv2.cvtColor(imgRGB,\tcv2.COLOR_BGR2HSV)\n\n\ntomClaro\t=\tnp.array([0,\t120,\t120]) \ntomEscuro\t=\tnp.array([180,\t255,\t255])\n\n\nimgSegmentada\t=\tcv2.inRange(imgHSV,\ttomClaro,\ttomEscuro)\n\nimgSegmentadaEscalada\t=\tcv2.resize( \n imgSegmentada,\t\n None,\t\n fx\t= 0.5,\t\n fy\t= 0.5, \n interpolation\t=\tcv2.INTER_CUBIC \n)\n\n\ncv2.imshow(\"Segmentada\",\timgSegmentadaEscalada)\n\ncv2.waitKey(0) \ncv2.destroyAllWindows()","repo_name":"ThayDias/visao-computacional","sub_path":"Introducao a Visao Computacional/9. 
Segmentacao de Objetos/segmentacao_movimento.py","file_name":"segmentacao_movimento.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"20238157095","text":"#!/usr/bin/env python\n\n# 需要安装Python3\n# cmd 执行 pip install xlrd2 \n\nimport xlrd2\nimport csv\nimport codecs\nimport os\n\nDATA_PATH = os.getcwd() + '/Datas/'\n\n\ndef xlsx_to_csv(name):\n src = DATA_PATH + name + '.xlsx'\n res = DATA_PATH + name + '.csv'\n\n workbook = xlrd2.open_workbook(src)\n table = workbook.sheet_by_index(0)\n with codecs.open(res, 'w', encoding='ansi') as f:\n write = csv.writer(f)\n for row_num in range(table.nrows):\n row_value = table.row_values(row_num)\n if row_value[5] == 1:\n row_value[5] = 'TRUE'\n elif row_value[5] == 0:\n row_value[5] = 'FALSE'\n write.writerow(row_value)\n\n\nif __name__ == '__main__':\n xlsx_to_csv(\"__tables__\")\n","repo_name":"wankcn/Temp-Store","sub_path":"VoTools/Excel/Configs/xlsx_to_csv.py","file_name":"xlsx_to_csv.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73771108967","text":"# -*- coding: utf-8 -*-\r\n\r\n########################################\r\n## import packages\r\n########################################\r\nimport os\r\nimport re\r\nimport csv\r\nimport codecs\r\nimport numpy as np\r\nimport pandas as pd\r\nimport sys\r\n\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import SnowballStemmer\r\nfrom string import punctuation\r\n\r\nfrom gensim.models import KeyedVectors\r\nfrom keras.preprocessing import text, sequence\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Bidirectional,GRU\r\nfrom keras.layers.merge import concatenate\r\nfrom keras.models import Model, load_model\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\r\n\r\nfrom keras.callbacks import Callback\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import roc_auc_score\r\n\r\nfrom keras import backend as K\r\nfrom keras.engine.topology import Layer\r\n#from keras import initializations\r\nfrom keras import initializers, regularizers, constraints\r\n\r\nclass RocAucEvaluation(Callback):\r\n def __init__(self, validation_data=(), interval=1):\r\n super(Callback, self).__init__()\r\n\r\n self.interval = interval\r\n self.X_val, self.y_val = validation_data\r\n\r\n def on_epoch_end(self, epoch, logs={}):\r\n if epoch % self.interval == 0:\r\n y_pred = self.model.predict(self.X_val, verbose=0)\r\n score = roc_auc_score(self.y_val, y_pred)\r\n print(\"\\n ROC-AUC - epoch: %d - score: %.6f \\n\" % (epoch + 1, score))\r\n\r\nclass Attention(Layer):\r\n def __init__(self, step_dim,\r\n W_regularizer=None, b_regularizer=None,\r\n W_constraint=None, b_constraint=None,\r\n bias=True, **kwargs):\r\n \"\"\"\r\n Keras Layer that implements an Attention mechanism for temporal data.\r\n Supports Masking.\r\n Follows the work of Raffel et al. 
[https://arxiv.org/abs/1512.08756]\r\n # Input shape\r\n 3D tensor with shape: `(samples, steps, features)`.\r\n # Output shape\r\n 2D tensor with shape: `(samples, features)`.\r\n :param kwargs:\r\n Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.\r\n The dimensions are inferred based on the output shape of the RNN.\r\n Example:\r\n model.add(LSTM(64, return_sequences=True))\r\n model.add(Attention())\r\n \"\"\"\r\n self.supports_masking = True\r\n #self.init = initializations.get('glorot_uniform')\r\n self.init = initializers.get('glorot_uniform')\r\n\r\n self.W_regularizer = regularizers.get(W_regularizer)\r\n self.b_regularizer = regularizers.get(b_regularizer)\r\n\r\n self.W_constraint = constraints.get(W_constraint)\r\n self.b_constraint = constraints.get(b_constraint)\r\n\r\n self.bias = bias\r\n self.step_dim = step_dim\r\n self.features_dim = 0\r\n super(Attention, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n assert len(input_shape) == 3\r\n\r\n self.W = self.add_weight((input_shape[-1],),\r\n initializer=self.init,\r\n name='{}_W'.format(self.name),\r\n regularizer=self.W_regularizer,\r\n constraint=self.W_constraint)\r\n self.features_dim = input_shape[-1]\r\n\r\n if self.bias:\r\n self.b = self.add_weight((input_shape[1],),\r\n initializer='zero',\r\n name='{}_b'.format(self.name),\r\n regularizer=self.b_regularizer,\r\n constraint=self.b_constraint)\r\n else:\r\n self.b = None\r\n\r\n self.built = True\r\n\r\n def compute_mask(self, input, input_mask=None):\r\n # do not pass the mask to the next layers\r\n return None\r\n\r\n def call(self, x, mask=None):\r\n # eij = K.dot(x, self.W) TF backend doesn't support it\r\n\r\n # features_dim = self.W.shape[0]\r\n # step_dim = x._keras_shape[1]\r\n\r\n features_dim = self.features_dim\r\n step_dim = self.step_dim\r\n\r\n eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim))\r\n\r\n if self.bias:\r\n eij += self.b\r\n\r\n eij = K.tanh(eij)\r\n\r\n a = K.exp(eij)\r\n\r\n # apply mask after the exp. will be re-normalized next\r\n if mask is not None:\r\n # Cast the mask to floatX to avoid float64 upcasting in theano\r\n a *= K.cast(mask, K.floatx())\r\n\r\n # in some cases especially in the early stages of training the sum may be almost zero\r\n a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\r\n\r\n a = K.expand_dims(a)\r\n weighted_input = x * a\r\n #print weigthted_input.shape\r\n return K.sum(weighted_input, axis=1)\r\n\r\n def compute_output_shape(self, input_shape):\r\n #return input_shape[0], input_shape[-1]\r\n return input_shape[0], self.features_dim\r\n \r\npath = 'data'\r\nEMBEDDING_FILE = os.path.join(path, 'fasttext-crawl-300d-2m/crawl-300d-2M.vec')\r\n\r\nMAX_SEQUENCE_LENGTH = 150\r\nMAX_NB_WORDS = 100000\r\nEMBEDDING_DIM = 300\r\nVALIDATION_SPLIT = 0.1\r\n\r\nnum_lstm = 300\r\nnum_dense = 256\r\nrate_drop_lstm = 0.25\r\nrate_drop_dense = 0.25\r\n\r\nact = 'relu'\r\n\r\n########################################\r\n## index word vectors\r\n########################################\r\nprint('Indexing word vectors')\r\n'''\r\n#Glove Vectors\r\nembeddings_index = {}\r\nf = open(EMBEDDING_FILE, encoding='utf-8')\r\nfor line in f:\r\n values = line.split()\r\n word = values[0]\r\n coefs = np.asarray(values[-300:], dtype='float32')\r\n embeddings_index[word] = coefs\r\nf.close()\r\n\r\nprint('Total %s word vectors.' 
% len(embeddings_index))'''\r\n\r\n#train_df = pd.read_csv(TRAIN_DATA_FILE)\r\n#test_df = pd.read_csv(TEST_DATA_FILE)\r\n#train_df = pd.read_csv('data/train_nodate.csv')\r\n#test_df = pd.read_csv('data/test_nodate.csv')\r\n\r\ntrain = pd.read_csv('data/cleaned-toxic-comments/train_preprocessed.csv')\r\ntest = pd.read_csv('data/cleaned-toxic-comments/test_preprocessed.csv')\r\n\r\nX_train = train[\"comment_text\"].fillna(\"fillna\").values\r\ny_train = train[[\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]].values\r\nX_test = test[\"comment_text\"].fillna(\"fillna\").values\r\n\r\ntokenizer = text.Tokenizer(num_words=MAX_NB_WORDS)\r\ntokenizer.fit_on_texts(list(X_train) + list(X_test))\r\nX_train = tokenizer.texts_to_sequences(X_train)\r\nX_test = tokenizer.texts_to_sequences(X_test)\r\nx_train = sequence.pad_sequences(X_train, maxlen=MAX_SEQUENCE_LENGTH)\r\nx_test = sequence.pad_sequences(X_test, maxlen=MAX_SEQUENCE_LENGTH)\r\n\r\n'''\r\n########################################\r\n## process texts in datasets\r\n########################################\r\nprint('Processing text dataset')\r\n\r\n#Regex to remove all Non-Alpha Numeric and space\r\nspecial_character_removal=re.compile(r'[^a-z\\d ]',re.IGNORECASE)\r\n\r\n#regex to replace all numerics\r\nreplace_numbers=re.compile(r'\\d+',re.IGNORECASE)\r\n\r\ndef text_to_wordlist(text, remove_stopwords=False, stem_words=False):\r\n # Clean the text, with the option to remove stopwords and to stem words.\r\n \r\n # Convert words to lower case and split them\r\n text = text.lower().split()\r\n\r\n # Optionally, remove stop words\r\n if remove_stopwords:\r\n stops = set(stopwords.words(\"english\"))\r\n text = [w for w in text if not w in stops]\r\n \r\n text = \" \".join(text)\r\n \r\n #Remove Special Characters\r\n text=special_character_removal.sub('',text)\r\n \r\n #Replace Numbers\r\n text=replace_numbers.sub('n',text)\r\n\r\n # Optionally, shorten words to their stems\r\n if stem_words:\r\n text = text.split()\r\n stemmer = SnowballStemmer('english')\r\n stemmed_words = [stemmer.stem(word) for word in text]\r\n text = \" \".join(stemmed_words)\r\n \r\n # Return a list of words\r\n return(text)\r\n\r\nlist_sentences_train = train[\"fewer_dates\"].fillna(\"NA\").values\r\nlist_classes = [\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]\r\ny = train[list_classes].values\r\nlist_sentences_test = test[\"fewer_dates\"].fillna(\"NA\").values\r\n\r\n\r\ncomments = []\r\nfor text in list_sentences_train:\r\n comments.append(text_to_wordlist(text))\r\n \r\ntest_comments=[]\r\nfor text in list_sentences_test:\r\n test_comments.append(text_to_wordlist(text))\r\n\r\ntokenizer = Tokenizer(num_words=MAX_NB_WORDS)\r\ntokenizer.fit_on_texts(comments + test_comments)\r\n\r\nsequences = tokenizer.texts_to_sequences(comments)\r\ntest_sequences = tokenizer.texts_to_sequences(test_comments)\r\n\r\nword_index = tokenizer.word_index\r\nprint('Found %s unique tokens' % len(word_index))\r\n\r\ndata = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\r\nprint('Shape of data tensor:', data.shape)\r\nprint('Shape of label tensor:', y.shape)\r\n\r\ntest_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH)\r\nprint('Shape of test_data tensor:', test_data.shape)\r\n\r\nword_index = tokenizer.word_index\r\n########################################\r\n## prepare embeddings\r\n########################################\r\nprint('Preparing embedding matrix')\r\nnb_words = min(MAX_NB_WORDS, 
len(word_index))\r\nembedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))\r\nfor word, i in word_index.items():\r\n if i >= MAX_NB_WORDS:\r\n continue\r\n embedding_vector = embeddings_index.get(word)\r\n if embedding_vector is not None:\r\n # words not found in embedding index will be all-zeros.\r\n embedding_matrix[i] = embedding_vector\r\n\r\nprint('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))'''\r\n\r\ndef get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32')\r\n\r\n# BUILD EMBEDDING MATRIX \r\nprint('Preparing embedding matrix...')\r\n# Read the FastText word vectors (space delimited strings) into a dictionary from word->vector\r\nembeddings_index = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE, encoding=\"utf-8\"))\r\nprint(\"embeddings_index size: \", len(embeddings_index))\r\n\r\n# https://github.com/uclmr/emoji2vec\r\n# e2v = gsm.Word2Vec.load_word2vec_format('emoji2vec.bin', binary=True)\r\n\r\nword_index = tokenizer.word_index\r\nprint(\"word_index size: \", len(word_index)) \r\nwords_not_found = []\r\nnb_words = min(MAX_NB_WORDS, len(word_index))\r\nembedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))\r\nfor word, i in word_index.items(): \r\n if i >= MAX_NB_WORDS: \r\n continue\r\n embedding_vector = embeddings_index.get(word)\r\n if (embedding_vector is not None) and len(embedding_vector) > 0:\r\n embedding_matrix[i] = embedding_vector\r\n else:\r\n words_not_found.append(word)\r\n\r\n########################################\r\n## sample train/validation data\r\n########################################\r\n# np.random.seed(1234)\r\n'''\r\nperm = np.random.permutation(len(data))\r\nidx_train = perm[:int(len(data)*(1-VALIDATION_SPLIT))]\r\nidx_val = perm[int(len(data)*(1-VALIDATION_SPLIT)):]\r\n\r\ndata_train=data[idx_train]\r\nlabels_train=y[idx_train]\r\nprint(data_train.shape,labels_train.shape)\r\n\r\ndata_val=data[idx_val]\r\nlabels_val=y[idx_val]\r\n\r\nprint(data_val.shape,labels_val.shape)'''\r\n\r\n########################################\r\n## define the model structure\r\n########################################\r\nword_index = tokenizer.word_index\r\nprint(\"word_index size: \", len(word_index)) \r\nnb_words = min(MAX_NB_WORDS, len(word_index))\r\nembedding_layer = Embedding(nb_words,\r\n EMBEDDING_DIM,\r\n weights=[embedding_matrix],\r\n input_length=MAX_SEQUENCE_LENGTH,\r\n trainable=False)\r\n\r\nlstm_layer = LSTM(num_lstm, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm,return_sequences=True)\r\ngru = Bidirectional(GRU(128, return_sequences=True,dropout=0.1,recurrent_dropout=0.1))\r\n\r\ncomment_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\r\nembedded_sequences= embedding_layer(comment_input)\r\nx = gru(embedded_sequences)\r\nx = Dropout(rate_drop_dense)(x)\r\nmerged = Attention(MAX_SEQUENCE_LENGTH)(x)\r\nmerged = Dense(num_dense, activation=act)(merged)\r\nmerged = Dropout(rate_drop_dense)(merged)\r\nmerged = BatchNormalization()(merged)\r\npreds = Dense(6, activation='sigmoid')(merged)\r\n\r\n########################################\r\n## train the model\r\n########################################\r\nmodel = Model(inputs=[comment_input], \\\r\n outputs=preds)\r\nmodel.compile(loss='binary_crossentropy',\r\n optimizer='rmsprop',\r\n metrics=['accuracy'])\r\nprint(model.summary())\r\n\r\nSTAMP = 'simple_lstm_glove_vectors_%.2f_%.2f'%(rate_drop_lstm,rate_drop_dense)\r\nprint(STAMP)\r\n\r\nearly_stopping =EarlyStopping(monitor='val_loss', patience=5, 
verbose=1)\r\nbst_model_path = STAMP + '.h5'\r\nmodel_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True, verbose=1)\r\n\r\nX_tra, X_val, y_tra, y_val = train_test_split(x_train, y_train, test_size=0.1)\r\n \r\nRocAuc = RocAucEvaluation(validation_data=(X_val, y_val), interval=1) \r\n\r\nrrp = ReduceLROnPlateau(\r\n monitor='val_loss',\r\n factor=0.5,\r\n patience=3 , min_lr=0.00001) \r\n'''\r\nhist = model.fit(X_tra, y_tra, \\\r\n validation_data=(X_val, y_val), \\\r\n epochs=50, batch_size=256, shuffle=True, \\\r\n callbacks=[early_stopping, model_checkpoint, RocAuc, rrp])'''\r\n \r\nmodel.load_weights(bst_model_path)\r\n#bst_val_score = min(hist.history['val_loss'])\r\n#bst_val_score = 0.9847\r\n\r\n#######################################\r\n## make the submission\r\n########################################\r\nprint('Start making the submission before fine-tuning')\r\n\r\ny_test = model.predict([x_test], batch_size=1024, verbose=1)\r\n\r\nsample_submission = pd.read_csv(\"data/sample_submission.csv\")\r\nsample_submission[[\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]] = y_test\r\n\r\nsample_submission.to_csv(STAMP+'.csv', index=False)","repo_name":"Jeff09/Toxic-Comment-Classification-Challenge","sub_path":"gru_Attention_glove.py","file_name":"gru_Attention_glove.py","file_ext":"py","file_size_in_byte":14065,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"39736186426","text":"import pyrogram\nfrom pyrogram import Filters\nfrom userbyte import byte, cmd\nimport random\n\n@byte.on_message(Filters.command('slap', cmd) & Filters.me)\nasync def slep(byte, message):\n\t\n\tif not message.reply_to_message:\n\t\tawait message.edit(\"`📌 Reply To Any User's Message! 
😈 Else I'll Slap You`\")\n\telse:\n\t\tfrom_user = await byte.get_users(message.reply_to_message.from_user.id)\n\t\n\t\tITEMS = [\n\t\t \"kids generator machine\",\n\t\t \"egg flavored comdom\",\n\t\t \"one glass of lava\"\n\t\t \"large trout\",\n\t\t \"baseball bat\",\n\t\t \"cricket bat\",\n\t\t \"wooden cane\",\n\t\t \"nail\",\n\t\t \"printer\",\n\t\t \"shovel\",\n\t\t \"CRT monitor\",\n\t\t \"physics textbook\",\n\t\t \"toaster\",\n\t\t \"portrait of Richard Stallman\",\n\t\t \"television\",\n\t\t \"five ton truck\",\n\t\t \"roll of duct tape\",\n\t\t \"book\",\n\t\t \"laptop\",\n\t\t \"old television\",\n\t\t \"sack of rocks\",\n\t\t \"rainbow trout\",\n\t\t \"rubber chicken\",\n\t\t \"spiked bat\",\n\t\t \"fire extinguisher\",\n\t\t \"heavy rock\",\n\t\t \"chunk of dirt\",\n\t\t \"beehive\",\n\t\t \"piece of rotten meat\",\n\t\t \"bear\",\n\t\t \"ton of bricks\",\n\t\t \"jhony's dickkoo\",\n\t\t \"potty of dinosaur\",\n\t\t \"mjonir\",\n\t\t \"strombreaker\",\n\t\t \"cap's shield\",\n\t\t \"smelly egg\",\n\t\t \"hulk's hand\",\n\t\t \"majnu bhai ki painting\",\n\t\t \n\t\t]\n\t\t\n\t\tTHROW = [\n\t\t \"throws\",\n\t\t \"flings\",\n\t\t \"chucks\",\n\t\t \"hurls\",\n\t\t]\n\t\t\n\t\tHIT = [\n\t\t \"hits\",\n\t\t \"whacks\",\n\t\t \"slaps\",\n\t\t \"smacks\",\n\t\t \"bashes\",\n\t\t \"fek ke mari\",\n\t\t]\n\t\t\n\t\thits = random.choice(HIT)\n\t\titem = random.choice(ITEMS)\n\t\tthrows = random.choice(THROW)\n\t\tvictim = f'[{from_user.first_name}](tg://user?id={from_user.id})'\n\t\t\n\t\tSLAP_TEMPLATES = [\n\t\t f\"{hits} {victim} with a {item}.\",\n\t\t f\"{hits} {victim} in the face with a {item}.\",\n\t\t f\"{hits} {victim} around a bit with a {item}.\",\n\t\t f\"{throws} a {item} at {victim}.\",\n\t\t f\"grabs a {item} and {throws} it at {victim}'s face.\",\n\t\t f\"launches a {item} in {victim}'s general direction.\",\n\t\t f\"starts slapping {victim} silly with a {item}.\",\n\t\t f\"pins {victim} down and repeatedly {hits} them with a {item}.\",\n\t\t f\"grabs up a {item} and {hits} {victim} with it.\",\n\t\t f\"ties {victim} to a chair and {throws} a {item} at them.\",\n\t\t f\"gave a friendly push to help {victim} learn to swim in lava.\"\n\t\t f\"ohh no i cant slap this alien {victim}\",\n\t\t]\n\t\t\n\t\tslapped = random.choice(SLAP_TEMPLATES)\n\t\tawait message.edit(slapped)\n","repo_name":"DevcodeOfficial/userbyte","sub_path":"userbyte/plugins/slap.py","file_name":"slap.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"8552834758","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef F(vec):\r\n y1 = vec[0]\r\n y2 = vec[1]\r\n return np.array([[y1+y2], [-y1+y2]])\r\n\r\n\r\ndef EulerOnAutSyst(F, Yinit, n, xn):\r\n h = xn / n\r\n m = Yinit.size\r\n W = np.zeros([m, n + 1])\r\n W[:, 0] = Yinit[:, 0]\r\n for i in range(0, n):\r\n W[:, i + 1] = W[:, i] + h * F(W[:, i])[:, 0]\r\n return W\r\n\r\n\r\nYinit = np.array([[1], [0]]) # y_1(0)=1, y_2(0)=0\r\nxn = 1\r\nn = 10\r\nh = xn/n\r\nW = EulerOnAutSyst(F, Yinit, n, xn)\r\nfor i in range(n + 1):\r\n print(\"t=%.3f\" % (h * i), \"n =\", i, W[:, i])\r\n\r\ny_1 = lambda t: np.e**t * np.cos(t)\r\ny_2 = lambda t: -np.e**t * np.sin(t)\r\nx = np.linspace(0,1,50)\r\nx_w = np.linspace(0,1,n+1)\r\n\r\n#pyplot\r\nplt.figure()\r\nplt.title('h='+str(h))\r\nplt.plot(x_w,W[0])\r\nplt.plot(x, y_1(x))\r\nplt.plot(x_w,W[1])\r\nplt.plot(x, 
y_2(x))\r\nplt.legend(['ApproxY1','Y1','ApproxY2','Y2'])\r\nplt.show()\r\n\r\n#error\r\nprint('e1=',abs(W[0][n]-y_1(xn)))\r\nprint('e2=',abs(W[1][n]-y_2(xn)))\r\n","repo_name":"tdl1304/private-misc","sub_path":"Dataingenior5.semester/Math_py/initial_problem/eulerODESystems.py","file_name":"eulerODESystems.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24229006131","text":"\n# Complete the countWaysToClimb function below.\ndef countWaysToClimb(steps, n):\n\n if n == 1 or n == 2: return n\n\n table = [n + 1]\n\n for i in range(2, n):\n table[i] = table[i - 1] + table[i - 2]\n\n return table[n]\n\n\n\n\nprint(countWaysToClimb([2, 3], 7))\n","repo_name":"mistermaxx/Algorithms-and-Data-Structures","sub_path":"dynamic_programming_count_ways_n_steps_uplevel/dynamic_programming_count_ways_n_steps_uplevel.py","file_name":"dynamic_programming_count_ways_n_steps_uplevel.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16393162662","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate\n\nf=1.0/7.0\nN=10000\na=1\nk1=-3\nk2=-8\nstart=0.0\nstop=200.0\n\nt=np.linspace(start,stop,num=N,endpoint=True)\n\ndef derivs(state,t):\n x,y=state\n deltax=y\n deltay=a*np.sin(2.0*np.pi*f*t)-k1*y-k2*x\n return deltax,deltay\n\nx0=5.0\ny0=0.0\nz0=[x0,y0]\nz=integrate.odeint(derivs,z0,y0)\n\nplt.plot(t,np.sin(2*np.pi*f*t),'k')\nplt.axis([0,stop,-1.1,1.1])\nplt.show()\n","repo_name":"gianna7wu/biol133compneuro","sub_path":"lowpass-2pole.py","file_name":"lowpass-2pole.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72084604967","text":"import random\r\n\r\n\r\n# Define the objective function\r\ndef objective_function(x, y):\r\n return x*2 + y*2 # Example objective function: minimize x^2 + y^2\r\n\r\n# Define the parameters\r\nnum_employed_bees = 10\r\nnum_onlooker_bees = 10\r\nnum_iterations = 10\r\nlower_bound = -10\r\nupper_bound = 10\r\nlimit = 10 # Maximum number of trials for a bee\r\nnum_best_sites = 3\r\n\r\n# Generate random initial solutions\r\nsolutions = []\r\nfor _ in range(num_employed_bees + num_onlooker_bees):\r\n x = random.uniform(lower_bound, upper_bound)\r\n y = random.uniform(lower_bound, upper_bound)\r\n fitness = objective_function(x, y)\r\n solutions.append((x, y, fitness))\r\n\r\n# Define the Artificial Bee Colony algorithm\r\ndef artificial_bee_colony():\r\n best_solution = min(solutions, key=lambda x: x[2])\r\n for iteration in range(num_iterations):\r\n # Employed bee phase\r\n for i in range(num_employed_bees):\r\n solution = solutions[i]\r\n new_solution = explore_neighborhood(solution)\r\n if new_solution[2] < solution[2]:\r\n solutions[i] = new_solution\r\n else:\r\n solutions[i] = solution\r\n\r\n # Onlooker bee phase\r\n probabilities = calculate_probabilities()\r\n for i in range(num_onlooker_bees):\r\n selected_index = select_bee(probabilities)\r\n solution = solutions[selected_index]\r\n new_solution = explore_neighborhood(solution)\r\n if new_solution[2] < solution[2]:\r\n solutions[selected_index] = new_solution\r\n\r\n # Scout bee phase\r\n best_solution = min(solutions, key=lambda x: x[2])\r\n if best_solution[2] > limit:\r\n best_solution = generate_random_solution()\r\n solutions.append(best_solution)\r\n\r\n # Print the 
best solution in each iteration\r\n print(\"Iteration:\", iteration + 1)\r\n print(\"Best solution: (x = {}, y = {})\".format(best_solution[0], best_solution[1]))\r\n print(\"Best fitness:\", best_solution[2])\r\n print()\r\n\r\n return best_solution\r\n\r\n# Explore the neighborhood of a solution by adding random perturbation\r\ndef explore_neighborhood(solution):\r\n x = solution[0] + random.uniform(-1, 1)\r\n y = solution[1] + random.uniform(-1, 1)\r\n x = max(min(x, upper_bound), lower_bound)\r\n y = max(min(y, upper_bound), lower_bound)\r\n fitness = objective_function(x, y)\r\n return (x, y, fitness)\r\n\r\n# Calculate the probabilities for the onlooker bee phase\r\ndef calculate_probabilities():\r\n total_fitness = sum(solution[2] for solution in solutions)\r\n probabilities = [solution[2] / total_fitness for solution in solutions]\r\n return probabilities\r\n\r\n# Select a bee based on probabilities\r\ndef select_bee(probabilities):\r\n r = random.uniform(0, 1)\r\n cumulative_probability = 0\r\n for i, probability in enumerate(probabilities):\r\n cumulative_probability += probability\r\n if r <= cumulative_probability:\r\n return i\r\n return len(probabilities) - 1\r\n\r\n# Generate a random solution\r\ndef generate_random_solution():\r\n x = random.uniform(lower_bound, upper_bound)\r\n y = random.uniform(lower_bound, upper_bound)\r\n fitness = objective_function(x, y)\r\n return (x, y, fitness)\r\n\r\n# Run the Artificial Bee Colony algorithm\r\nbest_solution = artificial_bee_colony()\r\n\r\n# Print the best solution and its fitness\r\nprint(\"Final Best solution: (x = {}, y = {})\".format(best_solution[0], best_solution[1]))\r\nprint(\"Final Best fitness:\", best_solution[2])","repo_name":"mohamedashraf20/Soft_Computing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17612803177","text":"__author__ = 'bcm'\nimport pygame\nfrom random import random, choice\nfrom common import Colors as C\n\nclass Vertex():\n def __init__(self, inner_color=C.vertex_color,\n outer_color=C.vertex_boarder_color,\n view_size_scaler=1,\n name=None):\n self.name = name\n self.x = None\n self.y = None\n self.dx = int(random() * 100)\n self.dy = int(random() * 100)\n self.thickness = 0\n self.color = inner_color\n self.border_color = outer_color\n self.degree = 0\n self.size = 30\n self.view_size_scaler = view_size_scaler\n if self.name is None:\n self.name = 30\n\n def __str__(self):\n return \"(\" + str(self.x) + \", \" + str(self.y) + \")\"\n\n def __eq__(self, other):\n return self.name == other.name\n\n def display(self, screen):\n #fill\n pygame.draw.circle(screen, self.border_color, (\n int(self.x), int(self.y)), self.size * self.view_size_scaler + 3)\n #outline\n pygame.draw.circle(screen, self.color, (\n int(self.x), int(self.y)), self.size * self.view_size_scaler,\n self.thickness)\n\n\n\n\n fontObj = pygame.font.Font(None, max(self.size, 12))\n label = fontObj.render(str(self.name), False, C.word_color)\n\n #pygame.draw.ellipse(screen, self.color, (int(self.x), int(self.y)),\n # (self.x - label.get_width() / 2,\n # self.y - label.get_height() / 2))\n\n screen.blit(label, (int(self.x - label.get_width() / 2),\n int(self.y - label.get_height() / 2)))\n\n def move(self):\n self.x -= self.dx\n self.y -= 
self.dy","repo_name":"escherize/python_crawler","sub_path":"graph/Vertex.py","file_name":"Vertex.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33010347082","text":"import json\n\nfrom pathlib import Path\nfrom evtech import camera_from_json\n\ndef load_dataset(dir_path, loader = camera_from_json):\n \"\"\" Loads a dataset into two arrays of cameras\n \n :param dir_path: Path to the dataset\n :type dir_path: string\n :param loader: function(str, str), optional, defaults to camera_from_json\n :type loader: function\n\n :return: A tuple with list of nadir cams and list of oblique cams\n :rtype: tuple: list,list\n \"\"\"\n nadir_path = Path(dir_path).joinpath(\"nadirs\")\n oblique_path = Path(dir_path).joinpath(\"obliques\")\n\n # Find Jpg/Json pairs\n def load(path):\n cams = []\n for img in path.glob('*.jpg'):\n img_data_path = img.with_suffix('').with_suffix(\".json\")\n\n # Load json data\n with open(img_data_path) as f:\n img_data = json.load(f)\n\n # Add camera\n cams.append(loader(img_data,img))\n return cams\n\n nadirs = load(nadir_path)\n obliques = load(oblique_path)\n\n return nadirs, obliques\n","repo_name":"dnilosek/evtech","sub_path":"evtech/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"6252309144","text":"import logging\n\nfrom sqlalchemy_orm import Model, Database\nfrom .almacenar_orden import Orden, db\nfrom schemas.orden_pb2 import Orden as ProtoOrden\n\nBase = Model()\n\nlogger = logging.getLogger('db:order')\nlogger.setLevel(logging.DEBUG)\nconsola = logging.StreamHandler()\nconsola.setLevel(logging.DEBUG)\nlogger.addHandler(consola)\n\n\ndef actualizar_orden(transaction):\n logger.info(\"actualizar_orden orden %s\", str(transaction.order_id))\n session = db.session()\n order = session.query(Orden).filter_by(message_id=str(transaction.order_id)).first()\n order.status = transaction.status\n order_proto = ProtoOrden(\n id=order.message_id,\n client_id=order.client_id,\n address=order.address,\n status=order.status,\n created_at=order.created_at.timestamp()\n )\n session.commit()\n return order_proto\n","repo_name":"jandresboyaca/Non-monolithic-applications","sub_path":"order/infraestructura/actualizar_orden.py","file_name":"actualizar_orden.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14737631651","text":"from typing import List\n\nimport importlib\n\nfrom provider.aws.security.command import SecurityOptions\nfrom provider.aws.security.data.commands_enabled import COMMANDS_ENABLED\nfrom shared.common import (\n ResourceProvider,\n Resource,\n message_handler,\n)\nfrom shared.error_handler import exception\n\n\ndef build_formatted_commands():\n formatted_commands = []\n for detail_command in COMMANDS_ENABLED:\n parameters = COMMANDS_ENABLED[detail_command][\"parameters\"][0][\"name\"]\n default_value = COMMANDS_ENABLED[detail_command][\"parameters\"][0][\n \"default_value\"\n ]\n formated_command = '{}=\"{}={}\"'.format(\n detail_command, parameters, default_value\n )\n formatted_commands.append(formated_command)\n return formatted_commands\n\n\nclass SecuritytResources(ResourceProvider):\n def __init__(self, options: SecurityOptions):\n \"\"\"\n All resources\n\n :param options:\n \"\"\"\n super().__init__()\n 
self.options = options\n\n @exception\n # pylint: disable=too-many-locals\n def get_resources(self) -> List[Resource]:\n\n commands = self.options.commands\n\n result = []\n\n # commands informed, checking for specific commands\n if not commands:\n commands = build_formatted_commands()\n # show all commands to check\n if commands[0] == \"list\":\n message_handler(\"\\nFollowing commands are enabled\\n\", \"HEADER\")\n for detail_command in COMMANDS_ENABLED:\n parameters = COMMANDS_ENABLED[detail_command][\"parameters\"][0][\"name\"]\n default_value = COMMANDS_ENABLED[detail_command][\"parameters\"][0][\n \"default_value\"\n ]\n description = COMMANDS_ENABLED[detail_command][\"short_description\"]\n\n formated_command = 'cloudiscovery aws-security -c {}=\"{}={}\"'.format(\n detail_command, parameters, default_value\n )\n message_handler(\n \"{} - {} \\nExample: {}\\n\".format(\n detail_command, description, formated_command\n ),\n \"OKGREEN\",\n )\n else:\n for command in commands:\n command = command.split(\"=\")\n\n # First position always is command\n if command[0] not in COMMANDS_ENABLED:\n message_handler(\n \"Command {} doesn't exists.\".format(command[0]), \"WARNING\"\n )\n else:\n # Second and thrid parameters are class and method\n _class = COMMANDS_ENABLED[command[0]][\"class\"]\n _method = COMMANDS_ENABLED[command[0]][\"method\"]\n _parameter = {\n command[1].replace('\"', \"\"): command[2].replace('\"', \"\")\n }\n\n module = importlib.import_module(\n \"provider.aws.security.resource.commands.\" + _class\n )\n instance = getattr(module, _class)(self.options)\n result = result + getattr(instance, _method)(**_parameter)\n\n return result\n","repo_name":"Cloud-Architects/cloudiscovery","sub_path":"cloudiscovery/provider/aws/security/resource/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","stars":697,"dataset":"github-code","pt":"53"} +{"seq_id":"36736672337","text":"import cv2\r\nimport numpy as np\r\nimport os\r\n\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\nimport glob\r\n\r\n# mypath = 'RUGD_all/danet_color'\r\n# onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\r\n\r\nimg_array = []\r\n# rgb_path = '/home/ispl3/Documents/cityscapes/leftImg8bit/test/all_test/'\r\n# result_path = \"/home/ispl3/PycharmProjects/pytorch/PSPNet/exp/cityscapes/gateseg101_aspp8170_tv/result/epoch_220_retrainval/test/ms/color/\"\r\nrgb_path = '/home/ispl3/Documents/ECCV_result/rgb/'\r\nresult_path = \"/home/ispl3/Documents/ECCV_result/gateseg_color_sub/\"\r\nonlyfiles = [f for f in listdir(result_path) if isfile(join(result_path, f))]\r\n\r\n# out = cv2.VideoWriter('project.avi', cv2.VideoWriter_fourcc(*'DIVX'), 30, size)\r\nonlyfiles.sort()\r\nfor filename in onlyfiles:\r\n img1 = cv2.imread(rgb_path + filename)\r\n img4 = cv2.imread(result_path + filename)\r\n # vis = (0.7*img1 + 0.3*img4).astype('uint8')\r\n vis = np.concatenate((img1, img4), axis=1)\r\n height, width, layers = vis.shape\r\n size = (width,height)\r\n img_array.append(vis)\r\n\r\nout2 = cv2.VideoWriter('RUGD_video_4fps.avi', cv2.VideoWriter_fourcc(*'DIVX'), 4, size)\r\n\r\nfor i in range(len(img_array)):\r\n out2.write(img_array[i])\r\nout2.release()","repo_name":"youngsjjn/TrSeg","sub_path":"util/img2vid.py","file_name":"img2vid.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"16152725020","text":"import numpy as 
np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom util import get_mnist, ImageExperiment\nfrom gsdr import GSDRStack\n\nnp.random.seed(123)\n\n# Get the data\ndata, target = get_mnist()\nprint(\"Data Shape:\", data.shape)\nprint(\"Target Shape:\", target.shape)\ninput_size = (28, 28)\ninput_count = data.shape[1]\n\n# Create the network\nhidden_count = 256\nprint(\"Hidden count:\", hidden_count)\n\nforced_latent_count = 10\n\ngsdr = GSDRStack()\ngsdr.add(input_count=input_count, hidden_count=hidden_count, sparsity=0.20)\ngsdr.add(hidden_count=hidden_count, sparsity=0.15)\ngsdr.add(hidden_count=hidden_count, sparsity=0.10)\ngsdr.add(hidden_count=hidden_count, sparsity=0.05, forced_latent_count=forced_latent_count)\n\nlast_layer_index = len(gsdr._layers)-1\ndigit_forced_latents = np.eye(forced_latent_count)\nforced_latents = [{last_layer_index: digit_forced_latents[target[i]]} for i in range(data.shape[0])]\n\ndef plot(exp):\n f, ax = plt.subplots(2, forced_latent_count)\n \n f.set_size_inches(30, 30)\n\n # Generate all digits from 0 to 9\n for j in range(forced_latent_count):\n generated = exp.gsdr.generate(forced_latents={last_layer_index: digit_forced_latents[j]})\n generated = (255 * np.clip(generated, 0, 1).reshape(input_size)).astype(np.uint8)\n img = Image.fromarray(generated)\n ax[0, j].imshow(img)\n ax[0, j].set_title(str(j))\n ax[0, j].axes.get_xaxis().set_visible(False)\n ax[0, j].axes.get_yaxis().set_visible(False)\n\n # Interpolate between 2 and 3\n for j in range(forced_latent_count):\n latent = j / (forced_latent_count-1) * digit_forced_latents[3] + (1 - j / (forced_latent_count-1)) * digit_forced_latents[2]\n generated = exp.gsdr.generate(forced_latents={last_layer_index: latent})\n generated = (255 * np.clip(generated, 0, 1).reshape(input_size)).astype(np.uint8)\n img = Image.fromarray(generated)\n ax[1, j].imshow(img)\n ax[1, j].set_title(\"%.2f\" % (2 + j / (forced_latent_count-1)))\n ax[1, j].axes.get_xaxis().set_visible(False)\n ax[1, j].axes.get_yaxis().set_visible(False)\n\n plt.tight_layout()\n plt.show()\n\nexp = ImageExperiment(gsdr, data, input_size, target=target, plot_func=plot, forced_latents=forced_latents, plot_iters=5000, learn_rate=0.003, epochs=1)\nexp.run()\n","repo_name":"RobinKa/gsdr","sub_path":"examples/mnistlabeled.py","file_name":"mnistlabeled.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"4618771630","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nsys.path.insert(0, os.pardir)\nsys.path.insert(0, os.path.join(os.pardir, 'openmoc'))\nfrom testing_harness import PlottingTestHarness\nfrom input_set import NonUniformLatticeInput\nfrom openmoc.plotter import plot_spatial_fluxes\nimport openmoc\n\n\nclass PlotSpatialFluxesTestHarness(PlottingTestHarness):\n \"\"\"Test spatial flux plotting with a 4x4 lattice.\"\"\"\n\n def __init__(self):\n super(PlotSpatialFluxesTestHarness, self).__init__()\n self.input_set = NonUniformLatticeInput()\n self.num_polar = 4\n self.azim_spacing = 0.5\n self.z_spacing = 2.0\n self.max_iters = 10\n\n def _create_trackgenerator(self):\n \"\"\"Instantiate a TrackGenerator.\"\"\"\n geometry = self.input_set.geometry\n geometry.initializeFlatSourceRegions()\n self.track_generator = \\\n openmoc.TrackGenerator3D(geometry, self.num_azim, self.num_polar,\n self.azim_spacing, self.z_spacing)\n self.track_generator.setSegmentFormation(openmoc.OTF_STACKS)\n\n def _create_solver(self):\n 
super(PlotSpatialFluxesTestHarness, self)._create_solver()\n # Use only 1 thread for FSR numbering reproducibility\n self.solver.setNumThreads(1)\n\n def _run_openmoc(self):\n \"\"\"Run OpenMOC and plot the spatial fluxes in the geometry.\"\"\"\n\n # Run an eigenvalue calculation\n super(PlotSpatialFluxesTestHarness, self)._run_openmoc()\n\n # Specify energy groups for which to plot the spatial flux\n energy_groups = [1, 3, 5, 7]\n\n # Create a series of Matplotlib Figures / PIL Images for different\n # plotting parameters and append to figures list\n self.figures.extend(\n plot_spatial_fluxes(self.solver, gridsize=100, offset=0.1,\n get_figure=True, energy_groups=energy_groups))\n self.figures.extend(\n plot_spatial_fluxes(self.solver, gridsize=100, get_figure=True,\n xlim=(0., 2.), ylim=(0., 2.), offset=0.1,\n energy_groups=energy_groups))\n self.figures.extend(\n plot_spatial_fluxes(self.solver, gridsize=100, get_figure=True,\n energy_groups=energy_groups, offset=0.1,\n library='pil'))\n self.figures.extend(\n plot_spatial_fluxes(self.solver, gridsize=100, offset=0.1,\n plane='yz', get_figure=True, energy_groups=energy_groups))\n self.figures.extend(\n plot_spatial_fluxes(self.solver, gridsize=100, get_figure=True,\n xlim=(0., 2.), ylim=(0., 2.), offset=0.1, plane='xz',\n energy_groups=energy_groups))\n\nif __name__ == '__main__':\n harness = PlotSpatialFluxesTestHarness()\n harness.main()\n","repo_name":"mit-crpg/OpenMOC","sub_path":"tests/test_plot_3D_spatial_fluxes/test_plot_3D_spatial_fluxes.py","file_name":"test_plot_3D_spatial_fluxes.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"53"} +{"seq_id":"28114465","text":"# a, b, c, x, y = map(int, input().split())\n\n# ans = 0\n# for i in range(10001):\n# for j in range(10001):\n# for k in range(10001):\n# if (i == x and j == y) or (i * (k * 0.5) == x and j * (k * 0.5) == y):\n# ans = min(ans, i * x + j * y + k * c)\n# print(ans)\n\n\n# a, b, c, x, y = map(int, input().split())\n# ans = 1000000000\n# t = a * x + b * y\n# for i in range(x+1):\n# for j in range((t - a * x) / b):\n# k = x + y - (i + j)\n# if k < 0:\n# break\n# if (i == x and j == y) or (i + (k * 0.5) == x and j + (k * 0.5) == y):\n# ans = min(ans, i * a + j * b + k * c)\n# print(ans)\n\n# -----------------------------\n# 他者の解答\n# -----------------------------\nA, B, C, X, Y = map(int, input().split())\nmx = max(X, Y)\nmn = min(X, Y)\nans = A * X + B * Y\nfor i in range(mx+1):\n # i枚ずつ減らす代わりに、Cを1枚ずつ増やして、最小値を求める\n ans = min(ans, A*max(X-i, 0)+B*max(Y-i, 0)+C*2*i)\nprint(ans)\n","repo_name":"mizutaninaoki/AtCoderPractice","sub_path":"deer_book/intermediate_level/full_search/arc096_a.py","file_name":"arc096_a.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21154925348","text":"from os.path import join, dirname\n\nfrom ovos_plugin_common_play.ocp import MediaType, PlaybackType\nfrom ovos_utils.parse import fuzzy_match\nfrom ovos_workshop.skills.common_play import OVOSCommonPlaybackSkill, \\\n ocp_search\nfrom ovos_utils.process_utils import RuntimeRequirements\nfrom ovos_utils import classproperty\nfrom tutubo.ytmus import *\n\n\nclass YoutubeMusicSkill(OVOSCommonPlaybackSkill):\n def __init__(self):\n super(YoutubeMusicSkill, self).__init__(\"YoutubeMusic\")\n self.supported_media = [MediaType.GENERIC, MediaType.MUSIC]\n self.skill_icon = join(dirname(__file__), \"ui\", 
\"ytmus.png\")\n\n @classproperty\n def runtime_requirements(self):\n return RuntimeRequirements(internet_before_load=True,\n network_before_load=True,\n gui_before_load=False,\n requires_internet=True,\n requires_network=True,\n requires_gui=False,\n no_internet_fallback=False,\n no_network_fallback=False,\n no_gui_fallback=True)\n\n # score\n def calc_score(self, phrase, match, idx=0, base_score=0,\n media_type=MediaType.GENERIC):\n # idx represents the order from youtube\n score = base_score - idx * 5 # - 5% as we go down the results list\n\n if isinstance(match, MusicVideo):\n score -= 10 # penalty for video results\n\n if match.artist:\n score += 80 * fuzzy_match(phrase.lower(), match.artist.lower())\n if match.title:\n score += 80 * fuzzy_match(phrase.lower(), match.title.lower())\n\n if media_type == MediaType.GENERIC:\n score -= 10\n return min(100, score)\n\n # common play\n @ocp_search()\n def search_youtube_music(self, phrase, media_type):\n # match the request media_type\n base_score = 0\n if media_type == MediaType.VIDEO:\n base_score += 25\n\n if self.voc_match(phrase, \"youtube\"):\n # explicitly requested youtube\n base_score += 50\n phrase = self.remove_voc(phrase, \"youtube\")\n\n idx = 0\n for v in search_yt_music(phrase, as_dict=False):\n if isinstance(v, MusicPlaylist):\n # albums / artists / playlists\n score = self.calc_score(phrase, v, idx,\n base_score=base_score,\n media_type=media_type)\n pl = [\n {\n \"match_confidence\": score,\n \"media_type\": MediaType.MUSIC,\n \"length\": entry.length * 1000 if entry.length else 0,\n \"uri\": \"youtube//\" + entry.watch_url,\n \"playback\": PlaybackType.AUDIO,\n \"image\": v.thumbnail_url,\n \"bg_image\": v.thumbnail_url,\n \"skill_icon\": self.skill_icon,\n \"title\": entry.title,\n \"album\": v.title,\n \"artist\": entry.artist,\n \"skill_id\": self.skill_id\n } for entry in v.tracks\n ]\n if pl:\n if isinstance(v, MusicArtist):\n title = v.artist + \" (Featured Tracks)\"\n elif isinstance(v, MusicAlbum):\n title = v.title + \" (Full Album)\"\n elif isinstance(v, MusicPlaylist):\n title = v.title + \" (Playlist)\"\n else:\n title = v.title\n\n yield {\n \"match_confidence\": score,\n \"media_type\": MediaType.MUSIC,\n \"playlist\": pl,\n \"playback\": PlaybackType.AUDIO,\n \"skill_icon\": self.skill_icon,\n \"image\": v.thumbnail_url,\n \"bg_image\": v.thumbnail_url,\n \"title\": title\n }\n\n else:\n # videos / songs\n score = self.calc_score(phrase, v, idx,\n base_score=base_score,\n media_type=media_type)\n # return as a video result (single track dict)\n yield {\n \"match_confidence\": score,\n \"media_type\": MediaType.VIDEO if isinstance(v, MusicVideo) else MediaType.MUSIC,\n \"length\": v.length * 1000 if v.length else 0,\n \"uri\": \"youtube//\" + v.watch_url,\n \"playback\": PlaybackType.AUDIO,\n \"image\": v.thumbnail_url,\n \"bg_image\": v.thumbnail_url,\n \"skill_icon\": self.skill_icon,\n \"title\": v.title,\n \"artist\": v.artist,\n \"skill_id\": self.skill_id\n }\n idx += 1\n\n\ndef create_skill():\n return YoutubeMusicSkill()\n","repo_name":"OpenVoiceOS/skill-ovos-youtube-music","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2018383642","text":"from turtle import color, bgcolor, speed, right, left, circle, forward, mainloop\r\n\r\n\r\ncolor(\"white\")\r\nbgcolor(\"black\")\r\nspeed(250)\r\nright(45)\r\nfor n in range(150):\r\n if 7 < n < 62:\r\n 
left(5)\r\n if 80 < n < 133:\r\n right(5)\r\n circle(30)\r\n if n < 80:\r\n forward(10)\r\n else:\r\n forward(5)\r\nmainloop()\r\n","repo_name":"henryvfx/infinity-symbol","sub_path":"infinity_symbol.py","file_name":"infinity_symbol.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36260095685","text":"class Solution:\n def __init__(self):\n self.res = []\n def permute(self, nums):\n self.backTrack(nums, [])\n return self.res\n\n def backTrack(self, nums, track):\n if len(nums) == len(track):\n self.res.append(track[:])\n return\n for i in nums:\n if i in track:\n continue\n track.append(i)\n self.backTrack(nums, track)\n track.remove(i)\n\n\nslu = Solution()\nprint(slu.permute([1]))\n","repo_name":"kefirzhang/algorithms","sub_path":"leetcode/python/medium/p046_permute.py","file_name":"p046_permute.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32699606412","text":"import re\n\ndef is_valid_ipv4(ip):\n pattern = r'^(\\d{1,3}\\.){3}\\d{1,3}$'\n\n if re.match(pattern, ip):\n octets = ip.split('.')\n for octet in octets:\n if not (0 <= int(octet) <= 255):\n return False\n return True\n else:\n return False\n\nwhile True:\n ip_address = input(\"Enter an IPv4 address (TO QUIT, TYPE 'exit'): \")\n\n if ip_address.lower() == 'exit':\n break\n\n if is_valid_ipv4(ip_address):\n print(f\"{ip_address} is a valid IPv4 address.\")\n else:\n print(f\"{ip_address} is not a valid IPv4 address.\")\n","repo_name":"Vdntrai/COB-PythonDev","sub_path":"IPV4_VALIDATOR.py","file_name":"IPV4_VALIDATOR.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31627108728","text":"# oj t -c \"python main.py\" -d \"./tests/\" \n\n# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\n# import sys\n# import os\n# f = open('../../../input.txt', 'r')\n# sys.stdin = f\n\n# 再帰メモ化\nimport sys\nsys.setrecursionlimit(10**6)\nfrom functools import lru_cache\n\nn,x = map(int,input().split())\na = list(map(int,input().split()))\nx %= a[-1]\nif(x == 0):\n print(1)\n exit()\n\n@lru_cache(maxsize=10**9)\ndef calc(m):\n # print(m)\n l = 1\n if(m == a[-1]) or (m==0):\n return 1\n for i in range(n-2,0,-1):\n if( m % a[i] == 0):\n l = i+1\n break\n\n res = calc(m - m%a[l])\n res += calc(m - m%a[l] + a[l])\n # now = (m//a[l] +1) * a[l]\n # for i in range(l,n):\n # second = (m//a[i] +1) * a[i]\n # if(now != second):\n # res += calc(now)\n # now = second\n # res += calc(now)\n \n # print(m)\n # print(cand)\n\n return res\n\nans = calc(x)\nprint(ans)\n\n\n\n\n\n\n \n \n\n\n\n\n\n","repo_name":"komajun365/competitive_programming","sub_path":"abc/abc182/f/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38274187498","text":"from bs4 import BeautifulSoup\nfrom .base import Retriever, Meta, ArticleLink\n\nURL = {'base':\"https://www.nature.com\",\n 'search':\"https://www.nature.com/search?q={}&journal=srep,%20ismej,%20ncomms&order=relevance\",\n }\n\nclass Nature(Retriever):\n\n def 
__init__(self) -> None:\n super().__init__(journal='nature', base_url=URL['base'], search_url=URL['search'])\n self.num_pages = 1\n self.meta = {\n 'author':'dc.creator', \n 'citation_date':'dc.date',\n 'publication_date':'dc.date', \n 'doi':'citation_doi',\n 'journal':'citation_journal_title', \n 'publisher':'citation_publisher',\n 'article_type':'citation_article_type',\n 'pdf':'citation_pdf_url',\n 'link':'citation_fulltext_html_url'}\n\n def get_num_pages(self, page_soup):\n pages = page_soup.find_all('li', {'class':'c-pagination__item'})\n if len(pages) >= 2:\n self.num_pages = int(pages[-2].get(\"data-page\"))\n \n def get_page_links(self, page_soup):\n article_links = []\n articles = page_soup.find_all('article')\n for article in articles:\n link = article.find('a')\n uri = link.get('href')\n article_links.append(ArticleLink(title=link.text, url=self.base_url+uri, doi=\"\"))\n return article_links\n\n def get_meta(self, page_soup)->Meta:\n data = {}\n for k,v in self.meta.items():\n els = page_soup.find_all('meta',{'name':v})\n els = [el.get('content') for el in els]\n if len(els)==1:\n data.update({k:els[0]})\n else:\n data.update({k:els})\n return Meta(**data)\n\n def get_sections(self,soup, level=0):\n sections = soup.find_all(self.levels[level])\n if len(sections)==0 and level<4:\n return self.get_sections(soup, level+1)\n if len(sections)==0:\n return None\n if level==4:\n return [s.text for s in sections if s.text]\n else:\n parse = []\n sections = [section for section in sections if section.get('id')]\n for i in range(len(sections)):\n div = soup.find_all('div',{'id':sections[i].get('id')+'-content'})\n if not div:\n parent = str(sections[i].parent)\n start = str(sections[i])\n div = parent[parent.rfind(start):]\n if i==len(sections)-1:\n div = BeautifulSoup(div, \"lxml\")\n else:\n end = str(sections[i+1])\n div = BeautifulSoup(div[:div.rfind(end)], \"lxml\")\n else:\n div=div[0]\n part = {'title':sections[i].text, 'text':self.get_sections(div, level+1)}\n if part['text']:\n parse.append(part)\n return parse","repo_name":"nikitcha/open_parser","sub_path":"open_parser/nature.py","file_name":"nature.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23433495162","text":"import pandas as pd\nimport praw\nimport numpy as np\nimport gc\n#import nltk\n\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom statistics import median\n\n# Metrics class which is used to evaluate whether a user is a bot\n\nclass metrics:\n\n def __init__(self):\n pass\n\n\n def getAuthorData(self, r, author):\n user = praw.models.Redditor(r, name = str(author))\n author_df = pd.DataFrame(columns = ['author', 'body', 'created_utc', 'parent_created_utc', 'isRoot'])\n comments = user.comments.new(limit=50)\n for comment in comments:\n author_df = author_df.append({'author':comment.author,\n 'body':comment.body,\n 'created_utc':comment.created_utc,\n 'parent_created_utc':comment.parent().created_utc,\n 'isRoot': comment.is_root,\n }, ignore_index=True)\n author_df = self.__cleanText(author_df)\n return author_df\n\n\n def __getAuthorText(self, data):\n return data['body'].tolist()\n\n\n def __cleanText(self, data):\n data['body'] = data['body'].str.lower()\n data['body'] = data['body'].str.replace('.', ' ')\n data['body'] = data['body'].str.replace('/', ' ')\n data['body'] = 
data['body'].str.replace(':', ' ')\n data['body'] = data['body'].str.replace('-', ' ')\n data['body'] = data['body'].str.replace('_', ' ')\n return data\n\n\n def __getCommentDiff(self, data):\n return data['created_utc']-data['parent_created_utc']\n\n\n def avgTFIDFCosineSimilarity(self, data):\n data = self.__cleanText(data)\n text = self.__getAuthorText(data)\n text = filter(None, text)\n\n vectorizer = TfidfVectorizer(stop_words=None, strip_accents='ascii')\n v = vectorizer.fit_transform(text)\n avgCosSimilarity = cosine_similarity(v).mean()\n\n return avgCosSimilarity\n\n\n def avgCountCosineSimilarity(self, data):\n data = self.__cleanText(data)\n text = self.__getAuthorText(data)\n text = filter(None, text)\n\n vectorizer = CountVectorizer(stop_words=None, strip_accents='ascii')\n v = vectorizer.fit_transform(text)\n avgCosSimilarity = cosine_similarity(v).mean()\n\n return avgCosSimilarity\n\n\n def avgCommentRate(self, data):\n numComments = data['created_utc'].count()\n delta = (data['created_utc'].max() - data['created_utc'].min())\n return numComments/delta\n\n\n def topLevelProportion(self, data):\n return data['isRoot'].sum()/data['isRoot'].count()\n\n\n #getting parent comments takes a very long time\n def getMedianReplyTime(self, data):\n diff = self.__getCommentDiff(data)\n avgReplyTime = median(diff)\n return avgReplyTime\n\n\n def aggregateMetrics(self, data):\n metrics_df = pd.DataFrame(columns=['count_similarity', 'tfidf_similarity', 'comment_rate', 'top_level_proportion'])\n\n metrics_df = metrics_df.append({'count_similarity': self.avgCountCosineSimilarity(data),\n 'tfidf_similarity': self.avgTFIDFCosineSimilarity(data),\n 'comment_rate': self.avgCommentRate(data),\n 'top_level_proportion': self.topLevelProportion(data),\n 'reply_time': self.getMedianReplyTime(data)}, ignore_index=True)\n return metrics_df\n\n","repo_name":"MatthewTourond/Reddit-Bot-Detector","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41095599381","text":"from django.shortcuts import render, get_object_or_404,redirect\nfrom apps.galeria.models import Fotografia\nfrom apps.galeria.forms import FotografiaForm\nfrom django.contrib import messages\n\ndef index(request):\n if not request.user.is_authenticated:\n messages.error(request,'Usuario nao logado')\n return redirect('login')\n fotografias = Fotografia.objects.order_by('data_fotografia').filter(publicada=True)\n return render(request,'index.html',{'cards': fotografias})\n\ndef imagem(request, foto_id):\n fotografia = get_object_or_404(Fotografia, pk=foto_id)\n return render(request,'imagem.html',{'fotografia': fotografia})\n\ndef buscar(request):\n if not request.user.is_authenticated:\n messages.error(request,'Usuario nao logado')\n return redirect('login')\n fotografias = Fotografia.objects.order_by('data_fotografia').filter(publicada=True)\n if 'q' in request.GET:\n nome_a_buscar = request.GET['q']\n if nome_a_buscar:\n fotografias = fotografias.filter(nome__icontains=nome_a_buscar)\n return render(request, 'index.html',{'cards': fotografias})\n\ndef nova_imagem(request):\n form = FotografiaForm()\n if request.method == 'POST':\n form = FotografiaForm(request.POST,request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request,'Enviado com sucesso')\n return redirect('index')\n return render(request, 'nova_imagem.html',{'form':form})\ndef editar_imagem(request, foto_id):\n 
fotografia = Fotografia.objects.get(id=foto_id)\n form = FotografiaForm(\n instance=fotografia,\n )\n if request.method == 'POST':\n form = FotografiaForm(request.POST,request.FILES,instance=fotografia)\n form.save()\n messages.success(request,'Editado com sucesso')\n return redirect('index')\n return render(request,'editar_imagem.html',{'form':form})\n\ndef deletar_imagem(request,foto_id):\n fotografia = Fotografia.objects.get(id=foto_id)\n messages.success(request,f'Fotografia Deletada com sucesso {fotografia}')\n fotografia.delete()\n return redirect('index')\n\ndef filtro(request,categoria):\n categoria = Fotografia.objects.order_by('data_fotografia').filter(categoria=categoria,publicada=True)\n return render(request,'index.html',{'cards':categoria})","repo_name":"Duarts-D/Projeto-Galeria-de-fotos","sub_path":"apps/galeria/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43446697105","text":"import unittest, random, time\nfrom pi import WSServerFactory, Cache, Pi\nfrom json import dumps\ndef randFloat(limit=10.0):\n return random.uniform(0.0,limit)\nclass TestWSServer(unittest.TestCase):\n def test_onMessageToDB(self):\n p = Pi([])\n p.start(start_wamp_client=False, cacheName='onMessageTest')\n total = random.randint(0,30)\n device_id = 'TESTDEVICEID'\n j = 0\n while j 10:\n raise ValueError(str(targets))\n\n colors = self.colors[: len(targets)]\n markers = self.markers[: len(targets)]\n\n xlabs = f'{PC[0]} ({float(self.vardf.values[self.vardf[\"PC\"] == PC[0], 0])}%)'\n ylabs = f'{PC[1]} ({float(self.vardf.values[self.vardf[\"PC\"] == PC[1], 0])}%)'\n\n fig, ax = Style().paper()\n for target, color, mark in zip(targets, colors, markers):\n indicesToKeep = self.pcadf[\"label\"] == target\n x = self.pcadf.loc[indicesToKeep, PC[0]]\n y = self.pcadf.loc[indicesToKeep, PC[1]]\n ax.scatter(x, y, c=color, marker=mark, s=s, label=str(target))\n\n if elip:\n confidence_ellipse(x, y, ax, edgecolor=color)\n\n ax.set_xlabel(xlabs)\n ax.set_ylabel(ylabs)\n if legend:\n ax.legend(loc=loc)\n return fig\n\n def screenplot(self, **options):\n # a = options.get('adj_left', 0.1)\n # b = options.get('adj_bottom', 0.2)\n lim = options.get(\"PC\", None)\n\n if lim is None:\n data_ = self.vardf\n else:\n data_ = self.vardf.loc[:lim, :]\n\n fig, _ = Style().paper()\n sns.pointplot(x=\"PC\", y=\"Var (%)\", data=data_)\n plt.xticks(rotation=\"vertical\")\n plt.xlabel(\"Principal Component\")\n plt.ylabel(\"Percentage of Variance (%)\")\n return fig\n\n def loadingplot(self, **options):\n lim = options.get(\"alim\", 1.1)\n circle = options.get(\"circle\", 2)\n\n PC = [\"PC1\", \"PC2\"]\n xlabs = f'{PC[0]} ({float(self.vardf.values[self.vardf[\"PC\"] == PC[0], 0])}%)'\n ylabs = f'{PC[1]} ({float(self.vardf.values[self.vardf[\"PC\"] == PC[1], 0])}%)'\n\n fig, ax = Style().paper()\n for i in range(0, self.pca.components_.shape[1]):\n ax.arrow(\n 0,\n 0,\n self.pca.components_[0, i],\n self.pca.components_[1, i],\n head_width=0.05,\n head_length=0.05,\n )\n plt.text(\n self.pca.components_[0, i] + 0.05,\n self.pca.components_[1, i] + 0.05,\n self.featurename[i],\n size=18,\n )\n\n an = np.linspace(0, circle * np.pi, 100)\n plt.plot(np.cos(an), np.sin(an)) # Add a unit circle for scale\n plt.axis(\"equal\")\n ax.set_xlim([-lim, lim])\n ax.set_ylim([-lim, lim])\n ax.set_xlabel(xlabs)\n ax.set_ylabel(ylabs)\n plt.axhline(y=0.0, color=\"b\", linestyle=\"--\")\n plt.axvline(x=0.0, 
color=\"b\", linestyle=\"--\")\n return fig\n\n\nclass CalcLDA:\n \"\"\"\n ============================\n Linear discriminant analysis\n ============================\n\n CalcLDA(round_, scaler, cv)\n\n Methods:\n\n - fit(x, y)\n - getvarkd()\n - getscore()\n - plotlda(adj_left, adj_bottom, acending)\n\n \"\"\"\n\n def __init__(self, **options):\n self.x = None\n self.xval = None\n self.y = None\n self.yval = None\n self.ldaval = None\n self.dual = None\n\n self.round_ = options.get(\"round_\", 1)\n self.vardf = pd.DataFrame()\n self.ldadf = pd.DataFrame()\n self.lda = LinearDiscriminantAnalysis()\n self.scaler = options.get(\"scaler\", StandardScaler())\n self.colors = options.get(\"colors\", COLORs())\n self.markers = options.get(\"markers\", MARKERs())\n self.cv = options.get(\"cv\", 10)\n\n def __repr__(self):\n return f\"{self.__class__.__name__}(\" f\"{self.lda!r})\"\n\n def fit(self, *arrays):\n if len(arrays) == 2:\n self.x = arrays[0]\n self.y = arrays[1]\n self.dual = False\n else:\n self.x = arrays[0]\n self.xval = arrays[1]\n self.y = arrays[2]\n self.yval = arrays[3]\n self.ldaval = None\n self.dual = True\n\n scaler = self.scaler\n X = scaler.fit_transform(self.x)\n self.lda.fit(X, self.y)\n ldax = self.lda.transform(X)\n\n ldname = [f\"LD{i + 1}\" for i in range(ldax.shape[1])]\n self.ldadf = pd.DataFrame(ldax, columns=ldname)\n Y = pd.DataFrame(data=self.y, columns=[\"label\"])\n self.ldadf = pd.concat([self.ldadf, Y], axis=1)\n\n tot = sum(self.lda.explained_variance_ratio_)\n var_exp = [\n round((i / tot) * 100, self.round_)\n for i in sorted(self.lda.explained_variance_ratio_, reverse=True)\n ]\n self.vardf = pd.DataFrame({\"Var (%)\": var_exp, \"LD\": ldname})\n\n if self.dual:\n Xval = scaler.transform(self.xval)\n ldax = self.lda.transform(Xval)\n self.ldaval = pd.DataFrame(ldax, columns=ldname)\n Y = pd.DataFrame(data=self.yval, columns=[\"label\"])\n self.ldaval = pd.concat([self.ldaval, Y], axis=1)\n\n def getvarld(self):\n if self.dual:\n ldaDF1 = pd.concat(\n [\n self.ldadf,\n pd.DataFrame(data=self.ldadf[\"label\"].values, columns=[\"Class\"]),\n ],\n axis=1,\n )\n ldaDF1[\"Class\"] = \"Training\"\n\n ldaDF2 = pd.concat(\n [\n self.ldaval,\n pd.DataFrame(data=self.ldaval[\"label\"].values, columns=[\"Class\"]),\n ],\n axis=1,\n )\n ldaDF2[\"Class\"] = \"Testing\"\n\n ldaDF = pd.concat([ldaDF1, ldaDF2], axis=0)\n else:\n ldaDF = self.ldadf\n\n return ldaDF, self.vardf\n\n def getscore(self):\n from sklearn.model_selection import cross_val_score\n\n return cross_val_score(LinearDiscriminantAnalysis(), self.x, self.y, cv=self.cv)\n\n def plotlda(self, **options):\n elip = options.get(\"ellipse\", True)\n ascending = options.get(\"ascending\", True)\n legend = options.get(\"legend\", True)\n loc = options.get(\"loc\", \"best\")\n\n self.ldadf = self.ldadf.sort_values(by=[\"label\"], ascending=ascending)\n nlabel = np.unique(self.y)\n if len(nlabel) < 3:\n fig, ax = Style().paper()\n s = options.get(\"size\", 10)\n\n if self.dual:\n self.ldaval = self.ldaval.sort_values(by=[\"label\"], ascending=ascending)\n ax = sns.stripplot(x=\"label\", y=\"LD1\", color=\"k\", size=s, data=self.ldadf)\n ax = sns.stripplot(\n x=\"label\",\n y=\"LD1\",\n marker=\"^\",\n color=\"red\",\n size=s,\n data=self.ldaval,\n )\n else:\n ax = sns.stripplot(x=\"label\", y=\"LD1\", size=s, data=self.ldadf)\n\n ax.set_xlabel(\"Classes\")\n ax = plt.axhline(y=0, linewidth=1.5, color=\"black\", linestyle=\"--\")\n return fig\n else:\n targets = list(self.ldadf[\"label\"].unique())\n\n s = 
options.get(\"size\", 90)\n if len(targets) > 10:\n raise ValueError(str(targets))\n\n colors = self.colors[: len(targets)]\n markers = self.markers[: len(targets)]\n\n xlabs = f\"LD1 ({self.vardf.values[0, 0]}%)\"\n ylabs = f\"LD2 ({self.vardf.values[1, 0]}%)\"\n\n fig, ax = Style().paper()\n for target, color, mark in zip(targets, colors, markers):\n indicesToKeep = self.ldadf[\"label\"] == target\n x = self.ldadf.loc[indicesToKeep, \"LD1\"]\n y = self.ldadf.loc[indicesToKeep, \"LD2\"]\n ax.scatter(x, y, c=color, marker=mark, s=s, label=target)\n\n if elip:\n confidence_ellipse(x, y, ax, edgecolor=color)\n\n if self.dual:\n self.ldaval = self.ldaval.sort_values(by=[\"label\"], ascending=ascending)\n\n for target, color, mark in zip(targets, colors, markers):\n indicesToKeep = self.ldaval[\"label\"] == target\n x = self.ldaval.loc[indicesToKeep, \"LD1\"]\n y = self.ldaval.loc[indicesToKeep, \"LD2\"]\n ax.scatter(\n x,\n y,\n marker=mark,\n s=s,\n facecolors=\"none\",\n edgecolors=color,\n label=f\"{target} - test\",\n )\n\n if legend:\n ax.legend(loc=loc)\n ax.set_xlabel(xlabs)\n ax.set_ylabel(ylabs)\n\n return fig\n","repo_name":"Shidiq/intro-python-ds","sub_path":"notebooks/snhlib/dataviz.py","file_name":"dataviz.py","file_ext":"py","file_size_in_byte":12923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"919599303","text":"# app/__init__.py\n\n# Library imports\nfrom flask import request, jsonify, abort, make_response\nfrom flask_api import FlaskAPI\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import and_\nfrom datetime import datetime\n\n# Local imports\nfrom instance.config import app_config\n\n# Initialize SQLAlchemy\ndb = SQLAlchemy()\n\ndef create_app(config_name):\n\n from app.models import Item, Bid, User\n\n app = FlaskAPI(__name__, instance_relative_config=True)\n app.config.from_object(app_config[config_name])\n app.config.from_pyfile('config.py')\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n db.init_app(app)\n\n\n @app.route('/')\n def index():\n return 'Auction API'\n\n @app.route('/items/all', methods=['GET'])\n def items_all():\n items = Item.get_all()\n results = []\n for item in items:\n obj = {\n 'id': item.item_id,\n 'name': item.name,\n 'description': item.description,\n 'start_time': item.start_time,\n 'end_time': item.end_time,\n 'start_amount': item.start_amount\n }\n results.append(obj)\n response = jsonify(results)\n response.status_code = 200\n return response\n\n @app.route('/items/upcoming', methods=['GET'])\n def items_upcoming():\n current_time = str(datetime.now())\n items = Item.query.filter(current_time < Item.start_time).all()\n results = []\n for item in items:\n obj = {\n 'id': item.item_id,\n 'name': item.name,\n 'description': item.description,\n 'start_time': item.start_time,\n 'end_time': item.end_time,\n 'start_amount': item.start_amount\n }\n results.append(obj)\n response = jsonify(results)\n response.status_code = 200\n return response\n\n @app.route('/items/previous', methods=['GET'])\n def items_previous():\n current_time = str(datetime.now())\n items = Item.query.filter(current_time > Item.end_time).all()\n results = []\n for item in items:\n obj = {\n 'id': item.item_id,\n 'name': item.name,\n 'description': item.description,\n 'start_time': item.start_time,\n 'end_time': item.end_time,\n 'start_amount': item.start_amount\n }\n results.append(obj)\n response = jsonify(results)\n response.status_code = 200\n return response\n\n @app.route('/item/', 
methods=['GET'])\n def item_details(id):\n result = []\n current_time = str(datetime.now())\n #For previous auctions\n item = Item.query.filter(and_(Item.item_id == id, current_time > Item.end_time)).first()\n if item is not None:\n obj = {\n 'id': item.item_id,\n 'name': item.name,\n 'buyer': '',\n 'amount': '',\n 'status': 'auction complete'\n }\n #For ongoing auctions\n else:\n item = Item.query.filter(and_(Item.item_id == id, current_time > Item.start_time, current_time < Item.end_time)).first()\n if item is not None:\n obj = {\n 'id': item.item_id,\n 'name': item.name,\n 'highest_bid': '',\n 'status': 'auction ongoing'\n }\n else:\n obj = {\n 'status': 'auction has not been started yet'\n }\n result.append(obj)\n response = jsonify(result)\n response.status_code = 200\n return response\n\n @app.route('/bids/user/', methods=['GET'])\n def bids_by_user(id):\n result = []\n user = User.query.get(id)\n #If user_id doesn't exist\n if not user:\n obj = {\n 'message': 'User not registered'\n }\n result.append(obj)\n response = jsonify(result)\n response.status_code = 404\n return response\n\n bids = Bid.query.filter_by(placed_by=user.user_id).all()\n #If no bid by current user\n if not bids:\n obj = {\n 'message': 'No bids placed by this User'\n }\n result.append(obj)\n response = jsonify(result)\n response.status_code = 404\n return response\n\n #Display bids by user and item names\n for bid in bids:\n item = Item.query.get(bid.bid_on_item)\n obj = {\n 'bid_id': bid.bid_id,\n 'bid_price': bid.bid_amount,\n 'item_name': item.name\n }\n result.append(obj)\n response = jsonify(result)\n response.status_code = 200\n return response\n\n @app.route('/item/bid', methods=['POST'])\n def bid_on_item():\n \"\"\"Check whether user is logged in or not\"\"\"\n auth_header = request.headers.get('Authorization')\n access_token = auth_header.split(\" \")[1]\n\n if access_token:\n #Attempt to decode the token and get the user_id\n user_id = User.decode_token(access_token)\n if not isinstance(user_id, str):\n #User is authenticated\n\n if request.method=='POST':\n bid_amount = float(request.data.get('bid_amount', ''))\n bid_on_item = int(request.data.get('bid_on_item', ''))\n if bid_amount and bid_on_item:\n placed_bid = Bid(placed_by=user_id, bid_amount=bid_amount, bid_on_item=bid_on_item)\n placed_bid.save()\n item = Item.query.filter_by(item_id=placed_bid.bid_on_item).first()\n response = jsonify({\n 'status': 'bid successfully placed',\n 'bid_id': placed_bid.bid_id,\n 'bid_amount': placed_bid.bid_amount,\n 'placed_on': item.name,\n })\n return make_response(response), 201\n else:\n response = jsonify({\n 'status': 'send a POST request with bid_amount and bid_on_item as form data to place bid'\n })\n return make_response(response), 200\n else:\n #User is not legit\n message = user_id\n response = {\n 'message': message\n }\n return make_response(jsonify(response)), 401\n\n\n #Register auth blueprint\n from .auth import auth_blueprint\n app.register_blueprint(auth_blueprint)\n\n return app\n","repo_name":"abhishekgupta5/auction_api","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3188818849","text":"import matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nimport pandas as pd\nimport numpy as np\nfrom grave import FactorizationMachine\nfrom utils import color_by_band_gap\n\n\nif __name__ == '__main__':\n fm = 
FactorizationMachine.load_model(\"../out/all_stable_bandgap_dim20.fm.ctx10_add_cont.model\")\n\n atom_vectors = fm.W\n\n df = pd.read_pickle(\"../out/all_stable_bandgap.pkl\")\n\n exclude_zero = False\n\n X = []\n color_map = []\n band_gaps = []\n for i in range(len(df['structure'])):\n struct = df['structure'][i]\n band_gap = df['band_gap'][i]\n\n if band_gap == 0.0 and exclude_zero:\n continue\n\n vectors = []\n for element in struct.species:\n vectors.append(np.array(atom_vectors[fm.dictionary[element.name]]))\n X.append(np.mean(vectors, axis=0))\n color_map.append(color_by_band_gap(band_gap))\n band_gaps.append(band_gap)\n\n pca = PCA(n_components=2)\n result = pca.fit_transform(X)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(result[:, 0], result[:, 1], c=color_map)\n\n plt.show()\n","repo_name":"lantunes/materials-sandbox","sub_path":"scripts/pca_mean_grave_vectors.py","file_name":"pca_mean_grave_vectors.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23571451767","text":"import cv2\nfrom time import sleep\ncap = cv2.VideoCapture(0)\nsleep(0.05)\n\ndef getImg(display= False,size=[480,240]):\n _, img = cap.read()\n img = cv2.resize(img,(size[0],size[1]))\n if display:\n cv2.imshow('IMG',img)\n return img\n\nif __name__ == '__main__':\n while True:\n img = getImg(True)\n cv2.waitKey(0)\n cap.release()\n cv2.destroyAllWindows()","repo_name":"jasonN17/AutonomousCar","sub_path":"WebcamModule.py","file_name":"WebcamModule.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7954234543","text":"import bioread\nimport datetime\nimport numpy as np\n\nimport pandas\n\ndata = bioread.read_file(\"/Users/odayan/Downloads/NIH_Doors_sample_2222.acq\")\n\n\ndf = pandas.DataFrame()\ndf['Time'] = data.channels[1].time_index\n\nprint(data.channels)\nchannelsData = pandas.DataFrame(data.channels)\nprint(channelsData)\nprint(data.channels[1])\nprint(data.channels[1].data)\n\n\n# dataArray = np.array(data.channels[1].data).byteswap().newbyteorder()\n\ndf['Data'] = data.channels[1].data\n\ndf[\"Time\"] = df[\"Time\"].astype(float)\ndf[\"Time\"] = df[\"Time\"].round(3)\ndf = df.set_index(\"Time\")\n\nprint(df)\n\ngameDF = pandas.read_csv(\"./Df.csv\")\ngameDF[\"CurrentTime\"] = gameDF[\"CurrentTime\"].round(2)\ngameDF = gameDF.set_index(\"CurrentTime\")\n\nprint(df.dtypes)\nprint(\"============\")\nprint(gameDF.dtypes)\n\nmergedDF = gameDF.join(df)\n#\n# gameDF.set_index(\"CurrentTime\").join(df.set_index(\"Time\"))\nprint(mergedDF)\nmergedDF.to_csv(\"./merged.csv\")\n","repo_name":"omerday/Doors-Task","sub_path":"Assignments/Doors/ACQConvert.py","file_name":"ACQConvert.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72264397927","text":"def xml(entity):\n entity.__original_representation__ = xmls_to_objs(entity)\n return entity\n\nclass _container(object):\n pass\n\ndef xmls_to_objs(entity):\n @staticmethod\n def xml_to_entity_objs(objs):\n new_objs = list()\n cls_name = entity.__realname__\n dimension_names = entity.__dimensions__\n for xml_obj in objs.getElementsByTagName(cls_name):\n new_obj = _container()\n new_obj.__class__ = entity\n for dim in dimension_names:\n dim_value = xml_obj.getElementsByTagName(dim.name)[0]\n setattr(new_obj, dim.name, 
dim.type(dim_value.firstChild.nodeValue))\n new_objs.append(new_obj)\n return new_objs\n return xml_to_entity_objs\n\n\nclass xmlpath(object):\n def __init__(self, node_name):\n self.__node_name = node_name\n\n def __call__(self, entity):\n entity.__XML__ = self.__node_name\n entity.__original_representation__ = \"XML\"\n","repo_name":"lhn136/Webcrawler--master","sub_path":"spacetime-crawler-master/rtypes/connectors/xml.py","file_name":"xml.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"14743076207","text":"from matplotlib import pyplot as plt\nfrom sklearn import tree\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.cluster import KMeans\nimport webbrowser\nfrom pandas import DataFrame\n\ndatos = []\nwith open ('CDMXYEDO.csv','r') as archivo:\n lineas = archivo.read().splitlines()\n lineas.pop(0)\n for i in lineas:\n linea = i.split(',')\n datos.append([float(linea[1]), float(linea[2])])\n\ny = []\nz = []\n\nfor elem in sorted(datos):\n y.append(elem[0])\n z.append(elem[1])\nprint(y)\nprint(z)\n\nx=np.array([2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020]).reshape((-1,1))\n\n\n\nmodel=LinearRegression()\nmodel.fit(x,y)\nR_sq= model.score(x,y)\ny_pred_CDMX = model.predict(x)\nprint(y_pred_CDMX)\n\nmodel2=LinearRegression()\nmodel2.fit(x,z)\nR_sq2= model2.score(x,z)\ny_pred_EDO = model2.predict(x)\nprint(y_pred_EDO)\n\ndatosStr=\"[\"\nfor i in range(len(x)):\n if(i==len(x)-1):\n datosStr=datosStr+str(y_pred_CDMX[i])+\"]\"\n elif(i\n\n\n\t\n\t\t\n\tModulo de python\n\t\n \n\n\t\n\t\n\t\n\n\n \n \n \n \n\n\n\n\"\"\"\nf.write(mensaje)\nf.close()\nwebbrowser.open_new_tab('algoritmos.html')","repo_name":"Benjamngarcia/Machine-Learning","sub_path":"Algoritmo.py","file_name":"Algoritmo.py","file_ext":"py","file_size_in_byte":16786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2724234218","text":"import re\nfrom collections import Counter\n\nfrom parse_dictionary import get_translation\n\n\nKNOWN_WORDS = 200\n\nsegmented = open('segmented_book.txt').readlines()[:1000]\nPUNCTUATION = \"!?。。"#$%&'()*-+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏.\"\n\nc = Counter()\nfor line in segmented:\n words = re.sub(rf'[{PUNCTUATION}]+', ' ', line).split()\n c.update(words)\n\nprint(c.most_common(KNOWN_WORDS))\nprint(f'{len(c)}=')\n\nprint('')\nprint('KNOWN WORDS')\nknown_words = set()\nfor word, _ in c.most_common(KNOWN_WORDS):\n print(word, get_translation(word))\n known_words.add(word)\nprint('/KNOWN WORDS')\nprint()\n\nDEBUG = False\n\nfor line in segmented[:20]:\n print(line)\n line = line[:-1] + ' '\n word = ''\n for character in line:\n # print(f'{character=}')\n if character in PUNCTUATION:\n if DEBUG:\n print('char: ', character)\n else:\n print(character, end=' ')\n word = ''\n elif character == ' ':\n if word:\n if word in known_words:\n if DEBUG:\n print('known word: ', word)\n else:\n print(word, end=' ')\n else:\n # print(f'{word=}')\n translation = get_translation(word, debug=DEBUG)\n if translation[0] == '*':\n translation = ' '.join([get_translation(c, debug=DEBUG) for c in word])\n if DEBUG:\n print('translation:', translation)\n else:\n print(translation, end=' ')\n word = ''\n else:\n word += character\n print()\n 
print()\n","repo_name":"vpavlenko/bazooka","sub_path":"content/segmenter_playground/print_chapter_with_replacement.py","file_name":"print_chapter_with_replacement.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24238262458","text":"\ndef teachInsertBelSelected():\n\n global XcurPos\n global YcurPos\n global ZcurPos\n global RxcurPos\n global RycurPos\n global RzcurPos\n global WC\n global J7PosCur\n\n checkSpeedVals()\n\n try:\n selRow = tab1.progView.curselection()[0]\n selRow += 1\n except:\n last = tab1.progView.index(\"end\")\n selRow = last\n tab1.progView.select_set(selRow)\n\n Speed = speedEntryField.get()\n speedtype = speed_option.get()\n\n if speedtype == \"Seconds\":\n speedPrefix = \"Ss\"\n if speedtype == \"mm per Sec\":\n speedPrefix = \"Sm\"\n if speedtype == \"Percent\":\n speedPrefix = \"Sp\"\n\n ACCspd = ACCspeedField.get()\n DECspd = DECspeedField.get()\n ACCramp = ACCrampField.get()\n Rounding = roundEntryField.get()\n movetype = options.get()\n \n def get_new_pos():\n \"\"\"docstring\"\"\"\n\n return (\n f\"{movetype} [*] X {XcurPos} Y {YcurPos} Z {ZcurPos} Rz {RzcurPos} \"\n f\"Ry {RycurPos} Rx {RxcurPos} J7 {J7PosCur} J8 {J8PosCur} J9 {J9PosCur} \"\n f\"{speedPrefix} {Speed} Ac {ACCspd} Dc {DECspd} Rm {ACCramp} $ {WC}\"\n )\n\n def case1():\n \"\"\"docstring\"\"\"\n\n tab1.progView.insert(selRow, get_new_pos())\n tab1.progView.selection_clear(0, END)\n tab1.progView.select_set(selRow)\n value = tab1.progView.get(0, END)\n pickle.dump(value, open(ProgEntryField.get(), \"wb\"))\n\n def case2():\n \"\"\"docstring\"\"\"\n\n tab1.progView.insert(selRow, alt_new_pos)\n tab1.progView.selection_clear(0, END)\n tab1.progView.select_set(selRow)\n value = tab1.progView.get(0, END)\n pickle.dump(value, open(ProgEntryField.get(), \"wb\"))\n\n\n if movetype == \"OFF J\":\n movetype = movetype + \" [ PR: \" + str(SavePosEntryField.get()) + \" ]\"\n case1()\n\n if movetype == \"Move Vis\":\n movetype = movetype + \" [ PR: \" + str(SavePosEntryField.get()) + \" ]\"\n case1()\n\n elif movetype == \"Move PR\":\n movetype = movetype + \" [ PR: \" + str(SavePosEntryField.get()) + \" ]\"\n case1()\n\n elif movetype == \"OFF PR \":\n movetype = (\n movetype\n + \" [ PR: \"\n + str(SavePosEntryField.get())\n + \" ] offs [ *PR: \"\n + str(int(SavePosEntryField.get()) + 1)\n + \" ] \"\n )\n case1()\n\n elif movetype == \"Move J\":\n case1()\n\n elif movetype == \"Move L\":\n case1()\n\n elif movetype == \"Move R\":\n case1()\n\n elif movetype == \"Move A Mid\":\n case1()\n\n elif movetype == \"Move A End\":\n case1()\n\n elif movetype == \"Move C Center\":\n case1()\n\n elif movetype == \"Move C Start\":\n\n alt_new_pos = movetype + \" [*] X \" + XcurPos + \" Y \" + YcurPos + \" Z \" + ZcurPos\n case2()\n\n elif movetype == \"Move C Plane\":\n\n alt_new_pos = movetype + \" [*] X \" + XcurPos + \" Y \" + YcurPos + \" Z \" + ZcurPos\n case2()\n\n elif movetype == \"Start Spline\" or movetype == \"End Spline\":\n\n alt_new_pos = movetype\n case2()\n\n elif movetype == \"Teach PR\":\n PR = str(SavePosEntryField.get())\n SPE6 = \"Position Register \" + PR + \" Element 6 = \" + RxcurPos\n tab1.progView.insert(selRow, SPE6)\n SPE5 = \"Position Register \" + PR + \" Element 5 = \" + RycurPos\n tab1.progView.insert(selRow, SPE5)\n SPE4 = \"Position Register \" + PR + \" Element 4 = \" + RzcurPos\n tab1.progView.insert(selRow, SPE4)\n SPE3 = \"Position Register \" + PR + \" Element 3 = \" + 
ZcurPos\n tab1.progView.insert(selRow, SPE3)\n SPE2 = \"Position Register \" + PR + \" Element 2 = \" + YcurPos\n tab1.progView.insert(selRow, SPE2)\n SPE1 = \"Position Register \" + PR + \" Element 1 = \" + XcurPos\n tab1.progView.insert(selRow, SPE1)\n value = tab1.progView.get(0, END)\n pickle.dump(value, open(ProgEntryField.get(), \"wb\"))\n\n\ndef teachReplaceSelected():\n try:\n deleteitem()\n selRow = tab1.progView.curselection()[0]\n tab1.progView.select_set(selRow - 1)\n\n except:\n last = tab1.progView.index(\"end\")\n selRow = last\n tab1.progView.select_set(selRow)\n teachInsertBelSelected()\n\n\n","repo_name":"mhyatt000/AR4","sub_path":"general/teach.py","file_name":"teach.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73030256169","text":"import heapq\nINF = int(1e9)\n\nt = int(input())\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\nfor _ in range(t):\n n = int(input())\n graph = []\n for i in range(n):\n graph.append(list(map(int, input().split())))\n\n distances = [[INF] * n for _ in range(n)]\n\n x, y = 0, 0\n q = [(graph[x][y], x, y)]\n distances[x][y] = graph[x][y]\n\n while q:\n dist, x, y = heapq.heappop(q)\n if distances[x][y] < dist:\n continue\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < n:\n cost = dist + graph[nx][ny]\n if distances[nx][ny] > cost:\n distances[nx][ny] = cost\n heapq.heappush(q, (cost, nx, ny))\n\n print(distances[n-1][n-1])\n\n\n# 3\n# 3\n# 5 5 4\n# 3 9 1\n# 3 2 7\n# 5\n# 3 7 2 0 1\n# 2 8 0 9 1\n# 1 2 1 8 1\n# 9 8 9 2 0\n# 3 6 5 1 5\n# 7\n# 9 0 5 1 1 5 3\n# 4 1 2 1 6 5 3\n# 0 7 6 1 6 8 5\n# 1 1 7 8 3 2 3\n# 9 4 0 7 6 4 1\n# 5 8 3 2 4 8 3\n# 7 4 8 4 8 3 4","repo_name":"CHOSIYEON/Algorithms","sub_path":"이코테/Shortest Path/record/화성 탐사.py","file_name":"화성 탐사.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9887523987","text":"from typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar, Union, cast\n\nfrom pydantic import Field\nfrom typing_extensions import Annotated, dataclass_transform, get_origin\n\nfrom dagster._core.errors import DagsterInvalidDagsterTypeInPythonicConfigDefinitionError\n\nfrom .type_check_utils import safe_is_subclass\n\ntry:\n # Pydantic 2.x\n from pydantic.main import ModelMetaclass\nexcept ImportError:\n # Pydantic 1.x\n from pydantic._internal._model_construction import ModelMetaclass # type: ignore\n\nif TYPE_CHECKING:\n from dagster._config.pythonic_config import PartialResource\n\n\n# Since a metaclass is invoked by Resource before Resource or PartialResource is defined, we need to\n# define a temporary class to use as a placeholder for use in the initial metaclass invocation.\n#\n# These initial invocations will use the placeholder values, which is fine, since there's no\n# attributes on the Resource class which would be affected. 
The only time the metaclass will\n# actually change the type annotations is when it's invoked for a user-created subclass of Resource,\n# at which point the placeholder values will be replaced with the actual types.\nclass LateBoundTypesForResourceTypeChecking:\n _TResValue = TypeVar(\"_TResValue\")\n\n class _Temp(Generic[_TResValue]):\n pass\n\n _ResourceDep: Type = _Temp\n _Resource: Type = _Temp\n _PartialResource: Type = _Temp\n\n @staticmethod\n def get_resource_rep_type() -> Type:\n return LateBoundTypesForResourceTypeChecking._ResourceDep\n\n @staticmethod\n def get_resource_type() -> Type:\n return LateBoundTypesForResourceTypeChecking._Resource\n\n @staticmethod\n def get_partial_resource_type(base: Type) -> Type:\n # LateBoundTypesForResourceTypeChecking._PartialResource[base] would be the more\n # correct thing to return, but to enable that deeper pydantic integration\n # needs to be done on the PartialResource class\n # https://github.com/dagster-io/dagster/issues/18017\n return LateBoundTypesForResourceTypeChecking._PartialResource\n\n @staticmethod\n def set_actual_types_for_type_checking(\n resource_dep_type: Type, resource_type: Type, partial_resource_type: Type\n ) -> None:\n LateBoundTypesForResourceTypeChecking._ResourceDep = resource_dep_type\n LateBoundTypesForResourceTypeChecking._Resource = resource_type\n LateBoundTypesForResourceTypeChecking._PartialResource = partial_resource_type\n\n\n@dataclass_transform(kw_only_default=True, field_specifiers=(Field,))\nclass BaseConfigMeta(ModelMetaclass): # type: ignore\n def __new__(cls, name, bases, namespaces, **kwargs) -> Any:\n annotations = namespaces.get(\"__annotations__\", {})\n\n # Need try/catch because DagsterType may not be loaded when some of the base Config classes are\n # being created\n # Any user-created Config class will have DagsterType loaded by the time it's created, so this\n # will only affect the base Config classes (where this error won't be an issue)\n try:\n from dagster._core.types.dagster_type import DagsterType\n\n for field in annotations:\n if isinstance(annotations[field], DagsterType):\n raise DagsterInvalidDagsterTypeInPythonicConfigDefinitionError(name, field)\n\n except ImportError:\n pass\n\n return super().__new__(cls, name, bases, namespaces, **kwargs)\n\n\n@dataclass_transform(kw_only_default=True, field_specifiers=(Field,))\nclass BaseResourceMeta(BaseConfigMeta):\n \"\"\"Custom metaclass for Resource and PartialResource. This metaclass is responsible for\n transforming the type annotations on the class so that Pydantic constructor-time validation\n does not error when users provide partially configured resources to resource params.\n\n For example, the following code would ordinarily fail Pydantic validation:\n\n .. 
code-block:: python\n\n class FooResource(ConfigurableResource):\n bar: BarResource\n\n # Types as PartialResource[BarResource]\n partial_bar = BarResource.configure_at_runtime()\n\n # Pydantic validation fails because bar is not a BarResource\n foo = FooResource(bar=partial_bar)\n\n This metaclass transforms the type annotations on the class so that Pydantic validation\n accepts either a PartialResource or a Resource as a value for the resource dependency.\n \"\"\"\n\n def __new__(cls, name, bases, namespaces, **kwargs) -> Any:\n # Gather all type annotations from the class and its base classes\n annotations = namespaces.get(\"__annotations__\", {})\n for field in annotations:\n if not field.startswith(\"__\"):\n # Check if the annotation is a ResourceDependency\n if (\n get_origin(annotations[field])\n == LateBoundTypesForResourceTypeChecking.get_resource_rep_type()\n ):\n # arg = get_args(annotations[field])[0]\n # If so, we treat it as a Union of a PartialResource and a Resource\n # for Pydantic's sake.\n annotations[field] = Annotated[Any, \"resource_dependency\"]\n elif safe_is_subclass(\n annotations[field], LateBoundTypesForResourceTypeChecking.get_resource_type()\n ):\n # If the annotation is a Resource, we treat it as a Union of a PartialResource\n # and a Resource for Pydantic's sake, so that a user can pass in a partially\n # configured resource.\n base = annotations[field]\n annotations[field] = Annotated[\n Union[\n LateBoundTypesForResourceTypeChecking.get_partial_resource_type(base),\n base,\n ],\n \"resource_dependency\",\n ]\n\n namespaces[\"__annotations__\"] = annotations\n return super().__new__(cls, name, bases, namespaces, **kwargs)\n\n\nSelf = TypeVar(\"Self\", bound=\"TypecheckAllowPartialResourceInitParams\")\n\n\nclass TypecheckAllowPartialResourceInitParams:\n \"\"\"Implementation of the Python descriptor protocol (https://docs.python.org/3/howto/descriptor.html)\n to adjust the types of resource inputs and outputs, e.g. resource dependencies can be passed in\n as PartialResources or Resources, but will always be returned as Resources.\n\n For example, given a resource with the following signature:\n\n .. code-block:: python\n\n class FooResource(Resource):\n bar: BarResource\n\n The following code will work:\n\n .. 
code-block:: python\n\n # Types as PartialResource[BarResource]\n partial_bar = BarResource.configure_at_runtime()\n\n # bar parameter takes BarResource | PartialResource[BarResource]\n foo = FooResource(bar=partial_bar)\n\n # initialization of FooResource succeeds,\n # populating the bar attribute with a full BarResource\n\n # bar attribute is typed as BarResource, since\n # it is fully initialized when a user accesses it\n print(foo.bar)\n\n Very similar to https://github.com/pydantic/pydantic/discussions/4262.\n \"\"\"\n\n def __set_name__(self, _owner, name):\n self._assigned_name = name\n\n def __get__(self: \"Self\", obj: Any, __owner: Any) -> \"Self\":\n # no-op implementation (only used to affect type signature)\n return cast(Self, getattr(obj, self._assigned_name))\n\n def __set__(\n self: \"Self\", obj: Optional[object], value: Union[\"Self\", \"PartialResource[Self]\"]\n ) -> None:\n # no-op implementation (only used to affect type signature)\n setattr(obj, self._assigned_name, value)\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster/_config/pythonic_config/typing_utils.py","file_name":"typing_utils.py","file_ext":"py","file_size_in_byte":7802,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"43054368614","text":"import pprint\nfrom datetime import datetime\n\nimport dotenv\n\nfrom PvMapping.db import Database\n\ndotenv.load_dotenv()\n\n\ndef test_get_lat_lon() -> None:\n db = Database()\n df_plants, df_meters = db.get_lat_lon()\n print(df_plants)\n print(df_meters)\n\n\ndef test_set_nearest_meters() -> None:\n db = Database()\n db.set_nearest_meters([0, 1, 2], [1, 2, 3])\n\n\ndef test_get_meter_metadata() -> None:\n db = Database()\n db.get_meters()\n\n\ndef test_get_real_time_data() -> None:\n db = Database()\n df = db.get_real_time_data(\n datetime.fromisoformat(\"2022-08-01\"), datetime.fromisoformat(\"2022-08-02\")\n )\n pprint.pp(df)\n\n\nif __name__ == \"__main__\":\n # test_get_lat_lon()\n # test_set_nearest_meters()\n # test_get_meter_metadata()\n test_get_real_time_data()\n","repo_name":"simonneidhart/pv_mapping_ch","sub_path":"test/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"33288544150","text":"from tkinter import Frame, Button\nimport tkintermapview\nimport view.config as cf\nfrom view.top_level import PopupImage\n\n\nclass Frame1:\n def __init__(\n self,\n frame_root,\n _config_frame_body,\n ubicacion_view,\n destino_view\n ) -> None:\n\n self.map_view = 0\n self.ubicacion_view = ubicacion_view\n self.destino_view = destino_view\n self.frame_root = frame_root\n self._config_frame_body = _config_frame_body\n # self.toplevel = PopupImage()\n\n self.frame1 = Frame(\n self.frame_root,\n width=800,\n height=450,\n background=cf.PRIMARY_COLOR\n )\n self._config_frame_body(self.frame1)\n\n # MAPA\n\n self.map_widget = tkintermapview.TkinterMapView(\n self.frame1,\n width=800,\n height=450,\n corner_radius=20\n )\n\n self.map_1 = \"https://mt0.google.com/vt/lyrs=m&hl=en&x={x}&y={y}&z={z}&s=Ga\"\n\n self.map_2 = \"https://mt0.google.com/vt/lyrs=s&hl=en&x={x}&y={y}&z={z}&s=Ga\"\n\n self.maps = (self.map_2, self.map_1)\n\n self.map_widget.grid(row=0, column=0)\n self.map_widget.set_position(-24.790245, -65.4021057)\n self.map_widget.set_zoom(13)\n self.change_map()\n\n\n self.ubicacion_view.ver_ubicaciones(self.frame_root, self.map_widget, 
self.destino_view)\n\n # Footer\n footer1 = Frame(self.frame1, background=cf.PRIMARY_COLOR)\n footer1.grid(row=1, column=0)\n\n self.button1 = Button(\n footer1,\n text=\"Mostrar Ubicaciones\",\n background=cf.THIRD_COLOR,\n font=cf.SECONDARY_FONT,\n foreground=cf.SECONDARY_COLOR,\n borderwidth=0,\n relief='ridge',\n width=16,\n command=lambda: self.ubicacion_view.ver_ubicaciones(self.frame_root, self.map_widget, self.destino_view)\n )\n\n self.button1.bind(\"\", self._change_to_red)\n self.button1.bind(\"\", self._change_to_original)\n\n self.button1.grid(row=0, column=0, padx=(18, 20), pady=50)\n\n self.button2 = Button(\n footer1,\n text=\"Cambiar mapa\",\n background=cf.THIRD_COLOR,\n font=cf.SECONDARY_FONT,\n foreground=cf.SECONDARY_COLOR,\n borderwidth=0,\n relief='ridge',\n width=16,\n command=self.change_map\n )\n self.button2.bind(\"\", self._change_to_red)\n self.button2.bind(\"\", self._change_to_original)\n self.button2.grid(row=0, column=1, padx=(20, 20))\n\n self.button3 = Button(\n footer1,\n text=\"Limpiar mapa\",\n background=cf.THIRD_COLOR,\n font=cf.SECONDARY_FONT,\n foreground=cf.SECONDARY_COLOR,\n borderwidth=0,\n relief='ridge',\n width=16,\n )\n\n self.button3.bind(\"\", self._change_to_red)\n self.button3.bind(\"\", self._change_to_original)\n self.button3.grid(row=0, column=2, padx=(20, 20))\n\n def _change_to_red(self, event):\n event.widget['background'] = '#191970'\n\n def _change_to_original(self, event):\n event.widget['background'] = cf.THIRD_COLOR\n\n def change_map(self):\n self.map_view = (self.map_view + 1) % 2\n return self.map_widget.set_tile_server(self.maps[self.map_view], max_zoom=22)\n\n def get_frame(self):\n return self.frame1\n\n\n\n\n","repo_name":"carlos8788/FoodTravel","sub_path":"view/components/frames/frame_1.py","file_name":"frame_1.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70251492327","text":"#!/usr/bin/env python3\n#\n# UnitTest Exercise for title(s): Function\n# titleS.py\n# Another quality exercise by David Jackson\n# For OST Python 2 and Instructor Pat Barton\n# Lesson 2 Project 1\n# January 8, 2015\n#\n\"\"\"\nThis module defines a function and tests it using a TestTitleS class\nthat I define in here. TitleS in the exercise description does\nnot perform like the standard Python string.title() method. Instead,\nit uppercases the first character and leaves the rest of the\ncharacters alone. \n\nThe assignment function was: return s[0].upper.()+s[1:]\nHowever, the standard title() method would be more like:\nreturn s[0].upper.()+s[1:].lower() for each word in the string\nAnd this is my correction to the exercise so it behaves more\nlike the standard Python string.title() method.\n\nI was not able to duplicate a string like '123s and ABCs'.\nstr.title() would return '123S and Abcs'. I could not find a way\nto keep the return '123S'\n\"\"\"\nimport unittest\n\ndef title(s):\n '''Corrected function returns string capitalized like standard\n Python string.title() method. 
'''\n small_words = ['of','to','for','by','in','and','on','if']\n new_str = \"\"\n for word in s.split():\n if word.lower() not in small_words:\n new_str = new_str + \" \" + word[0].upper()+word[1:].lower()\n else:\n new_str = new_str + \" \" + word.lower()\n return new_str.lstrip()\n \n\n\nclass TestTitle(unittest.TestCase):\n \n def test_correct_title(self):\n cor_title = 'A Tale of Two Cities'\n cor_title_expected = 'A Tale of Two Cities'\n self.assertEqual(title(cor_title), cor_title_expected, \"Test matched built-in: Input: {}; Expected: {}; Produced: {}\".format(cor_title, cor_title.title(), title(cor_title)))\n\n def test_incorrect_title(self):\n incor_title = 'a tale of two cities'\n cor_title_expected = 'A Tale of Two Cities'\n self.assertEqual(title(incor_title), cor_title_expected, \"Input: {}; Expected: {} matches Built-in: {}\".format(incor_title, cor_title_expected, incor_title.title()))\n\n def test_weirdtitle(self):\n weird_title = '123s and ABCs'\n weird_title_expected = '123s and Abcs'\n weird_title_correct = '123s and ABCs'\n self.assertEqual(title(weird_title), weird_title_expected, \"Requires Hand Editing--Expected: {}; built-in: {}; proper: {}\".format(weird_title_expected, weird_title.title(), weird_title_correct))\n\n def test_McName(self):\n mc_title = 'McBride goes home'\n mc_title_expected = 'Mcbride Goes Home'\n mc_title_correct = 'McBride Goes Home'\n self.assertEqual(title(mc_title), mc_title_expected, \"Built-in is {}; Should be {}; Must hand edit\".format(mc_title.title(), mc_title_correct))\n \n def test_Irish_Priest(self):\n test_name = \"o'leary\"\n expected = \"O'Leary\"\n self.assertNotEqual(title(test_name), test_name.title(), \"Title case was not executed according to title() method. Expected: {}; built-in: {}; upgraded: {}\".format(expected, test_name.title(), title(test_name)))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n \n","repo_name":"deepbsd/OST_Python","sub_path":"ostPython2/titleS.py","file_name":"titleS.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"252535788","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSolution for day21 2017\n\"\"\"\n\n__author__ = 'Guido Minieri'\n__license__ = 'GPL'\n\n\nwith open('input.txt', 'r') as f:\n data = f.read().splitlines()\n\nfrom itertools import chain, combinations\nfrom math import sqrt\n\ndef parse_rules(data):\n rules = {}\n for rule in data:\n key, value = rule.split(' => ')\n value = tuple(list(x) for x in value.split('/'))\n rules[key] = value\n return rules\n\ndef stringify(pattern):\n return '/'.join([''.join(x) for x in pattern])\n\ndef rotate90(pattern):\n return list(zip(*pattern[::-1]))\n\ndef rotations(pattern):\n yield pattern\n for _ in range(3):\n pattern = rotate90(pattern)\n yield pattern\n\ndef orientations(pattern):\n yield from rotations(pattern)\n yield from rotations(pattern[::-1])\n\ndef decompose(pattern):\n if len(pattern[0]) in [2, 3]: return pattern\n\n elif len(pattern) % 2 == 0:\n stripes = [[v, pattern[i+1]] for i, v in enumerate(pattern) if i % 2 == 0]\n\n pattern = []\n for stripe in stripes:\n unord_stripe = list(zip(*stripe))\n unord_stripe = [[v, unord_stripe[i+1]] for i, v in enumerate(unord_stripe) if i % 2 == 0]\n for couple in unord_stripe:\n pattern.append([[i, j] for i, j in zip(*couple)])\n\n elif len(pattern) % 3 == 0:\n stripes = [[v, pattern[i+1], pattern[i+2]] for i, v in enumerate(pattern) if i % 3 == 0]\n pattern = 
[]\n for stripe in stripes:\n unord_stripe = list(zip(*stripe))\n unord_stripe = [[v, unord_stripe[i+1], unord_stripe[i+2]] for i, v in enumerate(unord_stripe) if i % 3 == 0]\n for couple in unord_stripe:\n pattern.append([[i, j, k] for i, j, k in zip(*couple)])\n\n return pattern\n\ndef compose(quads):\n\n item_len = len(quads[0][0])\n size = int(sqrt(len(quads)))\n pattern = []\n stripes = [quads[x:x+size] for x in range(0, len(quads), size)]\n\n for s in stripes:\n pattern.append([[x] for x in zip(*s)])\n\n res = []\n for ext in pattern:\n for el in ext:\n el = [list(chain.from_iterable(i)) for i in el]\n res.append(el)\n\n return [item for sublist in res for item in sublist]\n\ndef count_pixels(pattern):\n return sum(1 for x in [i for sub in pattern for i in sub] if x == '#')\n\ndef step(pattern, iterations):\n for _ in range(iterations):\n if len(pattern) % 2 == 0 or len(pattern) % 3 == 0:\n perm = decompose(pattern)\n if len(perm) in [2, 3]:\n for rot in orientations(perm):\n if stringify(rot) in rules.keys():\n pattern = list(rules[stringify(rot)])\n break\n else:\n quads = []\n for quad in perm:\n for rot in orientations(quad):\n if stringify(rot) in rules.keys():\n quads.append(rules[stringify(rot)])\n break\n pattern = compose(quads)\n return pattern\n\npattern = \"\"\".#.\n..#\n###\"\"\"\n\nrules = parse_rules(data)\npattern = [list(x) for x in pattern.splitlines()]\n# pt 1\nres1 = step(pattern, 5)\nprint(count_pixels(res1))\n# pt 2\nres2 = step(pattern, 18)\nprint(count_pixels(res2))\n","repo_name":"gmnr/advent-of-code","sub_path":"2017/21/day21.py","file_name":"day21.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27907228743","text":"from __future__ import annotations\n\n__author__ = 'github.com/wardsimon'\n__version__ = '0.0.1'\n\n# SPDX-FileCopyrightText: 2023 easyCrystallography contributors \n# SPDX-License-Identifier: BSD-3-Clause\n# © 2022-2023 Contributors to the easyCore project \n\nfrom typing import List, NoReturn, TYPE_CHECKING, ClassVar, Tuple\n\nfrom .template import CIF_Template, gemmi\nfrom easyCrystallography.Components.Lattice import Lattice as _Lattice\n\nif TYPE_CHECKING:\n from easyCore.Utils.typing import B\n\n\nclass Lattice(CIF_Template):\n\n _CIF_SECTION_NAME: ClassVar[str] = \"_cell\"\n _CIF_CONVERSIONS: ClassVar[List[Tuple[str, str]]] = [\n (\"length_a\", \"_length_a\"),\n (\"length_b\", \"_length_b\"),\n (\"length_c\", \"_length_c\"),\n (\"angle_alpha\", \"_angle_alpha\"),\n (\"angle_beta\", \"_angle_beta\"),\n (\"angle_gamma\", \"_angle_gamma\"),\n ]\n\n def __init__(self, reference_class=_Lattice):\n super().__init__()\n self._CIF_CLASS = reference_class\n\n def from_cif_block(self, block: gemmi.cif.Block) -> B:\n kwargs = {}\n errors = {}\n is_fixed = {}\n for item in self._CIF_CONVERSIONS:\n value = block.find_pair_item(self._CIF_SECTION_NAME + item[1])\n V, E, F = self.string_to_variable(value.pair[1])\n if E:\n errors[item[0]] = E\n if F is not None and not F:\n is_fixed[item[0]] = F\n kwargs[item[0]] = V\n obj = self._CIF_CLASS(**kwargs)\n for error in errors.keys():\n setattr(getattr(obj, error), 'error', errors[error])\n for atr in is_fixed.keys():\n setattr(getattr(obj, atr), 'fixed', is_fixed[atr])\n return obj\n\n def add_to_cif_block(self, obj: B, block: gemmi.cif.Block) -> NoReturn:\n for item in self._CIF_CONVERSIONS:\n value = getattr(obj, item[0])\n block.set_pair(self._CIF_SECTION_NAME + item[1], 
self.variable_to_string(value))\n\n def from_cif_string(self, cif_string: str) -> List[B]:\n\n if \"data_\" not in cif_string:\n cif_string = \"data_temp\\n\" + cif_string\n\n cif_blocks = gemmi.cif.read_string(cif_string)\n objs = []\n for block in cif_blocks:\n objs.append(self.from_cif_block(block))\n return objs\n","repo_name":"easyScience/easyCrystallography","sub_path":"easyCrystallography/io/cif/lattice.py","file_name":"lattice.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69817373929","text":"from PyQt5.QtWidgets import (QDockWidget, QGroupBox, QHBoxLayout, QComboBox, QPushButton, QCheckBox,QSlider, QGraphicsItemGroup)\nfrom PyQt5 import QtCore\nfrom node import Node\n\nclass compression_widget(QDockWidget):\n old_value = 0\n def __init__(self, owner):\n super(compression_widget, self).__init__()\n self.listClass = owner.classVisible.listClass\n self.setWindowTitle(\"Tracing Parameters\")\n self.ClassBox = QGroupBox(self)\n self.ClassBox.classVisibleBoxLayout = QHBoxLayout()\n # self.ClassBox.setTitle(\"\")\n self.setWidget(self.ClassBox)\n self.setFloating(False)\n\n\n self.zoomSlider = QSlider(QtCore.Qt.Horizontal, self.ClassBox)\n self.zoomSlider.setMinimum(0)\n self.zoomSlider.setMaximum(500)\n self.zoomSlider.setValue(250)\n self.ClassBox.classVisibleBoxLayout.addWidget(self.zoomSlider)\n self.ClassBox.setLayout(self.ClassBox.classVisibleBoxLayout)\n #self.compression(owner)\n\n # def compression(self, owner):\n # # scale = pow(2, (self.zoomSlider.value() - 250) / 50.)\n # scale = 100\n # print(scale)\n # class_node = self.listClass[0]\n # self.node_group = QGraphicsItemGroup()\n # print(owner.view.scene.items())\n # owner.view.scene.addItem(self.node_group)\n #\n # for node in owner.view.scene.items():\n # if (type(node) is Node):\n # # print(node.nodeclass == class_node)\n # # if (node.nodeclass == class_node):\n # self.node_group.addToGroup(node)\n # self.node_group.setScale(scale)\n # # self.node_group.setRotation(180)\n # owner.view.scene.destroyItemGroup(self.node_group)","repo_name":"interpolatio/news_stream_analysis","sub_path":"compression_dockwidget.py","file_name":"compression_dockwidget.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38837511227","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom account.send_email import send_notification\n\nfrom product.models import Product\n\nUser = get_user_model()\n\nSTATUS_CHOICES = (\n ('open', 'Открыт'),\n ('in_process', 'В обработке'),\n ('closed', 'Закрыт')\n)\n\n\nclass OrderItem(models.Model):\n # 'Order' пишется в ковычках, потому что класс 'Order' находится ниже класса 'OrderItem' \n order = models.ForeignKey('Order', related_name='items', on_delete=models.CASCADE)\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n quantity = models.SmallIntegerField(default=1) \n\n\nclass Order(models.Model):\n user = models.ForeignKey(User, related_name='orders', on_delete=models.RESTRICT)\n product = models.ManyToManyField(Product, through=OrderItem)\n status = models.CharField(max_length=20, choices=STATUS_CHOICES)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return 
f'{self.user}'\n\n@receiver(post_save, sender=Order)\ndef order_post_save(sender, instance, *args, **kwargs):\n send_notification(instance.user, instance.id)","repo_name":"t4iler/my_first_django_project","sub_path":"order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26662646478","text":"from django.conf.urls import url\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [url(r'^$', views.index, name='index'), ]\n\n#lista libros # name => para on click \nurlpatterns += [path('book/', views.list_book, name='books'), ]\nurlpatterns += [path('book/', views.book_detail, name='book-detail'), ]\n\n#lista autor\nurlpatterns += [path('author/', views.AuthorListView.as_view(), name='authors'), ]\nurlpatterns += [path('mybooks/', views.LoanedBooksByUserListView.as_view(), name='my-borrowed'), ]\n\n#ingresar author\nurlpatterns += [path('author/create/', views.AuthorCreate.as_view(), name='author_create'), ]\n\n#ingresar book\nurlpatterns += [path('book/create/', views.BookCreate.as_view(), name='book_create'), ]","repo_name":"lucas-mercado/LocalLibrery","sub_path":"catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18138415725","text":"import tensorflow as tf\nimport numpy as np\n\nclass SaveTrainableParamsCount(tf.train.SessionRunHook):\n \"\"\"Hook which saves total count of trainable parameters.\n logdir is intended to be the same path as passed to the estimator.\"\"\"\n\n def __init__(self, logdir):\n super().__init__()\n self._logdir = logdir\n\n def begin(self):\n tvars = tf.trainable_variables()\n count = np.sum([np.prod(var.get_shape().as_list()) for var in tvars])\n with open('%s/params_%i.txt' % (self._logdir, int(count)), mode='w') as txt_file:\n txt_file.write('This network contains %i trainable parameters.' 
% int(count))\n","repo_name":"zhangqiuhao/Unsupervised_flow","sub_path":"Python/hooks/Hooks.py","file_name":"Hooks.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73136032167","text":"def solution(genres, plays):\n answer = []\n album = []\n albumIdx = {}\n idx = 0\n for i in range(len(genres)):\n if genres[i] not in albumIdx:\n albumIdx[genres[i]] = idx\n idx += 1\n album.append([])\n album[albumIdx[genres[i]]].append((plays[i], i))\n rank = []\n counts = {}\n for plays in album:\n count = 0\n for play in plays:\n count += play[0]\n rank.append(count)\n plays = [(i, -j) for i, j in plays]\n counts[count] = plays\n rank.sort(reverse = True)\n for i in rank:\n counts[i] = sorted(counts[i], reverse = True)\n answer.append(-counts[i][0][1])\n if len(counts[i]) >= 2:\n answer.append(-counts[i][1][1])\n return answer","repo_name":"juajang/algorithm","sub_path":"Hash/베스트앨범.py","file_name":"베스트앨범.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9012725334","text":"import sys\nfrom PySide6.QtCore import Slot, QStringListModel\nfrom PySide6.QtWidgets import QWidget, QApplication, QCompleter\nfrom Widgets.InvestListWidget import InvestListWidget\nfrom util.Loading import Loading\nfrom UI.OverviewUI import Ui_Form\nfrom util.Ticker import Ticker\n\n\nclass OverviewWidget(QWidget, Ui_Form):\n def __init__(self, parent=None, ticker=\"KRW-BTC\"):\n super().__init__(parent)\n self.setupUi(self)\n self.ticker = ticker\n self.loading = Loading(self) # loading 위젯 로드\n self.getTicker = Ticker()\n self.investListWidget = InvestListWidget()\n self.ticker_cb.addItems(self.getTicker.tickers_kor) # 한글 코인이름으로 목록 작성\n\n # 코인이름 autocomplete 설정\n model = QStringListModel()\n model.setStringList(self.getTicker.tickers_kor)\n completer = QCompleter()\n completer.setModel(model)\n self.ticker_cb.setCompleter(completer)\n self.ticker_cb.currentIndexChanged.connect(self.select_ticker)\n\n self.loading.start()\n\n def select_ticker(self):\n # 한글 이름을 market_code로 변경\n for select_coin in self.getTicker.tickers:\n if select_coin['korean_name'] == self.ticker_cb.currentText():\n self.ticker = select_coin['market']\n self.loading.start() # 로딩 시작\n\n @Slot(float, str)\n def set_data(self, trade_price, change_rate, acc_trade_volume_24h, high_price, acc_trade_price_24h, low_price,\n acc_bid_volume, acc_ask_volume, prev_closing_price, change, current_ticker):\n watch_list = [{\"code\": \"KRW-WAVES\", \"price\": 25220}, {\"code\": \"KRW-ETH\", \"price\": 3799000}]\n if self.ticker == current_ticker:\n if trade_price < 100:\n self.trade_price.setText(f\"{trade_price:.2f}\")\n self.prev_closing_price.setText(f\"{prev_closing_price:.2f}\")\n self.high_price.setText(f\"{high_price:.2f}\")\n self.low_price.setText(f\"{low_price:.2f}\")\n else:\n self.trade_price.setText(f\"{int(trade_price):,}\")\n self.prev_closing_price.setText(f\"{int(prev_closing_price):,}\")\n self.high_price.setText(f\"{int(high_price):,}\")\n self.low_price.setText(f\"{int(low_price):,}\")\n\n if change == 'RISE':\n self.change_rate.setText(f\"+{change_rate * 100:.2f}%\")\n else:\n self.change_rate.setText(f\"-{change_rate * 100:.2f}%\")\n\n self.trade_volume_24H.setText(f\"{round(acc_trade_volume_24h):,} {current_ticker.replace('KRW-', '')}\")\n self.volume.setText(f\"{acc_trade_price_24h / 100000000:,.2f} 억\")\n 
self.volume_power.setText(f\"{acc_bid_volume / acc_ask_volume * 100:+.2f}%\")\n\n self.loading.stop() # 로딩 멈춤\n self.update_style(change)\n\n # for a in watch_list:\n # if current_ticker == a[\"code\"] and trade_price <= a[\"price\"]:\n # print(f\"{current_ticker} 현재가격: {trade_price}, 감시가격: {a['price']}, 감시 가��� 도달\")\n\n def update_style(self, change):\n if change == \"FALL\":\n self.trade_price.setStyleSheet(\"color:blue;\")\n self.change_rate.setStyleSheet(\"background-color:blue;color:white;\")\n elif change == \"RISE\":\n self.trade_price.setStyleSheet(\"color:red;\")\n self.change_rate.setStyleSheet(\"background-color:red;color:white;\")\n elif change == \"EVEN\":\n self.trade_price.setStyleSheet(\"color:black;\")\n self.change_rate.setStyleSheet(\"background-color:black;color:white;\")\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n ob = OverviewWidget()\n ob.show()\n exit(app.exec())\n","repo_name":"BlackRussians/Upbit_Auto_Trade","sub_path":"Widgets/OverviewWidget.py","file_name":"OverviewWidget.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71591906729","text":"from django.urls import path\r\n\r\nfrom . import views\r\nfrom .views import quiz_create\r\n\r\nurlpatterns = [\r\n path('', views.home, name='home'),\r\n path('register/', views.register, name='register'),\r\n path('login/', views.Login, name='login'),\r\n\r\n path('quiz/',views.quiz,name='quiz'),\r\n path('quiz/save/', views.quiz_create,name='quiz_create'),\r\n path('score/',views.submit_result,name='score'),\r\n]","repo_name":"rupinder319/Quiz-application-","sub_path":"pythonProject2/termproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16567813538","text":"# append the times table to our jabberwocky poem\n\n# the first column of numbers should be right justified\nnieces = [\"Emily\", \"Oliver\", \"Sophie\", \"Selina\"]\n\nwith open(\"sample2.txt\", 'w') as tables:\n for i in range(2, 13):\n for j in range(1, 13):\n print(f\"{i} times {j} is {i * j}\", file=tables)\n print(\"-\" * 20, file=tables)\n\nwith open(\"newfile.txt\", 'w+') as entries:\n for niece in nieces:\n print(f\"{niece} is my favorite niece\", end='\\n', file=entries)\n\nwith open(\"newfile.txt\", 'r') as entries:\n lines = entries.readlines()\n\nfor line in lines:\n print(line, end='')\n","repo_name":"Jspriddy/Pycharm-projects","sub_path":"FileIO/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3121950761","text":"import flask\nimport DnD\nimport sqlite3\n\n\n\napp = flask.Flask(__name__)\n\n@app.route('/')\ndef index():\n return flask.render_template(\n 'index.html',\n pageTitle = \"DM's Toolbox\"\n )\n\n@app.route('/players')\ndef listPlayers():\n con = sqlite3.connect('DM_Toolbox.db')\n cur = con.cursor()\n try:\n players = cur.execute('SELECT * FROM players')\n html = ''\n # print(players.fetchone()[0])\n for player in players:\n html += f'''\n \n Character name: {player[0]} |\n ac: {player[1]} |\n hp: {player[2]} \n '''\n con.close()\n return flask.render_template(\"players.html\", pageTitle = \"Player list\", players = html)\n except sqlite3.OperationalError:\n con.close()\n return flask.render_template(\n \"players.html\", \n players = \"\", 
\n pageTitle = \"Player list\"\n )\n\n@app.route('/newPlayer')\ndef newPlayer(): \n return flask.render_template(\n \"newPlayer.html\",\n pageTitle = \"Add new character\"\n )\n\n@app.route('/addPlayer', methods = ['POST', 'GET']) #The Backend\ndef addPlayer():\n\n name = flask.request.form['name']\n ac = flask.request.form['ac']\n hp = flask.request.form['hp']\n con = sqlite3.connect('DM_Toolbox.db')\n cur = con.cursor()\n cur.execute('CREATE TABLE IF NOT EXISTS players(name, ac, hp)')\n cur.execute(\n 'INSERT INTO players VALUES(?, ?, ?)',\n [name, ac, hp]\n )\n con.commit()\n return flask.render_template(\"template.html\", pageTitle = \"SUCCESS, new character created!\")\n\n@app.route(\"/editPlayer/\")\ndef editPlayer(name):\n con = sqlite3.connect('DM_Toolbox.db')\n cur = con.cursor()\n try:\n players = cur.execute('SELECT * FROM players WHERE name = ?', [name])\n\n player = players.fetchone()\n\n player = f'''\n Character name: {player[0]}, \n armor class: {player[1]}, \n hit points: {player[2]} \n \n \n '''\n\n return flask.render_template(\"singlePlayer.html\", pageTitle = name, player = player)\n except sqlite3.OperationalError as err:\n return(f\"Big fail in sqlite query for {name}: {err}\")\n \n@app.route(\"/encounter\")\ndef encounter():\n return(\"Add encounter screen\")\n\n@app.route('/test')\ndef test():\n return 'This is a test'\n\napp.run('0.0.0.0', debug=True)","repo_name":"Coranath/DnD-Utility","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1059035451","text":"from typing import Optional, Tuple\nimport pygame\nimport math\n\nfrom common.item import Item\nfrom core.camera import Camera\nfrom core.math import BBox\nfrom copy import deepcopy\nfrom dataclasses import replace\n\nfrom core.color import Color\n\n\nclass Tile:\n def __init__(\n self,\n img_path: str,\n collision: bool,\n item: Optional[Item] = None,\n interactible=None,\n ) -> None:\n try:\n self.img = pygame.image.load(img_path).convert_alpha()\n except:\n self.img = None\n self.collision = collision\n self.item = item\n self.interactible = interactible\n\n def _resize(self, size: int) -> None:\n if self.img is not None:\n self.img = pygame.transform.scale(self.img, (size, size))\n\n\nclass Map:\n def __init__(self, tiles: dict, tile_size: int) -> None:\n self._tiles = tiles\n self._tile_size = tile_size\n self._map_size = (0, 0)\n self._map: list(Tile) = []\n self._interactibles = set()\n\n # Scale all tiles to desired resolution\n for tile in self._tiles.values():\n tile._resize(tile_size + 1)\n\n def load_from_file(self, path: str) -> None:\n self.clear()\n\n img = pygame.image.load(path).convert_alpha()\n\n self._map_size = img.get_size()\n\n for x in range(self._map_size[0]):\n for y in range(self._map_size[1]):\n color = img.get_at((x, y))\n color = (color.r, color.g, color.b)\n if color not in self._tiles:\n raise ValueError(\n f\"Color {color} at pixel ({x}, {y}) is not a valid tile.\"\n )\n\n tile = self._tiles[color] # This must not be copied.\n self._map.append(tile)\n\n if tile.interactible is not None:\n self._interactibles.add(tile.interactible)\n\n def clear(self):\n self._map_size = (0, 0)\n self._map.clear()\n self._interactibles.clear()\n\n def update(self, window):\n for interactible in self._interactibles:\n interactible.update(window)\n\n def get_tile_size(self) -> int:\n return self._tile_size\n\n def get_map_size(self) -> Tuple[int, int]:\n 
return self._map_size\n\n def get_tile(self, x: int, y: int) -> Tile:\n return self._map[x * self._map_size[1] + y]\n\n def draw(self, camera: Camera) -> None:\n for x in range(self._map_size[0]):\n for y in range(self._map_size[1]):\n tile = self._map[x * self._map_size[1] + y]\n\n # Draw the tile onto the screen\n if tile.img is not None:\n camera.blit(\n tile.img,\n (x * self._tile_size, y * self._tile_size),\n )\n if tile.interactible is not None:\n camera.blit(\n pygame.transform.scale(\n tile.interactible.animation.get_frame(),\n (self._tile_size, self._tile_size),\n ),\n (x * self._tile_size, y * self._tile_size),\n )\n\n def rect_collision(self, bbox: BBox) -> bool:\n for x in range(math.floor(bbox.x), math.floor(bbox.x + bbox.w) + 1):\n for y in range(math.floor(bbox.y), math.floor(bbox.y + bbox.h) + 1):\n if x < 0 or y < 0 or x >= self._map_size[0] or y >= self._map_size[1]:\n return True\n\n tile = self._map[x * self._map_size[1] + y]\n if tile.collision:\n return True\n\n return False\n\n def take_usable_collision(\n self, bbox: BBox, replacement_color: Tuple[int, int, int] = Color.WHITE\n ) -> Optional[Item]:\n for x in range(math.floor(bbox.x), math.floor(bbox.x + bbox.w) + 1):\n for y in range(math.floor(bbox.y), math.floor(bbox.y + bbox.h) + 1):\n if x < 0 or y < 0 or x >= self._map_size[0] or y >= self._map_size[1]:\n return None\n\n tile = self._map[x * self._map_size[1] + y]\n if tile.item is not None:\n item = tile.item\n self._map[x * self._map_size[1] + y] = self._tiles[\n replacement_color\n ]\n return item\n\n def interaction_collision(self, bbox: BBox) -> Optional[int]:\n for x in range(math.floor(bbox.x), math.floor(bbox.x + bbox.w) + 1):\n for y in range(math.floor(bbox.y), math.floor(bbox.y + bbox.h) + 1):\n if x < 0 or y < 0 or x >= self._map_size[0] or y >= self._map_size[1]:\n return None\n\n tile = self._map[x * self._map_size[1] + y]\n if tile.interactible is not None:\n return tile.interactible\n\n return None\n","repo_name":"hyunmila/steel-works-jam-2023","sub_path":"src/components/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"32942322322","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\npd.set_option('display.max_columns', 100)\npd.set_option('display.max_rows', 250)\npd.set_option('display.width', 1000)\n\n\ndata = pd.read_csv('FIFA18_Ultimate_Team_players_2.csv')\n\n\ndf = data.head(1000)\n\nsns.boxplot(x='overall', data=df, orient='h')\nplt.show()\n\ntop_leagues = df['league'].value_counts().sort_values(ascending=False).head(5).index.values\nsns.boxplot(y='league', x='overall', data=df[df['league'].isin(top_leagues)], orient='h')\nplt.show()\n\ndf = pd.read_csv('clients.csv')\nprint(df.shape)\nprint(df.info())\n\nprint(df['churn'].value_counts())\ndf['churn'].value_counts().plot(kind='bar')\nplt.show()\n\n\ncorr_matrix = df.drop(['state', 'international plan', 'voice mail plan', 'area code'], axis=1).corr()\nsns.heatmap(corr_matrix)\nplt.show()\n\ndf = df.drop(['total day charge', 'total night charge', 'total eve charge', 'total intl charge'], axis=1)\n\ncorr_matrix = df.drop(['state', 'international plan', 'voice mail plan', 'area code'], axis=1).corr()\nsns.heatmap(corr_matrix)\nplt.show()\n","repo_name":"RBVV23/PythonForKids","sub_path":"Unit 2 - Analysis and 
visualization/Lesson_10/Classwork.py","file_name":"Classwork.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14132787835","text":"from django import forms\nfrom app2.models import employee\nfrom app2.models import merchants\nfrom app2.models import partners\nfrom app2.models import manufacturer\n\nclass EmpForms(forms.ModelForm):\n class Meta:\n model = employee\n fields = \"__all__\"\n\nclass MerchantForms(forms.ModelForm):\n class Meta:\n model = merchants\n fields = \"__all__\"\n\nclass PartnerForms(forms.ModelForm):\n class Meta:\n model = partners\n fields = \"__all__\"\n\nclass ManufacturerForms(forms.ModelForm):\n billingtype = forms.CharField(\n max_length=200,\n required=False,\n )\n materialtype = forms.CharField(\n max_length=200,\n required=False,\n\n )\n addonstype = forms.CharField(\n max_length=200,\n required=False,\n\n )\n addonsnumber = forms.CharField(\n max_length=200,\n required=False,\n\n )\n\n class Meta:\n model = manufacturer\n # fields = \"__all__\"\n fields = ['id1', 'name' , 'emailid' , 'address' , 'passcode' , 'billingtype' ,'materialtype' , 'addonstype' , 'addonsnumber' , 'phoneno' ]","repo_name":"Karthikgg1995/Safeplate123","sub_path":"djangoProject/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21753122063","text":"import os\nimport sys\n\nFILES_CORE = [\n\t'Sorollet.js',\n\t'utils/Math.js',\n\t'core/Voice.js',\n\t'core/ADSR.js',\n\t'player/Player.js',\n\t'player/Pattern.js'\n]\n\nFILES_GUI = [\n\t'libs/EventTarget.js',\n\t'libs/signals.min.js',\n\t'libs/UI.js',\n\t'libs/StringFormat.js',\n\t'gui/ADSRGUI.js',\n\t'gui/VoiceGUI.js',\n\t'gui/KeyboardGUI.js',\n\t'gui/KnobGUI.js',\n\t'gui/WaveTypeSelectGUI.js',\n\t'gui/MultipleStatePushButton.js',\n\t'gui/ScopeGraph.js'\n]\n\nALL_FILES = FILES_CORE + FILES_GUI\n\ndef merge(files):\n\tbuffer = []\n\n\tfor filename in files:\n\t\tprint(filename)\n\t\twith open(os.path.join('..', 'src', filename), 'r') as f:\n\t\t\tbuffer.append(f.read())\n\n\treturn \"\".join(buffer)\n\n\ndef output(text, filename):\n\twith open(os.path.join('..', 'build', filename), 'w') as f:\n\t\tf.write(text)\n\n\ndef add_header(text):\n\treturn('// sorollet.js - http://github.com/sole/sorollet.js\\n' + text)\n\n\ndef build(files, minified, filename):\n\n\ttext = merge(files)\n\n\toutput(add_header(text), filename)\n\n\n# ---\n\ndef main(argv=None):\n\n\tbuild(ALL_FILES, False, 'Sorollet.js')\n\tbuild(FILES_CORE, False, 'Sorollet-core.js')\n\n# ---\n\nif __name__ == '__main__':\n\tmain()\n\n\n","repo_name":"sole/sorollet.js","sub_path":"utils/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"53"} +{"seq_id":"31161020500","text":"import json\r\nimport logging\r\nimport pickle\r\n\r\nfrom chirpy.core.util import query_es_index, get_elasticsearch\r\nimport tqdm\r\n\r\nfrom chirpy.core.logging_utils import setup_logger, PROD_LOGGER_SETTINGS\r\n\r\nsetup_logger(PROD_LOGGER_SETTINGS)\r\n\r\nMAX_ES_SEARCH_SIZE = 500\r\n\r\nANCHORTEXT_QUERY_TIMEOUT = 3.0 # seconds\r\nENTITYNAME_QUERY_TIMEOUT = 1.0 # seconds\r\n\r\nARTICLES_INDEX_NAME = 'enwiki-20201201-articles'\r\n\r\n# These are the fields we DO want to fetch from ES\r\nFIELDS_FILTER = ['doc_title', 'doc_id', 'categories', 'pageview', 
'linkable_span_info', 'wikidata_categories_all',\r\n 'redirects', 'plural']\r\n\r\n\r\nlogger = logging.getLogger('chirpylogger')\r\n\r\ndef gen_list_of_terms():\r\n times = ['20th', '21st']\r\n nations = ['American', 'British', 'English', 'Indian', 'Japanese', 'French', 'Spanish', 'Italian', 'Australian',\r\n 'German', 'Mexican', 'Swedish', 'Danish']\r\n professions = ['male actors', 'female actors', 'actresses', 'singers', 'rappers', 'YouTubers', 'bloggers', 'male models', 'female models', 'socialites', 'comedians']\r\n template = \"{tms}-century {nation} {prof}\"\r\n all_temps = []\r\n for t in times:\r\n for n in nations:\r\n for p in professions:\r\n all_temps.append(template.format(tms=t, nation=n, prof=p))\r\n for n in nations:\r\n for p in professions:\r\n all_temps.append(n + \" \" + p)\r\n return all_temps\r\n\r\n\r\ndef scrape_es():\r\n all_temps = gen_list_of_terms()\r\n all_celebs = []\r\n for t in all_temps:\r\n query = {'query': {'bool': {\"must\": [{'terms': {'categories.keyword': [t]}},\r\n {\"range\": {\"pageview\": {\"gte\": 20000}}}]}},\r\n 'sort': {'pageview': 'desc'}}\r\n results = query_es_index(es, ARTICLES_INDEX_NAME, query, size=MAX_ES_SEARCH_SIZE,\r\n timeout=ANCHORTEXT_QUERY_TIMEOUT,\r\n filter_path=['hits.hits._source.{}'.format(field) for field in FIELDS_FILTER])\r\n for s in results:\r\n all_celebs.append((s['_source']['doc_title'], s['_source']['pageview'], t))\r\n print(len(all_celebs))\r\n # get top 300 for each category\r\n tops_celebs = {}\r\n for t in all_temps:\r\n core_profession = t.split(\" \")[-1]\r\n if core_profession not in tops_celebs:\r\n tops_celebs.update({core_profession: []})\r\n curr_list_celebs = [x for x in all_celebs if x[2] == t]\r\n curr_list_celebs = [(x[0], x[1]) for x in curr_list_celebs]\r\n tops_celebs[core_profession].extend(curr_list_celebs)\r\n for c in tops_celebs:\r\n tops_celebs[c] = list(set(tops_celebs[c]))\r\n tops_celebs[c].sort(key=lambda x: x[1], reverse=True)\r\n tops_celebs[c] = tops_celebs[c][:300]\r\n return tops_celebs\r\n\r\n\r\ndef filter_entities(ent):\r\n query_term = {'query': {'bool': {'should': [\r\n {'terms': {'doc_title.keyword': [ent]}}]}},\r\n 'sort': {'pageview': 'desc'}}\r\n results = query_es_index(es, ARTICLES_INDEX_NAME, query_term, size=MAX_ES_SEARCH_SIZE,\r\n timeout=ANCHORTEXT_QUERY_TIMEOUT,\r\n filter_path=['hits.hits._source.{}'.format(field) for field in FIELDS_FILTER])\r\n if len(results):\r\n return True\r\n return False\r\n\r\n\r\ndef run_test_es():\r\n ent = \"Wrexham\"\r\n\r\n query = {'query': {'bool': {\"must\": [{'terms': {'categories.keyword': [\"20th-century American male actors\"]}},\r\n {\"range\": {\"pageview\": {\"gte\": 100000}}}]}},\r\n 'sort': {'pageview': 'desc'}}\r\n query_term = {'query': {'bool': {'should': [\r\n {'terms': {'doc_title.keyword': [ent]}}]}},\r\n 'sort': {'pageview': 'desc'}}\r\n results = query_es_index(es, ARTICLES_INDEX_NAME, query_term, size=MAX_ES_SEARCH_SIZE,\r\n timeout=ANCHORTEXT_QUERY_TIMEOUT,\r\n filter_path=['hits.hits._source.{}'.format(field) for field in FIELDS_FILTER])\r\n for r in results:\r\n print(r['_source'].keys())\r\n print(results)\r\n\r\n\r\ndef filter_all_celebs():\r\n filtered_celeb = {}\r\n all_celeb_info = json.load(open(\"all_celeb_info_newest1.json\"))\r\n\r\n for c in tqdm.tqdm(all_celeb_info):\r\n filtered_celeb.update({c: {}})\r\n for k in all_celeb_info[c]:\r\n if k != \"pronoun\" and k != \"total_pg\":\r\n filtered_celeb[c].update({k: []})\r\n for e in all_celeb_info[c][k]:\r\n if filter_entities(e[0]):\r\n 
filtered_celeb[c][k].append(e)\r\n else:\r\n filtered_celeb[c].update({k: all_celeb_info[c][k]})\r\n\r\n json.dump(filtered_celeb, open(\"all_celeb_info_newest1.json\", \"w+\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n es = get_elasticsearch()\r\n # all_celebs = scrape_es()\r\n # pickle.dump(all_celebs, open(\"scraped_celebs.p\", \"wb+\"))\r\n filter_all_celebs()\r\n\r\n","repo_name":"shashank2000/chirpy_lambda","sub_path":"package/chirpy/response_generators/celeb/scripts/es_celeb.py","file_name":"es_celeb.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1946689824","text":"import tensorflow as tf\nimport tensorlayer as tl\nfrom transformers import BertTokenizer, TFBertModel\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.backend import one_hot\nimport numpy as np\nimport chatbot_utils\nimport os\nimport time\n\nprint(f\"using tensorflow v{tf.__version__}\")\nprint(f\"using tensorflow.keras v{tf.keras.__version__}\")\n\n\n\n\n# chatbot model\nclass BertChatbot(object):\n\n \"\"\"\n Trains a chatbot model using tensorflow\n basic architecture: convolutional neural network\n\n This tweaked CNN will remember the history chat between the user and the bot.\n inputs:\n - name of bot\n - name of user\n - chat history (from the start) [state]\n - chat history (a few lines back) [words]\n - current input [words]\n\n # positivity of input (maybe? not sure yet)\n\n outputs:\n - reply [words]\n - chat history (from start) [state]\n \"\"\"\n\n def __init__(self,\n vocab_size=30522,\n max_input=30,\n max_output=30,\n latent_dim=256,\n learning_rate=1e-3,\n n_layer=3):\n self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\n self.max_input = max_input\n self.max_output = max_output\n self.vocab_size = vocab_size\n self.latent_dim = latent_dim\n self.learning_rate = learning_rate\n self.n_layer = n_layer\n self.cls_id = self.tokenizer.cls_token_id\n self.sep_id = self.tokenizer.sep_token_id\n self.pad_id = self.tokenizer.pad_token_id\n\n self.bert_model = TFBertModel.from_pretrained('bert-base-uncased')\n self.bert_model.trainable = False\n\n # defining all layers\n self.enc_inputs = tf.keras.Input(shape=(None,), dtype=tf.int32, name=\"enc_inputs\")\n self.dec_inputs = tf.keras.Input(shape=(None,), dtype=tf.int32, name=\"dec_inputs\")\n\n self.gru = tf.keras.layers.GRU\n self.dense_out = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(self.vocab_size,\n activation=\"softmax\",\n name=\"dense_out\"))\n\n self.enc_layers = [self.gru(self.latent_dim, return_state=True, return_sequences=True) for i in range(self.n_layer)]\n self.dec_layers = [self.gru(self.latent_dim, return_state=True, return_sequences=True) for i in range(self.n_layer)]\n self.enc_states = [None for i in range(self.n_layer)] # [None, None, None]\n self.dec_states = [None for i in range(self.n_layer)] # [None, None, None]\n\n def models(self):\n\n enc_output = self.bert_model(self.enc_inputs, training=False)[0]\n\n for i in range(self.n_layer):\n enc_output, self.enc_states[i] = self.enc_layers[i](enc_output)\n\n dec_output = self.bert_model(self.dec_inputs, training=False)[0]\n\n for i in range(self.n_layer):\n dec_output, self.dec_states[i] = self.dec_layers[i](dec_output, initial_state=self.enc_states[i])\n\n dense_output = self.dense_out(dec_output)\n\n # create training model\n model = tf.keras.Model(inputs=[self.enc_inputs, self.dec_inputs], 
outputs=dense_output)\n\n # create encoder model\n enc_model = tf.keras.Model(inputs=self.enc_inputs, outputs=self.enc_states)\n\n # create decoder model\n dec_state_input1 = tf.keras.Input(shape=(self.latent_dim,))\n dec_state_input2 = tf.keras.Input(shape=(self.latent_dim,))\n dec_state_input3 = tf.keras.Input(shape=(self.latent_dim,))\n dec_state_inputs = [dec_state_input1, dec_state_input2, dec_state_input3]\n\n dec_states_inf = [None for i in range(self.n_layer)]\n dec_output_inf = self.bert_model(self.dec_inputs, training=False)[0]\n\n for i in range(self.n_layer):\n dec_output_inf, dec_states_inf[i] = self.dec_layers[i](dec_output_inf, initial_state=dec_state_inputs[i])\n\n dense_output = self.dense_out(dec_output_inf)\n\n dec_model = tf.keras.Model(inputs=[self.dec_inputs] + dec_state_inputs,\n outputs=[dense_output] + dec_states_inf)\n\n return model, enc_model, dec_model\n\n def decode_sequence(self, input_seq, enc_model, dec_model):\n # Encode the input as state vectors.\n states_value = enc_model.predict(input_seq)\n\n # Populate the first character of target sequence with the start character.\n target_seq = np.zeros((1, 1))\n target_seq[0] = self.cls_id\n # Sampling loop for a batch of sequences\n # (to simplify, here we assume a batch of size 1).\n stop_condition = False\n decoded_tokens = list()\n while not stop_condition:\n output_tokens, state1, state2, state3 = dec_model.predict([target_seq] + states_value)\n\n # Sample a token\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n if sampled_token_index != self.sep_id:\n decoded_tokens.append(sampled_token_index)\n\n # Exit condition: either hit max length or find stop character.\n if (sampled_token_index == self.sep_id or len(decoded_tokens) > self.max_output):\n stop_condition = True\n\n # Update the target sequence (of length 1).\n target_seq = np.zeros((1, 1))\n target_seq[0] = sampled_token_index\n # Update states\n states_value = [state1, state2, state3]\n\n decoded_sentence = self.tokenizer.decode(decoded_tokens)\n return decoded_sentence\n\n def train(self, weights_filepath, enc_weights_filepath, dec_weights_filepath, old_weights=None,\n epochs=1000, steps_per_epoch=100, test_after_train=False):\n start_time = time.time()\n print(\"\\n\\nMODE: Train\")\n print(f\"Test after training: {test_after_train}\\n\") \n if not old_weights:\n model, enc_model, dec_model = self.models()\n elif old_weights:\n model, enc_model, dec_model = self.models()\n print(\"Loading last trained weights...\")\n model.load_weights(old_weights)\n print(\"Loaded!\\n\")\n time.sleep(0.5)\n\n model.summary()\n\n converse_filepath = \"./data/movie_conversations.txt\"\n twitter_filepath = \"./data/chat.txt\"\n lines_filepath = \"./data/movie_lines.txt\"\n checkpoint_path = \"./checkpoints\"\n log_dir = \"./logs\"\n model_filepath = \"./models\"\n\n for path in [checkpoint_path, log_dir, model_filepath]:\n if not os.path.exists(path):\n os.mkdir(path)\n\n twitter_data = chatbot_utils.pull_twitter(twitter_filepath)\n twitter_generator = chatbot_utils.twitter_generator(twitter_data)\n callbacks = list()\n # callbacks.append(tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, verbose=1))\n callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=log_dir))\n optimizer = tf.keras.optimizers.Adam(self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-9)\n\n model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=[tf.keras.metrics.categorical_accuracy])\n model.fit_generator(generator=twitter_generator,\n 
steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n callbacks=callbacks,\n shuffle=False)\n print(\"Saving training_model weights...\")\n model.save_weights(weights_filepath)\n print(\"Saving enc_model weights...\")\n enc_model.save_weights(enc_weights_filepath)\n print(\"Saving dec_model weights...\")\n dec_model.save_weights(dec_weights_filepath)\n print(\"Done!\")\n\n end_time = time.time()\n print(f\"Time taken: {(end_time-start_time)/60} min(s)\")\n if test_after_train:\n self.test(enc_weights_filepath, dec_weights_filepath)\n\n\n def test(self, enc_weights_filepath, dec_weights_filepath):\n print(\"\\n\\nMODE: Test\")\n # load encoder model and decoder model\n _, enc_model, dec_model= self.models()\n print(\"Loading enc_model weights...\")\n enc_model.load_weights(enc_weights_filepath)\n print(\"Loading dec_model weights...\")\n dec_model.load_weights(dec_weights_filepath)\n\n exit_keyword = \".exit\"\n\n while True:\n usr_input = input(\"[USER]: \")\n if usr_input == exit_keyword:\n print(\"Exiting BertChatbot...\")\n break\n else:\n usr_input = self.tokenizer.encode(usr_input, add_special_tokens=True)\n usr_input = pad_sequences(sequences=[usr_input], maxlen=self.max_input,\n padding=\"post\", truncating=\"post\")\n\n decoded_sentence = self.decode_sequence(usr_input, enc_model, dec_model)\n print(f\"[BertChatbot]: {decoded_sentence}\")\n\n\nif __name__ == \"__main__\":\n save_path = r\"D:\\Nyx\\Codes\\SAModels\\chatbot\"\n WEIGHTS_FILEPATH = rf\"{save_path}\\weights.h5\"\n ENC_WEIGHTS_FILEPATH = rf\"{save_path}\\enc_weights.h5\"\n DEC_WEIGHTS_FILEPATH = rf\"{save_path}\\dec_weights.h5\"\n\n bert_chatbot = BertChatbot(learning_rate=0.001)\n bert_chatbot.train(old_weights=WEIGHTS_FILEPATH, epochs=2000,\n weights_filepath=WEIGHTS_FILEPATH,\n enc_weights_filepath=ENC_WEIGHTS_FILEPATH,\n dec_weights_filepath=DEC_WEIGHTS_FILEPATH,\n test_after_train=True)\n # BertChatbot().test(enc_weights_filepath=rf\"{save_path}\\enc_weights.h5\",\n # dec_weights_filepath=rf\"{save_path}\\dec_weights.h5\",)\n\n\n","repo_name":"khabya/SmartAssistant","sub_path":"client/modules/deep_learning/chatbot/chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":9096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22277774280","text":"import open3d as o3d\nimport sys\nfrom pathlib import Path\nfrom os.path import dirname, abspath\n# include path python\ninclude_dir = str(Path(__file__).resolve().parent)\nsys.path.append(include_dir)\nprint(include_dir)\ninclude_dir = str(Path(__file__).resolve().parent.parent)\nsys.path.append(include_dir)\nprint(include_dir)\ninclude_dir = str(Path(__file__).resolve().parent.parent.parent)\nsys.path.append(include_dir)\nprint(include_dir)\nimport teaserpp_python\nimport numpy as np \nimport copy\nfrom helpers import pcd2xyz, extract_fpfh, get_teaser_solver, Rt2T, find_correspondences\nimport ICP_utils\nfrom UTILS.pcd_numpy_utils import *\nfrom scipy.spatial.transform import Rotation as Rot\nimport cv2\nfrom pykdtree.kdtree import KDTree\n\n# \n#\n#\n#\n\nclass PointCloudRegistration():\n def DenseAlignment(self,Source,Target):\n '''\n input: CAD pointcloud (Source) and Local dense scan of the workpiece (Target) as numpy array\n output: Transformed Source, rotation list RT_ls and translation list T_ls\n '''\n print('beginning dense alignment')\n ##\n ## ICP registration\n ##\n RT_TOT = []\n T_TOT = []\n #invert source and target since its easier to align the piece with the total\n # frame = 
o3d.geometry.TriangleMesh.create_coordinate_frame(size=500)\n # o3d.visualization.draw_geometries([NumpyToPCD(Source).paint_uniform_color([1, 0, 0]),NumpyToPCD(Target).paint_uniform_color([0, 1, 0]), frame]) # used just to show the initial position\n temp_source = copy.deepcopy(Target)\n temp_target = copy.deepcopy(Source)\n RT, O, error = ICP_utils.ICP_Registration(temp_source,temp_target,npoints = 50000, bound_l = 10, bound_d = 0.1 )\n\n Source_n = copy.deepcopy((RT.T@Source.T).T - O)\n # o3d.visualization.draw_geometries([NumpyToPCD(Source_n).paint_uniform_color([0, 0, 0]),NumpyToPCD(Source).paint_uniform_color([1, 0, 0]),\n # NumpyToPCD(Target).paint_uniform_color([0, 1, 0]), frame]) # used just to show the final position\n\n RT_TOT.append(RT.T)\n T_TOT.append(-O)\n\n return Source_n, RT_TOT, T_TOT\n\n\n def SparseAlignment(self,Source, Target,VOX):\n '''\n input: Source and Target pointcloud as numpy array, \n alignment is performed stepwise, first the principal direction are aligned,\n then the footprint in teh XY plane is aligned\n then an icp registration of the keypoints is performed\n then a final icp registration of the whole pointclouds downsampled randomly\n output: Transformed Source pointcloud and ordered list of RT (rotation) and T (traslation) \n '''\n VOXEL_SIZE =VOX #Main parameter that affect the footprint alignment, need to be choosen wisely\n RT_TOT = []\n T_TOT = []\n VISUALIZE = False\n if VISUALIZE:\n o3d.visualization.draw_geometries([NumpyToPCD(Source).paint_uniform_color([0, 0, 1]),NumpyToPCD(Target).paint_uniform_color([1.0, 0, 0.0])])\n ########################################################\n ######## ADDED RANDOM ROTATION TO TEST + NOISE #########\n ########################################################\n #____________________________________________________________________________________________________________________________\n # mu = 0\n # sigma = 0\n # noise0 = np.random.normal(mu,sigma, size = (len(Source),3))\n # Source = Source+noise0\n # ROT = np.asarray(Rot.random().as_matrix()) # random rotation to test consistency\n # Source = (ROT@Source.T).T\n # frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=500)\n # RT_TOT.append(ROT)\n # T_TOT.append(np.asarray([0,0,0]))\n # # if VISUALIZE:\n # # o3d.visualization.draw_geometries([NumpyToPCD(Source),NumpyToPCD(Target), frame])\n # # #____________________________________________________________________________________________________________________________\n\n #####################################\n ###### MEAN-SHIFT TO TARGET #########\n #####################################\n\n source_pmedio = np.mean(Source,axis=0)\n target_pmedio = np.mean(Target,axis=0)\n Source = Source - source_pmedio + target_pmedio\n I = np.asarray([[1,0,0],\n [0,1,0],\n [0,0,1]])\n T_TOT.append(-source_pmedio)\n RT_TOT.append(I)\n T_TOT.append(target_pmedio)\n RT_TOT.append(I)\n\n # Source_pcd = NumpyToPCD(Source)\n # Target_pcd = NumpyToPCD(Target)\n\n if VISUALIZE:\n o3d.visualization.draw_geometries([NumpyToPCD(Source).paint_uniform_color([0, 0, 1]),NumpyToPCD(Target).paint_uniform_color([1.0, 0, 0.0])])\n #disegno origine\n #____________________________________________________________________________________________________________________________\n ###########################\n ##### PCA ALIGNMENT #######\n ###########################\n Source_a, RT1, RT2, am, bm=ICP_utils.PCA_alignment(copy.deepcopy(Source), copy.deepcopy(Target))\n\n RT_TOT.append(RT1.T)\n T_TOT.append(np.asarray([0,0,0]))\n 
RT_TOT.append(RT2)\n T_TOT.append(np.asarray([0,0,0]))\n T_TOT.append(-am)\n RT_TOT.append(I)\n T_TOT.append(bm)\n RT_TOT.append(I)\n\n Source_nn = (RT1.T@Source.T).T\n Source_nn = (RT2@Source_nn.T).T\n Source_nn = Source_nn -am\n Source_nn = Source_nn +bm\n\n if VISUALIZE:\n o3d.visualization.draw_geometries([NumpyToPCD(Source_nn).paint_uniform_color([0, 0, 1]),NumpyToPCD(Target).paint_uniform_color([1.0, 0, 0.0])])\n\n Source_nn_pcd = copy.deepcopy(NumpyToPCD(Source_nn))\n # if VISUALIZE:\n # o3d.visualization.draw_geometries([Target_pcd.paint_uniform_color([1.0, 0, 0.0]),NumpyToPCD(Source_nn).paint_uniform_color([0, 0, 1])])\n #____________________________________________________________________________________________________________________________\n ######################################\n ##### TEASER_PP REGISTRATION ######### feature recognition alignement based on teaser++ package, used for footstamp alignment in xy plane\n ######################################\n error = 1000\n print(len(np.squeeze(Target)))\n print(\"error limit is : \" , len(np.squeeze(Target))/1000 )\n while error>len(np.squeeze(Target))/1000:\n ##Source_nn_pcd_tmp = o3d.geometry.keypoint.compute_iss_keypoints(Source_nn_pcd)\n #arget_pcd_tmp = o3d.geometry.keypoint.compute_iss_keypoints(Target_pcd)\n Source_2d = copy.deepcopy(Source_nn)\n #Source_2d[:,2] = np.zeros(len(Source_2d))\n Target_2d = copy.deepcopy(Target)\n #Target_2d[:,2] = np.zeros(len(Target_2d))\n #o3d.visualization.draw_geometries([NumpyToPCD(Source_2d).paint_uniform_color([1,0,0]),NumpyToPCD(Target_2d).paint_uniform_color([0,0,1])])\n A_pcd_raw = NumpyToPCD(Source_2d)\n B_pcd_raw = NumpyToPCD(Target_2d)\n A_pcd = A_pcd_raw.voxel_down_sample(voxel_size=VOXEL_SIZE)\n B_pcd = B_pcd_raw.voxel_down_sample(voxel_size=VOXEL_SIZE)\n\n A_xyz = pcd2xyz(A_pcd) # np array of size 3 by N\n B_xyz = pcd2xyz(B_pcd) # np array of size 3 by M\n\n # extract FPFH features\n A_feats = extract_fpfh(A_pcd,VOXEL_SIZE)\n B_feats = extract_fpfh(B_pcd,VOXEL_SIZE)\n\n # establish correspondences by nearest neighbour search in feature space\n corrs_A, corrs_B = find_correspondences(\n A_feats, B_feats, mutual_filter=True)\n A_corr = A_xyz[:,corrs_A] # np array of size 3 by num_corrs\n B_corr = B_xyz[:,corrs_B] # np array of size 3 by num_corrs\n\n num_corrs = A_corr.shape[1]\n print(f'FPFH generates {num_corrs} putative correspondences.')\n\n # visualize the point clouds together with feature correspondences\n points = np.concatenate((A_corr.T,B_corr.T),axis=0)\n lines = []\n for i in range(num_corrs):\n lines.append([i,i+num_corrs])\n colors = [[0, 1, 0] for i in range(len(lines))] # lines are shown in green\n line_set = o3d.geometry.LineSet(\n points=o3d.utility.Vector3dVector(points),\n lines=o3d.utility.Vector2iVector(lines),\n )\n line_set.colors = o3d.utility.Vector3dVector(colors)\n if VISUALIZE:\n o3d.visualization.draw_geometries([A_pcd,B_pcd,line_set])\n\n # robust global registration using TEASER++\n NOISE_BOUND = VOXEL_SIZE\n teaser_solver = get_teaser_solver(NOISE_BOUND)\n teaser_solver.solve(A_corr,B_corr)\n solution = teaser_solver.getSolution()\n R_teaser = solution.rotation\n t_teaser = solution.translation\n T_teaser = Rt2T(R_teaser,t_teaser)\n\n # Visualize the registration results\n #A_pcd_T_teaser = copy.deepcopy(Source_nn_pcd).transform(T_teaser)\n temp_a = A_pcd.transform(T_teaser)\n temp_b = B_pcd\n\n\n # visualize the registration after ICP refinement\n Source_nn_pcd1 = copy.deepcopy(NumpyToPCD(Source_2d).transform(T_teaser))\n #Target_pcd = 
B_pcd\n tree = KDTree(PCDToNumpy(temp_a))\n dist = tree.query(PCDToNumpy(temp_b),k=1)[0]\n error =np.sqrt(np.sum(dist**2)/len(dist))\n print(\"error is : \", error)\n #o3d.visualization.draw_geometries([temp_a.paint_uniform_color([1,0,0]),temp_b.paint_uniform_color([0,0,1])])\n \n # o3d.visualization.draw_geometries([Source_nn_pcd1.paint_uniform_color([1,0,0]),NumpyToPCD(Target).paint_uniform_color([0,0,1])])\n VOXEL_SIZE = VOXEL_SIZE-1\n if VOXEL_SIZE<5:\n break\n\n RT_TOT.append(R_teaser) #maybe convert to numpy\n T_TOT.append(np.asarray(t_teaser))\n Source_nn = (R_teaser@Source_nn.T).T + np.asarray(t_teaser)\n #____________________________________________________________________________________________________________________________\n #\n #source and target are now sufficiently close to use ICP\n #\n #now I invert source and target\n #temp_source_pcd = copy.deepcopy(Target_pcd)\n temp_source = copy.deepcopy(Target)\n #temp_target_pcd = copy.deepcopy(Source_nn_pcd)\n temp_target = copy.deepcopy(Source_nn)\n\n #######################################\n ####### ICP with Keypoints ############\n #######################################\n\n # keypoints0_pcd = o3d.geometry.keypoint.compute_iss_keypoints(temp_source_pcd)\n # keypoints1_pcd = o3d.geometry.keypoint.compute_iss_keypoints(temp_target_pcd)\n # keypoints0_pcd = copy.deepcopy(temp_source_pcd)\n # keypoints1_pcd = copy.deepcopy(temp_target_pcd)\n\n # if VISUALIZE:\n # o3d.visualization.draw_geometries([keypoints0_pcd.paint_uniform_color([0, 0, 1]),keypoints1_pcd.paint_uniform_color([1.0, 0, 0.0])])\n # keypoints0 = PCDToNumpy(keypoints0_pcd)\n # keypoints1 = PCDToNumpy(keypoints1_pcd)\n RT3, O, error = ICP_utils.ICP_Registration(temp_source,temp_target,1000, sparse=True)\n temp_source_n = copy.deepcopy((RT3@temp_source.T).T + O)\n Source_nnn = copy.deepcopy((RT3.T@Source_nn.T).T - O) #use inverse rotation and offset\n\n RT_TOT.append(RT3.T)\n T_TOT.append(-O)\n\n \n\n if VISUALIZE:\n o3d.visualization.draw_geometries([NumpyToPCD(Source_nnn).paint_uniform_color([0, 0, 1]) ,NumpyToPCD(Target).paint_uniform_color([1, 0, 0]), NumpyToPCD(Source_nn).paint_uniform_color([0,1,1])]) #plot\n # # local refinement using ICP\n \n #########################################\n ######## final ICP Registration #########\n #########################################\n\n #now I invert again source and target\n #temp_source_pcd2 = copy.deepcopy(Target_pcd)\n temp_source2 = copy.deepcopy(Target)\n #temp_target_pcd2 = copy.deepcopy(Source_nnn_pcd)\n temp_target2 = copy.deepcopy(Source_nnn)\n\n RT4, O2,error = ICP_utils.ICP_Registration(temp_source2,temp_target2,npoints = 50000, sparse=True)\n temp_source_nn = copy.deepcopy((RT4@temp_source2.T).T+O2) \n Source_nnnn = copy.deepcopy((RT4.T@Source_nnn.T).T - O2)\n\n\n RT_TOT.append(RT4.T)\n T_TOT.append(-O2)\n\n\n if VISUALIZE:\n o3d.visualization.draw_geometries([NumpyToPCD(Source_nnnn).paint_uniform_color([0, 0, 1]),NumpyToPCD(Target).paint_uniform_color([1, 0, 0]),NumpyToPCD(Source_nnn).paint_uniform_color([0, 1, 1])])\n return Source_nnnn, RT_TOT, T_TOT, error\n \n \n\n def Manual_SparseAlignment(self,Source,Target):\n '''\n input: Source and Target PointClouds as numpy arrays,\n need to display the pointcloud in order to get user point input\n output: Source transformed and RT , T transformation\n ''' \n Source_pcd = NumpyToPCD(Source)\n Target_pcd = NumpyToPCD(Target)\n picked_id_source = self.pick_points(Source_pcd)\n picked_id_target = self.pick_points(Target_pcd)\n RT_TOT = []\n T_TOT = []\n assert 
(len(picked_id_source) >= 3 and len(picked_id_target) >= 3)\n assert (len(picked_id_source) == len(picked_id_target))\n corr = np.zeros((len(picked_id_source), 2))\n corr[:, 0] = picked_id_source\n corr[:, 1] = picked_id_target\n \n # estimate rough transformation using correspondences\n print(\"Compute a rough transform using the correspondences given by user\")\n p2p = o3d.pipelines.registration.TransformationEstimationPointToPoint()\n trans_init = p2p.compute_transformation(Source_pcd,Target_pcd,\n o3d.utility.Vector2iVector(corr))\n # point-to-point ICP for refinement\n print(\"Perform point-to-point ICP refinement\")\n threshold = 0.03 # 3cm distance threshold\n reg_p2p = o3d.pipelines.registration.registration_icp(\n Source_pcd, Target_pcd, threshold, trans_init,\n o3d.pipelines.registration.TransformationEstimationPointToPoint())\n self.draw_registration_result(NumpyToPCD(Source), NumpyToPCD(Target), reg_p2p.transformation)\n print(\"\")\n transf = reg_p2p.transformation\n RT = [transf[0][0:3],\n transf[1][0:3],\n transf[2][0:3]]\n T = np.asarray([transf[0][3], transf[1][3], transf[2][3]])\n RT_TOT.append(RT)\n T_TOT.append(T)\n Source_pcd.transform(transf)\n Source_n = PCDToNumpy(Source_pcd)\n #now I invert source and target, easier to align sparse scan to cad\n temp_source_pcd = copy.deepcopy(Target_pcd)\n temp_source = PCDToNumpy(temp_source_pcd)\n temp_target_pcd = copy.deepcopy(Source_pcd)\n temp_target = PCDToNumpy(temp_target_pcd)\n RT3, O2,error = ICP_utils.ICP_Registration(temp_source,temp_target,npoints=30000)\n temp_source_n = copy.deepcopy((RT3@temp_source.T).T+O2) \n Source_nn = copy.deepcopy((RT3.T@Source_n.T).T - O2) #inverse rotation and translation\n frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=500)\n o3d.visualization.draw_geometries([NumpyToPCD(Source_nn).paint_uniform_color([1, 0, 0]), Target_pcd.paint_uniform_color([0, 1, 0]), frame])\n\n RT_TOT.append(RT3.T)\n T_TOT.append(-O2)\n\n\n return Source_nn, RT_TOT, T_TOT, error\n\n def pick_points(self,pcd):\n print(\"\")\n print(\n \"1) Please pick at least three correspondences using [shift + left click]\"\n )\n print(\" Press [shift + right click] to undo point picking\")\n print(\"2) After picking points, press 'Q' to close the window\")\n vis = o3d.visualization.VisualizerWithEditing()\n vis.create_window()\n vis.add_geometry(pcd)\n vis.run() # user picks points\n vis.destroy_window()\n print(\"\")\n return vis.get_picked_points()\n\n def Transformation_with_list(self,Points,RT_ls, T_ls):\n \"\"\"\n input: Points, np.array(N,3) that need to be transformed, RT_ls rotation matrix list, T_ls translation vector list, need to be of the same length\n output: rototranslated points list\n \"\"\"\n temp = copy.deepcopy(Points)\n for i in range(len(RT_ls)):\n RT_i = RT_ls[i]\n T_i = T_ls[i]\n temp = (RT_i@temp.T).T+T_i \n return temp\n\n def draw_registration_result(self,source, target, transformation):\n source_temp = copy.deepcopy(source)\n target_temp = copy.deepcopy(target)\n source_temp.paint_uniform_color([1, 0.706, 0])\n target_temp.paint_uniform_color([0, 0.651, 0.929])\n source_temp.transform(transformation)\n frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=500)\n o3d.visualization.draw_geometries([source_temp, target_temp, 
frame])\n","repo_name":"OguzhanKirik/inspector_ws","sub_path":"frank/include/REGISTRATION/PC_registration.py","file_name":"PC_registration.py","file_ext":"py","file_size_in_byte":17336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18076594193","text":"import os\nimport logging\nimport json\nimport csv\nimport requests\n\nfrom io import StringIO\nfrom datetime import datetime\nfrom pwnagotchi.utils import WifiInfo, FieldNotFoundError, extract_from_pcap, StatusFile, remove_whitelisted\nfrom threading import Lock\nfrom pwnagotchi import plugins\nfrom pwnagotchi._version import __version__ as __pwnagotchi_version__\n\n\ndef _extract_gps_data(path):\n \"\"\"\n Extract data from gps-file\n\n return json-obj\n \"\"\"\n\n try:\n if path.endswith('.geo.json'):\n with open(path, 'r') as json_file:\n tempJson = json.load(json_file)\n d = datetime.utcfromtimestamp(int(tempJson[\"ts\"]))\n return {\"Latitude\": tempJson[\"location\"][\"lat\"], \"Longitude\": tempJson[\"location\"][\"lng\"], \"Altitude\": 10, \"Updated\": d.strftime('%Y-%m-%dT%H:%M:%S.%f')}\n else:\n with open(path, 'r') as json_file:\n return json.load(json_file)\n except OSError as os_err:\n raise os_err\n except json.JSONDecodeError as json_err:\n raise json_err\n\n\ndef _format_auth(data):\n out = \"\"\n for auth in data:\n out = f\"{out}[{auth}]\"\n return out\n\n\ndef _transform_wigle_entry(gps_data, pcap_data, plugin_version):\n \"\"\"\n Transform to wigle entry in file\n \"\"\"\n dummy = StringIO()\n # write kismet header\n dummy.write(\n \"WigleWifi-1.4,appRelease={},model=pwnagotchi,release={},device=pwnagotchi,display=kismet,board=kismet,brand=pwnagotchi\\n\".format(plugin_version, __pwnagotchi_version__))\n dummy.write(\n \"MAC,SSID,AuthMode,FirstSeen,Channel,RSSI,CurrentLatitude,CurrentLongitude,AltitudeMeters,AccuracyMeters,Type\")\n\n writer = csv.writer(dummy, delimiter=\",\", quoting=csv.QUOTE_NONE, escapechar=\"\\\\\")\n writer.writerow([\n pcap_data[WifiInfo.BSSID],\n pcap_data[WifiInfo.ESSID],\n _format_auth(pcap_data[WifiInfo.ENCRYPTION]),\n datetime.strptime(gps_data['Updated'].rsplit('.')[0],\n \"%Y-%m-%dT%H:%M:%S\").strftime('%Y-%m-%d %H:%M:%S'),\n pcap_data[WifiInfo.CHANNEL],\n pcap_data[WifiInfo.RSSI],\n gps_data['Latitude'],\n gps_data['Longitude'],\n gps_data['Altitude'],\n 0, # accuracy?\n 'WIFI'])\n return dummy.getvalue()\n\n\ndef _send_to_wigle(lines, api_key, donate=True, timeout=30):\n \"\"\"\n Uploads the file to wigle-net\n \"\"\"\n\n dummy = StringIO()\n\n for line in lines:\n dummy.write(f\"{line}\")\n\n dummy.seek(0)\n\n headers = {'Authorization': f\"Basic {api_key}\",\n 'Accept': 'application/json'}\n data = {'donate': 'on' if donate else 'false'}\n payload = {'file': dummy, 'type': 'text/csv'}\n try:\n res = requests.post('https://api.wigle.net/api/v2/file/upload',\n data=data,\n headers=headers,\n files=payload,\n timeout=timeout)\n json_res = res.json()\n if not json_res['success']:\n raise requests.exceptions.RequestException(json_res['message'])\n except requests.exceptions.RequestException as re_e:\n raise re_e\n\n\nclass Wigle(plugins.Plugin):\n __author__ = '33197631+dadav@users.noreply.github.com'\n __version__ = '2.0.0'\n __license__ = 'GPL3'\n __description__ = 'This plugin automatically uploads collected wifis to wigle.net'\n\n def __init__(self):\n self.ready = False\n self.report = StatusFile('/root/.wigle_uploads', data_format='json')\n self.skip = list()\n self.lock = Lock()\n\n def on_loaded(self):\n if 'api_key' 
not in self.options or ('api_key' in self.options and self.options['api_key'] is None):\n logging.debug(\"WIGLE: api_key isn't set. Can't upload to wigle.net\")\n return\n\n if not 'whitelist' in self.options:\n self.options['whitelist'] = list()\n\n if not 'donate' in self.options:\n self.options['donate'] = True\n\n self.ready = True\n logging.info(\"WIGLE: ready\")\n\n def on_internet_available(self, agent):\n \"\"\"\n Called in manual mode when there's internet connectivity\n \"\"\"\n if not self.ready or self.lock.locked():\n return\n\n from scapy.all import Scapy_Exception\n\n config = agent.config()\n display = agent.view()\n reported = self.report.data_field_or('reported', default=list())\n handshake_dir = config['bettercap']['handshakes']\n all_files = os.listdir(handshake_dir)\n all_gps_files = [os.path.join(handshake_dir, filename)\n for filename in all_files\n if filename.endswith('.gps.json') or filename.endswith('.paw-gps.json') or filename.endswith('.geo.json')]\n\n all_gps_files = remove_whitelisted(all_gps_files, self.options['whitelist'])\n new_gps_files = set(all_gps_files) - set(reported) - set(self.skip)\n if new_gps_files:\n logging.info(\"WIGLE: Internet connectivity detected. Uploading new handshakes to wigle.net\")\n csv_entries = list()\n no_err_entries = list()\n for gps_file in new_gps_files:\n if gps_file.endswith('.gps.json'):\n pcap_filename = gps_file.replace('.gps.json', '.pcap')\n if gps_file.endswith('.paw-gps.json'):\n pcap_filename = gps_file.replace('.paw-gps.json', '.pcap')\n if gps_file.endswith('.geo.json'):\n pcap_filename = gps_file.replace('.geo.json', '.pcap')\n if not os.path.exists(pcap_filename):\n logging.debug(\"WIGLE: Can't find pcap for %s\", gps_file)\n self.skip.append(gps_file)\n continue\n try:\n gps_data = _extract_gps_data(gps_file)\n except OSError as os_err:\n logging.debug(\"WIGLE: %s\", os_err)\n self.skip.append(gps_file)\n continue\n except json.JSONDecodeError as json_err:\n logging.debug(\"WIGLE: %s\", json_err)\n self.skip.append(gps_file)\n continue\n if gps_data['Latitude'] == 0 and gps_data['Longitude'] == 0:\n logging.debug(\"WIGLE: Not enough gps-information for %s. Trying again next time.\", gps_file)\n self.skip.append(gps_file)\n continue\n try:\n pcap_data = extract_from_pcap(pcap_filename, [WifiInfo.BSSID,\n WifiInfo.ESSID,\n WifiInfo.ENCRYPTION,\n WifiInfo.CHANNEL,\n WifiInfo.RSSI])\n except FieldNotFoundError:\n logging.debug(\"WIGLE: Could not extract all information. 
Skip %s\", gps_file)\n self.skip.append(gps_file)\n continue\n except Scapy_Exception as sc_e:\n logging.debug(\"WIGLE: %s\", sc_e)\n self.skip.append(gps_file)\n continue\n new_entry = _transform_wigle_entry(gps_data, pcap_data, self.__version__)\n csv_entries.append(new_entry)\n no_err_entries.append(gps_file)\n if csv_entries:\n display.on_uploading('wigle.net')\n\n try:\n _send_to_wigle(csv_entries, self.options['api_key'], donate=self.options['donate'])\n reported += no_err_entries\n self.report.update(data={'reported': reported})\n logging.info(\"WIGLE: Successfully uploaded %d files\", len(no_err_entries))\n except requests.exceptions.RequestException as re_e:\n self.skip += no_err_entries\n logging.debug(\"WIGLE: Got an exception while uploading %s\", re_e)\n except OSError as os_e:\n self.skip += no_err_entries\n logging.debug(\"WIGLE: Got the following error: %s\", os_e)\n\n display.on_normal()\n","repo_name":"evilsocket/pwnagotchi","sub_path":"pwnagotchi/plugins/default/wigle.py","file_name":"wigle.py","file_ext":"py","file_size_in_byte":8319,"program_lang":"python","lang":"en","doc_type":"code","stars":6448,"dataset":"github-code","pt":"53"} +{"seq_id":"21064153875","text":"import logging\nimport sys\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\nimport virtualpdu.core\nfrom virtualpdu.drivers import libvirt_driver\nfrom virtualpdu.pdu import apc_rackpdu\nfrom virtualpdu.pdu import pysnmp_handler\n\nMISSING_CONFIG_MESSAGE = 'Missing configuration file as first parameter.\\n'\nlogging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)\n\n\ndef main():\n try:\n config_file = sys.argv[1]\n except IndexError:\n sys.stderr.write(MISSING_CONFIG_MESSAGE)\n return 1\n else:\n config = configparser.RawConfigParser(\n {'debug_snmp': 'no',\n 'snmp_versions': '1,2c',\n # SNMPv2c\n 'community': None,\n # SNMPv3\n 'engine_id': None,\n 'context_engine_id': None,\n 'context_name': '',\n 'user': None,\n 'auth_key': None,\n 'auth_protocol': None,\n 'priv_key': None,\n 'priv_protocol': None}\n )\n\n config.read(config_file)\n driver = get_driver_from_config(config)\n mapping = get_mapping_for_config(config)\n outlet_default_state = get_default_state_from_config(config)\n\n debug_snmp = config.get('global', 'debug_snmp')\n\n core = virtualpdu.core.Core(driver=driver, mapping=mapping, store={},\n default_state=outlet_default_state)\n\n pdu_threads = []\n\n for pdu in [s for s in config.sections() if s != 'global']:\n apc_pdu = apc_rackpdu.APCRackPDU(pdu, core)\n\n listen_address = config.get(pdu, 'listen_address')\n listen_port = int(config.get(pdu, 'listen_port'))\n\n snmp_versions = config.get(pdu, 'snmp_versions')\n snmp_versions = [x.strip() for x in snmp_versions.split(',')]\n\n # SNMPv1/v2c options\n community = config.get(pdu, 'community')\n\n # SNMPv3 options\n engine_id = config.get(pdu, 'engine_id')\n if engine_id and engine_id.startswith('0x'):\n engine_id = engine_id[2:]\n context_engine_id = config.get(pdu, 'context_engine_id')\n if context_engine_id and context_engine_id.startswith('0x'):\n context_engine_id = context_engine_id[2:]\n context_name = config.get(pdu, 'context_name')\n user = config.get(pdu, 'user')\n auth_key = config.get(pdu, 'auth_key')\n auth_protocol = config.get(pdu, 'auth_protocol')\n priv_key = config.get(pdu, 'priv_key')\n priv_protocol = config.get(pdu, 'priv_protocol')\n\n snmp_harness = pysnmp_handler.SNMPPDUHarness(\n apc_pdu,\n listen_address,\n listen_port,\n snmp_versions=snmp_versions,\n 
community=community,\n engine_id=engine_id,\n context_engine_id=context_engine_id,\n context_name=context_name,\n user=user,\n auth_key=auth_key,\n auth_protocol=auth_protocol,\n priv_key=priv_key,\n priv_protocol=priv_protocol,\n debug_snmp=debug_snmp in ('yes', 'true', '1')\n )\n\n pdu_threads.append(snmp_harness)\n\n for t in pdu_threads:\n t.start()\n\n try:\n for t in pdu_threads:\n while t.is_alive():\n t.join(1)\n\n except KeyboardInterrupt:\n for t in pdu_threads:\n t.stop()\n return 1\n\n return 0\n\n\ndef parse_default_state_config(default_state):\n supported_states = {\n 'ON': virtualpdu.core.POWER_ON,\n 'OFF': virtualpdu.core.POWER_OFF\n }\n try:\n return supported_states[default_state]\n except KeyError:\n invalid_outlet = \"outlet_default_state must be \" \\\n \"one of {{{}}} but was {}\"\n raise UnableToParseConfig(invalid_outlet.format(\n \", \".join(supported_states.keys()),\n default_state))\n\n\ndef get_driver_from_config(conf):\n try:\n uri = conf.get('global', 'libvirt_uri')\n except (configparser.NoSectionError, configparser.NoOptionError) as e:\n raise UnableToParseConfig(e)\n return libvirt_driver.LibvirtDriver(uri=uri)\n\n\ndef get_mapping_for_config(conf):\n sections = [s for s in conf.sections() if s != 'global']\n mapping = {}\n try:\n for pdu in sections:\n ports = conf.get(pdu, 'ports')\n list_of_ports = ports.split(',')\n for data in list_of_ports:\n port, host = data.split(':')\n mapping[(pdu, int(port))] = host\n except configparser.NoOptionError as e:\n raise UnableToParseConfig(e)\n return mapping\n\n\ndef get_default_state_from_config(conf):\n try:\n default_state = conf.get('global', 'outlet_default_state')\n except (configparser.NoSectionError, configparser.NoOptionError):\n default_state = 'ON'\n return parse_default_state_config(default_state)\n\n\nclass UnableToParseConfig(Exception):\n pass\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"openstack/virtualpdu","sub_path":"virtualpdu/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36823085784","text":"from sklearn.neighbors import KNeighborsClassifier\n\nfrom sklearn.model_selection import GridSearchCV, cross_val_score\nfrom sklearn.ensemble import BaggingClassifier\nfrom data import importdata\n\nimport numpy as np\n\ndataset = ['abalone16_29', 'balance_scale', 'breast_cancer', 'car', 'cmc',\n 'ecoli', 'glass', 'haberman', 'heart_cleveland', 'hepatitis',\n 'new_thyroid', 'postoperative', 'solar_flare', 'transfusion', 'vehicle',\n 'yeastME3', 'bupa', 'german', 'horse_colic', 'ionosphere', 'seeds', 'vertebal']\nfolds = 10\nfor data in dataset:\n db = getattr(importdata, 'load_' + data)()\n print(\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\")\n print('Zbior danych: %s' % data)\n importdata.print_info(db.target)\n n_neighbors = [1, 2, 3, 5]\n n_estimators = [5, 10, 15, 20, 50, 100]\n for estimator in n_estimators:\n for neighbors in n_neighbors:\n param_grid = [\n {'max_features': [0.4, 0.6, 0.7, 0.8, 0.9, 1.0], 'max_samples': [0.4, 0.6, 0.7, 0.8, 0.9, 1.0]}]\n clf = BaggingClassifier(KNeighborsClassifier(n_neighbors=neighbors), n_estimators=estimator)\n length_data = len(data)\n\n grid_search = GridSearchCV(clf, scoring='f1', cv=folds, param_grid=param_grid)\n\n grid_search.fit(db.data, db.target)\n results = grid_search.cv_results_\n best_parameters2 = []\n\n best_index = np.flatnonzero(results[\"rank_test_score\"] == 1)[0]\n if 
len((np.flatnonzero(results[\"rank_test_score\"] == 2))) > 0:\n best_index2 = np.flatnonzero(results[\"rank_test_score\"] == 2)[0]\n best_parameters2 = results[\"params\"][best_index2]\n\n best_parameters = results[\"params\"][best_index]\n print(best_parameters)\n print(best_parameters2)\n","repo_name":"kob22/pracamgr","sub_path":"tests/gridsearch/bagging_knn.py","file_name":"bagging_knn.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17077538954","text":"import random\nimport os\nimport string\n\n\ndef makeRandomString(length=8):\n s = \"\"\n for i in range(length):\n c = string.ascii_letters\n s += random.choice(list(c))\n return s\n\n\nclass Base:\n def __init__(self):\n pass\n\n\nclass Program(Base):\n def __init__(self, name, function, unlocked=False, price=0, classPlease=False):\n super().__init__()\n self.name = name\n self.function = function\n self.unlocked = unlocked\n self.price = price\n self.classPlease = classPlease\n\n def execute(self, args, player=None):\n if player:\n return self.function(args, player)\n else:\n return self.function(args)\n\n def __lt__(self, other):\n return self.name < other.name\n\n\nclass User(Base):\n def __init__(self, name, password=None, isAdmin=False):\n super().__init__()\n self.name = name\n self.password = password if password else makeRandomString()\n\n\nclass Port(Base):\n def __init__(self, num, name, open=False):\n super().__init__()\n self.num = num\n self.name = name\n self.open = False\n\n def toggleOpen(self):\n self.open = False if self.open else True\n\n\nclass BinaryFile(Base):\n def __init__(self, size=32, length=32):\n super().__init__()\n self.size = size\n self.length = length\n\n def data(self):\n data = []\n for x in range(self.length):\n d = \"\"\n for i in range(self.size):\n d += random.choice([\"0\", \"1\"])\n data.append(d)\n return \"\\n\".join(data)\n\n\nclass File(Base):\n def __init__(self, name, data=None):\n super().__init__()\n self.name = name\n if data:\n self.data = data\n else:\n self.data = BinaryFile().data()\n\n\nclass Folder(Base):\n def __init__(self, name, files=[]):\n super().__init__()\n self.name = name\n self.files = files\n\n def listDir(self):\n result = []\n for item in self.files:\n if isinstance(item, Folder):\n result.append([item.name] + item.listDir())\n else:\n result.append(item)\n return result\n\n\nclass Log(Base):\n def __init__(self, text, address=None):\n super().__init__()\n if address:\n self.address = address\n else:\n self.address = data.getNode(\"localhost\").address\n self.text = text\n\n\nclass Node(Base):\n def __init__(\n self,\n name,\n uid,\n address,\n files=[],\n users=[],\n ports=[],\n minPorts=0,\n linked=[],\n hacked=False,\n player=None,\n ):\n super().__init__()\n self.name = name\n self.uid = uid\n self.player = player\n self.address = address\n self.files = files + [Folder(\"sys\", [File(\"core.sys\"), File(\"x-server.sys\")])]\n self.ports = ports\n self.minPorts = minPorts\n self.users = users\n self.hacked = hacked\n self.linked = linked\n self.visited = False\n self.nmap = False\n self.logs = []\n self.firewall = None\n\n def create_log(self, ip_address, text):\n self.logs.append(Log(ip_address, text))\n\n def clone(self, new_address):\n cloned_node = type(self)(\n name=self.name,\n uid=self.uid,\n address=new_address,\n files=self.files, # Here, files will be shared between the original and the clone\n users=self.users.copy(), # Other attributes that 
should be copied\n ports=self.ports.copy(),\n minPorts=self.minPorts,\n linked=self.linked.copy(),\n hacked=self.hacked,\n player=self.player,\n )\n\n return cloned_node\n","repo_name":"WinFan3672/reHack","sub_path":"resource/classes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24421378788","text":"\"\"\"A Pythagorean triplet is a set of three natural numbers, a 1000 and not solved:\n a = 0\n difference += 1\n b = a + difference\n print(f\"Trying a={a}, b={b}...\")\n\nc = 1000 - a - b\nproduct = a * b * c\n\nprint(f\"Answer: {product}\")\nprint(f\"a = {a}, b = {b}, c = {c}\")\n","repo_name":"thomasjdelaney/Euler","sub_path":"py/problem_9.py","file_name":"problem_9.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7348024218","text":"# --------------------------------------------------------\n# Vision Transformer\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Yubo Liu\n# --------------------------------------------------------\n\nimport tensorflow as tf\nfrom networks.vit_functions import gelu\nimport numpy as np\n\nclass PatchEmbedding(tf.keras.layers.Layer):\n \"\"\" \n Args:\n img_size (int): Image size, 224 is set by default.\n patch_size (tuple[int]): Size of patch, 16 is set by default.\n embed_dim (int): Dimension for embedding output\n name(str): Model Name.\n \"\"\"\n def __init__(self, img_size=224, patch_size=16, embed_dim=768, name=None):\n super(PatchEmbedding, self).__init__(name=name)\n self.embed_dim = embed_dim\n self.img_size = (img_size, img_size)\n self.grid_size = (img_size // patch_size, img_size // patch_size)\n self.num_patches = self.grid_size[0] * self.grid_size[1] # 32 by default.\n\n self.proj = tf.keras.layers.Conv2D(filters=embed_dim, kernel_size=patch_size,\n strides=patch_size, padding='SAME',\n bias_initializer=tf.keras.initializers.Zeros())\n\n def call(self, inputs):\n batch_size, height, width, channel = inputs.shape\n assert height == self.img_size[0] and width == self.img_size[1], \\\n f\"Input image size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).\"\n x = self.proj(inputs)\n # [B, H, W, C] -> [B, H*W, C]\n x = tf.reshape(x, (-1, self.num_patches, self.embed_dim))\n return x\n\nclass MultHeadAttentionLayer(tf.keras.layers.Layer):\n\n def __init__(self, \n dim, \n num_heads=8, \n qkv_bias=False, \n qk_scale=None,\n attn_drop_ratio=0.,\n proj_drop_ratio=0., \n name=None):\n self.kernel_init_strategy = 'glorot_uniform'\n self.bias_init_strategy = tf.keras.initializers.Zeros()\n \n super(MultHeadAttentionLayer, self).__init__(name=name)\n self.num_heads = num_heads\n head_dim = int(dim // self.num_heads)\n self.all_head_dims = head_dim * self.num_heads\n self.scale = qk_scale if qk_scale != None else head_dim ** -0.5\n qkv_dim = self.all_head_dims*3\n self.qkv = tf.keras.layers.Dense(qkv_dim, use_bias=qkv_bias, name=\"qkv\", \n kernel_initializer=self.kernel_init_strategy, \n bias_initializer=self.bias_init_strategy)\n self.attention_drop = tf.keras.layers.Dropout(attn_drop_ratio)\n self.proj = tf.keras.layers.Dense(dim, \n name=\"out\",\n kernel_initializer=self.kernel_init_strategy, \n bias_initializer=self.bias_init_strategy)\n self.proj_drop = tf.keras.layers.Dropout(proj_drop_ratio)\n\n def call(self, inputs, training=None):\n # input' shape -> 
[batch_size, num_patches + 1, embed_dim]\n B, N, _ = inputs.shape \n # qkv'shape -> [batch_size, num_patches + 1, 3 * all_head_dim]\n qkv = self.qkv(inputs)\n # split q, k, v and its corresponding head.\n qkv = tf.reshape(qkv, [-1, N, 3, self.num_heads, self.all_head_dims // self.num_heads])\n # qkv'shape is transposed to [3, batch_size, num_heads, num_patches+1, embed_dim_per_head]\n qkv = tf.transpose(qkv, [2, 0, 3, 1, 4])\n # q/k/v 's shape -> [batch_size, num_heads, num_patches+1, embed_dim_per_head]\n # transposed key's shape -> [batch_size, num_heads, embed_dim_per_head, num_patches+1]\n query, key, value = qkv[0], qkv[1], qkv[2]\n # attention' shape -> [batch_size, num_heads, num_patches+1, num_patches+1]\n attention = tf.matmul(a = query, b = key, transpose_b=True) * self.scale\n attention = tf.nn.softmax(attention, axis=-1)\n attention = self.attention_drop(attention, training=training)\n # x'shape -> [batch_size, num_heads, num_patches+1, embed_dim_per_head]\n x = tf.matmul(attention, value)\n # x'shape -> [batch_size, num_patches+1, num_heads, embed_dim_per_head]\n x = tf.transpose(x, [0, 2, 1, 3])\n x = tf.reshape(x, [-1, N, self.all_head_dims])\n x = self.proj(x)\n # projected x'shape -> [B, N, self.dim] by weights[B, N, self.all_head_dims, dim]\n x = self.proj_drop(x, training=training)\n return x\n\nclass MLP(tf.keras.layers.Layer):\n\n def __init__(self, in_features, mlp_ratio=4.0, drop=0., name=None):\n self.kernel_init_strategy = 'glorot_uniform'\n self.bias_init_strategy = tf.keras.initializers.RandomNormal(stddev=1e-6)\n super(MLP, self).__init__(name=name)\n self.fc1 = tf.keras.layers.Dense(int(in_features * mlp_ratio), name=\"dense_0\",\n kernel_initializer=self.kernel_init_strategy, \n bias_initializer=self.bias_init_strategy)\n self.act = gelu\n self.fc2 = tf.keras.layers.Dense(in_features, name=\"dense_1\",\n kernel_initializer=self.kernel_init_strategy, \n bias_initializer=self.bias_init_strategy)\n self.drop = tf.keras.layers.Dropout(drop)\n\n def call(self, inputs, training=None):\n x = self.fc1(inputs)\n x = self.act(x)\n x = self.drop(x, training=training)\n x = self.fc2(x)\n x = self.drop(x, training=training)\n return x\n\nclass ClassToken_PosEmbed(tf.keras.layers.Layer):\n def __init__(self, embed_dim= 768, num_patches=196, name=None):\n super(ClassToken_PosEmbed, self).__init__(name=name)\n self.embed_dim = embed_dim\n self.num_patches = num_patches\n\n def build(self, input_shape):\n self.cls_token = self.add_weight(name='cls_token',\n shape=[int(1), int(1), int(self.embed_dim)], \n initializer=tf.keras.initializers.Zeros(), \n trainable=True,\n dtype=tf.float32)\n # self.cls_token = tf.Variable(name='cls_token', \n # initial_value=tf.zeros_initializer(shape=(1, 1, self.embed_dim)))\n self.pos_embed = self.add_weight(name='pos_embed',\n shape=[1, self.num_patches+1, self.embed_dim], \n initializer=tf.keras.initializers.RandomNormal(stddev=0.02), \n trainable=True,\n dtype=tf.float32)\n\n def call(self, inputs):\n batch_size, _, _ = inputs.shape\n print(\"batch_size -> \", batch_size)\n # TODO fix this bug.\n cls_token = tf.broadcast_to(self.cls_token, shape=[batch_size, int(1), int(self.embed_dim)])\n \n x = tf.concat([cls_token, inputs], axis=1)\n x = x + self.pos_embed\n return x\n\nclass Encoder(tf.keras.layers.Layer):\n def __init__(self, \n dim, \n num_heads=8,\n qkv_bias=False, \n qk_scale=None,\n drop_ratio=0.,\n attn_drop_ratio=0.,\n drop_path_ratio=0., \n name=None):\n super(Encoder, self).__init__(name=name)\n self.norm1 = 
tf.keras.layers.LayerNormalization(epsilon=1e-6, name='layer_norm_0')\n self.attention = MultHeadAttentionLayer(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, \n attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio,\n name='mult_head_self_attention')\n self.drop_path = tf.keras.layers.Dropout(rate=drop_path_ratio, noise_shape=(None, 1, 1)) if drop_path_ratio > 0. \\\n else tf.keras.layers.Activation('linear')\n self.norm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6, name='layer_norm_1')\n self.mlp = MLP(dim, drop=drop_ratio, name='mpl_block')\n \n def call(self, inputs, training=None):\n x = self.norm1(inputs)\n x = self.attention(x)\n if isinstance(self.drop_path, tf.keras.layers.Activation):\n x = self.drop_path(x)\n else:\n x = self.drop_path(x, training=training)\n x = inputs + x\n x1 = self.norm2(x)\n x1 = self.mlp(x1) \n if isinstance(self.drop_path, tf.keras.layers.Activation):\n x1 = self.drop_path(x1)\n else:\n x1 = self.drop_path(x1, training=training)\n x1 = x + x1\n return x1\n \nclass VisionTransformer(tf.keras.Model):\n def __init__(self, img_size = 224, patch_size = 16, embed_dim = 768, \n depth=12, num_heads=8, qkv_bias=True, qk_scale=None,\n drop_ratio=0., attn_drop_ratio=0., drop_path_ratio=0.,\n representation_size=None, num_classes=10, name=\"ViT-B/16\"):\n super(VisionTransformer, self).__init__(name=name)\n self.num_classes = num_classes\n self.embed_dim = embed_dim\n self.depth = depth\n self.qkv_bias = qkv_bias\n self.patch_embed = PatchEmbedding(img_size=img_size, patch_size=patch_size, embed_dim=embed_dim, name='patch_embed')\n num_patches = self.patch_embed.num_patches\n self.cls_token_pos_embed = ClassToken_PosEmbed(embed_dim=embed_dim, \n num_patches=num_patches, \n name='cls_pos')\n self.pos_drop = tf.keras.layers.Dropout(drop_ratio)\n dpr = np.linspace(0., drop_path_ratio, depth)\n self.blocks = [Encoder(dim=embed_dim, num_heads=num_heads, \n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, \n drop_path_ratio=0., name=\"encoder_block_{}\".format(i)) for i in range(depth)]\n self.norm = tf.keras.layers.LayerNormalization(epsilon=1e-6, name=\"encoder_norm\")\n if representation_size:\n self.has_logits = True\n self.pre_logits = tf.keras.layers.Dense(representation_size, activation=\"tanh\", name=\"pre_logits\")\n else:\n self.has_logits = False\n self.pre_logits = tf.keras.layers.Activation(\"linear\")\n self.head = tf.keras.layers.Dense(num_classes, name=\"head\", kernel_initializer=tf.keras.initializers.Zeros())\n \n def call(self, inputs, training=None):\n # input'shape -> [B, H, W, C]\n x = self.patch_embed(inputs) # [B, num_patches, embed_dim]\n x = self.cls_token_pos_embed(x) # [B, num_patches+1, embed_dim]\n x = self.pos_drop(x, training=training)\n #! Encoder Problem\n for i, block in enumerate(self.blocks):\n x = block(x, training=training)\n #! 
LayerNormalization\n x = self.norm(x)\n x = self.pre_logits(x[:, 0]) # [B, 1, embed_dim] or [B, 1, representation_size]\n x = self.head(x) # [B, 1, num_classes]\n return x\n \n\n\n \n ","repo_name":"Crystal-Dragon-Liu/DigitalRockRecognization","sub_path":"networks/vit.py","file_name":"vit.py","file_ext":"py","file_size_in_byte":10972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36688214572","text":"##Solving the Yule-Walker equations to get the coefficients of the AR model\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom scipy.linalg import solve\n\n#Defining the AR model\n\n#AN np.array with 1.16 as 0 value and -0.4 as 1 and -1 value, and 0 for the rest\n#NOTE: For values that is zero in the array I will just type in zero\nYxx = np.array([1.16, -0.4,-0.4])\n#Add zeroes to Yxx\n\n##Fist order Yu-Walker equation\n\n#Equation on form Ax = B\nB1 = -Yxx[1]\nA1 = Yxx[0]\n\nX1 = solve(A1,B1)\n#print with line\nprint(\"First order Yu-Walker equation: \\n\")\nprint(\"The coefficent of a1 is \" + str(X1)+\"\\n\")\n\n#Second order Yu-Walker equation\n\n#Creating a 2x2 matrix with values from Yxx\nA2 = np.array([[Yxx[0],Yxx[1]], [Yxx[-1],Yxx[0]]])\n#Create a 1x2 matrix with values from B2\nB2 = np.array([[-Yxx[-1]],[0]])\nX2 = solve (A2,B2)\n#print with line\nprint(\"Second order Yu-Walker equation: \\n\")\nprint(\"The coefficient of a1 is \" + str(X2[0]) + \". The coefficient of a2 is \" + str(X2[1]) + \"\\n\")\n\n#Third order Yu-Walker equation\n#Creating a 3x3 matrix with values from Yxx\nA3 = np.array([[Yxx[0],Yxx[1],0], [Yxx[-1],Yxx[0],Yxx[1]], [0,Yxx[-1],Yxx[0]]])\n#Create a 1x3 matrix with values from B3\nB3 = np.array([[-Yxx[-1]],[0],[0]])\nX3 = solve (A3,B3)\n#print with line\nprint(\"Third order Yu-Walker equation: \\n\")\nprint(\"The coefficient of a1 is \" + str(X3[0]) + \". The coefficient of a2 is \" + str(X3[1]) + \". The coefficient of a3 is \" + str(X3[2]) + \"\\n\")\n#We need to find the corresponding variance for each of the coefficients\n#First order\nOf1 = np.sum([X1+1]*Yxx[:2])\n#Second order\nOf2 = 1*Yxx[0]+X2[0]*Yxx[1]+X2[1]*0\n#Third order\nOf3 = 1*Yxx[0]+X3[0]*Yxx[1]+X3[1]*0+X3[2]*0\nprint(\"The variance of the first order is \" + str(Of1) + \". The variance of the second order is \" + str(Of2) + \". 
The variance of the theird order is \" + str(Of3) + \"\\n\")\n\n#Oppgave 2d)\n#Find the expression of the PSD of the AR model\n\n#First order\nf = np.linspace(0,0.5,100)\ndenominatorOfRff1 = np.zeros(len(f))\nfor i in range(len(f)):\n sum = (X1) * np.e**(-1j*2*np.pi*f[i])\n denominatorOfRff1[i] = np.abs((1+sum))**2\nRff1 = Of1 / denominatorOfRff1\n#Second order\ndenominatorOfRff2 = np.zeros(len(f))\nfor i in range(len(f)):\n sum = (X2[0]) * np.e**(-1j*2*np.pi*f[i])+ (X2[1]) * np.e**(-1j*2*np.pi*f[i]*2)\n denominatorOfRff2[i] = np.abs((1+sum))**2\nRff2 = Of2 / denominatorOfRff2\n#Third order\ndenominatorOfRff3 = np.zeros(len(f))\nfor i in range(len(f)):\n sum = (X3[0]) * np.e**(-1j*2*np.pi*f[i]) + (X3[1]) * np.e**(-1j*2*np.pi*f[i]*2) + (X3[2]) * np.e**(-1j*2*np.pi*f[i]*3)\n denominatorOfRff3[i] = np.abs((1+sum))**2\nRff3 = Of3 / denominatorOfRff3\n\n#The actual PSD found in task 2b)\nTrueRff = -0.8*np.cos(2*np.pi*f)+1.16\n#Plotting the PSD\n# Create a single figure with three subplots\nfig, axes = plt.subplots(3, 1, figsize=(6, 12))\n\n# Plot each data in a separate subplot\naxes[0].plot(f, Rff1, label=\"First order\")\naxes[0].plot(f, np.abs(TrueRff), color='red', label=\"True PSD\")\naxes[0].set_xlabel(\"Frequency\")\naxes[0].set_ylabel(\"PSD\")\naxes[0].legend()\n\naxes[1].plot(f, Rff2, label=\"Second order\")\naxes[1].plot(f, TrueRff, color='red', label=\"True PSD\")\naxes[1].set_xlabel(\"Frequency\")\naxes[1].set_ylabel(\"PSD\")\naxes[1].legend()\n\naxes[2].plot(f, Rff3, label=\"Third order\")\naxes[2].plot(f, TrueRff, color='red', label=\"True PSD\")\naxes[2].set_xlabel(\"Frequency\")\naxes[2].set_ylabel(\"PSD\")\naxes[2].legend()\n\nplt.tight_layout() # Ensures subplots don't overlap\nplt.show()","repo_name":"Mamolb/DigSig","sub_path":"DIGSIG Øving 8/Oppgave2.py","file_name":"Oppgave2.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14993009489","text":"#importing the animal class\nfrom animal import Animal\n#definining Dog class\nclass Dog(Animal):\n\t#init params must match Animal __init__ params\n\tdef __init__(self, name):\n\t\tsuper(Dog, self).__init__(name) #<----pass name to Animal here\n\t\tself.health = 150\n\n\tdef pet(self):\n\t\tself.health += 5\n\t\treturn self\n#defining new instance of Dog\ndog1 = Dog('Crypto')\n#chaining class methods\ndog1.walk().walk().walk().run().run().run().pet().displayHealth()","repo_name":"nmccrory/py-playground","sub_path":"OOP/dog.py","file_name":"dog.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42655503440","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n\nfrom __future__ import print_function\n\nimport rospy\nimport numpy as np\nimport os,cv2\nimport face_recognition\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs.msg import Image, CompressedImage\nfrom vision_msgs.msg import BoundingBox2D\n\nimport socket\nimport sys\n\nimport select, termios, tty\nfrom geometry_msgs.msg import Twist\nimport roslib; roslib.load_manifest('teleop_twist_keyboard')\nimport threading\n\nimport struct\n \nHOST, PORT = \"172.16.38.29\", 19984\nmode = 'test'\nif mode == 'train':\n print(\"######### Start Training ##########\")\nelse:\n print(\"######### Start Testing ##########\")\n\n\nmsg = \"\"\"\nReading from the keyboard and Publishing to Twist!\n---------------------------\nMoving around:\n u i o\n j k l\n m , .\n\nFor 
Holonomic mode (strafing), hold down the shift key:\n---------------------------\n U I O\n J K L\n M < >\n\nt : up (+z)\nb : down (-z)\n\nanything else : stop\n\nq/z : increase/decrease max speeds by 10%\nw/x : increase/decrease only linear speed by 10%\ne/c : increase/decrease only angular speed by 10%\n\nCTRL-C to quit\n\"\"\"\n\nmoveBindings = {\n 'i':(1,0,0,0),\n 'o':(1,0,0,-1),\n 'j':(0,0,0,1),\n 'l':(0,0,0,-1),\n 'u':(1,0,0,1),\n ',':(-1,0,0,0),\n '.':(-1,0,0,1),\n 'm':(-1,0,0,-1),\n 'O':(1,-1,0,0),\n 'I':(1,0,0,0),\n 'J':(0,1,0,0),\n 'L':(0,-1,0,0),\n 'U':(1,1,0,0),\n '<':(-1,0,0,0),\n '>':(-1,-1,0,0),\n 'M':(-1,1,0,0),\n 't':(0,0,1,0),\n 'b':(0,0,-1,0),\n }\n\nspeedBindings={\n 'q':(1.1,1.1),\n 'z':(.9,.9),\n 'w':(1.1,1),\n 'x':(.9,1),\n 'e':(1,1.1),\n 'c':(1,.9),\n }\nsettings = termios.tcgetattr(sys.stdin)\n\nclass PublishThread(threading.Thread):\n def __init__(self, rate):\n super(PublishThread, self).__init__()\n self.publisher = rospy.Publisher('cmd_vel', Twist, queue_size = 1)\n self.x = 0.0\n self.y = 0.0\n self.z = 0.0\n self.th = 0.0\n self.speed = 0.0\n self.turn = 0.0\n self.condition = threading.Condition()\n self.done = False\n\n # Set timeout to None if rate is 0 (causes new_message to wait forever\n # for new data to publish)\n if rate != 0.0:\n self.timeout = 1.0 / rate\n else:\n self.timeout = None\n\n self.start()\n\n def wait_for_subscribers(self):\n i = 0\n while not rospy.is_shutdown() and self.publisher.get_num_connections() == 0:\n if i == 4:\n print(\"Waiting for subscriber to connect to {}\".format(self.publisher.name))\n rospy.sleep(0.5)\n i += 1\n i = i % 5\n if rospy.is_shutdown():\n raise Exception(\"Got shutdown request before subscribers connected\")\n\n def update(self, x, y, z, th, speed, turn):\n self.condition.acquire()\n self.x = x\n self.y = y\n self.z = z\n self.th = th\n self.speed = speed\n self.turn = turn\n # Notify publish thread that we have a new message.\n self.condition.notify()\n self.condition.release()\n\n def stop(self):\n self.done = True\n self.update(0, 0, 0, 0, 0, 0)\n self.join()\n\n def run(self):\n twist = Twist()\n while not self.done:\n self.condition.acquire()\n # Wait for a new message or timeout.\n self.condition.wait(self.timeout)\n\n # Copy state into twist message.\n twist.linear.x = self.x * self.speed\n twist.linear.y = self.y * self.speed\n twist.linear.z = self.z * self.speed\n twist.angular.x = 0\n twist.angular.y = 0\n twist.angular.z = self.th * self.turn\n\n self.condition.release()\n\n # Publish.\n self.publisher.publish(twist)\n\n # Publish stop message when thread exits.\n twist.linear.x = 0\n twist.linear.y = 0\n twist.linear.z = 0\n twist.angular.x = 0\n twist.angular.y = 0\n twist.angular.z = 0\n self.publisher.publish(twist)\n\n\ndef getKey(key_timeout):\n tty.setraw(sys.stdin.fileno())\n rlist, _, _ = select.select([sys.stdin], [], [], key_timeout)\n if rlist:\n key = sys.stdin.read(1)\n else:\n key = ''\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n return key\n\n\ndef vels(speed, turn):\n return \"currently:\\tspeed %s\\tturn %s \" % (speed,turn)\n\n\nclass NavigationClient():\n def __init__(self):\n rospy.init_node('video_language_navigation_node', log_level=rospy.INFO)\n self.camera_topic = rospy.get_param('~camera_topic', '/camera/rgb/image_raw')\n r = rospy.Rate(1) # 10\n rospy.on_shutdown(self.shutdown)\n\n self.sub_image_type = \"raw\"\n self.pub_face_type = \"raw\"\n self.cv_image = None\n\n if self.sub_image_type == \"compressed\":\n self.sub_image_original = 
rospy.Subscriber(self.camera_topic+'compressed', CompressedImage, self.ImageCallback, queue_size = 1)\n elif self.sub_image_type == \"raw\":\n self.sub_image_original = rospy.Subscriber(self.camera_topic, Image, self.ImageCallback, queue_size = 1)\n\n self.cvBridge = CvBridge()\n\n # ----------- Keybord teaching -----------\n speed = 1.0 # rospy.get_param(\"~speed\", 0.5)\n turn = 1.0 # rospy.get_param(\"~turn\", 1.0)\n repeat = rospy.get_param(\"~repeat_rate\", 0.0)\n key_timeout = rospy.get_param(\"~key_timeout\", 0.0)\n if key_timeout == 0.0:\n key_timeout = None\n\n self.pub_thread = PublishThread(repeat)\n\n x = 0\n y = 0\n z = 0\n th = 0\n status = 0\n\n self.pub_thread.wait_for_subscribers()\n self.pub_thread.update(x, y, z, th, speed, turn)\n\n prompt = input(\"Please input the language prompt: \")\n\n print(msg)\n print(vels(speed,turn))\n\n while not rospy.is_shutdown():\n # Resize frame of video to 1/4 size for faster face recognition processing\n if self.cv_image is not None:\n\n if mode == \"train\":\n key = getKey(key_timeout)\n if key in moveBindings.keys():\n x = moveBindings[key][0]\n y = moveBindings[key][1]\n z = moveBindings[key][2]\n th = moveBindings[key][3]\n elif key in speedBindings.keys():\n speed = speed * speedBindings[key][0]\n turn = turn * speedBindings[key][1]\n\n print(vels(speed,turn))\n if (status == 14):\n print(msg)\n status = (status + 1) % 15\n else:\n # Skip updating cmd_vel if key timeout and robot already\n # stopped.\n if key == '' and x == 0 and y == 0 and z == 0 and th == 0:\n continue\n x = 0\n y = 0\n z = 0\n th = 0\n if (key == '\\x03'):\n break\n \n self.pub_thread.update(x, y, z, th, speed, turn)\n\n small_frame = cv2.resize(self.cv_image, (0, 0), fx=0.25, fy=0.25)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n face_locations = face_recognition.face_locations(rgb_small_frame)\n \n # -------------------- Send to server --------------------\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((HOST, PORT))\n arrBuf = bytearray(b'\\xff\\xaa\\xff\\xaa')\n \n picBytes = cv2.imencode(\".jpg\", self.cv_image)[1].tobytes()\n \n picSize = len(picBytes)\n \n datalen = 64 + 1 + 128 + 4 + 4 + 4 + 4 + 4 + 4 + picSize\n \n arrBuf += bytearray(datalen.to_bytes(4, byteorder='little'))\n guid = 23458283482894382928948\n arrBuf += bytearray(guid.to_bytes(64, byteorder='little'))\n arrBuf += b'\\x00' if mode == \"train\" else b'\\x01'\n arrBuf += prompt.ljust(128, \"*\").encode('utf-8')\n arrBuf += bytearray(struct.pack('